/*
 *
 * Copyright (c) 2011, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */

#ifndef _HYPERV_H
#define _HYPERV_H

#include <uapi/linux/hyperv.h>
#include <uapi/asm/hyperv.h>

#include <linux/types.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>

#define MAX_PAGE_BUFFER_COUNT                           32
#define MAX_MULTIPAGE_BUFFER_COUNT                      32 /* 128K */

#pragma pack(push, 1)

/* Single-page buffer */
struct hv_page_buffer {
        u32 len;
        u32 offset;
        u64 pfn;
};

/* Multiple-page buffer */
struct hv_multipage_buffer {
        /* Length and Offset determine the # of pfns in the array */
        u32 len;
        u32 offset;
        u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT];
};

/*
 * Multiple-page buffer array; the pfn array is variable size:
 * The number of entries in the PFN array is determined by
 * "len" and "offset".
 */
struct hv_mpb_array {
        /* Length and Offset determine the # of pfns in the array */
        u32 len;
        u32 offset;
        u64 pfn_array[];
};
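
/*
 * Illustrative sketch (not part of the original header): the number of PFN
 * entries implied by "len" and "offset" is simply the number of pages spanned
 * by that byte range.  A hypothetical helper could compute it as:
 *
 *	static inline u32 hv_mpb_pfn_count(u32 offset, u32 len)
 *	{
 *		return (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
 *	}
 *
 * e.g. offset = 0x100 and len = 0x1000 span two 4K pages, so two PFNs.
 */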

/* 0x18 includes the proprietary packet header */
#define MAX_PAGE_BUFFER_PACKET          (0x18 +                 \
                                        (sizeof(struct hv_page_buffer) * \
                                         MAX_PAGE_BUFFER_COUNT))
#define MAX_MULTIPAGE_BUFFER_PACKET     (0x18 +                 \
                                         sizeof(struct hv_multipage_buffer))


#pragma pack(pop)

struct hv_ring_buffer {
        /* Offset in bytes from the start of ring data below */
        u32 write_index;

        /* Offset in bytes from the start of ring data below */
        u32 read_index;

        u32 interrupt_mask;

        /*
         * Win8 uses some of the reserved bits to implement
         * interrupt driven flow management. On the send side
         * we can request that the receiver interrupt the sender
         * when the ring transitions from being full to being able
         * to handle a message of size "pending_send_sz".
         *
         * Add necessary state for this enhancement.
         */
        u32 pending_send_sz;

        u32 reserved1[12];

        union {
                struct {
                        u32 feat_pending_send_sz:1;
                };
                u32 value;
        } feature_bits;

        /* Pad it to PAGE_SIZE so that data starts on page boundary */
        u8      reserved2[4028];

        /*
         * Ring data starts here + RingDataStartOffset
         * !!! DO NOT place any fields below this !!!
         */
        u8 buffer[0];
} __packed;

struct hv_ring_buffer_info {
        struct hv_ring_buffer *ring_buffer;
        u32 ring_size;                  /* Include the shared header */
        spinlock_t ring_lock;

        u32 ring_datasize;              /* < ring_size */
        u32 ring_data_startoffset;
        u32 priv_write_index;
        u32 priv_read_index;
        u32 cached_read_index;
};

/*
 * hv_get_ringbuffer_availbytes()
 *
 * Get the number of bytes available to read from and to write to
 * the specified ring buffer.
 */
static inline void
hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
                             u32 *read, u32 *write)
{
        u32 read_loc, write_loc, dsize;

        /* Capture the read/write indices before they change */
        read_loc = rbi->ring_buffer->read_index;
        write_loc = rbi->ring_buffer->write_index;
        dsize = rbi->ring_datasize;

        *write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
                read_loc - write_loc;
        *read = dsize - *write;
}

static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi)
{
        u32 read_loc, write_loc, dsize, read;

        dsize = rbi->ring_datasize;
        read_loc = rbi->ring_buffer->read_index;
        write_loc = READ_ONCE(rbi->ring_buffer->write_index);

        read = write_loc >= read_loc ? (write_loc - read_loc) :
                (dsize - read_loc) + write_loc;

        return read;
}

static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
{
        u32 read_loc, write_loc, dsize, write;

        dsize = rbi->ring_datasize;
        read_loc = READ_ONCE(rbi->ring_buffer->read_index);
        write_loc = rbi->ring_buffer->write_index;

        write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
                read_loc - write_loc;
        return write;
}

static inline u32 hv_get_cached_bytes_to_write(
        const struct hv_ring_buffer_info *rbi)
{
        u32 read_loc, write_loc, dsize, write;

        dsize = rbi->ring_datasize;
        read_loc = rbi->cached_read_index;
        write_loc = rbi->ring_buffer->write_index;

        write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
                read_loc - write_loc;
        return write;
}
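
/*
 * Worked example (illustrative, with made-up numbers): for a ring with
 * ring_datasize = 4096, read_index = 3500 and write_index = 500, the write
 * pointer has wrapped, so
 *
 *	bytes to read  = (4096 - 3500) + 500 = 1096
 *	bytes to write = 3500 - 500          = 3000
 *
 * and the two always sum to ring_datasize.  hv_get_cached_bytes_to_write()
 * does the same computation against cached_read_index, i.e. the read index
 * as it was before the current batch of in-place reads is committed.
 */
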
/*
 * VMBUS version is a 32 bit entity broken up into
 * two 16 bit quantities: major_number.minor_number.
 *
 * 0 . 13 (Windows Server 2008)
 * 1 . 1  (Windows 7)
 * 2 . 4  (Windows 8)
 * 3 . 0  (Windows 8 R2)
 * 4 . 0  (Windows 10)
 */

#define VERSION_WS2008  ((0 << 16) | (13))
#define VERSION_WIN7    ((1 << 16) | (1))
#define VERSION_WIN8    ((2 << 16) | (4))
#define VERSION_WIN8_1  ((3 << 16) | (0))
#define VERSION_WIN10   ((4 << 16) | (0))

#define VERSION_INVAL -1

#define VERSION_CURRENT VERSION_WIN10
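
/*
 * Illustrative sketch (helper names are hypothetical, not part of this
 * header): the major and minor halves of a version value can be recovered
 * with simple shifts and masks, e.g.
 *
 *	#define VMBUS_VERSION_MAJOR(v)	(((v) >> 16) & 0xffff)
 *	#define VMBUS_VERSION_MINOR(v)	((v) & 0xffff)
 *
 * so VMBUS_VERSION_MAJOR(VERSION_WIN8) == 2 and
 * VMBUS_VERSION_MINOR(VERSION_WIN8) == 4.
 */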

/* Maximum size of the pipe payload: 16K */
#define MAX_PIPE_DATA_PAYLOAD           (sizeof(u8) * 16384)

/* Define PipeMode values. */
#define VMBUS_PIPE_TYPE_BYTE            0x00000000
#define VMBUS_PIPE_TYPE_MESSAGE         0x00000004

/* The size of the user defined data buffer for non-pipe offers. */
#define MAX_USER_DEFINED_BYTES          120

/* The size of the user defined data buffer for pipe offers. */
#define MAX_PIPE_USER_DEFINED_BYTES     116

/*
 * At the center of the Channel Management library is the Channel Offer. This
 * struct contains the fundamental information about an offer.
 */
struct vmbus_channel_offer {
        uuid_le if_type;
        uuid_le if_instance;

        /*
         * These two fields are not currently used.
         */
        u64 reserved1;
        u64 reserved2;

        u16 chn_flags;
        u16 mmio_megabytes;             /* in bytes * 1024 * 1024 */

        union {
                /* Non-pipes: The user has MAX_USER_DEFINED_BYTES bytes. */
                struct {
                        unsigned char user_def[MAX_USER_DEFINED_BYTES];
                } std;

                /*
                 * Pipes:
                 * The following structure is an integrated pipe protocol, which
                 * is implemented on top of standard user-defined data. Pipe
                 * clients have MAX_PIPE_USER_DEFINED_BYTES left for their own
                 * use.
                 */
                struct {
                        u32  pipe_mode;
                        unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES];
                } pipe;
        } u;
        /*
         * The sub_channel_index is defined in win8.
         */
        u16 sub_channel_index;
        u16 reserved3;
} __packed;

/* Server Flags */
#define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE        1
#define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES    2
#define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS            4
#define VMBUS_CHANNEL_NAMED_PIPE_MODE                   0x10
#define VMBUS_CHANNEL_LOOPBACK_OFFER                    0x100
#define VMBUS_CHANNEL_PARENT_OFFER                      0x200
#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION    0x400
#define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER              0x2000

struct vmpacket_descriptor {
        u16 type;
        u16 offset8;
        u16 len8;
        u16 flags;
        u64 trans_id;
} __packed;

struct vmpacket_header {
        u32 prev_pkt_start_offset;
        struct vmpacket_descriptor descriptor;
} __packed;

struct vmtransfer_page_range {
        u32 byte_count;
        u32 byte_offset;
} __packed;

struct vmtransfer_page_packet_header {
        struct vmpacket_descriptor d;
        u16 xfer_pageset_id;
        u8  sender_owns_set;
        u8 reserved;
        u32 range_cnt;
        struct vmtransfer_page_range ranges[1];
} __packed;

struct vmgpadl_packet_header {
        struct vmpacket_descriptor d;
        u32 gpadl;
        u32 reserved;
} __packed;

struct vmadd_remove_transfer_page_set {
        struct vmpacket_descriptor d;
        u32 gpadl;
        u16 xfer_pageset_id;
        u16 reserved;
} __packed;

/*
 * This structure defines a range in guest physical space that can be made to
 * look virtually contiguous.
 */
struct gpa_range {
        u32 byte_count;
        u32 byte_offset;
        u64 pfn_array[0];
};

/*
 * This is the format for an Establish Gpadl packet, which contains a handle by
 * which this GPADL will be known and a set of GPA ranges associated with it.
 * This can be converted to an MDL by the guest OS.  If there are multiple GPA
 * ranges, then the resulting MDL will be "chained," representing multiple VA
 * ranges.
 */
struct vmestablish_gpadl {
        struct vmpacket_descriptor d;
        u32 gpadl;
        u32 range_cnt;
        struct gpa_range range[1];
} __packed;

/*
 * This is the format for a Teardown Gpadl packet, which indicates that the
 * GPADL handle in the Establish Gpadl packet will never be referenced again.
 */
struct vmteardown_gpadl {
        struct vmpacket_descriptor d;
        u32 gpadl;
        u32 reserved;   /* for alignment to an 8-byte boundary */
} __packed;

/*
 * This is the format for a GPA-Direct packet, which contains a set of GPA
 * ranges, in addition to commands and/or data.
 */
struct vmdata_gpa_direct {
        struct vmpacket_descriptor d;
        u32 reserved;
        u32 range_cnt;
        struct gpa_range range[1];
} __packed;

/* This is the format for an Additional Data Packet. */
struct vmadditional_data {
        struct vmpacket_descriptor d;
        u64 total_bytes;
        u32 offset;
        u32 byte_cnt;
        unsigned char data[1];
} __packed;

union vmpacket_largest_possible_header {
        struct vmpacket_descriptor simple_hdr;
        struct vmtransfer_page_packet_header xfer_page_hdr;
        struct vmgpadl_packet_header gpadl_hdr;
        struct vmadd_remove_transfer_page_set add_rm_xfer_page_hdr;
        struct vmestablish_gpadl establish_gpadl_hdr;
        struct vmteardown_gpadl teardown_gpadl_hdr;
        struct vmdata_gpa_direct data_gpa_direct_hdr;
};

#define VMPACKET_DATA_START_ADDRESS(__packet)   \
        (void *)(((unsigned char *)__packet) +  \
         ((struct vmpacket_descriptor *)__packet)->offset8 * 8)

#define VMPACKET_DATA_LENGTH(__packet)          \
        ((((struct vmpacket_descriptor *)__packet)->len8 -      \
          ((struct vmpacket_descriptor *)__packet)->offset8) * 8)

#define VMPACKET_TRANSFER_MODE(__packet)        \
        (((struct vmpacket_descriptor *)__packet)->type)
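
/*
 * Illustrative sketch: given a struct vmpacket_descriptor *desc pointing at a
 * received packet, the payload location and size follow from offset8/len8,
 * which count 8-byte units.  For example, with desc->offset8 == 3 and
 * desc->len8 == 5 the payload starts 24 bytes into the packet and is
 * (5 - 3) * 8 == 16 bytes long:
 *
 *	void *data = VMPACKET_DATA_START_ADDRESS(desc);
 *	u32 data_len = VMPACKET_DATA_LENGTH(desc);
 */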

enum vmbus_packet_type {
        VM_PKT_INVALID                          = 0x0,
        VM_PKT_SYNCH                            = 0x1,
        VM_PKT_ADD_XFER_PAGESET                 = 0x2,
        VM_PKT_RM_XFER_PAGESET                  = 0x3,
        VM_PKT_ESTABLISH_GPADL                  = 0x4,
        VM_PKT_TEARDOWN_GPADL                   = 0x5,
        VM_PKT_DATA_INBAND                      = 0x6,
        VM_PKT_DATA_USING_XFER_PAGES            = 0x7,
        VM_PKT_DATA_USING_GPADL                 = 0x8,
        VM_PKT_DATA_USING_GPA_DIRECT            = 0x9,
        VM_PKT_CANCEL_REQUEST                   = 0xa,
        VM_PKT_COMP                             = 0xb,
        VM_PKT_DATA_USING_ADDITIONAL_PKT        = 0xc,
        VM_PKT_ADDITIONAL_DATA                  = 0xd
};

#define VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED     1


/* Version 1 messages */
enum vmbus_channel_message_type {
        CHANNELMSG_INVALID                      =  0,
        CHANNELMSG_OFFERCHANNEL                 =  1,
        CHANNELMSG_RESCIND_CHANNELOFFER         =  2,
        CHANNELMSG_REQUESTOFFERS                =  3,
        CHANNELMSG_ALLOFFERS_DELIVERED          =  4,
        CHANNELMSG_OPENCHANNEL                  =  5,
        CHANNELMSG_OPENCHANNEL_RESULT           =  6,
        CHANNELMSG_CLOSECHANNEL                 =  7,
        CHANNELMSG_GPADL_HEADER                 =  8,
        CHANNELMSG_GPADL_BODY                   =  9,
        CHANNELMSG_GPADL_CREATED                = 10,
        CHANNELMSG_GPADL_TEARDOWN               = 11,
        CHANNELMSG_GPADL_TORNDOWN               = 12,
        CHANNELMSG_RELID_RELEASED               = 13,
        CHANNELMSG_INITIATE_CONTACT             = 14,
        CHANNELMSG_VERSION_RESPONSE             = 15,
        CHANNELMSG_UNLOAD                       = 16,
        CHANNELMSG_UNLOAD_RESPONSE              = 17,
        CHANNELMSG_18                           = 18,
        CHANNELMSG_19                           = 19,
        CHANNELMSG_20                           = 20,
        CHANNELMSG_TL_CONNECT_REQUEST           = 21,
        CHANNELMSG_COUNT
};

struct vmbus_channel_message_header {
        enum vmbus_channel_message_type msgtype;
        u32 padding;
} __packed;

/* Query VMBus Version parameters */
struct vmbus_channel_query_vmbus_version {
        struct vmbus_channel_message_header header;
        u32 version;
} __packed;

/* VMBus Version Supported parameters */
struct vmbus_channel_version_supported {
        struct vmbus_channel_message_header header;
        u8 version_supported;
} __packed;

/* Offer Channel parameters */
struct vmbus_channel_offer_channel {
        struct vmbus_channel_message_header header;
        struct vmbus_channel_offer offer;
        u32 child_relid;
        u8 monitorid;
        /*
         * win7 and beyond splits this field into a bit field.
         */
        u8 monitor_allocated:1;
        u8 reserved:7;
        /*
         * These are new fields added in win7 and later.
         * Do not access these fields without checking the
         * negotiated protocol.
         *
         * If "is_dedicated_interrupt" is set, we must not set the
         * associated bit in the channel bitmap while sending the
         * interrupt to the host.
         *
         * connection_id is to be used in signaling the host.
         */
        u16 is_dedicated_interrupt:1;
        u16 reserved1:15;
        u32 connection_id;
} __packed;

/* Rescind Offer parameters */
struct vmbus_channel_rescind_offer {
        struct vmbus_channel_message_header header;
        u32 child_relid;
} __packed;

/*
 * Request Offer -- no parameters, SynIC message contains the partition ID
 * Set Snoop -- no parameters, SynIC message contains the partition ID
 * Clear Snoop -- no parameters, SynIC message contains the partition ID
 * All Offers Delivered -- no parameters, SynIC message contains the partition
 *                         ID
 * Flush Client -- no parameters, SynIC message contains the partition ID
 */

/* Open Channel parameters */
struct vmbus_channel_open_channel {
        struct vmbus_channel_message_header header;

        /* Identifies the specific VMBus channel that is being opened. */
        u32 child_relid;

        /* ID making a particular open request at a channel offer unique. */
        u32 openid;

        /* GPADL for the channel's ring buffer. */
        u32 ringbuffer_gpadlhandle;

        /*
         * Starting with win8, this field will be used to specify
         * the target virtual processor on which to deliver the interrupt for
         * the host to guest communication.
         * Prior to win8, incoming channel interrupts would only
         * be delivered on cpu 0. Setting this value to 0 would
         * preserve the earlier behavior.
         */
        u32 target_vp;

        /*
         * The upstream ring buffer begins at offset zero in the memory
         * described by RingBufferGpadlHandle. The downstream ring buffer
         * follows it at this offset (in pages).
         */
        u32 downstream_ringbuffer_pageoffset;

        /* User-specific data to be passed along to the server endpoint. */
        unsigned char userdata[MAX_USER_DEFINED_BYTES];
} __packed;

/* Open Channel Result parameters */
struct vmbus_channel_open_result {
        struct vmbus_channel_message_header header;
        u32 child_relid;
        u32 openid;
        u32 status;
} __packed;

/* Close channel parameters */
struct vmbus_channel_close_channel {
        struct vmbus_channel_message_header header;
        u32 child_relid;
} __packed;

/* Channel Message GPADL */
#define GPADL_TYPE_RING_BUFFER          1
#define GPADL_TYPE_SERVER_SAVE_AREA     2
#define GPADL_TYPE_TRANSACTION          8

/*
 * The number of PFNs in a GPADL message is defined by the number of
 * pages that would be spanned by ByteCount and ByteOffset.  If the
 * implied number of PFNs won't fit in this packet, there will be a
 * follow-up packet that contains more.
 */
struct vmbus_channel_gpadl_header {
        struct vmbus_channel_message_header header;
        u32 child_relid;
        u32 gpadl;
        u16 range_buflen;
        u16 rangecount;
        struct gpa_range range[0];
} __packed;

/* This is the followup packet that contains more PFNs. */
struct vmbus_channel_gpadl_body {
        struct vmbus_channel_message_header header;
        u32 msgnumber;
        u32 gpadl;
        u64 pfn[0];
} __packed;

struct vmbus_channel_gpadl_created {
        struct vmbus_channel_message_header header;
        u32 child_relid;
        u32 gpadl;
        u32 creation_status;
} __packed;

struct vmbus_channel_gpadl_teardown {
        struct vmbus_channel_message_header header;
        u32 child_relid;
        u32 gpadl;
} __packed;

struct vmbus_channel_gpadl_torndown {
        struct vmbus_channel_message_header header;
        u32 gpadl;
} __packed;

struct vmbus_channel_relid_released {
        struct vmbus_channel_message_header header;
        u32 child_relid;
} __packed;

struct vmbus_channel_initiate_contact {
        struct vmbus_channel_message_header header;
        u32 vmbus_version_requested;
        u32 target_vcpu; /* The VCPU the host should respond to */
        u64 interrupt_page;
        u64 monitor_page1;
        u64 monitor_page2;
} __packed;

/* Hyper-V socket: guest's connect()-ing to host */
struct vmbus_channel_tl_connect_request {
        struct vmbus_channel_message_header header;
        uuid_le guest_endpoint_id;
        uuid_le host_service_id;
} __packed;

struct vmbus_channel_version_response {
        struct vmbus_channel_message_header header;
        u8 version_supported;
} __packed;

enum vmbus_channel_state {
        CHANNEL_OFFER_STATE,
        CHANNEL_OPENING_STATE,
        CHANNEL_OPEN_STATE,
        CHANNEL_OPENED_STATE,
};

/*
 * Represents each channel msg on the vmbus connection. This is a
 * variable-size data structure depending on the msg type itself.
 */
struct vmbus_channel_msginfo {
        /* Bookkeeping stuff */
        struct list_head msglistentry;

        /* So far, this is only used to handle gpadl body message */
        struct list_head submsglist;

        /* Synchronize the request/response if needed */
        struct completion  waitevent;
        struct vmbus_channel *waiting_channel;
        union {
                struct vmbus_channel_version_supported version_supported;
                struct vmbus_channel_open_result open_result;
                struct vmbus_channel_gpadl_torndown gpadl_torndown;
                struct vmbus_channel_gpadl_created gpadl_created;
                struct vmbus_channel_version_response version_response;
        } response;

        u32 msgsize;
        /*
         * The channel message that goes out on the "wire".
         * It will contain at minimum the VMBUS_CHANNEL_MESSAGE_HEADER header
         */
        unsigned char msg[0];
};

struct vmbus_close_msg {
        struct vmbus_channel_msginfo info;
        struct vmbus_channel_close_channel msg;
};

/* Define connection identifier type. */
union hv_connection_id {
        u32 asu32;
        struct {
                u32 id:24;
                u32 reserved:8;
        } u;
};

/* Definition of the hv_signal_event hypercall input structure. */
struct hv_input_signal_event {
        union hv_connection_id connectionid;
        u16 flag_number;
        u16 rsvdz;
};

struct hv_input_signal_event_buffer {
        u64 align8;
        struct hv_input_signal_event event;
};

enum hv_numa_policy {
        HV_BALANCED = 0,
        HV_LOCALIZED,
};

enum vmbus_device_type {
        HV_IDE = 0,
        HV_SCSI,
        HV_FC,
        HV_NIC,
        HV_ND,
        HV_PCIE,
        HV_FB,
        HV_KBD,
        HV_MOUSE,
        HV_KVP,
        HV_TS,
        HV_HB,
        HV_SHUTDOWN,
        HV_FCOPY,
        HV_BACKUP,
        HV_DM,
        HV_UNKNOWN,
};

struct vmbus_device {
        u16  dev_type;
        uuid_le guid;
        bool perf_device;
};

struct vmbus_channel {
        struct list_head listentry;

        struct hv_device *device_obj;

        enum vmbus_channel_state state;

        struct vmbus_channel_offer_channel offermsg;
        /*
         * These are based on the OfferMsg.MonitorId.
         * Save it here for easy access.
         */
        u8 monitor_grp;
        u8 monitor_bit;

        bool rescind; /* got rescind msg */

        u32 ringbuffer_gpadlhandle;

        /* Allocated memory for ring buffer */
        void *ringbuffer_pages;
        u32 ringbuffer_pagecount;
        struct hv_ring_buffer_info outbound;    /* send to parent */
        struct hv_ring_buffer_info inbound;     /* receive from parent */
        spinlock_t inbound_lock;

        struct vmbus_close_msg close_msg;

        /* Channel callbacks are invoked in softirq context */
        struct tasklet_struct callback_event;
        void (*onchannel_callback)(void *context);
        void *channel_callback_context;

        /*
         * A channel can be marked for one of three modes of reading:
         *   BATCHED - callback called from tasklet and should read
         *            channel until empty. Interrupts from the host
         *            are masked while read is in process (default).
         *   DIRECT - callback called from tasklet (softirq).
         *   ISR - callback called in interrupt context and must
         *         invoke its own deferred processing.
         *         Host interrupts are disabled and must be re-enabled
         *         when ring is empty.
         */
        enum hv_callback_mode {
                HV_CALL_BATCHED,
                HV_CALL_DIRECT,
                HV_CALL_ISR
        } callback_mode;

        bool is_dedicated_interrupt;
        struct hv_input_signal_event_buffer sig_buf;
        struct hv_input_signal_event *sig_event;

        /*
         * Starting with win8, this field will be used to specify
         * the target virtual processor on which to deliver the interrupt for
         * the host to guest communication.
         * Prior to win8, incoming channel interrupts would only
         * be delivered on cpu 0. Setting this value to 0 would
         * preserve the earlier behavior.
         */
        u32 target_vp;
        /* The corresponding CPUID in the guest */
        u32 target_cpu;
        /*
         * State to manage the CPU affiliation of channels.
         */
        struct cpumask alloced_cpus_in_node;
        int numa_node;
        /*
         * Support for sub-channels. For high performance devices,
         * it will be useful to have multiple sub-channels to support
         * a scalable communication infrastructure with the host.
         * The support for sub-channels is implemented as an extension
         * to the current infrastructure.
         * The initial offer is considered the primary channel and this
         * offer message will indicate if the host supports sub-channels.
         * The guest is free to ask for sub-channels to be offered and can
         * open these sub-channels as a normal "primary" channel. However,
         * all sub-channels will have the same type and instance guids as the
         * primary channel. Requests sent on a given channel will result in a
         * response on the same channel.
         */

        /*
         * Sub-channel creation callback. This callback will be called in
         * process context when a sub-channel offer is received from the host.
         * The guest can open the sub-channel in the context of this callback.
         */
        void (*sc_creation_callback)(struct vmbus_channel *new_sc);

        /*
         * Channel rescind callback. Some channels (the hvsock ones) need to
         * register a callback which is invoked in vmbus_onoffer_rescind().
         */
        void (*chn_rescind_callback)(struct vmbus_channel *channel);

        /*
         * The spinlock to protect the structure. It is being used to protect
         * test-and-set access to various attributes of the structure as well
         * as all sc_list operations.
         */
        spinlock_t lock;
        /*
         * All Sub-channels of a primary channel are linked here.
         */
        struct list_head sc_list;
        /*
         * Current number of sub-channels.
         */
        int num_sc;
        /*
         * Number of a sub-channel (position within sc_list) which is supposed
         * to be used as the next outgoing channel.
         */
        int next_oc;
        /*
         * The primary channel this sub-channel belongs to.
         * This will be NULL for the primary channel.
         */
        struct vmbus_channel *primary_channel;
        /*
         * Support per-channel state for use by vmbus drivers.
         */
        void *per_channel_state;
        /*
         * To support per-cpu lookup mapping of relid to channel,
         * link up channels based on their CPU affinity.
         */
        struct list_head percpu_list;

        /*
         * Defer freeing channel until after all cpu's have
         * gone through grace period.
         */
        struct rcu_head rcu;

        /*
         * For performance critical channels (storage, networking
         * etc.), Hyper-V has a mechanism to enhance the throughput
         * at the expense of latency:
         * When the host is to be signaled, we just set a bit in a shared page
         * and this bit will be inspected by the hypervisor within a certain
         * window and if the bit is set, the host will be signaled. The window
         * of time is the monitor latency - currently around 100 usecs. This
         * mechanism improves throughput by:
         *
         * A) Making the host more efficient - each time it wakes up,
         *    potentially it will process more packets. The
         *    monitor latency allows a batch to build up.
         * B) By deferring the hypercall to signal, we will also minimize
         *    the interrupts.
         *
         * Clearly, these optimizations improve throughput at the expense of
         * latency. Furthermore, since the channel is shared for both
         * control and data messages, control messages currently suffer
         * unnecessary latency adversely impacting performance and boot
         * time. To fix this issue, permit tagging the channel as being
         * in "low latency" mode. In this mode, we will bypass the monitor
         * mechanism.
         */
        bool low_latency;

        /*
         * NUMA distribution policy:
         * We support two policies:
         * 1) Balanced: Here all performance critical channels are
         *    distributed evenly amongst all the NUMA nodes.
         *    This policy will be the default policy.
         * 2) Localized: All channels of a given instance of a
         *    performance critical service will be assigned CPUs
         *    within a selected NUMA node.
         */
        enum hv_numa_policy affinity_policy;

};

static inline bool is_hvsock_channel(const struct vmbus_channel *c)
{
        return !!(c->offermsg.offer.chn_flags &
                  VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
}

static inline void set_channel_affinity_state(struct vmbus_channel *c,
                                              enum hv_numa_policy policy)
{
        c->affinity_policy = policy;
}

static inline void set_channel_read_mode(struct vmbus_channel *c,
                                        enum hv_callback_mode mode)
{
        c->callback_mode = mode;
}
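
/*
 * Illustrative sketch: a driver typically picks its read mode before opening
 * the channel.  For instance, a hypothetical latency-sensitive driver might
 * switch off the default batched behaviour in its probe routine:
 *
 *	set_channel_read_mode(dev->channel, HV_CALL_DIRECT);
 *
 * HV_CALL_BATCHED remains the default and is appropriate when the callback
 * drains the ring until it is empty.
 */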

static inline void set_per_channel_state(struct vmbus_channel *c, void *s)
{
        c->per_channel_state = s;
}

static inline void *get_per_channel_state(struct vmbus_channel *c)
{
        return c->per_channel_state;
}

static inline void set_channel_pending_send_size(struct vmbus_channel *c,
                                                 u32 size)
{
        c->outbound.ring_buffer->pending_send_sz = size;
}

static inline void set_low_latency_mode(struct vmbus_channel *c)
{
        c->low_latency = true;
}

static inline void clear_low_latency_mode(struct vmbus_channel *c)
{
        c->low_latency = false;
}

void vmbus_onmessage(void *context);

int vmbus_request_offers(void);

/*
 * APIs for managing sub-channels.
 */

void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
                        void (*sc_cr_cb)(struct vmbus_channel *new_sc));

void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
                void (*chn_rescind_cb)(struct vmbus_channel *));

/*
 * Retrieve the (sub) channel on which to send an outgoing request.
 * When a primary channel has multiple sub-channels, we choose a
 * channel whose VCPU binding is closest to the VCPU on which
 * this call is being made.
 */
struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary);

/*
 * Check if sub-channels have already been offered. This API will be useful
 * when the driver is unloaded after establishing sub-channels. In this case,
 * when the driver is re-loaded, the driver would have to check if the
 * subchannels have already been established before attempting to request
 * the creation of sub-channels.
 * This function returns TRUE to indicate that subchannels have already been
 * created.
 * This function should be invoked after setting the callback function for
 * sub-channel creation.
 */
bool vmbus_are_subchannels_present(struct vmbus_channel *primary);
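
/*
 * Illustrative sketch of the sub-channel flow (driver-side names are
 * hypothetical): the driver registers a creation callback on the primary
 * channel, checks whether sub-channels were already offered, and later
 * spreads its traffic with vmbus_get_outgoing_channel():
 *
 *	vmbus_set_sc_create_callback(primary, my_sc_open);
 *	if (vmbus_are_subchannels_present(primary))
 *		pr_info("sub-channels already offered\n");
 *
 *	...
 *	struct vmbus_channel *out = vmbus_get_outgoing_channel(primary);
 *	vmbus_sendpacket(out, req, sizeof(*req), req_id,
 *			 VM_PKT_DATA_INBAND, 0);
 */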

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_page_buffer {
        u16 type;
        u16 dataoffset8;
        u16 length8;
        u16 flags;
        u64 transactionid;
        u32 reserved;
        u32 rangecount;
        struct hv_page_buffer range[MAX_PAGE_BUFFER_COUNT];
} __packed;

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_multipage_buffer {
        u16 type;
        u16 dataoffset8;
        u16 length8;
        u16 flags;
        u64 transactionid;
        u32 reserved;
        u32 rangecount;         /* Always 1 in this case */
        struct hv_multipage_buffer range;
} __packed;

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_packet_mpb_array {
        u16 type;
        u16 dataoffset8;
        u16 length8;
        u16 flags;
        u64 transactionid;
        u32 reserved;
        u32 rangecount;         /* Always 1 in this case */
        struct hv_mpb_array range;
} __packed;


extern int vmbus_open(struct vmbus_channel *channel,
                            u32 send_ringbuffersize,
                            u32 recv_ringbuffersize,
                            void *userdata,
                            u32 userdatalen,
                            void(*onchannel_callback)(void *context),
                            void *context);
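
/*
 * Illustrative sketch (ring sizes and callback name are made up): a driver's
 * probe routine typically opens its channel with page-sized send/receive
 * rings and a callback that drains the inbound ring:
 *
 *	static void my_onchannel_cb(void *context)
 *	{
 *		struct hv_device *dev = context;
 *		// read packets with vmbus_recvpacket() or the raw helpers
 *	}
 *
 *	ret = vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE,
 *			 NULL, 0, my_onchannel_cb, dev);
 *	if (ret)
 *		return ret;
 */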

extern void vmbus_close(struct vmbus_channel *channel);

extern int vmbus_sendpacket(struct vmbus_channel *channel,
                                  void *buffer,
                                  u32 bufferLen,
                                  u64 requestid,
                                  enum vmbus_packet_type type,
                                  u32 flags);

extern int vmbus_sendpacket_ctl(struct vmbus_channel *channel,
                                  void *buffer,
                                  u32 bufferLen,
                                  u64 requestid,
                                  enum vmbus_packet_type type,
                                  u32 flags);

extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
                                            struct hv_page_buffer pagebuffers[],
                                            u32 pagecount,
                                            void *buffer,
                                            u32 bufferlen,
                                            u64 requestid);

extern int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
                                           struct hv_page_buffer pagebuffers[],
                                           u32 pagecount,
                                           void *buffer,
                                           u32 bufferlen,
                                           u64 requestid,
                                           u32 flags);

extern int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
                                        struct hv_multipage_buffer *mpb,
                                        void *buffer,
                                        u32 bufferlen,
                                        u64 requestid);

extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
                                     struct vmbus_packet_mpb_array *mpb,
                                     u32 desc_size,
                                     void *buffer,
                                     u32 bufferlen,
                                     u64 requestid);

extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
                                      void *kbuffer,
                                      u32 size,
                                      u32 *gpadl_handle);

extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
                                     u32 gpadl_handle);

extern int vmbus_recvpacket(struct vmbus_channel *channel,
                                  void *buffer,
                                  u32 bufferlen,
                                  u32 *buffer_actual_len,
                                  u64 *requestid);

extern int vmbus_recvpacket_raw(struct vmbus_channel *channel,
                                     void *buffer,
                                     u32 bufferlen,
                                     u32 *buffer_actual_len,
                                     u64 *requestid);


extern void vmbus_ontimer(unsigned long data);

/* Base driver object */
struct hv_driver {
        const char *name;

        /*
         * An hvsock offer, which has a VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER
         * channel flag, actually doesn't mean a synthetic device because the
         * offer's if_type/if_instance can change for every new hvsock
         * connection.
         *
         * However, to facilitate the notification of new-offer/rescind-offer
         * from vmbus driver to hvsock driver, we can handle hvsock offer as
         * a special vmbus device, and hence we need the below flag to
         * indicate if the driver is the hvsock driver or not: we need to
         * specially treat the hvsock offer & driver in vmbus_match().
         */
        bool hvsock;

        /* the device type supported by this driver */
        uuid_le dev_type;
        const struct hv_vmbus_device_id *id_table;

        struct device_driver driver;

        /* dynamic device GUID's */
        struct  {
                spinlock_t lock;
                struct list_head list;
        } dynids;

        int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *);
        int (*remove)(struct hv_device *);
        void (*shutdown)(struct hv_device *);

};

/* Base device object */
struct hv_device {
        /* the device type id of this device */
        uuid_le dev_type;

        /* the device instance id of this device */
        uuid_le dev_instance;
        u16 vendor_id;
        u16 device_id;

        struct device device;

        struct vmbus_channel *channel;
};


static inline struct hv_device *device_to_hv_device(struct device *d)
{
        return container_of(d, struct hv_device, device);
}

static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d)
{
        return container_of(d, struct hv_driver, driver);
}

static inline void hv_set_drvdata(struct hv_device *dev, void *data)
{
        dev_set_drvdata(&dev->device, data);
}

static inline void *hv_get_drvdata(struct hv_device *dev)
{
        return dev_get_drvdata(&dev->device);
}

/* Vmbus interface */
#define vmbus_driver_register(driver)   \
        __vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
                                         struct module *owner,
                                         const char *mod_name);
void vmbus_driver_unregister(struct hv_driver *hv_driver);
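
/*
 * Illustrative sketch of a minimal driver registration (names and the GUID
 * choice are hypothetical; error handling trimmed):
 *
 *	static const struct hv_vmbus_device_id my_id_table[] = {
 *		{ HV_MOUSE_GUID, },
 *		{ },
 *	};
 *
 *	static struct hv_driver my_drv = {
 *		.name		= "my_hv_drv",
 *		.id_table	= my_id_table,
 *		.probe		= my_probe,
 *		.remove		= my_remove,
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		return vmbus_driver_register(&my_drv);
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		vmbus_driver_unregister(&my_drv);
 *	}
 */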

void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);

int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
                        resource_size_t min, resource_size_t max,
                        resource_size_t size, resource_size_t align,
                        bool fb_overlap_ok);
void vmbus_free_mmio(resource_size_t start, resource_size_t size);
int vmbus_cpu_number_to_vp_number(int cpu_number);
u64 hv_do_hypercall(u64 control, void *input, void *output);

/*
 * GUID definitions of various offer types - services offered to the guest.
 */

/*
 * Network GUID
 * {f8615163-df3e-46c5-913f-f2d2f965ed0e}
 */
#define HV_NIC_GUID \
        .guid = UUID_LE(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \
                        0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e)

/*
 * IDE GUID
 * {32412632-86cb-44a2-9b5c-50d1417354f5}
 */
#define HV_IDE_GUID \
        .guid = UUID_LE(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \
                        0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)

/*
 * SCSI GUID
 * {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}
 */
#define HV_SCSI_GUID \
        .guid = UUID_LE(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \
                        0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)

/*
 * Shutdown GUID
 * {0e0b6031-5213-4934-818b-38d90ced39db}
 */
#define HV_SHUTDOWN_GUID \
        .guid = UUID_LE(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \
                        0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb)

/*
 * Time Synch GUID
 * {9527E630-D0AE-497b-ADCE-E80AB0175CAF}
 */
#define HV_TS_GUID \
        .guid = UUID_LE(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \
                        0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)

/*
 * Heartbeat GUID
 * {57164f39-9115-4e78-ab55-382f3bd5422d}
 */
#define HV_HEART_BEAT_GUID \
        .guid = UUID_LE(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \
                        0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)

/*
 * KVP GUID
 * {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}
 */
#define HV_KVP_GUID \
        .guid = UUID_LE(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \
                        0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6)

/*
 * Dynamic memory GUID
 * {525074dc-8985-46e2-8057-a307dc18a502}
 */
#define HV_DM_GUID \
        .guid = UUID_LE(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \
                        0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)

/*
 * Mouse GUID
 * {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}
 */
#define HV_MOUSE_GUID \
        .guid = UUID_LE(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \
                        0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a)

/*
 * Keyboard GUID
 * {f912ad6d-2b17-48ea-bd65-f927a61c7684}
 */
#define HV_KBD_GUID \
        .guid = UUID_LE(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \
                        0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84)

/*
 * VSS (Backup/Restore) GUID
 */
#define HV_VSS_GUID \
        .guid = UUID_LE(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \
                        0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40)
/*
 * Synthetic Video GUID
 * {DA0A7802-E377-4aac-8E77-0558EB1073F8}
 */
#define HV_SYNTHVID_GUID \
        .guid = UUID_LE(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \
                        0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8)

/*
 * Synthetic FC GUID
 * {2f9bcc4a-0069-4af3-b76b-6fd0be528cda}
 */
#define HV_SYNTHFC_GUID \
        .guid = UUID_LE(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \
                        0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda)

/*
 * Guest File Copy Service
 * {34D14BE3-DEE4-41c8-9AE7-6B174977C192}
 */

#define HV_FCOPY_GUID \
        .guid = UUID_LE(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \
                        0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92)

/*
 * NetworkDirect. This is the guest RDMA service.
 * {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}
 */
#define HV_ND_GUID \
        .guid = UUID_LE(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \
                        0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01)

/*
 * PCI Express Pass Through
 * {44C4F61D-4444-4400-9D52-802E27EDE19F}
 */

#define HV_PCIE_GUID \
        .guid = UUID_LE(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \
                        0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)

/*
 * Linux doesn't support these 3 devices: the first two are for
 * Automatic Virtual Machine Activation, and the third is for
 * Remote Desktop Virtualization.
 * {f8e65716-3cb3-4a06-9a60-1889c5cccab5}
 * {3375baf4-9e15-4b30-b765-67acb10d607b}
 * {276aacf4-ac15-426c-98dd-7521ad3f01fe}
 */

#define HV_AVMA1_GUID \
        .guid = UUID_LE(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \
                        0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5)

#define HV_AVMA2_GUID \
        .guid = UUID_LE(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \
                        0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b)

#define HV_RDV_GUID \
        .guid = UUID_LE(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \
                        0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe)

/*
 * Common header for Hyper-V ICs
 */

#define ICMSGTYPE_NEGOTIATE             0
#define ICMSGTYPE_HEARTBEAT             1
#define ICMSGTYPE_KVPEXCHANGE           2
#define ICMSGTYPE_SHUTDOWN              3
#define ICMSGTYPE_TIMESYNC              4
#define ICMSGTYPE_VSS                   5

#define ICMSGHDRFLAG_TRANSACTION        1
#define ICMSGHDRFLAG_REQUEST            2
#define ICMSGHDRFLAG_RESPONSE           4


/*
 * While we want to handle util services as regular devices,
 * there is only one instance of each of these services; so
 * we statically allocate the service specific state.
 */

struct hv_util_service {
        u8 *recv_buffer;
        void *channel;
        void (*util_cb)(void *);
        int (*util_init)(struct hv_util_service *);
        void (*util_deinit)(void);
};

struct vmbuspipe_hdr {
        u32 flags;
        u32 msgsize;
} __packed;

struct ic_version {
        u16 major;
        u16 minor;
} __packed;

struct icmsg_hdr {
        struct ic_version icverframe;
        u16 icmsgtype;
        struct ic_version icvermsg;
        u16 icmsgsize;
        u32 status;
        u8 ictransaction_id;
        u8 icflags;
        u8 reserved[2];
} __packed;

struct icmsg_negotiate {
        u16 icframe_vercnt;
        u16 icmsg_vercnt;
        u32 reserved;
        struct ic_version icversion_data[1]; /* any size array */
} __packed;

struct shutdown_msg_data {
        u32 reason_code;
        u32 timeout_seconds;
        u32 flags;
        u8  display_message[2048];
} __packed;

struct heartbeat_msg_data {
        u64 seq_num;
        u32 reserved[8];
} __packed;

/* Time Sync IC defs */
#define ICTIMESYNCFLAG_PROBE    0
#define ICTIMESYNCFLAG_SYNC     1
#define ICTIMESYNCFLAG_SAMPLE   2

#ifdef __x86_64__
#define WLTIMEDELTA     116444736000000000L     /* in 100ns unit */
#else
#define WLTIMEDELTA     116444736000000000LL
#endif

struct ictimesync_data {
        u64 parenttime;
        u64 childtime;
        u64 roundtriptime;
        u8 flags;
} __packed;

struct ictimesync_ref_data {
        u64 parenttime;
        u64 vmreferencetime;
        u8 flags;
        char leapflags;
        char stratum;
        u8 reserved[3];
} __packed;

struct hyperv_service_callback {
        u8 msg_type;
        char *log_msg;
        uuid_le data;
        struct vmbus_channel *channel;
        void (*callback) (void *context);
};

#define MAX_SRV_VER     0x7ffffff
extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
                                const int *fw_version, int fw_vercnt,
                                const int *srv_version, int srv_vercnt,
                                int *nego_fw_version, int *nego_srv_version);
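
/*
 * Illustrative sketch (version tables and buffer handling are hypothetical):
 * a util service's channel callback typically answers ICMSGTYPE_NEGOTIATE by
 * letting vmbus_prep_negotiate_resp() pick mutually supported framework and
 * service versions, then echoes the buffer back as a transaction response:
 *
 *	if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE)
 *		vmbus_prep_negotiate_resp(icmsghdrp, recv_buf,
 *					  fw_versions, ARRAY_SIZE(fw_versions),
 *					  srv_versions, ARRAY_SIZE(srv_versions),
 *					  NULL, &srv_version);
 *
 *	icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
 *	vmbus_sendpacket(channel, recv_buf, recvlen, requestid,
 *			 VM_PKT_DATA_INBAND, 0);
 */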

void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);

void vmbus_setevent(struct vmbus_channel *channel);
/*
 * Negotiated version with the Host.
 */

extern __u32 vmbus_proto_version;

int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id,
                                  const uuid_le *shv_host_servie_id);
void vmbus_set_event(struct vmbus_channel *channel);

/* Get the start of the ring buffer. */
static inline void *
hv_get_ring_buffer(const struct hv_ring_buffer_info *ring_info)
{
        return ring_info->ring_buffer->buffer;
}

/*
 * To optimize the flow management on the send-side,
 * when the sender is blocked because of lack of
 * sufficient space in the ring buffer, potentially the
 * consumer of the ring buffer can signal the producer.
 * This is controlled by the following parameters:
 *
 * 1. pending_send_sz: This is the size in bytes that the
 *    producer is trying to send.
 * 2. The feature bit feat_pending_send_sz is set to indicate if
 *    the consumer of the ring will signal when the ring
 *    state transitions from being full to a state where
 *    there is room for the producer to send the pending packet.
 */

static inline void hv_signal_on_read(struct vmbus_channel *channel)
{
        u32 cur_write_sz, cached_write_sz;
        u32 pending_sz;
        struct hv_ring_buffer_info *rbi = &channel->inbound;

        /*
         * Issue a full memory barrier before making the signaling decision.
         * Here is the reason for having this barrier:
         * If the reading of the pending_sz (in this function)
         * were to be reordered and read before we commit the new read
         * index (in the calling function) we could
         * have a problem. If the host were to set the pending_sz after we
         * have sampled pending_sz and go to sleep before we commit the
         * read index, we could miss sending the interrupt. Issue a full
         * memory barrier to address this.
         */
        virt_mb();

        pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
        /* If the other end is not blocked on write don't bother. */
        if (pending_sz == 0)
                return;

        cur_write_sz = hv_get_bytes_to_write(rbi);

        if (cur_write_sz < pending_sz)
                return;

        cached_write_sz = hv_get_cached_bytes_to_write(rbi);
        if (cached_write_sz < pending_sz)
                vmbus_setevent(channel);

        return;
}

static inline void
init_cached_read_index(struct vmbus_channel *channel)
{
        struct hv_ring_buffer_info *rbi = &channel->inbound;

        rbi->cached_read_index = rbi->ring_buffer->read_index;
}

/*
 * Mask off host interrupt callback notifications
 */
static inline void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
        rbi->ring_buffer->interrupt_mask = 1;

        /* make sure mask update is not reordered */
        virt_mb();
}

/*
 * Re-enable host callback and return number of outstanding bytes
 */
static inline u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{

        rbi->ring_buffer->interrupt_mask = 0;

        /* make sure mask update is not reordered */
        virt_mb();

        /*
         * Now check to see if the ring buffer is still empty.
         * If it is not, we raced and we need to process new
         * incoming messages.
         */
        return hv_get_bytes_to_read(rbi);
}

/*
 * An API to support in-place processing of incoming VMBUS packets.
 */
#define VMBUS_PKT_TRAILER       8

static inline struct vmpacket_descriptor *
get_next_pkt_raw(struct vmbus_channel *channel)
{
        struct hv_ring_buffer_info *ring_info = &channel->inbound;
        u32 priv_read_loc = ring_info->priv_read_index;
        void *ring_buffer = hv_get_ring_buffer(ring_info);
        u32 dsize = ring_info->ring_datasize;
        /*
         * delta is the difference between what is available to read and
         * what was already consumed in place. We commit read index after
         * the whole batch is processed.
         */
        u32 delta = priv_read_loc >= ring_info->ring_buffer->read_index ?
                priv_read_loc - ring_info->ring_buffer->read_index :
                (dsize - ring_info->ring_buffer->read_index) + priv_read_loc;
        u32 bytes_avail_toread = (hv_get_bytes_to_read(ring_info) - delta);

        if (bytes_avail_toread < sizeof(struct vmpacket_descriptor))
                return NULL;

        return ring_buffer + priv_read_loc;
}

/*
 * A helper function to step through packets "in-place".
 * This API is to be called after each successful call to
 * get_next_pkt_raw().
 */
static inline void put_pkt_raw(struct vmbus_channel *channel,
                                struct vmpacket_descriptor *desc)
{
        struct hv_ring_buffer_info *ring_info = &channel->inbound;
        u32 packetlen = desc->len8 << 3;
        u32 dsize = ring_info->ring_datasize;

        /*
         * Include the packet trailer.
         */
        ring_info->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
        ring_info->priv_read_index %= dsize;
}

/*
 * This call commits the read index and potentially signals the host.
 * Here is the pattern for using the "in-place" consumption APIs:
 *
 * init_cached_read_index();
 *
 * while (get_next_pkt_raw()) {
 *      process the packet "in-place";
 *      put_pkt_raw();
 * }
 * if (packets processed in place)
 *      commit_rd_index();
 */
static inline void commit_rd_index(struct vmbus_channel *channel)
{
        struct hv_ring_buffer_info *ring_info = &channel->inbound;
        /*
         * Make sure all reads are done before we update the read index since
         * the writer may start writing to the read area once the read index
         * is updated.
         */
        virt_rmb();
        ring_info->ring_buffer->read_index = ring_info->priv_read_index;

        hv_signal_on_read(channel);
}
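
/*
 * Illustrative sketch tying the in-place helpers together (the callback name
 * and per-packet handling are hypothetical):
 *
 *	static void my_onchannel_cb(void *context)
 *	{
 *		struct vmbus_channel *chan = context;
 *		struct vmpacket_descriptor *desc;
 *		bool consumed = false;
 *
 *		init_cached_read_index(chan);
 *
 *		while ((desc = get_next_pkt_raw(chan)) != NULL) {
 *			// payload starts desc->offset8 * 8 bytes into desc
 *			put_pkt_raw(chan, desc);
 *			consumed = true;
 *		}
 *
 *		if (consumed)
 *			commit_rd_index(chan);
 *	}
 */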


#endif /* _HYPERV_H */