linux/include/linux/hyperv.h
   1/*
   2 *
   3 * Copyright (c) 2011, Microsoft Corporation.
   4 *
   5 * This program is free software; you can redistribute it and/or modify it
   6 * under the terms and conditions of the GNU General Public License,
   7 * version 2, as published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope it will be useful, but WITHOUT
  10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  12 * more details.
  13 *
  14 * You should have received a copy of the GNU General Public License along with
  15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
  16 * Place - Suite 330, Boston, MA 02111-1307 USA.
  17 *
  18 * Authors:
  19 *   Haiyang Zhang <haiyangz@microsoft.com>
  20 *   Hank Janssen  <hjanssen@microsoft.com>
  21 *   K. Y. Srinivasan <kys@microsoft.com>
  22 *
  23 */
  24
  25#ifndef _HYPERV_H
  26#define _HYPERV_H
  27
  28#include <uapi/linux/hyperv.h>
  29
  30#include <linux/types.h>
  31#include <linux/scatterlist.h>
  32#include <linux/list.h>
  33#include <linux/timer.h>
  34#include <linux/completion.h>
  35#include <linux/device.h>
  36#include <linux/mod_devicetable.h>
  37#include <linux/interrupt.h>
  38#include <linux/reciprocal_div.h>
  39
  40#define MAX_PAGE_BUFFER_COUNT                           32
  41#define MAX_MULTIPAGE_BUFFER_COUNT                      32 /* 128K */
  42
  43#pragma pack(push, 1)
  44
  45/* Single-page buffer */
  46struct hv_page_buffer {
  47        u32 len;
  48        u32 offset;
  49        u64 pfn;
  50};
  51
  52/* Multiple-page buffer */
  53struct hv_multipage_buffer {
   54        /* Length and Offset determine the # of pfns in the array */
  55        u32 len;
  56        u32 offset;
  57        u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT];
  58};
  59
  60/*
  61 * Multiple-page buffer array; the pfn array is variable size:
  62 * The number of entries in the PFN array is determined by
  63 * "len" and "offset".
  64 */
  65struct hv_mpb_array {
   66        /* Length and Offset determine the # of pfns in the array */
  67        u32 len;
  68        u32 offset;
  69        u64 pfn_array[];
  70};
  71
  72/* 0x18 includes the proprietary packet header */
  73#define MAX_PAGE_BUFFER_PACKET          (0x18 +                 \
  74                                        (sizeof(struct hv_page_buffer) * \
  75                                         MAX_PAGE_BUFFER_COUNT))
  76#define MAX_MULTIPAGE_BUFFER_PACKET     (0x18 +                 \
  77                                         sizeof(struct hv_multipage_buffer))
  78
  79
  80#pragma pack(pop)
  81
  82struct hv_ring_buffer {
  83        /* Offset in bytes from the start of ring data below */
  84        u32 write_index;
  85
  86        /* Offset in bytes from the start of ring data below */
  87        u32 read_index;
  88
  89        u32 interrupt_mask;
  90
  91        /*
  92         * WS2012/Win8 and later versions of Hyper-V implement interrupt
  93         * driven flow management. The feature bit feat_pending_send_sz
  94         * is set by the host on the host->guest ring buffer, and by the
  95         * guest on the guest->host ring buffer.
  96         *
  97         * The meaning of the feature bit is a bit complex in that it has
  98         * semantics that apply to both ring buffers.  If the guest sets
  99         * the feature bit in the guest->host ring buffer, the guest is
 100         * telling the host that:
 101         * 1) It will set the pending_send_sz field in the guest->host ring
 102         *    buffer when it is waiting for space to become available, and
 103         * 2) It will read the pending_send_sz field in the host->guest
 104         *    ring buffer and interrupt the host when it frees enough space
 105         *
 106         * Similarly, if the host sets the feature bit in the host->guest
 107         * ring buffer, the host is telling the guest that:
 108         * 1) It will set the pending_send_sz field in the host->guest ring
 109         *    buffer when it is waiting for space to become available, and
 110         * 2) It will read the pending_send_sz field in the guest->host
 111         *    ring buffer and interrupt the guest when it frees enough space
 112         *
 113         * If either the guest or host does not set the feature bit that it
 114         * owns, that guest or host must do polling if it encounters a full
 115         * ring buffer, and not signal the other end with an interrupt.
 116         */
 117        u32 pending_send_sz;
 118        u32 reserved1[12];
 119        union {
 120                struct {
 121                        u32 feat_pending_send_sz:1;
 122                };
 123                u32 value;
 124        } feature_bits;
 125
 126        /* Pad it to PAGE_SIZE so that data starts on page boundary */
 127        u8      reserved2[4028];
 128
 129        /*
 130         * Ring data starts here + RingDataStartOffset
 131         * !!! DO NOT place any fields below this !!!
 132         */
 133        u8 buffer[0];
 134} __packed;
 135
 136struct hv_ring_buffer_info {
 137        struct hv_ring_buffer *ring_buffer;
 138        u32 ring_size;                  /* Include the shared header */
 139        struct reciprocal_value ring_size_div10_reciprocal;
 140        spinlock_t ring_lock;
 141
 142        u32 ring_datasize;              /* < ring_size */
 143        u32 priv_read_index;
 144        /*
 145         * The ring buffer mutex lock. This lock prevents the ring buffer from
 146         * being freed while the ring buffer is being accessed.
 147         */
 148        struct mutex ring_buffer_mutex;
 149};
 150
 151
 152static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi)
 153{
 154        u32 read_loc, write_loc, dsize, read;
 155
 156        dsize = rbi->ring_datasize;
 157        read_loc = rbi->ring_buffer->read_index;
 158        write_loc = READ_ONCE(rbi->ring_buffer->write_index);
 159
 160        read = write_loc >= read_loc ? (write_loc - read_loc) :
 161                (dsize - read_loc) + write_loc;
 162
 163        return read;
 164}
 165
 166static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
 167{
 168        u32 read_loc, write_loc, dsize, write;
 169
 170        dsize = rbi->ring_datasize;
 171        read_loc = READ_ONCE(rbi->ring_buffer->read_index);
 172        write_loc = rbi->ring_buffer->write_index;
 173
 174        write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
 175                read_loc - write_loc;
 176        return write;
 177}
 178
 179static inline u32 hv_get_avail_to_write_percent(
 180                const struct hv_ring_buffer_info *rbi)
 181{
 182        u32 avail_write = hv_get_bytes_to_write(rbi);
 183
 184        return reciprocal_divide(
 185                        (avail_write  << 3) + (avail_write << 1),
 186                        rbi->ring_size_div10_reciprocal);
 187}
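
/*
 * Illustrative sketch (not part of this header): a driver that wants to
 * throttle its transmit path when the outbound ring gets congested could
 * consult the helpers above from its send routine. The 20% threshold and
 * my_stop_queue() are hypothetical.
 *
 *	if (hv_get_avail_to_write_percent(&channel->outbound) < 20)
 *		my_stop_queue();
 */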
 188
 189/*
  190 * The VMBus version is a 32-bit entity broken up into
  191 * two 16-bit quantities: major_number.minor_number.
 192 *
 193 * 0 . 13 (Windows Server 2008)
 194 * 1 . 1  (Windows 7)
 195 * 2 . 4  (Windows 8)
  196 * 3 . 0  (Windows 8.1)
 197 * 4 . 0  (Windows 10)
 198 * 4 . 1  (Windows 10 RS3)
 199 * 5 . 0  (Newer Windows 10)
 200 * 5 . 1  (Windows 10 RS4)
 201 * 5 . 2  (Windows Server 2019, RS5)
 202 */
 203
 204#define VERSION_WS2008  ((0 << 16) | (13))
 205#define VERSION_WIN7    ((1 << 16) | (1))
 206#define VERSION_WIN8    ((2 << 16) | (4))
 207#define VERSION_WIN8_1    ((3 << 16) | (0))
 208#define VERSION_WIN10 ((4 << 16) | (0))
 209#define VERSION_WIN10_V4_1 ((4 << 16) | (1))
 210#define VERSION_WIN10_V5 ((5 << 16) | (0))
 211#define VERSION_WIN10_V5_1 ((5 << 16) | (1))
 212#define VERSION_WIN10_V5_2 ((5 << 16) | (2))
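
/*
 * Illustrative sketch: the two halves of a packed version value can be
 * recovered with shifts and masks, e.g. for the negotiated
 * vmbus_proto_version declared later in this header:
 *
 *	u16 major = vmbus_proto_version >> 16;
 *	u16 minor = vmbus_proto_version & 0xffff;
 */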
 213
  214/* The maximum size of a pipe payload is 16K. */
 215#define MAX_PIPE_DATA_PAYLOAD           (sizeof(u8) * 16384)
 216
 217/* Define PipeMode values. */
 218#define VMBUS_PIPE_TYPE_BYTE            0x00000000
 219#define VMBUS_PIPE_TYPE_MESSAGE         0x00000004
 220
 221/* The size of the user defined data buffer for non-pipe offers. */
 222#define MAX_USER_DEFINED_BYTES          120
 223
 224/* The size of the user defined data buffer for pipe offers. */
 225#define MAX_PIPE_USER_DEFINED_BYTES     116
 226
 227/*
 228 * At the center of the Channel Management library is the Channel Offer. This
 229 * struct contains the fundamental information about an offer.
 230 */
 231struct vmbus_channel_offer {
 232        guid_t if_type;
 233        guid_t if_instance;
 234
 235        /*
 236         * These two fields are not currently used.
 237         */
 238        u64 reserved1;
 239        u64 reserved2;
 240
 241        u16 chn_flags;
 242        u16 mmio_megabytes;             /* in bytes * 1024 * 1024 */
 243
 244        union {
 245                /* Non-pipes: The user has MAX_USER_DEFINED_BYTES bytes. */
 246                struct {
 247                        unsigned char user_def[MAX_USER_DEFINED_BYTES];
 248                } std;
 249
 250                /*
 251                 * Pipes:
  252                 * The following structure is an integrated pipe protocol, which
 253                 * is implemented on top of standard user-defined data. Pipe
 254                 * clients have MAX_PIPE_USER_DEFINED_BYTES left for their own
 255                 * use.
 256                 */
 257                struct {
 258                        u32  pipe_mode;
 259                        unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES];
 260                } pipe;
 261        } u;
 262        /*
 263         * The sub_channel_index is defined in Win8: a value of zero means a
 264         * primary channel and a value of non-zero means a sub-channel.
 265         *
 266         * Before Win8, the field is reserved, meaning it's always zero.
 267         */
 268        u16 sub_channel_index;
 269        u16 reserved3;
 270} __packed;
 271
 272/* Server Flags */
 273#define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE        1
 274#define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES    2
 275#define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS            4
 276#define VMBUS_CHANNEL_NAMED_PIPE_MODE                   0x10
 277#define VMBUS_CHANNEL_LOOPBACK_OFFER                    0x100
 278#define VMBUS_CHANNEL_PARENT_OFFER                      0x200
 279#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION    0x400
 280#define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER              0x2000
 281
 282struct vmpacket_descriptor {
 283        u16 type;
 284        u16 offset8;
 285        u16 len8;
 286        u16 flags;
 287        u64 trans_id;
 288} __packed;
 289
 290struct vmpacket_header {
 291        u32 prev_pkt_start_offset;
 292        struct vmpacket_descriptor descriptor;
 293} __packed;
 294
 295struct vmtransfer_page_range {
 296        u32 byte_count;
 297        u32 byte_offset;
 298} __packed;
 299
 300struct vmtransfer_page_packet_header {
 301        struct vmpacket_descriptor d;
 302        u16 xfer_pageset_id;
 303        u8  sender_owns_set;
 304        u8 reserved;
 305        u32 range_cnt;
 306        struct vmtransfer_page_range ranges[1];
 307} __packed;
 308
 309struct vmgpadl_packet_header {
 310        struct vmpacket_descriptor d;
 311        u32 gpadl;
 312        u32 reserved;
 313} __packed;
 314
 315struct vmadd_remove_transfer_page_set {
 316        struct vmpacket_descriptor d;
 317        u32 gpadl;
 318        u16 xfer_pageset_id;
 319        u16 reserved;
 320} __packed;
 321
 322/*
 323 * This structure defines a range in guest physical space that can be made to
 324 * look virtually contiguous.
 325 */
 326struct gpa_range {
 327        u32 byte_count;
 328        u32 byte_offset;
 329        u64 pfn_array[0];
 330};
 331
 332/*
 333 * This is the format for an Establish Gpadl packet, which contains a handle by
 334 * which this GPADL will be known and a set of GPA ranges associated with it.
 335 * This can be converted to a MDL by the guest OS.  If there are multiple GPA
 336 * ranges, then the resulting MDL will be "chained," representing multiple VA
 337 * ranges.
 338 */
 339struct vmestablish_gpadl {
 340        struct vmpacket_descriptor d;
 341        u32 gpadl;
 342        u32 range_cnt;
 343        struct gpa_range range[1];
 344} __packed;
 345
 346/*
 347 * This is the format for a Teardown Gpadl packet, which indicates that the
 348 * GPADL handle in the Establish Gpadl packet will never be referenced again.
 349 */
 350struct vmteardown_gpadl {
 351        struct vmpacket_descriptor d;
 352        u32 gpadl;
  353        u32 reserved;   /* for alignment to an 8-byte boundary */
 354} __packed;
 355
 356/*
 357 * This is the format for a GPA-Direct packet, which contains a set of GPA
 358 * ranges, in addition to commands and/or data.
 359 */
 360struct vmdata_gpa_direct {
 361        struct vmpacket_descriptor d;
 362        u32 reserved;
 363        u32 range_cnt;
 364        struct gpa_range range[1];
 365} __packed;
 366
  367/* This is the format for an Additional Data Packet. */
 368struct vmadditional_data {
 369        struct vmpacket_descriptor d;
 370        u64 total_bytes;
 371        u32 offset;
 372        u32 byte_cnt;
 373        unsigned char data[1];
 374} __packed;
 375
 376union vmpacket_largest_possible_header {
 377        struct vmpacket_descriptor simple_hdr;
 378        struct vmtransfer_page_packet_header xfer_page_hdr;
 379        struct vmgpadl_packet_header gpadl_hdr;
 380        struct vmadd_remove_transfer_page_set add_rm_xfer_page_hdr;
 381        struct vmestablish_gpadl establish_gpadl_hdr;
 382        struct vmteardown_gpadl teardown_gpadl_hdr;
 383        struct vmdata_gpa_direct data_gpa_direct_hdr;
 384};
 385
 386#define VMPACKET_DATA_START_ADDRESS(__packet)   \
 387        (void *)(((unsigned char *)__packet) +  \
  388         ((struct vmpacket_descriptor *)__packet)->offset8 * 8)
 389
 390#define VMPACKET_DATA_LENGTH(__packet)          \
  391        ((((struct vmpacket_descriptor *)__packet)->len8 -      \
  392          ((struct vmpacket_descriptor *)__packet)->offset8) * 8)
 393
 394#define VMPACKET_TRANSFER_MODE(__packet)        \
  395        (((struct vmpacket_descriptor *)__packet)->type)
 396
 397enum vmbus_packet_type {
 398        VM_PKT_INVALID                          = 0x0,
 399        VM_PKT_SYNCH                            = 0x1,
 400        VM_PKT_ADD_XFER_PAGESET                 = 0x2,
 401        VM_PKT_RM_XFER_PAGESET                  = 0x3,
 402        VM_PKT_ESTABLISH_GPADL                  = 0x4,
 403        VM_PKT_TEARDOWN_GPADL                   = 0x5,
 404        VM_PKT_DATA_INBAND                      = 0x6,
 405        VM_PKT_DATA_USING_XFER_PAGES            = 0x7,
 406        VM_PKT_DATA_USING_GPADL                 = 0x8,
 407        VM_PKT_DATA_USING_GPA_DIRECT            = 0x9,
 408        VM_PKT_CANCEL_REQUEST                   = 0xa,
 409        VM_PKT_COMP                             = 0xb,
 410        VM_PKT_DATA_USING_ADDITIONAL_PKT        = 0xc,
 411        VM_PKT_ADDITIONAL_DATA                  = 0xd
 412};
 413
 414#define VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED     1
 415
 416
 417/* Version 1 messages */
 418enum vmbus_channel_message_type {
 419        CHANNELMSG_INVALID                      =  0,
 420        CHANNELMSG_OFFERCHANNEL         =  1,
 421        CHANNELMSG_RESCIND_CHANNELOFFER =  2,
 422        CHANNELMSG_REQUESTOFFERS                =  3,
 423        CHANNELMSG_ALLOFFERS_DELIVERED  =  4,
 424        CHANNELMSG_OPENCHANNEL          =  5,
 425        CHANNELMSG_OPENCHANNEL_RESULT           =  6,
 426        CHANNELMSG_CLOSECHANNEL         =  7,
 427        CHANNELMSG_GPADL_HEADER         =  8,
 428        CHANNELMSG_GPADL_BODY                   =  9,
 429        CHANNELMSG_GPADL_CREATED                = 10,
 430        CHANNELMSG_GPADL_TEARDOWN               = 11,
 431        CHANNELMSG_GPADL_TORNDOWN               = 12,
 432        CHANNELMSG_RELID_RELEASED               = 13,
 433        CHANNELMSG_INITIATE_CONTACT             = 14,
 434        CHANNELMSG_VERSION_RESPONSE             = 15,
 435        CHANNELMSG_UNLOAD                       = 16,
 436        CHANNELMSG_UNLOAD_RESPONSE              = 17,
 437        CHANNELMSG_18                           = 18,
 438        CHANNELMSG_19                           = 19,
 439        CHANNELMSG_20                           = 20,
 440        CHANNELMSG_TL_CONNECT_REQUEST           = 21,
 441        CHANNELMSG_22                           = 22,
 442        CHANNELMSG_TL_CONNECT_RESULT            = 23,
 443        CHANNELMSG_COUNT
 444};
 445
 446/* Hyper-V supports about 2048 channels, and the RELIDs start with 1. */
 447#define INVALID_RELID   U32_MAX
 448
 449struct vmbus_channel_message_header {
 450        enum vmbus_channel_message_type msgtype;
 451        u32 padding;
 452} __packed;
 453
 454/* Query VMBus Version parameters */
 455struct vmbus_channel_query_vmbus_version {
 456        struct vmbus_channel_message_header header;
 457        u32 version;
 458} __packed;
 459
 460/* VMBus Version Supported parameters */
 461struct vmbus_channel_version_supported {
 462        struct vmbus_channel_message_header header;
 463        u8 version_supported;
 464} __packed;
 465
 466/* Offer Channel parameters */
 467struct vmbus_channel_offer_channel {
 468        struct vmbus_channel_message_header header;
 469        struct vmbus_channel_offer offer;
 470        u32 child_relid;
 471        u8 monitorid;
 472        /*
 473         * win7 and beyond splits this field into a bit field.
 474         */
 475        u8 monitor_allocated:1;
 476        u8 reserved:7;
 477        /*
 478         * These are new fields added in win7 and later.
 479         * Do not access these fields without checking the
 480         * negotiated protocol.
 481         *
 482         * If "is_dedicated_interrupt" is set, we must not set the
 483         * associated bit in the channel bitmap while sending the
 484         * interrupt to the host.
 485         *
 486         * connection_id is to be used in signaling the host.
 487         */
 488        u16 is_dedicated_interrupt:1;
 489        u16 reserved1:15;
 490        u32 connection_id;
 491} __packed;
 492
 493/* Rescind Offer parameters */
 494struct vmbus_channel_rescind_offer {
 495        struct vmbus_channel_message_header header;
 496        u32 child_relid;
 497} __packed;
 498
 499static inline u32
 500hv_ringbuffer_pending_size(const struct hv_ring_buffer_info *rbi)
 501{
 502        return rbi->ring_buffer->pending_send_sz;
 503}
 504
 505/*
 506 * Request Offer -- no parameters, SynIC message contains the partition ID
 507 * Set Snoop -- no parameters, SynIC message contains the partition ID
 508 * Clear Snoop -- no parameters, SynIC message contains the partition ID
 509 * All Offers Delivered -- no parameters, SynIC message contains the partition
 510 *                         ID
 511 * Flush Client -- no parameters, SynIC message contains the partition ID
 512 */
 513
 514/* Open Channel parameters */
 515struct vmbus_channel_open_channel {
 516        struct vmbus_channel_message_header header;
 517
 518        /* Identifies the specific VMBus channel that is being opened. */
 519        u32 child_relid;
 520
 521        /* ID making a particular open request at a channel offer unique. */
 522        u32 openid;
 523
 524        /* GPADL for the channel's ring buffer. */
 525        u32 ringbuffer_gpadlhandle;
 526
 527        /*
 528         * Starting with win8, this field will be used to specify
 529         * the target virtual processor on which to deliver the interrupt for
 530         * the host to guest communication.
 531         * Prior to win8, incoming channel interrupts would only
 532         * be delivered on cpu 0. Setting this value to 0 would
 533         * preserve the earlier behavior.
 534         */
 535        u32 target_vp;
 536
 537        /*
 538         * The upstream ring buffer begins at offset zero in the memory
 539         * described by RingBufferGpadlHandle. The downstream ring buffer
 540         * follows it at this offset (in pages).
 541         */
 542        u32 downstream_ringbuffer_pageoffset;
 543
 544        /* User-specific data to be passed along to the server endpoint. */
 545        unsigned char userdata[MAX_USER_DEFINED_BYTES];
 546} __packed;
 547
 548/* Open Channel Result parameters */
 549struct vmbus_channel_open_result {
 550        struct vmbus_channel_message_header header;
 551        u32 child_relid;
 552        u32 openid;
 553        u32 status;
 554} __packed;
 555
 556/* Close channel parameters; */
 557struct vmbus_channel_close_channel {
 558        struct vmbus_channel_message_header header;
 559        u32 child_relid;
 560} __packed;
 561
 562/* Channel Message GPADL */
 563#define GPADL_TYPE_RING_BUFFER          1
 564#define GPADL_TYPE_SERVER_SAVE_AREA     2
 565#define GPADL_TYPE_TRANSACTION          8
 566
 567/*
 568 * The number of PFNs in a GPADL message is defined by the number of
 569 * pages that would be spanned by ByteCount and ByteOffset.  If the
 570 * implied number of PFNs won't fit in this packet, there will be a
 571 * follow-up packet that contains more.
 572 */
 573struct vmbus_channel_gpadl_header {
 574        struct vmbus_channel_message_header header;
 575        u32 child_relid;
 576        u32 gpadl;
 577        u16 range_buflen;
 578        u16 rangecount;
 579        struct gpa_range range[0];
 580} __packed;
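
/*
 * Illustrative sketch: the PFN count implied by one gpa_range is the
 * number of pages spanned by byte_offset + byte_count, e.g.:
 *
 *	u32 pfn_cnt = (range->byte_offset + range->byte_count +
 *		       PAGE_SIZE - 1) >> PAGE_SHIFT;
 *
 * Any PFNs that do not fit in this header message are carried in one or
 * more vmbus_channel_gpadl_body messages that follow.
 */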
 581
 582/* This is the followup packet that contains more PFNs. */
 583struct vmbus_channel_gpadl_body {
 584        struct vmbus_channel_message_header header;
 585        u32 msgnumber;
 586        u32 gpadl;
 587        u64 pfn[0];
 588} __packed;
 589
 590struct vmbus_channel_gpadl_created {
 591        struct vmbus_channel_message_header header;
 592        u32 child_relid;
 593        u32 gpadl;
 594        u32 creation_status;
 595} __packed;
 596
 597struct vmbus_channel_gpadl_teardown {
 598        struct vmbus_channel_message_header header;
 599        u32 child_relid;
 600        u32 gpadl;
 601} __packed;
 602
 603struct vmbus_channel_gpadl_torndown {
 604        struct vmbus_channel_message_header header;
 605        u32 gpadl;
 606} __packed;
 607
 608struct vmbus_channel_relid_released {
 609        struct vmbus_channel_message_header header;
 610        u32 child_relid;
 611} __packed;
 612
 613struct vmbus_channel_initiate_contact {
 614        struct vmbus_channel_message_header header;
 615        u32 vmbus_version_requested;
 616        u32 target_vcpu; /* The VCPU the host should respond to */
 617        union {
 618                u64 interrupt_page;
 619                struct {
 620                        u8      msg_sint;
 621                        u8      padding1[3];
 622                        u32     padding2;
 623                };
 624        };
 625        u64 monitor_page1;
 626        u64 monitor_page2;
 627} __packed;
 628
 629/* Hyper-V socket: guest's connect()-ing to host */
 630struct vmbus_channel_tl_connect_request {
 631        struct vmbus_channel_message_header header;
 632        guid_t guest_endpoint_id;
 633        guid_t host_service_id;
 634} __packed;
 635
 636struct vmbus_channel_version_response {
 637        struct vmbus_channel_message_header header;
 638        u8 version_supported;
 639
 640        u8 connection_state;
 641        u16 padding;
 642
 643        /*
 644         * On new hosts that support VMBus protocol 5.0, we must use
 645         * VMBUS_MESSAGE_CONNECTION_ID_4 for the Initiate Contact Message,
 646         * and for subsequent messages, we must use the Message Connection ID
 647         * field in the host-returned Version Response Message.
 648         *
 649         * On old hosts, we should always use VMBUS_MESSAGE_CONNECTION_ID (1).
 650         */
 651        u32 msg_conn_id;
 652} __packed;
 653
 654enum vmbus_channel_state {
 655        CHANNEL_OFFER_STATE,
 656        CHANNEL_OPENING_STATE,
 657        CHANNEL_OPEN_STATE,
 658        CHANNEL_OPENED_STATE,
 659};
 660
 661/*
  662 * Represents each channel msg on the vmbus connection. This is a
  663 * variable-size data structure depending on the msg type itself.
 664 */
 665struct vmbus_channel_msginfo {
 666        /* Bookkeeping stuff */
 667        struct list_head msglistentry;
 668
 669        /* So far, this is only used to handle gpadl body message */
 670        struct list_head submsglist;
 671
 672        /* Synchronize the request/response if needed */
 673        struct completion  waitevent;
 674        struct vmbus_channel *waiting_channel;
 675        union {
 676                struct vmbus_channel_version_supported version_supported;
 677                struct vmbus_channel_open_result open_result;
 678                struct vmbus_channel_gpadl_torndown gpadl_torndown;
 679                struct vmbus_channel_gpadl_created gpadl_created;
 680                struct vmbus_channel_version_response version_response;
 681        } response;
 682
 683        u32 msgsize;
 684        /*
 685         * The channel message that goes out on the "wire".
 686         * It will contain at minimum the VMBUS_CHANNEL_MESSAGE_HEADER header
 687         */
 688        unsigned char msg[0];
 689};
 690
 691struct vmbus_close_msg {
 692        struct vmbus_channel_msginfo info;
 693        struct vmbus_channel_close_channel msg;
 694};
 695
 696/* Define connection identifier type. */
 697union hv_connection_id {
 698        u32 asu32;
 699        struct {
 700                u32 id:24;
 701                u32 reserved:8;
 702        } u;
 703};
 704
 705enum hv_numa_policy {
 706        HV_BALANCED = 0,
 707        HV_LOCALIZED,
 708};
 709
 710enum vmbus_device_type {
 711        HV_IDE = 0,
 712        HV_SCSI,
 713        HV_FC,
 714        HV_NIC,
 715        HV_ND,
 716        HV_PCIE,
 717        HV_FB,
 718        HV_KBD,
 719        HV_MOUSE,
 720        HV_KVP,
 721        HV_TS,
 722        HV_HB,
 723        HV_SHUTDOWN,
 724        HV_FCOPY,
 725        HV_BACKUP,
 726        HV_DM,
 727        HV_UNKNOWN,
 728};
 729
 730struct vmbus_device {
 731        u16  dev_type;
 732        guid_t guid;
 733        bool perf_device;
 734};
 735
 736struct vmbus_channel {
 737        struct list_head listentry;
 738
 739        struct hv_device *device_obj;
 740
 741        enum vmbus_channel_state state;
 742
 743        struct vmbus_channel_offer_channel offermsg;
 744        /*
 745         * These are based on the OfferMsg.MonitorId.
 746         * Save it here for easy access.
 747         */
 748        u8 monitor_grp;
 749        u8 monitor_bit;
 750
 751        bool rescind; /* got rescind msg */
 752        struct completion rescind_event;
 753
 754        u32 ringbuffer_gpadlhandle;
 755
 756        /* Allocated memory for ring buffer */
 757        struct page *ringbuffer_page;
 758        u32 ringbuffer_pagecount;
 759        u32 ringbuffer_send_offset;
 760        struct hv_ring_buffer_info outbound;    /* send to parent */
 761        struct hv_ring_buffer_info inbound;     /* receive from parent */
 762
 763        struct vmbus_close_msg close_msg;
 764
 765        /* Statistics */
 766        u64     interrupts;     /* Host to Guest interrupts */
 767        u64     sig_events;     /* Guest to Host events */
 768
 769        /*
 770         * Guest to host interrupts caused by the outbound ring buffer changing
 771         * from empty to not empty.
 772         */
 773        u64 intr_out_empty;
 774
 775        /*
 776         * Indicates that a full outbound ring buffer was encountered. The flag
 777         * is set to true when a full outbound ring buffer is encountered and
 778         * set to false when a write to the outbound ring buffer is completed.
 779         */
 780        bool out_full_flag;
 781
 782        /* Channel callback's invoked in softirq context */
 783        struct tasklet_struct callback_event;
 784        void (*onchannel_callback)(void *context);
 785        void *channel_callback_context;
 786
 787        /*
 788         * A channel can be marked for one of three modes of reading:
  789         *   BATCHED - callback called from tasklet and should read
 790         *            channel until empty. Interrupts from the host
 791         *            are masked while read is in process (default).
 792         *   DIRECT - callback called from tasklet (softirq).
 793         *   ISR - callback called in interrupt context and must
 794         *         invoke its own deferred processing.
 795         *         Host interrupts are disabled and must be re-enabled
 796         *         when ring is empty.
 797         */
 798        enum hv_callback_mode {
 799                HV_CALL_BATCHED,
 800                HV_CALL_DIRECT,
 801                HV_CALL_ISR
 802        } callback_mode;
 803
 804        bool is_dedicated_interrupt;
 805        u64 sig_event;
 806
 807        /*
 808         * Starting with win8, this field will be used to specify
 809         * the target virtual processor on which to deliver the interrupt for
 810         * the host to guest communication.
 811         * Prior to win8, incoming channel interrupts would only
 812         * be delivered on cpu 0. Setting this value to 0 would
 813         * preserve the earlier behavior.
 814         */
 815        u32 target_vp;
 816        /* The corresponding CPUID in the guest */
 817        u32 target_cpu;
 818        /*
 819         * State to manage the CPU affiliation of channels.
 820         */
 821        struct cpumask alloced_cpus_in_node;
 822        int numa_node;
 823        /*
 824         * Support for sub-channels. For high performance devices,
 825         * it will be useful to have multiple sub-channels to support
 826         * a scalable communication infrastructure with the host.
  827         * The support for sub-channels is implemented as an extension
 828         * to the current infrastructure.
 829         * The initial offer is considered the primary channel and this
 830         * offer message will indicate if the host supports sub-channels.
  831         * The guest is free to ask for sub-channels to be offered and can
 832         * open these sub-channels as a normal "primary" channel. However,
 833         * all sub-channels will have the same type and instance guids as the
 834         * primary channel. Requests sent on a given channel will result in a
 835         * response on the same channel.
 836         */
 837
 838        /*
 839         * Sub-channel creation callback. This callback will be called in
 840         * process context when a sub-channel offer is received from the host.
 841         * The guest can open the sub-channel in the context of this callback.
 842         */
 843        void (*sc_creation_callback)(struct vmbus_channel *new_sc);
 844
 845        /*
 846         * Channel rescind callback. Some channels (the hvsock ones), need to
 847         * register a callback which is invoked in vmbus_onoffer_rescind().
 848         */
 849        void (*chn_rescind_callback)(struct vmbus_channel *channel);
 850
 851        /*
 852         * The spinlock to protect the structure. It is being used to protect
 853         * test-and-set access to various attributes of the structure as well
 854         * as all sc_list operations.
 855         */
 856        spinlock_t lock;
 857        /*
 858         * All Sub-channels of a primary channel are linked here.
 859         */
 860        struct list_head sc_list;
 861        /*
 862         * The primary channel this sub-channel belongs to.
 863         * This will be NULL for the primary channel.
 864         */
 865        struct vmbus_channel *primary_channel;
 866        /*
 867         * Support per-channel state for use by vmbus drivers.
 868         */
 869        void *per_channel_state;
 870        /*
 871         * To support per-cpu lookup mapping of relid to channel,
 872         * link up channels based on their CPU affinity.
 873         */
 874        struct list_head percpu_list;
 875
 876        /*
 877         * Defer freeing channel until after all cpu's have
 878         * gone through grace period.
 879         */
 880        struct rcu_head rcu;
 881
 882        /*
 883         * For sysfs per-channel properties.
 884         */
 885        struct kobject                  kobj;
 886
 887        /*
 888         * For performance critical channels (storage, networking
  889         * etc.), Hyper-V has a mechanism to enhance the throughput
 890         * at the expense of latency:
 891         * When the host is to be signaled, we just set a bit in a shared page
 892         * and this bit will be inspected by the hypervisor within a certain
 893         * window and if the bit is set, the host will be signaled. The window
 894         * of time is the monitor latency - currently around 100 usecs. This
 895         * mechanism improves throughput by:
 896         *
 897         * A) Making the host more efficient - each time it wakes up,
  898         *    potentially it will process more packets. The
 899         *    monitor latency allows a batch to build up.
 900         * B) By deferring the hypercall to signal, we will also minimize
 901         *    the interrupts.
 902         *
 903         * Clearly, these optimizations improve throughput at the expense of
 904         * latency. Furthermore, since the channel is shared for both
 905         * control and data messages, control messages currently suffer
  906         * unnecessary latency, adversely impacting performance and boot
 907         * time. To fix this issue, permit tagging the channel as being
 908         * in "low latency" mode. In this mode, we will bypass the monitor
 909         * mechanism.
 910         */
 911        bool low_latency;
 912
 913        /*
 914         * NUMA distribution policy:
 915         * We support two policies:
 916         * 1) Balanced: Here all performance critical channels are
 917         *    distributed evenly amongst all the NUMA nodes.
 918         *    This policy will be the default policy.
 919         * 2) Localized: All channels of a given instance of a
 920         *    performance critical service will be assigned CPUs
 921         *    within a selected NUMA node.
 922         */
 923        enum hv_numa_policy affinity_policy;
 924
 925        bool probe_done;
 926
 927        /*
 928         * We must offload the handling of the primary/sub channels
 929         * from the single-threaded vmbus_connection.work_queue to
  930         * two different workqueues, otherwise we can block
 931         * vmbus_connection.work_queue and hang: see vmbus_process_offer().
 932         */
 933        struct work_struct add_channel_work;
 934
 935        /*
 936         * Guest to host interrupts caused by the inbound ring buffer changing
 937         * from full to not full while a packet is waiting.
 938         */
 939        u64 intr_in_full;
 940
 941        /*
 942         * The total number of write operations that encountered a full
 943         * outbound ring buffer.
 944         */
 945        u64 out_full_total;
 946
 947        /*
 948         * The number of write operations that were the first to encounter a
 949         * full outbound ring buffer.
 950         */
 951        u64 out_full_first;
 952
  953        /* enabling/disabling fuzz testing on the channel (default is false) */
 954        bool fuzz_testing_state;
 955
 956        /*
 957         * Interrupt delay will delay the guest from emptying the ring buffer
 958         * for a specific amount of time. The delay is in microseconds and will
  959         * be between 1 and a maximum of 1000; its default is 0 (no delay).
  960         * The message delay will delay guest reading on a per-message basis,
  961         * in microseconds between 1 and 1000, with the default being 0
 962         * (no delay).
 963         */
 964        u32 fuzz_testing_interrupt_delay;
 965        u32 fuzz_testing_message_delay;
 966
 967};
 968
 969static inline bool is_hvsock_channel(const struct vmbus_channel *c)
 970{
 971        return !!(c->offermsg.offer.chn_flags &
 972                  VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
 973}
 974
 975static inline bool is_sub_channel(const struct vmbus_channel *c)
 976{
 977        return c->offermsg.offer.sub_channel_index != 0;
 978}
 979
 980static inline void set_channel_affinity_state(struct vmbus_channel *c,
 981                                              enum hv_numa_policy policy)
 982{
 983        c->affinity_policy = policy;
 984}
 985
 986static inline void set_channel_read_mode(struct vmbus_channel *c,
 987                                        enum hv_callback_mode mode)
 988{
 989        c->callback_mode = mode;
 990}
 991
 992static inline void set_per_channel_state(struct vmbus_channel *c, void *s)
 993{
 994        c->per_channel_state = s;
 995}
 996
 997static inline void *get_per_channel_state(struct vmbus_channel *c)
 998{
 999        return c->per_channel_state;
1000}
1001
1002static inline void set_channel_pending_send_size(struct vmbus_channel *c,
1003                                                 u32 size)
1004{
1005        unsigned long flags;
1006
1007        if (size) {
1008                spin_lock_irqsave(&c->outbound.ring_lock, flags);
1009                ++c->out_full_total;
1010
1011                if (!c->out_full_flag) {
1012                        ++c->out_full_first;
1013                        c->out_full_flag = true;
1014                }
1015                spin_unlock_irqrestore(&c->outbound.ring_lock, flags);
1016        } else {
1017                c->out_full_flag = false;
1018        }
1019
1020        c->outbound.ring_buffer->pending_send_sz = size;
1021}
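
/*
 * Illustrative sketch: a sender that finds the outbound ring too full can
 * ask the host for an interrupt once "needed" bytes become free instead
 * of polling; "needed" and the retry path are hypothetical.
 *
 *	if (hv_get_bytes_to_write(&channel->outbound) < needed) {
 *		set_channel_pending_send_size(channel, needed);
 *		return -EAGAIN;
 *	}
 *	set_channel_pending_send_size(channel, 0);
 */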
1022
1023static inline void set_low_latency_mode(struct vmbus_channel *c)
1024{
1025        c->low_latency = true;
1026}
1027
1028static inline void clear_low_latency_mode(struct vmbus_channel *c)
1029{
1030        c->low_latency = false;
1031}
1032
1033void vmbus_onmessage(void *context);
1034
1035int vmbus_request_offers(void);
1036
1037/*
1038 * APIs for managing sub-channels.
1039 */
1040
1041void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
1042                        void (*sc_cr_cb)(struct vmbus_channel *new_sc));
1043
1044void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
1045                void (*chn_rescind_cb)(struct vmbus_channel *));
1046
1047/*
 1048 * Check if sub-channels have already been offered. This API will be useful
1049 * when the driver is unloaded after establishing sub-channels. In this case,
1050 * when the driver is re-loaded, the driver would have to check if the
1051 * subchannels have already been established before attempting to request
1052 * the creation of sub-channels.
1053 * This function returns TRUE to indicate that subchannels have already been
1054 * created.
1055 * This function should be invoked after setting the callback function for
1056 * sub-channel creation.
1057 */
1058bool vmbus_are_subchannels_present(struct vmbus_channel *primary);
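
/*
 * Illustrative sketch: a driver that uses sub-channels registers its
 * creation callback first and then checks whether any sub-channels were
 * already offered (e.g. after a driver reload). my_sc_creation_cb() is
 * hypothetical.
 *
 *	vmbus_set_sc_create_callback(primary, my_sc_creation_cb);
 *	if (vmbus_are_subchannels_present(primary))
 *		pr_info("sub-channels already offered\n");
 */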
1059
1060/* The format must be the same as struct vmdata_gpa_direct */
1061struct vmbus_channel_packet_page_buffer {
1062        u16 type;
1063        u16 dataoffset8;
1064        u16 length8;
1065        u16 flags;
1066        u64 transactionid;
1067        u32 reserved;
1068        u32 rangecount;
1069        struct hv_page_buffer range[MAX_PAGE_BUFFER_COUNT];
1070} __packed;
1071
1072/* The format must be the same as struct vmdata_gpa_direct */
1073struct vmbus_channel_packet_multipage_buffer {
1074        u16 type;
1075        u16 dataoffset8;
1076        u16 length8;
1077        u16 flags;
1078        u64 transactionid;
1079        u32 reserved;
1080        u32 rangecount;         /* Always 1 in this case */
1081        struct hv_multipage_buffer range;
1082} __packed;
1083
1084/* The format must be the same as struct vmdata_gpa_direct */
1085struct vmbus_packet_mpb_array {
1086        u16 type;
1087        u16 dataoffset8;
1088        u16 length8;
1089        u16 flags;
1090        u64 transactionid;
1091        u32 reserved;
1092        u32 rangecount;         /* Always 1 in this case */
1093        struct hv_mpb_array range;
1094} __packed;
1095
1096int vmbus_alloc_ring(struct vmbus_channel *channel,
1097                     u32 send_size, u32 recv_size);
1098void vmbus_free_ring(struct vmbus_channel *channel);
1099
1100int vmbus_connect_ring(struct vmbus_channel *channel,
1101                       void (*onchannel_callback)(void *context),
1102                       void *context);
1103int vmbus_disconnect_ring(struct vmbus_channel *channel);
1104
1105extern int vmbus_open(struct vmbus_channel *channel,
1106                            u32 send_ringbuffersize,
1107                            u32 recv_ringbuffersize,
1108                            void *userdata,
1109                            u32 userdatalen,
1110                            void (*onchannel_callback)(void *context),
1111                            void *context);
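
/*
 * Illustrative sketch: opening a channel with a 16-page ring buffer in
 * each direction and no user data. The ring size and
 * my_onchannel_callback() are hypothetical.
 *
 *	ret = vmbus_open(device->channel, 16 * PAGE_SIZE, 16 * PAGE_SIZE,
 *			 NULL, 0, my_onchannel_callback, device->channel);
 *	if (ret)
 *		return ret;
 */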
1112
1113extern void vmbus_close(struct vmbus_channel *channel);
1114
1115extern int vmbus_sendpacket(struct vmbus_channel *channel,
1116                                  void *buffer,
1117                                  u32 bufferLen,
1118                                  u64 requestid,
1119                                  enum vmbus_packet_type type,
1120                                  u32 flags);
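
/*
 * Illustrative sketch: sending a small in-band request and asking the
 * host for a completion packet. The request structure "req" and the use
 * of its address as the transaction id are hypothetical.
 *
 *	ret = vmbus_sendpacket(channel, &req, sizeof(req),
 *			       (unsigned long)&req, VM_PKT_DATA_INBAND,
 *			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 */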
1121
1122extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
1123                                            struct hv_page_buffer pagebuffers[],
1124                                            u32 pagecount,
1125                                            void *buffer,
1126                                            u32 bufferlen,
1127                                            u64 requestid);
1128
1129extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
1130                                     struct vmbus_packet_mpb_array *mpb,
1131                                     u32 desc_size,
1132                                     void *buffer,
1133                                     u32 bufferlen,
1134                                     u64 requestid);
1135
1136extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
1137                                      void *kbuffer,
1138                                      u32 size,
1139                                      u32 *gpadl_handle);
1140
1141extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
1142                                     u32 gpadl_handle);
1143
1144void vmbus_reset_channel_cb(struct vmbus_channel *channel);
1145
1146extern int vmbus_recvpacket(struct vmbus_channel *channel,
1147                                  void *buffer,
1148                                  u32 bufferlen,
1149                                  u32 *buffer_actual_len,
1150                                  u64 *requestid);
1151
1152extern int vmbus_recvpacket_raw(struct vmbus_channel *channel,
1153                                     void *buffer,
1154                                     u32 bufferlen,
1155                                     u32 *buffer_actual_len,
1156                                     u64 *requestid);
1157
1158
1159extern void vmbus_ontimer(unsigned long data);
1160
1161/* Base driver object */
1162struct hv_driver {
1163        const char *name;
1164
1165        /*
1166         * A hvsock offer, which has a VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER
1167         * channel flag, actually doesn't mean a synthetic device because the
1168         * offer's if_type/if_instance can change for every new hvsock
1169         * connection.
1170         *
1171         * However, to facilitate the notification of new-offer/rescind-offer
1172         * from vmbus driver to hvsock driver, we can handle hvsock offer as
1173         * a special vmbus device, and hence we need the below flag to
1174         * indicate if the driver is the hvsock driver or not: we need to
 1175         * specially treat the hvsock offer & driver in vmbus_match().
1176         */
1177        bool hvsock;
1178
1179        /* the device type supported by this driver */
1180        guid_t dev_type;
1181        const struct hv_vmbus_device_id *id_table;
1182
1183        struct device_driver driver;
1184
1185        /* dynamic device GUID's */
1186        struct  {
1187                spinlock_t lock;
1188                struct list_head list;
1189        } dynids;
1190
1191        int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *);
1192        int (*remove)(struct hv_device *);
1193        void (*shutdown)(struct hv_device *);
1194
1195        int (*suspend)(struct hv_device *);
1196        int (*resume)(struct hv_device *);
1197
1198};
1199
1200/* Base device object */
1201struct hv_device {
1202        /* the device type id of this device */
1203        guid_t dev_type;
1204
1205        /* the device instance id of this device */
1206        guid_t dev_instance;
1207        u16 vendor_id;
1208        u16 device_id;
1209
1210        struct device device;
1211        char *driver_override; /* Driver name to force a match */
1212
1213        struct vmbus_channel *channel;
1214        struct kset          *channels_kset;
1215
1216        /* place holder to keep track of the dir for hv device in debugfs */
1217        struct dentry *debug_dir;
1218
1219};
1220
1221
1222static inline struct hv_device *device_to_hv_device(struct device *d)
1223{
1224        return container_of(d, struct hv_device, device);
1225}
1226
1227static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d)
1228{
1229        return container_of(d, struct hv_driver, driver);
1230}
1231
1232static inline void hv_set_drvdata(struct hv_device *dev, void *data)
1233{
1234        dev_set_drvdata(&dev->device, data);
1235}
1236
1237static inline void *hv_get_drvdata(struct hv_device *dev)
1238{
1239        return dev_get_drvdata(&dev->device);
1240}
1241
1242struct hv_ring_buffer_debug_info {
1243        u32 current_interrupt_mask;
1244        u32 current_read_index;
1245        u32 current_write_index;
1246        u32 bytes_avail_toread;
1247        u32 bytes_avail_towrite;
1248};
1249
1250
1251int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
1252                                struct hv_ring_buffer_debug_info *debug_info);
1253
1254/* Vmbus interface */
1255#define vmbus_driver_register(driver)   \
1256        __vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
1257int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
1258                                         struct module *owner,
1259                                         const char *mod_name);
1260void vmbus_driver_unregister(struct hv_driver *hv_driver);
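
/*
 * Illustrative sketch: a minimal driver registration, assuming
 * hypothetical my_probe()/my_remove() callbacks and a match on the
 * network GUID defined further below.
 *
 *	static const struct hv_vmbus_device_id my_id_table[] = {
 *		{ HV_NIC_GUID, },
 *		{ },
 *	};
 *
 *	static struct hv_driver my_drv = {
 *		.name		= "my_drv",
 *		.id_table	= my_id_table,
 *		.probe		= my_probe,
 *		.remove		= my_remove,
 *	};
 *
 *	ret = vmbus_driver_register(&my_drv);
 */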
1261
1262void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);
1263
1264int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
1265                        resource_size_t min, resource_size_t max,
1266                        resource_size_t size, resource_size_t align,
1267                        bool fb_overlap_ok);
1268void vmbus_free_mmio(resource_size_t start, resource_size_t size);
1269
1270/*
1271 * GUID definitions of various offer types - services offered to the guest.
1272 */
1273
1274/*
1275 * Network GUID
1276 * {f8615163-df3e-46c5-913f-f2d2f965ed0e}
1277 */
1278#define HV_NIC_GUID \
1279        .guid = GUID_INIT(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \
1280                          0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e)
1281
1282/*
1283 * IDE GUID
1284 * {32412632-86cb-44a2-9b5c-50d1417354f5}
1285 */
1286#define HV_IDE_GUID \
1287        .guid = GUID_INIT(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \
1288                          0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)
1289
1290/*
1291 * SCSI GUID
1292 * {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}
1293 */
1294#define HV_SCSI_GUID \
1295        .guid = GUID_INIT(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \
1296                          0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)
1297
1298/*
1299 * Shutdown GUID
1300 * {0e0b6031-5213-4934-818b-38d90ced39db}
1301 */
1302#define HV_SHUTDOWN_GUID \
1303        .guid = GUID_INIT(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \
1304                          0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb)
1305
1306/*
1307 * Time Synch GUID
1308 * {9527E630-D0AE-497b-ADCE-E80AB0175CAF}
1309 */
1310#define HV_TS_GUID \
1311        .guid = GUID_INIT(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \
1312                          0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)
1313
1314/*
1315 * Heartbeat GUID
1316 * {57164f39-9115-4e78-ab55-382f3bd5422d}
1317 */
1318#define HV_HEART_BEAT_GUID \
1319        .guid = GUID_INIT(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \
1320                          0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)
1321
1322/*
1323 * KVP GUID
1324 * {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}
1325 */
1326#define HV_KVP_GUID \
1327        .guid = GUID_INIT(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \
1328                          0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6)
1329
1330/*
1331 * Dynamic memory GUID
1332 * {525074dc-8985-46e2-8057-a307dc18a502}
1333 */
1334#define HV_DM_GUID \
1335        .guid = GUID_INIT(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \
1336                          0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)
1337
1338/*
1339 * Mouse GUID
1340 * {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}
1341 */
1342#define HV_MOUSE_GUID \
1343        .guid = GUID_INIT(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \
1344                          0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a)
1345
1346/*
1347 * Keyboard GUID
1348 * {f912ad6d-2b17-48ea-bd65-f927a61c7684}
1349 */
1350#define HV_KBD_GUID \
1351        .guid = GUID_INIT(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \
1352                          0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84)
1353
1354/*
1355 * VSS (Backup/Restore) GUID
1356 */
1357#define HV_VSS_GUID \
1358        .guid = GUID_INIT(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \
1359                          0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40)
1360/*
1361 * Synthetic Video GUID
1362 * {DA0A7802-E377-4aac-8E77-0558EB1073F8}
1363 */
1364#define HV_SYNTHVID_GUID \
1365        .guid = GUID_INIT(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \
1366                          0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8)
1367
1368/*
1369 * Synthetic FC GUID
1370 * {2f9bcc4a-0069-4af3-b76b-6fd0be528cda}
1371 */
1372#define HV_SYNTHFC_GUID \
1373        .guid = GUID_INIT(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \
1374                          0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda)
1375
1376/*
1377 * Guest File Copy Service
1378 * {34D14BE3-DEE4-41c8-9AE7-6B174977C192}
1379 */
1380
1381#define HV_FCOPY_GUID \
1382        .guid = GUID_INIT(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \
1383                          0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92)
1384
1385/*
1386 * NetworkDirect. This is the guest RDMA service.
1387 * {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}
1388 */
1389#define HV_ND_GUID \
1390        .guid = GUID_INIT(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \
1391                          0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01)
1392
1393/*
1394 * PCI Express Pass Through
1395 * {44C4F61D-4444-4400-9D52-802E27EDE19F}
1396 */
1397
1398#define HV_PCIE_GUID \
1399        .guid = GUID_INIT(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \
1400                          0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)
1401
1402/*
 1403 * Linux doesn't support these 3 devices: the first two are for
1404 * Automatic Virtual Machine Activation, and the third is for
1405 * Remote Desktop Virtualization.
1406 * {f8e65716-3cb3-4a06-9a60-1889c5cccab5}
1407 * {3375baf4-9e15-4b30-b765-67acb10d607b}
1408 * {276aacf4-ac15-426c-98dd-7521ad3f01fe}
1409 */
1410
1411#define HV_AVMA1_GUID \
1412        .guid = GUID_INIT(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \
1413                          0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5)
1414
1415#define HV_AVMA2_GUID \
1416        .guid = GUID_INIT(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \
1417                          0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b)
1418
1419#define HV_RDV_GUID \
1420        .guid = GUID_INIT(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \
1421                          0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe)
1422
1423/*
1424 * Common header for Hyper-V ICs
1425 */
1426
1427#define ICMSGTYPE_NEGOTIATE             0
1428#define ICMSGTYPE_HEARTBEAT             1
1429#define ICMSGTYPE_KVPEXCHANGE           2
1430#define ICMSGTYPE_SHUTDOWN              3
1431#define ICMSGTYPE_TIMESYNC              4
1432#define ICMSGTYPE_VSS                   5
1433
1434#define ICMSGHDRFLAG_TRANSACTION        1
1435#define ICMSGHDRFLAG_REQUEST            2
1436#define ICMSGHDRFLAG_RESPONSE           4
1437
1438
1439/*
1440 * While we want to handle util services as regular devices,
1441 * there is only one instance of each of these services; so
1442 * we statically allocate the service specific state.
1443 */
1444
1445struct hv_util_service {
1446        u8 *recv_buffer;
1447        void *channel;
1448        void (*util_cb)(void *);
1449        int (*util_init)(struct hv_util_service *);
1450        void (*util_deinit)(void);
1451        int (*util_pre_suspend)(void);
1452        int (*util_pre_resume)(void);
1453};
1454
1455struct vmbuspipe_hdr {
1456        u32 flags;
1457        u32 msgsize;
1458} __packed;
1459
1460struct ic_version {
1461        u16 major;
1462        u16 minor;
1463} __packed;
1464
1465struct icmsg_hdr {
1466        struct ic_version icverframe;
1467        u16 icmsgtype;
1468        struct ic_version icvermsg;
1469        u16 icmsgsize;
1470        u32 status;
1471        u8 ictransaction_id;
1472        u8 icflags;
1473        u8 reserved[2];
1474} __packed;
1475
1476struct icmsg_negotiate {
1477        u16 icframe_vercnt;
1478        u16 icmsg_vercnt;
1479        u32 reserved;
1480        struct ic_version icversion_data[1]; /* any size array */
1481} __packed;
1482
1483struct shutdown_msg_data {
1484        u32 reason_code;
1485        u32 timeout_seconds;
1486        u32 flags;
1487        u8  display_message[2048];
1488} __packed;
1489
1490struct heartbeat_msg_data {
1491        u64 seq_num;
1492        u32 reserved[8];
1493} __packed;
1494
1495/* Time Sync IC defs */
1496#define ICTIMESYNCFLAG_PROBE    0
1497#define ICTIMESYNCFLAG_SYNC     1
1498#define ICTIMESYNCFLAG_SAMPLE   2
1499
1500#ifdef __x86_64__
1501#define WLTIMEDELTA     116444736000000000L     /* in 100ns unit */
1502#else
1503#define WLTIMEDELTA     116444736000000000LL
1504#endif
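
/*
 * Illustrative sketch: converting a Hyper-V time value (100ns units since
 * January 1, 1601), such as the parenttime fields below, to nanoseconds
 * since the Unix epoch:
 *
 *	u64 unix_ns = (parenttime - WLTIMEDELTA) * 100;
 */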
1505
1506struct ictimesync_data {
1507        u64 parenttime;
1508        u64 childtime;
1509        u64 roundtriptime;
1510        u8 flags;
1511} __packed;
1512
1513struct ictimesync_ref_data {
1514        u64 parenttime;
1515        u64 vmreferencetime;
1516        u8 flags;
1517        char leapflags;
1518        char stratum;
1519        u8 reserved[3];
1520} __packed;
1521
1522struct hyperv_service_callback {
1523        u8 msg_type;
1524        char *log_msg;
1525        guid_t data;
1526        struct vmbus_channel *channel;
1527        void (*callback)(void *context);
1528};
1529
1530#define MAX_SRV_VER     0x7ffffff
1531extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
1532                                const int *fw_version, int fw_vercnt,
1533                                const int *srv_version, int srv_vercnt,
1534                                int *nego_fw_version, int *nego_srv_version);
1535
1536void hv_process_channel_removal(struct vmbus_channel *channel);
1537
1538void vmbus_setevent(struct vmbus_channel *channel);
1539/*
1540 * Negotiated version with the Host.
1541 */
1542
1543extern __u32 vmbus_proto_version;
1544
1545int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
1546                                  const guid_t *shv_host_servie_id);
1547void vmbus_set_event(struct vmbus_channel *channel);
1548
1549/* Get the start of the ring buffer. */
1550static inline void *
1551hv_get_ring_buffer(const struct hv_ring_buffer_info *ring_info)
1552{
1553        return ring_info->ring_buffer->buffer;
1554}
1555
1556/*
1557 * Mask off host interrupt callback notifications
1558 */
1559static inline void hv_begin_read(struct hv_ring_buffer_info *rbi)
1560{
1561        rbi->ring_buffer->interrupt_mask = 1;
1562
1563        /* make sure mask update is not reordered */
1564        virt_mb();
1565}
1566
1567/*
1568 * Re-enable host callback and return number of outstanding bytes
1569 */
1570static inline u32 hv_end_read(struct hv_ring_buffer_info *rbi)
1571{
1572
1573        rbi->ring_buffer->interrupt_mask = 0;
1574
1575        /* make sure mask update is not reordered */
1576        virt_mb();
1577
1578        /*
1579         * Now check to see if the ring buffer is still empty.
1580         * If it is not, we raced and we need to process new
1581         * incoming messages.
1582         */
1583        return hv_get_bytes_to_read(rbi);
1584}
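
/*
 * Illustrative sketch: the mask/unmask helpers are typically used around
 * a drain loop, so that data arriving just as the mask is lifted is not
 * missed; the drain step itself is elided here.
 *
 *	hv_begin_read(rbi);
 *	do {
 *		... read and process all available packets ...
 *	} while (hv_end_read(rbi) != 0);
 */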
1585
1586/*
1587 * An API to support in-place processing of incoming VMBUS packets.
1588 */
1589
1590/* Get data payload associated with descriptor */
1591static inline void *hv_pkt_data(const struct vmpacket_descriptor *desc)
1592{
1593        return (void *)((unsigned long)desc + (desc->offset8 << 3));
1594}
1595
1596/* Get data size associated with descriptor */
1597static inline u32 hv_pkt_datalen(const struct vmpacket_descriptor *desc)
1598{
1599        return (desc->len8 << 3) - (desc->offset8 << 3);
1600}
1601
1602
1603struct vmpacket_descriptor *
1604hv_pkt_iter_first(struct vmbus_channel *channel);
1605
1606struct vmpacket_descriptor *
1607__hv_pkt_iter_next(struct vmbus_channel *channel,
1608                   const struct vmpacket_descriptor *pkt);
1609
1610void hv_pkt_iter_close(struct vmbus_channel *channel);
1611
1612/*
1613 * Get next packet descriptor from iterator
1614 * If at end of list, return NULL and update host.
1615 */
1616static inline struct vmpacket_descriptor *
1617hv_pkt_iter_next(struct vmbus_channel *channel,
1618                 const struct vmpacket_descriptor *pkt)
1619{
1620        struct vmpacket_descriptor *nxt;
1621
1622        nxt = __hv_pkt_iter_next(channel, pkt);
1623        if (!nxt)
1624                hv_pkt_iter_close(channel);
1625
1626        return nxt;
1627}
1628
1629#define foreach_vmbus_pkt(pkt, channel) \
1630        for (pkt = hv_pkt_iter_first(channel); pkt; \
1631            pkt = hv_pkt_iter_next(channel, pkt))
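
/*
 * Illustrative sketch: in-place processing of every pending packet from a
 * channel callback using the iterator above; handle_pkt() is hypothetical.
 *
 *	struct vmpacket_descriptor *pkt;
 *
 *	foreach_vmbus_pkt(pkt, channel)
 *		handle_pkt(hv_pkt_data(pkt), hv_pkt_datalen(pkt));
 */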
1632
1633/*
1634 * Interface for passing data between SR-IOV PF and VF drivers. The VF driver
1635 * sends requests to read and write blocks. Each block must be 128 bytes or
1636 * smaller. Optionally, the VF driver can register a callback function which
1637 * will be invoked when the host says that one or more of the first 64 block
 1638 * IDs are "invalid", which means that the VF driver should reread them.
1639 */
1640#define HV_CONFIG_BLOCK_SIZE_MAX 128
1641
1642int hyperv_read_cfg_blk(struct pci_dev *dev, void *buf, unsigned int buf_len,
1643                        unsigned int block_id, unsigned int *bytes_returned);
1644int hyperv_write_cfg_blk(struct pci_dev *dev, void *buf, unsigned int len,
1645                         unsigned int block_id);
1646int hyperv_reg_block_invalidate(struct pci_dev *dev, void *context,
1647                                void (*block_invalidate)(void *context,
1648                                                         u64 block_mask));
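
/*
 * Illustrative sketch: a VF driver reading one configuration block (at
 * most HV_CONFIG_BLOCK_SIZE_MAX bytes) from the PF; block id 1 is
 * hypothetical.
 *
 *	u8 buf[HV_CONFIG_BLOCK_SIZE_MAX];
 *	unsigned int bytes;
 *
 *	ret = hyperv_read_cfg_blk(pdev, buf, sizeof(buf), 1, &bytes);
 */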
1649
1650struct hyperv_pci_block_ops {
1651        int (*read_block)(struct pci_dev *dev, void *buf, unsigned int buf_len,
1652                          unsigned int block_id, unsigned int *bytes_returned);
1653        int (*write_block)(struct pci_dev *dev, void *buf, unsigned int len,
1654                           unsigned int block_id);
1655        int (*reg_blk_invalidate)(struct pci_dev *dev, void *context,
1656                                  void (*block_invalidate)(void *context,
1657                                                           u64 block_mask));
1658};
1659
1660extern struct hyperv_pci_block_ops hvpci_block_ops;
1661
1662#endif /* _HYPERV_H */
1663