linux/include/linux/vmw_vmci_defs.h
/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 */

#ifndef _VMW_VMCI_DEF_H_
#define _VMW_VMCI_DEF_H_

#include <linux/atomic.h>

/* Register offsets. */
#define VMCI_STATUS_ADDR      0x00
#define VMCI_CONTROL_ADDR     0x04
#define VMCI_ICR_ADDR         0x08
#define VMCI_IMR_ADDR         0x0c
#define VMCI_DATA_OUT_ADDR    0x10
#define VMCI_DATA_IN_ADDR     0x14
#define VMCI_CAPS_ADDR        0x18
#define VMCI_RESULT_LOW_ADDR  0x1c
#define VMCI_RESULT_HIGH_ADDR 0x20

/* Max number of devices. */
#define VMCI_MAX_DEVICES 1

/* Status register bits. */
#define VMCI_STATUS_INT_ON     0x1

/* Control register bits. */
#define VMCI_CONTROL_RESET        0x1
#define VMCI_CONTROL_INT_ENABLE   0x2
#define VMCI_CONTROL_INT_DISABLE  0x4

/* Capabilities register bits. */
#define VMCI_CAPS_HYPERCALL     0x1
#define VMCI_CAPS_GUESTCALL     0x2
#define VMCI_CAPS_DATAGRAM      0x4
#define VMCI_CAPS_NOTIFICATIONS 0x8
#define VMCI_CAPS_PPN64         0x10

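/*
 * Example (illustrative only, not part of the original header): a guest
 * driver would normally probe the capabilities register before relying
 * on a feature.  Here 'iobase' is an assumed mapped pointer to the
 * device register window:
 *
 *	u32 caps = ioread32(iobase + VMCI_CAPS_ADDR);
 *
 *	if (!(caps & VMCI_CAPS_DATAGRAM))
 *		return -ENXIO;
 *	use_notify = !!(caps & VMCI_CAPS_NOTIFICATIONS);
 */
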
/* Interrupt Cause register bits. */
#define VMCI_ICR_DATAGRAM      0x1
#define VMCI_ICR_NOTIFICATION  0x2

/* Interrupt Mask register bits. */
#define VMCI_IMR_DATAGRAM      0x1
#define VMCI_IMR_NOTIFICATION  0x2

/* Maximum MSI/MSI-X interrupt vectors in the device. */
#define VMCI_MAX_INTRS 2

/*
 * Supported interrupt vectors.  There is one for each ICR value above,
 * but here they indicate the position in the vector array/message ID.
 */
enum {
	VMCI_INTR_DATAGRAM = 0,
	VMCI_INTR_NOTIFICATION = 1,
};

/*
 * A single VMCI device has an upper limit of 128MB on the amount of
 * memory that can be used for queue pairs. Since each queue pair
 * consists of at least two pages, the memory limit also dictates the
 * number of queue pairs a guest can create.
 */
#define VMCI_MAX_GUEST_QP_MEMORY (128 * 1024 * 1024)
#define VMCI_MAX_GUEST_QP_COUNT  (VMCI_MAX_GUEST_QP_MEMORY / PAGE_SIZE / 2)

/*
 * There can be at most PAGE_SIZE doorbells since there is one doorbell
 * per byte in the doorbell bitmap page.
 */
#define VMCI_MAX_GUEST_DOORBELL_COUNT PAGE_SIZE

/*
 * Queues with pre-mapped data pages must be small, so that we don't pin
 * too much kernel memory (especially on vmkernel).  We limit a queuepair to
 * 32 KB, or 16 KB per queue for symmetrical pairs.
 */
#define VMCI_MAX_PINNED_QP_MEMORY (32 * 1024)

/*
 * We have a fixed set of resource IDs available in the VMX.
 * This allows us to have a very simple implementation since we statically
 * know how many will create datagram handles. If a new caller arrives and
 * we have run out of slots we can manually increment the maximum size of
 * available resource IDs.
 *
 * VMCI reserved hypervisor datagram resource IDs.
 */
enum {
	VMCI_RESOURCES_QUERY = 0,
	VMCI_GET_CONTEXT_ID = 1,
	VMCI_SET_NOTIFY_BITMAP = 2,
	VMCI_DOORBELL_LINK = 3,
	VMCI_DOORBELL_UNLINK = 4,
	VMCI_DOORBELL_NOTIFY = 5,
	/*
	 * VMCI_DATAGRAM_REQUEST_MAP and VMCI_DATAGRAM_REMOVE_MAP are
	 * obsoleted by the removal of VM to VM communication.
	 */
	VMCI_DATAGRAM_REQUEST_MAP = 6,
	VMCI_DATAGRAM_REMOVE_MAP = 7,
	VMCI_EVENT_SUBSCRIBE = 8,
	VMCI_EVENT_UNSUBSCRIBE = 9,
	VMCI_QUEUEPAIR_ALLOC = 10,
	VMCI_QUEUEPAIR_DETACH = 11,

	/*
	 * VMCI_VSOCK_VMX_LOOKUP was assigned to 12 for Fusion 3.0/3.1,
	 * WS 7.0/7.1 and ESX 4.1
	 */
	VMCI_HGFS_TRANSPORT = 13,
	VMCI_UNITY_PBRPC_REGISTER = 14,
	VMCI_RPC_PRIVILEGED = 15,
	VMCI_RPC_UNPRIVILEGED = 16,
	VMCI_RESOURCE_MAX = 17,
};

/*
 * struct vmci_handle - Ownership information structure
 * @context:    The VMX context ID.
 * @resource:   The resource ID (used for locating in resource hash).
 *
 * The vmci_handle structure is used to track resources used within
 * vmw_vmci.
 */
struct vmci_handle {
	u32 context;
	u32 resource;
};

#define vmci_make_handle(_cid, _rid) \
	(struct vmci_handle){ .context = _cid, .resource = _rid }

static inline bool vmci_handle_is_equal(struct vmci_handle h1,
					struct vmci_handle h2)
{
	return h1.context == h2.context && h1.resource == h2.resource;
}

#define VMCI_INVALID_ID ~0
static const struct vmci_handle VMCI_INVALID_HANDLE = {
	.context = VMCI_INVALID_ID,
	.resource = VMCI_INVALID_ID
};

static inline bool vmci_handle_is_invalid(struct vmci_handle h)
{
	return vmci_handle_is_equal(h, VMCI_INVALID_HANDLE);
}

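/*
 * Example (illustrative only): building a handle that addresses the
 * hypervisor's "get context id" resource, then sanity-checking it with
 * the helpers above:
 *
 *	struct vmci_handle h =
 *		vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
 *				 VMCI_GET_CONTEXT_ID);
 *
 *	if (vmci_handle_is_invalid(h))
 *		return VMCI_ERROR_INVALID_RESOURCE;
 */
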
/*
 * The below defines can be used to send anonymous requests.
 * This also indicates that no response is expected.
 */
#define VMCI_ANON_SRC_CONTEXT_ID   VMCI_INVALID_ID
#define VMCI_ANON_SRC_RESOURCE_ID  VMCI_INVALID_ID
static const struct vmci_handle VMCI_ANON_SRC_HANDLE = {
	.context = VMCI_ANON_SRC_CONTEXT_ID,
	.resource = VMCI_ANON_SRC_RESOURCE_ID
};

/* The lowest 16 context ids are reserved for internal use. */
#define VMCI_RESERVED_CID_LIMIT ((u32) 16)

/*
 * Hypervisor context id, used for calling into hypervisor
 * supplied services from the VM.
 */
#define VMCI_HYPERVISOR_CONTEXT_ID 0

/*
 * Well-known context id, a logical context that contains a set of
 * well-known services. This context ID is now obsolete.
 */
#define VMCI_WELL_KNOWN_CONTEXT_ID 1

/*
 * Context ID used by host endpoints.
 */
#define VMCI_HOST_CONTEXT_ID  2

#define VMCI_CONTEXT_IS_VM(_cid) (VMCI_INVALID_ID != (_cid) &&		\
				  (_cid) > VMCI_HOST_CONTEXT_ID)

/*
 * The VMCI_CONTEXT_RESOURCE_ID is used together with vmci_make_handle to make
 * handles that refer to a specific context.
 */
#define VMCI_CONTEXT_RESOURCE_ID 0

/*
 * VMCI error codes.
 */
enum {
	VMCI_SUCCESS_QUEUEPAIR_ATTACH   = 5,
	VMCI_SUCCESS_QUEUEPAIR_CREATE   = 4,
	VMCI_SUCCESS_LAST_DETACH        = 3,
	VMCI_SUCCESS_ACCESS_GRANTED     = 2,
	VMCI_SUCCESS_ENTRY_DEAD         = 1,
	VMCI_SUCCESS                     = 0,
	VMCI_ERROR_INVALID_RESOURCE      = (-1),
	VMCI_ERROR_INVALID_ARGS          = (-2),
	VMCI_ERROR_NO_MEM                = (-3),
	VMCI_ERROR_DATAGRAM_FAILED       = (-4),
	VMCI_ERROR_MORE_DATA             = (-5),
	VMCI_ERROR_NO_MORE_DATAGRAMS     = (-6),
	VMCI_ERROR_NO_ACCESS             = (-7),
	VMCI_ERROR_NO_HANDLE             = (-8),
	VMCI_ERROR_DUPLICATE_ENTRY       = (-9),
	VMCI_ERROR_DST_UNREACHABLE       = (-10),
	VMCI_ERROR_PAYLOAD_TOO_LARGE     = (-11),
	VMCI_ERROR_INVALID_PRIV          = (-12),
	VMCI_ERROR_GENERIC               = (-13),
	VMCI_ERROR_PAGE_ALREADY_SHARED   = (-14),
	VMCI_ERROR_CANNOT_SHARE_PAGE     = (-15),
	VMCI_ERROR_CANNOT_UNSHARE_PAGE   = (-16),
	VMCI_ERROR_NO_PROCESS            = (-17),
	VMCI_ERROR_NO_DATAGRAM           = (-18),
	VMCI_ERROR_NO_RESOURCES          = (-19),
	VMCI_ERROR_UNAVAILABLE           = (-20),
	VMCI_ERROR_NOT_FOUND             = (-21),
	VMCI_ERROR_ALREADY_EXISTS        = (-22),
	VMCI_ERROR_NOT_PAGE_ALIGNED      = (-23),
	VMCI_ERROR_INVALID_SIZE          = (-24),
	VMCI_ERROR_REGION_ALREADY_SHARED = (-25),
	VMCI_ERROR_TIMEOUT               = (-26),
	VMCI_ERROR_DATAGRAM_INCOMPLETE   = (-27),
	VMCI_ERROR_INCORRECT_IRQL        = (-28),
	VMCI_ERROR_EVENT_UNKNOWN         = (-29),
	VMCI_ERROR_OBSOLETE              = (-30),
	VMCI_ERROR_QUEUEPAIR_MISMATCH    = (-31),
	VMCI_ERROR_QUEUEPAIR_NOTSET      = (-32),
	VMCI_ERROR_QUEUEPAIR_NOTOWNER    = (-33),
	VMCI_ERROR_QUEUEPAIR_NOTATTACHED = (-34),
	VMCI_ERROR_QUEUEPAIR_NOSPACE     = (-35),
	VMCI_ERROR_QUEUEPAIR_NODATA      = (-36),
	VMCI_ERROR_BUSMEM_INVALIDATION   = (-37),
	VMCI_ERROR_MODULE_NOT_LOADED     = (-38),
	VMCI_ERROR_DEVICE_NOT_FOUND      = (-39),
	VMCI_ERROR_QUEUEPAIR_NOT_READY   = (-40),
	VMCI_ERROR_WOULD_BLOCK           = (-41),

	/* VMCI clients should return error codes within this range */
	VMCI_ERROR_CLIENT_MIN            = (-500),
	VMCI_ERROR_CLIENT_MAX            = (-550),

	/* Internal error codes. */
	VMCI_SHAREDMEM_ERROR_BAD_CONTEXT = (-1000),
};

/* VMCI reserved events. */
enum {
	/* Only applicable to guest endpoints */
	VMCI_EVENT_CTX_ID_UPDATE  = 0,

	/* Applicable to guest and host */
	VMCI_EVENT_CTX_REMOVED    = 1,

	/* Only applicable to guest endpoints */
	VMCI_EVENT_QP_RESUMED     = 2,

	/* Applicable to guest and host */
	VMCI_EVENT_QP_PEER_ATTACH = 3,

	/* Applicable to guest and host */
	VMCI_EVENT_QP_PEER_DETACH = 4,

	/*
	 * Applicable to VMX and vmk.  On vmk,
	 * this event has the Context payload type.
	 */
	VMCI_EVENT_MEM_ACCESS_ON  = 5,

	/*
	 * Applicable to VMX and vmk.  Same as
	 * above for the payload type.
	 */
	VMCI_EVENT_MEM_ACCESS_OFF = 6,
	VMCI_EVENT_MAX            = 7,
};

/*
 * Of the above events, a few are reserved for use in the VMX, and
 * other endpoints (guest and host kernel) should not use them. For
 * the rest of the events, we allow both host and guest endpoints to
 * subscribe to them, to maintain the same API for host and guest
 * endpoints.
 */
#define VMCI_EVENT_VALID_VMX(_event) ((_event) == VMCI_EVENT_MEM_ACCESS_ON || \
				      (_event) == VMCI_EVENT_MEM_ACCESS_OFF)

#define VMCI_EVENT_VALID(_event) ((_event) < VMCI_EVENT_MAX &&		\
				  !VMCI_EVENT_VALID_VMX(_event))

/* Reserved guest datagram resource ids. */
#define VMCI_EVENT_HANDLER 0

/*
 * VMCI coarse-grained privileges (per context or host
 * process/endpoint).  An entity with the restricted flag is only
 * allowed to interact with the hypervisor and trusted entities.
 */
enum {
	VMCI_NO_PRIVILEGE_FLAGS = 0,
	VMCI_PRIVILEGE_FLAG_RESTRICTED = 1,
	VMCI_PRIVILEGE_FLAG_TRUSTED = 2,
	VMCI_PRIVILEGE_ALL_FLAGS = (VMCI_PRIVILEGE_FLAG_RESTRICTED |
				    VMCI_PRIVILEGE_FLAG_TRUSTED),
	VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS = VMCI_NO_PRIVILEGE_FLAGS,
	VMCI_LEAST_PRIVILEGE_FLAGS = VMCI_PRIVILEGE_FLAG_RESTRICTED,
	VMCI_MAX_PRIVILEGE_FLAGS = VMCI_PRIVILEGE_FLAG_TRUSTED,
};

/* 0 through VMCI_RESERVED_RESOURCE_ID_MAX are reserved. */
#define VMCI_RESERVED_RESOURCE_ID_MAX 1023

/*
 * Driver version.
 *
 * Increment major version when you make an incompatible change.
 * Compatibility goes both ways (old driver with new executable
 * as well as new driver with old executable).
 */

/* Never change VMCI_VERSION_SHIFT_WIDTH */
#define VMCI_VERSION_SHIFT_WIDTH 16
#define VMCI_MAKE_VERSION(_major, _minor)			\
	((_major) << VMCI_VERSION_SHIFT_WIDTH | (u16) (_minor))

#define VMCI_VERSION_MAJOR(v)  ((u32) (v) >> VMCI_VERSION_SHIFT_WIDTH)
#define VMCI_VERSION_MINOR(v)  ((u16) (v))

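/*
 * Worked example: VMCI_MAKE_VERSION(11, 0) packs the major number into
 * the upper 16 bits, i.e. (11 << 16) | 0 == 0xb0000.  Conversely,
 * VMCI_VERSION_MAJOR(0xb0000) yields 11 and VMCI_VERSION_MINOR(0xb0000)
 * yields 0.
 */
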
/*
 * VMCI_VERSION is always the current version.  Subsequently listed
 * versions are ways of detecting previous versions of the connecting
 * application (i.e., VMX).
 *
 * VMCI_VERSION_NOVMVM: This version removed support for VM to VM
 * communication.
 *
 * VMCI_VERSION_NOTIFY: This version introduced doorbell notification
 * support.
 *
 * VMCI_VERSION_HOSTQP: This version introduced host end point support
 * for hosted products.
 *
 * VMCI_VERSION_PREHOSTQP: This is the version prior to the adoption of
 * support for host end-points.
 *
 * VMCI_VERSION_PREVERS2: This fictional version number is intended to
 * represent the version of a VMX which doesn't call into the driver
 * with ioctl VERSION2 and thus doesn't establish its version with the
 * driver.
 */

#define VMCI_VERSION                VMCI_VERSION_NOVMVM
#define VMCI_VERSION_NOVMVM         VMCI_MAKE_VERSION(11, 0)
#define VMCI_VERSION_NOTIFY         VMCI_MAKE_VERSION(10, 0)
#define VMCI_VERSION_HOSTQP         VMCI_MAKE_VERSION(9, 0)
#define VMCI_VERSION_PREHOSTQP      VMCI_MAKE_VERSION(8, 0)
#define VMCI_VERSION_PREVERS2       VMCI_MAKE_VERSION(1, 0)

#define VMCI_SOCKETS_MAKE_VERSION(_p)					\
	((((_p)[0] & 0xFF) << 24) | (((_p)[1] & 0xFF) << 16) | ((_p)[2]))

/*
 * The VMCI IOCTLs.  We use identity code 7, as noted in ioctl-number.h, and
 * we start at sequence 9f.  This gives us the same values that our shipping
 * products use, starting at 1951, provided we leave out the direction and
 * structure size.  Note that VMMon occupies the block following us, starting
 * at 2001.
 */
#define IOCTL_VMCI_VERSION                      _IO(7, 0x9f)    /* 1951 */
#define IOCTL_VMCI_INIT_CONTEXT                 _IO(7, 0xa0)
#define IOCTL_VMCI_QUEUEPAIR_SETVA              _IO(7, 0xa4)
#define IOCTL_VMCI_NOTIFY_RESOURCE              _IO(7, 0xa5)
#define IOCTL_VMCI_NOTIFICATIONS_RECEIVE        _IO(7, 0xa6)
#define IOCTL_VMCI_VERSION2                     _IO(7, 0xa7)
#define IOCTL_VMCI_QUEUEPAIR_ALLOC              _IO(7, 0xa8)
#define IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE        _IO(7, 0xa9)
#define IOCTL_VMCI_QUEUEPAIR_DETACH             _IO(7, 0xaa)
#define IOCTL_VMCI_DATAGRAM_SEND                _IO(7, 0xab)
#define IOCTL_VMCI_DATAGRAM_RECEIVE             _IO(7, 0xac)
#define IOCTL_VMCI_CTX_ADD_NOTIFICATION         _IO(7, 0xaf)
#define IOCTL_VMCI_CTX_REMOVE_NOTIFICATION      _IO(7, 0xb0)
#define IOCTL_VMCI_CTX_GET_CPT_STATE            _IO(7, 0xb1)
#define IOCTL_VMCI_CTX_SET_CPT_STATE            _IO(7, 0xb2)
#define IOCTL_VMCI_GET_CONTEXT_ID               _IO(7, 0xb3)
#define IOCTL_VMCI_SOCKETS_VERSION              _IO(7, 0xb4)
#define IOCTL_VMCI_SOCKETS_GET_AF_VALUE         _IO(7, 0xb8)
#define IOCTL_VMCI_SOCKETS_GET_LOCAL_CID        _IO(7, 0xb9)
#define IOCTL_VMCI_SET_NOTIFY                   _IO(7, 0xcb)    /* 1995 */
/*IOCTL_VMMON_START                             _IO(7, 0xd1)*/  /* 2001 */

/*
 * struct vmci_queue_header - VMCI Queue Header information.
 *
 * A Queue cannot stand by itself as designed.  Each Queue's header
 * contains a pointer into itself (the producer_tail) and into its peer
 * (consumer_head).  The reason for the separation is one of
 * accessibility: Each end-point can modify two things: where the next
 * location to enqueue is within its produce_q (producer_tail); and
 * where the next dequeue location is in its consume_q (consumer_head).
 *
 * An end-point cannot modify the pointers of its peer (guest to
 * guest; NOTE that in the host both queue headers are mapped r/w).
 * But, each end-point needs read access to both Queue header
 * structures in order to determine how much space is used (or left)
 * in the Queue.  This is because for an end-point to know how full
 * its produce_q is, it needs to use the consumer_head that points into
 * the produce_q but -that- consumer_head is in the Queue header for
 * that end-point's consume_q.
 *
 * Thoroughly confused?  Sorry.
 *
 * producer_tail: the point to enqueue new entrants.  When you approach
 * a line in a store, for example, you walk up to the tail.
 *
 * consumer_head: the point in the queue from which the next element is
 * dequeued.  In other words, whoever is next in line is at the head of
 * the line.
 *
 * Also, producer_tail points to an empty byte in the Queue, whereas
 * consumer_head points to a valid byte of data (unless producer_tail ==
 * consumer_head, in which case consumer_head does not point to a valid
 * byte of data).
 *
 * For a queue of buffer 'size' bytes, the tail and head pointers will be in
 * the range [0, size-1].
 *
 * If produce_q_header->producer_tail == consume_q_header->consumer_head
 * then the produce_q is empty.
 */
struct vmci_queue_header {
	/* All fields are 64bit and aligned. */
	struct vmci_handle handle;	/* Identifier. */
	atomic64_t producer_tail;	/* Offset in this queue. */
	atomic64_t consumer_head;	/* Offset in peer queue. */
};

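/*
 * Worked example: for a produce_q of size 8, producer_tail == 6 and
 * consumer_head == 2 mean that bytes 2..5 hold data: 4 bytes are ready
 * to consume and 3 are free, since one byte is always sacrificed to
 * distinguish a full queue from an empty one.  When producer_tail ==
 * consumer_head, the queue is empty.
 */
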
/*
 * struct vmci_datagram - Base struct for vmci datagrams.
 * @dst:        A vmci_handle that tracks the destination of the datagram.
 * @src:        A vmci_handle that tracks the source of the datagram.
 * @payload_size:       The size of the payload.
 *
 * vmci_datagram structs are used when sending vmci datagrams.  They include
 * the necessary source and destination information to properly route
 * the information along with the size of the package.
 */
struct vmci_datagram {
	struct vmci_handle dst;
	struct vmci_handle src;
	u64 payload_size;
};

/*
 * Datagram flags.  VMCI_FLAG_WELLKNOWN_DG_HND creates a well-known handle
 * instead of a per-context handle.  VMCI_FLAG_DG_DELAYED_CB defers datagram
 * delivery, so that the datagram callback is invoked in a delayed context
 * (not interrupt context).
 */
#define VMCI_FLAG_DG_NONE          0
#define VMCI_FLAG_WELLKNOWN_DG_HND 0x1
#define VMCI_FLAG_ANYCID_DG_HND    0x2
#define VMCI_FLAG_DG_DELAYED_CB    0x4

/*
 * Maximum supported size of a VMCI datagram for routable datagrams.
 * Datagrams going to the hypervisor are allowed to be larger.
 */
#define VMCI_MAX_DG_SIZE (17 * 4096)
#define VMCI_MAX_DG_PAYLOAD_SIZE (VMCI_MAX_DG_SIZE - \
				  sizeof(struct vmci_datagram))
#define VMCI_DG_PAYLOAD(_dg) (void *)((char *)(_dg) +			\
				      sizeof(struct vmci_datagram))
#define VMCI_DG_HEADERSIZE sizeof(struct vmci_datagram)
#define VMCI_DG_SIZE(_dg) (VMCI_DG_HEADERSIZE + (size_t)(_dg)->payload_size)
#define VMCI_DG_SIZE_ALIGNED(_dg) ((VMCI_DG_SIZE(_dg) + 7) & (~((size_t) 0x7)))
#define VMCI_MAX_DATAGRAM_QUEUE_SIZE (VMCI_MAX_DG_SIZE * 2)

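/*
 * Example (illustrative only): allocating and filling a datagram with an
 * 8-byte payload ('data' is an assumed source buffer).  payload_size
 * counts the payload only; VMCI_DG_SIZE() then gives the full size:
 *
 *	struct vmci_datagram *dg;
 *
 *	dg = kzalloc(VMCI_DG_HEADERSIZE + 8, GFP_KERNEL);
 *	if (!dg)
 *		return VMCI_ERROR_NO_MEM;
 *	dg->dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
 *				   VMCI_RESOURCES_QUERY);
 *	dg->src = VMCI_ANON_SRC_HANDLE;
 *	dg->payload_size = 8;
 *	memcpy(VMCI_DG_PAYLOAD(dg), data, 8);
 */
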
struct vmci_event_payload_qp {
	struct vmci_handle handle;  /* queue_pair handle. */
	u32 peer_id;                /* Context id of attaching/detaching VM. */
	u32 _pad;
};

/* Flags for VMCI queue_pair API. */
enum {
	/* Fail alloc if QP not created by peer. */
	VMCI_QPFLAG_ATTACH_ONLY = 1 << 0,

	/* Only allow attaches from local context. */
	VMCI_QPFLAG_LOCAL = 1 << 1,

	/* Host won't block when guest is quiesced. */
	VMCI_QPFLAG_NONBLOCK = 1 << 2,

	/* Pin data pages in ESX.  Used with NONBLOCK */
	VMCI_QPFLAG_PINNED = 1 << 3,

	/* Update the following flag when adding new flags. */
	VMCI_QP_ALL_FLAGS = (VMCI_QPFLAG_ATTACH_ONLY | VMCI_QPFLAG_LOCAL |
			     VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED),

	/* Convenience flags */
	VMCI_QP_ASYMM = (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED),
	VMCI_QP_ASYMM_PEER = (VMCI_QPFLAG_ATTACH_ONLY | VMCI_QP_ASYMM),
};

/*
 * We allow at least 1024 more event datagrams from the hypervisor past the
 * normally allowed datagrams pending for a given context.  We define this
 * limit on event datagrams from the hypervisor to guard against DoS attack
 * from a malicious VM which could repeatedly attach to and detach from a queue
 * pair, causing events to be queued at the destination VM.  However, the rate
 * at which such events can be generated is small since it requires a VM exit
 * and handling of queue pair attach/detach call at the hypervisor.  Event
 * datagrams may be queued up at the destination VM if it has interrupts
 * disabled or if it is not draining events for some other reason.  1024
 * datagrams is a grossly conservative estimate of the time for which
 * interrupts may be disabled in the destination VM, but at the same time does
 * not exacerbate the memory pressure problem on the host by much (size of each
 * event datagram is small).
 */
#define VMCI_MAX_DATAGRAM_AND_EVENT_QUEUE_SIZE				\
	(VMCI_MAX_DATAGRAM_QUEUE_SIZE +					\
	 1024 * (sizeof(struct vmci_datagram) +				\
		 sizeof(struct vmci_event_data_max)))

/*
 * Struct used for querying, via VMCI_RESOURCES_QUERY, the availability of
 * hypervisor resources.  Struct size is 16 bytes. All fields in struct are
 * aligned to their natural alignment.
 */
struct vmci_resource_query_hdr {
	struct vmci_datagram hdr;
	u32 num_resources;
	u32 _padding;
};

/*
 * Convenience struct for negotiating vectors. Must match layout of
 * VMCIResourceQueryHdr minus the struct vmci_datagram header.
 */
struct vmci_resource_query_msg {
	u32 num_resources;
	u32 _padding;
	u32 resources[1];
};

/*
 * The maximum number of resources that can be queried using
 * VMCI_RESOURCES_QUERY is 31, as the result is encoded in the lower 31
 * bits of a positive return value. Negative values are reserved for
 * errors.
 */
#define VMCI_RESOURCE_QUERY_MAX_NUM 31

/* Maximum size for the VMCI_RESOURCES_QUERY request. */
#define VMCI_RESOURCE_QUERY_MAX_SIZE				\
	(sizeof(struct vmci_resource_query_hdr) +		\
	 sizeof(u32) * VMCI_RESOURCE_QUERY_MAX_NUM)

/*
 * Struct used for setting the notification bitmap.  All fields in
 * struct are aligned to their natural alignment.
 */
struct vmci_notify_bm_set_msg {
	struct vmci_datagram hdr;
	union {
		u32 bitmap_ppn32;
		u64 bitmap_ppn64;
	};
};

/*
 * Struct used for linking a doorbell handle with an index in the
 * notify bitmap. All fields in struct are aligned to their natural
 * alignment.
 */
struct vmci_doorbell_link_msg {
	struct vmci_datagram hdr;
	struct vmci_handle handle;
	u64 notify_idx;
};

/*
 * Struct used for unlinking a doorbell handle from an index in the
 * notify bitmap. All fields in struct are aligned to their natural
 * alignment.
 */
struct vmci_doorbell_unlink_msg {
	struct vmci_datagram hdr;
	struct vmci_handle handle;
};

/*
 * Struct used for generating a notification on a doorbell handle. All
 * fields in struct are aligned to their natural alignment.
 */
struct vmci_doorbell_notify_msg {
	struct vmci_datagram hdr;
	struct vmci_handle handle;
};

/*
 * This struct is used to contain data for events.  Size of this struct is a
 * multiple of 8 bytes, and all fields are aligned to their natural alignment.
 */
struct vmci_event_data {
	u32 event;		/* 4 bytes. */
	u32 _pad;
	/* Event payload is put here. */
};

/*
 * Define the different VMCI_EVENT payload data types here.  All structs must
 * be a multiple of 8 bytes, and fields must be aligned to their natural
 * alignment.
 */
struct vmci_event_payld_ctx {
	u32 context_id;	/* 4 bytes. */
	u32 _pad;
};

struct vmci_event_payld_qp {
	struct vmci_handle handle;  /* queue_pair handle. */
	u32 peer_id;	    /* Context id of attaching/detaching VM. */
	u32 _pad;
};

/*
 * We define the following struct to get the size of the maximum event
 * data the hypervisor may send to the guest.  If adding a new event
 * payload type above, add it to the following struct too (inside the
 * union).
 */
struct vmci_event_data_max {
	struct vmci_event_data event_data;
	union {
		struct vmci_event_payld_ctx context_payload;
		struct vmci_event_payld_qp qp_payload;
	} ev_data_payload;
};

/*
 * Struct used for VMCI_EVENT_SUBSCRIBE/UNSUBSCRIBE and
 * VMCI_EVENT_HANDLER messages.  Struct size is 32 bytes.  All fields
 * in struct are aligned to their natural alignment.
 */
struct vmci_event_msg {
	struct vmci_datagram hdr;

	/* Has event type and payload. */
	struct vmci_event_data event_data;

	/* Payload gets put here. */
};

/* Event with context payload. */
struct vmci_event_ctx {
	struct vmci_event_msg msg;
	struct vmci_event_payld_ctx payload;
};

/* Event with QP payload. */
struct vmci_event_qp {
	struct vmci_event_msg msg;
	struct vmci_event_payld_qp payload;
};

/*
 * Structs used for queue_pair alloc and detach messages.  We align fields of
 * these structs to 64bit boundaries.
 */
struct vmci_qp_alloc_msg {
	struct vmci_datagram hdr;
	struct vmci_handle handle;
	u32 peer;
	u32 flags;
	u64 produce_size;
	u64 consume_size;
	u64 num_ppns;

	/* List of PPNs placed here. */
};

struct vmci_qp_detach_msg {
	struct vmci_datagram hdr;
	struct vmci_handle handle;
};

/* VMCI Doorbell API. */
#define VMCI_FLAG_DELAYED_CB 0x01

typedef void (*vmci_callback) (void *client_data);

/*
 * struct vmci_qp - A vmw_vmci queue pair handle.
 *
 * This structure is used as a handle to a queue pair created by
 * VMCI.  It is intentionally left opaque to clients.
 */
struct vmci_qp;

/* Callback needed for correctly waiting on events. */
typedef int (*vmci_datagram_recv_cb) (void *client_data,
				      struct vmci_datagram *msg);

/* VMCI Event API. */
typedef void (*vmci_event_cb) (u32 sub_id, const struct vmci_event_data *ed,
			       void *client_data);

/*
 * We use the following inline function to access the payload data
 * associated with an event data.
 */
static inline const void *
vmci_event_data_const_payload(const struct vmci_event_data *ev_data)
{
	return (const char *)ev_data + sizeof(*ev_data);
}

static inline void *vmci_event_data_payload(struct vmci_event_data *ev_data)
{
	return (void *)vmci_event_data_const_payload(ev_data);
}

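/*
 * Example (illustrative only): inside a vmci_event_cb handler the payload
 * type is implied by ed->event; a context-removal event carries a
 * struct vmci_event_payld_ctx:
 *
 *	if (ed->event == VMCI_EVENT_CTX_REMOVED) {
 *		const struct vmci_event_payld_ctx *p =
 *			vmci_event_data_const_payload(ed);
 *
 *		pr_info("context %u removed\n", p->context_id);
 *	}
 */
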
/*
 * Helper to read a value from a head or tail pointer. For X86_32, the
 * pointer is treated as a 32bit value, since the pointer value
 * never exceeds a 32bit value in this case. Also, doing an
 * atomic64_read on X86_32 uniprocessor systems may be implemented
 * as a non-locked cmpxchg8b, which may end up overwriting updates done
 * by the VMCI device to the memory location. On 32bit SMP, the lock
 * prefix will be used, so correctness isn't an issue, but using a
 * 64bit operation still adds unnecessary overhead.
 */
static inline u64 vmci_q_read_pointer(atomic64_t *var)
{
#if defined(CONFIG_X86_32)
	return atomic_read((atomic_t *)var);
#else
	return atomic64_read(var);
#endif
}

/*
 * Helper to set the value of a head or tail pointer. For X86_32, the
 * pointer is treated as a 32bit value, since the pointer value
 * never exceeds a 32bit value in this case. On 32bit SMP, using a
 * locked cmpxchg8b adds unnecessary overhead.
 */
static inline void vmci_q_set_pointer(atomic64_t *var,
				      u64 new_val)
{
#if defined(CONFIG_X86_32)
	atomic_set((atomic_t *)var, (u32)new_val);
#else
	atomic64_set(var, new_val);
#endif
}

/*
 * Helper to add a given offset to a head or tail pointer. Wraps the
 * value of the pointer around the max size of the queue.
 */
static inline void vmci_qp_add_pointer(atomic64_t *var,
				       size_t add,
				       u64 size)
{
	u64 new_val = vmci_q_read_pointer(var);

	if (new_val >= size - add)
		new_val -= size;

	new_val += add;

	vmci_q_set_pointer(var, new_val);
}

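/*
 * Worked example: with size == 8, a pointer at 6 advanced by 3 passes
 * the check (6 >= 8 - 3), is reduced by 8 (wrapping modulo 2^64) and
 * then incremented by 3, ending at 1 == (6 + 3) % 8.  A pointer at 2
 * advanced by 3 fails the check and simply becomes 5.
 */
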
/*
 * Helper routine to get the Producer Tail from the supplied queue.
 */
static inline u64
vmci_q_header_producer_tail(const struct vmci_queue_header *q_header)
{
	struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header;
	return vmci_q_read_pointer(&qh->producer_tail);
}

/*
 * Helper routine to get the Consumer Head from the supplied queue.
 */
static inline u64
vmci_q_header_consumer_head(const struct vmci_queue_header *q_header)
{
	struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header;
	return vmci_q_read_pointer(&qh->consumer_head);
}

/*
 * Helper routine to increment the Producer Tail.  Fundamentally,
 * vmci_qp_add_pointer() is used to manipulate the tail itself.
 */
static inline void
vmci_q_header_add_producer_tail(struct vmci_queue_header *q_header,
				size_t add,
				u64 queue_size)
{
	vmci_qp_add_pointer(&q_header->producer_tail, add, queue_size);
}

/*
 * Helper routine to increment the Consumer Head.  Fundamentally,
 * vmci_qp_add_pointer() is used to manipulate the head itself.
 */
static inline void
vmci_q_header_add_consumer_head(struct vmci_queue_header *q_header,
				size_t add,
				u64 queue_size)
{
	vmci_qp_add_pointer(&q_header->consumer_head, add, queue_size);
}

/*
 * Helper routine for getting the head and the tail pointer for a queue.
 * Both the VMCIQueues are needed to get both the pointers for one queue.
 */
static inline void
vmci_q_header_get_pointers(const struct vmci_queue_header *produce_q_header,
			   const struct vmci_queue_header *consume_q_header,
			   u64 *producer_tail,
			   u64 *consumer_head)
{
	if (producer_tail)
		*producer_tail = vmci_q_header_producer_tail(produce_q_header);

	if (consumer_head)
		*consumer_head = vmci_q_header_consumer_head(consume_q_header);
}

static inline void vmci_q_header_init(struct vmci_queue_header *q_header,
				      const struct vmci_handle handle)
{
	q_header->handle = handle;
	atomic64_set(&q_header->producer_tail, 0);
	atomic64_set(&q_header->consumer_head, 0);
}

/*
 * Finds available free space in a produce queue to enqueue more
 * data or reports an error if queue pair corruption is detected.
 */
static inline s64
vmci_q_header_free_space(const struct vmci_queue_header *produce_q_header,
			 const struct vmci_queue_header *consume_q_header,
			 const u64 produce_q_size)
{
	u64 tail;
	u64 head;
	u64 free_space;

	tail = vmci_q_header_producer_tail(produce_q_header);
	head = vmci_q_header_consumer_head(consume_q_header);

	if (tail >= produce_q_size || head >= produce_q_size)
		return VMCI_ERROR_INVALID_SIZE;

	/*
	 * Deduct 1 to avoid tail becoming equal to head which causes
	 * ambiguity. If head and tail are equal it means that the
	 * queue is empty.
	 */
	if (tail >= head)
		free_space = produce_q_size - (tail - head) - 1;
	else
		free_space = head - tail - 1;

	return free_space;
}

/*
 * vmci_q_header_free_space() does all the heavy lifting of
 * determining the number of free bytes in a Queue.  This routine then
 * subtracts that size from the full size of the Queue so the caller
 * knows how many bytes are ready to be dequeued.
 * Results:
 * On success, available data size in bytes (up to MAX_INT64).
 * On failure, appropriate error code.
 */
static inline s64
vmci_q_header_buf_ready(const struct vmci_queue_header *consume_q_header,
			const struct vmci_queue_header *produce_q_header,
			const u64 consume_q_size)
{
	s64 free_space;

	free_space = vmci_q_header_free_space(consume_q_header,
					      produce_q_header, consume_q_size);
	if (free_space < VMCI_SUCCESS)
		return free_space;

	return consume_q_size - free_space - 1;
}

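/*
 * Example (illustrative only): before dequeuing from its consume_q, an
 * end-point checks how many bytes are ready; note that the queue headers
 * swap roles relative to the free-space query on the produce_q side:
 *
 *	s64 ready = vmci_q_header_buf_ready(consume_q_header,
 *					    produce_q_header,
 *					    consume_q_size);
 *
 *	if (ready < VMCI_SUCCESS)
 *		return ready;
 *	if (ready == 0)
 *		return VMCI_ERROR_QUEUEPAIR_NODATA;
 */
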
#endif /* _VMW_VMCI_DEF_H_ */