linux/include/xen/interface/io/ring.h
/******************************************************************************
 * ring.h
 *
 * Shared producer-consumer ring macros.
 *
 * Tim Deegan and Andrew Warfield November 2004.
 */

#ifndef __XEN_PUBLIC_IO_RING_H__
#define __XEN_PUBLIC_IO_RING_H__

typedef unsigned int RING_IDX;

/* Round a 32-bit unsigned constant down to the nearest power of two. */
#define __RD2(_x)  (((_x) & 0x00000002) ? 0x2                  : ((_x) & 0x1))
#define __RD4(_x)  (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2    : __RD2(_x))
#define __RD8(_x)  (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4    : __RD4(_x))
#define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8    : __RD8(_x))
#define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x))

/*
 * Calculate size of a shared ring, given the total available space for the
 * ring and indexes (_sz), and the name tag of the request/response structure.
 * A ring contains as many entries as will fit, rounded down to the nearest
 * power of two (so we can mask with (size-1) to loop around).
 */
#define __CONST_RING_SIZE(_s, _sz)                              \
        (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) /   \
                sizeof(((struct _s##_sring *)0)->ring[0])))
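
/*
 * Worked example (an editorial addition): the shared ring header defined
 * below is 64 bytes (four RING_IDX fields plus 48 bytes of padding), so
 * with a hypothetical 64-byte entry union a 4096-byte page holds
 * (4096 - 64) / 64 = 63 entries, which __RD32() rounds down to 32.
 */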

/*
 * The same for passing in an actual pointer instead of a name tag.
 */
#define __RING_SIZE(_s, _sz)                                            \
        (__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))

/*
 * Macros to make the correct C datatypes for a new kind of ring.
 *
 * To make a new ring datatype, you need to have two message structures,
 * say struct request and struct response, already defined.
 *
 * In a header where you want the ring datatype declared, you then do:
 *
 *     DEFINE_RING_TYPES(mytag, struct request, struct response);
 *
 * This expands out to give you a set of types, as you can see below.
 * The most important of these are:
 *
 *     struct mytag_sring      - The shared ring.
 *     struct mytag_front_ring - The 'front' half of the ring.
 *     struct mytag_back_ring  - The 'back' half of the ring.
 *
 * To initialize a ring in your code, you need to know the location and size
 * of the shared memory area (PAGE_SIZE, for instance). To initialize
 * the front half:
 *
 *     struct mytag_front_ring front_ring;
 *     SHARED_RING_INIT((struct mytag_sring *)shared_page);
 *     FRONT_RING_INIT(&front_ring, (struct mytag_sring *)shared_page,
 *                     PAGE_SIZE);
 *
 * Initializing the back follows similarly (note that only the front
 * initializes the shared ring):
 *
 *     struct mytag_back_ring back_ring;
 *     BACK_RING_INIT(&back_ring, (struct mytag_sring *)shared_page,
 *                    PAGE_SIZE);
 */

#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t)                     \
                                                                        \
/* Shared ring entry */                                                 \
union __name##_sring_entry {                                            \
    __req_t req;                                                        \
    __rsp_t rsp;                                                        \
};                                                                      \
                                                                        \
/* Shared ring page */                                                  \
struct __name##_sring {                                                 \
    RING_IDX req_prod, req_event;                                       \
    RING_IDX rsp_prod, rsp_event;                                       \
    uint8_t  pad[48]; /* pads the index header to 64 bytes */           \
    union __name##_sring_entry ring[1]; /* variable-length */           \
};                                                                      \
                                                                        \
/* "Front" end's private variables */                                   \
struct __name##_front_ring {                                            \
    RING_IDX req_prod_pvt;                                              \
    RING_IDX rsp_cons;                                                  \
    unsigned int nr_ents;                                               \
    struct __name##_sring *sring;                                       \
};                                                                      \
                                                                        \
/* "Back" end's private variables */                                    \
struct __name##_back_ring {                                             \
    RING_IDX rsp_prod_pvt;                                              \
    RING_IDX req_cons;                                                  \
    unsigned int nr_ents;                                               \
    struct __name##_sring *sring;                                       \
};

/*
 * Macros for manipulating rings.
 *
 * FRONT_RING_whatever works on the "front end" of a ring: here
 * requests are pushed on to the ring and responses taken off it.
 *
 * BACK_RING_whatever works on the "back end" of a ring: here
 * requests are taken off the ring and responses put on.
 *
 * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL.
 * This is OK in 1-for-1 request-response situations where the
 * requestor (front end) never has more than RING_SIZE()-1
 * outstanding requests.
 */

/* Initializing empty rings */
#define SHARED_RING_INIT(_s) do {                                       \
    (_s)->req_prod  = (_s)->rsp_prod  = 0;                              \
    (_s)->req_event = (_s)->rsp_event = 1;                              \
    memset((_s)->pad, 0, sizeof((_s)->pad));                            \
} while (0)

#define FRONT_RING_INIT(_r, _s, __size) do {                            \
    (_r)->req_prod_pvt = 0;                                             \
    (_r)->rsp_cons = 0;                                                 \
    (_r)->nr_ents = __RING_SIZE(_s, __size);                            \
    (_r)->sring = (_s);                                                 \
} while (0)

#define BACK_RING_INIT(_r, _s, __size) do {                             \
    (_r)->rsp_prod_pvt = 0;                                             \
    (_r)->req_cons = 0;                                                 \
    (_r)->nr_ents = __RING_SIZE(_s, __size);                            \
    (_r)->sring = (_s);                                                 \
} while (0)

/* Initialize to existing shared indexes -- for recovery */
#define FRONT_RING_ATTACH(_r, _s, __size) do {                          \
    (_r)->sring = (_s);                                                 \
    (_r)->req_prod_pvt = (_s)->req_prod;                                \
    (_r)->rsp_cons = (_s)->rsp_prod;                                    \
    (_r)->nr_ents = __RING_SIZE(_s, __size);                            \
} while (0)

#define BACK_RING_ATTACH(_r, _s, __size) do {                           \
    (_r)->sring = (_s);                                                 \
    (_r)->rsp_prod_pvt = (_s)->rsp_prod;                                \
    (_r)->req_cons = (_s)->req_prod;                                    \
    (_r)->nr_ents = __RING_SIZE(_s, __size);                            \
} while (0)

/* How big is this ring? */
#define RING_SIZE(_r)                                                   \
    ((_r)->nr_ents)

/* Number of free requests (for use on front side only). */
#define RING_FREE_REQUESTS(_r)                                          \
    (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons))

/*
 * Test if there is an empty slot available on the front ring.
 * (This is only meaningful from the front.)
 */
#define RING_FULL(_r)                                                   \
    (RING_FREE_REQUESTS(_r) == 0)

/* Test if there are outstanding messages to be processed on a ring. */
#define RING_HAS_UNCONSUMED_RESPONSES(_r)                               \
    ((_r)->sring->rsp_prod - (_r)->rsp_cons)

/*
 * Like the above, for the back end. The result is clamped: 'rsp' is the
 * number of slots the back end can still consume before its count of
 * consumed-but-unanswered requests reaches RING_SIZE(), so a buggy or
 * malicious frontend publishing a wild req_prod cannot make the back
 * end over-consume.
 */
#define RING_HAS_UNCONSUMED_REQUESTS(_r)                                \
    ({                                                                  \
        unsigned int req = (_r)->sring->req_prod - (_r)->req_cons;      \
        unsigned int rsp = RING_SIZE(_r) -                              \
                           ((_r)->req_cons - (_r)->rsp_prod_pvt);       \
        req < rsp ? req : rsp;                                          \
    })

/* Direct access to individual ring elements, by index. */
#define RING_GET_REQUEST(_r, _idx)                                      \
    (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))

/*
 * Get a local copy of a request.
 *
 * Use this in preference to RING_GET_REQUEST() so all processing is
 * done on a local copy that cannot be modified by the other end.
 *
 * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this
 * to be ineffective where _req is a struct which consists of only bitfields.
 */
#define RING_COPY_REQUEST(_r, _idx, _req) do {                          \
        /* Use volatile to force the copy into _req. */                 \
        *(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx);   \
} while (0)

#define RING_GET_RESPONSE(_r, _idx)                                     \
    (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))

/* Loop termination condition: Would the specified index overflow the ring? */
#define RING_REQUEST_CONS_OVERFLOW(_r, _cons)                           \
    (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))

/* Ill-behaved frontend determination: Can there be this many requests? */
#define RING_REQUEST_PROD_OVERFLOW(_r, _prod)                           \
    (((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r))

#define RING_PUSH_REQUESTS(_r) do {                                     \
    virt_wmb(); /* back sees requests /before/ updated producer index */\
    (_r)->sring->req_prod = (_r)->req_prod_pvt;                         \
} while (0)

#define RING_PUSH_RESPONSES(_r) do {                                    \
    virt_wmb(); /* front sees responses /before/ updated producer index */ \
    (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt;                         \
} while (0)

/*
 * Notification hold-off (req_event and rsp_event):
 *
 * When queueing requests or responses on a shared ring, it may not always be
 * necessary to notify the remote end. For example, if requests are in flight
 * in a backend, the front may be able to queue further requests without
 * notifying the back (if the back checks for new requests when it queues
 * responses).
 *
 * When enqueuing requests or responses:
 *
 *  Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument
 *  is a boolean return value. True indicates that the receiver requires an
 *  asynchronous notification.
 *
 * After dequeuing requests or responses (before sleeping the connection):
 *
 *  Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES().
 *  The second argument is a boolean return value. True indicates that there
 *  are pending messages on the ring (i.e., the connection should not be put
 *  to sleep).
 *
 *  These macros will set the req_event/rsp_event field to trigger a
 *  notification on the very next message that is enqueued. If you want to
 *  create batches of work (i.e., only receive a notification after several
 *  messages have been enqueued) then you will need to create a customised
 *  version of the FINAL_CHECK macro in your own code, which sets the event
 *  field appropriately.
 */

/*
 * Notify only if the other end's event index (req_event/rsp_event) falls
 * within the window of entries just pushed, i.e. __old < event <= __new;
 * the unsigned subtractions make the test safe across index wrap-around.
 */
#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do {           \
    RING_IDX __old = (_r)->sring->req_prod;                             \
    RING_IDX __new = (_r)->req_prod_pvt;                                \
    virt_wmb(); /* back sees requests /before/ updated producer index */\
    (_r)->sring->req_prod = __new;                                      \
    virt_mb(); /* back sees new requests /before/ we check req_event */ \
    (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) <           \
                 (RING_IDX)(__new - __old));                            \
} while (0)

#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do {          \
    RING_IDX __old = (_r)->sring->rsp_prod;                             \
    RING_IDX __new = (_r)->rsp_prod_pvt;                                \
    virt_wmb(); /* front sees responses /before/ updated producer index */ \
    (_r)->sring->rsp_prod = __new;                                      \
    virt_mb(); /* front sees new responses /before/ we check rsp_event */  \
    (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) <           \
                 (RING_IDX)(__new - __old));                            \
} while (0)
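
/*
 * Illustrative front-end sketch (an editorial addition, not part of the
 * original header), assuming DEFINE_RING_TYPES(mytag, struct mytag_request,
 * struct mytag_response) and a hypothetical notify_remote() event-channel
 * helper: queue one request and notify only if the back end asked for it.
 *
 *     struct mytag_request *req;
 *     int notify;
 *
 *     if (RING_FULL(&front_ring))
 *         return -EBUSY;
 *     req = RING_GET_REQUEST(&front_ring, front_ring.req_prod_pvt);
 *     *req = new_request;
 *     front_ring.req_prod_pvt++;
 *     RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&front_ring, notify);
 *     if (notify)
 *         notify_remote();
 */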

#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do {             \
    (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);                   \
    if (_work_to_do) break;                                             \
    (_r)->sring->req_event = (_r)->req_cons + 1;                        \
    virt_mb();                                                          \
    (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);                   \
} while (0)

#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do {            \
    (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);                  \
    if (_work_to_do) break;                                             \
    (_r)->sring->rsp_event = (_r)->rsp_cons + 1;                        \
    virt_mb();                                                          \
    (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);                  \
} while (0)
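
/*
 * Illustrative back-end sketch (an editorial addition, not part of the
 * original header), with hypothetical process() and notify_remote()
 * helpers: drain requests, push responses, then arm req_event via the
 * FINAL check before going idle.
 *
 *     RING_IDX cons, prod;
 *     struct mytag_request req;
 *     int more, notify;
 *
 *     do {
 *         prod = back_ring.sring->req_prod;
 *         virt_rmb();       // read requests only after the producer index
 *         for (cons = back_ring.req_cons;
 *              cons != prod &&
 *              !RING_REQUEST_CONS_OVERFLOW(&back_ring, cons);
 *              cons++) {
 *             RING_COPY_REQUEST(&back_ring, cons, &req);
 *             process(&req); // queues a response, bumps rsp_prod_pvt
 *         }
 *         back_ring.req_cons = cons;
 *         RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&back_ring, notify);
 *         if (notify)
 *             notify_remote();
 *         RING_FINAL_CHECK_FOR_REQUESTS(&back_ring, more);
 *     } while (more);
 */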

/*
 * DEFINE_XEN_FLEX_RING_AND_INTF defines two unidirectional rings and
 * helper functions to check whether there is data on a ring, and to
 * read from and write to them.
 *
 * DEFINE_XEN_FLEX_RING is similar to DEFINE_XEN_FLEX_RING_AND_INTF, but
 * does not define the indexes page. As different protocols can have
 * extensions to the basic format, this macro allows them to define their
 * own struct.
 *
 * XEN_FLEX_RING_SIZE
 *   Convenience macro to calculate the size of one of the two rings
 *   from the overall order.
 *
 * $NAME_mask
 *   Function to apply the size mask to an index, reducing it to the
 *   range [0, size).
 *
 * $NAME_read_packet
 *   Function to read data from the ring. The amount of data to read is
 *   specified by the "size" argument.
 *
 * $NAME_write_packet
 *   Function to write data to the ring. The amount of data to write is
 *   specified by the "size" argument.
 *
 * $NAME_get_ring_ptr
 *   Convenience function that returns a pointer to read/write to the
 *   ring at the right location.
 *
 * $NAME_data_intf
 *   Indexes page, shared between frontend and backend. It also
 *   contains the array of grant refs.
 *
 * $NAME_queued
 *   Function to calculate how many bytes are currently on the ring,
 *   ready to be read. It can also be used to calculate how much free
 *   space is currently on the ring (XEN_FLEX_RING_SIZE() -
 *   $NAME_queued()).
 */

#ifndef XEN_PAGE_SHIFT
/*
 * The PAGE_SIZE for ring protocols and hypercall interfaces is always
 * 4K, regardless of the architecture and of the page granularity chosen
 * by the operating system.
 */
#define XEN_PAGE_SHIFT 12
#endif
#define XEN_FLEX_RING_SIZE(order)                                             \
    (1UL << ((order) + XEN_PAGE_SHIFT - 1))
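
/*
 * Editorial note: the "- 1" reflects that the 1 << order pages of the
 * allocation are split evenly between the "in" and "out" rings (see the
 * struct $NAME_data fields below), so each ring gets half. For example,
 * order 1 covers two 4K pages (8192 bytes), giving two 4096-byte rings.
 */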

#define DEFINE_XEN_FLEX_RING(name)                                            \
static inline RING_IDX name##_mask(RING_IDX idx, RING_IDX ring_size)          \
{                                                                             \
    return idx & (ring_size - 1);                                             \
}                                                                             \
                                                                              \
static inline unsigned char *name##_get_ring_ptr(unsigned char *buf,          \
                                                 RING_IDX idx,                \
                                                 RING_IDX ring_size)          \
{                                                                             \
    return buf + name##_mask(idx, ring_size);                                 \
}                                                                             \
                                                                              \
static inline void name##_read_packet(void *opaque,                           \
                                      const unsigned char *buf,               \
                                      size_t size,                            \
                                      RING_IDX masked_prod,                   \
                                      RING_IDX *masked_cons,                  \
                                      RING_IDX ring_size)                     \
{                                                                             \
    if (*masked_cons < masked_prod ||                                         \
        size <= ring_size - *masked_cons) {                                   \
        /* No wrap-around: a single copy suffices. */                         \
        memcpy(opaque, buf + *masked_cons, size);                             \
    } else {                                                                  \
        /* Wraps past the end of the ring: copy in two chunks. */             \
        memcpy(opaque, buf + *masked_cons, ring_size - *masked_cons);         \
        memcpy((unsigned char *)opaque + ring_size - *masked_cons, buf,       \
               size - (ring_size - *masked_cons));                            \
    }                                                                         \
    *masked_cons = name##_mask(*masked_cons + size, ring_size);               \
}                                                                             \
                                                                              \
static inline void name##_write_packet(unsigned char *buf,                    \
                                       const void *opaque,                    \
                                       size_t size,                           \
                                       RING_IDX *masked_prod,                 \
                                       RING_IDX masked_cons,                  \
                                       RING_IDX ring_size)                    \
{                                                                             \
    if (*masked_prod < masked_cons ||                                         \
        size <= ring_size - *masked_prod) {                                   \
        /* No wrap-around: a single copy suffices. */                         \
        memcpy(buf + *masked_prod, opaque, size);                             \
    } else {                                                                  \
        /* Wraps past the end of the ring: copy in two chunks. */             \
        memcpy(buf + *masked_prod, opaque, ring_size - *masked_prod);         \
        memcpy(buf, (unsigned char *)opaque + (ring_size - *masked_prod),     \
               size - (ring_size - *masked_prod));                            \
    }                                                                         \
    *masked_prod = name##_mask(*masked_prod + size, ring_size);               \
}                                                                             \
                                                                              \
static inline RING_IDX name##_queued(RING_IDX prod,                           \
                                     RING_IDX cons,                           \
                                     RING_IDX ring_size)                      \
{                                                                             \
    RING_IDX size;                                                            \
                                                                              \
    if (prod == cons)                                                         \
        return 0;                                                             \
                                                                              \
    prod = name##_mask(prod, ring_size);                                      \
    cons = name##_mask(cons, ring_size);                                      \
                                                                              \
    if (prod == cons)                                                         \
        return ring_size; /* equal only after masking: the ring is full */    \
                                                                              \
    if (prod > cons)                                                          \
        size = prod - cons;                                                   \
    else                                                                      \
        size = ring_size - (cons - prod);                                     \
    return size;                                                              \
}                                                                             \
                                                                              \
struct name##_data {                                                          \
    unsigned char *in; /* half of the allocation */                           \
    unsigned char *out; /* half of the allocation */                          \
}

#define DEFINE_XEN_FLEX_RING_AND_INTF(name)                                   \
struct name##_data_intf {                                                     \
    RING_IDX in_cons, in_prod;                                                \
                                                                              \
    uint8_t pad1[56];                                                         \
                                                                              \
    RING_IDX out_cons, out_prod;                                              \
                                                                              \
    uint8_t pad2[56];                                                         \
                                                                              \
    RING_IDX ring_order;                                                      \
    grant_ref_t ref[];                                                        \
};                                                                            \
DEFINE_XEN_FLEX_RING(name)
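
/*
 * Illustrative flex-ring sketch (an editorial addition, not part of the
 * original header), assuming DEFINE_XEN_FLEX_RING_AND_INTF(myproto) and
 * hypothetical intf/data/pkt/len variables: write len bytes to the "out"
 * ring, then publish the new producer index.
 *
 *     RING_IDX prod = intf->out_prod, cons = intf->out_cons;
 *     RING_IDX size = XEN_FLEX_RING_SIZE(intf->ring_order);
 *     RING_IDX masked_prod, masked_cons;
 *
 *     if (size - myproto_queued(prod, cons, size) < len)
 *         return -EAGAIN;    // not enough free space on the ring
 *     masked_prod = myproto_mask(prod, size);
 *     masked_cons = myproto_mask(cons, size);
 *     myproto_write_packet(data->out, pkt, len, &masked_prod,
 *                          masked_cons, size);
 *     virt_wmb();            // data visible before the index update
 *     intf->out_prod += len;
 */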

#endif /* __XEN_PUBLIC_IO_RING_H__ */