linux/tools/lib/bpf/xsk.h
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */

/*
 * AF_XDP user-space access library.
 *
 * Copyright (c) 2018 - 2019 Intel Corporation.
 * Copyright (c) 2019 Facebook
 *
 * Author(s): Magnus Karlsson <magnus.karlsson@intel.com>
 */

#ifndef __LIBBPF_XSK_H
#define __LIBBPF_XSK_H

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <linux/if_xdp.h>

#include "libbpf.h"

#ifdef __cplusplus
extern "C" {
#endif

/* Load-Acquire Store-Release barriers used by the XDP socket
 * library. The following macros should *NOT* be considered part of
 * the xsk.h API and are subject to change at any time.
 *
 * LIBRARY INTERNAL
 */

#define __XSK_READ_ONCE(x) (*(volatile typeof(x) *)&x)
#define __XSK_WRITE_ONCE(x, v) (*(volatile typeof(x) *)&x) = (v)

#if defined(__i386__) || defined(__x86_64__)
# define libbpf_smp_store_release(p, v)                                 \
        do {                                                            \
                asm volatile("" : : : "memory");                        \
                __XSK_WRITE_ONCE(*p, v);                                \
        } while (0)
# define libbpf_smp_load_acquire(p)                                     \
        ({                                                              \
                typeof(*p) ___p1 = __XSK_READ_ONCE(*p);                 \
                asm volatile("" : : : "memory");                        \
                ___p1;                                                  \
        })
#elif defined(__aarch64__)
# define libbpf_smp_store_release(p, v)                                 \
                asm volatile ("stlr %w1, %0" : "=Q" (*p) : "r" (v) : "memory")
# define libbpf_smp_load_acquire(p)                                     \
        ({                                                              \
                typeof(*p) ___p1;                                       \
                asm volatile ("ldar %w0, %1"                            \
                              : "=r" (___p1) : "Q" (*p) : "memory");    \
                ___p1;                                                  \
        })
#elif defined(__riscv)
# define libbpf_smp_store_release(p, v)                                 \
        do {                                                            \
                asm volatile ("fence rw,w" : : : "memory");             \
                __XSK_WRITE_ONCE(*p, v);                                \
        } while (0)
# define libbpf_smp_load_acquire(p)                                     \
        ({                                                              \
                typeof(*p) ___p1 = __XSK_READ_ONCE(*p);                 \
                asm volatile ("fence r,rw" : : : "memory");             \
                ___p1;                                                  \
        })
#endif

#ifndef libbpf_smp_store_release
#define libbpf_smp_store_release(p, v)                                  \
        do {                                                            \
                __sync_synchronize();                                   \
                __XSK_WRITE_ONCE(*p, v);                                \
        } while (0)
#endif

#ifndef libbpf_smp_load_acquire
#define libbpf_smp_load_acquire(p)                                      \
        ({                                                              \
                typeof(*p) ___p1 = __XSK_READ_ONCE(*p);                 \
                __sync_synchronize();                                   \
                ___p1;                                                  \
        })
#endif

/* LIBRARY INTERNAL -- END */

/* Do not access these members directly. Use the functions below. */
#define DEFINE_XSK_RING(name) \
struct name { \
        __u32 cached_prod; \
        __u32 cached_cons; \
        __u32 mask; \
        __u32 size; \
        __u32 *producer; \
        __u32 *consumer; \
        void *ring; \
        __u32 *flags; \
}

DEFINE_XSK_RING(xsk_ring_prod);
DEFINE_XSK_RING(xsk_ring_cons);

/* For a detailed explanation on the memory barriers associated with the
 * ring, please take a look at net/xdp/xsk_queue.h.
 */

struct xsk_umem;
struct xsk_socket;

/* Return a pointer to the fill ring entry at idx (masked to the ring size). */
static inline __u64 *xsk_ring_prod__fill_addr(struct xsk_ring_prod *fill,
                                              __u32 idx)
{
        __u64 *addrs = (__u64 *)fill->ring;

        return &addrs[idx & fill->mask];
}

/* Return a pointer to the completion ring entry at idx. */
static inline const __u64 *
xsk_ring_cons__comp_addr(const struct xsk_ring_cons *comp, __u32 idx)
{
        const __u64 *addrs = (const __u64 *)comp->ring;

        return &addrs[idx & comp->mask];
}

/* Return a pointer to the TX descriptor at idx. */
static inline struct xdp_desc *xsk_ring_prod__tx_desc(struct xsk_ring_prod *tx,
                                                      __u32 idx)
{
        struct xdp_desc *descs = (struct xdp_desc *)tx->ring;

        return &descs[idx & tx->mask];
}

/* Return a pointer to the RX descriptor at idx. */
static inline const struct xdp_desc *
xsk_ring_cons__rx_desc(const struct xsk_ring_cons *rx, __u32 idx)
{
        const struct xdp_desc *descs = (const struct xdp_desc *)rx->ring;

        return &descs[idx & rx->mask];
}

/* Non-zero if the kernel needs a wakeup call (see XDP_USE_NEED_WAKEUP). */
static inline int xsk_ring_prod__needs_wakeup(const struct xsk_ring_prod *r)
{
        return *r->flags & XDP_RING_NEED_WAKEUP;
}
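
/* Illustrative sketch (not part of this header's API): when the socket
 * is bound with XDP_USE_NEED_WAKEUP, the kernel sets
 * XDP_RING_NEED_WAKEUP on a producer ring whenever it needs a kick. A
 * typical TX path checks the flag and, if set, wakes the kernel with a
 * sendto() as below. Assumes <sys/socket.h>, a bound socket "xsk" and
 * its TX ring "tx".
 *
 *        if (xsk_ring_prod__needs_wakeup(&tx))
 *                sendto(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT,
 *                       NULL, 0);
 */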

static inline __u32 xsk_prod_nb_free(struct xsk_ring_prod *r, __u32 nb)
{
        __u32 free_entries = r->cached_cons - r->cached_prod;

        if (free_entries >= nb)
                return free_entries;

        /* Refresh the local tail pointer.
         * cached_cons is r->size bigger than the real consumer pointer so
         * that this addition can be avoided in the more frequently
         * executed code that computes free_entries in the beginning of
         * this function. Without this optimization it would have been
         * free_entries = r->cached_cons - r->cached_prod + r->size.
         */
        r->cached_cons = libbpf_smp_load_acquire(r->consumer);
        r->cached_cons += r->size;

        return r->cached_cons - r->cached_prod;
}
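
/* Worked example of the bias above (illustrative numbers): with
 * size = 2048, a real consumer pointer of 100 is cached as
 * cached_cons = 100 + 2048 = 2148. For cached_prod = 2000 this gives
 * free_entries = 2148 - 2000 = 148, the same result as
 * 100 - 2000 + 2048 but with one addition saved on the fast path.
 */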

/* Return the number of entries available for consumption, capped at nb. */
static inline __u32 xsk_cons_nb_avail(struct xsk_ring_cons *r, __u32 nb)
{
        __u32 entries = r->cached_prod - r->cached_cons;

        if (entries == 0) {
                r->cached_prod = libbpf_smp_load_acquire(r->producer);
                entries = r->cached_prod - r->cached_cons;
        }

        return (entries > nb) ? nb : entries;
}

/* Reserve nb slots in the producer ring. Returns nb and sets *idx to the
 * first reserved slot on success, 0 if fewer than nb slots are free.
 */
static inline __u32 xsk_ring_prod__reserve(struct xsk_ring_prod *prod, __u32 nb, __u32 *idx)
{
        if (xsk_prod_nb_free(prod, nb) < nb)
                return 0;

        *idx = prod->cached_prod;
        prod->cached_prod += nb;

        return nb;
}

/* Make nb previously reserved (and now filled-in) slots visible to the
 * kernel.
 */
static inline void xsk_ring_prod__submit(struct xsk_ring_prod *prod, __u32 nb)
{
        /* Make sure everything has been written to the ring before indicating
         * this to the kernel by writing the producer pointer.
         */
        libbpf_smp_store_release(prod->producer, *prod->producer + nb);
}
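
/* Illustrative producer-side sketch (not part of this header's API):
 * refill the fill ring with a batch of 64 frames. "fill" is the ring
 * passed to xsk_umem__create() and frame_addr() is a hypothetical
 * helper returning the umem-relative address of a free frame.
 *
 *        __u32 idx;
 *
 *        if (xsk_ring_prod__reserve(&fill, 64, &idx) == 64) {
 *                for (int i = 0; i < 64; i++)
 *                        *xsk_ring_prod__fill_addr(&fill, idx + i) =
 *                                frame_addr(i);
 *                xsk_ring_prod__submit(&fill, 64);
 *        }
 */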

/* Return up to nb consumable entries and set *idx to the first one;
 * returns 0 if the ring is empty.
 */
static inline __u32 xsk_ring_cons__peek(struct xsk_ring_cons *cons, __u32 nb, __u32 *idx)
{
        __u32 entries = xsk_cons_nb_avail(cons, nb);

        if (entries > 0) {
                *idx = cons->cached_cons;
                cons->cached_cons += entries;
        }

        return entries;
}

/* Give back nb entries obtained with xsk_ring_cons__peek() without
 * consuming them.
 */
static inline void xsk_ring_cons__cancel(struct xsk_ring_cons *cons, __u32 nb)
{
        cons->cached_cons -= nb;
}

/* Hand nb consumed entries back to the kernel. */
static inline void xsk_ring_cons__release(struct xsk_ring_cons *cons, __u32 nb)
{
        /* Make sure data has been read before indicating we are done
         * with the entries by updating the consumer pointer.
         */
        libbpf_smp_store_release(cons->consumer, *cons->consumer + nb);
}
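
/* Illustrative consumer-side sketch (not part of this header's API):
 * drain up to 32 descriptors from the RX ring "rx" and hand each
 * packet to a hypothetical process_packet(). "umem_area" is the
 * buffer registered with xsk_umem__create(); aligned mode is assumed,
 * so desc->addr can be used directly (see the unaligned helpers
 * below).
 *
 *        __u32 idx;
 *        __u32 n = xsk_ring_cons__peek(&rx, 32, &idx);
 *
 *        for (__u32 i = 0; i < n; i++) {
 *                const struct xdp_desc *desc =
 *                        xsk_ring_cons__rx_desc(&rx, idx + i);
 *
 *                process_packet(xsk_umem__get_data(umem_area, desc->addr),
 *                               desc->len);
 *        }
 *        if (n)
 *                xsk_ring_cons__release(&rx, n);
 */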

/* Return a pointer into the umem for the packet at umem offset addr. */
static inline void *xsk_umem__get_data(void *umem_area, __u64 addr)
{
        return &((char *)umem_area)[addr];
}

/* Extract the base chunk address from an unaligned-mode address. */
static inline __u64 xsk_umem__extract_addr(__u64 addr)
{
        return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
}

/* Extract the in-chunk offset carried in the upper bits of addr. */
static inline __u64 xsk_umem__extract_offset(__u64 addr)
{
        return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
}

/* Flatten an unaligned-mode address into a plain umem offset. */
static inline __u64 xsk_umem__add_offset_to_addr(__u64 addr)
{
        return xsk_umem__extract_addr(addr) + xsk_umem__extract_offset(addr);
}
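
/* Illustrative sketch: with a umem created with
 * XDP_UMEM_UNALIGNED_CHUNK_FLAG, addresses returned in RX and
 * completion entries carry an offset in their upper bits and must be
 * flattened before use. Given a descriptor "desc" from
 * xsk_ring_cons__rx_desc():
 *
 *        __u64 addr = xsk_umem__add_offset_to_addr(desc->addr);
 *        void *pkt = xsk_umem__get_data(umem_area, addr);
 *
 * xsk_umem__extract_addr() alone recovers the base chunk address,
 * e.g. for recycling the frame back to the fill ring.
 */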

LIBBPF_API int xsk_umem__fd(const struct xsk_umem *umem);
LIBBPF_API int xsk_socket__fd(const struct xsk_socket *xsk);

#define XSK_RING_CONS__DEFAULT_NUM_DESCS      2048
#define XSK_RING_PROD__DEFAULT_NUM_DESCS      2048
#define XSK_UMEM__DEFAULT_FRAME_SHIFT    12 /* 4096 bytes */
#define XSK_UMEM__DEFAULT_FRAME_SIZE     (1 << XSK_UMEM__DEFAULT_FRAME_SHIFT)
#define XSK_UMEM__DEFAULT_FRAME_HEADROOM 0
#define XSK_UMEM__DEFAULT_FLAGS 0

struct xsk_umem_config {
        __u32 fill_size;
        __u32 comp_size;
        __u32 frame_size;
        __u32 frame_headroom;
        __u32 flags;
};
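
/* Illustrative sketch: a configuration equivalent to passing a NULL
 * config to xsk_umem__create(), spelled out with the defaults above.
 *
 *        struct xsk_umem_config cfg = {
 *                .fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
 *                .comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
 *                .frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE,
 *                .frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM,
 *                .flags = XSK_UMEM__DEFAULT_FLAGS,
 *        };
 */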

LIBBPF_API int xsk_setup_xdp_prog(int ifindex,
                                  int *xsks_map_fd);
LIBBPF_API int xsk_socket__update_xskmap(struct xsk_socket *xsk,
                                         int xsks_map_fd);
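
/* Illustrative sketch: an application that loads its own XDP program
 * (socket created with XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD, defined
 * below) inserts the socket into that program's XSKMAP by hand;
 * "xsks_map_fd" is the fd of that map and the call returns 0 or a
 * negative errno.
 *
 *        err = xsk_socket__update_xskmap(xsk, xsks_map_fd);
 */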

/* Flags for the libbpf_flags field. */
#define XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD (1 << 0)

struct xsk_socket_config {
        __u32 rx_size;
        __u32 tx_size;
        __u32 libbpf_flags;
        __u32 xdp_flags;
        __u16 bind_flags;
};
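
/* Illustrative sketch: a configuration requesting the need_wakeup
 * scheme and leaving mode selection (copy vs. zero-copy) to the
 * kernel. Ring sizes reuse the defaults above; XDP_USE_NEED_WAKEUP
 * comes from <linux/if_xdp.h> and XDP_FLAGS_UPDATE_IF_NOEXIST from
 * <linux/if_link.h>.
 *
 *        struct xsk_socket_config cfg = {
 *                .rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
 *                .tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
 *                .libbpf_flags = 0,
 *                .xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST,
 *                .bind_flags = XDP_USE_NEED_WAKEUP,
 *        };
 */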

/* Set config to NULL to get the default configuration. */
LIBBPF_API int xsk_umem__create(struct xsk_umem **umem,
                                void *umem_area, __u64 size,
                                struct xsk_ring_prod *fill,
                                struct xsk_ring_cons *comp,
                                const struct xsk_umem_config *config);
LIBBPF_API int xsk_umem__create_v0_0_2(struct xsk_umem **umem,
                                       void *umem_area, __u64 size,
                                       struct xsk_ring_prod *fill,
                                       struct xsk_ring_cons *comp,
                                       const struct xsk_umem_config *config);
LIBBPF_API int xsk_umem__create_v0_0_4(struct xsk_umem **umem,
                                       void *umem_area, __u64 size,
                                       struct xsk_ring_prod *fill,
                                       struct xsk_ring_cons *comp,
                                       const struct xsk_umem_config *config);
LIBBPF_API int xsk_socket__create(struct xsk_socket **xsk,
                                  const char *ifname, __u32 queue_id,
                                  struct xsk_umem *umem,
                                  struct xsk_ring_cons *rx,
                                  struct xsk_ring_prod *tx,
                                  const struct xsk_socket_config *config);
LIBBPF_API int
xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
                          const char *ifname,
                          __u32 queue_id, struct xsk_umem *umem,
                          struct xsk_ring_cons *rx,
                          struct xsk_ring_prod *tx,
                          struct xsk_ring_prod *fill,
                          struct xsk_ring_cons *comp,
                          const struct xsk_socket_config *config);
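
/* Illustrative end-to-end sketch (error handling elided, not part of
 * this header's API): register a umem over a page-aligned buffer and
 * open one socket on queue 0 of a hypothetical interface "eth0".
 * NUM_FRAMES is an assumed constant; <stdlib.h> and <unistd.h> provide
 * posix_memalign() and getpagesize().
 *
 *        struct xsk_ring_prod fill, tx;
 *        struct xsk_ring_cons comp, rx;
 *        struct xsk_umem *umem;
 *        struct xsk_socket *xsk;
 *        __u64 size = NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE;
 *        void *bufs;
 *
 *        posix_memalign(&bufs, getpagesize(), size);
 *        xsk_umem__create(&umem, bufs, size, &fill, &comp, NULL);
 *        xsk_socket__create(&xsk, "eth0", 0, umem, &rx, &tx, NULL);
 */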

/* Returns 0 for success and -EBUSY if the umem is still in use. */
LIBBPF_API int xsk_umem__delete(struct xsk_umem *umem);
LIBBPF_API void xsk_socket__delete(struct xsk_socket *xsk);
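
/* Illustrative teardown sketch: delete sockets before the umem they
 * share; xsk_umem__delete() returns -EBUSY while any socket is still
 * bound to the umem.
 *
 *        xsk_socket__delete(xsk);
 *        err = xsk_umem__delete(umem);
 */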

#ifdef __cplusplus
} /* extern "C" */
#endif

#endif /* __LIBBPF_XSK_H */