linux/include/net/xdp_sock.h
/* SPDX-License-Identifier: GPL-2.0 */
/* AF_XDP internal functions
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_H
#define _LINUX_XDP_SOCK_H

#include <linux/workqueue.h>
#include <linux/if_xdp.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <net/sock.h>

struct net_device;
struct xsk_queue;

/* Kernel mapping of one page of the umem: the CPU virtual address plus the
 * DMA address used by the device when the queue runs in zero-copy mode.
 */
struct xdp_umem_page {
	void *addr;
	dma_addr_t dma;
};

/* Stash of FILL ring addresses that a driver has dequeued but not yet given
 * to hardware, so they can be handed out again later instead of being lost.
 */
struct xdp_umem_fq_reuse {
	u32 nentries;
	u32 length;
	u64 handles[];
};

struct xdp_umem {
	struct xsk_queue *fq;
	struct xsk_queue *cq;
	struct xdp_umem_page *pages;
	u64 chunk_mask;
	u64 size;
	u32 headroom;
	u32 chunk_size_nohr;
	struct user_struct *user;
	unsigned long address;
	refcount_t users;
	struct work_struct work;
	struct page **pgs;
	u32 npgs;
	int id;
	struct net_device *dev;
	struct xdp_umem_fq_reuse *fq_reuse;
	u16 queue_id;
	bool zc;
	spinlock_t xsk_list_lock;
	struct list_head xsk_list;
};

struct xdp_sock {
	/* struct sock must be the first member of struct xdp_sock */
	struct sock sk;
	struct xsk_queue *rx;
	struct net_device *dev;
	struct xdp_umem *umem;
	struct list_head flush_node;
	u16 queue_id;
	bool zc;
	enum {
		XSK_READY = 0,
		XSK_BOUND,
		XSK_UNBOUND,
	} state;
	/* Protects multiple processes in the control path */
	struct mutex mutex;
	struct xsk_queue *tx ____cacheline_aligned_in_smp;
	struct list_head list;
	/* Mutual exclusion of NAPI TX thread and sendmsg error paths
	 * in the SKB destructor callback.
	 */
	spinlock_t tx_completion_lock;
	/* Protects generic receive. */
	spinlock_t rx_lock;
	u64 rx_dropped;
};

struct xdp_buff;
#ifdef CONFIG_XDP_SOCKETS
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
void xsk_flush(struct xdp_sock *xs);
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs);
/* Used from netdev driver */
bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt);
u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
void xsk_umem_discard_addr(struct xdp_umem *umem);
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc);
void xsk_umem_consume_tx_done(struct xdp_umem *umem);
struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries);
struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
					  struct xdp_umem_fq_reuse *newq);
void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq);
struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id);

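/* Illustrative sketch only, not part of the original header: one way a
 * zero-copy driver might drain the socket TX ring with the helpers declared
 * above. The function name and the hardware-ring write it mentions are
 * hypothetical placeholders, not kernel APIs.
 */
static inline void example_xsk_xmit(struct xdp_umem *umem)
{
	struct xdp_desc desc;

	/* Pull descriptors off the socket's TX ring until it is empty. */
	while (xsk_umem_consume_tx(umem, &desc)) {
		/* Hand desc.addr/desc.len to hardware here, typically after
		 * translating desc.addr with xdp_umem_get_dma() below, e.g.:
		 * example_hw_post_frame(umem, desc.addr, desc.len);
		 */
	}

	/* Signal that this pass over the TX ring is finished so any waiting
	 * writers are woken up.
	 */
	xsk_umem_consume_tx_done(umem);

	/* Once the hardware reports the frames as transmitted, the TX
	 * clean-up path returns them on the COMPLETION ring:
	 * xsk_umem_complete_tx(umem, frames_cleaned);
	 */
}
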
static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
	return umem->pages[addr >> PAGE_SHIFT].addr + (addr & (PAGE_SIZE - 1));
}

static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{
	return umem->pages[addr >> PAGE_SHIFT].dma + (addr & (PAGE_SIZE - 1));
}

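/* Illustrative sketch only, not part of the original header: both helpers
 * above split the 64-bit umem address into a page index (addr >> PAGE_SHIFT
 * into umem->pages[]) and a byte offset within that page, so the CPU and
 * device views resolve to the same frame. example_umem_addr_lookup() is a
 * hypothetical name.
 */
static inline void example_umem_addr_lookup(struct net_device *dev,
					    u16 queue_id, u64 addr)
{
	struct xdp_umem *umem = xdp_get_umem_from_qid(dev, queue_id);
	char *va;
	dma_addr_t dma;

	if (!umem)	/* no AF_XDP socket bound to this queue */
		return;

	va = xdp_umem_get_data(umem, addr);	/* CPU view of the frame */
	dma = xdp_umem_get_dma(umem, addr);	/* device view of the frame */

	(void)va;
	(void)dma;
}
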
/* Reuse-queue aware version of FILL queue helpers */
static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (rq->length >= cnt)
		return true;

	return xsk_umem_has_addrs(umem, cnt - rq->length);
}

static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (!rq->length)
		return xsk_umem_peek_addr(umem, addr);

	*addr = rq->handles[rq->length - 1];
	return addr;
}

static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (!rq->length)
		xsk_umem_discard_addr(umem);
	else
		rq->length--;
}

static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	rq->handles[rq->length++] = addr;
}
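
/* Illustrative sketch only, not part of the original header: a driver RX
 * refill loop built on the reuse-queue aware helpers above. The reuse queue
 * itself is typically allocated with xsk_reuseq_prepare() and installed with
 * xsk_reuseq_swap() when zero-copy is enabled. example_rx_refill() and the
 * hardware-ring call in the comment are hypothetical names.
 */
static inline u32 example_rx_refill(struct xdp_umem *umem, u32 budget)
{
	u32 filled = 0;

	while (filled < budget) {
		u64 addr;

		/* Take the next address, preferring the reuse queue over
		 * the FILL ring.
		 */
		if (!xsk_umem_peek_addr_rq(umem, &addr))
			break;		/* nothing left to post */

		/* Post the buffer to hardware, then consume the entry, e.g.:
		 * example_hw_post_rx_buffer(xdp_umem_get_dma(umem, addr));
		 */
		xsk_umem_discard_addr_rq(umem);
		filled++;
	}

	/* At ring teardown, addresses already taken from the FILL ring but
	 * never given to hardware can be stashed for later with
	 * xsk_umem_fq_reuse(umem, addr).
	 */
	return filled;
}
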
#else
static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline void xsk_flush(struct xdp_sock *xs)
{
}

static inline bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return false;
}

static inline bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
{
	return false;
}

static inline u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
	return NULL;
}

static inline void xsk_umem_discard_addr(struct xdp_umem *umem)
{
}

static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
}

static inline bool xsk_umem_consume_tx(struct xdp_umem *umem,
				       struct xdp_desc *desc)
{
	return false;
}

static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
}

static inline struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
{
	return NULL;
}

static inline struct xdp_umem_fq_reuse *xsk_reuseq_swap(
	struct xdp_umem *umem,
	struct xdp_umem_fq_reuse *newq)
{
	return NULL;
}

static inline void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
{
}

static inline struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
						     u16 queue_id)
{
	return NULL;
}

static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
	return NULL;
}

static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{
	return 0;
}

static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
{
	return false;
}

static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
	return NULL;
}

static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
{
}

static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
{
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_H */