   1/* SPDX-License-Identifier: GPL-2.0 */
   2/* AF_XDP internal functions
   3 * Copyright(c) 2018 Intel Corporation.
   4 */
   5
   6#ifndef _LINUX_XDP_SOCK_H
   7#define _LINUX_XDP_SOCK_H
   8
   9#include <linux/workqueue.h>
  10#include <linux/if_xdp.h>
  11#include <linux/mutex.h>
  12#include <linux/spinlock.h>
  13#include <linux/mm.h>
  14#include <net/sock.h>
  15
  16struct net_device;
  17struct xsk_queue;
  18struct xdp_buff;
  19
/**
 * struct xdp_umem - userspace memory area registered for AF_XDP
 * @addrs: kernel-side mapping of the umem memory
 * @size: size of the area in bytes
 * @headroom: per-chunk headroom reserved before packet data
 * @chunk_size: size of each frame/chunk in the area
 * @chunks: number of chunks the area is divided into
 * @npgs: number of pinned pages backing the area
 * @user: accounting of pinned memory against the owning user
 * @users: reference count; umem is shared between sockets/pools
 * @flags: XDP_UMEM_* flags given at registration
 * @zc: true if the umem is used in zero-copy mode
 * @pgs: array of the pinned pages (@npgs entries)
 * @id: identifier for this umem instance
 * @xsk_dma_list: list of DMA mappings, presumably one per device/queue
 *                this umem is bound to — verify against pool code
 * @work: deferred work, used for teardown when the last reference is
 *        dropped from a context that cannot sleep (NOTE(review): confirm)
 */
struct xdp_umem {
	void *addrs;
	u64 size;
	u32 headroom;
	u32 chunk_size;
	u32 chunks;
	u32 npgs;
	struct user_struct *user;
	refcount_t users;
	u8 flags;
	bool zc;
	struct page **pgs;
	int id;
	struct list_head xsk_dma_list;
	struct work_struct work;
};
  36
/**
 * struct xsk_map - BPF map of type BPF_MAP_TYPE_XSKMAP
 * @map: embedded generic BPF map header; must stay first so the map
 *       core can convert between struct bpf_map and struct xsk_map
 * @lock: synchronizes updates to @xsk_map entries
 * @xsk_map: flexible array of RCU-protected XDP socket pointers,
 *           indexed by queue id/map key; read under RCU on the data path
 */
struct xsk_map {
	struct bpf_map map;
	spinlock_t lock; /* Synchronize map updates */
	struct xdp_sock __rcu *xsk_map[];
};
  42
/**
 * struct xdp_sock - an AF_XDP socket
 *
 * The rx and tx members are each aligned to a cache line so the
 * receive and transmit hot paths do not false-share state on SMP.
 */
struct xdp_sock {
	/* struct sock must be the first member of struct xdp_sock */
	struct sock sk;
	/* RX descriptor ring; start of the RX-hot cache line */
	struct xsk_queue *rx ____cacheline_aligned_in_smp;
	/* Device and queue this socket is bound to */
	struct net_device *dev;
	struct xdp_umem *umem;
	/* Linkage for the per-cpu flush list used by __xsk_map_flush() */
	struct list_head flush_node;
	/* Buffer pool backing this socket (shared with other sockets
	 * bound to the same umem — NOTE(review): confirm sharing model)
	 */
	struct xsk_buff_pool *pool;
	u16 queue_id;
	/* True when running in zero-copy mode */
	bool zc;
	/* Bind lifecycle: READY before bind, BOUND after a successful
	 * bind, UNBOUND after teardown begins.
	 */
	enum {
		XSK_READY = 0,
		XSK_BOUND,
		XSK_UNBOUND,
	} state;

	/* TX descriptor ring; start of the TX-hot cache line */
	struct xsk_queue *tx ____cacheline_aligned_in_smp;
	/* Linkage on the pool/device TX list — TODO confirm owner */
	struct list_head tx_list;
	/* Protects generic receive. */
	spinlock_t rx_lock;

	/* Statistics */
	u64 rx_dropped;
	u64 rx_queue_full;

	/* XSKMAPs this socket has been inserted into */
	struct list_head map_list;
	/* Protects map_list */
	spinlock_t map_list_lock;
	/* Protects multiple processes in the control path */
	struct mutex mutex;
	struct xsk_queue *fq_tmp; /* Only as tmp storage before bind */
	struct xsk_queue *cq_tmp; /* Only as tmp storage before bind */
};
  76
  77#ifdef CONFIG_XDP_SOCKETS
  78
  79int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
  80int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
  81void __xsk_map_flush(void);
  82
  83#else
  84
  85static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
  86{
  87        return -ENOTSUPP;
  88}
  89
/* Stub for !CONFIG_XDP_SOCKETS builds: XSKMAP redirect is unavailable,
 * so report "operation not supported".
 */
static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -EOPNOTSUPP;
}
  94
/* Stub for !CONFIG_XDP_SOCKETS builds: nothing queued, nothing to flush. */
static inline void __xsk_map_flush(void)
{
}
  98
  99#endif /* CONFIG_XDP_SOCKETS */
 100
 101#endif /* _LINUX_XDP_SOCK_H */
 102