/* SPDX-License-Identifier: GPL-2.0 */
/* AF_XDP internal functions
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_H
#define _LINUX_XDP_SOCK_H

#include <linux/workqueue.h>
#include <linux/if_xdp.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <net/sock.h>

struct net_device;
struct xsk_queue;

/* Masks for xdp_umem_page flags.
 * The low 12-bits of the addr will be 0 since this is the page address, so we
 * can use them for flags.
 */
#define XSK_NEXT_PG_CONTIG_SHIFT 0
#define XSK_NEXT_PG_CONTIG_MASK (1ULL << XSK_NEXT_PG_CONTIG_SHIFT)

struct xdp_umem_page {
	void *addr;
	dma_addr_t dma;
};

struct xdp_umem_fq_reuse {
	u32 nentries;
	u32 length;
	u64 handles[];
};

/* Flags for the umem flags field.
 *
 * The NEED_WAKEUP flag is 1 due to the reuse of the flags field for public
 * flags. See include/uapi/linux/if_xdp.h.
 */
#define XDP_UMEM_USES_NEED_WAKEUP (1 << 1)

struct xdp_umem {
	struct xsk_queue *fq;
	struct xsk_queue *cq;
	struct xdp_umem_page *pages;
	u64 chunk_mask;
	u64 size;
	u32 headroom;
	u32 chunk_size_nohr;
	struct user_struct *user;
	unsigned long address;
	refcount_t users;
	struct work_struct work;
	struct page **pgs;
	u32 npgs;
	u16 queue_id;
	u8 need_wakeup;
	u8 flags;
	int id;
	struct net_device *dev;
	struct xdp_umem_fq_reuse *fq_reuse;
	bool zc;
	spinlock_t xsk_list_lock;
	struct list_head xsk_list;
};

/* Nodes are linked in the struct xdp_sock map_list field, and used to
 * track which maps a certain socket resides in.
 */

struct xsk_map {
	struct bpf_map map;
	spinlock_t lock; /* Synchronize map updates */
	struct xdp_sock *xsk_map[];
};

struct xsk_map_node {
	struct list_head node;
	struct xsk_map *map;
	struct xdp_sock **map_entry;
};

struct xdp_sock {
	/* struct sock must be the first member of struct xdp_sock */
	struct sock sk;
	struct xsk_queue *rx;
	struct net_device *dev;
	struct xdp_umem *umem;
	struct list_head flush_node;
	u16 queue_id;
	bool zc;
	enum {
		XSK_READY = 0,
		XSK_BOUND,
		XSK_UNBOUND,
	} state;
	/* Protects multiple processes in the control path */
	struct mutex mutex;
	struct xsk_queue *tx ____cacheline_aligned_in_smp;
	struct list_head list;
	/* Mutual exclusion of NAPI TX thread and sendmsg error paths
	 * in the SKB destructor callback.
	 */
	spinlock_t tx_completion_lock;
	/* Protects generic receive. */
	spinlock_t rx_lock;
	u64 rx_dropped;
	struct list_head map_list;
	/* Protects map_list */
	spinlock_t map_list_lock;
};

struct xdp_buff;
#ifdef CONFIG_XDP_SOCKETS
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs);
/* Used from netdev driver */
bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt);
u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
void xsk_umem_release_addr(struct xdp_umem *umem);
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc);
void xsk_umem_consume_tx_done(struct xdp_umem *umem);
struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries);
struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
					  struct xdp_umem_fq_reuse *newq);
void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq);
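
/* Usage sketch, not part of this header's API contract: a hypothetical
 * zero-copy driver reconfiguring its RX ring can carry outstanding FILL
 * addresses over by staging a reuse queue. xsk_reuseq_swap() returns the
 * queue it replaced (if any), which the caller then frees:
 *
 *	struct xdp_umem_fq_reuse *rq = xsk_reuseq_prepare(nentries);
 *
 *	if (!rq)
 *		return -ENOMEM;
 *	xsk_reuseq_free(xsk_reuseq_swap(umem, rq));
 */
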
struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id);
void xsk_set_rx_need_wakeup(struct xdp_umem *umem);
void xsk_set_tx_need_wakeup(struct xdp_umem *umem);
void xsk_clear_rx_need_wakeup(struct xdp_umem *umem);
void xsk_clear_tx_need_wakeup(struct xdp_umem *umem);
bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem);

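/* Usage sketch (driver names hypothetical): when the application bound with
 * XDP_USE_NEED_WAKEUP, a driver sets the RX flag whenever it fails to refill
 * its HW ring from the FILL queue, and clears it once refilling succeeds
 * again, so user space only issues wakeup syscalls when actually needed:
 *
 *	if (!my_driver_refill_rx_ring(rx_ring))
 *		xsk_set_rx_need_wakeup(umem);
 *	else
 *		xsk_clear_rx_need_wakeup(umem);
 */
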
void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
			     struct xdp_sock **map_entry);
int xsk_map_inc(struct xsk_map *map);
void xsk_map_put(struct xsk_map *map);
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
void __xsk_map_flush(void);

static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
						     u32 key)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	struct xdp_sock *xs;

	if (key >= map->max_entries)
		return NULL;

	xs = READ_ONCE(m->xsk_map[key]);
	return xs;
}

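/* The lookup above is meant for the XDP fast path, where RCU protects the
 * map entries (hence the READ_ONCE()). On the BPF program side, the same
 * XSKMAP is typically used through the bpf_redirect_map() helper (sketch,
 * map name hypothetical):
 *
 *	return bpf_redirect_map(&xsks_map, ctx->rx_queue_index, 0);
 */
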
static inline u64 xsk_umem_extract_addr(u64 addr)
{
	return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
}

static inline u64 xsk_umem_extract_offset(u64 addr)
{
	return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
}

static inline u64 xsk_umem_add_offset_to_addr(u64 addr)
{
	return xsk_umem_extract_addr(addr) + xsk_umem_extract_offset(addr);
}
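
/* Worked example, using XSK_UNALIGNED_BUF_OFFSET_SHIFT == 48 from
 * include/uapi/linux/if_xdp.h: in unaligned chunk mode the descriptor
 * address 0x0002000000001000 carries base 0x1000 in the low 48 bits and
 * offset 2 in the upper 16 bits, so xsk_umem_extract_addr() returns 0x1000,
 * xsk_umem_extract_offset() returns 2, and xsk_umem_add_offset_to_addr()
 * returns 0x1002.
 */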

static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
	unsigned long page_addr;

	addr = xsk_umem_add_offset_to_addr(addr);
	page_addr = (unsigned long)umem->pages[addr >> PAGE_SHIFT].addr;

	return (char *)(page_addr & PAGE_MASK) + (addr & ~PAGE_MASK);
}

static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{
	addr = xsk_umem_add_offset_to_addr(addr);

	return umem->pages[addr >> PAGE_SHIFT].dma + (addr & ~PAGE_MASK);
}
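
/* For the same descriptor address, xdp_umem_get_data() resolves the kernel
 * virtual address and xdp_umem_get_dma() the matching device address, both
 * via the per-page umem->pages[] table. Sketch (buffer struct hypothetical):
 *
 *	buf->cpu_addr = xdp_umem_get_data(umem, addr);
 *	buf->dma_addr = xdp_umem_get_dma(umem, addr);
 */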

/* Reuse-queue aware version of FILL queue helpers */
static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (rq->length >= cnt)
		return true;

	return xsk_umem_has_addrs(umem, cnt - rq->length);
}

static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (!rq->length)
		return xsk_umem_peek_addr(umem, addr);

	*addr = rq->handles[rq->length - 1];
	return addr;
}

static inline void xsk_umem_release_addr_rq(struct xdp_umem *umem)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (!rq->length)
		xsk_umem_release_addr(umem);
	else
		rq->length--;
}

static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	rq->handles[rq->length++] = addr;
}
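
/* Usage sketch (hypothetical RX allocation loop): the _rq variants drain
 * umem->fq_reuse before consuming fresh FILL ring entries, and an address
 * that could not be handed to hardware is recycled with xsk_umem_fq_reuse()
 * instead of being lost:
 *
 *	u64 addr;
 *
 *	if (!xsk_umem_peek_addr_rq(umem, &addr))
 *		return false;
 *	xsk_umem_release_addr_rq(umem);
 *	if (!my_driver_post_buffer(rx_ring, addr))
 *		xsk_umem_fq_reuse(umem, addr);
 */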

/* Handle the offset appropriately depending on aligned or unaligned mode.
 * For unaligned mode, we store the offset in the upper 16-bits of the address.
 * For aligned mode, we simply add the offset to the address.
 */
static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 address,
					 u64 offset)
{
	if (umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG)
		return address + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
	else
		return address + offset;
}
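
/* Example: with XDP_UMEM_UNALIGNED_CHUNK_FLAG set, adding a headroom offset
 * of 0x100 to address 0x1000 yields 0x0100000000001000 (offset packed into
 * the upper 16 bits); in aligned mode the same call returns plain 0x1100.
 */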
#else
static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return false;
}

static inline bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
{
	return false;
}

static inline u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
	return NULL;
}

static inline void xsk_umem_release_addr(struct xdp_umem *umem)
{
}

static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
}

static inline bool xsk_umem_consume_tx(struct xdp_umem *umem,
				       struct xdp_desc *desc)
{
	return false;
}

static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
}

static inline struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
{
	return NULL;
}

static inline struct xdp_umem_fq_reuse *xsk_reuseq_swap(
	struct xdp_umem *umem,
	struct xdp_umem_fq_reuse *newq)
{
	return NULL;
}

static inline void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
{
}

static inline struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
						     u16 queue_id)
{
	return NULL;
}

static inline u64 xsk_umem_extract_addr(u64 addr)
{
	return 0;
}

static inline u64 xsk_umem_extract_offset(u64 addr)
{
	return 0;
}

static inline u64 xsk_umem_add_offset_to_addr(u64 addr)
{
	return 0;
}

static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
	return NULL;
}

static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{
	return 0;
}

static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
{
	return false;
}

static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
	return NULL;
}

static inline void xsk_umem_release_addr_rq(struct xdp_umem *umem)
{
}

static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
{
}

static inline void xsk_set_rx_need_wakeup(struct xdp_umem *umem)
{
}

static inline void xsk_set_tx_need_wakeup(struct xdp_umem *umem)
{
}

static inline void xsk_clear_rx_need_wakeup(struct xdp_umem *umem)
{
}

static inline void xsk_clear_tx_need_wakeup(struct xdp_umem *umem)
{
}

static inline bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem)
{
	return false;
}

static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 handle,
					 u64 offset)
{
	return 0;
}

static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -EOPNOTSUPP;
}

static inline void __xsk_map_flush(void)
{
}

static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
						     u32 key)
{
	return NULL;
}
#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_H */