// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * AF_XDP user-space access library.
 */

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <asm/barrier.h>
#include <linux/compiler.h>
#include <linux/ethtool.h>
#include <linux/filter.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/if_xdp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/types.h>

#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"
#include "xsk.h"

#ifndef SOL_XDP
 #define SOL_XDP 283
#endif

#ifndef AF_XDP
 #define AF_XDP 44
#endif

#ifndef PF_XDP
 #define PF_XDP AF_XDP
#endif

struct xsk_umem {
	struct xsk_ring_prod *fill;
	struct xsk_ring_cons *comp;
	char *umem_area;
	struct xsk_umem_config config;
	int fd;
	int refcount;
};

struct xsk_socket {
	struct xsk_ring_cons *rx;
	struct xsk_ring_prod *tx;
	__u64 outstanding_tx;
	struct xsk_umem *umem;
	struct xsk_socket_config config;
	int fd;
	int ifindex;
	int prog_fd;
	int xsks_map_fd;
	__u32 queue_id;
	char ifname[IFNAMSIZ];
	bool zc;
};

struct xsk_nl_info {
	bool xdp_prog_attached;
	int ifindex;
	int fd;
};

/* mmap() only takes an off_t offset, which may be too narrow for the 64-bit
 * ring offsets used here. Where mmap2() is available, use it so the offset
 * can be passed in units of pages instead.
 */
static inline void *xsk_mmap(void *addr, size_t length, int prot, int flags,
			     int fd, __u64 offset)
{
#ifdef __NR_mmap2
	unsigned int page_shift = __builtin_ffs(getpagesize()) - 1;
	long ret = syscall(__NR_mmap2, addr, length, prot, flags, fd,
			   (off_t)(offset >> page_shift));

	return (void *)ret;
#else
	return mmap(addr, length, prot, flags, fd, offset);
#endif
}

int xsk_umem__fd(const struct xsk_umem *umem)
{
	return umem ? umem->fd : -EINVAL;
}

int xsk_socket__fd(const struct xsk_socket *xsk)
{
	return xsk ? xsk->fd : -EINVAL;
}

static bool xsk_page_aligned(void *buffer)
{
	unsigned long addr = (unsigned long)buffer;

	return !(addr & (getpagesize() - 1));
}

static void xsk_set_umem_config(struct xsk_umem_config *cfg,
				const struct xsk_umem_config *usr_cfg)
{
	if (!usr_cfg) {
		cfg->fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
		cfg->comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
		cfg->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
		cfg->frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM;
		return;
	}

	cfg->fill_size = usr_cfg->fill_size;
	cfg->comp_size = usr_cfg->comp_size;
	cfg->frame_size = usr_cfg->frame_size;
	cfg->frame_headroom = usr_cfg->frame_headroom;
}

static int xsk_set_xdp_socket_config(struct xsk_socket_config *cfg,
				     const struct xsk_socket_config *usr_cfg)
{
	if (!usr_cfg) {
		cfg->rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
		cfg->tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
		cfg->libbpf_flags = 0;
		cfg->xdp_flags = 0;
		cfg->bind_flags = 0;
		return 0;
	}

	if (usr_cfg->libbpf_flags & ~XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)
		return -EINVAL;

	cfg->rx_size = usr_cfg->rx_size;
	cfg->tx_size = usr_cfg->tx_size;
	cfg->libbpf_flags = usr_cfg->libbpf_flags;
	cfg->xdp_flags = usr_cfg->xdp_flags;
	cfg->bind_flags = usr_cfg->bind_flags;

	return 0;
}
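
/* Illustrative only, not part of the original sources: a config a caller
 * might pass to xsk_socket__create() when it wants to attach its own XDP
 * program instead of the built-in one loaded by xsk_load_xdp_prog(). The
 * specific xdp_flags/bind_flags values are assumptions for the example.
 *
 *	struct xsk_socket_config cfg = {
 *		.rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
 *		.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
 *		.libbpf_flags = XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD,
 *		.xdp_flags = XDP_FLAGS_SKB_MODE,
 *		.bind_flags = XDP_COPY,
 *	};
 */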

int xsk_umem__create(struct xsk_umem **umem_ptr, void *umem_area, __u64 size,
		     struct xsk_ring_prod *fill, struct xsk_ring_cons *comp,
		     const struct xsk_umem_config *usr_config)
{
	struct xdp_mmap_offsets off;
	struct xdp_umem_reg mr;
	struct xsk_umem *umem;
	socklen_t optlen;
	void *map;
	int err;

	if (!umem_area || !umem_ptr || !fill || !comp)
		return -EFAULT;
	if (!size || !xsk_page_aligned(umem_area))
		return -EINVAL;

	umem = calloc(1, sizeof(*umem));
	if (!umem)
		return -ENOMEM;

	umem->fd = socket(AF_XDP, SOCK_RAW, 0);
	if (umem->fd < 0) {
		err = -errno;
		goto out_umem_alloc;
	}

	umem->umem_area = umem_area;
	xsk_set_umem_config(&umem->config, usr_config);

	mr.addr = (uintptr_t)umem_area;
	mr.len = size;
	mr.chunk_size = umem->config.frame_size;
	mr.headroom = umem->config.frame_headroom;

	err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
	if (err) {
		err = -errno;
		goto out_socket;
	}
	err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_FILL_RING,
			 &umem->config.fill_size,
			 sizeof(umem->config.fill_size));
	if (err) {
		err = -errno;
		goto out_socket;
	}
	err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_COMPLETION_RING,
			 &umem->config.comp_size,
			 sizeof(umem->config.comp_size));
	if (err) {
		err = -errno;
		goto out_socket;
	}

	optlen = sizeof(off);
	err = getsockopt(umem->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
	if (err) {
		err = -errno;
		goto out_socket;
	}

	map = xsk_mmap(NULL, off.fr.desc +
		       umem->config.fill_size * sizeof(__u64),
		       PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
		       umem->fd, XDP_UMEM_PGOFF_FILL_RING);
	if (map == MAP_FAILED) {
		err = -errno;
		goto out_socket;
	}

	umem->fill = fill;
	fill->mask = umem->config.fill_size - 1;
	fill->size = umem->config.fill_size;
	fill->producer = map + off.fr.producer;
	fill->consumer = map + off.fr.consumer;
	fill->ring = map + off.fr.desc;
	fill->cached_cons = umem->config.fill_size;

	map = xsk_mmap(NULL,
		       off.cr.desc + umem->config.comp_size * sizeof(__u64),
		       PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
		       umem->fd, XDP_UMEM_PGOFF_COMPLETION_RING);
	if (map == MAP_FAILED) {
		err = -errno;
		goto out_mmap;
	}

	umem->comp = comp;
	comp->mask = umem->config.comp_size - 1;
	comp->size = umem->config.comp_size;
	comp->producer = map + off.cr.producer;
	comp->consumer = map + off.cr.consumer;
	comp->ring = map + off.cr.desc;

	*umem_ptr = umem;
	return 0;

out_mmap:
	munmap(map, off.fr.desc + umem->config.fill_size * sizeof(__u64));
out_socket:
	close(umem->fd);
out_umem_alloc:
	free(umem);
	return err;
}
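
/* Illustrative only, not part of the original sources: a minimal sketch of
 * how a caller might set up a UMEM for the function above. NUM_FRAMES and
 * the use of posix_memalign() are assumptions made for the example; passing
 * a NULL config selects the defaults from xsk_set_umem_config().
 *
 *	#define NUM_FRAMES 4096
 *
 *	struct xsk_ring_prod fill;
 *	struct xsk_ring_cons comp;
 *	struct xsk_umem *umem;
 *	void *bufs;
 *	__u64 size = NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE;
 *
 *	// The area must be page aligned; see xsk_page_aligned() above.
 *	if (posix_memalign(&bufs, getpagesize(), size))
 *		exit(1);
 *	if (xsk_umem__create(&umem, bufs, size, &fill, &comp, NULL))
 *		exit(1);
 */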

static int xsk_load_xdp_prog(struct xsk_socket *xsk)
{
	static const int log_buf_size = 16 * 1024;
	char log_buf[log_buf_size];
	int err, prog_fd;

	/* The instruction sequence below is equivalent to this C program:
	 *
	 * SEC("xdp_sock") int xdp_sock_prog(struct xdp_md *ctx)
	 * {
	 *	int index = ctx->rx_queue_index;
	 *
	 *	// A set entry here means that the corresponding queue_id
	 *	// has an active AF_XDP socket bound to it.
	 *	if (bpf_map_lookup_elem(&xsks_map, &index))
	 *		return bpf_redirect_map(&xsks_map, index, 0);
	 *
	 *	return XDP_PASS;
	 * }
	 */
	struct bpf_insn prog[] = {
		/* r1 = *(u32 *)(r1 + 16), i.e. ctx->rx_queue_index */
		BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 16),
		/* *(u32 *)(r10 - 4) = r1, stash the queue index on the stack */
		BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_1, -4),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
		BPF_LD_MAP_FD(BPF_REG_1, xsk->xsks_map_fd),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
		BPF_MOV32_IMM(BPF_REG_0, 2),
		/* if the lookup failed (r1 == NULL), skip the redirect and
		 * return XDP_PASS (2)
		 */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 5),
		/* r0 = bpf_redirect_map(&xsks_map, index, 0) */
		BPF_LD_MAP_FD(BPF_REG_1, xsk->xsks_map_fd),
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_10, -4),
		BPF_MOV32_IMM(BPF_REG_3, 0),
		BPF_EMIT_CALL(BPF_FUNC_redirect_map),
		/* the jump above lands here */
		BPF_EXIT_INSN(),
	};
	size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);

	prog_fd = bpf_load_program(BPF_PROG_TYPE_XDP, prog, insns_cnt,
				   "LGPL-2.1 or BSD-2-Clause", 0, log_buf,
				   log_buf_size);
	if (prog_fd < 0) {
		pr_warning("BPF log buffer:\n%s", log_buf);
		return prog_fd;
	}

	err = bpf_set_link_xdp_fd(xsk->ifindex, prog_fd, xsk->config.xdp_flags);
	if (err) {
		close(prog_fd);
		return err;
	}

	xsk->prog_fd = prog_fd;
	return 0;
}

static int xsk_get_max_queues(struct xsk_socket *xsk)
{
	struct ethtool_channels channels = { .cmd = ETHTOOL_GCHANNELS };
	struct ifreq ifr = {};
	int fd, err, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -errno;

	ifr.ifr_data = (void *)&channels;
	memcpy(ifr.ifr_name, xsk->ifname, IFNAMSIZ - 1);
	ifr.ifr_name[IFNAMSIZ - 1] = '\0';
	err = ioctl(fd, SIOCETHTOOL, &ifr);
	if (err && errno != EOPNOTSUPP) {
		ret = -errno;
		goto out;
	}

	if (err || channels.max_combined == 0)
		/* If the device says it has no channels, then all traffic
		 * is sent to a single stream, so max queues = 1.
		 */
		ret = 1;
	else
		ret = channels.max_combined;

out:
	close(fd);
	return ret;
}

static int xsk_create_bpf_maps(struct xsk_socket *xsk)
{
	int max_queues;
	int fd;

	max_queues = xsk_get_max_queues(xsk);
	if (max_queues < 0)
		return max_queues;

	fd = bpf_create_map_name(BPF_MAP_TYPE_XSKMAP, "xsks_map",
				 sizeof(int), sizeof(int), max_queues, 0);
	if (fd < 0)
		return fd;

	xsk->xsks_map_fd = fd;

	return 0;
}

static void xsk_delete_bpf_maps(struct xsk_socket *xsk)
{
	bpf_map_delete_elem(xsk->xsks_map_fd, &xsk->queue_id);
	close(xsk->xsks_map_fd);
}

static int xsk_lookup_bpf_maps(struct xsk_socket *xsk)
{
	__u32 i, *map_ids, num_maps, prog_len = sizeof(struct bpf_prog_info);
	__u32 map_len = sizeof(struct bpf_map_info);
	struct bpf_prog_info prog_info = {};
	struct bpf_map_info map_info;
	int fd, err;

	err = bpf_obj_get_info_by_fd(xsk->prog_fd, &prog_info, &prog_len);
	if (err)
		return err;

	num_maps = prog_info.nr_map_ids;

	map_ids = calloc(prog_info.nr_map_ids, sizeof(*map_ids));
	if (!map_ids)
		return -ENOMEM;

	memset(&prog_info, 0, prog_len);
	prog_info.nr_map_ids = num_maps;
	prog_info.map_ids = (__u64)(unsigned long)map_ids;

	err = bpf_obj_get_info_by_fd(xsk->prog_fd, &prog_info, &prog_len);
	if (err)
		goto out_map_ids;

	xsk->xsks_map_fd = -1;

	for (i = 0; i < prog_info.nr_map_ids; i++) {
		fd = bpf_map_get_fd_by_id(map_ids[i]);
		if (fd < 0)
			continue;

		err = bpf_obj_get_info_by_fd(fd, &map_info, &map_len);
		if (err) {
			close(fd);
			continue;
		}

		if (!strcmp(map_info.name, "xsks_map")) {
			xsk->xsks_map_fd = fd;
			continue;
		}

		close(fd);
	}

	err = 0;
	if (xsk->xsks_map_fd == -1)
		err = -ENOENT;

out_map_ids:
	free(map_ids);
	return err;
}

static int xsk_set_bpf_maps(struct xsk_socket *xsk)
{
	return bpf_map_update_elem(xsk->xsks_map_fd, &xsk->queue_id,
				   &xsk->fd, 0);
}

static int xsk_setup_xdp_prog(struct xsk_socket *xsk)
{
	__u32 prog_id = 0;
	int err;

	err = bpf_get_link_xdp_id(xsk->ifindex, &prog_id,
				  xsk->config.xdp_flags);
	if (err)
		return err;

	if (!prog_id) {
		err = xsk_create_bpf_maps(xsk);
		if (err)
			return err;

		err = xsk_load_xdp_prog(xsk);
		if (err) {
			xsk_delete_bpf_maps(xsk);
			return err;
		}
	} else {
		xsk->prog_fd = bpf_prog_get_fd_by_id(prog_id);
		err = xsk_lookup_bpf_maps(xsk);
		if (err) {
			close(xsk->prog_fd);
			return err;
		}
	}

	err = xsk_set_bpf_maps(xsk);
	if (err) {
		xsk_delete_bpf_maps(xsk);
		close(xsk->prog_fd);
		return err;
	}

	return 0;
}

int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
		       __u32 queue_id, struct xsk_umem *umem,
		       struct xsk_ring_cons *rx, struct xsk_ring_prod *tx,
		       const struct xsk_socket_config *usr_config)
{
	void *rx_map = NULL, *tx_map = NULL;
	struct sockaddr_xdp sxdp = {};
	struct xdp_mmap_offsets off;
	struct xdp_options opts;
	struct xsk_socket *xsk;
	socklen_t optlen;
	int err;

	if (!umem || !xsk_ptr || !rx || !tx)
		return -EFAULT;

	if (umem->refcount) {
		pr_warning("Error: shared umems not supported by libbpf.\n");
		return -EBUSY;
	}

	xsk = calloc(1, sizeof(*xsk));
	if (!xsk)
		return -ENOMEM;

	if (umem->refcount++ > 0) {
		xsk->fd = socket(AF_XDP, SOCK_RAW, 0);
		if (xsk->fd < 0) {
			err = -errno;
			goto out_xsk_alloc;
		}
	} else {
		xsk->fd = umem->fd;
	}

	xsk->outstanding_tx = 0;
	xsk->queue_id = queue_id;
	xsk->umem = umem;
	xsk->ifindex = if_nametoindex(ifname);
	if (!xsk->ifindex) {
		err = -errno;
		goto out_socket;
	}
	memcpy(xsk->ifname, ifname, IFNAMSIZ - 1);
	xsk->ifname[IFNAMSIZ - 1] = '\0';

	err = xsk_set_xdp_socket_config(&xsk->config, usr_config);
	if (err)
		goto out_socket;

	if (rx) {
		err = setsockopt(xsk->fd, SOL_XDP, XDP_RX_RING,
				 &xsk->config.rx_size,
				 sizeof(xsk->config.rx_size));
		if (err) {
			err = -errno;
			goto out_socket;
		}
	}
	if (tx) {
		err = setsockopt(xsk->fd, SOL_XDP, XDP_TX_RING,
				 &xsk->config.tx_size,
				 sizeof(xsk->config.tx_size));
		if (err) {
			err = -errno;
			goto out_socket;
		}
	}

	optlen = sizeof(off);
	err = getsockopt(xsk->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
	if (err) {
		err = -errno;
		goto out_socket;
	}

	if (rx) {
		rx_map = xsk_mmap(NULL, off.rx.desc +
				  xsk->config.rx_size * sizeof(struct xdp_desc),
				  PROT_READ | PROT_WRITE,
				  MAP_SHARED | MAP_POPULATE,
				  xsk->fd, XDP_PGOFF_RX_RING);
		if (rx_map == MAP_FAILED) {
			err = -errno;
			goto out_socket;
		}

		rx->mask = xsk->config.rx_size - 1;
		rx->size = xsk->config.rx_size;
		rx->producer = rx_map + off.rx.producer;
		rx->consumer = rx_map + off.rx.consumer;
		rx->ring = rx_map + off.rx.desc;
	}
	xsk->rx = rx;

	if (tx) {
		tx_map = xsk_mmap(NULL, off.tx.desc +
				  xsk->config.tx_size * sizeof(struct xdp_desc),
				  PROT_READ | PROT_WRITE,
				  MAP_SHARED | MAP_POPULATE,
				  xsk->fd, XDP_PGOFF_TX_RING);
		if (tx_map == MAP_FAILED) {
			err = -errno;
			goto out_mmap_rx;
		}

		tx->mask = xsk->config.tx_size - 1;
		tx->size = xsk->config.tx_size;
		tx->producer = tx_map + off.tx.producer;
		tx->consumer = tx_map + off.tx.consumer;
		tx->ring = tx_map + off.tx.desc;
		tx->cached_cons = xsk->config.tx_size;
	}
	xsk->tx = tx;

	sxdp.sxdp_family = PF_XDP;
	sxdp.sxdp_ifindex = xsk->ifindex;
	sxdp.sxdp_queue_id = xsk->queue_id;
	sxdp.sxdp_flags = xsk->config.bind_flags;

	err = bind(xsk->fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
	if (err) {
		err = -errno;
		goto out_mmap_tx;
	}

	xsk->prog_fd = -1;

	optlen = sizeof(opts);
	err = getsockopt(xsk->fd, SOL_XDP, XDP_OPTIONS, &opts, &optlen);
	if (err) {
		err = -errno;
		goto out_mmap_tx;
	}

	xsk->zc = opts.flags & XDP_OPTIONS_ZEROCOPY;

	if (!(xsk->config.libbpf_flags & XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)) {
		err = xsk_setup_xdp_prog(xsk);
		if (err)
			goto out_mmap_tx;
	}

	*xsk_ptr = xsk;
	return 0;

out_mmap_tx:
	if (tx)
		munmap(tx_map, off.tx.desc +
		       xsk->config.tx_size * sizeof(struct xdp_desc));
out_mmap_rx:
	if (rx)
		munmap(rx_map, off.rx.desc +
		       xsk->config.rx_size * sizeof(struct xdp_desc));
out_socket:
	if (--umem->refcount)
		close(xsk->fd);
out_xsk_alloc:
	free(xsk);
	return err;
}
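
/* Illustrative only, not part of the original sources: a sketch of a typical
 * call into xsk_socket__create() followed by a simple receive path, using
 * the ring helpers declared in xsk.h. The interface name "eth0", queue 0,
 * and the batch size of 64 are assumptions made for the example.
 *
 *	struct xsk_ring_cons rx;
 *	struct xsk_ring_prod tx;
 *	struct xsk_socket *xsk;
 *	size_t rcvd;
 *	__u32 idx, i;
 *
 *	if (xsk_socket__create(&xsk, "eth0", 0, umem, &rx, &tx, NULL))
 *		exit(1);
 *
 *	// Hand frames to the kernel via the fill ring before receiving.
 *	if (xsk_ring_prod__reserve(&fill, 64, &idx) == 64) {
 *		for (i = 0; i < 64; i++)
 *			*xsk_ring_prod__fill_addr(&fill, idx + i) =
 *				i * XSK_UMEM__DEFAULT_FRAME_SIZE;
 *		xsk_ring_prod__submit(&fill, 64);
 *	}
 *
 *	rcvd = xsk_ring_cons__peek(&rx, 64, &idx);
 *	for (i = 0; i < rcvd; i++) {
 *		const struct xdp_desc *desc =
 *			xsk_ring_cons__rx_desc(&rx, idx + i);
 *		// desc->addr / desc->len describe one received frame.
 *	}
 *	xsk_ring_cons__release(&rx, rcvd);
 */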

int xsk_umem__delete(struct xsk_umem *umem)
{
	struct xdp_mmap_offsets off;
	socklen_t optlen;
	int err;

	if (!umem)
		return 0;

	if (umem->refcount)
		return -EBUSY;

	optlen = sizeof(off);
	err = getsockopt(umem->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
	if (!err) {
		munmap(umem->fill->ring - off.fr.desc,
		       off.fr.desc + umem->config.fill_size * sizeof(__u64));
		munmap(umem->comp->ring - off.cr.desc,
		       off.cr.desc + umem->config.comp_size * sizeof(__u64));
	}

	close(umem->fd);
	free(umem);

	return 0;
}

void xsk_socket__delete(struct xsk_socket *xsk)
{
	size_t desc_sz = sizeof(struct xdp_desc);
	struct xdp_mmap_offsets off;
	socklen_t optlen;
	int err;

	if (!xsk)
		return;

	if (xsk->prog_fd != -1) {
		xsk_delete_bpf_maps(xsk);
		close(xsk->prog_fd);
	}

	optlen = sizeof(off);
	err = getsockopt(xsk->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
	if (!err) {
		if (xsk->rx) {
			munmap(xsk->rx->ring - off.rx.desc,
			       off.rx.desc + xsk->config.rx_size * desc_sz);
		}
		if (xsk->tx) {
			munmap(xsk->tx->ring - off.tx.desc,
			       off.tx.desc + xsk->config.tx_size * desc_sz);
		}
	}

	xsk->umem->refcount--;
	/* Do not close an fd that is also in use by the umem. */
	if (xsk->fd != xsk->umem->fd)
		close(xsk->fd);
	free(xsk);
}
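
/* Illustrative only, not part of the original sources: sockets must be
 * deleted before their UMEM, since xsk_umem__delete() refuses to tear down
 * a UMEM that still has sockets attached (refcount != 0). "bufs" refers to
 * the caller-allocated UMEM area from the earlier example.
 *
 *	xsk_socket__delete(xsk);
 *	xsk_umem__delete(umem);
 *	free(bufs);
 */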