// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <error.h>
#include <linux/if.h>
#include <linux/if_tun.h>
#include <sys/uio.h>

#ifndef IP_MF
#define IP_MF 0x2000
#endif

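/* Compare a dissected bpf_flow_keys against the expected value and dump
 * every field (got/expected) on mismatch.
 */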
#define CHECK_FLOW_KEYS(desc, got, expected) \
	CHECK_ATTR(memcmp(&got, &expected, sizeof(got)) != 0, \
		   desc, \
		   "nhoff=%u/%u " \
		   "thoff=%u/%u " \
		   "addr_proto=0x%x/0x%x " \
		   "is_frag=%u/%u " \
		   "is_first_frag=%u/%u " \
		   "is_encap=%u/%u " \
		   "ip_proto=0x%x/0x%x " \
		   "n_proto=0x%x/0x%x " \
		   "flow_label=0x%x/0x%x " \
		   "sport=%u/%u " \
		   "dport=%u/%u\n", \
		   got.nhoff, expected.nhoff, \
		   got.thoff, expected.thoff, \
		   got.addr_proto, expected.addr_proto, \
		   got.is_frag, expected.is_frag, \
		   got.is_first_frag, expected.is_first_frag, \
		   got.is_encap, expected.is_encap, \
		   got.ip_proto, expected.ip_proto, \
		   got.n_proto, expected.n_proto, \
		   got.flow_label, expected.flow_label, \
		   got.sport, expected.sport, \
		   got.dport, expected.dport)

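/* Packet layouts used as test input; __packed keeps the on-wire layout free
 * of compiler padding.
 */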
struct ipv4_pkt {
	struct ethhdr eth;
	struct iphdr iph;
	struct tcphdr tcp;
} __packed;

struct ipip_pkt {
	struct ethhdr eth;
	struct iphdr iph;
	struct iphdr iph_inner;
	struct tcphdr tcp;
} __packed;

struct svlan_ipv4_pkt {
	struct ethhdr eth;
	__u16 vlan_tci;
	__u16 vlan_proto;
	struct iphdr iph;
	struct tcphdr tcp;
} __packed;

struct ipv6_pkt {
	struct ethhdr eth;
	struct ipv6hdr iph;
	struct tcphdr tcp;
} __packed;

struct ipv6_frag_pkt {
	struct ethhdr eth;
	struct ipv6hdr iph;
	struct frag_hdr {
		__u8 nexthdr;
		__u8 reserved;
		__be16 frag_off;
		__be32 identification;
	} ipf;
	struct tcphdr tcp;
} __packed;

struct dvlan_ipv6_pkt {
	struct ethhdr eth;
	__u16 vlan_tci;
	__u16 vlan_proto;
	__u16 vlan_tci2;
	__u16 vlan_proto2;
	struct ipv6hdr iph;
	struct tcphdr tcp;
} __packed;

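/* One test case: a name, the input packet, the expected bpf_flow_keys and the
 * dissector flags to pass in via the program context.
 */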
struct test {
	const char *name;
	union {
		struct ipv4_pkt ipv4;
		struct svlan_ipv4_pkt svlan_ipv4;
		struct ipip_pkt ipip;
		struct ipv6_pkt ipv6;
		struct ipv6_frag_pkt ipv6_frag;
		struct dvlan_ipv6_pkt dvlan_ipv6;
	} pkt;
	struct bpf_flow_keys keys;
	__u32 flags;
};

#define VLAN_HLEN 4

struct test tests[] = {
	{
		.name = "ipv4",
		.pkt.ipv4 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_TCP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
			.sport = 80,
			.dport = 8080,
		},
	},
	{
		.name = "ipv6",
		.pkt.ipv6 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_TCP,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct ipv6hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
			.sport = 80,
			.dport = 8080,
		},
	},
	{
		.name = "802.1q-ipv4",
		.pkt.svlan_ipv4 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_8021Q),
			.vlan_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_TCP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN + VLAN_HLEN,
			.thoff = ETH_HLEN + VLAN_HLEN + sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
			.sport = 80,
			.dport = 8080,
		},
	},
	{
		.name = "802.1ad-ipv6",
		.pkt.dvlan_ipv6 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_8021AD),
			.vlan_proto = __bpf_constant_htons(ETH_P_8021Q),
			.vlan_proto2 = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_TCP,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN + VLAN_HLEN * 2,
			.thoff = ETH_HLEN + VLAN_HLEN * 2 +
				 sizeof(struct ipv6hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
			.sport = 80,
			.dport = 8080,
		},
	},
	{
		.name = "ipv4-frag",
		.pkt.ipv4 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_TCP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph.frag_off = __bpf_constant_htons(IP_MF),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
			.is_frag = true,
			.is_first_frag = true,
			.sport = 80,
			.dport = 8080,
		},
		.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
	},
	{
		.name = "ipv4-no-frag",
		.pkt.ipv4 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_TCP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph.frag_off = __bpf_constant_htons(IP_MF),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
			.is_frag = true,
			.is_first_frag = true,
		},
	},
	{
		.name = "ipv6-frag",
		.pkt.ipv6_frag = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_FRAGMENT,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.ipf.nexthdr = IPPROTO_TCP,
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct ipv6hdr) +
				 sizeof(struct frag_hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
			.is_frag = true,
			.is_first_frag = true,
			.sport = 80,
			.dport = 8080,
		},
		.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
	},
	{
		.name = "ipv6-no-frag",
		.pkt.ipv6_frag = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_FRAGMENT,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.ipf.nexthdr = IPPROTO_TCP,
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct ipv6hdr) +
				 sizeof(struct frag_hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
			.is_frag = true,
			.is_first_frag = true,
		},
	},
	{
		.name = "ipv6-flow-label",
		.pkt.ipv6 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_TCP,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph.flow_lbl = { 0xb, 0xee, 0xef },
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct ipv6hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
			.sport = 80,
			.dport = 8080,
			.flow_label = __bpf_constant_htonl(0xbeeef),
		},
	},
	{
		.name = "ipv6-no-flow-label",
		.pkt.ipv6 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_TCP,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph.flow_lbl = { 0xb, 0xee, 0xef },
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct ipv6hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
			.flow_label = __bpf_constant_htonl(0xbeeef),
		},
		.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
	},
	{
		.name = "ipip-encap",
		.pkt.ipip = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_IPIP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph_inner.ihl = 5,
			.iph_inner.protocol = IPPROTO_TCP,
			.iph_inner.tot_len =
				__bpf_constant_htons(MAGIC_BYTES) -
				sizeof(struct iphdr),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct iphdr) +
				 sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
			.is_encap = true,
			.sport = 80,
			.dport = 8080,
		},
	},
	{
		.name = "ipip-no-encap",
		.pkt.ipip = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_IPIP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph_inner.ihl = 5,
			.iph_inner.protocol = IPPROTO_TCP,
			.iph_inner.tot_len =
				__bpf_constant_htons(MAGIC_BYTES) -
				sizeof(struct iphdr),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP,
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_IPIP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
			.is_encap = true,
		},
		.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP,
	},
};

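/* Create a TAP device in IFF_NAPI_FRAGS mode; packets written to it get
 * dissected through the driver's eth_get_headlen() path.
 */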
static int create_tap(const char *ifname)
{
	struct ifreq ifr = {
		.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_NAPI | IFF_NAPI_FRAGS,
	};
	int fd, ret;

	strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));

	fd = open("/dev/net/tun", O_RDWR);
	if (fd < 0)
		return -1;

	ret = ioctl(fd, TUNSETIFF, &ifr);
	if (ret) {
		close(fd);
		return -1;
	}

	return fd;
}

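/* Inject a single packet into the TAP device. */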
static int tx_tap(int fd, void *pkt, size_t len)
{
	struct iovec iov[] = {
		{
			.iov_len = len,
			.iov_base = pkt,
		},
	};
	return writev(fd, iov, ARRAY_SIZE(iov));
}

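/* Bring the interface up (IFF_UP) via SIOCGIFFLAGS/SIOCSIFFLAGS. */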
static int ifup(const char *ifname)
{
	struct ifreq ifr = {};
	int sk, ret;

	strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));

	sk = socket(PF_INET, SOCK_DGRAM, 0);
	if (sk < 0)
		return -1;

	ret = ioctl(sk, SIOCGIFFLAGS, &ifr);
	if (ret) {
		close(sk);
		return -1;
	}

	ifr.ifr_flags |= IFF_UP;
	ret = ioctl(sk, SIOCSIFFLAGS, &ifr);
	if (ret) {
		close(sk);
		return -1;
	}

	close(sk);
	return 0;
}

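/* Run every test twice: first via BPF_PROG_TEST_RUN, then via the skb-less
 * path by injecting the packet through a TAP device.
 */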
void test_flow_dissector(void)
{
	int i, err, prog_fd, keys_fd = -1, tap_fd;
	struct bpf_object *obj;
	__u32 duration = 0;

	err = bpf_flow_load(&obj, "./bpf_flow.o", "flow_dissector",
			    "jmp_table", "last_dissection", &prog_fd, &keys_fd);
	if (CHECK_FAIL(err))
		return;

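	/* First pass: feed each packet to the program with BPF_PROG_TEST_RUN
	 * and compare the returned flow keys; optional dissector flags are
	 * passed in via the program context.
	 */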
	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		struct bpf_flow_keys flow_keys;
		struct bpf_prog_test_run_attr tattr = {
			.prog_fd = prog_fd,
			.data_in = &tests[i].pkt,
			.data_size_in = sizeof(tests[i].pkt),
			.data_out = &flow_keys,
		};
		static struct bpf_flow_keys ctx = {};

		if (tests[i].flags) {
			tattr.ctx_in = &ctx;
			tattr.ctx_size_in = sizeof(ctx);
			ctx.flags = tests[i].flags;
		}

		err = bpf_prog_test_run_xattr(&tattr);
		CHECK_ATTR(tattr.data_size_out != sizeof(flow_keys) ||
			   err || tattr.retval != 1,
			   tests[i].name,
			   "err %d errno %d retval %d duration %d size %u/%zu\n",
			   err, errno, tattr.retval, tattr.duration,
			   tattr.data_size_out, sizeof(flow_keys));
		CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
	}

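	/* Second pass: the skb-less flow dissector. Attach the program,
	 * inject each packet through the TAP device (which ends up in
	 * eth_get_headlen()), and read the dissected keys back from the
	 * "last_dissection" map.
	 */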
	err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
	CHECK(err, "bpf_prog_attach", "err %d errno %d\n", err, errno);

	tap_fd = create_tap("tap0");
	CHECK(tap_fd < 0, "create_tap", "tap_fd %d errno %d\n", tap_fd, errno);
	err = ifup("tap0");
	CHECK(err, "ifup", "err %d errno %d\n", err, errno);

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		/* Keep in sync with the flags eth_get_headlen() dissects
		 * with: it only parses the first fragment.
		 */
		__u32 eth_get_headlen_flags =
			BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG;
		struct bpf_flow_keys flow_keys = {};
		__u32 key = (__u32)(tests[i].keys.sport) << 16 |
			    tests[i].keys.dport;

		/* There is no way to pass flags on this path, so only run
		 * the tests whose expected flags match.
		 */
		if (tests[i].flags != eth_get_headlen_flags)
			continue;

		err = tx_tap(tap_fd, &tests[i].pkt, sizeof(tests[i].pkt));
		CHECK(err < 0, "tx_tap", "err %d errno %d\n", err, errno);

		/* Check the flow keys the program stored in the map. */
		err = bpf_map_lookup_elem(keys_fd, &key, &flow_keys);
		CHECK_ATTR(err, tests[i].name, "bpf_map_lookup_elem %d\n", err);
		CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);

		err = bpf_map_delete_elem(keys_fd, &key);
		CHECK_ATTR(err, tests[i].name, "bpf_map_delete_elem %d\n", err);
	}


	close(tap_fd);
	bpf_prog_detach(prog_fd, BPF_FLOW_DISSECTOR);
	bpf_object__close(obj);
}