#include <test_progs.h>
#include <error.h>
#include <linux/if.h>
#include <linux/if_tun.h>
#include <sys/uio.h>
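
/* Compare a dissected struct bpf_flow_keys against the expected value and
 * print each field as got/expected on mismatch.
 */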
#define CHECK_FLOW_KEYS(desc, got, expected) \
	CHECK_ATTR(memcmp(&got, &expected, sizeof(got)) != 0, \
		   desc, \
		   "nhoff=%u/%u " \
		   "thoff=%u/%u " \
		   "addr_proto=0x%x/0x%x " \
		   "is_frag=%u/%u " \
		   "is_first_frag=%u/%u " \
		   "is_encap=%u/%u " \
		   "ip_proto=0x%x/0x%x " \
		   "n_proto=0x%x/0x%x " \
		   "sport=%u/%u " \
		   "dport=%u/%u\n", \
		   got.nhoff, expected.nhoff, \
		   got.thoff, expected.thoff, \
		   got.addr_proto, expected.addr_proto, \
		   got.is_frag, expected.is_frag, \
		   got.is_first_frag, expected.is_first_frag, \
		   got.is_encap, expected.is_encap, \
		   got.ip_proto, expected.ip_proto, \
		   got.n_proto, expected.n_proto, \
		   got.sport, expected.sport, \
		   got.dport, expected.dport)
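
/* Raw test packets; __packed keeps the header offsets identical to the
 * on-the-wire layout the flow dissector will parse.
 */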
struct ipv4_pkt {
	struct ethhdr eth;
	struct iphdr iph;
	struct tcphdr tcp;
} __packed;

struct svlan_ipv4_pkt {
	struct ethhdr eth;
	__u16 vlan_tci;
	__u16 vlan_proto;
	struct iphdr iph;
	struct tcphdr tcp;
} __packed;

struct ipv6_pkt {
	struct ethhdr eth;
	struct ipv6hdr iph;
	struct tcphdr tcp;
} __packed;

struct dvlan_ipv6_pkt {
	struct ethhdr eth;
	__u16 vlan_tci;
	__u16 vlan_proto;
	__u16 vlan_tci2;
	__u16 vlan_proto2;
	struct ipv6hdr iph;
	struct tcphdr tcp;
} __packed;
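
/* One test case: an input packet plus the flow keys the dissector is
 * expected to report for it.
 */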
struct test {
	const char *name;
	union {
		struct ipv4_pkt ipv4;
		struct svlan_ipv4_pkt svlan_ipv4;
		struct ipv6_pkt ipv6;
		struct dvlan_ipv6_pkt dvlan_ipv6;
	} pkt;
	struct bpf_flow_keys keys;
};

#define VLAN_HLEN 4
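
/* TCP flows (source port 80, destination port 8080) over plain IPv4/IPv6
 * and over single (802.1Q) and double (802.1AD) VLAN encapsulation.
 */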
struct test tests[] = {
	{
		.name = "ipv4",
		.pkt.ipv4 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_TCP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
			.sport = 80,
			.dport = 8080,
		},
	},
	{
		.name = "ipv6",
		.pkt.ipv6 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_TCP,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct ipv6hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
			.sport = 80,
			.dport = 8080,
		},
	},
	{
		.name = "802.1q-ipv4",
		.pkt.svlan_ipv4 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_8021Q),
			.vlan_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_TCP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN + VLAN_HLEN,
			.thoff = ETH_HLEN + VLAN_HLEN + sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
			.sport = 80,
			.dport = 8080,
		},
	},
	{
		.name = "802.1ad-ipv6",
		.pkt.dvlan_ipv6 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_8021AD),
			.vlan_proto = __bpf_constant_htons(ETH_P_8021Q),
			.vlan_proto2 = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_TCP,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN + VLAN_HLEN * 2,
			.thoff = ETH_HLEN + VLAN_HLEN * 2 +
				 sizeof(struct ipv6hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
			.sport = 80,
			.dport = 8080,
		},
	},
};
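
/* Create a TAP device in IFF_NAPI_FRAGS mode so that packets written to
 * the returned fd are received through the NAPI path and parsed by the
 * skb-less flow dissector. Returns the tun fd, or -1 on error.
 */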
static int create_tap(const char *ifname)
{
	struct ifreq ifr = {
		.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_NAPI | IFF_NAPI_FRAGS,
	};
	int fd, ret;

	strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));

	fd = open("/dev/net/tun", O_RDWR);
	if (fd < 0)
		return -1;

	ret = ioctl(fd, TUNSETIFF, &ifr);
	if (ret) {
		close(fd);
		return -1;
	}

	return fd;
}
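
/* Inject a single packet into the TAP device. */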
static int tx_tap(int fd, void *pkt, size_t len)
{
	struct iovec iov[] = {
		{
			.iov_len = len,
			.iov_base = pkt,
		},
	};
	return writev(fd, iov, ARRAY_SIZE(iov));
}
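
/* Bring the given interface up (SIOCGIFFLAGS + SIOCSIFFLAGS). */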
static int ifup(const char *ifname)
{
	struct ifreq ifr = {};
	int sk, ret;

	strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));

	sk = socket(PF_INET, SOCK_DGRAM, 0);
	if (sk < 0)
		return -1;

	ret = ioctl(sk, SIOCGIFFLAGS, &ifr);
	if (ret) {
		close(sk);
		return -1;
	}

	ifr.ifr_flags |= IFF_UP;
	ret = ioctl(sk, SIOCSIFFLAGS, &ifr);
	if (ret) {
		close(sk);
		return -1;
	}

	close(sk);
	return 0;
}

void test_flow_dissector(void)
{
	int i, err, prog_fd, keys_fd = -1, tap_fd;
	struct bpf_object *obj;
	__u32 duration = 0;
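
	/* Load bpf_flow.o, populate its "jmp_table" and grab the fd of the
	 * "last_dissection" map used to export the dissected flow keys.
	 */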
	err = bpf_flow_load(&obj, "./bpf_flow.o", "flow_dissector",
			    "jmp_table", "last_dissection", &prog_fd, &keys_fd);
	if (err) {
		error_cnt++;
		return;
	}
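
	/* First pass: run the program via BPF_PROG_TEST_RUN and compare the
	 * flow keys it returns against the expected ones.
	 */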
	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		struct bpf_flow_keys flow_keys;
		struct bpf_prog_test_run_attr tattr = {
			.prog_fd = prog_fd,
			.data_in = &tests[i].pkt,
			.data_size_in = sizeof(tests[i].pkt),
			.data_out = &flow_keys,
		};

		err = bpf_prog_test_run_xattr(&tattr);
		CHECK_ATTR(tattr.data_size_out != sizeof(flow_keys) ||
			   err || tattr.retval != 1,
			   tests[i].name,
			   "err %d errno %d retval %d duration %d size %u/%lu\n",
			   err, errno, tattr.retval, tattr.duration,
			   tattr.data_size_out, sizeof(flow_keys));
		CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
	}
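
	/* Second pass: attach the program as the flow dissector and feed the
	 * same packets through a TAP device, so dissection happens on the
	 * kernel's skb-less (tun NAPI-frags) receive path; the program
	 * exports its results through the "last_dissection" map.
	 */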
	err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
	CHECK(err, "bpf_prog_attach", "err %d errno %d\n", err, errno);

	tap_fd = create_tap("tap0");
	CHECK(tap_fd < 0, "create_tap", "tap_fd %d errno %d\n", tap_fd, errno);
	err = ifup("tap0");
	CHECK(err, "ifup", "err %d errno %d\n", err, errno);
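
	/* For each test, send the packet, check the flow keys stored in the
	 * map, then delete the entry so a stale result cannot satisfy a
	 * later test.
	 */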
	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		struct bpf_flow_keys flow_keys = {};
		struct bpf_prog_test_run_attr tattr = {};
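		/* sport in the upper 16 bits, dport in the lower 16 bits */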
		__u32 key = (__u32)(tests[i].keys.sport) << 16 |
			    tests[i].keys.dport;

		err = tx_tap(tap_fd, &tests[i].pkt, sizeof(tests[i].pkt));
		CHECK(err < 0, "tx_tap", "err %d errno %d\n", err, errno);

		err = bpf_map_lookup_elem(keys_fd, &key, &flow_keys);
		CHECK_ATTR(err, tests[i].name, "bpf_map_lookup_elem %d\n", err);
		CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);

		err = bpf_map_delete_elem(keys_fd, &key);
		CHECK_ATTR(err, tests[i].name, "bpf_map_delete_elem %d\n", err);
	}
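
	/* Clean up: detach the dissector and unload the object. */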
	bpf_prog_detach(prog_fd, BPF_FLOW_DISSECTOR);
	bpf_object__close(obj);
}