/*
 * Testsuite for the eBPF verifier
 */
#include <stdio.h>
#include <unistd.h>
#include <linux/bpf.h>
#include <errno.h>
#include <linux/unistd.h>
#include <string.h>
#include <linux/filter.h>
#include <stddef.h>
#include <stdbool.h>
#include <sys/resource.h>
#include "libbpf.h"

#define MAX_INSNS 512
#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))

#define MAX_FIXUPS 8

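/*
 * One verifier test case: a short eBPF program plus the verdict the
 * in-kernel verifier is expected to reach for it.  'fixup' lists the
 * instruction indices whose imm field must be patched with a real map fd
 * before loading (used with BPF_LD_MAP_FD), and 'prog_array_fixup' does
 * the same with a prog_array fd for tail calls.  The *_unpriv fields give
 * the expected outcome when the test is run by an unprivileged (non-root)
 * user; UNDEF means the privileged expectation applies.
 */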
struct bpf_test {
	const char *descr;
	struct bpf_insn insns[MAX_INSNS];
	int fixup[MAX_FIXUPS];
	int prog_array_fixup[MAX_FIXUPS];
	const char *errstr;
	const char *errstr_unpriv;
	enum {
		UNDEF,
		ACCEPT,
		REJECT
	} result, result_unpriv;
	enum bpf_prog_type prog_type;
};

static struct bpf_test tests[] = {
	{
		"add+sub+mul",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 1),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
			BPF_MOV64_IMM(BPF_REG_2, 3),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"unreachable",
		.insns = {
			BPF_EXIT_INSN(),
			BPF_EXIT_INSN(),
		},
		.errstr = "unreachable",
		.result = REJECT,
	},
	{
		"unreachable2",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "unreachable",
		.result = REJECT,
	},
	{
		"out of range jump",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "jump out of range",
		.result = REJECT,
	},
	{
		"out of range jump2",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, -2),
			BPF_EXIT_INSN(),
		},
		.errstr = "jump out of range",
		.result = REJECT,
	},
	{
		"test1 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_MOV64_IMM(BPF_REG_0, 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM insn",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"test2 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM insn",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"test3 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test4 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test5 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"no bpf_exit",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
		},
		.errstr = "jump out of range",
		.result = REJECT,
	},
	{
		"loop (back-edge)",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "back-edge",
		.result = REJECT,
	},
	{
		"loop2 (back-edge)",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
			BPF_JMP_IMM(BPF_JA, 0, 0, -4),
			BPF_EXIT_INSN(),
		},
		.errstr = "back-edge",
		.result = REJECT,
	},
	{
		"conditional loop",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
			BPF_EXIT_INSN(),
		},
		.errstr = "back-edge",
		.result = REJECT,
	},
	{
		"read uninitialized register",
		.insns = {
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_EXIT_INSN(),
		},
		.errstr = "R2 !read_ok",
		.result = REJECT,
	},
	{
		"read invalid register",
		.insns = {
			BPF_MOV64_REG(BPF_REG_0, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R15 is invalid",
		.result = REJECT,
	},
	{
		"program doesn't init R0 before exit",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R0 !read_ok",
		.result = REJECT,
	},
	{
		"program doesn't init R0 before exit in all branches",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "R0 !read_ok",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"stack out of bounds",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid stack",
		.result = REJECT,
	},
	{
		"invalid call insn1",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_CALL uses reserved",
		.result = REJECT,
	},
	{
		"invalid call insn2",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_CALL uses reserved",
		.result = REJECT,
	},
	{
		"invalid function call",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid func 1234567",
		.result = REJECT,
	},
	{
		"uninitialized stack1",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
			BPF_EXIT_INSN(),
		},
		.fixup = {2},
		.errstr = "invalid indirect read from stack",
		.result = REJECT,
	},
	{
		"uninitialized stack2",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid read from stack",
		.result = REJECT,
	},
	{
		"check valid spill/fill",
		.insns = {
			/* spill R1(ctx) into stack */
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
			/* fill it back into R2 */
			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
			/* return the spilled pointer */
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R0 leaks addr",
		.result = ACCEPT,
		.result_unpriv = REJECT,
	},
	{
		"check corrupted spill/fill",
		.insns = {
			/* spill R1(ctx) into stack */
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
			/* mess with the spilled pointer on the stack */
			BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
			/* filling it back into R0 should fail */
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "attempt to corrupt spilled",
		.errstr = "corrupted spill",
		.result = REJECT,
	},
	{
		"invalid src register in STX",
		.insns = {
			BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R15 is invalid",
		.result = REJECT,
	},
	{
		"invalid dst register in STX",
		.insns = {
			BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R14 is invalid",
		.result = REJECT,
	},
	{
		"invalid dst register in ST",
		.insns = {
			BPF_ST_MEM(BPF_B, 14, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R14 is invalid",
		.result = REJECT,
	},
	{
		"invalid src register in LDX",
		.insns = {
			BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R12 is invalid",
		.result = REJECT,
	},
	{
		"invalid dst register in LDX",
		.insns = {
			BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R11 is invalid",
		.result = REJECT,
	},
	{
		"junk insn",
		.insns = {
			BPF_RAW_INSN(0, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM",
		.result = REJECT,
	},
	{
		"junk insn2",
		.insns = {
			BPF_RAW_INSN(1, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_LDX uses reserved fields",
		.result = REJECT,
	},
	{
		"junk insn3",
		.insns = {
			BPF_RAW_INSN(-1, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_ALU opcode f0",
		.result = REJECT,
	},
	{
		"junk insn4",
		.insns = {
			BPF_RAW_INSN(-1, -1, -1, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_ALU opcode f0",
		.result = REJECT,
	},
	{
		"junk insn5",
		.insns = {
			BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_ALU uses reserved fields",
		.result = REJECT,
	},
	{
		"misaligned read from stack",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned access",
		.result = REJECT,
	},
	{
		"invalid map_fd for function call",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem),
			BPF_EXIT_INSN(),
		},
		.errstr = "fd 0 is not pointing to valid bpf_map",
		.result = REJECT,
	},
	{
		"don't check return value before access",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup = {3},
		.errstr = "R0 invalid mem access 'map_value_or_null'",
		.result = REJECT,
	},
	{
		"access memory with incorrect alignment",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
			BPF_EXIT_INSN(),
		},
		.fixup = {3},
		.errstr = "misaligned access",
		.result = REJECT,
	},
	{
		"sometimes access memory with incorrect alignment",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
			BPF_EXIT_INSN(),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.fixup = {3},
		.errstr = "R0 invalid mem access",
		.errstr_unpriv = "R0 leaks addr",
		.result = REJECT,
	},
	{
		"jump test 1",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"jump test 2",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 14),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 8),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 5),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"jump test 3",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 19),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
			BPF_JMP_IMM(BPF_JA, 0, 0, 15),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
			BPF_JMP_IMM(BPF_JA, 0, 0, 7),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
			BPF_JMP_IMM(BPF_JA, 0, 0, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem),
			BPF_EXIT_INSN(),
		},
		.fixup = {24},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"jump test 4",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"jump test 5",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
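	/*
	 * The following tests exercise __sk_buff context accesses: loads of
	 * the exported fields are fine, while out-of-range offsets, non-u32
	 * cb[] accesses and stores that the program type may not perform
	 * must be rejected as "invalid bpf_context access".
	 */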
	{
		"access skb fields ok",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, len)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, mark)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, pkt_type)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, queue_mapping)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, protocol)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, vlan_present)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, vlan_tci)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"access skb fields bad1",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"access skb fields bad2",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, pkt_type)),
			BPF_EXIT_INSN(),
		},
		.fixup = {4},
		.errstr = "different pointers",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"access skb fields bad3",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, pkt_type)),
			BPF_EXIT_INSN(),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_JMP_IMM(BPF_JA, 0, 0, -12),
		},
		.fixup = {6},
		.errstr = "different pointers",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"access skb fields bad4",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, len)),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_JMP_IMM(BPF_JA, 0, 0, -13),
		},
		.fixup = {7},
		.errstr = "different pointers",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"check skb->mark is not writeable by sockets",
		.insns = {
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, mark)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.errstr_unpriv = "R1 leaks addr",
		.result = REJECT,
	},
	{
		"check skb->tc_index is not writeable by sockets",
		.insns = {
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, tc_index)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.errstr_unpriv = "R1 leaks addr",
		.result = REJECT,
	},
	{
		"check non-u32 access to cb",
		.insns = {
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.errstr_unpriv = "R1 leaks addr",
		.result = REJECT,
	},
	{
		"check out of range skb->cb access",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0]) + 256),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.errstr_unpriv = "",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SCHED_ACT,
	},
	{
		"write skb fields from socket prog",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4])),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, mark)),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, tc_index)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[2])),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.errstr_unpriv = "R1 leaks addr",
		.result_unpriv = REJECT,
	},
	{
		"write skb fields from tc_cls_act prog",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, mark)),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, tc_index)),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, tc_index)),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[3])),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "",
		.result_unpriv = REJECT,
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"PTR_TO_STACK store/load",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
			BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"PTR_TO_STACK store/load - bad alignment on off",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "misaligned access off -6 size 8",
	},
	{
		"PTR_TO_STACK store/load - bad alignment on reg",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "misaligned access off -2 size 8",
	},
	{
		"PTR_TO_STACK store/load - out of bounds low",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack off=-79992 size=8",
	},
	{
		"PTR_TO_STACK store/load - out of bounds high",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack off=0 size=8",
	},
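	/*
	 * "unpriv:" tests cover operations that are accepted for root but
	 * must be rejected (or at least flagged) when the program is loaded
	 * by an unprivileged user, mainly so kernel pointers cannot be
	 * leaked or forged.
	 */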
	{
		"unpriv: return pointer",
		.insns = {
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.result_unpriv = REJECT,
		.errstr_unpriv = "R0 leaks addr",
	},
	{
		"unpriv: add const to pointer",
		.insns = {
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.result_unpriv = REJECT,
		.errstr_unpriv = "R1 pointer arithmetic",
	},
	{
		"unpriv: add pointer to pointer",
		.insns = {
			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.result_unpriv = REJECT,
		.errstr_unpriv = "R1 pointer arithmetic",
	},
	{
		"unpriv: neg pointer",
		.insns = {
			BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.result_unpriv = REJECT,
		.errstr_unpriv = "R1 pointer arithmetic",
	},
	{
		"unpriv: cmp pointer with const",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.result_unpriv = REJECT,
		.errstr_unpriv = "R1 pointer comparison",
	},
	{
		"unpriv: cmp pointer with pointer",
		.insns = {
			BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.result_unpriv = REJECT,
		.errstr_unpriv = "R10 pointer comparison",
	},
	{
		"unpriv: check that printk is disallowed",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
			BPF_MOV64_IMM(BPF_REG_2, 8),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_trace_printk),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "unknown func 6",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"unpriv: pass pointer to helper function",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup = {3},
		.errstr_unpriv = "R4 leaks addr",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"unpriv: indirectly pass pointer on stack to helper function",
		.insns = {
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup = {3},
		.errstr = "invalid indirect read from stack off -8+0 size 8",
		.result = REJECT,
	},
	{
		"unpriv: mangle pointer on stack 1",
		.insns = {
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
			BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "attempt to corrupt spilled",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"unpriv: mangle pointer on stack 2",
		.insns = {
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
			BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "attempt to corrupt spilled",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"unpriv: read pointer from stack in small chunks",
		.insns = {
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid size",
		.result = REJECT,
	},
	{
		"unpriv: write pointer into ctx",
		.insns = {
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 leaks addr",
		.result_unpriv = REJECT,
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"unpriv: write pointer into map elem value",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
			BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup = {3},
		.errstr_unpriv = "R0 leaks addr",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"unpriv: partial copy of pointer",
		.insns = {
			BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R10 partial copy",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"unpriv: pass pointer to tail_call",
		.insns = {
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
			BPF_LD_MAP_FD(BPF_REG_2, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_array_fixup = {1},
		.errstr_unpriv = "R3 leaks addr into helper",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"unpriv: cmp map pointer with zero",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 0),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup = {1},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"unpriv: write into frame pointer",
		.insns = {
			BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "frame pointer is read only",
		.result = REJECT,
	},
	{
		"unpriv: cmp of frame pointer",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R10 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"unpriv: cmp of stack pointer",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R2 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"unpriv: obfuscate stack pointer",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R2 pointer arithmetic",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
};

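/*
 * The insns[] array of each test is zero-padded out to MAX_INSNS; recover
 * the real program length by scanning backwards for the last instruction
 * that is not all zeroes.
 */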
static int probe_filter_length(struct bpf_insn *fp)
{
	int len = 0;

	for (len = MAX_INSNS - 1; len > 0; --len)
		if (fp[len].code != 0 || fp[len].imm != 0)
			break;

	return len + 1;
}

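/*
 * Dummy maps referenced by the fixup tables: a small hash map for the map
 * helper tests and a prog_array for the tail_call test.  A creation
 * failure is only reported here; the test that needs the fd will then
 * fail on its own.
 */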
static int create_map(void)
{
	int map_fd;

	map_fd = bpf_create_map(BPF_MAP_TYPE_HASH,
				sizeof(long long), sizeof(long long), 1024);
	if (map_fd < 0)
		printf("failed to create map '%s'\n", strerror(errno));

	return map_fd;
}

static int create_prog_array(void)
{
	int map_fd;

	map_fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY,
				sizeof(int), sizeof(int), 4);
	if (map_fd < 0)
		printf("failed to create prog_array '%s'\n", strerror(errno));

	return map_fd;
}

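/*
 * Run every test: patch in the map fds requested by the fixup tables,
 * load the program, and compare the verifier's verdict (and, for expected
 * rejections, its log) against the expected result.  When running as an
 * unprivileged user, the *_unpriv expectations take precedence wherever
 * they are set.
 */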
static int test(void)
{
	int prog_fd, i, pass_cnt = 0, err_cnt = 0;
	bool unpriv = geteuid() != 0;

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		struct bpf_insn *prog = tests[i].insns;
		int prog_type = tests[i].prog_type;
		int prog_len = probe_filter_length(prog);
		int *fixup = tests[i].fixup;
		int *prog_array_fixup = tests[i].prog_array_fixup;
		int expected_result;
		const char *expected_errstr;
		int map_fd = -1, prog_array_fd = -1;

		if (*fixup) {
			map_fd = create_map();

			do {
				prog[*fixup].imm = map_fd;
				fixup++;
			} while (*fixup);
		}
		if (*prog_array_fixup) {
			prog_array_fd = create_prog_array();

			do {
				prog[*prog_array_fixup].imm = prog_array_fd;
				prog_array_fixup++;
			} while (*prog_array_fixup);
		}
		printf("#%d %s ", i, tests[i].descr);

		prog_fd = bpf_prog_load(prog_type ?: BPF_PROG_TYPE_SOCKET_FILTER,
					prog, prog_len * sizeof(struct bpf_insn),
					"GPL", 0);

		if (unpriv && tests[i].result_unpriv != UNDEF)
			expected_result = tests[i].result_unpriv;
		else
			expected_result = tests[i].result;

		if (unpriv && tests[i].errstr_unpriv)
			expected_errstr = tests[i].errstr_unpriv;
		else
			expected_errstr = tests[i].errstr;

		if (expected_result == ACCEPT) {
			if (prog_fd < 0) {
				printf("FAIL\nfailed to load prog '%s'\n",
				       strerror(errno));
				printf("%s", bpf_log_buf);
				err_cnt++;
				goto fail;
			}
		} else {
			if (prog_fd >= 0) {
				printf("FAIL\nunexpected success to load\n");
				printf("%s", bpf_log_buf);
				err_cnt++;
				goto fail;
			}
			if (strstr(bpf_log_buf, expected_errstr) == 0) {
				printf("FAIL\nunexpected error message: %s",
				       bpf_log_buf);
				err_cnt++;
				goto fail;
			}
		}

		pass_cnt++;
		printf("OK\n");
fail:
		if (map_fd >= 0)
			close(map_fd);
		if (prog_array_fd >= 0)
			close(prog_array_fd);
		close(prog_fd);

	}
	printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, err_cnt);

	return 0;
}

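/*
 * BPF programs and maps are charged against RLIMIT_MEMLOCK, so bump the
 * limit to 1 MB before running the tests.
 */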
int main(void)
{
	struct rlimit r = {1 << 20, 1 << 20};

	setrlimit(RLIMIT_MEMLOCK, &r);
	return test();
}