/*
 * bpf_jit_comp.c: BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>

#include <asm/set_memory.h>
#include <asm/nospec-branch.h>

/*
 * Assembly code in arch/x86/net/bpf_jit.S
 */
extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[];
extern u8 sk_load_byte_positive_offset[];
extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[];
extern u8 sk_load_byte_negative_offset[];

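/*
 * Write 1, 2 or 4 opcode bytes at *ptr. Multi-byte values are stored
 * little-endian, matching x86 instruction encoding, so the EMIT*()
 * macros below can pack consecutive opcode bytes into a single u32.
 */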
static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
	if (len == 1)
		*ptr = bytes;
	else if (len == 2)
		*(u16 *)ptr = bytes;
	else {
		*(u32 *)ptr = bytes;
		barrier();
	}
	return ptr + len;
}

#define EMIT(bytes, len) \
	do { prog = emit_code(prog, bytes, len); cnt += len; } while (0)

#define EMIT1(b1)		EMIT(b1, 1)
#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)	EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
#define EMIT1_off32(b1, off) \
	do { EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
	do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
	do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
	do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)

static bool is_imm8(int value)
{
	return value <= 127 && value >= -128;
}

static bool is_simm32(s64 value)
{
	return value == (s64)(s32)value;
}

static bool is_uimm32(u64 value)
{
	return value == (u64)(u32)value;
}

/* mov dst, src */
#define EMIT_mov(DST, SRC)						\
	do {								\
		if (DST != SRC)						\
			EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
	} while (0)

static int bpf_size_to_x86_bytes(int bpf_size)
{
	if (bpf_size == BPF_W)
		return 4;
	else if (bpf_size == BPF_H)
		return 2;
	else if (bpf_size == BPF_B)
		return 1;
	else if (bpf_size == BPF_DW)
		return 4; /* imm32 */
	else
		return 0;
}

/* list of x86 cond jumps opcodes (. + s8)
 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
 */
#define X86_JB  0x72
#define X86_JAE 0x73
#define X86_JE  0x74
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA  0x77
#define X86_JL  0x7C
#define X86_JGE 0x7D
#define X86_JLE 0x7E
#define X86_JG  0x7F

#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)

/* pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_JIT_REG + 1)

/* The following table maps BPF registers to x64 registers.
 *
 * x64 register r12 is unused, since if used as base address
 * register in load/store instructions, it always needs an
 * extra byte of encoding and is callee saved.
 *
 *  r9 caches skb->len - skb->data_len
 * r10 caches skb->data, and used for blinding (if enabled)
 */
static const int reg2hex[] = {
	[BPF_REG_0] = 0,  /* rax */
	[BPF_REG_1] = 7,  /* rdi */
	[BPF_REG_2] = 6,  /* rsi */
	[BPF_REG_3] = 2,  /* rdx */
	[BPF_REG_4] = 1,  /* rcx */
	[BPF_REG_5] = 0,  /* r8 */
	[BPF_REG_6] = 3,  /* rbx, callee saved */
	[BPF_REG_7] = 5,  /* r13, callee saved */
	[BPF_REG_8] = 6,  /* r14, callee saved */
	[BPF_REG_9] = 7,  /* r15, callee saved */
	[BPF_REG_FP] = 5, /* rbp, readonly */
	[BPF_REG_AX] = 2, /* r10, temp register */
	[AUX_REG] = 3,    /* r11, temp register */
};

/* is_ereg() == true if BPF register 'reg' maps to x64 r8..r15
 * which need extra byte of encoding.
 * rax,rcx,...,rbp have simpler encoding
 */
static bool is_ereg(u32 reg)
{
	return (1 << reg) & (BIT(BPF_REG_5) |
			     BIT(AUX_REG) |
			     BIT(BPF_REG_7) |
			     BIT(BPF_REG_8) |
			     BIT(BPF_REG_9) |
			     BIT(BPF_REG_AX));
}

static bool is_axreg(u32 reg)
{
	return reg == BPF_REG_0;
}

/* add modifiers if 'reg' maps to x64 registers r8..r15 */
static u8 add_1mod(u8 byte, u32 reg)
{
	if (is_ereg(reg))
		byte |= 1;
	return byte;
}

static u8 add_2mod(u8 byte, u32 r1, u32 r2)
{
	if (is_ereg(r1))
		byte |= 1;
	if (is_ereg(r2))
		byte |= 4;
	return byte;
}

/* encode 'dst_reg' register into x64 opcode 'byte' */
static u8 add_1reg(u8 byte, u32 dst_reg)
{
	return byte + reg2hex[dst_reg];
}

/* encode 'dst_reg' and 'src_reg' registers into x64 opcode 'byte' */
static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}

static void jit_fill_hole(void *area, unsigned int size)
{
	/* fill whole space with int3 instructions */
	memset(area, 0xcc, size);
}

struct jit_context {
	int cleanup_addr; /* epilogue code offset */
	bool seen_ld_abs;
	bool seen_ax_reg;
};

/* maximum number of bytes emitted while JITing one eBPF insn */
#define BPF_MAX_INSN_SIZE	128
#define BPF_INSN_SAFETY		64

#define AUX_STACK_SPACE \
	(32 /* space for rbx, r13, r14, r15 */ + \
	 8 /* space for skb_copy_bits() buffer */)

#define PROLOGUE_SIZE 37

/* emit x64 prologue code for BPF program and check its size.
 * bpf_tail_call helper will skip it while jumping into another program
 */
static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf)
{
	u8 *prog = *pprog;
	int cnt = 0;

	EMIT1(0x55); /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp,rsp */

	/* sub rsp, rounded_stack_depth + AUX_STACK_SPACE */
	EMIT3_off32(0x48, 0x81, 0xEC,
		    round_up(stack_depth, 8) + AUX_STACK_SPACE);

	/* sub rbp, AUX_STACK_SPACE */
	EMIT4(0x48, 0x83, 0xED, AUX_STACK_SPACE);

	/* all classic BPF filters use R6(rbx) save it */

	/* mov qword ptr [rbp+0], rbx */
	EMIT4(0x48, 0x89, 0x5D, 0);

	/* bpf_convert_filter() maps classic BPF register X to R7 and uses R8
	 * as temporary, so all tcpdump filters need to spill/fill R7(r13) and
	 * R8(r14). R9(r15) spill could be made conditional, but there is only
	 * one 'bpf_error' return path out of helper functions inside bpf_jit.S
	 * to save about 2 bytes of prologue. R9 is not used by BPF programs,
	 * but spill/fill is cheap and keeps the logic simple.
	 */

	/* mov qword ptr [rbp+8], r13 */
	EMIT4(0x4C, 0x89, 0x6D, 8);
	/* mov qword ptr [rbp+16], r14 */
	EMIT4(0x4C, 0x89, 0x75, 16);
	/* mov qword ptr [rbp+24], r15 */
	EMIT4(0x4C, 0x89, 0x7D, 24);

	if (!ebpf_from_cbpf) {
		/* Clear the tail call counter (tail_call_cnt): for eBPF
		 * programs it must start at 0. Done in two instructions:
		 * reset rax to 0 and store it at the counter location.
		 */

		/* xor eax, eax */
		EMIT2(0x31, 0xc0);
		/* mov qword ptr [rbp+32], rax */
		EMIT4(0x48, 0x89, 0x45, 32);

		BUILD_BUG_ON(cnt != PROLOGUE_SIZE);
	}

	*pprog = prog;
}

/* generate the following code:
 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
 *   if (index >= array->map.max_entries)
 *     goto out;
 *   if (tail_call_cnt > MAX_TAIL_CALL_CNT)
 *     goto out;
 *   prog = array->ptrs[index];
 *   if (prog == NULL)
 *     goto out;
 *   goto *(prog->bpf_func + prologue_size);
 * out:
 */
static void emit_bpf_tail_call(u8 **pprog)
{
	u8 *prog = *pprog;
	int label1, label2, label3;
	int cnt = 0;

	/* rdi - pointer to ctx
	 * rsi - pointer to bpf_array
	 * rdx - index in bpf_array
	 */

	/* if (index >= array->map.max_entries)
	 *   goto out;
	 */
	EMIT2(0x89, 0xD2);                        /* mov edx, edx */
	EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
	      offsetof(struct bpf_array, map.max_entries));
#define OFFSET1 (41 + RETPOLINE_RAX_BPF_JIT_SIZE) /* number of bytes to jump */
	EMIT2(X86_JBE, OFFSET1);                  /* jbe out */
	label1 = cnt;

	/* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *   goto out;
	 */
	EMIT2_off32(0x8B, 0x85, 36);              /* mov eax, dword ptr [rbp + 36] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
#define OFFSET2 (30 + RETPOLINE_RAX_BPF_JIT_SIZE)
	EMIT2(X86_JA, OFFSET2);                   /* ja out */
	label2 = cnt;
	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, 36);              /* mov dword ptr [rbp + 36], eax */

	/* prog = array->ptrs[index]; */
	EMIT4_off32(0x48, 0x8B, 0x84, 0xD6,       /* mov rax, [rsi + rdx * 8 + offsetof(...)] */
		    offsetof(struct bpf_array, ptrs));

	/* if (prog == NULL)
	 *   goto out;
	 */
	EMIT3(0x48, 0x85, 0xC0);                  /* test rax,rax */
#define OFFSET3 (8 + RETPOLINE_RAX_BPF_JIT_SIZE)
	EMIT2(X86_JE, OFFSET3);                   /* je out */
	label3 = cnt;

	/* goto *(prog->bpf_func + prologue_size); */
	EMIT4(0x48, 0x8B, 0x40,                   /* mov rax, qword ptr [rax + 32] */
	      offsetof(struct bpf_prog, bpf_func));
	EMIT4(0x48, 0x83, 0xC0, PROLOGUE_SIZE);   /* add rax, prologue_size */

	/* now we're ready to jump into next BPF program
	 * rdi == ctx (1st arg)
	 * rax == prog->bpf_func + prologue_size
	 */
	RETPOLINE_RAX_BPF_JIT();

	/* out: */
	BUILD_BUG_ON(cnt - label1 != OFFSET1);
	BUILD_BUG_ON(cnt - label2 != OFFSET2);
	BUILD_BUG_ON(cnt - label3 != OFFSET3);
	*pprog = prog;
}

static void emit_load_skb_data_hlen(u8 **pprog)
{
	u8 *prog = *pprog;
	int cnt = 0;

	/* r9d = skb->len - skb->data_len (headlen)
	 * r10 = skb->data
	 */
	/* mov %r9d, off32(%rdi) */
	EMIT3_off32(0x44, 0x8b, 0x8f, offsetof(struct sk_buff, len));

	/* sub %r9d, off32(%rdi) */
	EMIT3_off32(0x44, 0x2b, 0x8f, offsetof(struct sk_buff, data_len));

	/* mov %r10, off32(%rdi) */
	EMIT3_off32(0x4c, 0x8b, 0x97, offsetof(struct sk_buff, data));
	*pprog = prog;
}

static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
			   u32 dst_reg, const u32 imm32)
{
	u8 *prog = *pprog;
	u8 b1, b2, b3;
	int cnt = 0;

	/* optimization: if imm32 is positive, use 'mov %eax, imm32'
	 * (which zero-extends imm32) to save 2 bytes.
	 */
	if (sign_propagate && (s32)imm32 < 0) {
		/* 'mov %rax, imm32' sign extends imm32 */
		b1 = add_1mod(0x48, dst_reg);
		b2 = 0xC7;
		b3 = 0xC0;
		EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
		goto done;
	}

	/* optimization: if imm32 is zero, use 'xor %eax, %eax'
	 * to save 2 bytes.
	 */
	if (imm32 == 0) {
		if (is_ereg(dst_reg))
			EMIT1(add_2mod(0x40, dst_reg, dst_reg));
		b2 = 0x31; /* xor */
		b3 = 0xC0;
		EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
		goto done;
	}

	/* mov %eax, imm32 */
	if (is_ereg(dst_reg))
		EMIT1(add_1mod(0x40, dst_reg));
	EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
done:
	*pprog = prog;
}

static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
			   const u32 imm32_hi, const u32 imm32_lo)
{
	u8 *prog = *pprog;
	int cnt = 0;

	if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
		/* For emitting plain u32, where sign bit must not be
		 * propagated, LLVM tends to load imm64 over mov32
		 * directly, so save a couple of bytes by just doing
		 * 'mov %eax, imm32' instead.
		 */
		emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
	} else {
		/* movabsq %rax, imm64 */
		EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
		EMIT(imm32_lo, 4);
		EMIT(imm32_hi, 4);
	}

	*pprog = prog;
}

static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
{
	u8 *prog = *pprog;
	int cnt = 0;

	if (is64) {
		/* mov dst, src */
		EMIT_mov(dst_reg, src_reg);
	} else {
		/* mov32 dst, src */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT1(add_2mod(0x40, dst_reg, src_reg));
		EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
	}

	*pprog = prog;
}
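
/*
 * Translate the BPF program into x86-64 code, one insn at a time. Each
 * insn is emitted into temp[] first and only copied into the final image
 * once the caller passes a non-NULL @image, i.e. after the per-insn
 * offsets in addrs[] have converged across passes. Returns the emitted
 * program length, or a negative errno on failure.
 */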
static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
		  int oldproglen, struct jit_context *ctx)
{
	struct bpf_insn *insn = bpf_prog->insnsi;
	int insn_cnt = bpf_prog->len;
	bool seen_ld_abs = ctx->seen_ld_abs | (oldproglen == 0);
	bool seen_ax_reg = ctx->seen_ax_reg | (oldproglen == 0);
	bool seen_exit = false;
	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
	int i, cnt = 0;
	int proglen = 0;
	u8 *prog = temp;

	emit_prologue(&prog, bpf_prog->aux->stack_depth,
		      bpf_prog_was_classic(bpf_prog));

	if (seen_ld_abs)
		emit_load_skb_data_hlen(&prog);

	for (i = 0; i < insn_cnt; i++, insn++) {
		const s32 imm32 = insn->imm;
		u32 dst_reg = insn->dst_reg;
		u32 src_reg = insn->src_reg;
		u8 b2 = 0, b3 = 0;
		s64 jmp_offset;
		u8 jmp_cond;
		bool reload_skb_data;
		int ilen;
		u8 *func;

		if (dst_reg == BPF_REG_AX || src_reg == BPF_REG_AX)
			ctx->seen_ax_reg = seen_ax_reg = true;

		switch (insn->code) {
			/* ALU */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU64 | BPF_ADD | BPF_X:
		case BPF_ALU64 | BPF_SUB | BPF_X:
		case BPF_ALU64 | BPF_AND | BPF_X:
		case BPF_ALU64 | BPF_OR | BPF_X:
		case BPF_ALU64 | BPF_XOR | BPF_X:
			switch (BPF_OP(insn->code)) {
			case BPF_ADD: b2 = 0x01; break;
			case BPF_SUB: b2 = 0x29; break;
			case BPF_AND: b2 = 0x21; break;
			case BPF_OR: b2 = 0x09; break;
			case BPF_XOR: b2 = 0x31; break;
			}
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_2mod(0x48, dst_reg, src_reg));
			else if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
			break;

		case BPF_ALU64 | BPF_MOV | BPF_X:
		case BPF_ALU | BPF_MOV | BPF_X:
			emit_mov_reg(&prog,
				     BPF_CLASS(insn->code) == BPF_ALU64,
				     dst_reg, src_reg);
			break;

			/* neg dst */
		case BPF_ALU | BPF_NEG:
		case BPF_ALU64 | BPF_NEG:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
			break;

		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU64 | BPF_ADD | BPF_K:
		case BPF_ALU64 | BPF_SUB | BPF_K:
		case BPF_ALU64 | BPF_AND | BPF_K:
		case BPF_ALU64 | BPF_OR | BPF_K:
		case BPF_ALU64 | BPF_XOR | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			/* b3 holds 'normal' opcode, b2 short form only valid
			 * in case dst is eax/rax.
			 */
			switch (BPF_OP(insn->code)) {
			case BPF_ADD:
				b3 = 0xC0;
				b2 = 0x05;
				break;
			case BPF_SUB:
				b3 = 0xE8;
				b2 = 0x2D;
				break;
			case BPF_AND:
				b3 = 0xE0;
				b2 = 0x25;
				break;
			case BPF_OR:
				b3 = 0xC8;
				b2 = 0x0D;
				break;
			case BPF_XOR:
				b3 = 0xF0;
				b2 = 0x35;
				break;
			}

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
			else if (is_axreg(dst_reg))
				EMIT1_off32(b2, imm32);
			else
				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU64 | BPF_MOV | BPF_K:
		case BPF_ALU | BPF_MOV | BPF_K:
			emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
				       dst_reg, imm32);
			break;

		case BPF_LD | BPF_IMM | BPF_DW:
			emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
			insn++;
			i++;
			break;

			/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU64 | BPF_MOD | BPF_X:
		case BPF_ALU64 | BPF_DIV | BPF_X:
		case BPF_ALU64 | BPF_MOD | BPF_K:
		case BPF_ALU64 | BPF_DIV | BPF_K:
			EMIT1(0x50); /* push rax */
			EMIT1(0x52); /* push rdx */

			if (BPF_SRC(insn->code) == BPF_X)
				/* mov r11, src_reg */
				EMIT_mov(AUX_REG, src_reg);
			else
				/* mov r11, imm32 */
				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);

			/* mov rax, dst_reg */
			EMIT_mov(BPF_REG_0, dst_reg);

			/* xor edx, edx
			 * equivalent to 'xor rdx, rdx', but one byte less
			 */
			EMIT2(0x31, 0xd2);

			if (BPF_CLASS(insn->code) == BPF_ALU64)
				/* div r11 */
				EMIT3(0x49, 0xF7, 0xF3);
			else
				/* div r11d */
				EMIT3(0x41, 0xF7, 0xF3);

			if (BPF_OP(insn->code) == BPF_MOD)
				/* mov r11, rdx */
				EMIT3(0x49, 0x89, 0xD3);
			else
				/* mov r11, rax */
				EMIT3(0x49, 0x89, 0xC3);

			EMIT1(0x5A); /* pop rdx */
			EMIT1(0x58); /* pop rax */

			/* mov dst_reg, r11 */
			EMIT_mov(dst_reg, AUX_REG);
			break;

		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU64 | BPF_MUL | BPF_K:
		case BPF_ALU64 | BPF_MUL | BPF_X:
		{
			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;

			if (dst_reg != BPF_REG_0)
				EMIT1(0x50); /* push rax */
			if (dst_reg != BPF_REG_3)
				EMIT1(0x52); /* push rdx */

			/* mov r11, dst_reg */
			EMIT_mov(AUX_REG, dst_reg);

			if (BPF_SRC(insn->code) == BPF_X)
				emit_mov_reg(&prog, is64, BPF_REG_0, src_reg);
			else
				emit_mov_imm32(&prog, is64, BPF_REG_0, imm32);

			if (is64)
				EMIT1(add_1mod(0x48, AUX_REG));
			else if (is_ereg(AUX_REG))
				EMIT1(add_1mod(0x40, AUX_REG));
			/* mul(q) r11 */
			EMIT2(0xF7, add_1reg(0xE0, AUX_REG));

			if (dst_reg != BPF_REG_3)
				EMIT1(0x5A); /* pop rdx */
			if (dst_reg != BPF_REG_0) {
				/* mov dst_reg, rax */
				EMIT_mov(dst_reg, BPF_REG_0);
				EMIT1(0x58); /* pop rax */
			}
			break;
		}

			/* shifts */
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_ARSH | BPF_K:
		case BPF_ALU64 | BPF_LSH | BPF_K:
		case BPF_ALU64 | BPF_RSH | BPF_K:
		case BPF_ALU64 | BPF_ARSH | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}

			if (imm32 == 1)
				EMIT2(0xD1, add_1reg(b3, dst_reg));
			else
				EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_ARSH | BPF_X:
		case BPF_ALU64 | BPF_LSH | BPF_X:
		case BPF_ALU64 | BPF_RSH | BPF_X:
		case BPF_ALU64 | BPF_ARSH | BPF_X:
			/* check for bad case when dst_reg == rcx */
			if (dst_reg == BPF_REG_4) {
				/* mov r11, dst_reg */
				EMIT_mov(AUX_REG, dst_reg);
				dst_reg = AUX_REG;
			}

			if (src_reg != BPF_REG_4) { /* common case */
				EMIT1(0x51); /* push rcx */

				/* mov rcx, src_reg */
				EMIT_mov(BPF_REG_4, src_reg);
			}

			/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}
			EMIT2(0xD3, add_1reg(b3, dst_reg));

			if (src_reg != BPF_REG_4)
				EMIT1(0x59); /* pop rcx */

			if (insn->dst_reg == BPF_REG_4)
				/* mov dst_reg, r11 */
				EMIT_mov(insn->dst_reg, AUX_REG);
			break;

		case BPF_ALU | BPF_END | BPF_FROM_BE:
			switch (imm32) {
			case 16:
				/* emit 'ror %ax, 8' to swap lower 2 bytes */
				EMIT1(0x66);
				if (is_ereg(dst_reg))
					EMIT1(0x41);
				EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);

				/* emit 'movzwl eax, ax' */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* emit 'bswap eax' to swap lower 4 bytes */
				if (is_ereg(dst_reg))
					EMIT2(0x41, 0x0F);
				else
					EMIT1(0x0F);
				EMIT1(add_1reg(0xC8, dst_reg));
				break;
			case 64:
				/* emit 'bswap rax' to swap 8 bytes */
				EMIT3(add_1mod(0x48, dst_reg), 0x0F,
				      add_1reg(0xC8, dst_reg));
				break;
			}
			break;

		case BPF_ALU | BPF_END | BPF_FROM_LE:
			switch (imm32) {
			case 16:
				/* emit 'movzwl eax, ax' to zero extend 16-bit
				 * into 64 bit
				 */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* emit 'mov eax, eax' to clear upper 32-bits */
				if (is_ereg(dst_reg))
					EMIT1(0x45);
				EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 64:
				/* nop */
				break;
			}
			break;

			/* ST: *(u8*)(dst_reg + off) = imm */
		case BPF_ST | BPF_MEM | BPF_B:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC6);
			else
				EMIT1(0xC6);
			goto st;
		case BPF_ST | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg))
				EMIT3(0x66, 0x41, 0xC7);
			else
				EMIT2(0x66, 0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC7);
			else
				EMIT1(0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_DW:
			EMIT2(add_1mod(0x48, dst_reg), 0xC7);

st:			if (is_imm8(insn->off))
				EMIT2(add_1reg(0x40, dst_reg), insn->off);
			else
				EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);

			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
			break;

			/* STX: *(u8*)(dst_reg + off) = src_reg */
		case BPF_STX | BPF_MEM | BPF_B:
			/* emit 'mov byte ptr [rax + off], al' */
			if (is_ereg(dst_reg) || is_ereg(src_reg) ||
			    /* have to add extra byte for x86 SIL, DIL regs */
			    src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
				EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
			else
				EMIT1(0x88);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
			else
				EMIT2(0x66, 0x89);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
			else
				EMIT1(0x89);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_DW:
			EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
stx:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
					    insn->off);
			break;

			/* LDX: dst_reg = *(u8*)(src_reg + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
			/* emit 'movzx rax, byte ptr [rax + off]' */
			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_H:
			/* emit 'movzx rax, word ptr [rax + off]' */
			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_W:
			/* emit 'mov eax, dword ptr [rax + off]' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
			else
				EMIT1(0x8B);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_DW:
			/* emit 'mov rax, qword ptr [rax + off]' */
			EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
ldx:
			/* if insn->off == 0 we could save one extra byte, but
			 * the special case of x86 r13 which always needs an
			 * offset is not worth the hassle
			 */
			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, src_reg, dst_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, src_reg, dst_reg),
					    insn->off);
			break;

			/* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
		case BPF_STX | BPF_XADD | BPF_W:
			/* emit 'lock add dword ptr [rax + off], eax' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
			else
				EMIT2(0xF0, 0x01);
			goto xadd;
		case BPF_STX | BPF_XADD | BPF_DW:
			EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
xadd:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
					    insn->off);
			break;

			/* call */
		case BPF_JMP | BPF_CALL:
			func = (u8 *) __bpf_call_base + imm32;
			jmp_offset = func - (image + addrs[i]);
			if (seen_ld_abs) {
				reload_skb_data = bpf_helper_changes_pkt_data(func);
				if (reload_skb_data) {
					EMIT1(0x57); /* push %rdi */
					jmp_offset += 22; /* pop, mov, sub, mov */
				} else {
					EMIT2(0x41, 0x52); /* push %r10 */
					EMIT2(0x41, 0x51); /* push %r9 */
					/* need to adjust jmp offset, since
					 * pop %r9, pop %r10 take 4 bytes
					 * after call insn
					 */
					jmp_offset += 4;
				}
			}
			if (!imm32 || !is_simm32(jmp_offset)) {
				pr_err("unsupported bpf func %d addr %p image %p\n",
				       imm32, func, image);
				return -EINVAL;
			}
			EMIT1_off32(0xE8, jmp_offset);
			if (seen_ld_abs) {
				if (reload_skb_data) {
					EMIT1(0x5F); /* pop %rdi */
					emit_load_skb_data_hlen(&prog);
				} else {
					EMIT2(0x41, 0x59); /* pop %r9 */
					EMIT2(0x41, 0x5A); /* pop %r10 */
				}
			}
			break;

		case BPF_JMP | BPF_TAIL_CALL:
			emit_bpf_tail_call(&prog);
			break;

			/* cond jump */
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_X:
			/* cmp dst_reg, src_reg */
			EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x39,
			      add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_X:
			/* test dst_reg, src_reg */
			EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x85,
			      add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_K:
			/* test dst_reg, imm32 */
			EMIT1(add_1mod(0x48, dst_reg));
			EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_K:
			/* cmp dst_reg, imm8/32 */
			EMIT1(add_1mod(0x48, dst_reg));

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);

emit_cond_jmp:		/* convert BPF opcode to x86 */
			switch (BPF_OP(insn->code)) {
			case BPF_JEQ:
				jmp_cond = X86_JE;
				break;
			case BPF_JSET:
			case BPF_JNE:
				jmp_cond = X86_JNE;
				break;
			case BPF_JGT:
				/* GT is unsigned '>', JA in x86 */
				jmp_cond = X86_JA;
				break;
			case BPF_JLT:
				/* LT is unsigned '<', JB in x86 */
				jmp_cond = X86_JB;
				break;
			case BPF_JGE:
				/* GE is unsigned '>=', JAE in x86 */
				jmp_cond = X86_JAE;
				break;
			case BPF_JLE:
				/* LE is unsigned '<=', JBE in x86 */
				jmp_cond = X86_JBE;
				break;
			case BPF_JSGT:
				/* signed '>', GT in x86 */
				jmp_cond = X86_JG;
				break;
			case BPF_JSLT:
				/* signed '<', LT in x86 */
				jmp_cond = X86_JL;
				break;
			case BPF_JSGE:
				/* signed '>=', GE in x86 */
				jmp_cond = X86_JGE;
				break;
			case BPF_JSLE:
				/* signed '<=', LE in x86 */
				jmp_cond = X86_JLE;
				break;
			default: /* to silence gcc warning */
				return -EFAULT;
			}
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (is_imm8(jmp_offset)) {
				EMIT2(jmp_cond, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
			} else {
				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}

			break;

		case BPF_JMP | BPF_JA:
			if (insn->off == -1)
				/* -1 jmp instructions will always jump
				 * backwards two bytes. Explicitly handling
				 * this case avoids wasting too many passes
				 * when there are long sequences of replaced
				 * dead code.
				 */
				jmp_offset = -2;
			else
				jmp_offset = addrs[i + insn->off] - addrs[i];

			if (!jmp_offset)
				/* optimize out nop jumps */
				break;
emit_jmp:
			if (is_imm8(jmp_offset)) {
				EMIT2(0xEB, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT1_off32(0xE9, jmp_offset);
			} else {
				pr_err("jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}
			break;

		case BPF_LD | BPF_IND | BPF_W:
			func = sk_load_word;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_W:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_word);
common_load:
			ctx->seen_ld_abs = seen_ld_abs = true;
			jmp_offset = func - (image + addrs[i]);
			if (!func || !is_simm32(jmp_offset)) {
				pr_err("unsupported bpf func %d addr %p image %p\n",
				       imm32, func, image);
				return -EINVAL;
			}
			if (BPF_MODE(insn->code) == BPF_ABS) {
				/* mov %esi, imm32 */
				EMIT1_off32(0xBE, imm32);
			} else {
				/* mov %rsi, src_reg */
				EMIT_mov(BPF_REG_2, src_reg);
				if (imm32) {
					if (is_imm8(imm32))
						/* add %esi, imm8 */
						EMIT3(0x83, 0xC6, imm32);
					else
						/* add %esi, imm32 */
						EMIT2_off32(0x81, 0xC6, imm32);
				}
			}
			/* skb pointer is in R6 (%rbx), it will be copied into
			 * %rdi if skb_copy_bits() call is necessary.
			 * sk_load_* helpers also use %r10 and %r9d.
			 * See bpf_jit.S
			 */
			if (seen_ax_reg)
				/* r10 = skb->data, mov %r10, off32(%rbx) */
				EMIT3_off32(0x4c, 0x8b, 0x93,
					    offsetof(struct sk_buff, data));
			EMIT1_off32(0xE8, jmp_offset); /* call */
			break;

		case BPF_LD | BPF_IND | BPF_H:
			func = sk_load_half;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_H:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_half);
			goto common_load;
		case BPF_LD | BPF_IND | BPF_B:
			func = sk_load_byte;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_B:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_byte);
			goto common_load;

		case BPF_JMP | BPF_EXIT:
			if (seen_exit) {
				jmp_offset = ctx->cleanup_addr - addrs[i];
				goto emit_jmp;
			}
			seen_exit = true;
			/* update cleanup_addr */
			ctx->cleanup_addr = proglen;
			/* mov rbx, qword ptr [rbp+0] */
			EMIT4(0x48, 0x8B, 0x5D, 0);
			/* mov r13, qword ptr [rbp+8] */
			EMIT4(0x4C, 0x8B, 0x6D, 8);
			/* mov r14, qword ptr [rbp+16] */
			EMIT4(0x4C, 0x8B, 0x75, 16);
			/* mov r15, qword ptr [rbp+24] */
			EMIT4(0x4C, 0x8B, 0x7D, 24);

			/* add rbp, AUX_STACK_SPACE */
			EMIT4(0x48, 0x83, 0xC5, AUX_STACK_SPACE);
			EMIT1(0xC9); /* leave */
			EMIT1(0xC3); /* ret */
			break;

		default:
			/* By design x64 JIT should support all BPF instructions.
			 * This error will be seen if new instruction was added
			 * to the interpreter, but not to the JIT,
			 * or if there is junk in bpf_prog
			 */
			pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
			return -EINVAL;
		}

		ilen = prog - temp;
		if (ilen > BPF_MAX_INSN_SIZE) {
			pr_err("bpf_jit: fatal insn size error\n");
			return -EFAULT;
		}

		if (image) {
			if (unlikely(proglen + ilen > oldproglen)) {
				pr_err("bpf_jit: fatal error\n");
				return -EFAULT;
			}
			memcpy(image + proglen, temp, ilen);
		}
		proglen += ilen;
		addrs[i] = proglen;
		prog = temp;
	}
	return proglen;
}
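
/*
 * JIT state kept across bpf_int_jit_compile() invocations so that an
 * extra pass (used for programs with BPF-to-BPF function calls, once
 * call addresses are known) can patch the image in place instead of
 * redoing all the convergence passes.
 */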
struct x64_jit_data {
	struct bpf_binary_header *header;
	int *addrs;
	u8 *image;
	int proglen;
	struct jit_context ctx;
};

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_binary_header *header = NULL;
	struct bpf_prog *tmp, *orig_prog = prog;
	struct x64_jit_data *jit_data;
	int proglen, oldproglen = 0;
	struct jit_context ctx = {};
	bool tmp_blinded = false;
	bool extra_pass = false;
	u8 *image = NULL;
	int *addrs;
	int pass;
	int i;

	if (!prog->jit_requested)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	/* If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	jit_data = prog->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			prog = orig_prog;
			goto out;
		}
		prog->aux->jit_data = jit_data;
	}
	addrs = jit_data->addrs;
	if (addrs) {
		ctx = jit_data->ctx;
		oldproglen = jit_data->proglen;
		image = jit_data->image;
		header = jit_data->header;
		extra_pass = true;
		goto skip_init_addrs;
	}
	addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
	if (!addrs) {
		prog = orig_prog;
		goto out_addrs;
	}

	/* Before first pass, make a rough estimation of addrs[]
	 * each bpf instruction is translated to less than 64 bytes
	 */
	for (proglen = 0, i = 0; i < prog->len; i++) {
		proglen += 64;
		addrs[i] = proglen;
	}
	ctx.cleanup_addr = proglen;
skip_init_addrs:

	/* JITed image shrinks with every pass and the loop iterates
	 * until the image stops shrinking. Very large bpf programs
	 * may converge on the last pass. In such case do one more
	 * pass to emit the final image
	 */
	for (pass = 0; pass < 20 || image; pass++) {
		proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
		if (proglen <= 0) {
out_image:
			image = NULL;
			if (header)
				bpf_jit_binary_free(header);
			prog = orig_prog;
			goto out_addrs;
		}
		if (image) {
			if (proglen != oldproglen) {
				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
				       proglen, oldproglen);
				goto out_image;
			}
			break;
		}
		if (proglen == oldproglen) {
			header = bpf_jit_binary_alloc(proglen, &image,
						      1, jit_fill_hole);
			if (!header) {
				prog = orig_prog;
				goto out_addrs;
			}
		}
		oldproglen = proglen;
		cond_resched();
	}

	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, proglen, pass + 1, image);

	if (image) {
		if (!prog->is_func || extra_pass) {
			bpf_jit_binary_lock_ro(header);
		} else {
			jit_data->addrs = addrs;
			jit_data->ctx = ctx;
			jit_data->proglen = proglen;
			jit_data->image = image;
			jit_data->header = header;
		}
		prog->bpf_func = (void *)image;
		prog->jited = 1;
		prog->jited_len = proglen;
	} else {
		prog = orig_prog;
	}

	if (!image || !prog->is_func || extra_pass) {
out_addrs:
		kfree(addrs);
		kfree(jit_data);
		prog->aux->jit_data = NULL;
	}
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;
}