1
2
3
4
5
6
7
8#include <linux/netdevice.h>
9#include <linux/filter.h>
10#include <linux/if_vlan.h>
11#include <linux/bpf.h>
12#include <linux/memory.h>
13#include <linux/sort.h>
14#include <asm/extable.h>
15#include <asm/set_memory.h>
16#include <asm/nospec-branch.h>
17#include <asm/text-patching.h>
18#include <asm/asm-prototypes.h>
19
20static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
21{
22 if (len == 1)
23 *ptr = bytes;
24 else if (len == 2)
25 *(u16 *)ptr = bytes;
26 else {
27 *(u32 *)ptr = bytes;
28 barrier();
29 }
30 return ptr + len;
31}
32
/* Emit @len bytes of @bytes into the image and account them in 'cnt'.
 * Note: relies on locals named 'prog' and 'cnt' in the calling function.
 */
#define EMIT(bytes, len) \
	do { prog = emit_code(prog, bytes, len); cnt += len; } while (0)

/* Emit 1..4 opcode bytes, first byte in the lowest-order position */
#define EMIT1(b1) EMIT(b1, 1)
#define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)

/* Emit 1..4 opcode bytes followed by a 32-bit immediate/displacement */
#define EMIT1_off32(b1, off) \
	do { EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
	do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
	do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
	do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
49
/* True iff @value fits in a sign-extended 8-bit immediate (imm8) */
static bool is_imm8(int value)
{
	return value >= -128 && value <= 127;
}
54
55static bool is_simm32(s64 value)
56{
57 return value == (s64)(s32)value;
58}
59
60static bool is_uimm32(u64 value)
61{
62 return value == (u64)(u32)value;
63}
64
65
/* 64-bit reg-to-reg move (REX.W + 0x89 /r); elided when DST == SRC */
#define EMIT_mov(DST, SRC) \
	do { \
		if (DST != SRC) \
			EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
	} while (0)
71
/*
 * Translate a BPF size modifier (BPF_B/H/W/DW) into the access width in
 * bytes; returns 0 for an unrecognized size.
 *
 * Fix: BPF_DW (double word) is a 64-bit access and must map to 8 bytes,
 * not 4 — the old value truncated the immediate width computed for
 * BPF_ST | BPF_MEM | BPF_DW stores.
 */
static int bpf_size_to_x86_bytes(int bpf_size)
{
	switch (bpf_size) {
	case BPF_B:
		return 1;
	case BPF_H:
		return 2;
	case BPF_W:
		return 4;
	case BPF_DW:
		return 8;
	default:
		return 0;
	}
}
85
86
87
88
89
/* Short-form (rel8) x86 conditional jump opcodes */
#define X86_JB 0x72	/* jb  — below (unsigned <) */
#define X86_JAE 0x73	/* jae — above or equal (unsigned >=) */
#define X86_JE 0x74	/* je  — equal */
#define X86_JNE 0x75	/* jne — not equal */
#define X86_JBE 0x76	/* jbe — below or equal (unsigned <=) */
#define X86_JA 0x77	/* ja  — above (unsigned >) */
#define X86_JL 0x7C	/* jl  — less (signed <) */
#define X86_JGE 0x7D	/* jge — greater or equal (signed >=) */
#define X86_JLE 0x7E	/* jle — less or equal (signed <=) */
#define X86_JG 0x7F	/* jg  — greater (signed >) */
100
101
/* JIT-internal pseudo registers, outside the BPF register numbering */
#define AUX_REG (MAX_BPF_JIT_REG + 1)		/* scratch (maps to r11) */
#define X86_REG_R9 (MAX_BPF_JIT_REG + 2)	/* maps to r9 */
104
105
106
107
108
109
110
111
112
113
114
/*
 * Map a BPF register to the low 3 bits of its x86-64 register encoding
 * (the ModRM reg/rm field). Registers that additionally need a REX
 * extension bit (R8-R15) are listed in is_ereg().
 */
static const int reg2hex[] = {
	[BPF_REG_0] = 0,	/* RAX */
	[BPF_REG_1] = 7,	/* RDI */
	[BPF_REG_2] = 6,	/* RSI */
	[BPF_REG_3] = 2,	/* RDX */
	[BPF_REG_4] = 1,	/* RCX */
	[BPF_REG_5] = 0,	/* R8 */
	[BPF_REG_6] = 3,	/* RBX, callee saved */
	[BPF_REG_7] = 5,	/* R13, callee saved */
	[BPF_REG_8] = 6,	/* R14, callee saved */
	[BPF_REG_9] = 7,	/* R15, callee saved */
	[BPF_REG_FP] = 5,	/* RBP */
	[BPF_REG_AX] = 2,	/* R10, temp register */
	[AUX_REG] = 3,		/* R11, temp register */
	[X86_REG_R9] = 1,	/* R9 */
};
131
/*
 * pt_regs offset of the x86 register backing each BPF register; used by
 * ex_handler_bpf() to zero the destination of a faulting PROBE_MEM load.
 */
static const int reg2pt_regs[] = {
	[BPF_REG_0] = offsetof(struct pt_regs, ax),
	[BPF_REG_1] = offsetof(struct pt_regs, di),
	[BPF_REG_2] = offsetof(struct pt_regs, si),
	[BPF_REG_3] = offsetof(struct pt_regs, dx),
	[BPF_REG_4] = offsetof(struct pt_regs, cx),
	[BPF_REG_5] = offsetof(struct pt_regs, r8),
	[BPF_REG_6] = offsetof(struct pt_regs, bx),
	[BPF_REG_7] = offsetof(struct pt_regs, r13),
	[BPF_REG_8] = offsetof(struct pt_regs, r14),
	[BPF_REG_9] = offsetof(struct pt_regs, r15),
};
144
145
146
147
148
149
150static bool is_ereg(u32 reg)
151{
152 return (1 << reg) & (BIT(BPF_REG_5) |
153 BIT(AUX_REG) |
154 BIT(BPF_REG_7) |
155 BIT(BPF_REG_8) |
156 BIT(BPF_REG_9) |
157 BIT(X86_REG_R9) |
158 BIT(BPF_REG_AX));
159}
160
161
162
163
164
165
166static bool is_ereg_8l(u32 reg)
167{
168 return is_ereg(reg) ||
169 (1 << reg) & (BIT(BPF_REG_1) |
170 BIT(BPF_REG_2) |
171 BIT(BPF_REG_FP));
172}
173
174static bool is_axreg(u32 reg)
175{
176 return reg == BPF_REG_0;
177}
178
179
180static u8 add_1mod(u8 byte, u32 reg)
181{
182 if (is_ereg(reg))
183 byte |= 1;
184 return byte;
185}
186
187static u8 add_2mod(u8 byte, u32 r1, u32 r2)
188{
189 if (is_ereg(r1))
190 byte |= 1;
191 if (is_ereg(r2))
192 byte |= 4;
193 return byte;
194}
195
196
/* Encode dst_reg into the low 3 bits (ModRM rm field) of @byte */
static u8 add_1reg(u8 byte, u32 dst_reg)
{
	return byte + reg2hex[dst_reg];
}
201
202
/* Encode dst_reg (rm field) and src_reg (reg field) into ModRM @byte */
static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}
207
208
/*
 * x86 opcode for each simple reg-to-reg BPF ALU op; for the shifts the
 * value is the ModRM byte base used with the 0xC1/0xD1/0xD3 shift group.
 */
static u8 simple_alu_opcodes[] = {
	[BPF_ADD] = 0x01,	/* add */
	[BPF_SUB] = 0x29,	/* sub */
	[BPF_AND] = 0x21,	/* and */
	[BPF_OR] = 0x09,	/* or */
	[BPF_XOR] = 0x31,	/* xor */
	[BPF_LSH] = 0xE0,	/* shl */
	[BPF_RSH] = 0xE8,	/* shr */
	[BPF_ARSH] = 0xF8,	/* sar */
};
219
static void jit_fill_hole(void *area, unsigned int size)
{
	/* Fill whole space with int3 (0xcc) so stray jumps trap */
	memset(area, 0xcc, size);
}
225
/* Per-program state carried across JIT passes */
struct jit_context {
	int cleanup_addr; /* presumably the image offset of the epilogue code — use not visible in this chunk */
};
229
230
/* Maximum number of bytes emitted while JITing one eBPF insn */
#define BPF_MAX_INSN_SIZE 128
/* Scratch slack at the end of the temp buffer (emit_code may overstore) */
#define BPF_INSN_SAFETY 64

/* Size of a patchable call/jump site: 1 opcode byte + rel32 */
#define X86_PATCH_SIZE 5

/* Prologue bytes a tail call skips: nop5 (5) + nop2/xor (2) + push rbp (1) + mov rbp,rsp (3) */
#define X86_TAIL_CALL_OFFSET 11
238
/* Push only the callee-saved registers (rbx, r13-r15) the program uses */
static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
{
	u8 *prog = *pprog;
	int cnt = 0;

	if (callee_regs_used[0])
		EMIT1(0x53);         /* push rbx */
	if (callee_regs_used[1])
		EMIT2(0x41, 0x55);   /* push r13 */
	if (callee_regs_used[2])
		EMIT2(0x41, 0x56);   /* push r14 */
	if (callee_regs_used[3])
		EMIT2(0x41, 0x57);   /* push r15 */
	*pprog = prog;
}
254
/* Pop the used callee-saved registers, in reverse push order */
static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
{
	u8 *prog = *pprog;
	int cnt = 0;

	if (callee_regs_used[3])
		EMIT2(0x41, 0x5F);   /* pop r15 */
	if (callee_regs_used[2])
		EMIT2(0x41, 0x5E);   /* pop r14 */
	if (callee_regs_used[1])
		EMIT2(0x41, 0x5D);   /* pop r13 */
	if (callee_regs_used[0])
		EMIT1(0x5B);         /* pop rbx */
	*pprog = prog;
}
270
271
272
273
274
275
/*
 * Emit the fixed-size x86-64 BPF prologue:
 *   nop5                    — patchable site (e.g. for BPF trampolines)
 *   xor eax,eax / nop2      — zero the tail-call counter, or pad (cBPF)
 *   push rbp; mov rbp,rsp   — standard frame
 *   sub rsp, stack_depth    — program stack, if any
 *   push rax                — save tail-call counter, if reachable
 */
static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
			  bool tail_call_reachable, bool is_subprog)
{
	u8 *prog = *pprog;
	int cnt = X86_PATCH_SIZE;

	/* Reserve 5 atomically-patchable nop bytes at the entry */
	memcpy(prog, ideal_nops[NOP_ATOMIC5], cnt);
	prog += cnt;
	if (!ebpf_from_cbpf) {
		if (tail_call_reachable && !is_subprog)
			EMIT2(0x31, 0xC0); /* xor eax, eax — init tail-call count */
		else
			EMIT2(0x66, 0x90); /* nop2 — keep prologue size constant */
	}
	EMIT1(0x55);             /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
	/* sub rsp, rounded_stack_depth */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
	if (tail_call_reachable)
		EMIT1(0x50);     /* push rax — tail-call counter slot */
	*pprog = prog;
}
302
/*
 * Emit @opcode followed by a rel32 displacement targeting @func from the
 * patch site @ip. Returns -ERANGE when the target is out of rel32 reach.
 */
static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
{
	u8 *prog = *pprog;
	int cnt = 0;
	s64 offset;

	/* rel32 is relative to the end of the 5-byte instruction */
	offset = func - (ip + X86_PATCH_SIZE);
	if (!is_simm32(offset)) {
		pr_err("Target call %p is out of range\n", func);
		return -ERANGE;
	}
	EMIT1_off32(opcode, offset);
	*pprog = prog;
	return 0;
}
318
/* call rel32 */
static int emit_call(u8 **pprog, void *func, void *ip)
{
	return emit_patch(pprog, func, ip, 0xE8);
}
323
/* jmp rel32 */
static int emit_jump(u8 **pprog, void *func, void *ip)
{
	return emit_patch(pprog, func, ip, 0xE9);
}
328
/*
 * Replace the 5-byte call/jump (or nop5) at @ip. @old_addr/@new_addr of
 * NULL stand for "nop5". Returns 0 when patched, 1 when the site already
 * holds the new bytes, -EBUSY when the current bytes don't match the
 * expected old sequence, or a negative emit error.
 */
static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
				void *old_addr, void *new_addr,
				const bool text_live)
{
	const u8 *nop_insn = ideal_nops[NOP_ATOMIC5];
	u8 old_insn[X86_PATCH_SIZE];
	u8 new_insn[X86_PATCH_SIZE];
	u8 *prog;
	int ret;

	/* Build the bytes we expect at the site right now */
	memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
	if (old_addr) {
		prog = old_insn;
		ret = t == BPF_MOD_CALL ?
		      emit_call(&prog, old_addr, ip) :
		      emit_jump(&prog, old_addr, ip);
		if (ret)
			return ret;
	}

	/* Build the replacement bytes the same way */
	memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
	if (new_addr) {
		prog = new_insn;
		ret = t == BPF_MOD_CALL ?
		      emit_call(&prog, new_addr, ip) :
		      emit_jump(&prog, new_addr, ip);
		if (ret)
			return ret;
	}

	ret = -EBUSY;
	mutex_lock(&text_mutex);
	/* Refuse to patch a site that doesn't contain what we expect */
	if (memcmp(ip, old_insn, X86_PATCH_SIZE))
		goto out;
	ret = 1;
	if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
		if (text_live)
			/* Site may be executing — use breakpoint-based poking */
			text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
		else
			/* Image not live yet — a plain memcpy is safe */
			memcpy(ip, new_insn, X86_PATCH_SIZE);
		ret = 0;
	}
out:
	mutex_unlock(&text_mutex);
	return ret;
}
375
/*
 * Public poke entry point: only core-kernel or BPF text may be patched,
 * and the target is assumed live (may be executing concurrently).
 */
int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *old_addr, void *new_addr)
{
	if (!is_kernel_text((long)ip) &&
	    !is_bpf_text_address((long)ip))
		/* Anything else (e.g. module text) is not supported */
		return -EINVAL;

	return __bpf_arch_text_poke(ip, t, old_addr, new_addr, true);
}
386
/*
 * Byte count of the sequence pop_callee_regs() emits for this usage
 * mask: "pop rbx" is 1 byte, the REX-prefixed pops of r13-r15 are 2.
 */
static int get_pop_bytes(bool *callee_regs_used)
{
	int i, bytes = 0;

	for (i = 0; i < 4; i++) {
		if (!callee_regs_used[i])
			continue;
		bytes += i ? 2 : 1;
	}

	return bytes;
}
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
/*
 * Emit an indirect tail call (the prog-array index is in rdx):
 *
 *   if (index >= array->map.max_entries) goto out;
 *   if (tail_call_cnt++ > MAX_TAIL_CALL_CNT) goto out;
 *   prog = array->ptrs[index];
 *   if (prog == NULL) goto out;
 *   goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET);
 * out:
 *
 * off1/off2/off3 are hand-counted rel8 distances from each conditional
 * branch to "out"; they grow by the callee-reg pop bytes and by the
 * 7-byte "add rsp, imm32" when the program has a stack.
 */
static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used,
					u32 stack_depth)
{
	int tcc_off = -4 - round_up(stack_depth, 8);	/* tail-call counter slot */
	u8 *prog = *pprog;
	int pop_bytes = 0;
	int off1 = 42;
	int off2 = 31;
	int off3 = 9;
	int cnt = 0;

	/* Branch targets move by however many pop bytes are emitted */
	pop_bytes = get_pop_bytes(callee_regs_used);
	off1 += pop_bytes;
	off2 += pop_bytes;
	off3 += pop_bytes;

	if (stack_depth) {
		/* "add rsp, imm32" on the exit path is 7 bytes */
		off1 += 7;
		off2 += 7;
		off3 += 7;
	}

	/*
	 * rsi holds the bpf_array pointer, rdx the index:
	 * if (index >= array->map.max_entries)
	 *	goto out;
	 */
	EMIT2(0x89, 0xD2);	/* mov edx, edx — truncate index to 32 bit */
	EMIT3(0x39, 0x56,	/* cmp dword ptr [rsi + off], edx */
	      offsetof(struct bpf_array, map.max_entries));
#define OFFSET1 (off1 + RETPOLINE_RCX_BPF_JIT_SIZE)
	EMIT2(X86_JBE, OFFSET1);	/* jbe out */

	/*
	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *	goto out;
	 */
	EMIT2_off32(0x8B, 0x85, tcc_off);	/* mov eax, dword ptr [rbp + tcc_off] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);	/* cmp eax, MAX_TAIL_CALL_CNT */
#define OFFSET2 (off2 + RETPOLINE_RCX_BPF_JIT_SIZE)
	EMIT2(X86_JA, OFFSET2);			/* ja out */
	EMIT3(0x83, 0xC0, 0x01);		/* add eax, 1 */
	EMIT2_off32(0x89, 0x85, tcc_off);	/* mov dword ptr [rbp + tcc_off], eax */

	/* prog = array->ptrs[index]; */
	EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6,	/* mov rcx, [rsi + rdx * 8 + off] */
		    offsetof(struct bpf_array, ptrs));

	/*
	 * if (prog == NULL)
	 *	goto out;
	 */
	EMIT3(0x48, 0x85, 0xC9);	/* test rcx, rcx */
#define OFFSET3 (off3 + RETPOLINE_RCX_BPF_JIT_SIZE)
	EMIT2(X86_JE, OFFSET3);		/* je out */

	*pprog = prog;
	pop_callee_regs(pprog, callee_regs_used);
	prog = *pprog;

	EMIT1(0x58);			/* pop rax — tail-call counter */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xC4,	/* add rsp, stack size */
			    round_up(stack_depth, 8));

	/* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
	EMIT4(0x48, 0x8B, 0x49,		/* mov rcx, qword ptr [rcx + off] */
	      offsetof(struct bpf_prog, bpf_func));
	EMIT4(0x48, 0x83, 0xC1,		/* add rcx, X86_TAIL_CALL_OFFSET */
	      X86_TAIL_CALL_OFFSET);

	/*
	 * Jump into the callee, skipping its prologue prefix; uses a
	 * retpoline-safe indirect jump through rcx.
	 */
	RETPOLINE_RCX_BPF_JIT();

	/* out: */
	*pprog = prog;
}
506
/*
 * Emit a direct tail call whose target is patched in later via the poke
 * descriptor: only the tail-call counter is checked here (the constant
 * index was validated at verification time). A nop5 placeholder is laid
 * down for the eventual "jmp target", preceded by a bypass jump over it;
 * both site addresses are recorded in @poke for bpf_arch_text_poke().
 */
static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
				      u8 **pprog, int addr, u8 *image,
				      bool *callee_regs_used, u32 stack_depth)
{
	int tcc_off = -4 - round_up(stack_depth, 8);	/* tail-call counter slot */
	u8 *prog = *pprog;
	int pop_bytes = 0;
	int off1 = 20;
	int poke_off;
	int cnt = 0;

	/* The rel8 to the fall-through path grows with the emitted pops */
	pop_bytes = get_pop_bytes(callee_regs_used);
	off1 += pop_bytes;

	/*
	 * Distance from the bypass jump back over:
	 * - the nop5 / patched jmp
	 * - the callee-reg pops
	 * - "add rsp, imm32" (7 bytes) if there is a stack
	 * - "pop rax" (1 byte)
	 */
	poke_off = X86_PATCH_SIZE + pop_bytes + 1;
	if (stack_depth) {
		poke_off += 7;
		off1 += 7;
	}

	/*
	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *	goto out;
	 */
	EMIT2_off32(0x8B, 0x85, tcc_off);	/* mov eax, dword ptr [rbp + tcc_off] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);	/* cmp eax, MAX_TAIL_CALL_CNT */
	EMIT2(X86_JA, off1);			/* ja out */
	EMIT3(0x83, 0xC0, 0x01);		/* add eax, 1 */
	EMIT2_off32(0x89, 0x85, tcc_off);	/* mov dword ptr [rbp + tcc_off], eax */

	/* Record the final-image addresses of both patch sites */
	poke->tailcall_bypass = image + (addr - poke_off - X86_PATCH_SIZE);
	poke->adj_off = X86_TAIL_CALL_OFFSET;
	poke->tailcall_target = image + (addr - X86_PATCH_SIZE);
	poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE;

	/* Initially the bypass jumps over the (still nop5) target site */
	emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
		  poke->tailcall_bypass);

	*pprog = prog;
	pop_callee_regs(pprog, callee_regs_used);
	prog = *pprog;
	EMIT1(0x58);				/* pop rax — tail-call counter */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));

	/* nop5 placeholder, later patched into "jmp target" */
	memcpy(prog, ideal_nops[NOP_ATOMIC5], X86_PATCH_SIZE);
	prog += X86_PATCH_SIZE;

	/* out: */
	*pprog = prog;
}
569
/*
 * After the image is finalized, wire up every direct tail-call site: when
 * a target program is already installed in the array slot, patch the nop5
 * into a jump to it and nop out the bypass; then mark the descriptor
 * stable so regular map updates take over the patching.
 */
static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
{
	struct bpf_jit_poke_descriptor *poke;
	struct bpf_array *array;
	struct bpf_prog *target;
	int i, ret;

	for (i = 0; i < prog->aux->size_poke_tab; i++) {
		poke = &prog->aux->poke_tab[i];
		/* No descriptor should be stable before this fixup runs */
		WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));

		if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
			continue;

		array = container_of(poke->tail_call.map, struct bpf_array, map);
		mutex_lock(&array->aux->poke_mutex);
		target = array->ptrs[poke->tail_call.key];
		if (target) {
			/*
			 * The image is not live yet, so text_live == false
			 * and __bpf_arch_text_poke() patches via memcpy.
			 */
			ret = __bpf_arch_text_poke(poke->tailcall_target,
						   BPF_MOD_JUMP, NULL,
						   (u8 *)target->bpf_func +
						   poke->adj_off, false);
			BUG_ON(ret < 0);
			ret = __bpf_arch_text_poke(poke->tailcall_bypass,
						   BPF_MOD_JUMP,
						   (u8 *)poke->tailcall_target +
						   X86_PATCH_SIZE, NULL, false);
			BUG_ON(ret < 0);
		}
		WRITE_ONCE(poke->tailcall_target_stable, true);
		mutex_unlock(&array->aux->poke_mutex);
	}
}
612
/*
 * Load a 32-bit immediate into @dst_reg using the shortest encoding.
 * With @sign_propagate, a negative immediate is sign-extended to 64 bit.
 */
static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
			   u32 dst_reg, const u32 imm32)
{
	u8 *prog = *pprog;
	u8 b1, b2, b3;
	int cnt = 0;

	/*
	 * A non-negative immediate can use the plain 32-bit mov below
	 * (which zero-extends), saving the REX.W form's extra bytes.
	 */
	if (sign_propagate && (s32)imm32 < 0) {
		/* 'mov rax, imm32' sign-extends imm32 */
		b1 = add_1mod(0x48, dst_reg);
		b2 = 0xC7;
		b3 = 0xC0;
		EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
		goto done;
	}

	/*
	 * For imm32 == 0, 'xor eax, eax' is shorter than a mov with an
	 * explicit zero immediate.
	 */
	if (imm32 == 0) {
		if (is_ereg(dst_reg))
			EMIT1(add_2mod(0x40, dst_reg, dst_reg));
		b2 = 0x31; /* xor */
		b3 = 0xC0;
		EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
		goto done;
	}

	/* mov eax, imm32 */
	if (is_ereg(dst_reg))
		EMIT1(add_1mod(0x40, dst_reg));
	EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
done:
	*pprog = prog;
}
653
/* Load a 64-bit immediate, using a shorter 32-bit mov when it fits */
static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
			   const u32 imm32_hi, const u32 imm32_lo)
{
	u8 *prog = *pprog;
	int cnt = 0;

	if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
		/*
		 * High half is zero: the 32-bit mov zero-extends into the
		 * full 64-bit register, saving bytes over movabs.
		 */
		emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
	} else {
		/* movabs rax, imm64 — opcode then the 8 immediate bytes */
		EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
		EMIT(imm32_lo, 4);
		EMIT(imm32_hi, 4);
	}

	*pprog = prog;
}
677
/* Register-to-register move: 64-bit, or 32-bit (which zero-extends) */
static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
{
	u8 *prog = *pprog;
	int cnt = 0;

	if (is64) {
		/* mov dst, src */
		EMIT_mov(dst_reg, src_reg);
	} else {
		/* mov32 dst, src */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT1(add_2mod(0x40, dst_reg, src_reg));
		EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
	}

	*pprog = prog;
}
695
696
/* Emit the ModRM byte plus displacement for a [ptr_reg + off] operand */
static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
{
	u8 *prog = *pprog;
	int cnt = 0;

	if (is_imm8(off)) {
		/*
		 * mod = 01: 8-bit signed displacement. A zero offset could
		 * use mod = 00 and save a byte, but rbp/r13 always require
		 * a displacement in that encoding, so keep it uniform.
		 */
		EMIT2(add_2reg(0x40, ptr_reg, val_reg), off);
	} else {
		/* mod = 10: 32-bit signed displacement */
		EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off);
	}
	*pprog = prog;
}
716
717
718
719
/*
 * Emit a REX prefix when needed: always REX.W for 64-bit operations,
 * otherwise only when one of the two registers is extended.
 */
static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64)
{
	u8 *prog = *pprog;
	int cnt = 0;

	if (is64)
		EMIT1(add_2mod(0x48, dst_reg, src_reg));
	else if (is_ereg(dst_reg) || is_ereg(src_reg))
		EMIT1(add_2mod(0x40, dst_reg, src_reg));
	*pprog = prog;
}
731
732
/* LDX: dst_reg = *(size *)(src_reg + off) — zero-extending loads */
static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	u8 *prog = *pprog;
	int cnt = 0;

	switch (size) {
	case BPF_B:
		/* movzx rax, byte ptr [rax + off] */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
		break;
	case BPF_H:
		/* movzx rax, word ptr [rax + off] */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
		break;
	case BPF_W:
		/* mov eax, dword ptr [rax + off] */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
		else
			EMIT1(0x8B);
		break;
	case BPF_DW:
		/* mov rax, qword ptr [rax + off] */
		EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
		break;
	}
	emit_insn_suffix(&prog, src_reg, dst_reg, off);
	*pprog = prog;
}
762
763
/* STX: *(size *)(dst_reg + off) = src_reg */
static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	u8 *prog = *pprog;
	int cnt = 0;

	switch (size) {
	case BPF_B:
		/* mov byte ptr [rax + off], al */
		if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
			/* Add empty REX prefix so dil/sil/bpl are addressable */
			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
		else
			EMIT1(0x88);
		break;
	case BPF_H:
		/* 0x66 operand-size prefix selects the 16-bit form */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
		else
			EMIT2(0x66, 0x89);
		break;
	case BPF_W:
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
		else
			EMIT1(0x89);
		break;
	case BPF_DW:
		/* REX.W selects the 64-bit form */
		EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
		break;
	}
	emit_insn_suffix(&prog, dst_reg, src_reg, off);
	*pprog = prog;
}
797
/*
 * Emit a LOCK-prefixed atomic read-modify-write on (dst_reg + off).
 * Fetch variants also return the previous value through src_reg (or r0
 * for CMPXCHG). Returns 0, or -EFAULT for an unknown atomic op.
 */
static int emit_atomic(u8 **pprog, u8 atomic_op,
		       u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
{
	u8 *prog = *pprog;
	int cnt = 0;

	EMIT1(0xF0); /* lock prefix */

	/* REX.W only for the 64-bit (BPF_DW) form */
	maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW);

	/* Opcode for the requested operation */
	switch (atomic_op) {
	case BPF_ADD:
	case BPF_SUB:
	case BPF_AND:
	case BPF_OR:
	case BPF_XOR:
		/* lock mem <op>= src_reg, no value returned */
		EMIT1(simple_alu_opcodes[atomic_op]);
		break;
	case BPF_ADD | BPF_FETCH:
		/* src_reg = atomic_fetch_add(dst_reg + off, src_reg) — xadd */
		EMIT2(0x0F, 0xC1);
		break;
	case BPF_XCHG:
		/* src_reg = atomic_xchg(dst_reg + off, src_reg) */
		EMIT1(0x87);
		break;
	case BPF_CMPXCHG:
		/* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg) */
		EMIT2(0x0F, 0xB1);
		break;
	default:
		pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
		return -EFAULT;
	}

	emit_insn_suffix(&prog, dst_reg, src_reg, off);

	*pprog = prog;
	return 0;
}
840
/*
 * Exception handler for faulting BPF_PROBE_MEM loads: ex->fixup packs the
 * pt_regs offset of the destination register in bits 8+ and the faulting
 * insn length in the low byte.
 */
static bool ex_handler_bpf(const struct exception_table_entry *x,
			   struct pt_regs *regs, int trapnr,
			   unsigned long error_code, unsigned long fault_addr)
{
	u32 reg = x->fixup >> 8;

	/* Clear the destination register and skip over the faulting load */
	*(unsigned long *)((void *)regs + reg) = 0;
	regs->ip += x->fixup & 0xff;
	return true;
}
852
853static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt,
854 bool *regs_used, bool *tail_call_seen)
855{
856 int i;
857
858 for (i = 1; i <= insn_cnt; i++, insn++) {
859 if (insn->code == (BPF_JMP | BPF_TAIL_CALL))
860 *tail_call_seen = true;
861 if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6)
862 regs_used[0] = true;
863 if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7)
864 regs_used[1] = true;
865 if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8)
866 regs_used[2] = true;
867 if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9)
868 regs_used[3] = true;
869 }
870}
871
/* Pad the image with @len bytes of ideal nops; returns bytes emitted */
static int emit_nops(u8 **pprog, int len)
{
	u8 *prog = *pprog;
	int i, noplen, cnt = 0;

	while (len > 0) {
		noplen = len;

		/* The longest single ideal nop is ASM_NOP_MAX bytes */
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;

		for (i = 0; i < noplen; i++)
			EMIT1(ideal_nops[noplen][i]);
		len -= noplen;
	}

	*pprog = prog;

	return cnt;
}
892
/*
 * Size delta for the insn being emitted: bytes the previous JIT pass used
 * (addrs[i] - addrs[i - 1]) minus bytes emitted so far in this pass —
 * presumably used for nop padding when an encoding shrinks; confirm in do_jit().
 */
#define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
894
895static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
896 int oldproglen, struct jit_context *ctx, bool jmp_padding)
897{
898 bool tail_call_reachable = bpf_prog->aux->tail_call_reachable;
899 struct bpf_insn *insn = bpf_prog->insnsi;
900 bool callee_regs_used[4] = {};
901 int insn_cnt = bpf_prog->len;
902 bool tail_call_seen = false;
903 bool seen_exit = false;
904 u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
905 int i, cnt = 0, excnt = 0;
906 int ilen, proglen = 0;
907 u8 *prog = temp;
908 int err;
909
910 detect_reg_usage(insn, insn_cnt, callee_regs_used,
911 &tail_call_seen);
912
913
914 tail_call_reachable |= tail_call_seen;
915
916 emit_prologue(&prog, bpf_prog->aux->stack_depth,
917 bpf_prog_was_classic(bpf_prog), tail_call_reachable,
918 bpf_prog->aux->func_idx != 0);
919 push_callee_regs(&prog, callee_regs_used);
920
921 ilen = prog - temp;
922 if (image)
923 memcpy(image + proglen, temp, ilen);
924 proglen += ilen;
925 addrs[0] = proglen;
926 prog = temp;
927
928 for (i = 1; i <= insn_cnt; i++, insn++) {
929 const s32 imm32 = insn->imm;
930 u32 dst_reg = insn->dst_reg;
931 u32 src_reg = insn->src_reg;
932 u8 b2 = 0, b3 = 0;
933 u8 *start_of_ldx;
934 s64 jmp_offset;
935 u8 jmp_cond;
936 u8 *func;
937 int nops;
938
939 switch (insn->code) {
940
941 case BPF_ALU | BPF_ADD | BPF_X:
942 case BPF_ALU | BPF_SUB | BPF_X:
943 case BPF_ALU | BPF_AND | BPF_X:
944 case BPF_ALU | BPF_OR | BPF_X:
945 case BPF_ALU | BPF_XOR | BPF_X:
946 case BPF_ALU64 | BPF_ADD | BPF_X:
947 case BPF_ALU64 | BPF_SUB | BPF_X:
948 case BPF_ALU64 | BPF_AND | BPF_X:
949 case BPF_ALU64 | BPF_OR | BPF_X:
950 case BPF_ALU64 | BPF_XOR | BPF_X:
951 maybe_emit_mod(&prog, dst_reg, src_reg,
952 BPF_CLASS(insn->code) == BPF_ALU64);
953 b2 = simple_alu_opcodes[BPF_OP(insn->code)];
954 EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
955 break;
956
957 case BPF_ALU64 | BPF_MOV | BPF_X:
958 case BPF_ALU | BPF_MOV | BPF_X:
959 emit_mov_reg(&prog,
960 BPF_CLASS(insn->code) == BPF_ALU64,
961 dst_reg, src_reg);
962 break;
963
964
965 case BPF_ALU | BPF_NEG:
966 case BPF_ALU64 | BPF_NEG:
967 if (BPF_CLASS(insn->code) == BPF_ALU64)
968 EMIT1(add_1mod(0x48, dst_reg));
969 else if (is_ereg(dst_reg))
970 EMIT1(add_1mod(0x40, dst_reg));
971 EMIT2(0xF7, add_1reg(0xD8, dst_reg));
972 break;
973
974 case BPF_ALU | BPF_ADD | BPF_K:
975 case BPF_ALU | BPF_SUB | BPF_K:
976 case BPF_ALU | BPF_AND | BPF_K:
977 case BPF_ALU | BPF_OR | BPF_K:
978 case BPF_ALU | BPF_XOR | BPF_K:
979 case BPF_ALU64 | BPF_ADD | BPF_K:
980 case BPF_ALU64 | BPF_SUB | BPF_K:
981 case BPF_ALU64 | BPF_AND | BPF_K:
982 case BPF_ALU64 | BPF_OR | BPF_K:
983 case BPF_ALU64 | BPF_XOR | BPF_K:
984 if (BPF_CLASS(insn->code) == BPF_ALU64)
985 EMIT1(add_1mod(0x48, dst_reg));
986 else if (is_ereg(dst_reg))
987 EMIT1(add_1mod(0x40, dst_reg));
988
989
990
991
992
993 switch (BPF_OP(insn->code)) {
994 case BPF_ADD:
995 b3 = 0xC0;
996 b2 = 0x05;
997 break;
998 case BPF_SUB:
999 b3 = 0xE8;
1000 b2 = 0x2D;
1001 break;
1002 case BPF_AND:
1003 b3 = 0xE0;
1004 b2 = 0x25;
1005 break;
1006 case BPF_OR:
1007 b3 = 0xC8;
1008 b2 = 0x0D;
1009 break;
1010 case BPF_XOR:
1011 b3 = 0xF0;
1012 b2 = 0x35;
1013 break;
1014 }
1015
1016 if (is_imm8(imm32))
1017 EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
1018 else if (is_axreg(dst_reg))
1019 EMIT1_off32(b2, imm32);
1020 else
1021 EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
1022 break;
1023
1024 case BPF_ALU64 | BPF_MOV | BPF_K:
1025 case BPF_ALU | BPF_MOV | BPF_K:
1026 emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
1027 dst_reg, imm32);
1028 break;
1029
1030 case BPF_LD | BPF_IMM | BPF_DW:
1031 emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
1032 insn++;
1033 i++;
1034 break;
1035
1036
1037 case BPF_ALU | BPF_MOD | BPF_X:
1038 case BPF_ALU | BPF_DIV | BPF_X:
1039 case BPF_ALU | BPF_MOD | BPF_K:
1040 case BPF_ALU | BPF_DIV | BPF_K:
1041 case BPF_ALU64 | BPF_MOD | BPF_X:
1042 case BPF_ALU64 | BPF_DIV | BPF_X:
1043 case BPF_ALU64 | BPF_MOD | BPF_K:
1044 case BPF_ALU64 | BPF_DIV | BPF_K:
1045 EMIT1(0x50);
1046 EMIT1(0x52);
1047
1048 if (BPF_SRC(insn->code) == BPF_X)
1049
1050 EMIT_mov(AUX_REG, src_reg);
1051 else
1052
1053 EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
1054
1055
1056 EMIT_mov(BPF_REG_0, dst_reg);
1057
1058
1059
1060
1061
1062 EMIT2(0x31, 0xd2);
1063
1064 if (BPF_CLASS(insn->code) == BPF_ALU64)
1065
1066 EMIT3(0x49, 0xF7, 0xF3);
1067 else
1068
1069 EMIT3(0x41, 0xF7, 0xF3);
1070
1071 if (BPF_OP(insn->code) == BPF_MOD)
1072
1073 EMIT3(0x49, 0x89, 0xD3);
1074 else
1075
1076 EMIT3(0x49, 0x89, 0xC3);
1077
1078 EMIT1(0x5A);
1079 EMIT1(0x58);
1080
1081
1082 EMIT_mov(dst_reg, AUX_REG);
1083 break;
1084
1085 case BPF_ALU | BPF_MUL | BPF_K:
1086 case BPF_ALU | BPF_MUL | BPF_X:
1087 case BPF_ALU64 | BPF_MUL | BPF_K:
1088 case BPF_ALU64 | BPF_MUL | BPF_X:
1089 {
1090 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
1091
1092 if (dst_reg != BPF_REG_0)
1093 EMIT1(0x50);
1094 if (dst_reg != BPF_REG_3)
1095 EMIT1(0x52);
1096
1097
1098 EMIT_mov(AUX_REG, dst_reg);
1099
1100 if (BPF_SRC(insn->code) == BPF_X)
1101 emit_mov_reg(&prog, is64, BPF_REG_0, src_reg);
1102 else
1103 emit_mov_imm32(&prog, is64, BPF_REG_0, imm32);
1104
1105 if (is64)
1106 EMIT1(add_1mod(0x48, AUX_REG));
1107 else if (is_ereg(AUX_REG))
1108 EMIT1(add_1mod(0x40, AUX_REG));
1109
1110 EMIT2(0xF7, add_1reg(0xE0, AUX_REG));
1111
1112 if (dst_reg != BPF_REG_3)
1113 EMIT1(0x5A);
1114 if (dst_reg != BPF_REG_0) {
1115
1116 EMIT_mov(dst_reg, BPF_REG_0);
1117 EMIT1(0x58);
1118 }
1119 break;
1120 }
1121
1122 case BPF_ALU | BPF_LSH | BPF_K:
1123 case BPF_ALU | BPF_RSH | BPF_K:
1124 case BPF_ALU | BPF_ARSH | BPF_K:
1125 case BPF_ALU64 | BPF_LSH | BPF_K:
1126 case BPF_ALU64 | BPF_RSH | BPF_K:
1127 case BPF_ALU64 | BPF_ARSH | BPF_K:
1128 if (BPF_CLASS(insn->code) == BPF_ALU64)
1129 EMIT1(add_1mod(0x48, dst_reg));
1130 else if (is_ereg(dst_reg))
1131 EMIT1(add_1mod(0x40, dst_reg));
1132
1133 b3 = simple_alu_opcodes[BPF_OP(insn->code)];
1134 if (imm32 == 1)
1135 EMIT2(0xD1, add_1reg(b3, dst_reg));
1136 else
1137 EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
1138 break;
1139
1140 case BPF_ALU | BPF_LSH | BPF_X:
1141 case BPF_ALU | BPF_RSH | BPF_X:
1142 case BPF_ALU | BPF_ARSH | BPF_X:
1143 case BPF_ALU64 | BPF_LSH | BPF_X:
1144 case BPF_ALU64 | BPF_RSH | BPF_X:
1145 case BPF_ALU64 | BPF_ARSH | BPF_X:
1146
1147
1148 if (dst_reg == BPF_REG_4) {
1149
1150 EMIT_mov(AUX_REG, dst_reg);
1151 dst_reg = AUX_REG;
1152 }
1153
1154 if (src_reg != BPF_REG_4) {
1155 EMIT1(0x51);
1156
1157
1158 EMIT_mov(BPF_REG_4, src_reg);
1159 }
1160
1161
1162 if (BPF_CLASS(insn->code) == BPF_ALU64)
1163 EMIT1(add_1mod(0x48, dst_reg));
1164 else if (is_ereg(dst_reg))
1165 EMIT1(add_1mod(0x40, dst_reg));
1166
1167 b3 = simple_alu_opcodes[BPF_OP(insn->code)];
1168 EMIT2(0xD3, add_1reg(b3, dst_reg));
1169
1170 if (src_reg != BPF_REG_4)
1171 EMIT1(0x59);
1172
1173 if (insn->dst_reg == BPF_REG_4)
1174
1175 EMIT_mov(insn->dst_reg, AUX_REG);
1176 break;
1177
1178 case BPF_ALU | BPF_END | BPF_FROM_BE:
1179 switch (imm32) {
1180 case 16:
1181
1182 EMIT1(0x66);
1183 if (is_ereg(dst_reg))
1184 EMIT1(0x41);
1185 EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
1186
1187
1188 if (is_ereg(dst_reg))
1189 EMIT3(0x45, 0x0F, 0xB7);
1190 else
1191 EMIT2(0x0F, 0xB7);
1192 EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
1193 break;
1194 case 32:
1195
1196 if (is_ereg(dst_reg))
1197 EMIT2(0x41, 0x0F);
1198 else
1199 EMIT1(0x0F);
1200 EMIT1(add_1reg(0xC8, dst_reg));
1201 break;
1202 case 64:
1203
1204 EMIT3(add_1mod(0x48, dst_reg), 0x0F,
1205 add_1reg(0xC8, dst_reg));
1206 break;
1207 }
1208 break;
1209
1210 case BPF_ALU | BPF_END | BPF_FROM_LE:
1211 switch (imm32) {
1212 case 16:
1213
1214
1215
1216
1217 if (is_ereg(dst_reg))
1218 EMIT3(0x45, 0x0F, 0xB7);
1219 else
1220 EMIT2(0x0F, 0xB7);
1221 EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
1222 break;
1223 case 32:
1224
1225 if (is_ereg(dst_reg))
1226 EMIT1(0x45);
1227 EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
1228 break;
1229 case 64:
1230
1231 break;
1232 }
1233 break;
1234
1235
1236 case BPF_ST | BPF_MEM | BPF_B:
1237 if (is_ereg(dst_reg))
1238 EMIT2(0x41, 0xC6);
1239 else
1240 EMIT1(0xC6);
1241 goto st;
1242 case BPF_ST | BPF_MEM | BPF_H:
1243 if (is_ereg(dst_reg))
1244 EMIT3(0x66, 0x41, 0xC7);
1245 else
1246 EMIT2(0x66, 0xC7);
1247 goto st;
1248 case BPF_ST | BPF_MEM | BPF_W:
1249 if (is_ereg(dst_reg))
1250 EMIT2(0x41, 0xC7);
1251 else
1252 EMIT1(0xC7);
1253 goto st;
1254 case BPF_ST | BPF_MEM | BPF_DW:
1255 EMIT2(add_1mod(0x48, dst_reg), 0xC7);
1256
1257st: if (is_imm8(insn->off))
1258 EMIT2(add_1reg(0x40, dst_reg), insn->off);
1259 else
1260 EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);
1261
1262 EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
1263 break;
1264
1265
1266 case BPF_STX | BPF_MEM | BPF_B:
1267 case BPF_STX | BPF_MEM | BPF_H:
1268 case BPF_STX | BPF_MEM | BPF_W:
1269 case BPF_STX | BPF_MEM | BPF_DW:
1270 emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1271 break;
1272
1273
1274 case BPF_LDX | BPF_MEM | BPF_B:
1275 case BPF_LDX | BPF_PROBE_MEM | BPF_B:
1276 case BPF_LDX | BPF_MEM | BPF_H:
1277 case BPF_LDX | BPF_PROBE_MEM | BPF_H:
1278 case BPF_LDX | BPF_MEM | BPF_W:
1279 case BPF_LDX | BPF_PROBE_MEM | BPF_W:
1280 case BPF_LDX | BPF_MEM | BPF_DW:
1281 case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
1282 if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
1283
1284 maybe_emit_mod(&prog, src_reg, src_reg, true);
1285 EMIT2(0x85, add_2reg(0xC0, src_reg, src_reg));
1286
1287 EMIT2(X86_JNE, 0);
1288
1289 emit_mov_imm32(&prog, false, dst_reg, 0);
1290
1291 EMIT2(0xEB, 0);
1292
1293
1294 temp[4] = prog - temp - 5 ;
1295 start_of_ldx = prog;
1296 }
1297 emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1298 if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
1299 struct exception_table_entry *ex;
1300 u8 *_insn = image + proglen;
1301 s64 delta;
1302
1303
1304 start_of_ldx[-1] = prog - start_of_ldx;
1305
1306 if (!bpf_prog->aux->extable)
1307 break;
1308
1309 if (excnt >= bpf_prog->aux->num_exentries) {
1310 pr_err("ex gen bug\n");
1311 return -EFAULT;
1312 }
1313 ex = &bpf_prog->aux->extable[excnt++];
1314
1315 delta = _insn - (u8 *)&ex->insn;
1316 if (!is_simm32(delta)) {
1317 pr_err("extable->insn doesn't fit into 32-bit\n");
1318 return -EFAULT;
1319 }
1320 ex->insn = delta;
1321
1322 delta = (u8 *)ex_handler_bpf - (u8 *)&ex->handler;
1323 if (!is_simm32(delta)) {
1324 pr_err("extable->handler doesn't fit into 32-bit\n");
1325 return -EFAULT;
1326 }
1327 ex->handler = delta;
1328
1329 if (dst_reg > BPF_REG_9) {
1330 pr_err("verifier error\n");
1331 return -EFAULT;
1332 }
1333
1334
1335
1336
1337
1338
1339
1340
1341 ex->fixup = (prog - temp) | (reg2pt_regs[dst_reg] << 8);
1342 }
1343 break;
1344
1345 case BPF_STX | BPF_ATOMIC | BPF_W:
1346 case BPF_STX | BPF_ATOMIC | BPF_DW:
1347 if (insn->imm == (BPF_AND | BPF_FETCH) ||
1348 insn->imm == (BPF_OR | BPF_FETCH) ||
1349 insn->imm == (BPF_XOR | BPF_FETCH)) {
1350 u8 *branch_target;
1351 bool is64 = BPF_SIZE(insn->code) == BPF_DW;
1352 u32 real_src_reg = src_reg;
1353
1354
1355
1356
1357
1358
1359
1360 emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
1361 if (src_reg == BPF_REG_0)
1362 real_src_reg = BPF_REG_AX;
1363
1364 branch_target = prog;
1365
1366 emit_ldx(&prog, BPF_SIZE(insn->code),
1367 BPF_REG_0, dst_reg, insn->off);
1368
1369
1370
1371
1372 emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0);
1373 maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64);
1374 EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)],
1375 add_2reg(0xC0, AUX_REG, real_src_reg));
1376
1377 err = emit_atomic(&prog, BPF_CMPXCHG,
1378 dst_reg, AUX_REG, insn->off,
1379 BPF_SIZE(insn->code));
1380 if (WARN_ON(err))
1381 return err;
1382
1383
1384
1385
1386 EMIT2(X86_JNE, -(prog - branch_target) - 2);
1387
1388 emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0);
1389
1390 emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
1391 break;
1392
1393 }
1394
1395 err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
1396 insn->off, BPF_SIZE(insn->code));
1397 if (err)
1398 return err;
1399 break;
1400
1401
1402 case BPF_JMP | BPF_CALL:
1403 func = (u8 *) __bpf_call_base + imm32;
1404 if (tail_call_reachable) {
1405 EMIT3_off32(0x48, 0x8B, 0x85,
1406 -(bpf_prog->aux->stack_depth + 8));
1407 if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7))
1408 return -EINVAL;
1409 } else {
1410 if (!imm32 || emit_call(&prog, func, image + addrs[i - 1]))
1411 return -EINVAL;
1412 }
1413 break;
1414
1415 case BPF_JMP | BPF_TAIL_CALL:
1416 if (imm32)
1417 emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1],
1418 &prog, addrs[i], image,
1419 callee_regs_used,
1420 bpf_prog->aux->stack_depth);
1421 else
1422 emit_bpf_tail_call_indirect(&prog,
1423 callee_regs_used,
1424 bpf_prog->aux->stack_depth);
1425 break;
1426
1427
1428 case BPF_JMP | BPF_JEQ | BPF_X:
1429 case BPF_JMP | BPF_JNE | BPF_X:
1430 case BPF_JMP | BPF_JGT | BPF_X:
1431 case BPF_JMP | BPF_JLT | BPF_X:
1432 case BPF_JMP | BPF_JGE | BPF_X:
1433 case BPF_JMP | BPF_JLE | BPF_X:
1434 case BPF_JMP | BPF_JSGT | BPF_X:
1435 case BPF_JMP | BPF_JSLT | BPF_X:
1436 case BPF_JMP | BPF_JSGE | BPF_X:
1437 case BPF_JMP | BPF_JSLE | BPF_X:
1438 case BPF_JMP32 | BPF_JEQ | BPF_X:
1439 case BPF_JMP32 | BPF_JNE | BPF_X:
1440 case BPF_JMP32 | BPF_JGT | BPF_X:
1441 case BPF_JMP32 | BPF_JLT | BPF_X:
1442 case BPF_JMP32 | BPF_JGE | BPF_X:
1443 case BPF_JMP32 | BPF_JLE | BPF_X:
1444 case BPF_JMP32 | BPF_JSGT | BPF_X:
1445 case BPF_JMP32 | BPF_JSLT | BPF_X:
1446 case BPF_JMP32 | BPF_JSGE | BPF_X:
1447 case BPF_JMP32 | BPF_JSLE | BPF_X:
1448
1449 maybe_emit_mod(&prog, dst_reg, src_reg,
1450 BPF_CLASS(insn->code) == BPF_JMP);
1451 EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
1452 goto emit_cond_jmp;
1453
1454 case BPF_JMP | BPF_JSET | BPF_X:
1455 case BPF_JMP32 | BPF_JSET | BPF_X:
1456
1457 maybe_emit_mod(&prog, dst_reg, src_reg,
1458 BPF_CLASS(insn->code) == BPF_JMP);
1459 EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
1460 goto emit_cond_jmp;
1461
1462 case BPF_JMP | BPF_JSET | BPF_K:
1463 case BPF_JMP32 | BPF_JSET | BPF_K:
1464
1465 if (BPF_CLASS(insn->code) == BPF_JMP)
1466 EMIT1(add_1mod(0x48, dst_reg));
1467 else if (is_ereg(dst_reg))
1468 EMIT1(add_1mod(0x40, dst_reg));
1469 EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
1470 goto emit_cond_jmp;
1471
1472 case BPF_JMP | BPF_JEQ | BPF_K:
1473 case BPF_JMP | BPF_JNE | BPF_K:
1474 case BPF_JMP | BPF_JGT | BPF_K:
1475 case BPF_JMP | BPF_JLT | BPF_K:
1476 case BPF_JMP | BPF_JGE | BPF_K:
1477 case BPF_JMP | BPF_JLE | BPF_K:
1478 case BPF_JMP | BPF_JSGT | BPF_K:
1479 case BPF_JMP | BPF_JSLT | BPF_K:
1480 case BPF_JMP | BPF_JSGE | BPF_K:
1481 case BPF_JMP | BPF_JSLE | BPF_K:
1482 case BPF_JMP32 | BPF_JEQ | BPF_K:
1483 case BPF_JMP32 | BPF_JNE | BPF_K:
1484 case BPF_JMP32 | BPF_JGT | BPF_K:
1485 case BPF_JMP32 | BPF_JLT | BPF_K:
1486 case BPF_JMP32 | BPF_JGE | BPF_K:
1487 case BPF_JMP32 | BPF_JLE | BPF_K:
1488 case BPF_JMP32 | BPF_JSGT | BPF_K:
1489 case BPF_JMP32 | BPF_JSLT | BPF_K:
1490 case BPF_JMP32 | BPF_JSGE | BPF_K:
1491 case BPF_JMP32 | BPF_JSLE | BPF_K:
1492
1493 if (imm32 == 0) {
1494 maybe_emit_mod(&prog, dst_reg, dst_reg,
1495 BPF_CLASS(insn->code) == BPF_JMP);
1496 EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
1497 goto emit_cond_jmp;
1498 }
1499
1500
1501 if (BPF_CLASS(insn->code) == BPF_JMP)
1502 EMIT1(add_1mod(0x48, dst_reg));
1503 else if (is_ereg(dst_reg))
1504 EMIT1(add_1mod(0x40, dst_reg));
1505
1506 if (is_imm8(imm32))
1507 EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
1508 else
1509 EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);
1510
1511emit_cond_jmp:
1512 switch (BPF_OP(insn->code)) {
1513 case BPF_JEQ:
1514 jmp_cond = X86_JE;
1515 break;
1516 case BPF_JSET:
1517 case BPF_JNE:
1518 jmp_cond = X86_JNE;
1519 break;
1520 case BPF_JGT:
1521
1522 jmp_cond = X86_JA;
1523 break;
1524 case BPF_JLT:
1525
1526 jmp_cond = X86_JB;
1527 break;
1528 case BPF_JGE:
1529
1530 jmp_cond = X86_JAE;
1531 break;
1532 case BPF_JLE:
1533
1534 jmp_cond = X86_JBE;
1535 break;
1536 case BPF_JSGT:
1537
1538 jmp_cond = X86_JG;
1539 break;
1540 case BPF_JSLT:
1541
1542 jmp_cond = X86_JL;
1543 break;
1544 case BPF_JSGE:
1545
1546 jmp_cond = X86_JGE;
1547 break;
1548 case BPF_JSLE:
1549
1550 jmp_cond = X86_JLE;
1551 break;
1552 default:
1553 return -EFAULT;
1554 }
1555 jmp_offset = addrs[i + insn->off] - addrs[i];
1556 if (is_imm8(jmp_offset)) {
1557 if (jmp_padding) {
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573 nops = INSN_SZ_DIFF - 2;
1574 if (nops != 0 && nops != 4) {
1575 pr_err("unexpected jmp_cond padding: %d bytes\n",
1576 nops);
1577 return -EFAULT;
1578 }
1579 cnt += emit_nops(&prog, nops);
1580 }
1581 EMIT2(jmp_cond, jmp_offset);
1582 } else if (is_simm32(jmp_offset)) {
1583 EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
1584 } else {
1585 pr_err("cond_jmp gen bug %llx\n", jmp_offset);
1586 return -EFAULT;
1587 }
1588
1589 break;
1590
1591 case BPF_JMP | BPF_JA:
1592 if (insn->off == -1)
1593
1594
1595
1596
1597
1598
1599 jmp_offset = -2;
1600 else
1601 jmp_offset = addrs[i + insn->off] - addrs[i];
1602
1603 if (!jmp_offset) {
1604
1605
1606
1607
1608 if (jmp_padding) {
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619 nops = INSN_SZ_DIFF;
1620 if (nops != 0 && nops != 2 && nops != 5) {
1621 pr_err("unexpected nop jump padding: %d bytes\n",
1622 nops);
1623 return -EFAULT;
1624 }
1625 cnt += emit_nops(&prog, nops);
1626 }
1627 break;
1628 }
1629emit_jmp:
1630 if (is_imm8(jmp_offset)) {
1631 if (jmp_padding) {
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644 nops = INSN_SZ_DIFF - 2;
1645 if (nops != 0 && nops != 3) {
1646 pr_err("unexpected jump padding: %d bytes\n",
1647 nops);
1648 return -EFAULT;
1649 }
1650 cnt += emit_nops(&prog, INSN_SZ_DIFF - 2);
1651 }
1652 EMIT2(0xEB, jmp_offset);
1653 } else if (is_simm32(jmp_offset)) {
1654 EMIT1_off32(0xE9, jmp_offset);
1655 } else {
1656 pr_err("jmp gen bug %llx\n", jmp_offset);
1657 return -EFAULT;
1658 }
1659 break;
1660
1661 case BPF_JMP | BPF_EXIT:
1662 if (seen_exit) {
1663 jmp_offset = ctx->cleanup_addr - addrs[i];
1664 goto emit_jmp;
1665 }
1666 seen_exit = true;
1667
1668 ctx->cleanup_addr = proglen;
1669 pop_callee_regs(&prog, callee_regs_used);
1670 EMIT1(0xC9);
1671 EMIT1(0xC3);
1672 break;
1673
1674 default:
1675
1676
1677
1678
1679
1680
1681 pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
1682 return -EINVAL;
1683 }
1684
1685 ilen = prog - temp;
1686 if (ilen > BPF_MAX_INSN_SIZE) {
1687 pr_err("bpf_jit: fatal insn size error\n");
1688 return -EFAULT;
1689 }
1690
1691 if (image) {
1692
1693
1694
1695
1696
1697
1698
1699
1700 if (unlikely(proglen + ilen > oldproglen ||
1701 proglen + ilen != addrs[i])) {
1702 pr_err("bpf_jit: fatal error\n");
1703 return -EFAULT;
1704 }
1705 memcpy(image + proglen, temp, ilen);
1706 }
1707 proglen += ilen;
1708 addrs[i] = proglen;
1709 prog = temp;
1710 }
1711
1712 if (image && excnt != bpf_prog->aux->num_exentries) {
1713 pr_err("extable is not populated\n");
1714 return -EFAULT;
1715 }
1716 return proglen;
1717}
1718
1719static void save_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
1720 int stack_size)
1721{
1722 int i;
1723
1724
1725
1726
1727
1728 for (i = 0; i < min(nr_args, 6); i++)
1729 emit_stx(prog, bytes_to_bpf_size(m->arg_size[i]),
1730 BPF_REG_FP,
1731 i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
1732 -(stack_size - i * 8));
1733}
1734
1735static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
1736 int stack_size)
1737{
1738 int i;
1739
1740
1741
1742
1743
1744
1745 for (i = 0; i < min(nr_args, 6); i++)
1746 emit_ldx(prog, bytes_to_bpf_size(m->arg_size[i]),
1747 i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
1748 BPF_REG_FP,
1749 -(stack_size - i * 8));
1750}
1751
/*
 * Emit the call sequence for one BPF program attached to a trampoline:
 *
 *	start = __bpf_prog_enter(prog);		// or the sleepable variant
 *	if (start)
 *		prog->bpf_func(args);		// skipped when enter returns 0
 *	__bpf_prog_exit(prog, start);		// or the sleepable variant
 *
 * If @mod_ret, the program's return value is stored at [rbp - 8] so later
 * trampoline code can inspect it.  Returns 0 on success, -EINVAL if any
 * emitted call target is out of range.
 */
static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
			   struct bpf_prog *p, int stack_size, bool mod_ret)
{
	u8 *prog = *pprog;
	u8 *jmp_insn;
	int cnt = 0;

	/* arg1: mov rdi, progs[i] */
	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
	if (emit_call(&prog,
		      p->aux->sleepable ? __bpf_prog_enter_sleepable :
		      __bpf_prog_enter, prog))
		return -EINVAL;

	/* Remember the value returned by __bpf_prog_enter*() in rbx. */
	emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);

	/*
	 * if (__bpf_prog_enter*(prog) == 0)
	 *	goto skip_exec_of_prog;
	 */
	EMIT3(0x48, 0x85, 0xC0);	/* test rax,rax */
	/* Reserve 2 bytes for a conditional jump; patched in below once the
	 * target is known.
	 */
	jmp_insn = prog;
	emit_nops(&prog, 2);

	/* arg1: lea rdi, [rbp - stack_size] — pointer to the saved args */
	EMIT4(0x48, 0x8D, 0x7D, -stack_size);

	/* arg2: the interpreter needs the instruction array as well */
	if (!p->jited)
		emit_mov_imm64(&prog, BPF_REG_2,
			       (long) p->insnsi >> 32,
			       (u32) (long) p->insnsi);

	if (emit_call(&prog, p->bpf_func, prog))
		return -EINVAL;

	/*
	 * MODIFY_RETURN programs can change the return value of the call;
	 * stash it on the stack for the next program / the trampoline.
	 */
	if (mod_ret)
		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);

	/* Replace the 2 NOPs above with a JE now that the target is known. */
	jmp_insn[0] = X86_JE;
	jmp_insn[1] = prog - jmp_insn - 2;

	/* arg1: mov rdi, progs[i] */
	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
	/* arg2: mov rsi, rbx — the value saved from __bpf_prog_enter*() */
	emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
	if (emit_call(&prog,
		      p->aux->sleepable ? __bpf_prog_exit_sleepable :
		      __bpf_prog_exit, prog))
		return -EINVAL;

	*pprog = prog;
	return 0;
}
1810
1811static void emit_align(u8 **pprog, u32 align)
1812{
1813 u8 *target, *prog = *pprog;
1814
1815 target = PTR_ALIGN(prog, align);
1816 if (target != prog)
1817 emit_nops(&prog, target - prog);
1818
1819 *pprog = prog;
1820}
1821
1822static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
1823{
1824 u8 *prog = *pprog;
1825 int cnt = 0;
1826 s64 offset;
1827
1828 offset = func - (ip + 2 + 4);
1829 if (!is_simm32(offset)) {
1830 pr_err("Target %p is out of range\n", func);
1831 return -EINVAL;
1832 }
1833 EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
1834 *pprog = prog;
1835 return 0;
1836}
1837
1838static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
1839 struct bpf_tramp_progs *tp, int stack_size)
1840{
1841 int i;
1842 u8 *prog = *pprog;
1843
1844 for (i = 0; i < tp->nr_progs; i++) {
1845 if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, false))
1846 return -EINVAL;
1847 }
1848 *pprog = prog;
1849 return 0;
1850}
1851
/*
 * Emit calls to all MODIFY_RETURN programs.  Each program's return value
 * lands at [rbp - 8]; after each call a "cmp [rbp-8], 0" plus a 6-byte
 * NOP placeholder is emitted.  The placeholder positions are recorded in
 * @branches so the caller can patch them into conditional near jumps
 * (see emit_cond_near_jump() use in arch_prepare_bpf_trampoline()).
 */
static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
			      struct bpf_tramp_progs *tp, int stack_size,
			      u8 **branches)
{
	u8 *prog = *pprog;
	int i, cnt = 0;

	/*
	 * Clear the return-value slot first so the first fmod_ret program
	 * does not observe garbage at [rbp - 8].
	 */
	emit_mov_imm32(&prog, false, BPF_REG_0, 0);
	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
	for (i = 0; i < tp->nr_progs; i++) {
		if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, true))
			return -EINVAL;

		/*
		 * The program stored its return value at [rbp - 8]; emit:
		 *	cmp QWORD PTR [rbp - 0x8], 0x0
		 */
		EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);

		/*
		 * Record the branch location and emit 6 NOP bytes (2-byte
		 * opcode + 4-byte rel32); they are later overwritten with a
		 * conditional near jump once the target is known.
		 */
		branches[i] = prog;
		emit_nops(&prog, 4 + 2);
	}

	*pprog = prog;
	return 0;
}
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
1949 const struct btf_func_model *m, u32 flags,
1950 struct bpf_tramp_progs *tprogs,
1951 void *orig_call)
1952{
1953 int ret, i, cnt = 0, nr_args = m->nr_args;
1954 int stack_size = nr_args * 8;
1955 struct bpf_tramp_progs *fentry = &tprogs[BPF_TRAMP_FENTRY];
1956 struct bpf_tramp_progs *fexit = &tprogs[BPF_TRAMP_FEXIT];
1957 struct bpf_tramp_progs *fmod_ret = &tprogs[BPF_TRAMP_MODIFY_RETURN];
1958 u8 **branches = NULL;
1959 u8 *prog;
1960
1961
1962 if (nr_args > 6)
1963 return -ENOTSUPP;
1964
1965 if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&
1966 (flags & BPF_TRAMP_F_SKIP_FRAME))
1967 return -EINVAL;
1968
1969 if (flags & BPF_TRAMP_F_CALL_ORIG)
1970 stack_size += 8;
1971
1972 if (flags & BPF_TRAMP_F_SKIP_FRAME)
1973
1974
1975
1976 orig_call += X86_PATCH_SIZE;
1977
1978 prog = image;
1979
1980 EMIT1(0x55);
1981 EMIT3(0x48, 0x89, 0xE5);
1982 EMIT4(0x48, 0x83, 0xEC, stack_size);
1983 EMIT1(0x53);
1984
1985 save_regs(m, &prog, nr_args, stack_size);
1986
1987 if (flags & BPF_TRAMP_F_CALL_ORIG) {
1988
1989 emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
1990 if (emit_call(&prog, __bpf_tramp_enter, prog)) {
1991 ret = -EINVAL;
1992 goto cleanup;
1993 }
1994 }
1995
1996 if (fentry->nr_progs)
1997 if (invoke_bpf(m, &prog, fentry, stack_size))
1998 return -EINVAL;
1999
2000 if (fmod_ret->nr_progs) {
2001 branches = kcalloc(fmod_ret->nr_progs, sizeof(u8 *),
2002 GFP_KERNEL);
2003 if (!branches)
2004 return -ENOMEM;
2005
2006 if (invoke_bpf_mod_ret(m, &prog, fmod_ret, stack_size,
2007 branches)) {
2008 ret = -EINVAL;
2009 goto cleanup;
2010 }
2011 }
2012
2013 if (flags & BPF_TRAMP_F_CALL_ORIG) {
2014 restore_regs(m, &prog, nr_args, stack_size);
2015
2016
2017 if (emit_call(&prog, orig_call, prog)) {
2018 ret = -EINVAL;
2019 goto cleanup;
2020 }
2021
2022 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
2023 im->ip_after_call = prog;
2024 memcpy(prog, ideal_nops[NOP_ATOMIC5], X86_PATCH_SIZE);
2025 prog += X86_PATCH_SIZE;
2026 }
2027
2028 if (fmod_ret->nr_progs) {
2029
2030
2031
2032
2033
2034 emit_align(&prog, 16);
2035
2036
2037
2038 for (i = 0; i < fmod_ret->nr_progs; i++)
2039 emit_cond_near_jump(&branches[i], prog, branches[i],
2040 X86_JNE);
2041 }
2042
2043 if (fexit->nr_progs)
2044 if (invoke_bpf(m, &prog, fexit, stack_size)) {
2045 ret = -EINVAL;
2046 goto cleanup;
2047 }
2048
2049 if (flags & BPF_TRAMP_F_RESTORE_REGS)
2050 restore_regs(m, &prog, nr_args, stack_size);
2051
2052
2053
2054
2055
2056 if (flags & BPF_TRAMP_F_CALL_ORIG) {
2057 im->ip_epilogue = prog;
2058
2059 emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
2060 if (emit_call(&prog, __bpf_tramp_exit, prog)) {
2061 ret = -EINVAL;
2062 goto cleanup;
2063 }
2064
2065 emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
2066 }
2067
2068 EMIT1(0x5B);
2069 EMIT1(0xC9);
2070 if (flags & BPF_TRAMP_F_SKIP_FRAME)
2071
2072 EMIT4(0x48, 0x83, 0xC4, 8);
2073 EMIT1(0xC3);
2074
2075 if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) {
2076 ret = -EFAULT;
2077 goto cleanup;
2078 }
2079 ret = prog - (u8 *)image;
2080
2081cleanup:
2082 kfree(branches);
2083 return ret;
2084}
2085
/*
 * Emit the dispatcher's "no program matched" tail: an indirect jump
 * through rdx (BPF_REG_3, the address the dispatcher compared against).
 */
static int emit_fallback_jump(u8 **pprog)
{
	u8 *prog = *pprog;
	int err = 0;

#ifdef CONFIG_RETPOLINE
	/*
	 * With retpolines the indirect jump must go through the thunk so
	 * that speculative execution cannot follow an attacker-controlled
	 * target.
	 */
	err = emit_jump(&prog, __x86_indirect_thunk_rdx, prog);
#else
	int cnt = 0;

	EMIT2(0xFF, 0xE2);	/* jmp rdx */
#endif
	*pprog = prog;
	return err;
}
2105
/*
 * Recursively emit a balanced binary-search tree of compare-and-branch
 * instructions over progs[a..b] (sorted ascending by the caller).  At
 * runtime rdx (BPF_REG_3) holds the target address: a matching leaf does
 * a direct "je <prog>", otherwise control falls through to the indirect
 * fallback jump.  Returns 0 on success, negative on failure.
 */
static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs)
{
	u8 *jg_reloc, *prog = *pprog;
	int pivot, err, jg_bytes = 1, cnt = 0;
	s64 jg_offset;

	if (a == b) {
		/*
		 * Leaf: cmp rdx, progs[a]; je progs[a]; else fall back.
		 * Targets must fit in a signed 32-bit immediate.
		 */
		EMIT1(add_1mod(0x48, BPF_REG_3));	/* cmp rdx, imm32 */
		if (!is_simm32(progs[a]))
			return -1;
		EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3),
			    progs[a]);
		err = emit_cond_near_jump(&prog,	/* je progs[a] */
					  (void *)progs[a], prog,
					  X86_JE);
		if (err)
			return err;

		err = emit_fallback_jump(&prog);	/* jmp rdx / thunk */
		if (err)
			return err;

		*pprog = prog;
		return 0;
	}

	/*
	 * Internal node: compare against the pivot entry and branch to the
	 * upper half when greater, else fall through into the lower half.
	 */
	pivot = (b - a) / 2;
	EMIT1(add_1mod(0x48, BPF_REG_3));		/* cmp rdx, pivot */
	if (!is_simm32(progs[a + pivot]))
		return -1;
	EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]);

	if (pivot > 2) {
		/* Larger subtree: rel8 may not reach, use a rel32 jg. */
		jg_bytes = 4;
		EMIT2_off32(0x0F, X86_JG + 0x10, 0);	/* jg rel32, patched */
	} else {
		EMIT2(X86_JG, 0);			/* jg rel8, patched */
	}
	jg_reloc = prog;

	err = emit_bpf_dispatcher(&prog, a, a + pivot,	/* lower half */
				  progs);
	if (err)
		return err;

	/*
	 * Align the upper half's first instruction, then back-patch the jg
	 * displacement (located jg_bytes before jg_reloc) to reach it.
	 */
	emit_align(&prog, 16);
	jg_offset = prog - jg_reloc;
	emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);

	err = emit_bpf_dispatcher(&prog, a + pivot + 1,	/* upper half */
				  b, progs);
	if (err)
		return err;

	*pprog = prog;
	return 0;
}
2175
2176static int cmp_ips(const void *a, const void *b)
2177{
2178 const s64 *ipa = a;
2179 const s64 *ipb = b;
2180
2181 if (*ipa > *ipb)
2182 return 1;
2183 if (*ipa < *ipb)
2184 return -1;
2185 return 0;
2186}
2187
2188int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs)
2189{
2190 u8 *prog = image;
2191
2192 sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL);
2193 return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs);
2194}
2195
/*
 * JIT state stashed in prog->aux->jit_data between the initial
 * bpf_int_jit_compile() invocation and the extra pass (taken when
 * prog->is_func — see bpf_int_jit_compile()).
 */
struct x64_jit_data {
	struct bpf_binary_header *header; /* executable allocation backing image */
	int *addrs;	/* per-BPF-insn end offsets within the JITed image */
	u8 *image;	/* start of the JITed code */
	int proglen;	/* length of the JITed code in bytes */
	struct jit_context ctx;	/* converged context (e.g. cleanup_addr) */
};
2203
2204#define MAX_PASSES 20
2205#define PADDING_PASSES (MAX_PASSES - 5)
2206
/*
 * Main x86-64 JIT entry point.  do_jit() is run repeatedly (up to
 * MAX_PASSES) so branch offsets converge as the estimated image shrinks;
 * once the length is stable, the code is emitted into the final
 * executable image.  On failure the original prog is returned and the
 * interpreter remains in use.
 */
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_binary_header *header = NULL;
	struct bpf_prog *tmp, *orig_prog = prog;
	struct x64_jit_data *jit_data;
	int proglen, oldproglen = 0;
	struct jit_context ctx = {};
	bool tmp_blinded = false;
	bool extra_pass = false;
	bool padding = false;
	u8 *image = NULL;
	int *addrs;
	int pass;
	int i;

	if (!prog->jit_requested)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	/*
	 * If constant blinding was requested but failed, fall back to the
	 * interpreter by returning the original prog.
	 */
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	jit_data = prog->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			prog = orig_prog;
			goto out;
		}
		prog->aux->jit_data = jit_data;
	}
	addrs = jit_data->addrs;
	if (addrs) {
		/* State saved by a previous call: this is the extra pass. */
		ctx = jit_data->ctx;
		oldproglen = jit_data->proglen;
		image = jit_data->image;
		header = jit_data->header;
		extra_pass = true;
		padding = true;
		goto skip_init_addrs;
	}
	addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
	if (!addrs) {
		prog = orig_prog;
		goto out_addrs;
	}

	/*
	 * Seed addrs[] with a 64-bytes-per-insn over-estimate; later
	 * passes can only shrink these offsets.
	 */
	for (proglen = 0, i = 0; i <= prog->len; i++) {
		proglen += 64;
		addrs[i] = proglen;
	}
	ctx.cleanup_addr = proglen;
skip_init_addrs:

	/*
	 * Iterate until the image length stops changing.  From pass
	 * PADDING_PASSES onward, do_jit() pads shrunk jumps with NOPs so
	 * the layout can stabilize.  With image != NULL, one final pass
	 * writes the actual machine code.
	 */
	for (pass = 0; pass < MAX_PASSES || image; pass++) {
		if (!padding && pass >= PADDING_PASSES)
			padding = true;
		proglen = do_jit(prog, addrs, image, oldproglen, &ctx, padding);
		if (proglen <= 0) {
out_image:
			image = NULL;
			if (header)
				bpf_jit_binary_free(header);
			prog = orig_prog;
			goto out_addrs;
		}
		if (image) {
			/* The final emit pass must not change the length. */
			if (proglen != oldproglen) {
				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
				       proglen, oldproglen);
				goto out_image;
			}
			break;
		}
		if (proglen == oldproglen) {
			/*
			 * Length converged: allocate the final image, with
			 * the exception table appended (aligned) after the
			 * code.
			 */
			u32 align = __alignof__(struct exception_table_entry);
			u32 extable_size = prog->aux->num_exentries *
				sizeof(struct exception_table_entry);

			header = bpf_jit_binary_alloc(roundup(proglen, align) + extable_size,
						      &image, align, jit_fill_hole);
			if (!header) {
				prog = orig_prog;
				goto out_addrs;
			}
			prog->aux->extable = (void *) image + roundup(proglen, align);
		}
		oldproglen = proglen;
		cond_resched();
	}

	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, proglen, pass + 1, image);

	if (image) {
		if (!prog->is_func || extra_pass) {
			/* Finalize: patch direct tail calls, seal the image. */
			bpf_tail_call_direct_fixup(prog);
			bpf_jit_binary_lock_ro(header);
		} else {
			/*
			 * First pass of a subprog: stash state so the extra
			 * pass can finish up later.
			 */
			jit_data->addrs = addrs;
			jit_data->ctx = ctx;
			jit_data->proglen = proglen;
			jit_data->image = image;
			jit_data->header = header;
		}
		prog->bpf_func = (void *)image;
		prog->jited = 1;
		prog->jited_len = proglen;
	} else {
		prog = orig_prog;
	}

	if (!image || !prog->is_func || extra_pass) {
		if (image)
			bpf_prog_fill_jited_linfo(prog, addrs + 1);
out_addrs:
		kvfree(addrs);
		kfree(jit_data);
		prog->aux->jit_data = NULL;
	}
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;
}
2358