linux/arch/x86/net/bpf_jit_comp.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * bpf_jit_comp.c: BPF JIT compiler
   4 *
   5 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
   6 * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
   7 */
   8#include <linux/netdevice.h>
   9#include <linux/filter.h>
  10#include <linux/if_vlan.h>
  11#include <linux/bpf.h>
  12#include <linux/memory.h>
  13#include <asm/extable.h>
  14#include <asm/set_memory.h>
  15#include <asm/nospec-branch.h>
  16#include <asm/text-patching.h>
  17
  18static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
  19{
  20        if (len == 1)
  21                *ptr = bytes;
  22        else if (len == 2)
  23                *(u16 *)ptr = bytes;
  24        else {
  25                *(u32 *)ptr = bytes;
  26                barrier();
  27        }
  28        return ptr + len;
  29}
  30
  31#define EMIT(bytes, len) \
  32        do { prog = emit_code(prog, bytes, len); cnt += len; } while (0)
  33
  34#define EMIT1(b1)               EMIT(b1, 1)
  35#define EMIT2(b1, b2)           EMIT((b1) + ((b2) << 8), 2)
  36#define EMIT3(b1, b2, b3)       EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
  37#define EMIT4(b1, b2, b3, b4)   EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
  38
  39#define EMIT1_off32(b1, off) \
  40        do { EMIT1(b1); EMIT(off, 4); } while (0)
  41#define EMIT2_off32(b1, b2, off) \
  42        do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
  43#define EMIT3_off32(b1, b2, b3, off) \
  44        do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
  45#define EMIT4_off32(b1, b2, b3, b4, off) \
  46        do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
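
/*
 * Illustrative example of the macros above: EMIT3(0x48, 0x89, 0xE5)
 * packs its arguments little-endian, so the bytes 48 89 e5
 * ('mov rbp, rsp') land in the image in argument order and both
 * 'prog' and 'cnt' advance by 3.
 */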
  47
  48static bool is_imm8(int value)
  49{
  50        return value <= 127 && value >= -128;
  51}
  52
  53static bool is_simm32(s64 value)
  54{
  55        return value == (s64)(s32)value;
  56}
  57
  58static bool is_uimm32(u64 value)
  59{
  60        return value == (u64)(u32)value;
  61}
  62
  63/* mov dst, src */
  64#define EMIT_mov(DST, SRC)                                                               \
  65        do {                                                                             \
  66                if (DST != SRC)                                                          \
  67                        EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
  68        } while (0)
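
/*
 * For example, EMIT_mov(BPF_REG_1, BPF_REG_2) expands (via the helpers
 * further down) to the bytes 48 89 f7, i.e. 'mov rdi, rsi'; when
 * DST == SRC nothing is emitted at all.
 */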
  69
  70static int bpf_size_to_x86_bytes(int bpf_size)
  71{
  72        if (bpf_size == BPF_W)
  73                return 4;
  74        else if (bpf_size == BPF_H)
  75                return 2;
  76        else if (bpf_size == BPF_B)
  77                return 1;
  78        else if (bpf_size == BPF_DW)
  79                return 4; /* imm32 */
  80        else
  81                return 0;
  82}
  83
  84/*
  85 * List of x86 cond jumps opcodes (. + s8)
  86 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
  87 */
  88#define X86_JB  0x72
  89#define X86_JAE 0x73
  90#define X86_JE  0x74
  91#define X86_JNE 0x75
  92#define X86_JBE 0x76
  93#define X86_JA  0x77
  94#define X86_JL  0x7C
  95#define X86_JGE 0x7D
  96#define X86_JLE 0x7E
  97#define X86_JG  0x7F
  98
  99/* Pick a register outside of BPF range for JIT internal work */
 100#define AUX_REG (MAX_BPF_JIT_REG + 1)
 101#define X86_REG_R9 (MAX_BPF_JIT_REG + 2)
 102
 103/*
 104 * The following table maps BPF registers to x86-64 registers.
 105 *
 106 * x86-64 register R12 is unused, since if used as base address
 107 * register in load/store instructions, it always needs an
 108 * extra byte of encoding and is callee saved.
 109 *
 110 * x86-64 register R9 is not used by BPF programs, but can be used by BPF
 111 * trampoline. x86-64 register R10 is used for blinding (if enabled).
 112 */
 113static const int reg2hex[] = {
 114        [BPF_REG_0] = 0,  /* RAX */
 115        [BPF_REG_1] = 7,  /* RDI */
 116        [BPF_REG_2] = 6,  /* RSI */
 117        [BPF_REG_3] = 2,  /* RDX */
 118        [BPF_REG_4] = 1,  /* RCX */
 119        [BPF_REG_5] = 0,  /* R8  */
 120        [BPF_REG_6] = 3,  /* RBX callee saved */
 121        [BPF_REG_7] = 5,  /* R13 callee saved */
 122        [BPF_REG_8] = 6,  /* R14 callee saved */
 123        [BPF_REG_9] = 7,  /* R15 callee saved */
 124        [BPF_REG_FP] = 5, /* RBP readonly */
 125        [BPF_REG_AX] = 2, /* R10 temp register */
 126        [AUX_REG] = 3,    /* R11 temp register */
 127        [X86_REG_R9] = 1, /* R9 register, 6th function argument */
 128};
 129
 130static const int reg2pt_regs[] = {
 131        [BPF_REG_0] = offsetof(struct pt_regs, ax),
 132        [BPF_REG_1] = offsetof(struct pt_regs, di),
 133        [BPF_REG_2] = offsetof(struct pt_regs, si),
 134        [BPF_REG_3] = offsetof(struct pt_regs, dx),
 135        [BPF_REG_4] = offsetof(struct pt_regs, cx),
 136        [BPF_REG_5] = offsetof(struct pt_regs, r8),
 137        [BPF_REG_6] = offsetof(struct pt_regs, bx),
 138        [BPF_REG_7] = offsetof(struct pt_regs, r13),
 139        [BPF_REG_8] = offsetof(struct pt_regs, r14),
 140        [BPF_REG_9] = offsetof(struct pt_regs, r15),
 141};
 142
 143/*
 144 * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15
 145 * which need extra byte of encoding.
 146 * rax,rcx,...,rbp have simpler encoding
 147 */
 148static bool is_ereg(u32 reg)
 149{
 150        return (1 << reg) & (BIT(BPF_REG_5) |
 151                             BIT(AUX_REG) |
 152                             BIT(BPF_REG_7) |
 153                             BIT(BPF_REG_8) |
 154                             BIT(BPF_REG_9) |
 155                             BIT(X86_REG_R9) |
 156                             BIT(BPF_REG_AX));
 157}
 158
 159static bool is_axreg(u32 reg)
 160{
 161        return reg == BPF_REG_0;
 162}
 163
 164/* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
 165static u8 add_1mod(u8 byte, u32 reg)
 166{
 167        if (is_ereg(reg))
 168                byte |= 1;
 169        return byte;
 170}
 171
 172static u8 add_2mod(u8 byte, u32 r1, u32 r2)
 173{
 174        if (is_ereg(r1))
 175                byte |= 1;
 176        if (is_ereg(r2))
 177                byte |= 4;
 178        return byte;
 179}
 180
 181/* Encode 'dst_reg' register into x86-64 opcode 'byte' */
 182static u8 add_1reg(u8 byte, u32 dst_reg)
 183{
 184        return byte + reg2hex[dst_reg];
 185}
 186
 187/* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
 188static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
 189{
 190        return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
 191}
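
/*
 * Example of how the REX and ModRM helpers compose: moving BPF_REG_8
 * (r14) into BPF_REG_9 (r15) needs both REX.R and REX.B, so
 * EMIT_mov(BPF_REG_9, BPF_REG_8) yields add_2mod(0x48, ...) == 0x4d,
 * the 0x89 opcode and add_2reg(0xC0, ...) == 0xf7, i.e. the bytes
 * 4d 89 f7 ('mov r15, r14').
 */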
 192
 193static void jit_fill_hole(void *area, unsigned int size)
 194{
 195        /* Fill whole space with INT3 instructions */
 196        memset(area, 0xcc, size);
 197}
 198
 199struct jit_context {
 200        int cleanup_addr; /* Epilogue code offset */
 201};
 202
 203/* Maximum number of bytes emitted while JITing one eBPF insn */
 204#define BPF_MAX_INSN_SIZE       128
 205#define BPF_INSN_SAFETY         64
 206
 207/* Number of bytes emit_patch() needs to generate instructions */
 208#define X86_PATCH_SIZE          5
 209
 210#define PROLOGUE_SIZE           25
 211
 212/*
 213 * Emit x86-64 prologue code for BPF program and check its size.
 214 * bpf_tail_call helper will skip it while jumping into another program
 215 */
 216static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf)
 217{
 218        u8 *prog = *pprog;
 219        int cnt = X86_PATCH_SIZE;
 220
 221        /* BPF trampoline can be made to work without these nops,
 222         * but let's waste 5 bytes for now and optimize later
 223         */
 224        memcpy(prog, ideal_nops[NOP_ATOMIC5], cnt);
 225        prog += cnt;
 226        EMIT1(0x55);             /* push rbp */
 227        EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
 228        /* sub rsp, rounded_stack_depth */
 229        EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
 230        EMIT1(0x53);             /* push rbx */
 231        EMIT2(0x41, 0x55);       /* push r13 */
 232        EMIT2(0x41, 0x56);       /* push r14 */
 233        EMIT2(0x41, 0x57);       /* push r15 */
 234        if (!ebpf_from_cbpf) {
 235                /* zero init tail_call_cnt */
 236                EMIT2(0x6a, 0x00);
 237                BUILD_BUG_ON(cnt != PROLOGUE_SIZE);
 238        }
 239        *pprog = prog;
 240}
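
/*
 * Byte budget of the prologue above, for reference: 5 (nop5) +
 * 1 (push rbp) + 3 (mov rbp, rsp) + 7 (sub rsp, imm32) + 1 (push rbx) +
 * 2 + 2 + 2 (push r13/r14/r15) + 2 (push 0) == 25 == PROLOGUE_SIZE,
 * which is what the BUILD_BUG_ON() above enforces.
 */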
 241
 242static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
 243{
 244        u8 *prog = *pprog;
 245        int cnt = 0;
 246        s64 offset;
 247
 248        offset = func - (ip + X86_PATCH_SIZE);
 249        if (!is_simm32(offset)) {
 250                pr_err("Target call %p is out of range\n", func);
 251                return -ERANGE;
 252        }
 253        EMIT1_off32(opcode, offset);
 254        *pprog = prog;
 255        return 0;
 256}
 257
 258static int emit_call(u8 **pprog, void *func, void *ip)
 259{
 260        return emit_patch(pprog, func, ip, 0xE8);
 261}
 262
 263static int emit_jump(u8 **pprog, void *func, void *ip)
 264{
 265        return emit_patch(pprog, func, ip, 0xE9);
 266}
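
/*
 * Worked example (addresses made up): a call emitted at ip 0x1000
 * targeting func 0x1234 becomes e8 2f 02 00 00, since the rel32 is
 * taken from the end of the 5-byte instruction:
 * 0x1234 - (0x1000 + X86_PATCH_SIZE) == 0x22f.
 */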
 267
 268static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
 269                                void *old_addr, void *new_addr,
 270                                const bool text_live)
 271{
 272        const u8 *nop_insn = ideal_nops[NOP_ATOMIC5];
 273        u8 old_insn[X86_PATCH_SIZE];
 274        u8 new_insn[X86_PATCH_SIZE];
 275        u8 *prog;
 276        int ret;
 277
 278        memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
 279        if (old_addr) {
 280                prog = old_insn;
 281                ret = t == BPF_MOD_CALL ?
 282                      emit_call(&prog, old_addr, ip) :
 283                      emit_jump(&prog, old_addr, ip);
 284                if (ret)
 285                        return ret;
 286        }
 287
 288        memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
 289        if (new_addr) {
 290                prog = new_insn;
 291                ret = t == BPF_MOD_CALL ?
 292                      emit_call(&prog, new_addr, ip) :
 293                      emit_jump(&prog, new_addr, ip);
 294                if (ret)
 295                        return ret;
 296        }
 297
 298        ret = -EBUSY;
 299        mutex_lock(&text_mutex);
 300        if (memcmp(ip, old_insn, X86_PATCH_SIZE))
 301                goto out;
 302        if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
 303                if (text_live)
 304                        text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
 305                else
 306                        memcpy(ip, new_insn, X86_PATCH_SIZE);
 307        }
 308        ret = 0;
 309out:
 310        mutex_unlock(&text_mutex);
 311        return ret;
 312}
 313
 314int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
 315                       void *old_addr, void *new_addr)
 316{
 317        if (!is_kernel_text((long)ip) &&
 318            !is_bpf_text_address((long)ip))
 319                /* BPF poking in modules is not supported */
 320                return -EINVAL;
 321
 322        return __bpf_arch_text_poke(ip, t, old_addr, new_addr, true);
 323}
 324
 325/*
 326 * Generate the following code:
 327 *
 328 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
 329 *   if (index >= array->map.max_entries)
 330 *     goto out;
 331 *   if (++tail_call_cnt > MAX_TAIL_CALL_CNT)
 332 *     goto out;
 333 *   prog = array->ptrs[index];
 334 *   if (prog == NULL)
 335 *     goto out;
 336 *   goto *(prog->bpf_func + prologue_size);
 337 * out:
 338 */
 339static void emit_bpf_tail_call_indirect(u8 **pprog)
 340{
 341        u8 *prog = *pprog;
 342        int label1, label2, label3;
 343        int cnt = 0;
 344
 345        /*
 346         * rdi - pointer to ctx
 347         * rsi - pointer to bpf_array
 348         * rdx - index in bpf_array
 349         */
 350
 351        /*
 352         * if (index >= array->map.max_entries)
 353         *      goto out;
 354         */
 355        EMIT2(0x89, 0xD2);                        /* mov edx, edx */
 356        EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
 357              offsetof(struct bpf_array, map.max_entries));
 358#define OFFSET1 (41 + RETPOLINE_RAX_BPF_JIT_SIZE) /* Number of bytes to jump */
 359        EMIT2(X86_JBE, OFFSET1);                  /* jbe out */
 360        label1 = cnt;
 361
 362        /*
 363         * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
 364         *      goto out;
 365         */
 366        EMIT2_off32(0x8B, 0x85, -36 - MAX_BPF_STACK); /* mov eax, dword ptr [rbp - 548] */
 367        EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
 368#define OFFSET2 (30 + RETPOLINE_RAX_BPF_JIT_SIZE)
 369        EMIT2(X86_JA, OFFSET2);                   /* ja out */
 370        label2 = cnt;
 371        EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
 372        EMIT2_off32(0x89, 0x85, -36 - MAX_BPF_STACK); /* mov dword ptr [rbp -548], eax */
 373
 374        /* prog = array->ptrs[index]; */
 375        EMIT4_off32(0x48, 0x8B, 0x84, 0xD6,       /* mov rax, [rsi + rdx * 8 + offsetof(...)] */
 376                    offsetof(struct bpf_array, ptrs));
 377
 378        /*
 379         * if (prog == NULL)
 380         *      goto out;
 381         */
 382        EMIT3(0x48, 0x85, 0xC0);                  /* test rax,rax */
 383#define OFFSET3 (8 + RETPOLINE_RAX_BPF_JIT_SIZE)
 384        EMIT2(X86_JE, OFFSET3);                   /* je out */
 385        label3 = cnt;
 386
 387        /* goto *(prog->bpf_func + prologue_size); */
 388        EMIT4(0x48, 0x8B, 0x40,                   /* mov rax, qword ptr [rax + 32] */
 389              offsetof(struct bpf_prog, bpf_func));
 390        EMIT4(0x48, 0x83, 0xC0, PROLOGUE_SIZE);   /* add rax, prologue_size */
 391
 392        /*
  393         * Now we're ready to jump into the next BPF program
 394         * rdi == ctx (1st arg)
 395         * rax == prog->bpf_func + prologue_size
 396         */
 397        RETPOLINE_RAX_BPF_JIT();
 398
 399        /* out: */
 400        BUILD_BUG_ON(cnt - label1 != OFFSET1);
 401        BUILD_BUG_ON(cnt - label2 != OFFSET2);
 402        BUILD_BUG_ON(cnt - label3 != OFFSET3);
 403        *pprog = prog;
 404}
 405
 406static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
 407                                      u8 **pprog, int addr, u8 *image)
 408{
 409        u8 *prog = *pprog;
 410        int cnt = 0;
 411
 412        /*
 413         * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
 414         *      goto out;
 415         */
 416        EMIT2_off32(0x8B, 0x85, -36 - MAX_BPF_STACK); /* mov eax, dword ptr [rbp - 548] */
 417        EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);         /* cmp eax, MAX_TAIL_CALL_CNT */
 418        EMIT2(X86_JA, 14);                            /* ja out */
 419        EMIT3(0x83, 0xC0, 0x01);                      /* add eax, 1 */
 420        EMIT2_off32(0x89, 0x85, -36 - MAX_BPF_STACK); /* mov dword ptr [rbp -548], eax */
 421
 422        poke->ip = image + (addr - X86_PATCH_SIZE);
 423        poke->adj_off = PROLOGUE_SIZE;
 424
 425        memcpy(prog, ideal_nops[NOP_ATOMIC5], X86_PATCH_SIZE);
 426        prog += X86_PATCH_SIZE;
 427        /* out: */
 428
 429        *pprog = prog;
 430}
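
/*
 * The NOP5 emitted above is the poke target: once the target program is
 * known, bpf_tail_call_direct_fixup() below rewrites it into a direct
 * 'jmp target->bpf_func + PROLOGUE_SIZE', skipping the callee's
 * prologue just like the indirect variant does via rax.
 */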
 431
 432static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
 433{
 434        struct bpf_jit_poke_descriptor *poke;
 435        struct bpf_array *array;
 436        struct bpf_prog *target;
 437        int i, ret;
 438
 439        for (i = 0; i < prog->aux->size_poke_tab; i++) {
 440                poke = &prog->aux->poke_tab[i];
 441                WARN_ON_ONCE(READ_ONCE(poke->ip_stable));
 442
 443                if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
 444                        continue;
 445
 446                array = container_of(poke->tail_call.map, struct bpf_array, map);
 447                mutex_lock(&array->aux->poke_mutex);
 448                target = array->ptrs[poke->tail_call.key];
 449                if (target) {
 450                        /* Plain memcpy is used when image is not live yet
 451                         * and still not locked as read-only. Once poke
 452                         * location is active (poke->ip_stable), any parallel
 453                         * bpf_arch_text_poke() might occur still on the
 454                         * read-write image until we finally locked it as
 455                         * read-only. Both modifications on the given image
 456                         * are under text_mutex to avoid interference.
 457                         */
 458                        ret = __bpf_arch_text_poke(poke->ip, BPF_MOD_JUMP, NULL,
 459                                                   (u8 *)target->bpf_func +
 460                                                   poke->adj_off, false);
 461                        BUG_ON(ret < 0);
 462                }
 463                WRITE_ONCE(poke->ip_stable, true);
 464                mutex_unlock(&array->aux->poke_mutex);
 465        }
 466}
 467
 468static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
 469                           u32 dst_reg, const u32 imm32)
 470{
 471        u8 *prog = *pprog;
 472        u8 b1, b2, b3;
 473        int cnt = 0;
 474
 475        /*
 476         * Optimization: if imm32 is positive, use 'mov %eax, imm32'
 477         * (which zero-extends imm32) to save 2 bytes.
 478         */
 479        if (sign_propagate && (s32)imm32 < 0) {
 480                /* 'mov %rax, imm32' sign extends imm32 */
 481                b1 = add_1mod(0x48, dst_reg);
 482                b2 = 0xC7;
 483                b3 = 0xC0;
 484                EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
 485                goto done;
 486        }
 487
 488        /*
 489         * Optimization: if imm32 is zero, use 'xor %eax, %eax'
 490         * to save 3 bytes.
 491         */
 492        if (imm32 == 0) {
 493                if (is_ereg(dst_reg))
 494                        EMIT1(add_2mod(0x40, dst_reg, dst_reg));
 495                b2 = 0x31; /* xor */
 496                b3 = 0xC0;
 497                EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
 498                goto done;
 499        }
 500
 501        /* mov %eax, imm32 */
 502        if (is_ereg(dst_reg))
 503                EMIT1(add_1mod(0x40, dst_reg));
 504        EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
 505done:
 506        *pprog = prog;
 507}
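
/*
 * The three encodings above, taking BPF_REG_0 (rax) as an example:
 *   imm32 <  0, sign_propagate -> 48 c7 c0 <imm32>  (mov rax, imm32)
 *   imm32 == 0                 -> 31 c0             (xor eax, eax)
 *   otherwise                  -> b8 <imm32>        (mov eax, imm32)
 */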
 508
 509static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
 510                           const u32 imm32_hi, const u32 imm32_lo)
 511{
 512        u8 *prog = *pprog;
 513        int cnt = 0;
 514
 515        if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
 516                /*
  517                 * For emitting a plain u32, where the sign bit must not be
  518                 * propagated, LLVM tends to load imm64 over mov32
  519                 * directly, so save a couple of bytes by just doing
  520                 * 'mov %eax, imm32' instead.
 521                 */
 522                emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
 523        } else {
 524                /* movabsq %rax, imm64 */
 525                EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
 526                EMIT(imm32_lo, 4);
 527                EMIT(imm32_hi, 4);
 528        }
 529
 530        *pprog = prog;
 531}
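
/*
 * E.g. BPF_LD | BPF_IMM | BPF_DW of the constant 5 shrinks to
 * 'b8 05 00 00 00' (mov eax, 5, upper bits cleared), while
 * 0x100000000 really needs the 10-byte movabs:
 * 48 b8 00 00 00 00 01 00 00 00.
 */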
 532
 533static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
 534{
 535        u8 *prog = *pprog;
 536        int cnt = 0;
 537
 538        if (is64) {
 539                /* mov dst, src */
 540                EMIT_mov(dst_reg, src_reg);
 541        } else {
 542                /* mov32 dst, src */
 543                if (is_ereg(dst_reg) || is_ereg(src_reg))
 544                        EMIT1(add_2mod(0x40, dst_reg, src_reg));
 545                EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
 546        }
 547
 548        *pprog = prog;
 549}
 550
 551/* LDX: dst_reg = *(u8*)(src_reg + off) */
 552static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
 553{
 554        u8 *prog = *pprog;
 555        int cnt = 0;
 556
 557        switch (size) {
 558        case BPF_B:
 559                /* Emit 'movzx rax, byte ptr [rax + off]' */
 560                EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
 561                break;
 562        case BPF_H:
 563                /* Emit 'movzx rax, word ptr [rax + off]' */
 564                EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
 565                break;
 566        case BPF_W:
 567                /* Emit 'mov eax, dword ptr [rax+0x14]' */
 568                if (is_ereg(dst_reg) || is_ereg(src_reg))
 569                        EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
 570                else
 571                        EMIT1(0x8B);
 572                break;
 573        case BPF_DW:
 574                /* Emit 'mov rax, qword ptr [rax+0x14]' */
 575                EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
 576                break;
 577        }
 578        /*
  579         * If insn->off == 0 we could save one extra byte, but the
  580         * special case of x86 R13, which always needs an offset,
  581         * is not worth the hassle
 582         */
 583        if (is_imm8(off))
 584                EMIT2(add_2reg(0x40, src_reg, dst_reg), off);
 585        else
 586                EMIT1_off32(add_2reg(0x80, src_reg, dst_reg), off);
 587        *pprog = prog;
 588}
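
/*
 * Example: emit_ldx(&prog, BPF_B, BPF_REG_0, BPF_REG_1, 8) produces
 * 48 0f b6 47 08, i.e. 'movzx rax, byte ptr [rdi + 8]': REX.W + 0f b6,
 * then ModRM 0x47 (mod=01, rax <- [rdi]) followed by the disp8.
 */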
 589
 590/* STX: *(u8*)(dst_reg + off) = src_reg */
 591static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
 592{
 593        u8 *prog = *pprog;
 594        int cnt = 0;
 595
 596        switch (size) {
 597        case BPF_B:
 598                /* Emit 'mov byte ptr [rax + off], al' */
 599                if (is_ereg(dst_reg) || is_ereg(src_reg) ||
 600                    /* We have to add extra byte for x86 SIL, DIL regs */
 601                    src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
 602                        EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
 603                else
 604                        EMIT1(0x88);
 605                break;
 606        case BPF_H:
 607                if (is_ereg(dst_reg) || is_ereg(src_reg))
 608                        EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
 609                else
 610                        EMIT2(0x66, 0x89);
 611                break;
 612        case BPF_W:
 613                if (is_ereg(dst_reg) || is_ereg(src_reg))
 614                        EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
 615                else
 616                        EMIT1(0x89);
 617                break;
 618        case BPF_DW:
 619                EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
 620                break;
 621        }
 622        if (is_imm8(off))
 623                EMIT2(add_2reg(0x40, dst_reg, src_reg), off);
 624        else
 625                EMIT1_off32(add_2reg(0x80, dst_reg, src_reg), off);
 626        *pprog = prog;
 627}
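
/*
 * Example: emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_1, -8) produces
 * 48 89 7d f8, i.e. 'mov qword ptr [rbp - 8], rdi', which is exactly
 * the form save_regs() below relies on to spill function arguments.
 */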
 628
 629static bool ex_handler_bpf(const struct exception_table_entry *x,
 630                           struct pt_regs *regs, int trapnr,
 631                           unsigned long error_code, unsigned long fault_addr)
 632{
 633        u32 reg = x->fixup >> 8;
 634
 635        /* jump over faulting load and clear dest register */
 636        *(unsigned long *)((void *)regs + reg) = 0;
 637        regs->ip += x->fixup & 0xff;
 638        return true;
 639}
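
/*
 * The fixup word is built in do_jit() below as
 * insn_len | (reg2pt_regs[dst_reg] << 8).  E.g. for a faulting 4-byte
 * 'mov rbx, qword ptr [rax + 0x14]' with dst_reg == BPF_REG_6 it holds
 * 4 | (offsetof(struct pt_regs, bx) << 8), so the handler above zeroes
 * regs->bx and advances regs->ip past the load.
 */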
 640
 641static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 642                  int oldproglen, struct jit_context *ctx)
 643{
 644        struct bpf_insn *insn = bpf_prog->insnsi;
 645        int insn_cnt = bpf_prog->len;
 646        bool seen_exit = false;
 647        u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
 648        int i, cnt = 0, excnt = 0;
 649        int proglen = 0;
 650        u8 *prog = temp;
 651
 652        emit_prologue(&prog, bpf_prog->aux->stack_depth,
 653                      bpf_prog_was_classic(bpf_prog));
 654        addrs[0] = prog - temp;
 655
 656        for (i = 1; i <= insn_cnt; i++, insn++) {
 657                const s32 imm32 = insn->imm;
 658                u32 dst_reg = insn->dst_reg;
 659                u32 src_reg = insn->src_reg;
 660                u8 b2 = 0, b3 = 0;
 661                s64 jmp_offset;
 662                u8 jmp_cond;
 663                int ilen;
 664                u8 *func;
 665
 666                switch (insn->code) {
 667                        /* ALU */
 668                case BPF_ALU | BPF_ADD | BPF_X:
 669                case BPF_ALU | BPF_SUB | BPF_X:
 670                case BPF_ALU | BPF_AND | BPF_X:
 671                case BPF_ALU | BPF_OR | BPF_X:
 672                case BPF_ALU | BPF_XOR | BPF_X:
 673                case BPF_ALU64 | BPF_ADD | BPF_X:
 674                case BPF_ALU64 | BPF_SUB | BPF_X:
 675                case BPF_ALU64 | BPF_AND | BPF_X:
 676                case BPF_ALU64 | BPF_OR | BPF_X:
 677                case BPF_ALU64 | BPF_XOR | BPF_X:
 678                        switch (BPF_OP(insn->code)) {
 679                        case BPF_ADD: b2 = 0x01; break;
 680                        case BPF_SUB: b2 = 0x29; break;
 681                        case BPF_AND: b2 = 0x21; break;
 682                        case BPF_OR: b2 = 0x09; break;
 683                        case BPF_XOR: b2 = 0x31; break;
 684                        }
 685                        if (BPF_CLASS(insn->code) == BPF_ALU64)
 686                                EMIT1(add_2mod(0x48, dst_reg, src_reg));
 687                        else if (is_ereg(dst_reg) || is_ereg(src_reg))
 688                                EMIT1(add_2mod(0x40, dst_reg, src_reg));
 689                        EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
 690                        break;
 691
 692                case BPF_ALU64 | BPF_MOV | BPF_X:
 693                case BPF_ALU | BPF_MOV | BPF_X:
 694                        emit_mov_reg(&prog,
 695                                     BPF_CLASS(insn->code) == BPF_ALU64,
 696                                     dst_reg, src_reg);
 697                        break;
 698
 699                        /* neg dst */
 700                case BPF_ALU | BPF_NEG:
 701                case BPF_ALU64 | BPF_NEG:
 702                        if (BPF_CLASS(insn->code) == BPF_ALU64)
 703                                EMIT1(add_1mod(0x48, dst_reg));
 704                        else if (is_ereg(dst_reg))
 705                                EMIT1(add_1mod(0x40, dst_reg));
 706                        EMIT2(0xF7, add_1reg(0xD8, dst_reg));
 707                        break;
 708
 709                case BPF_ALU | BPF_ADD | BPF_K:
 710                case BPF_ALU | BPF_SUB | BPF_K:
 711                case BPF_ALU | BPF_AND | BPF_K:
 712                case BPF_ALU | BPF_OR | BPF_K:
 713                case BPF_ALU | BPF_XOR | BPF_K:
 714                case BPF_ALU64 | BPF_ADD | BPF_K:
 715                case BPF_ALU64 | BPF_SUB | BPF_K:
 716                case BPF_ALU64 | BPF_AND | BPF_K:
 717                case BPF_ALU64 | BPF_OR | BPF_K:
 718                case BPF_ALU64 | BPF_XOR | BPF_K:
 719                        if (BPF_CLASS(insn->code) == BPF_ALU64)
 720                                EMIT1(add_1mod(0x48, dst_reg));
 721                        else if (is_ereg(dst_reg))
 722                                EMIT1(add_1mod(0x40, dst_reg));
 723
 724                        /*
 725                         * b3 holds 'normal' opcode, b2 short form only valid
 726                         * in case dst is eax/rax.
 727                         */
 728                        switch (BPF_OP(insn->code)) {
 729                        case BPF_ADD:
 730                                b3 = 0xC0;
 731                                b2 = 0x05;
 732                                break;
 733                        case BPF_SUB:
 734                                b3 = 0xE8;
 735                                b2 = 0x2D;
 736                                break;
 737                        case BPF_AND:
 738                                b3 = 0xE0;
 739                                b2 = 0x25;
 740                                break;
 741                        case BPF_OR:
 742                                b3 = 0xC8;
 743                                b2 = 0x0D;
 744                                break;
 745                        case BPF_XOR:
 746                                b3 = 0xF0;
 747                                b2 = 0x35;
 748                                break;
 749                        }
 750
 751                        if (is_imm8(imm32))
 752                                EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
 753                        else if (is_axreg(dst_reg))
 754                                EMIT1_off32(b2, imm32);
 755                        else
 756                                EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
 757                        break;
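                        /*
                         * E.g. BPF_ALU64 | BPF_AND | BPF_K with imm32 == 0xf on
                         * BPF_REG_0 takes the imm8 form 48 83 e0 0f
                         * ('and rax, 0xf'); with imm32 == 0x12345 it takes the
                         * rax-only short form 48 25 45 23 01 00
                         * ('and rax, 0x12345').
                         */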
 758
 759                case BPF_ALU64 | BPF_MOV | BPF_K:
 760                case BPF_ALU | BPF_MOV | BPF_K:
 761                        emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
 762                                       dst_reg, imm32);
 763                        break;
 764
 765                case BPF_LD | BPF_IMM | BPF_DW:
 766                        emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
 767                        insn++;
 768                        i++;
 769                        break;
 770
 771                        /* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
 772                case BPF_ALU | BPF_MOD | BPF_X:
 773                case BPF_ALU | BPF_DIV | BPF_X:
 774                case BPF_ALU | BPF_MOD | BPF_K:
 775                case BPF_ALU | BPF_DIV | BPF_K:
 776                case BPF_ALU64 | BPF_MOD | BPF_X:
 777                case BPF_ALU64 | BPF_DIV | BPF_X:
 778                case BPF_ALU64 | BPF_MOD | BPF_K:
 779                case BPF_ALU64 | BPF_DIV | BPF_K:
 780                        EMIT1(0x50); /* push rax */
 781                        EMIT1(0x52); /* push rdx */
 782
 783                        if (BPF_SRC(insn->code) == BPF_X)
 784                                /* mov r11, src_reg */
 785                                EMIT_mov(AUX_REG, src_reg);
 786                        else
 787                                /* mov r11, imm32 */
 788                                EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
 789
 790                        /* mov rax, dst_reg */
 791                        EMIT_mov(BPF_REG_0, dst_reg);
 792
 793                        /*
 794                         * xor edx, edx
 795                         * equivalent to 'xor rdx, rdx', but one byte less
 796                         */
 797                        EMIT2(0x31, 0xd2);
 798
 799                        if (BPF_CLASS(insn->code) == BPF_ALU64)
 800                                /* div r11 */
 801                                EMIT3(0x49, 0xF7, 0xF3);
 802                        else
 803                                /* div r11d */
 804                                EMIT3(0x41, 0xF7, 0xF3);
 805
 806                        if (BPF_OP(insn->code) == BPF_MOD)
 807                                /* mov r11, rdx */
 808                                EMIT3(0x49, 0x89, 0xD3);
 809                        else
 810                                /* mov r11, rax */
 811                                EMIT3(0x49, 0x89, 0xC3);
 812
 813                        EMIT1(0x5A); /* pop rdx */
 814                        EMIT1(0x58); /* pop rax */
 815
 816                        /* mov dst_reg, r11 */
 817                        EMIT_mov(dst_reg, AUX_REG);
 818                        break;
 819
 820                case BPF_ALU | BPF_MUL | BPF_K:
 821                case BPF_ALU | BPF_MUL | BPF_X:
 822                case BPF_ALU64 | BPF_MUL | BPF_K:
 823                case BPF_ALU64 | BPF_MUL | BPF_X:
 824                {
 825                        bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
 826
 827                        if (dst_reg != BPF_REG_0)
 828                                EMIT1(0x50); /* push rax */
 829                        if (dst_reg != BPF_REG_3)
 830                                EMIT1(0x52); /* push rdx */
 831
 832                        /* mov r11, dst_reg */
 833                        EMIT_mov(AUX_REG, dst_reg);
 834
 835                        if (BPF_SRC(insn->code) == BPF_X)
 836                                emit_mov_reg(&prog, is64, BPF_REG_0, src_reg);
 837                        else
 838                                emit_mov_imm32(&prog, is64, BPF_REG_0, imm32);
 839
 840                        if (is64)
 841                                EMIT1(add_1mod(0x48, AUX_REG));
 842                        else if (is_ereg(AUX_REG))
 843                                EMIT1(add_1mod(0x40, AUX_REG));
 844                        /* mul(q) r11 */
 845                        EMIT2(0xF7, add_1reg(0xE0, AUX_REG));
 846
 847                        if (dst_reg != BPF_REG_3)
 848                                EMIT1(0x5A); /* pop rdx */
 849                        if (dst_reg != BPF_REG_0) {
 850                                /* mov dst_reg, rax */
 851                                EMIT_mov(dst_reg, BPF_REG_0);
 852                                EMIT1(0x58); /* pop rax */
 853                        }
 854                        break;
 855                }
 856                        /* Shifts */
 857                case BPF_ALU | BPF_LSH | BPF_K:
 858                case BPF_ALU | BPF_RSH | BPF_K:
 859                case BPF_ALU | BPF_ARSH | BPF_K:
 860                case BPF_ALU64 | BPF_LSH | BPF_K:
 861                case BPF_ALU64 | BPF_RSH | BPF_K:
 862                case BPF_ALU64 | BPF_ARSH | BPF_K:
 863                        if (BPF_CLASS(insn->code) == BPF_ALU64)
 864                                EMIT1(add_1mod(0x48, dst_reg));
 865                        else if (is_ereg(dst_reg))
 866                                EMIT1(add_1mod(0x40, dst_reg));
 867
 868                        switch (BPF_OP(insn->code)) {
 869                        case BPF_LSH: b3 = 0xE0; break;
 870                        case BPF_RSH: b3 = 0xE8; break;
 871                        case BPF_ARSH: b3 = 0xF8; break;
 872                        }
 873
 874                        if (imm32 == 1)
 875                                EMIT2(0xD1, add_1reg(b3, dst_reg));
 876                        else
 877                                EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
 878                        break;
 879
 880                case BPF_ALU | BPF_LSH | BPF_X:
 881                case BPF_ALU | BPF_RSH | BPF_X:
 882                case BPF_ALU | BPF_ARSH | BPF_X:
 883                case BPF_ALU64 | BPF_LSH | BPF_X:
 884                case BPF_ALU64 | BPF_RSH | BPF_X:
 885                case BPF_ALU64 | BPF_ARSH | BPF_X:
 886
 887                        /* Check for bad case when dst_reg == rcx */
 888                        if (dst_reg == BPF_REG_4) {
 889                                /* mov r11, dst_reg */
 890                                EMIT_mov(AUX_REG, dst_reg);
 891                                dst_reg = AUX_REG;
 892                        }
 893
 894                        if (src_reg != BPF_REG_4) { /* common case */
 895                                EMIT1(0x51); /* push rcx */
 896
 897                                /* mov rcx, src_reg */
 898                                EMIT_mov(BPF_REG_4, src_reg);
 899                        }
 900
 901                        /* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
 902                        if (BPF_CLASS(insn->code) == BPF_ALU64)
 903                                EMIT1(add_1mod(0x48, dst_reg));
 904                        else if (is_ereg(dst_reg))
 905                                EMIT1(add_1mod(0x40, dst_reg));
 906
 907                        switch (BPF_OP(insn->code)) {
 908                        case BPF_LSH: b3 = 0xE0; break;
 909                        case BPF_RSH: b3 = 0xE8; break;
 910                        case BPF_ARSH: b3 = 0xF8; break;
 911                        }
 912                        EMIT2(0xD3, add_1reg(b3, dst_reg));
 913
 914                        if (src_reg != BPF_REG_4)
 915                                EMIT1(0x59); /* pop rcx */
 916
 917                        if (insn->dst_reg == BPF_REG_4)
 918                                /* mov dst_reg, r11 */
 919                                EMIT_mov(insn->dst_reg, AUX_REG);
 920                        break;
 921
 922                case BPF_ALU | BPF_END | BPF_FROM_BE:
 923                        switch (imm32) {
 924                        case 16:
 925                                /* Emit 'ror %ax, 8' to swap lower 2 bytes */
 926                                EMIT1(0x66);
 927                                if (is_ereg(dst_reg))
 928                                        EMIT1(0x41);
 929                                EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
 930
 931                                /* Emit 'movzwl eax, ax' */
 932                                if (is_ereg(dst_reg))
 933                                        EMIT3(0x45, 0x0F, 0xB7);
 934                                else
 935                                        EMIT2(0x0F, 0xB7);
 936                                EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
 937                                break;
 938                        case 32:
 939                                /* Emit 'bswap eax' to swap lower 4 bytes */
 940                                if (is_ereg(dst_reg))
 941                                        EMIT2(0x41, 0x0F);
 942                                else
 943                                        EMIT1(0x0F);
 944                                EMIT1(add_1reg(0xC8, dst_reg));
 945                                break;
 946                        case 64:
 947                                /* Emit 'bswap rax' to swap 8 bytes */
 948                                EMIT3(add_1mod(0x48, dst_reg), 0x0F,
 949                                      add_1reg(0xC8, dst_reg));
 950                                break;
 951                        }
 952                        break;
 953
 954                case BPF_ALU | BPF_END | BPF_FROM_LE:
 955                        switch (imm32) {
 956                        case 16:
 957                                /*
 958                                 * Emit 'movzwl eax, ax' to zero extend 16-bit
 959                                 * into 64 bit
 960                                 */
 961                                if (is_ereg(dst_reg))
 962                                        EMIT3(0x45, 0x0F, 0xB7);
 963                                else
 964                                        EMIT2(0x0F, 0xB7);
 965                                EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
 966                                break;
 967                        case 32:
 968                                /* Emit 'mov eax, eax' to clear upper 32-bits */
 969                                if (is_ereg(dst_reg))
 970                                        EMIT1(0x45);
 971                                EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
 972                                break;
 973                        case 64:
 974                                /* nop */
 975                                break;
 976                        }
 977                        break;
 978
 979                        /* ST: *(u8*)(dst_reg + off) = imm */
 980                case BPF_ST | BPF_MEM | BPF_B:
 981                        if (is_ereg(dst_reg))
 982                                EMIT2(0x41, 0xC6);
 983                        else
 984                                EMIT1(0xC6);
 985                        goto st;
 986                case BPF_ST | BPF_MEM | BPF_H:
 987                        if (is_ereg(dst_reg))
 988                                EMIT3(0x66, 0x41, 0xC7);
 989                        else
 990                                EMIT2(0x66, 0xC7);
 991                        goto st;
 992                case BPF_ST | BPF_MEM | BPF_W:
 993                        if (is_ereg(dst_reg))
 994                                EMIT2(0x41, 0xC7);
 995                        else
 996                                EMIT1(0xC7);
 997                        goto st;
 998                case BPF_ST | BPF_MEM | BPF_DW:
 999                        EMIT2(add_1mod(0x48, dst_reg), 0xC7);
1000
1001st:                     if (is_imm8(insn->off))
1002                                EMIT2(add_1reg(0x40, dst_reg), insn->off);
1003                        else
1004                                EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);
1005
1006                        EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
1007                        break;
1008
1009                        /* STX: *(u8*)(dst_reg + off) = src_reg */
1010                case BPF_STX | BPF_MEM | BPF_B:
1011                case BPF_STX | BPF_MEM | BPF_H:
1012                case BPF_STX | BPF_MEM | BPF_W:
1013                case BPF_STX | BPF_MEM | BPF_DW:
1014                        emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1015                        break;
1016
1017                        /* LDX: dst_reg = *(u8*)(src_reg + off) */
1018                case BPF_LDX | BPF_MEM | BPF_B:
1019                case BPF_LDX | BPF_PROBE_MEM | BPF_B:
1020                case BPF_LDX | BPF_MEM | BPF_H:
1021                case BPF_LDX | BPF_PROBE_MEM | BPF_H:
1022                case BPF_LDX | BPF_MEM | BPF_W:
1023                case BPF_LDX | BPF_PROBE_MEM | BPF_W:
1024                case BPF_LDX | BPF_MEM | BPF_DW:
1025                case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
1026                        emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1027                        if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
1028                                struct exception_table_entry *ex;
1029                                u8 *_insn = image + proglen;
1030                                s64 delta;
1031
1032                                if (!bpf_prog->aux->extable)
1033                                        break;
1034
1035                                if (excnt >= bpf_prog->aux->num_exentries) {
1036                                        pr_err("ex gen bug\n");
1037                                        return -EFAULT;
1038                                }
1039                                ex = &bpf_prog->aux->extable[excnt++];
1040
1041                                delta = _insn - (u8 *)&ex->insn;
1042                                if (!is_simm32(delta)) {
1043                                        pr_err("extable->insn doesn't fit into 32-bit\n");
1044                                        return -EFAULT;
1045                                }
1046                                ex->insn = delta;
1047
1048                                delta = (u8 *)ex_handler_bpf - (u8 *)&ex->handler;
1049                                if (!is_simm32(delta)) {
1050                                        pr_err("extable->handler doesn't fit into 32-bit\n");
1051                                        return -EFAULT;
1052                                }
1053                                ex->handler = delta;
1054
1055                                if (dst_reg > BPF_REG_9) {
1056                                        pr_err("verifier error\n");
1057                                        return -EFAULT;
1058                                }
1059                                /*
1060                                 * Compute size of x86 insn and its target dest x86 register.
1061                                 * ex_handler_bpf() will use lower 8 bits to adjust
1062                                 * pt_regs->ip to jump over this x86 instruction
1063                                 * and upper bits to figure out which pt_regs to zero out.
1064                                 * End result: x86 insn "mov rbx, qword ptr [rax+0x14]"
1065                                 * of 4 bytes will be ignored and rbx will be zero inited.
1066                                 */
1067                                ex->fixup = (prog - temp) | (reg2pt_regs[dst_reg] << 8);
1068                        }
1069                        break;
1070
1071                        /* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
1072                case BPF_STX | BPF_XADD | BPF_W:
1073                        /* Emit 'lock add dword ptr [rax + off], eax' */
1074                        if (is_ereg(dst_reg) || is_ereg(src_reg))
1075                                EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
1076                        else
1077                                EMIT2(0xF0, 0x01);
1078                        goto xadd;
1079                case BPF_STX | BPF_XADD | BPF_DW:
1080                        EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
1081xadd:                   if (is_imm8(insn->off))
1082                                EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
1083                        else
1084                                EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
1085                                            insn->off);
1086                        break;
1087
1088                        /* call */
1089                case BPF_JMP | BPF_CALL:
1090                        func = (u8 *) __bpf_call_base + imm32;
1091                        if (!imm32 || emit_call(&prog, func, image + addrs[i - 1]))
1092                                return -EINVAL;
1093                        break;
1094
1095                case BPF_JMP | BPF_TAIL_CALL:
1096                        if (imm32)
1097                                emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1],
1098                                                          &prog, addrs[i], image);
1099                        else
1100                                emit_bpf_tail_call_indirect(&prog);
1101                        break;
1102
1103                        /* cond jump */
1104                case BPF_JMP | BPF_JEQ | BPF_X:
1105                case BPF_JMP | BPF_JNE | BPF_X:
1106                case BPF_JMP | BPF_JGT | BPF_X:
1107                case BPF_JMP | BPF_JLT | BPF_X:
1108                case BPF_JMP | BPF_JGE | BPF_X:
1109                case BPF_JMP | BPF_JLE | BPF_X:
1110                case BPF_JMP | BPF_JSGT | BPF_X:
1111                case BPF_JMP | BPF_JSLT | BPF_X:
1112                case BPF_JMP | BPF_JSGE | BPF_X:
1113                case BPF_JMP | BPF_JSLE | BPF_X:
1114                case BPF_JMP32 | BPF_JEQ | BPF_X:
1115                case BPF_JMP32 | BPF_JNE | BPF_X:
1116                case BPF_JMP32 | BPF_JGT | BPF_X:
1117                case BPF_JMP32 | BPF_JLT | BPF_X:
1118                case BPF_JMP32 | BPF_JGE | BPF_X:
1119                case BPF_JMP32 | BPF_JLE | BPF_X:
1120                case BPF_JMP32 | BPF_JSGT | BPF_X:
1121                case BPF_JMP32 | BPF_JSLT | BPF_X:
1122                case BPF_JMP32 | BPF_JSGE | BPF_X:
1123                case BPF_JMP32 | BPF_JSLE | BPF_X:
1124                        /* cmp dst_reg, src_reg */
1125                        if (BPF_CLASS(insn->code) == BPF_JMP)
1126                                EMIT1(add_2mod(0x48, dst_reg, src_reg));
1127                        else if (is_ereg(dst_reg) || is_ereg(src_reg))
1128                                EMIT1(add_2mod(0x40, dst_reg, src_reg));
1129                        EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
1130                        goto emit_cond_jmp;
1131
1132                case BPF_JMP | BPF_JSET | BPF_X:
1133                case BPF_JMP32 | BPF_JSET | BPF_X:
1134                        /* test dst_reg, src_reg */
1135                        if (BPF_CLASS(insn->code) == BPF_JMP)
1136                                EMIT1(add_2mod(0x48, dst_reg, src_reg));
1137                        else if (is_ereg(dst_reg) || is_ereg(src_reg))
1138                                EMIT1(add_2mod(0x40, dst_reg, src_reg));
1139                        EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
1140                        goto emit_cond_jmp;
1141
1142                case BPF_JMP | BPF_JSET | BPF_K:
1143                case BPF_JMP32 | BPF_JSET | BPF_K:
1144                        /* test dst_reg, imm32 */
1145                        if (BPF_CLASS(insn->code) == BPF_JMP)
1146                                EMIT1(add_1mod(0x48, dst_reg));
1147                        else if (is_ereg(dst_reg))
1148                                EMIT1(add_1mod(0x40, dst_reg));
1149                        EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
1150                        goto emit_cond_jmp;
1151
1152                case BPF_JMP | BPF_JEQ | BPF_K:
1153                case BPF_JMP | BPF_JNE | BPF_K:
1154                case BPF_JMP | BPF_JGT | BPF_K:
1155                case BPF_JMP | BPF_JLT | BPF_K:
1156                case BPF_JMP | BPF_JGE | BPF_K:
1157                case BPF_JMP | BPF_JLE | BPF_K:
1158                case BPF_JMP | BPF_JSGT | BPF_K:
1159                case BPF_JMP | BPF_JSLT | BPF_K:
1160                case BPF_JMP | BPF_JSGE | BPF_K:
1161                case BPF_JMP | BPF_JSLE | BPF_K:
1162                case BPF_JMP32 | BPF_JEQ | BPF_K:
1163                case BPF_JMP32 | BPF_JNE | BPF_K:
1164                case BPF_JMP32 | BPF_JGT | BPF_K:
1165                case BPF_JMP32 | BPF_JLT | BPF_K:
1166                case BPF_JMP32 | BPF_JGE | BPF_K:
1167                case BPF_JMP32 | BPF_JLE | BPF_K:
1168                case BPF_JMP32 | BPF_JSGT | BPF_K:
1169                case BPF_JMP32 | BPF_JSLT | BPF_K:
1170                case BPF_JMP32 | BPF_JSGE | BPF_K:
1171                case BPF_JMP32 | BPF_JSLE | BPF_K:
1172                        /* test dst_reg, dst_reg to save one extra byte */
1173                        if (imm32 == 0) {
1174                                if (BPF_CLASS(insn->code) == BPF_JMP)
1175                                        EMIT1(add_2mod(0x48, dst_reg, dst_reg));
1176                                else if (is_ereg(dst_reg))
1177                                        EMIT1(add_2mod(0x40, dst_reg, dst_reg));
1178                                EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
1179                                goto emit_cond_jmp;
1180                        }
1181
1182                        /* cmp dst_reg, imm8/32 */
1183                        if (BPF_CLASS(insn->code) == BPF_JMP)
1184                                EMIT1(add_1mod(0x48, dst_reg));
1185                        else if (is_ereg(dst_reg))
1186                                EMIT1(add_1mod(0x40, dst_reg));
1187
1188                        if (is_imm8(imm32))
1189                                EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
1190                        else
1191                                EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);
1192
1193emit_cond_jmp:          /* Convert BPF opcode to x86 */
1194                        switch (BPF_OP(insn->code)) {
1195                        case BPF_JEQ:
1196                                jmp_cond = X86_JE;
1197                                break;
1198                        case BPF_JSET:
1199                        case BPF_JNE:
1200                                jmp_cond = X86_JNE;
1201                                break;
1202                        case BPF_JGT:
1203                                /* GT is unsigned '>', JA in x86 */
1204                                jmp_cond = X86_JA;
1205                                break;
1206                        case BPF_JLT:
1207                                /* LT is unsigned '<', JB in x86 */
1208                                jmp_cond = X86_JB;
1209                                break;
1210                        case BPF_JGE:
1211                                /* GE is unsigned '>=', JAE in x86 */
1212                                jmp_cond = X86_JAE;
1213                                break;
1214                        case BPF_JLE:
1215                                /* LE is unsigned '<=', JBE in x86 */
1216                                jmp_cond = X86_JBE;
1217                                break;
1218                        case BPF_JSGT:
1219                                /* Signed '>', GT in x86 */
1220                                jmp_cond = X86_JG;
1221                                break;
1222                        case BPF_JSLT:
1223                                /* Signed '<', LT in x86 */
1224                                jmp_cond = X86_JL;
1225                                break;
1226                        case BPF_JSGE:
1227                                /* Signed '>=', GE in x86 */
1228                                jmp_cond = X86_JGE;
1229                                break;
1230                        case BPF_JSLE:
1231                                /* Signed '<=', LE in x86 */
1232                                jmp_cond = X86_JLE;
1233                                break;
1234                        default: /* to silence GCC warning */
1235                                return -EFAULT;
1236                        }
1237                        jmp_offset = addrs[i + insn->off] - addrs[i];
1238                        if (is_imm8(jmp_offset)) {
1239                                EMIT2(jmp_cond, jmp_offset);
1240                        } else if (is_simm32(jmp_offset)) {
1241                                EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
1242                        } else {
1243                                pr_err("cond_jmp gen bug %llx\n", jmp_offset);
1244                                return -EFAULT;
1245                        }
1246
1247                        break;
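                        /*
                         * Note on the offset math above: addrs[i] holds the end
                         * offset of BPF insn i in the x86 image, so
                         * addrs[i + insn->off] - addrs[i] is exactly the
                         * rel8/rel32 the jcc needs - BPF jump offsets are
                         * relative to the insn following the branch, just like
                         * x86 relative jumps.
                         */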
1248
1249                case BPF_JMP | BPF_JA:
1250                        if (insn->off == -1)
1251                                /* -1 jmp instructions will always jump
1252                                 * backwards two bytes. Explicitly handling
1253                                 * this case avoids wasting too many passes
1254                                 * when there are long sequences of replaced
1255                                 * dead code.
1256                                 */
1257                                jmp_offset = -2;
1258                        else
1259                                jmp_offset = addrs[i + insn->off] - addrs[i];
1260
1261                        if (!jmp_offset)
1262                                /* Optimize out nop jumps */
1263                                break;
1264emit_jmp:
1265                        if (is_imm8(jmp_offset)) {
1266                                EMIT2(0xEB, jmp_offset);
1267                        } else if (is_simm32(jmp_offset)) {
1268                                EMIT1_off32(0xE9, jmp_offset);
1269                        } else {
1270                                pr_err("jmp gen bug %llx\n", jmp_offset);
1271                                return -EFAULT;
1272                        }
1273                        break;
1274
1275                case BPF_JMP | BPF_EXIT:
1276                        if (seen_exit) {
1277                                jmp_offset = ctx->cleanup_addr - addrs[i];
1278                                goto emit_jmp;
1279                        }
1280                        seen_exit = true;
1281                        /* Update cleanup_addr */
1282                        ctx->cleanup_addr = proglen;
1283                        if (!bpf_prog_was_classic(bpf_prog))
1284                                EMIT1(0x5B); /* get rid of tail_call_cnt */
1285                        EMIT2(0x41, 0x5F);   /* pop r15 */
1286                        EMIT2(0x41, 0x5E);   /* pop r14 */
1287                        EMIT2(0x41, 0x5D);   /* pop r13 */
1288                        EMIT1(0x5B);         /* pop rbx */
1289                        EMIT1(0xC9);         /* leave */
1290                        EMIT1(0xC3);         /* ret */
1291                        break;
1292
1293                default:
1294                        /*
1295                         * By design, the x86-64 JIT should support all BPF instructions.
1296                         * This error will be seen if a new instruction was added
1297                         * to the interpreter, but not to the JIT, or if there is
1298                         * junk in bpf_prog.
1299                         */
1300                        pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
1301                        return -EINVAL;
1302                }
1303
1304                ilen = prog - temp;
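                    /*
                     * Each insn is emitted into the local temp[] buffer first and only
                     * copied into the image below, so a single insn overflowing that
                     * buffer must be caught here.
                     */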
1305                if (ilen > BPF_MAX_INSN_SIZE) {
1306                        pr_err("bpf_jit: fatal insn size error\n");
1307                        return -EFAULT;
1308                }
1309
1310                if (image) {
1311                        if (unlikely(proglen + ilen > oldproglen)) {
1312                                pr_err("bpf_jit: fatal error\n");
1313                                return -EFAULT;
1314                        }
1315                        memcpy(image + proglen, temp, ilen);
1316                }
1317                proglen += ilen;
1318                addrs[i] = proglen;
1319                prog = temp;
1320        }
1321
1322        if (image && excnt != bpf_prog->aux->num_exentries) {
1323                pr_err("extable is not populated\n");
1324                return -EFAULT;
1325        }
1326        return proglen;
1327}
1328
1329static void save_regs(struct btf_func_model *m, u8 **prog, int nr_args,
1330                      int stack_size)
1331{
1332        int i;
1333        /* Store function arguments to stack.
1334         * For a function that accepts two pointers the sequence will be:
1335         * mov QWORD PTR [rbp-0x10],rdi
1336         * mov QWORD PTR [rbp-0x8],rsi
1337         */
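            /* BPF_REG_1..BPF_REG_5 map to rdi, rsi, rdx, rcx and r8; the sixth
             * x86-64 argument register, r9, is not part of the BPF register
             * set, hence the X86_REG_R9 alias used for i == 5.
             */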
1338        for (i = 0; i < min(nr_args, 6); i++)
1339                emit_stx(prog, bytes_to_bpf_size(m->arg_size[i]),
1340                         BPF_REG_FP,
1341                         i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
1342                         -(stack_size - i * 8));
1343}
1344
1345static void restore_regs(struct btf_func_model *m, u8 **prog, int nr_args,
1346                         int stack_size)
1347{
1348        int i;
1349
1350        /* Restore function arguments from stack.
1351         * For a function that accepts two pointers the sequence will be:
1352         * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
1353         * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
1354         */
1355        for (i = 0; i < min(nr_args, 6); i++)
1356                emit_ldx(prog, bytes_to_bpf_size(m->arg_size[i]),
1357                         i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
1358                         BPF_REG_FP,
1359                         -(stack_size - i * 8));
1360}
1361
1362static int invoke_bpf(struct btf_func_model *m, u8 **pprog,
1363                      struct bpf_prog **progs, int prog_cnt, int stack_size)
1364{
1365        u8 *prog = *pprog;
1366        int cnt = 0, i;
1367
1368        for (i = 0; i < prog_cnt; i++) {
1369                if (emit_call(&prog, __bpf_prog_enter, prog))
1370                        return -EINVAL;
1371                /* remember prog start time returned by __bpf_prog_enter */
1372                emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
1373
1374                /* arg1: lea rdi, [rbp - stack_size] */
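                    /* -stack_size is emitted as a disp8; stack_size is at most
                     * 6 * 8 + 8 == 56 here, so it always fits.
                     */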
1375                EMIT4(0x48, 0x8D, 0x7D, -stack_size);
1376                /* arg2: progs[i]->insnsi for interpreter */
1377                if (!progs[i]->jited)
1378                        emit_mov_imm64(&prog, BPF_REG_2,
1379                                       (long) progs[i]->insnsi >> 32,
1380                                       (u32) (long) progs[i]->insnsi);
1381                /* call JITed bpf program or interpreter */
1382                if (emit_call(&prog, progs[i]->bpf_func, prog))
1383                        return -EINVAL;
1384
1385                /* arg1: mov rdi, progs[i] */
1386                emit_mov_imm64(&prog, BPF_REG_1, (long) progs[i] >> 32,
1387                               (u32) (long) progs[i]);
1388                /* arg2: mov rsi, rbx <- start time in nsec */
1389                emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
1390                if (emit_call(&prog, __bpf_prog_exit, prog))
1391                        return -EINVAL;
1392        }
1393        *pprog = prog;
1394        return 0;
1395}
1396
1397/* Example:
1398 * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
1399 * its 'struct btf_func_model' will have nr_args=2
1400 * The assembly code when eth_type_trans is executing after trampoline:
1401 *
1402 * push rbp
1403 * mov rbp, rsp
1404 * sub rsp, 16                     // space for skb and dev
1405 * push rbx                        // temp regs to pass start time
1406 * mov qword ptr [rbp - 16], rdi   // save skb pointer to stack
1407 * mov qword ptr [rbp - 8], rsi    // save dev pointer to stack
1408 * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
1409 * mov rbx, rax                    // remember start time if bpf stats are enabled
1410 * lea rdi, [rbp - 16]             // R1==ctx of bpf prog
1411 * call addr_of_jited_FENTRY_prog
1412 * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
1413 * mov rsi, rbx                    // prog start time
1414 * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
1415 * mov rdi, qword ptr [rbp - 16]   // restore skb pointer from stack
1416 * mov rsi, qword ptr [rbp - 8]    // restore dev pointer from stack
1417 * pop rbx
1418 * leave
1419 * ret
1420 *
1421 * eth_type_trans has a 5-byte nop at the beginning. These 5 bytes will be
1422 * replaced with 'call generated_bpf_trampoline'. When it returns
1423 * eth_type_trans will continue executing with original skb and dev pointers.
1424 *
1425 * The assembly code when eth_type_trans is called from trampoline:
1426 *
1427 * push rbp
1428 * mov rbp, rsp
1429 * sub rsp, 24                     // space for skb, dev, return value
1430 * push rbx                        // temp regs to pass start time
1431 * mov qword ptr [rbp - 24], rdi   // save skb pointer to stack
1432 * mov qword ptr [rbp - 16], rsi   // save dev pointer to stack
1433 * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
1434 * mov rbx, rax                    // remember start time if bpf stats are enabled
1435 * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
1436 * call addr_of_jited_FENTRY_prog  // bpf prog can access skb and dev
1437 * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
1438 * mov rsi, rbx                    // prog start time
1439 * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
1440 * mov rdi, qword ptr [rbp - 24]   // restore skb pointer from stack
1441 * mov rsi, qword ptr [rbp - 16]   // restore dev pointer from stack
1442 * call eth_type_trans+5           // execute body of eth_type_trans
1443 * mov qword ptr [rbp - 8], rax    // save return value
1444 * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
1445 * mov rbx, rax                    // remember start time if bpf stats are enabled
1446 * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
1447 * call addr_of_jited_FEXIT_prog   // bpf prog can access skb, dev, return value
1448 * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
1449 * mov rsi, rbx                    // prog start time
1450 * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
1451 * mov rax, qword ptr [rbp - 8]    // restore eth_type_trans's return value
1452 * pop rbx
1453 * leave
1454 * add rsp, 8                      // skip eth_type_trans's frame
1455 * ret                             // return to its caller
1456 */
1457int arch_prepare_bpf_trampoline(void *image, struct btf_func_model *m, u32 flags,
1458                                struct bpf_prog **fentry_progs, int fentry_cnt,
1459                                struct bpf_prog **fexit_progs, int fexit_cnt,
1460                                void *orig_call)
1461{
1462        int cnt = 0, nr_args = m->nr_args;
1463        int stack_size = nr_args * 8;
1464        u8 *prog;
1465
1466        /* x86-64 supports up to 6 arguments. 7+ can be added in the future */
1467        if (nr_args > 6)
1468                return -ENOTSUPP;
1469
1470        if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&
1471            (flags & BPF_TRAMP_F_SKIP_FRAME))
1472                return -EINVAL;
1473
1474        if (flags & BPF_TRAMP_F_CALL_ORIG)
1475                stack_size += 8; /* room for return value of orig_call */
1476
1477        if (flags & BPF_TRAMP_F_SKIP_FRAME)
1478                /* skip patched call instruction and point orig_call to actual
1479                 * body of the kernel function.
1480                 */
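                    /* X86_PATCH_SIZE is 5 bytes: the size of the call (or nop)
                     * sitting at the function entry, as described in the example
                     * comment above.
                     */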
1481                orig_call += X86_PATCH_SIZE;
1482
1483        prog = image;
1484
1485        EMIT1(0x55);             /* push rbp */
1486        EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
1487        EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */
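            /* The imm8 form of sub above is safe: stack_size is at most 56
             * (nr_args <= 6 was checked above, plus 8 bytes for the return
             * value slot).
             */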
1488        EMIT1(0x53);             /* push rbx */
1489
1490        save_regs(m, &prog, nr_args, stack_size);
1491
1492        if (fentry_cnt)
1493                if (invoke_bpf(m, &prog, fentry_progs, fentry_cnt, stack_size))
1494                        return -EINVAL;
1495
1496        if (flags & BPF_TRAMP_F_CALL_ORIG) {
1497                if (fentry_cnt)
1498                        restore_regs(m, &prog, nr_args, stack_size);
1499
1500                /* call original function */
1501                if (emit_call(&prog, orig_call, prog))
1502                        return -EINVAL;
1503                /* remember the return value on the stack for the bpf prog to access */
1504                emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
1505        }
1506
1507        if (fexit_cnt)
1508                if (invoke_bpf(m, &prog, fexit_progs, fexit_cnt, stack_size))
1509                        return -EINVAL;
1510
1511        if (flags & BPF_TRAMP_F_RESTORE_REGS)
1512                restore_regs(m, &prog, nr_args, stack_size);
1513
1514        if (flags & BPF_TRAMP_F_CALL_ORIG)
1515                /* restore the original return value into RAX */
1516                emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
1517
1518        EMIT1(0x5B); /* pop rbx */
1519        EMIT1(0xC9); /* leave */
1520        if (flags & BPF_TRAMP_F_SKIP_FRAME)
1521                /* skip our return address and return to parent */
1522                EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
1523        EMIT1(0xC3); /* ret */
1524        /* One half of the page holds the active, running trampoline.
1525         * The other half is an area for the next trampoline.
1526         * Make sure the trampoline generation logic doesn't overflow.
1527         */
1528        if (WARN_ON_ONCE(prog - (u8 *)image > PAGE_SIZE / 2 - BPF_INSN_SAFETY))
1529                return -EFAULT;
1530        return 0;
1531}
1532
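    /*
     * Intermediate JIT state kept in prog->aux->jit_data between the two
     * invocations of bpf_int_jit_compile() that a program with bpf2bpf
     * subprogram calls goes through: the extra pass re-emits the image once
     * the final addresses of all subprograms are known.
     */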
1533struct x64_jit_data {
1534        struct bpf_binary_header *header;
1535        int *addrs;
1536        u8 *image;
1537        int proglen;
1538        struct jit_context ctx;
1539};
1540
1541struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
1542{
1543        struct bpf_binary_header *header = NULL;
1544        struct bpf_prog *tmp, *orig_prog = prog;
1545        struct x64_jit_data *jit_data;
1546        int proglen, oldproglen = 0;
1547        struct jit_context ctx = {};
1548        bool tmp_blinded = false;
1549        bool extra_pass = false;
1550        u8 *image = NULL;
1551        int *addrs;
1552        int pass;
1553        int i;
1554
1555        if (!prog->jit_requested)
1556                return orig_prog;
1557
1558        tmp = bpf_jit_blind_constants(prog);
1559        /*
1560         * If blinding was requested and we failed during blinding,
1561         * we must fall back to the interpreter.
1562         */
1563        if (IS_ERR(tmp))
1564                return orig_prog;
1565        if (tmp != prog) {
1566                tmp_blinded = true;
1567                prog = tmp;
1568        }
1569
1570        jit_data = prog->aux->jit_data;
1571        if (!jit_data) {
1572                jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
1573                if (!jit_data) {
1574                        prog = orig_prog;
1575                        goto out;
1576                }
1577                prog->aux->jit_data = jit_data;
1578        }
1579        addrs = jit_data->addrs;
1580        if (addrs) {
1581                ctx = jit_data->ctx;
1582                oldproglen = jit_data->proglen;
1583                image = jit_data->image;
1584                header = jit_data->header;
1585                extra_pass = true;
1586                goto skip_init_addrs;
1587        }
1588        addrs = kmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
1589        if (!addrs) {
1590                prog = orig_prog;
1591                goto out_addrs;
1592        }
1593
1594        /*
1595         * Before the first pass, make a rough estimation of addrs[]:
1596         * each BPF instruction is translated to less than 64 bytes.
1597         */
1598        for (proglen = 0, i = 0; i <= prog->len; i++) {
1599                proglen += 64;
1600                addrs[i] = proglen;
1601        }
1602        ctx.cleanup_addr = proglen;
1603skip_init_addrs:
1604
1605        /*
1606         * JITed image shrinks with every pass and the loop iterates
1607         * until the image stops shrinking. Very large BPF programs
1608         * may converge on the last pass. In such a case, do one more
1609         * pass to emit the final image.
1610         */
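            /*
             * Concretely: size-only passes run with image == NULL until two
             * consecutive passes produce the same proglen; the binary image is
             * then allocated and one more pass writes the final instructions
             * into it.
             */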
1611        for (pass = 0; pass < 20 || image; pass++) {
1612                proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
1613                if (proglen <= 0) {
1614out_image:
1615                        image = NULL;
1616                        if (header)
1617                                bpf_jit_binary_free(header);
1618                        prog = orig_prog;
1619                        goto out_addrs;
1620                }
1621                if (image) {
1622                        if (proglen != oldproglen) {
1623                                pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
1624                                       proglen, oldproglen);
1625                                goto out_image;
1626                        }
1627                        break;
1628                }
1629                if (proglen == oldproglen) {
1630                        /*
1631                         * The number of entries in extable is the number of BPF_LDX
1632                         * insns that access kernel memory via "pointer to BTF type".
1633                         * The verifier changed their opcode from LDX|MEM|size
1634                         * to LDX|PROBE_MEM|size to make JITing easier.
1635                         */
1636                        u32 align = __alignof__(struct exception_table_entry);
1637                        u32 extable_size = prog->aux->num_exentries *
1638                                sizeof(struct exception_table_entry);
1639
1640                        /* allocate module memory for x86 insns and extable */
1641                        header = bpf_jit_binary_alloc(roundup(proglen, align) + extable_size,
1642                                                      &image, align, jit_fill_hole);
1643                        if (!header) {
1644                                prog = orig_prog;
1645                                goto out_addrs;
1646                        }
1647                        prog->aux->extable = (void *) image + roundup(proglen, align);
1648                }
1649                oldproglen = proglen;
1650                cond_resched();
1651        }
1652
1653        if (bpf_jit_enable > 1)
1654                bpf_jit_dump(prog->len, proglen, pass + 1, image);
1655
1656        if (image) {
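                    /* Final program (or extra pass of a subprogram): fix up
                     * direct tail calls and make the image read-only. Otherwise
                     * stash the JIT state for the upcoming extra pass.
                     */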
1657                if (!prog->is_func || extra_pass) {
1658                        bpf_tail_call_direct_fixup(prog);
1659                        bpf_jit_binary_lock_ro(header);
1660                } else {
1661                        jit_data->addrs = addrs;
1662                        jit_data->ctx = ctx;
1663                        jit_data->proglen = proglen;
1664                        jit_data->image = image;
1665                        jit_data->header = header;
1666                }
1667                prog->bpf_func = (void *)image;
1668                prog->jited = 1;
1669                prog->jited_len = proglen;
1670        } else {
1671                prog = orig_prog;
1672        }
1673
1674        if (!image || !prog->is_func || extra_pass) {
1675                if (image)
1676                        bpf_prog_fill_jited_linfo(prog, addrs + 1);
1677out_addrs:
1678                kfree(addrs);
1679                kfree(jit_data);
1680                prog->aux->jit_data = NULL;
1681        }
1682out:
1683        if (tmp_blinded)
1684                bpf_jit_prog_release_other(prog, prog == orig_prog ?
1685                                           tmp : orig_prog);
1686        return prog;
1687}
1688