linux/arch/x86/net/bpf_jit_comp.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * bpf_jit_comp.c: BPF JIT compiler
   4 *
   5 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
   6 * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
   7 */
   8#include <linux/netdevice.h>
   9#include <linux/filter.h>
  10#include <linux/if_vlan.h>
  11#include <linux/bpf.h>
  12#include <linux/memory.h>
  13#include <linux/sort.h>
  14#include <asm/extable.h>
  15#include <asm/set_memory.h>
  16#include <asm/nospec-branch.h>
  17#include <asm/text-patching.h>
  18#include <asm/asm-prototypes.h>
  19
  20static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
  21{
  22        if (len == 1)
  23                *ptr = bytes;
  24        else if (len == 2)
  25                *(u16 *)ptr = bytes;
  26        else {
  27                *(u32 *)ptr = bytes;
  28                barrier();
  29        }
  30        return ptr + len;
  31}
  32
  33#define EMIT(bytes, len) \
  34        do { prog = emit_code(prog, bytes, len); cnt += len; } while (0)
  35
  36#define EMIT1(b1)               EMIT(b1, 1)
  37#define EMIT2(b1, b2)           EMIT((b1) + ((b2) << 8), 2)
  38#define EMIT3(b1, b2, b3)       EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
  39#define EMIT4(b1, b2, b3, b4)   EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
  40
  41#define EMIT1_off32(b1, off) \
  42        do { EMIT1(b1); EMIT(off, 4); } while (0)
  43#define EMIT2_off32(b1, b2, off) \
  44        do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
  45#define EMIT3_off32(b1, b2, b3, off) \
  46        do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
  47#define EMIT4_off32(b1, b2, b3, b4, off) \
  48        do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
  49
  50static bool is_imm8(int value)
  51{
  52        return value <= 127 && value >= -128;
  53}
  54
  55static bool is_simm32(s64 value)
  56{
  57        return value == (s64)(s32)value;
  58}
  59
  60static bool is_uimm32(u64 value)
  61{
  62        return value == (u64)(u32)value;
  63}
  64
  65/* mov dst, src */
  66#define EMIT_mov(DST, SRC)                                                               \
  67        do {                                                                             \
  68                if (DST != SRC)                                                          \
  69                        EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
  70        } while (0)
  71
  72static int bpf_size_to_x86_bytes(int bpf_size)
  73{
  74        if (bpf_size == BPF_W)
  75                return 4;
  76        else if (bpf_size == BPF_H)
  77                return 2;
  78        else if (bpf_size == BPF_B)
  79                return 1;
  80        else if (bpf_size == BPF_DW)
  81                return 4; /* imm32 */
  82        else
  83                return 0;
  84}
  85
  86/*
   87 * List of x86 conditional jump opcodes (. + s8)
  88 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
  89 */
  90#define X86_JB  0x72
  91#define X86_JAE 0x73
  92#define X86_JE  0x74
  93#define X86_JNE 0x75
  94#define X86_JBE 0x76
  95#define X86_JA  0x77
  96#define X86_JL  0x7C
  97#define X86_JGE 0x7D
  98#define X86_JLE 0x7E
  99#define X86_JG  0x7F
 100
 101/* Pick a register outside of BPF range for JIT internal work */
 102#define AUX_REG (MAX_BPF_JIT_REG + 1)
 103#define X86_REG_R9 (MAX_BPF_JIT_REG + 2)
 104
 105/*
 106 * The following table maps BPF registers to x86-64 registers.
 107 *
 108 * x86-64 register R12 is unused, since if used as base address
 109 * register in load/store instructions, it always needs an
 110 * extra byte of encoding and is callee saved.
 111 *
 112 * x86-64 register R9 is not used by BPF programs, but can be used by BPF
 113 * trampoline. x86-64 register R10 is used for blinding (if enabled).
 114 */
 115static const int reg2hex[] = {
 116        [BPF_REG_0] = 0,  /* RAX */
 117        [BPF_REG_1] = 7,  /* RDI */
 118        [BPF_REG_2] = 6,  /* RSI */
 119        [BPF_REG_3] = 2,  /* RDX */
 120        [BPF_REG_4] = 1,  /* RCX */
 121        [BPF_REG_5] = 0,  /* R8  */
 122        [BPF_REG_6] = 3,  /* RBX callee saved */
 123        [BPF_REG_7] = 5,  /* R13 callee saved */
 124        [BPF_REG_8] = 6,  /* R14 callee saved */
 125        [BPF_REG_9] = 7,  /* R15 callee saved */
 126        [BPF_REG_FP] = 5, /* RBP readonly */
 127        [BPF_REG_AX] = 2, /* R10 temp register */
 128        [AUX_REG] = 3,    /* R11 temp register */
 129        [X86_REG_R9] = 1, /* R9 register, 6th function argument */
 130};
 131
 132static const int reg2pt_regs[] = {
 133        [BPF_REG_0] = offsetof(struct pt_regs, ax),
 134        [BPF_REG_1] = offsetof(struct pt_regs, di),
 135        [BPF_REG_2] = offsetof(struct pt_regs, si),
 136        [BPF_REG_3] = offsetof(struct pt_regs, dx),
 137        [BPF_REG_4] = offsetof(struct pt_regs, cx),
 138        [BPF_REG_5] = offsetof(struct pt_regs, r8),
 139        [BPF_REG_6] = offsetof(struct pt_regs, bx),
 140        [BPF_REG_7] = offsetof(struct pt_regs, r13),
 141        [BPF_REG_8] = offsetof(struct pt_regs, r14),
 142        [BPF_REG_9] = offsetof(struct pt_regs, r15),
 143};
 144
 145/*
 146 * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15
 147 * which need extra byte of encoding.
 148 * rax,rcx,...,rbp have simpler encoding
 149 */
 150static bool is_ereg(u32 reg)
 151{
 152        return (1 << reg) & (BIT(BPF_REG_5) |
 153                             BIT(AUX_REG) |
 154                             BIT(BPF_REG_7) |
 155                             BIT(BPF_REG_8) |
 156                             BIT(BPF_REG_9) |
 157                             BIT(X86_REG_R9) |
 158                             BIT(BPF_REG_AX));
 159}
 160
 161/*
 162 * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64
 163 * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte
 164 * of encoding. al,cl,dl,bl have simpler encoding.
 165 */
 166static bool is_ereg_8l(u32 reg)
 167{
 168        return is_ereg(reg) ||
 169            (1 << reg) & (BIT(BPF_REG_1) |
 170                          BIT(BPF_REG_2) |
 171                          BIT(BPF_REG_FP));
 172}
 173
 174static bool is_axreg(u32 reg)
 175{
 176        return reg == BPF_REG_0;
 177}
 178
 179/* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
 180static u8 add_1mod(u8 byte, u32 reg)
 181{
 182        if (is_ereg(reg))
 183                byte |= 1;
 184        return byte;
 185}
 186
 187static u8 add_2mod(u8 byte, u32 r1, u32 r2)
 188{
 189        if (is_ereg(r1))
 190                byte |= 1;
 191        if (is_ereg(r2))
 192                byte |= 4;
 193        return byte;
 194}
 195
 196/* Encode 'dst_reg' register into x86-64 opcode 'byte' */
 197static u8 add_1reg(u8 byte, u32 dst_reg)
 198{
 199        return byte + reg2hex[dst_reg];
 200}
 201
 202/* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
 203static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
 204{
 205        return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
 206}
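/*
 * Worked example: EMIT_mov(BPF_REG_7, BPF_REG_0) moves rax into r13.
 * add_2mod(0x48, ...) sets bit 0 (REX.B, which extends the ModRM r/m
 * field, here the destination) because BPF_REG_7 lives in an extended
 * register, giving REX 0x49; bit 2 (REX.R) would extend the reg field
 * for the source. add_2reg(0xC0, ...) builds the register-direct ModRM
 * 0xC0 + 5 + (0 << 3) = 0xC5, so with opcode 0x89 the bytes are
 * "49 89 C5", i.e. mov r13, rax.
 */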
 207
 208/* Some 1-byte opcodes for binary ALU operations */
 209static u8 simple_alu_opcodes[] = {
 210        [BPF_ADD] = 0x01,
 211        [BPF_SUB] = 0x29,
 212        [BPF_AND] = 0x21,
 213        [BPF_OR] = 0x09,
 214        [BPF_XOR] = 0x31,
 215        [BPF_LSH] = 0xE0,
 216        [BPF_RSH] = 0xE8,
 217        [BPF_ARSH] = 0xF8,
 218};
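/*
 * The ADD/SUB/AND/OR/XOR entries are the x86 "op r/m, reg" opcode
 * bytes and are emitted together with a mod=11 ModRM built by
 * add_2reg() (e.g. 0x01 /r = add r/m, reg). The LSH/RSH/ARSH entries
 * are not opcodes but ModRM bytes carrying the /4, /5 and /7 opcode
 * extensions of the shift group (0xD1/0xC1/0xD3), so they are passed
 * through add_1reg() to fill in the register being shifted.
 */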
 219
 220static void jit_fill_hole(void *area, unsigned int size)
 221{
 222        /* Fill whole space with INT3 instructions */
 223        memset(area, 0xcc, size);
 224}
 225
 226struct jit_context {
 227        int cleanup_addr; /* Epilogue code offset */
 228};
 229
 230/* Maximum number of bytes emitted while JITing one eBPF insn */
 231#define BPF_MAX_INSN_SIZE       128
 232#define BPF_INSN_SAFETY         64
 233
 234/* Number of bytes emit_patch() needs to generate instructions */
 235#define X86_PATCH_SIZE          5
 236/* Number of bytes that will be skipped on tailcall */
 237#define X86_TAIL_CALL_OFFSET    11
 238
 239static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
 240{
 241        u8 *prog = *pprog;
 242        int cnt = 0;
 243
 244        if (callee_regs_used[0])
 245                EMIT1(0x53);         /* push rbx */
 246        if (callee_regs_used[1])
 247                EMIT2(0x41, 0x55);   /* push r13 */
 248        if (callee_regs_used[2])
 249                EMIT2(0x41, 0x56);   /* push r14 */
 250        if (callee_regs_used[3])
 251                EMIT2(0x41, 0x57);   /* push r15 */
 252        *pprog = prog;
 253}
 254
 255static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
 256{
 257        u8 *prog = *pprog;
 258        int cnt = 0;
 259
 260        if (callee_regs_used[3])
 261                EMIT2(0x41, 0x5F);   /* pop r15 */
 262        if (callee_regs_used[2])
 263                EMIT2(0x41, 0x5E);   /* pop r14 */
 264        if (callee_regs_used[1])
 265                EMIT2(0x41, 0x5D);   /* pop r13 */
 266        if (callee_regs_used[0])
 267                EMIT1(0x5B);         /* pop rbx */
 268        *pprog = prog;
 269}
 270
 271/*
 272 * Emit x86-64 prologue code for BPF program.
 273 * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes
 274 * while jumping to another program
 275 */
 276static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
 277                          bool tail_call_reachable, bool is_subprog)
 278{
 279        u8 *prog = *pprog;
 280        int cnt = X86_PATCH_SIZE;
 281
 282        /* BPF trampoline can be made to work without these nops,
 283         * but let's waste 5 bytes for now and optimize later
 284         */
 285        memcpy(prog, ideal_nops[NOP_ATOMIC5], cnt);
 286        prog += cnt;
 287        if (!ebpf_from_cbpf) {
 288                if (tail_call_reachable && !is_subprog)
 289                        EMIT2(0x31, 0xC0); /* xor eax, eax */
 290                else
 291                        EMIT2(0x66, 0x90); /* nop2 */
 292        }
 293        EMIT1(0x55);             /* push rbp */
 294        EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
 295        /* sub rsp, rounded_stack_depth */
 296        if (stack_depth)
 297                EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
 298        if (tail_call_reachable)
 299                EMIT1(0x50);         /* push rax */
 300        *pprog = prog;
 301}
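/*
 * Resulting prologue layout for a native eBPF program:
 *
 *   5 bytes  nop5                  patchable slot, see __bpf_arch_text_poke()
 *   2 bytes  xor eax, eax / nop2   zero the tail-call counter on plain entry
 *                                  (nop2 when the counter is not needed, to
 *                                  keep the offset constant)
 *   1 byte   push rbp
 *   3 bytes  mov rbp, rsp
 *            sub rsp, rounded_stack_depth
 *            push rax              spill the tail-call counter
 *
 * 5 + 2 + 1 + 3 = 11 = X86_TAIL_CALL_OFFSET: a tail call jumps to
 * bpf_func + 11, past the counter reset and the frame setup, so the
 * current frame (rbp and return address) is reused and the counter
 * carried in rax is not re-zeroed.
 */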
 302
 303static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
 304{
 305        u8 *prog = *pprog;
 306        int cnt = 0;
 307        s64 offset;
 308
 309        offset = func - (ip + X86_PATCH_SIZE);
 310        if (!is_simm32(offset)) {
 311                pr_err("Target call %p is out of range\n", func);
 312                return -ERANGE;
 313        }
 314        EMIT1_off32(opcode, offset);
 315        *pprog = prog;
 316        return 0;
 317}
 318
 319static int emit_call(u8 **pprog, void *func, void *ip)
 320{
 321        return emit_patch(pprog, func, ip, 0xE8);
 322}
 323
 324static int emit_jump(u8 **pprog, void *func, void *ip)
 325{
 326        return emit_patch(pprog, func, ip, 0xE9);
 327}
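/*
 * 0xE8 is call rel32 and 0xE9 is jmp rel32; both encode their target
 * relative to the end of the 5-byte instruction, hence
 * "func - (ip + X86_PATCH_SIZE)" above. For example, a target that
 * lies 0x1000 bytes after the patch site gets rel32 = 0x1000 - 5.
 */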
 328
 329static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
 330                                void *old_addr, void *new_addr,
 331                                const bool text_live)
 332{
 333        const u8 *nop_insn = ideal_nops[NOP_ATOMIC5];
 334        u8 old_insn[X86_PATCH_SIZE];
 335        u8 new_insn[X86_PATCH_SIZE];
 336        u8 *prog;
 337        int ret;
 338
 339        memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
 340        if (old_addr) {
 341                prog = old_insn;
 342                ret = t == BPF_MOD_CALL ?
 343                      emit_call(&prog, old_addr, ip) :
 344                      emit_jump(&prog, old_addr, ip);
 345                if (ret)
 346                        return ret;
 347        }
 348
 349        memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
 350        if (new_addr) {
 351                prog = new_insn;
 352                ret = t == BPF_MOD_CALL ?
 353                      emit_call(&prog, new_addr, ip) :
 354                      emit_jump(&prog, new_addr, ip);
 355                if (ret)
 356                        return ret;
 357        }
 358
 359        ret = -EBUSY;
 360        mutex_lock(&text_mutex);
 361        if (memcmp(ip, old_insn, X86_PATCH_SIZE))
 362                goto out;
 363        ret = 1;
 364        if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
 365                if (text_live)
 366                        text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
 367                else
 368                        memcpy(ip, new_insn, X86_PATCH_SIZE);
 369                ret = 0;
 370        }
 371out:
 372        mutex_unlock(&text_mutex);
 373        return ret;
 374}
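/*
 * Return values: -EBUSY if the bytes at ip are not the expected old
 * sequence (a 5-byte nop when old_addr is NULL), 1 if they already
 * match the new sequence (nothing to do), 0 after a successful patch.
 * A live image is patched with text_poke_bp(), a not-yet-live one with
 * a plain memcpy. Illustrative usage (tr_func is a placeholder), e.g.
 * turning a prologue nop5 into a trampoline call and back:
 *
 *   bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, tr_func);    // nop5 -> call
 *   bpf_arch_text_poke(ip, BPF_MOD_CALL, tr_func, NULL);    // call -> nop5
 */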
 375
 376int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
 377                       void *old_addr, void *new_addr)
 378{
 379        if (!is_kernel_text((long)ip) &&
 380            !is_bpf_text_address((long)ip))
 381                /* BPF poking in modules is not supported */
 382                return -EINVAL;
 383
 384        return __bpf_arch_text_poke(ip, t, old_addr, new_addr, true);
 385}
 386
 387static int get_pop_bytes(bool *callee_regs_used)
 388{
 389        int bytes = 0;
 390
 391        if (callee_regs_used[3])
 392                bytes += 2;
 393        if (callee_regs_used[2])
 394                bytes += 2;
 395        if (callee_regs_used[1])
 396                bytes += 2;
 397        if (callee_regs_used[0])
 398                bytes += 1;
 399
 400        return bytes;
 401}
 402
 403/*
 404 * Generate the following code:
 405 *
 406 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
 407 *   if (index >= array->map.max_entries)
 408 *     goto out;
 409 *   if (++tail_call_cnt > MAX_TAIL_CALL_CNT)
 410 *     goto out;
 411 *   prog = array->ptrs[index];
 412 *   if (prog == NULL)
 413 *     goto out;
 414 *   goto *(prog->bpf_func + prologue_size);
 415 * out:
 416 */
 417static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used,
 418                                        u32 stack_depth)
 419{
 420        int tcc_off = -4 - round_up(stack_depth, 8);
 421        u8 *prog = *pprog;
 422        int pop_bytes = 0;
 423        int off1 = 42;
 424        int off2 = 31;
 425        int off3 = 9;
 426        int cnt = 0;
 427
 428        /* count the additional bytes used for popping callee regs from stack
 429         * that need to be taken into account for each of the offsets that
 430         * are used for bailing out of the tail call
 431         */
 432        pop_bytes = get_pop_bytes(callee_regs_used);
 433        off1 += pop_bytes;
 434        off2 += pop_bytes;
 435        off3 += pop_bytes;
 436
 437        if (stack_depth) {
 438                off1 += 7;
 439                off2 += 7;
 440                off3 += 7;
 441        }
 442
 443        /*
 444         * rdi - pointer to ctx
 445         * rsi - pointer to bpf_array
 446         * rdx - index in bpf_array
 447         */
 448
 449        /*
 450         * if (index >= array->map.max_entries)
 451         *      goto out;
 452         */
 453        EMIT2(0x89, 0xD2);                        /* mov edx, edx */
 454        EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
 455              offsetof(struct bpf_array, map.max_entries));
 456#define OFFSET1 (off1 + RETPOLINE_RCX_BPF_JIT_SIZE) /* Number of bytes to jump */
 457        EMIT2(X86_JBE, OFFSET1);                  /* jbe out */
 458
 459        /*
 460         * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
 461         *      goto out;
 462         */
 463        EMIT2_off32(0x8B, 0x85, tcc_off);         /* mov eax, dword ptr [rbp - tcc_off] */
 464        EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
 465#define OFFSET2 (off2 + RETPOLINE_RCX_BPF_JIT_SIZE)
 466        EMIT2(X86_JA, OFFSET2);                   /* ja out */
 467        EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
 468        EMIT2_off32(0x89, 0x85, tcc_off);         /* mov dword ptr [rbp - tcc_off], eax */
 469
 470        /* prog = array->ptrs[index]; */
 471        EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6,       /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */
 472                    offsetof(struct bpf_array, ptrs));
 473
 474        /*
 475         * if (prog == NULL)
 476         *      goto out;
 477         */
 478        EMIT3(0x48, 0x85, 0xC9);                  /* test rcx,rcx */
 479#define OFFSET3 (off3 + RETPOLINE_RCX_BPF_JIT_SIZE)
 480        EMIT2(X86_JE, OFFSET3);                   /* je out */
 481
 482        *pprog = prog;
 483        pop_callee_regs(pprog, callee_regs_used);
 484        prog = *pprog;
 485
 486        EMIT1(0x58);                              /* pop rax */
 487        if (stack_depth)
 488                EMIT3_off32(0x48, 0x81, 0xC4,     /* add rsp, sd */
 489                            round_up(stack_depth, 8));
 490
 491        /* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
 492        EMIT4(0x48, 0x8B, 0x49,                   /* mov rcx, qword ptr [rcx + 32] */
 493              offsetof(struct bpf_prog, bpf_func));
 494        EMIT4(0x48, 0x83, 0xC1,                   /* add rcx, X86_TAIL_CALL_OFFSET */
 495              X86_TAIL_CALL_OFFSET);
 496        /*
 497         * Now we're ready to jump into next BPF program
 498         * rdi == ctx (1st arg)
 499         * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
 500         */
 501        RETPOLINE_RCX_BPF_JIT();
 502
 503        /* out: */
 504        *pprog = prog;
 505}
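/*
 * Where the off1/off2/off3 constants above come from (bytes emitted
 * between each conditional jump and the "out" label):
 *
 *   off3 =  9: pop rax (1) + mov rcx, [rcx + off] (4) + add rcx, imm8 (4)
 *   off2 = 31: off3 + add eax, 1 (3) + mov [rbp - x], eax (6)
 *              + mov rcx, [rsi + rdx*8 + off] (8) + test rcx, rcx (3) + je (2)
 *   off1 = 42: off2 + mov eax, [rbp - x] (6) + cmp eax, imm8 (3) + ja (2)
 *
 * Each is then adjusted for the callee-reg pops (pop_bytes), for the
 * 7-byte "add rsp, imm32" when stack_depth != 0, and for the size of
 * the retpoline-based indirect jump via the OFFSET* macros.
 */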
 506
 507static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
 508                                      u8 **pprog, int addr, u8 *image,
 509                                      bool *callee_regs_used, u32 stack_depth)
 510{
 511        int tcc_off = -4 - round_up(stack_depth, 8);
 512        u8 *prog = *pprog;
 513        int pop_bytes = 0;
 514        int off1 = 20;
 515        int poke_off;
 516        int cnt = 0;
 517
  518        /* count the additional bytes used for popping callee regs from the
  519         * stack that need to be taken into account for the jump offset used
  520         * for bailing out of the tail call when the limit is reached
  521         */
 522        pop_bytes = get_pop_bytes(callee_regs_used);
 523        off1 += pop_bytes;
 524
 525        /*
 526         * total bytes for:
 527         * - nop5/ jmpq $off
 528         * - pop callee regs
  529         * - add rsp, $val if depth > 0
 530         * - pop rax
 531         */
 532        poke_off = X86_PATCH_SIZE + pop_bytes + 1;
 533        if (stack_depth) {
 534                poke_off += 7;
 535                off1 += 7;
 536        }
 537
 538        /*
 539         * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
 540         *      goto out;
 541         */
 542        EMIT2_off32(0x8B, 0x85, tcc_off);             /* mov eax, dword ptr [rbp - tcc_off] */
 543        EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);         /* cmp eax, MAX_TAIL_CALL_CNT */
 544        EMIT2(X86_JA, off1);                          /* ja out */
 545        EMIT3(0x83, 0xC0, 0x01);                      /* add eax, 1 */
 546        EMIT2_off32(0x89, 0x85, tcc_off);             /* mov dword ptr [rbp - tcc_off], eax */
 547
 548        poke->tailcall_bypass = image + (addr - poke_off - X86_PATCH_SIZE);
 549        poke->adj_off = X86_TAIL_CALL_OFFSET;
 550        poke->tailcall_target = image + (addr - X86_PATCH_SIZE);
 551        poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE;
 552
 553        emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
 554                  poke->tailcall_bypass);
 555
 556        *pprog = prog;
 557        pop_callee_regs(pprog, callee_regs_used);
 558        prog = *pprog;
 559        EMIT1(0x58);                                  /* pop rax */
 560        if (stack_depth)
 561                EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));
 562
 563        memcpy(prog, ideal_nops[NOP_ATOMIC5], X86_PATCH_SIZE);
 564        prog += X86_PATCH_SIZE;
 565        /* out: */
 566
 567        *pprog = prog;
 568}
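/*
 * Byte accounting for the direct tail call above: off1 = 20 is the
 * distance from the "ja out" to the out label: add eax, 1 (3) +
 * mov [rbp - x], eax (6) + bypass jmp (5) + pop rax (1) + nop5 (5),
 * plus pop_bytes and the 7-byte "add rsp, imm32" when stack_depth != 0.
 * poke_off is the distance from the start of the bypass jump to the
 * trailing patchable nop5, so bpf_tail_call_direct_fixup() below can
 * later turn that nop5 into a "jmp target" and nop out the bypass jump
 * once a target program is installed.
 */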
 569
 570static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
 571{
 572        struct bpf_jit_poke_descriptor *poke;
 573        struct bpf_array *array;
 574        struct bpf_prog *target;
 575        int i, ret;
 576
 577        for (i = 0; i < prog->aux->size_poke_tab; i++) {
 578                poke = &prog->aux->poke_tab[i];
 579                WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));
 580
 581                if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
 582                        continue;
 583
 584                array = container_of(poke->tail_call.map, struct bpf_array, map);
 585                mutex_lock(&array->aux->poke_mutex);
 586                target = array->ptrs[poke->tail_call.key];
 587                if (target) {
  588                        /* Plain memcpy is used while the image is not yet
  589                         * live and not yet locked as read-only. Once the
  590                         * poke location is active (poke->tailcall_target_stable),
  591                         * parallel bpf_arch_text_poke() calls may still
  592                         * operate on the read-write image until it is
  593                         * finally locked as read-only. Both kinds of
  594                         * modification on the given image are serialized
  595                         * under text_mutex to avoid interference.
  596                         */
 597                        ret = __bpf_arch_text_poke(poke->tailcall_target,
 598                                                   BPF_MOD_JUMP, NULL,
 599                                                   (u8 *)target->bpf_func +
 600                                                   poke->adj_off, false);
 601                        BUG_ON(ret < 0);
 602                        ret = __bpf_arch_text_poke(poke->tailcall_bypass,
 603                                                   BPF_MOD_JUMP,
 604                                                   (u8 *)poke->tailcall_target +
 605                                                   X86_PATCH_SIZE, NULL, false);
 606                        BUG_ON(ret < 0);
 607                }
 608                WRITE_ONCE(poke->tailcall_target_stable, true);
 609                mutex_unlock(&array->aux->poke_mutex);
 610        }
 611}
 612
 613static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
 614                           u32 dst_reg, const u32 imm32)
 615{
 616        u8 *prog = *pprog;
 617        u8 b1, b2, b3;
 618        int cnt = 0;
 619
 620        /*
 621         * Optimization: if imm32 is positive, use 'mov %eax, imm32'
 622         * (which zero-extends imm32) to save 2 bytes.
 623         */
 624        if (sign_propagate && (s32)imm32 < 0) {
 625                /* 'mov %rax, imm32' sign extends imm32 */
 626                b1 = add_1mod(0x48, dst_reg);
 627                b2 = 0xC7;
 628                b3 = 0xC0;
 629                EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
 630                goto done;
 631        }
 632
 633        /*
 634         * Optimization: if imm32 is zero, use 'xor %eax, %eax'
 635         * to save 3 bytes.
 636         */
 637        if (imm32 == 0) {
 638                if (is_ereg(dst_reg))
 639                        EMIT1(add_2mod(0x40, dst_reg, dst_reg));
 640                b2 = 0x31; /* xor */
 641                b3 = 0xC0;
 642                EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
 643                goto done;
 644        }
 645
 646        /* mov %eax, imm32 */
 647        if (is_ereg(dst_reg))
 648                EMIT1(add_1mod(0x40, dst_reg));
 649        EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
 650done:
 651        *pprog = prog;
 652}
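/*
 * Resulting encodings, using dst_reg == BPF_REG_0 (rax) as an example:
 *
 *   negative imm with sign_propagate:  48 C7 C0 imm32  mov rax, imm32  (7 bytes)
 *   imm32 == 0:                        31 C0           xor eax, eax    (2 bytes)
 *   otherwise:                         B8 imm32        mov eax, imm32  (5 bytes)
 *
 * which is where the "save 2 bytes" / "save 3 bytes" figures in the
 * comments above come from.
 */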
 653
 654static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
 655                           const u32 imm32_hi, const u32 imm32_lo)
 656{
 657        u8 *prog = *pprog;
 658        int cnt = 0;
 659
 660        if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
 661                /*
  662                 * For emitting a plain u32, where the sign bit must not
  663                 * be propagated, LLVM tends to load an imm64 rather than
  664                 * use mov32, so save a couple of bytes by just doing
  665                 * 'mov %eax, imm32' instead.
 666                 */
 667                emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
 668        } else {
 669                /* movabsq %rax, imm64 */
 670                EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
 671                EMIT(imm32_lo, 4);
 672                EMIT(imm32_hi, 4);
 673        }
 674
 675        *pprog = prog;
 676}
 677
 678static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
 679{
 680        u8 *prog = *pprog;
 681        int cnt = 0;
 682
 683        if (is64) {
 684                /* mov dst, src */
 685                EMIT_mov(dst_reg, src_reg);
 686        } else {
 687                /* mov32 dst, src */
 688                if (is_ereg(dst_reg) || is_ereg(src_reg))
 689                        EMIT1(add_2mod(0x40, dst_reg, src_reg));
 690                EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
 691        }
 692
 693        *pprog = prog;
 694}
 695
 696/* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */
 697static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
 698{
 699        u8 *prog = *pprog;
 700        int cnt = 0;
 701
 702        if (is_imm8(off)) {
 703                /* 1-byte signed displacement.
 704                 *
  705                 * If off == 0 we could skip this and save one extra byte,
  706                 * but the special case of x86 R13, which always needs an
  707                 * offset, is not worth the hassle.
 708                 */
 709                EMIT2(add_2reg(0x40, ptr_reg, val_reg), off);
 710        } else {
 711                /* 4-byte signed displacement */
 712                EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off);
 713        }
 714        *pprog = prog;
 715}
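/*
 * The 0x40 and 0x80 bases select the ModRM mod field: mod=01 means a
 * disp8 follows, mod=10 a disp32. For example, the BPF_W load in
 * emit_ldx() with src = dst = BPF_REG_0 and off = 0x14 comes out as
 * "8B 40 14", the "mov eax, dword ptr [rax+0x14]" quoted there. An
 * rbp/r13 base always needs at least a disp8 because mod=00 with
 * rm=101 is interpreted as RIP-relative/disp32 addressing instead,
 * which is the R13 special case mentioned above.
 */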
 716
 717/*
 718 * Emit a REX byte if it will be necessary to address these registers
 719 */
 720static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64)
 721{
 722        u8 *prog = *pprog;
 723        int cnt = 0;
 724
 725        if (is64)
 726                EMIT1(add_2mod(0x48, dst_reg, src_reg));
 727        else if (is_ereg(dst_reg) || is_ereg(src_reg))
 728                EMIT1(add_2mod(0x40, dst_reg, src_reg));
 729        *pprog = prog;
 730}
 731
 732/* LDX: dst_reg = *(u8*)(src_reg + off) */
 733static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
 734{
 735        u8 *prog = *pprog;
 736        int cnt = 0;
 737
 738        switch (size) {
 739        case BPF_B:
 740                /* Emit 'movzx rax, byte ptr [rax + off]' */
 741                EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
 742                break;
 743        case BPF_H:
 744                /* Emit 'movzx rax, word ptr [rax + off]' */
 745                EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
 746                break;
 747        case BPF_W:
 748                /* Emit 'mov eax, dword ptr [rax+0x14]' */
 749                if (is_ereg(dst_reg) || is_ereg(src_reg))
 750                        EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
 751                else
 752                        EMIT1(0x8B);
 753                break;
 754        case BPF_DW:
 755                /* Emit 'mov rax, qword ptr [rax+0x14]' */
 756                EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
 757                break;
 758        }
 759        emit_insn_suffix(&prog, src_reg, dst_reg, off);
 760        *pprog = prog;
 761}
 762
 763/* STX: *(u8*)(dst_reg + off) = src_reg */
 764static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
 765{
 766        u8 *prog = *pprog;
 767        int cnt = 0;
 768
 769        switch (size) {
 770        case BPF_B:
 771                /* Emit 'mov byte ptr [rax + off], al' */
 772                if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
 773                        /* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
 774                        EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
 775                else
 776                        EMIT1(0x88);
 777                break;
 778        case BPF_H:
 779                if (is_ereg(dst_reg) || is_ereg(src_reg))
 780                        EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
 781                else
 782                        EMIT2(0x66, 0x89);
 783                break;
 784        case BPF_W:
 785                if (is_ereg(dst_reg) || is_ereg(src_reg))
 786                        EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
 787                else
 788                        EMIT1(0x89);
 789                break;
 790        case BPF_DW:
 791                EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
 792                break;
 793        }
 794        emit_insn_suffix(&prog, dst_reg, src_reg, off);
 795        *pprog = prog;
 796}
 797
 798static int emit_atomic(u8 **pprog, u8 atomic_op,
 799                       u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
 800{
 801        u8 *prog = *pprog;
 802        int cnt = 0;
 803
 804        EMIT1(0xF0); /* lock prefix */
 805
 806        maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW);
 807
 808        /* emit opcode */
 809        switch (atomic_op) {
 810        case BPF_ADD:
 811        case BPF_SUB:
 812        case BPF_AND:
 813        case BPF_OR:
 814        case BPF_XOR:
 815                /* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */
 816                EMIT1(simple_alu_opcodes[atomic_op]);
 817                break;
 818        case BPF_ADD | BPF_FETCH:
 819                /* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */
 820                EMIT2(0x0F, 0xC1);
 821                break;
 822        case BPF_XCHG:
 823                /* src_reg = atomic_xchg(dst_reg + off, src_reg); */
 824                EMIT1(0x87);
 825                break;
 826        case BPF_CMPXCHG:
 827                /* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
 828                EMIT2(0x0F, 0xB1);
 829                break;
 830        default:
 831                pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
 832                return -EFAULT;
 833        }
 834
 835        emit_insn_suffix(&prog, dst_reg, src_reg, off);
 836
 837        *pprog = prog;
 838        return 0;
 839}
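/*
 * Worked example: BPF_ADD on a BPF_DW with dst_reg = BPF_REG_1 (rdi),
 * src_reg = BPF_REG_0 (rax) and off = 0 emits "F0 48 01 47 00", i.e.
 * "lock add qword ptr [rdi + 0], rax". BPF_CMPXCHG maps to 0F B1
 * (cmpxchg), which implicitly compares against rax; that is why its
 * comment above is written in terms of r0.
 */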
 840
 841static bool ex_handler_bpf(const struct exception_table_entry *x,
 842                           struct pt_regs *regs, int trapnr,
 843                           unsigned long error_code, unsigned long fault_addr)
 844{
 845        u32 reg = x->fixup >> 8;
 846
 847        /* jump over faulting load and clear dest register */
 848        *(unsigned long *)((void *)regs + reg) = 0;
 849        regs->ip += x->fixup & 0xff;
 850        return true;
 851}
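/*
 * ex->fixup is packed in do_jit() as (insn length) | (pt_regs offset << 8).
 * For the 4-byte "mov rbx, qword ptr [rax+0x14]" example quoted there,
 * a fault makes this handler advance regs->ip by 4 and zero regs->bx,
 * the offset having been taken from reg2pt_regs[BPF_REG_6].
 */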
 852
 853static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt,
 854                             bool *regs_used, bool *tail_call_seen)
 855{
 856        int i;
 857
 858        for (i = 1; i <= insn_cnt; i++, insn++) {
 859                if (insn->code == (BPF_JMP | BPF_TAIL_CALL))
 860                        *tail_call_seen = true;
 861                if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6)
 862                        regs_used[0] = true;
 863                if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7)
 864                        regs_used[1] = true;
 865                if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8)
 866                        regs_used[2] = true;
 867                if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9)
 868                        regs_used[3] = true;
 869        }
 870}
 871
 872static int emit_nops(u8 **pprog, int len)
 873{
 874        u8 *prog = *pprog;
 875        int i, noplen, cnt = 0;
 876
 877        while (len > 0) {
 878                noplen = len;
 879
 880                if (noplen > ASM_NOP_MAX)
 881                        noplen = ASM_NOP_MAX;
 882
 883                for (i = 0; i < noplen; i++)
 884                        EMIT1(ideal_nops[noplen][i]);
 885                len -= noplen;
 886        }
 887
 888        *pprog = prog;
 889
 890        return cnt;
 891}
 892
 893#define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
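/*
 * INSN_SZ_DIFF is how many bytes shorter this BPF insn's encoding came
 * out in the current pass compared with what addrs[] recorded in the
 * previous pass. The jump-emitting cases further down (not all shown
 * here) use it, when jmp_padding is set, to pad the difference with
 * NOPs so that the previously computed addresses stay valid.
 */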
 894
 895static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 896                  int oldproglen, struct jit_context *ctx, bool jmp_padding)
 897{
 898        bool tail_call_reachable = bpf_prog->aux->tail_call_reachable;
 899        struct bpf_insn *insn = bpf_prog->insnsi;
 900        bool callee_regs_used[4] = {};
 901        int insn_cnt = bpf_prog->len;
 902        bool tail_call_seen = false;
 903        bool seen_exit = false;
 904        u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
 905        int i, cnt = 0, excnt = 0;
 906        int ilen, proglen = 0;
 907        u8 *prog = temp;
 908        int err;
 909
 910        detect_reg_usage(insn, insn_cnt, callee_regs_used,
 911                         &tail_call_seen);
 912
 913        /* tail call's presence in current prog implies it is reachable */
 914        tail_call_reachable |= tail_call_seen;
 915
 916        emit_prologue(&prog, bpf_prog->aux->stack_depth,
 917                      bpf_prog_was_classic(bpf_prog), tail_call_reachable,
 918                      bpf_prog->aux->func_idx != 0);
 919        push_callee_regs(&prog, callee_regs_used);
 920
 921        ilen = prog - temp;
 922        if (image)
 923                memcpy(image + proglen, temp, ilen);
 924        proglen += ilen;
 925        addrs[0] = proglen;
 926        prog = temp;
 927
 928        for (i = 1; i <= insn_cnt; i++, insn++) {
 929                const s32 imm32 = insn->imm;
 930                u32 dst_reg = insn->dst_reg;
 931                u32 src_reg = insn->src_reg;
 932                u8 b2 = 0, b3 = 0;
 933                u8 *start_of_ldx;
 934                s64 jmp_offset;
 935                u8 jmp_cond;
 936                u8 *func;
 937                int nops;
 938
 939                switch (insn->code) {
 940                        /* ALU */
 941                case BPF_ALU | BPF_ADD | BPF_X:
 942                case BPF_ALU | BPF_SUB | BPF_X:
 943                case BPF_ALU | BPF_AND | BPF_X:
 944                case BPF_ALU | BPF_OR | BPF_X:
 945                case BPF_ALU | BPF_XOR | BPF_X:
 946                case BPF_ALU64 | BPF_ADD | BPF_X:
 947                case BPF_ALU64 | BPF_SUB | BPF_X:
 948                case BPF_ALU64 | BPF_AND | BPF_X:
 949                case BPF_ALU64 | BPF_OR | BPF_X:
 950                case BPF_ALU64 | BPF_XOR | BPF_X:
 951                        maybe_emit_mod(&prog, dst_reg, src_reg,
 952                                       BPF_CLASS(insn->code) == BPF_ALU64);
 953                        b2 = simple_alu_opcodes[BPF_OP(insn->code)];
 954                        EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
 955                        break;
 956
 957                case BPF_ALU64 | BPF_MOV | BPF_X:
 958                case BPF_ALU | BPF_MOV | BPF_X:
 959                        emit_mov_reg(&prog,
 960                                     BPF_CLASS(insn->code) == BPF_ALU64,
 961                                     dst_reg, src_reg);
 962                        break;
 963
 964                        /* neg dst */
 965                case BPF_ALU | BPF_NEG:
 966                case BPF_ALU64 | BPF_NEG:
 967                        if (BPF_CLASS(insn->code) == BPF_ALU64)
 968                                EMIT1(add_1mod(0x48, dst_reg));
 969                        else if (is_ereg(dst_reg))
 970                                EMIT1(add_1mod(0x40, dst_reg));
 971                        EMIT2(0xF7, add_1reg(0xD8, dst_reg));
 972                        break;
 973
 974                case BPF_ALU | BPF_ADD | BPF_K:
 975                case BPF_ALU | BPF_SUB | BPF_K:
 976                case BPF_ALU | BPF_AND | BPF_K:
 977                case BPF_ALU | BPF_OR | BPF_K:
 978                case BPF_ALU | BPF_XOR | BPF_K:
 979                case BPF_ALU64 | BPF_ADD | BPF_K:
 980                case BPF_ALU64 | BPF_SUB | BPF_K:
 981                case BPF_ALU64 | BPF_AND | BPF_K:
 982                case BPF_ALU64 | BPF_OR | BPF_K:
 983                case BPF_ALU64 | BPF_XOR | BPF_K:
 984                        if (BPF_CLASS(insn->code) == BPF_ALU64)
 985                                EMIT1(add_1mod(0x48, dst_reg));
 986                        else if (is_ereg(dst_reg))
 987                                EMIT1(add_1mod(0x40, dst_reg));
 988
 989                        /*
  990                         * b3 holds the 'normal' opcode; the b2 short form is only
  991                         * valid when dst is eax/rax.
 992                         */
 993                        switch (BPF_OP(insn->code)) {
 994                        case BPF_ADD:
 995                                b3 = 0xC0;
 996                                b2 = 0x05;
 997                                break;
 998                        case BPF_SUB:
 999                                b3 = 0xE8;
1000                                b2 = 0x2D;
1001                                break;
1002                        case BPF_AND:
1003                                b3 = 0xE0;
1004                                b2 = 0x25;
1005                                break;
1006                        case BPF_OR:
1007                                b3 = 0xC8;
1008                                b2 = 0x0D;
1009                                break;
1010                        case BPF_XOR:
1011                                b3 = 0xF0;
1012                                b2 = 0x35;
1013                                break;
1014                        }
1015
1016                        if (is_imm8(imm32))
1017                                EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
1018                        else if (is_axreg(dst_reg))
1019                                EMIT1_off32(b2, imm32);
1020                        else
1021                                EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
1022                        break;
1023
1024                case BPF_ALU64 | BPF_MOV | BPF_K:
1025                case BPF_ALU | BPF_MOV | BPF_K:
1026                        emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
1027                                       dst_reg, imm32);
1028                        break;
1029
1030                case BPF_LD | BPF_IMM | BPF_DW:
1031                        emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
1032                        insn++;
1033                        i++;
1034                        break;
1035
1036                        /* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
1037                case BPF_ALU | BPF_MOD | BPF_X:
1038                case BPF_ALU | BPF_DIV | BPF_X:
1039                case BPF_ALU | BPF_MOD | BPF_K:
1040                case BPF_ALU | BPF_DIV | BPF_K:
1041                case BPF_ALU64 | BPF_MOD | BPF_X:
1042                case BPF_ALU64 | BPF_DIV | BPF_X:
1043                case BPF_ALU64 | BPF_MOD | BPF_K:
1044                case BPF_ALU64 | BPF_DIV | BPF_K:
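                        /*
                         * x86 div divides rdx:rax by its operand, so rax and
                         * rdx are saved and restored around the operation.
                         * "xor edx, edx" clears the high half for an unsigned
                         * divide; the quotient lands in rax, the remainder in
                         * rdx, and whichever one is wanted is parked in r11
                         * until rdx/rax have been popped back.
                         */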
1045                        EMIT1(0x50); /* push rax */
1046                        EMIT1(0x52); /* push rdx */
1047
1048                        if (BPF_SRC(insn->code) == BPF_X)
1049                                /* mov r11, src_reg */
1050                                EMIT_mov(AUX_REG, src_reg);
1051                        else
1052                                /* mov r11, imm32 */
1053                                EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
1054
1055                        /* mov rax, dst_reg */
1056                        EMIT_mov(BPF_REG_0, dst_reg);
1057
1058                        /*
1059                         * xor edx, edx
1060                         * equivalent to 'xor rdx, rdx', but one byte less
1061                         */
1062                        EMIT2(0x31, 0xd2);
1063
1064                        if (BPF_CLASS(insn->code) == BPF_ALU64)
1065                                /* div r11 */
1066                                EMIT3(0x49, 0xF7, 0xF3);
1067                        else
1068                                /* div r11d */
1069                                EMIT3(0x41, 0xF7, 0xF3);
1070
1071                        if (BPF_OP(insn->code) == BPF_MOD)
1072                                /* mov r11, rdx */
1073                                EMIT3(0x49, 0x89, 0xD3);
1074                        else
1075                                /* mov r11, rax */
1076                                EMIT3(0x49, 0x89, 0xC3);
1077
1078                        EMIT1(0x5A); /* pop rdx */
1079                        EMIT1(0x58); /* pop rax */
1080
1081                        /* mov dst_reg, r11 */
1082                        EMIT_mov(dst_reg, AUX_REG);
1083                        break;
1084
1085                case BPF_ALU | BPF_MUL | BPF_K:
1086                case BPF_ALU | BPF_MUL | BPF_X:
1087                case BPF_ALU64 | BPF_MUL | BPF_K:
1088                case BPF_ALU64 | BPF_MUL | BPF_X:
1089                {
1090                        bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
1091
1092                        if (dst_reg != BPF_REG_0)
1093                                EMIT1(0x50); /* push rax */
1094                        if (dst_reg != BPF_REG_3)
1095                                EMIT1(0x52); /* push rdx */
1096
1097                        /* mov r11, dst_reg */
1098                        EMIT_mov(AUX_REG, dst_reg);
1099
1100                        if (BPF_SRC(insn->code) == BPF_X)
1101                                emit_mov_reg(&prog, is64, BPF_REG_0, src_reg);
1102                        else
1103                                emit_mov_imm32(&prog, is64, BPF_REG_0, imm32);
1104
1105                        if (is64)
1106                                EMIT1(add_1mod(0x48, AUX_REG));
1107                        else if (is_ereg(AUX_REG))
1108                                EMIT1(add_1mod(0x40, AUX_REG));
1109                        /* mul(q) r11 */
1110                        EMIT2(0xF7, add_1reg(0xE0, AUX_REG));
1111
1112                        if (dst_reg != BPF_REG_3)
1113                                EMIT1(0x5A); /* pop rdx */
1114                        if (dst_reg != BPF_REG_0) {
1115                                /* mov dst_reg, rax */
1116                                EMIT_mov(dst_reg, BPF_REG_0);
1117                                EMIT1(0x58); /* pop rax */
1118                        }
1119                        break;
1120                }
1121                        /* Shifts */
1122                case BPF_ALU | BPF_LSH | BPF_K:
1123                case BPF_ALU | BPF_RSH | BPF_K:
1124                case BPF_ALU | BPF_ARSH | BPF_K:
1125                case BPF_ALU64 | BPF_LSH | BPF_K:
1126                case BPF_ALU64 | BPF_RSH | BPF_K:
1127                case BPF_ALU64 | BPF_ARSH | BPF_K:
1128                        if (BPF_CLASS(insn->code) == BPF_ALU64)
1129                                EMIT1(add_1mod(0x48, dst_reg));
1130                        else if (is_ereg(dst_reg))
1131                                EMIT1(add_1mod(0x40, dst_reg));
1132
1133                        b3 = simple_alu_opcodes[BPF_OP(insn->code)];
1134                        if (imm32 == 1)
1135                                EMIT2(0xD1, add_1reg(b3, dst_reg));
1136                        else
1137                                EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
1138                        break;
1139
1140                case BPF_ALU | BPF_LSH | BPF_X:
1141                case BPF_ALU | BPF_RSH | BPF_X:
1142                case BPF_ALU | BPF_ARSH | BPF_X:
1143                case BPF_ALU64 | BPF_LSH | BPF_X:
1144                case BPF_ALU64 | BPF_RSH | BPF_X:
1145                case BPF_ALU64 | BPF_ARSH | BPF_X:
1146
1147                        /* Check for bad case when dst_reg == rcx */
1148                        if (dst_reg == BPF_REG_4) {
1149                                /* mov r11, dst_reg */
1150                                EMIT_mov(AUX_REG, dst_reg);
1151                                dst_reg = AUX_REG;
1152                        }
1153
1154                        if (src_reg != BPF_REG_4) { /* common case */
1155                                EMIT1(0x51); /* push rcx */
1156
1157                                /* mov rcx, src_reg */
1158                                EMIT_mov(BPF_REG_4, src_reg);
1159                        }
1160
1161                        /* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
1162                        if (BPF_CLASS(insn->code) == BPF_ALU64)
1163                                EMIT1(add_1mod(0x48, dst_reg));
1164                        else if (is_ereg(dst_reg))
1165                                EMIT1(add_1mod(0x40, dst_reg));
1166
1167                        b3 = simple_alu_opcodes[BPF_OP(insn->code)];
1168                        EMIT2(0xD3, add_1reg(b3, dst_reg));
1169
1170                        if (src_reg != BPF_REG_4)
1171                                EMIT1(0x59); /* pop rcx */
1172
1173                        if (insn->dst_reg == BPF_REG_4)
1174                                /* mov dst_reg, r11 */
1175                                EMIT_mov(insn->dst_reg, AUX_REG);
1176                        break;
1177
1178                case BPF_ALU | BPF_END | BPF_FROM_BE:
1179                        switch (imm32) {
1180                        case 16:
1181                                /* Emit 'ror %ax, 8' to swap lower 2 bytes */
1182                                EMIT1(0x66);
1183                                if (is_ereg(dst_reg))
1184                                        EMIT1(0x41);
1185                                EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
1186
1187                                /* Emit 'movzwl eax, ax' */
1188                                if (is_ereg(dst_reg))
1189                                        EMIT3(0x45, 0x0F, 0xB7);
1190                                else
1191                                        EMIT2(0x0F, 0xB7);
1192                                EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
1193                                break;
1194                        case 32:
1195                                /* Emit 'bswap eax' to swap lower 4 bytes */
1196                                if (is_ereg(dst_reg))
1197                                        EMIT2(0x41, 0x0F);
1198                                else
1199                                        EMIT1(0x0F);
1200                                EMIT1(add_1reg(0xC8, dst_reg));
1201                                break;
1202                        case 64:
1203                                /* Emit 'bswap rax' to swap 8 bytes */
1204                                EMIT3(add_1mod(0x48, dst_reg), 0x0F,
1205                                      add_1reg(0xC8, dst_reg));
1206                                break;
1207                        }
1208                        break;
1209
1210                case BPF_ALU | BPF_END | BPF_FROM_LE:
1211                        switch (imm32) {
1212                        case 16:
1213                                /*
1214                                 * Emit 'movzwl eax, ax' to zero extend 16-bit
1215                                 * into 64 bit
1216                                 */
1217                                if (is_ereg(dst_reg))
1218                                        EMIT3(0x45, 0x0F, 0xB7);
1219                                else
1220                                        EMIT2(0x0F, 0xB7);
1221                                EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
1222                                break;
1223                        case 32:
1224                                /* Emit 'mov eax, eax' to clear upper 32-bits */
1225                                if (is_ereg(dst_reg))
1226                                        EMIT1(0x45);
1227                                EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
1228                                break;
1229                        case 64:
1230                                /* nop */
1231                                break;
1232                        }
1233                        break;
1234
1235                        /* ST: *(u8*)(dst_reg + off) = imm */
1236                case BPF_ST | BPF_MEM | BPF_B:
1237                        if (is_ereg(dst_reg))
1238                                EMIT2(0x41, 0xC6);
1239                        else
1240                                EMIT1(0xC6);
1241                        goto st;
1242                case BPF_ST | BPF_MEM | BPF_H:
1243                        if (is_ereg(dst_reg))
1244                                EMIT3(0x66, 0x41, 0xC7);
1245                        else
1246                                EMIT2(0x66, 0xC7);
1247                        goto st;
1248                case BPF_ST | BPF_MEM | BPF_W:
1249                        if (is_ereg(dst_reg))
1250                                EMIT2(0x41, 0xC7);
1251                        else
1252                                EMIT1(0xC7);
1253                        goto st;
1254                case BPF_ST | BPF_MEM | BPF_DW:
1255                        EMIT2(add_1mod(0x48, dst_reg), 0xC7);
1256
1257st:                     if (is_imm8(insn->off))
1258                                EMIT2(add_1reg(0x40, dst_reg), insn->off);
1259                        else
1260                                EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);
1261
1262                        EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
1263                        break;
1264
1265                        /* STX: *(u8*)(dst_reg + off) = src_reg */
1266                case BPF_STX | BPF_MEM | BPF_B:
1267                case BPF_STX | BPF_MEM | BPF_H:
1268                case BPF_STX | BPF_MEM | BPF_W:
1269                case BPF_STX | BPF_MEM | BPF_DW:
1270                        emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1271                        break;
1272
1273                        /* LDX: dst_reg = *(u8*)(src_reg + off) */
1274                case BPF_LDX | BPF_MEM | BPF_B:
1275                case BPF_LDX | BPF_PROBE_MEM | BPF_B:
1276                case BPF_LDX | BPF_MEM | BPF_H:
1277                case BPF_LDX | BPF_PROBE_MEM | BPF_H:
1278                case BPF_LDX | BPF_MEM | BPF_W:
1279                case BPF_LDX | BPF_PROBE_MEM | BPF_W:
1280                case BPF_LDX | BPF_MEM | BPF_DW:
1281                case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
1282                        if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
1283                                /* test src_reg, src_reg */
1284                                maybe_emit_mod(&prog, src_reg, src_reg, true); /* always 1 byte */
1285                                EMIT2(0x85, add_2reg(0xC0, src_reg, src_reg));
1286                                /* jne start_of_ldx */
1287                                EMIT2(X86_JNE, 0);
1288                                /* xor dst_reg, dst_reg */
1289                                emit_mov_imm32(&prog, false, dst_reg, 0);
1290                                /* jmp byte_after_ldx */
1291                                EMIT2(0xEB, 0);
1292
1293                                /* populate jmp_offset for JNE above */
1294                                temp[4] = prog - temp - 5 /* sizeof(test + jne) */;
1295                                start_of_ldx = prog;
1296                        }
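                        /*
                         * The "test src, src" (3 bytes) plus "jne" (2 bytes)
                         * always occupy temp[0..4], so the temp[4] patched
                         * above is the jne's rel8 operand; start_of_ldx[-1],
                         * patched after emit_ldx() below, is the rel8 of the
                         * jmp that skips the load once dst has been zeroed
                         * for a NULL src.
                         */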
1297                        emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1298                        if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
1299                                struct exception_table_entry *ex;
1300                                u8 *_insn = image + proglen;
1301                                s64 delta;
1302
1303                                /* populate jmp_offset for JMP above */
1304                                start_of_ldx[-1] = prog - start_of_ldx;
1305
1306                                if (!bpf_prog->aux->extable)
1307                                        break;
1308
1309                                if (excnt >= bpf_prog->aux->num_exentries) {
1310                                        pr_err("ex gen bug\n");
1311                                        return -EFAULT;
1312                                }
1313                                ex = &bpf_prog->aux->extable[excnt++];
1314
1315                                delta = _insn - (u8 *)&ex->insn;
1316                                if (!is_simm32(delta)) {
1317                                        pr_err("extable->insn doesn't fit into 32-bit\n");
1318                                        return -EFAULT;
1319                                }
1320                                ex->insn = delta;
1321
1322                                delta = (u8 *)ex_handler_bpf - (u8 *)&ex->handler;
1323                                if (!is_simm32(delta)) {
1324                                        pr_err("extable->handler doesn't fit into 32-bit\n");
1325                                        return -EFAULT;
1326                                }
1327                                ex->handler = delta;
1328
1329                                if (dst_reg > BPF_REG_9) {
1330                                        pr_err("verifier error\n");
1331                                        return -EFAULT;
1332                                }
1333                                /*
1334                                 * Compute size of x86 insn and its target dest x86 register.
1335                                 * ex_handler_bpf() will use lower 8 bits to adjust
1336                                 * pt_regs->ip to jump over this x86 instruction
1337                                 * and upper bits to figure out which pt_regs to zero out.
1338                                 * End result: x86 insn "mov rbx, qword ptr [rax+0x14]"
1339                                 * of 4 bytes will be ignored and rbx will be zero inited.
1340                                 */
1341                                ex->fixup = (prog - temp) | (reg2pt_regs[dst_reg] << 8);
1342                        }
1343                        break;
1344
1345                case BPF_STX | BPF_ATOMIC | BPF_W:
1346                case BPF_STX | BPF_ATOMIC | BPF_DW:
1347                        if (insn->imm == (BPF_AND | BPF_FETCH) ||
1348                            insn->imm == (BPF_OR | BPF_FETCH) ||
1349                            insn->imm == (BPF_XOR | BPF_FETCH)) {
1350                                u8 *branch_target;
1351                                bool is64 = BPF_SIZE(insn->code) == BPF_DW;
1352                                u32 real_src_reg = src_reg;
1353
1354                                /*
1355                                 * Can't be implemented with a single x86 insn.
1356                                 * Need to do a CMPXCHG loop.
1357                                 */
1358
1359                                /* Will need RAX as a CMPXCHG operand so save R0 */
1360                                emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
1361                                if (src_reg == BPF_REG_0)
1362                                        real_src_reg = BPF_REG_AX;
1363
1364                                branch_target = prog;
1365                                /* Load old value */
1366                                emit_ldx(&prog, BPF_SIZE(insn->code),
1367                                         BPF_REG_0, dst_reg, insn->off);
1368                                /*
1369                                 * Perform the (commutative) operation locally,
1370                                 * put the result in the AUX_REG.
1371                                 */
1372                                emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0);
1373                                maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64);
1374                                EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)],
1375                                      add_2reg(0xC0, AUX_REG, real_src_reg));
1376                                /* Attempt to swap in new value */
1377                                err = emit_atomic(&prog, BPF_CMPXCHG,
1378                                                  dst_reg, AUX_REG, insn->off,
1379                                                  BPF_SIZE(insn->code));
1380                                if (WARN_ON(err))
1381                                        return err;
1382                                /*
1383                                 * ZF tells us whether we won the race. If it's
1384                                 * cleared we need to try again.
1385                                 */
1386                                EMIT2(X86_JNE, -(prog - branch_target) - 2);
1387                                /* Return the pre-modification value */
1388                                emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0);
1389                                /* Restore R0 after clobbering RAX */
1390                                emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
1391                                break;
1392
1393                        }
1394
1395                        err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
1396                                                  insn->off, BPF_SIZE(insn->code));
1397                        if (err)
1398                                return err;
1399                        break;
1400
1401                        /* call */
1402                case BPF_JMP | BPF_CALL:
1403                        func = (u8 *) __bpf_call_base + imm32;
1404                        if (tail_call_reachable) {
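                                /*
                                 * EMIT3_off32(0x48, 0x8B, 0x85, off) encodes
                                 * "mov rax, qword ptr [rbp + off32]". With
                                 * off = -(stack_depth + 8) this reloads the tail
                                 * call counter spilled just below the program's
                                 * stack by the prologue, so the callee keeps
                                 * enforcing the tail call limit. The call site
                                 * passed to emit_call() is offset by +7 because
                                 * this mov occupies 7 bytes.
                                 */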
1405                                EMIT3_off32(0x48, 0x8B, 0x85,
1406                                            -(bpf_prog->aux->stack_depth + 8));
1407                                if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7))
1408                                        return -EINVAL;
1409                        } else {
1410                                if (!imm32 || emit_call(&prog, func, image + addrs[i - 1]))
1411                                        return -EINVAL;
1412                        }
1413                        break;
1414
1415                case BPF_JMP | BPF_TAIL_CALL:
1416                        if (imm32)
1417                                emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1],
1418                                                          &prog, addrs[i], image,
1419                                                          callee_regs_used,
1420                                                          bpf_prog->aux->stack_depth);
1421                        else
1422                                emit_bpf_tail_call_indirect(&prog,
1423                                                            callee_regs_used,
1424                                                            bpf_prog->aux->stack_depth);
1425                        break;
1426
1427                        /* cond jump */
1428                case BPF_JMP | BPF_JEQ | BPF_X:
1429                case BPF_JMP | BPF_JNE | BPF_X:
1430                case BPF_JMP | BPF_JGT | BPF_X:
1431                case BPF_JMP | BPF_JLT | BPF_X:
1432                case BPF_JMP | BPF_JGE | BPF_X:
1433                case BPF_JMP | BPF_JLE | BPF_X:
1434                case BPF_JMP | BPF_JSGT | BPF_X:
1435                case BPF_JMP | BPF_JSLT | BPF_X:
1436                case BPF_JMP | BPF_JSGE | BPF_X:
1437                case BPF_JMP | BPF_JSLE | BPF_X:
1438                case BPF_JMP32 | BPF_JEQ | BPF_X:
1439                case BPF_JMP32 | BPF_JNE | BPF_X:
1440                case BPF_JMP32 | BPF_JGT | BPF_X:
1441                case BPF_JMP32 | BPF_JLT | BPF_X:
1442                case BPF_JMP32 | BPF_JGE | BPF_X:
1443                case BPF_JMP32 | BPF_JLE | BPF_X:
1444                case BPF_JMP32 | BPF_JSGT | BPF_X:
1445                case BPF_JMP32 | BPF_JSLT | BPF_X:
1446                case BPF_JMP32 | BPF_JSGE | BPF_X:
1447                case BPF_JMP32 | BPF_JSLE | BPF_X:
1448                        /* cmp dst_reg, src_reg */
1449                        maybe_emit_mod(&prog, dst_reg, src_reg,
1450                                       BPF_CLASS(insn->code) == BPF_JMP);
1451                        EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
1452                        goto emit_cond_jmp;
1453
1454                case BPF_JMP | BPF_JSET | BPF_X:
1455                case BPF_JMP32 | BPF_JSET | BPF_X:
1456                        /* test dst_reg, src_reg */
1457                        maybe_emit_mod(&prog, dst_reg, src_reg,
1458                                       BPF_CLASS(insn->code) == BPF_JMP);
1459                        EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
1460                        goto emit_cond_jmp;
1461
1462                case BPF_JMP | BPF_JSET | BPF_K:
1463                case BPF_JMP32 | BPF_JSET | BPF_K:
1464                        /* test dst_reg, imm32 */
1465                        if (BPF_CLASS(insn->code) == BPF_JMP)
1466                                EMIT1(add_1mod(0x48, dst_reg));
1467                        else if (is_ereg(dst_reg))
1468                                EMIT1(add_1mod(0x40, dst_reg));
1469                        EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
1470                        goto emit_cond_jmp;
1471
1472                case BPF_JMP | BPF_JEQ | BPF_K:
1473                case BPF_JMP | BPF_JNE | BPF_K:
1474                case BPF_JMP | BPF_JGT | BPF_K:
1475                case BPF_JMP | BPF_JLT | BPF_K:
1476                case BPF_JMP | BPF_JGE | BPF_K:
1477                case BPF_JMP | BPF_JLE | BPF_K:
1478                case BPF_JMP | BPF_JSGT | BPF_K:
1479                case BPF_JMP | BPF_JSLT | BPF_K:
1480                case BPF_JMP | BPF_JSGE | BPF_K:
1481                case BPF_JMP | BPF_JSLE | BPF_K:
1482                case BPF_JMP32 | BPF_JEQ | BPF_K:
1483                case BPF_JMP32 | BPF_JNE | BPF_K:
1484                case BPF_JMP32 | BPF_JGT | BPF_K:
1485                case BPF_JMP32 | BPF_JLT | BPF_K:
1486                case BPF_JMP32 | BPF_JGE | BPF_K:
1487                case BPF_JMP32 | BPF_JLE | BPF_K:
1488                case BPF_JMP32 | BPF_JSGT | BPF_K:
1489                case BPF_JMP32 | BPF_JSLT | BPF_K:
1490                case BPF_JMP32 | BPF_JSGE | BPF_K:
1491                case BPF_JMP32 | BPF_JSLE | BPF_K:
1492                        /* test dst_reg, dst_reg to save one extra byte */
1493                        if (imm32 == 0) {
1494                                maybe_emit_mod(&prog, dst_reg, dst_reg,
1495                                               BPF_CLASS(insn->code) == BPF_JMP);
1496                                EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
1497                                goto emit_cond_jmp;
1498                        }
1499
1500                        /* cmp dst_reg, imm8/32 */
1501                        if (BPF_CLASS(insn->code) == BPF_JMP)
1502                                EMIT1(add_1mod(0x48, dst_reg));
1503                        else if (is_ereg(dst_reg))
1504                                EMIT1(add_1mod(0x40, dst_reg));
1505
1506                        if (is_imm8(imm32))
1507                                EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
1508                        else
1509                                EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);
1510
1511emit_cond_jmp:          /* Convert BPF opcode to x86 */
1512                        switch (BPF_OP(insn->code)) {
1513                        case BPF_JEQ:
1514                                jmp_cond = X86_JE;
1515                                break;
1516                        case BPF_JSET:
1517                        case BPF_JNE:
1518                                jmp_cond = X86_JNE;
1519                                break;
1520                        case BPF_JGT:
1521                                /* GT is unsigned '>', JA in x86 */
1522                                jmp_cond = X86_JA;
1523                                break;
1524                        case BPF_JLT:
1525                                /* LT is unsigned '<', JB in x86 */
1526                                jmp_cond = X86_JB;
1527                                break;
1528                        case BPF_JGE:
1529                                /* GE is unsigned '>=', JAE in x86 */
1530                                jmp_cond = X86_JAE;
1531                                break;
1532                        case BPF_JLE:
1533                                /* LE is unsigned '<=', JBE in x86 */
1534                                jmp_cond = X86_JBE;
1535                                break;
1536                        case BPF_JSGT:
1537                                /* Signed '>', GT in x86 */
1538                                jmp_cond = X86_JG;
1539                                break;
1540                        case BPF_JSLT:
1541                                /* Signed '<', LT in x86 */
1542                                jmp_cond = X86_JL;
1543                                break;
1544                        case BPF_JSGE:
1545                                /* Signed '>=', GE in x86 */
1546                                jmp_cond = X86_JGE;
1547                                break;
1548                        case BPF_JSLE:
1549                                /* Signed '<=', LE in x86 */
1550                                jmp_cond = X86_JLE;
1551                                break;
1552                        default: /* to silence GCC warning */
1553                                return -EFAULT;
1554                        }
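                        /*
                         * addrs[i] holds the byte offset just past BPF insn i, so
                         * addrs[i + insn->off] is where the target insn starts and
                         * addrs[i] is the end of the jcc emitted below; the x86
                         * displacement is encoded relative to the latter.
                         */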
1555                        jmp_offset = addrs[i + insn->off] - addrs[i];
1556                        if (is_imm8(jmp_offset)) {
1557                                if (jmp_padding) {
1558                                        /* To keep the jmp_offset valid, the extra bytes are
1559                                         * padded before the jump insn, so we subtract the
1560                                         * 2 bytes of the jmp_cond insn from INSN_SZ_DIFF.
1561                                         *
1562                                         * If the previous pass already emits an imm8
1563                                         * jmp_cond, then this BPF insn won't shrink, so
1564                                         * "nops" is 0.
1565                                         *
1566                                         * On the other hand, if the previous pass emits an
1567                                         * imm32 jmp_cond, the extra 4 bytes(*) are padded to
1568                                         * keep the image from shrinking further.
1569                                         *
1570                                         * (*) imm32 jmp_cond is 6 bytes, and imm8 jmp_cond
1571                                         *     is 2 bytes, so the size difference is 4 bytes.
1572                                         */
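                                        /*
                                         * INSN_SZ_DIFF (defined earlier in this file)
                                         * is, roughly, how much this BPF insn shrank
                                         * versus the previous pass:
                                         *
                                         *      (addrs[i] - addrs[i - 1]) - (prog - temp)
                                         *
                                         * Padding that many nops keeps addrs[] stable,
                                         * so displacements computed from it stay valid.
                                         */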
1573                                        nops = INSN_SZ_DIFF - 2;
1574                                        if (nops != 0 && nops != 4) {
1575                                                pr_err("unexpected jmp_cond padding: %d bytes\n",
1576                                                       nops);
1577                                                return -EFAULT;
1578                                        }
1579                                        cnt += emit_nops(&prog, nops);
1580                                }
1581                                EMIT2(jmp_cond, jmp_offset);
1582                        } else if (is_simm32(jmp_offset)) {
1583                                EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
1584                        } else {
1585                                pr_err("cond_jmp gen bug %llx\n", jmp_offset);
1586                                return -EFAULT;
1587                        }
1588
1589                        break;
1590
1591                case BPF_JMP | BPF_JA:
1592                        if (insn->off == -1)
1593                                /* -1 jmp instructions will always jump
1594                                 * backwards two bytes. Explicitly handling
1595                                 * this case avoids wasting too many passes
1596                                 * when there are long sequences of replaced
1597                                 * dead code.
1598                                 */
1599                                jmp_offset = -2;
1600                        else
1601                                jmp_offset = addrs[i + insn->off] - addrs[i];
1602
1603                        if (!jmp_offset) {
1604                                /*
1605                                 * If jmp_padding is enabled, the extra nops will
1606                                 * be inserted. Otherwise, optimize out nop jumps.
1607                                 */
1608                                if (jmp_padding) {
1609                                        /* There are 3 possible conditions.
1610                                         * (1) This BPF_JA is already optimized out in
1611                                         *     the previous run, so there is no need
1612                                         *     to pad any extra byte (0 byte).
1613                                         * (2) The previous pass emits an imm8 jmp,
1614                                         *     so we pad 2 bytes to match the previous
1615                                         *     insn size.
1616                                         * (3) Similarly, the previous pass emits an
1617                                         *     imm32 jmp, and 5 bytes are padded.
1618                                         */
1619                                        nops = INSN_SZ_DIFF;
1620                                        if (nops != 0 && nops != 2 && nops != 5) {
1621                                                pr_err("unexpected nop jump padding: %d bytes\n",
1622                                                       nops);
1623                                                return -EFAULT;
1624                                        }
1625                                        cnt += emit_nops(&prog, nops);
1626                                }
1627                                break;
1628                        }
1629emit_jmp:
1630                        if (is_imm8(jmp_offset)) {
1631                                if (jmp_padding) {
1632                                        /* To avoid breaking jmp_offset, the extra bytes
1633                                         * are padded before the actual jmp insn, so
1634                                         * 2 bytes are subtracted from INSN_SZ_DIFF.
1635                                         *
1636                                         * If the previous pass already emits an imm8
1637                                         * jmp, there is nothing to pad (0 byte).
1638                                         *
1639                                         * If it emits an imm32 jmp (5 bytes) previously
1640                                         * and now an imm8 jmp (2 bytes), then we pad
1641                                         * (5 - 2 = 3) bytes to stop the image from
1642                                         * shrinking further.
1643                                         */
1644                                        nops = INSN_SZ_DIFF - 2;
1645                                        if (nops != 0 && nops != 3) {
1646                                                pr_err("unexpected jump padding: %d bytes\n",
1647                                                       nops);
1648                                                return -EFAULT;
1649                                        }
1650                                        cnt += emit_nops(&prog, nops);
1651                                }
1652                                EMIT2(0xEB, jmp_offset);
1653                        } else if (is_simm32(jmp_offset)) {
1654                                EMIT1_off32(0xE9, jmp_offset);
1655                        } else {
1656                                pr_err("jmp gen bug %llx\n", jmp_offset);
1657                                return -EFAULT;
1658                        }
1659                        break;
1660
1661                case BPF_JMP | BPF_EXIT:
1662                        if (seen_exit) {
1663                                jmp_offset = ctx->cleanup_addr - addrs[i];
1664                                goto emit_jmp;
1665                        }
1666                        seen_exit = true;
1667                        /* Update cleanup_addr */
1668                        ctx->cleanup_addr = proglen;
1669                        pop_callee_regs(&prog, callee_regs_used);
1670                        EMIT1(0xC9);         /* leave */
1671                        EMIT1(0xC3);         /* ret */
1672                        break;
1673
1674                default:
1675                        /*
1676                         * By design x86-64 JIT should support all BPF instructions.
1677                         * This error will be seen if new instruction was added
1678                         * to the interpreter, but not to the JIT, or if there is
1679                         * junk in bpf_prog.
1680                         */
1681                        pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
1682                        return -EINVAL;
1683                }
1684
1685                ilen = prog - temp;
1686                if (ilen > BPF_MAX_INSN_SIZE) {
1687                        pr_err("bpf_jit: fatal insn size error\n");
1688                        return -EFAULT;
1689                }
1690
1691                if (image) {
1692                        /*
1693                         * When populating the image, assert that:
1694                         *
1695                         *  i) We do not write beyond the allocated space, and
1696                         * ii) addrs[i] did not change from the prior run, in order
1697                         *     to validate assumptions made for computing branch
1698                         *     displacements.
1699                         */
1700                        if (unlikely(proglen + ilen > oldproglen ||
1701                                     proglen + ilen != addrs[i])) {
1702                                pr_err("bpf_jit: fatal error\n");
1703                                return -EFAULT;
1704                        }
1705                        memcpy(image + proglen, temp, ilen);
1706                }
1707                proglen += ilen;
1708                addrs[i] = proglen;
1709                prog = temp;
1710        }
1711
1712        if (image && excnt != bpf_prog->aux->num_exentries) {
1713                pr_err("extable is not populated\n");
1714                return -EFAULT;
1715        }
1716        return proglen;
1717}
1718
1719static void save_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
1720                      int stack_size)
1721{
1722        int i;
1723        /* Store function arguments to stack.
1724         * For a function that accepts two pointers the sequence will be:
1725         * mov QWORD PTR [rbp-0x10],rdi
1726         * mov QWORD PTR [rbp-0x8],rsi
1727         */
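        /*
         * Arguments 1-5 arrive in rdi, rsi, rdx, rcx and r8, the x86 homes
         * of BPF_REG_1..BPF_REG_5; a 6th argument arrives in r9, which has
         * no BPF counterpart, hence the X86_REG_R9 alias.
         */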
1728        for (i = 0; i < min(nr_args, 6); i++)
1729                emit_stx(prog, bytes_to_bpf_size(m->arg_size[i]),
1730                         BPF_REG_FP,
1731                         i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
1732                         -(stack_size - i * 8));
1733}
1734
1735static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
1736                         int stack_size)
1737{
1738        int i;
1739
1740        /* Restore function arguments from stack.
1741         * For a function that accepts two pointers the sequence will be:
1742         * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
1743         * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
1744         */
1745        for (i = 0; i < min(nr_args, 6); i++)
1746                emit_ldx(prog, bytes_to_bpf_size(m->arg_size[i]),
1747                         i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
1748                         BPF_REG_FP,
1749                         -(stack_size - i * 8));
1750}
1751
1752static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
1753                           struct bpf_prog *p, int stack_size, bool mod_ret)
1754{
1755        u8 *prog = *pprog;
1756        u8 *jmp_insn;
1757        int cnt = 0;
1758
1759        /* arg1: mov rdi, progs[i] */
1760        emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
1761        if (emit_call(&prog,
1762                      p->aux->sleepable ? __bpf_prog_enter_sleepable :
1763                      __bpf_prog_enter, prog))
1764                return -EINVAL;
1765        /* remember prog start time returned by __bpf_prog_enter */
1766        emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
1767
1768        /* if (__bpf_prog_enter*(prog) == 0)
1769         *      goto skip_exec_of_prog;
1770         */
1771        EMIT3(0x48, 0x85, 0xC0);  /* test rax,rax */
1772        /* emit 2 nops that will be replaced with JE insn */
1773        jmp_insn = prog;
1774        emit_nops(&prog, 2);
1775
1776        /* arg1: lea rdi, [rbp - stack_size] */
1777        EMIT4(0x48, 0x8D, 0x7D, -stack_size);
1778        /* arg2: progs[i]->insnsi for interpreter */
1779        if (!p->jited)
1780                emit_mov_imm64(&prog, BPF_REG_2,
1781                               (long) p->insnsi >> 32,
1782                               (u32) (long) p->insnsi);
1783        /* call JITed bpf program or interpreter */
1784        if (emit_call(&prog, p->bpf_func, prog))
1785                return -EINVAL;
1786
1787        /* BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
1788         * of the previous call which is then passed on the stack to
1789         * the next BPF program.
1790         */
1791        if (mod_ret)
1792                emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
1793
1794        /* replace 2 nops with JE insn, since jmp target is known */
1795        jmp_insn[0] = X86_JE;
1796        jmp_insn[1] = prog - jmp_insn - 2;
1797
1798        /* arg1: mov rdi, progs[i] */
1799        emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
1800        /* arg2: mov rsi, rbx <- start time in nsec */
1801        emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
1802        if (emit_call(&prog,
1803                      p->aux->sleepable ? __bpf_prog_exit_sleepable :
1804                      __bpf_prog_exit, prog))
1805                return -EINVAL;
1806
1807        *pprog = prog;
1808        return 0;
1809}
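
/*
 * Run-time behaviour of the fragment emitted by invoke_bpf_prog() above, as
 * a rough C sketch ("start" lives in rbx, "ctx" is rbp - stack_size):
 *
 *      start = __bpf_prog_enter(p);       // or the sleepable variant
 *      if (start) {                       // 0 means: skip this execution
 *              ret = p->bpf_func(ctx, p->insnsi);  // insnsi only used by interpreter
 *              if (mod_ret)
 *                      *(u64 *)(rbp - 8) = ret;
 *      }
 *      __bpf_prog_exit(p, start);         // or the sleepable variant
 */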
1810
1811static void emit_align(u8 **pprog, u32 align)
1812{
1813        u8 *target, *prog = *pprog;
1814
1815        target = PTR_ALIGN(prog, align);
1816        if (target != prog)
1817                emit_nops(&prog, target - prog);
1818
1819        *pprog = prog;
1820}
1821
1822static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
1823{
1824        u8 *prog = *pprog;
1825        int cnt = 0;
1826        s64 offset;
1827
1828        offset = func - (ip + 2 + 4);
1829        if (!is_simm32(offset)) {
1830                pr_err("Target %p is out of range\n", func);
1831                return -EINVAL;
1832        }
1833        EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
1834        *pprog = prog;
1835        return 0;
1836}
1837
1838static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
1839                      struct bpf_tramp_progs *tp, int stack_size)
1840{
1841        int i;
1842        u8 *prog = *pprog;
1843
1844        for (i = 0; i < tp->nr_progs; i++) {
1845                if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, false))
1846                        return -EINVAL;
1847        }
1848        *pprog = prog;
1849        return 0;
1850}
1851
1852static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
1853                              struct bpf_tramp_progs *tp, int stack_size,
1854                              u8 **branches)
1855{
1856        u8 *prog = *pprog;
1857        int i, cnt = 0;
1858
1859        /* The first fmod_ret program will receive a garbage return value.
1860         * Set this to 0 to avoid confusing the program.
1861         */
1862        emit_mov_imm32(&prog, false, BPF_REG_0, 0);
1863        emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
1864        for (i = 0; i < tp->nr_progs; i++) {
1865                if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, true))
1866                        return -EINVAL;
1867
1868                /* mod_ret prog stored return value into [rbp - 8]. Emit:
1869                 * if (*(u64 *)(rbp - 8) !=  0)
1870                 *      goto do_fexit;
1871                 */
1872                /* cmp QWORD PTR [rbp - 0x8], 0x0 */
1873                EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);
1874
1875                /* Save the location of the branch and generate 6 nops
1876                 * (4 bytes for an offset and 2 bytes for the jump). These nops
1877                 * are replaced with a conditional jump once do_fexit (i.e. the
1878                 * start of the fexit invocation) is finalized.
1879                 */
1880                branches[i] = prog;
1881                emit_nops(&prog, 4 + 2);
1882        }
1883
1884        *pprog = prog;
1885        return 0;
1886}
1887
1888/* Example:
1889 * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
1890 * its 'struct btf_func_model' will have nr_args=2
1891 * The assembly code when eth_type_trans is executing after trampoline:
1892 *
1893 * push rbp
1894 * mov rbp, rsp
1895 * sub rsp, 16                     // space for skb and dev
1896 * push rbx                        // temp regs to pass start time
1897 * mov qword ptr [rbp - 16], rdi   // save skb pointer to stack
1898 * mov qword ptr [rbp - 8], rsi    // save dev pointer to stack
1899 * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
1900 * mov rbx, rax                    // remember start time if bpf stats are enabled
1901 * lea rdi, [rbp - 16]             // R1==ctx of bpf prog
1902 * call addr_of_jited_FENTRY_prog
1903 * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
1904 * mov rsi, rbx                    // prog start time
1905 * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
1906 * mov rdi, qword ptr [rbp - 16]   // restore skb pointer from stack
1907 * mov rsi, qword ptr [rbp - 8]    // restore dev pointer from stack
1908 * pop rbx
1909 * leave
1910 * ret
1911 *
1912 * eth_type_trans has a 5-byte nop at the beginning. These 5 bytes will be
1913 * replaced with 'call generated_bpf_trampoline'. When it returns,
1914 * eth_type_trans will continue executing with original skb and dev pointers.
1915 *
1916 * The assembly code when eth_type_trans is called from trampoline:
1917 *
1918 * push rbp
1919 * mov rbp, rsp
1920 * sub rsp, 24                     // space for skb, dev, return value
1921 * push rbx                        // temp regs to pass start time
1922 * mov qword ptr [rbp - 24], rdi   // save skb pointer to stack
1923 * mov qword ptr [rbp - 16], rsi   // save dev pointer to stack
1924 * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
1925 * mov rbx, rax                    // remember start time if bpf stats are enabled
1926 * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
1927 * call addr_of_jited_FENTRY_prog  // bpf prog can access skb and dev
1928 * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
1929 * mov rsi, rbx                    // prog start time
1930 * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
1931 * mov rdi, qword ptr [rbp - 24]   // restore skb pointer from stack
1932 * mov rsi, qword ptr [rbp - 16]   // restore dev pointer from stack
1933 * call eth_type_trans+5           // execute body of eth_type_trans
1934 * mov qword ptr [rbp - 8], rax    // save return value
1935 * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
1936 * mov rbx, rax                    // remember start time if bpf stats are enabled
1937 * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
1938 * call addr_of_jited_FEXIT_prog   // bpf prog can access skb, dev, return value
1939 * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
1940 * mov rsi, rbx                    // prog start time
1941 * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
1942 * mov rax, qword ptr [rbp - 8]    // restore eth_type_trans's return value
1943 * pop rbx
1944 * leave
1945 * add rsp, 8                      // skip eth_type_trans's frame
1946 * ret                             // return to its caller
1947 */
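/*
 * Rough control flow of the generated trampoline when fentry, fmod_ret and
 * fexit programs are all attached (illustrative; the assembly above does not
 * show the fmod_ret path):
 *
 *      save args to stack
 *      __bpf_tramp_enter(im)                // only with BPF_TRAMP_F_CALL_ORIG
 *      run fentry progs
 *      [rbp - 8] = 0; run fmod_ret progs    // each may overwrite [rbp - 8];
 *                                           // a non-zero value branches to do_fexit
 *      restore args; [rbp - 8] = orig_call(args)
 * do_fexit:
 *      run fexit progs
 *      __bpf_tramp_exit(im)                 // only with BPF_TRAMP_F_CALL_ORIG
 *      rax = [rbp - 8]; epilogue
 */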
1948int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
1949                                const struct btf_func_model *m, u32 flags,
1950                                struct bpf_tramp_progs *tprogs,
1951                                void *orig_call)
1952{
1953        int ret, i, cnt = 0, nr_args = m->nr_args;
1954        int stack_size = nr_args * 8;
1955        struct bpf_tramp_progs *fentry = &tprogs[BPF_TRAMP_FENTRY];
1956        struct bpf_tramp_progs *fexit = &tprogs[BPF_TRAMP_FEXIT];
1957        struct bpf_tramp_progs *fmod_ret = &tprogs[BPF_TRAMP_MODIFY_RETURN];
1958        u8 **branches = NULL;
1959        u8 *prog;
1960
1961        /* x86-64 supports up to 6 arguments. 7+ can be added in the future */
1962        if (nr_args > 6)
1963                return -ENOTSUPP;
1964
1965        if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&
1966            (flags & BPF_TRAMP_F_SKIP_FRAME))
1967                return -EINVAL;
1968
1969        if (flags & BPF_TRAMP_F_CALL_ORIG)
1970                stack_size += 8; /* room for return value of orig_call */
1971
1972        if (flags & BPF_TRAMP_F_SKIP_FRAME)
1973                /* skip patched call instruction and point orig_call to actual
1974                 * body of the kernel function.
1975                 */
1976                orig_call += X86_PATCH_SIZE;
1977
1978        prog = image;
1979
1980        EMIT1(0x55);             /* push rbp */
1981        EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
1982        EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */
1983        EMIT1(0x53);             /* push rbx */
1984
1985        save_regs(m, &prog, nr_args, stack_size);
1986
1987        if (flags & BPF_TRAMP_F_CALL_ORIG) {
1988                /* arg1: mov rdi, im */
1989                emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
1990                if (emit_call(&prog, __bpf_tramp_enter, prog)) {
1991                        ret = -EINVAL;
1992                        goto cleanup;
1993                }
1994        }
1995
1996        if (fentry->nr_progs)
1997                if (invoke_bpf(m, &prog, fentry, stack_size))
1998                        return -EINVAL;
1999
2000        if (fmod_ret->nr_progs) {
2001                branches = kcalloc(fmod_ret->nr_progs, sizeof(u8 *),
2002                                   GFP_KERNEL);
2003                if (!branches)
2004                        return -ENOMEM;
2005
2006                if (invoke_bpf_mod_ret(m, &prog, fmod_ret, stack_size,
2007                                       branches)) {
2008                        ret = -EINVAL;
2009                        goto cleanup;
2010                }
2011        }
2012
2013        if (flags & BPF_TRAMP_F_CALL_ORIG) {
2014                restore_regs(m, &prog, nr_args, stack_size);
2015
2016                /* call original function */
2017                if (emit_call(&prog, orig_call, prog)) {
2018                        ret = -EINVAL;
2019                        goto cleanup;
2020                }
2021                /* remember return value on the stack for bpf prog to access */
2022                emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
2023                im->ip_after_call = prog;
2024                memcpy(prog, ideal_nops[NOP_ATOMIC5], X86_PATCH_SIZE);
2025                prog += X86_PATCH_SIZE;
2026        }
2027
2028        if (fmod_ret->nr_progs) {
2029                /* From Intel 64 and IA-32 Architectures Optimization
2030                 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
2031                 * Coding Rule 11: All branch targets should be 16-byte
2032                 * aligned.
2033                 */
2034                emit_align(&prog, 16);
2035                /* Update the branches saved in invoke_bpf_mod_ret with the
2036                 * aligned address of do_fexit.
2037                 */
2038                for (i = 0; i < fmod_ret->nr_progs; i++)
2039                        emit_cond_near_jump(&branches[i], prog, branches[i],
2040                                            X86_JNE);
2041        }
2042
2043        if (fexit->nr_progs)
2044                if (invoke_bpf(m, &prog, fexit, stack_size)) {
2045                        ret = -EINVAL;
2046                        goto cleanup;
2047                }
2048
2049        if (flags & BPF_TRAMP_F_RESTORE_REGS)
2050                restore_regs(m, &prog, nr_args, stack_size);
2051
2052        /* This needs to be done regardless. If there were fmod_ret programs,
2053         * the return value is only updated on the stack and still needs to be
2054         * restored to R0.
2055         */
2056        if (flags & BPF_TRAMP_F_CALL_ORIG) {
2057                im->ip_epilogue = prog;
2058                /* arg1: mov rdi, im */
2059                emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
2060                if (emit_call(&prog, __bpf_tramp_exit, prog)) {
2061                        ret = -EINVAL;
2062                        goto cleanup;
2063                }
2064                /* restore original return value back into RAX */
2065                emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
2066        }
2067
2068        EMIT1(0x5B); /* pop rbx */
2069        EMIT1(0xC9); /* leave */
2070        if (flags & BPF_TRAMP_F_SKIP_FRAME)
2071                /* skip our return address and return to parent */
2072                EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
2073        EMIT1(0xC3); /* ret */
2074        /* Make sure the trampoline generation logic doesn't overflow */
2075        if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) {
2076                ret = -EFAULT;
2077                goto cleanup;
2078        }
2079        ret = prog - (u8 *)image;
2080
2081cleanup:
2082        kfree(branches);
2083        return ret;
2084}
2085
2086static int emit_fallback_jump(u8 **pprog)
2087{
2088        u8 *prog = *pprog;
2089        int err = 0;
2090
2091#ifdef CONFIG_RETPOLINE
2092        /* Note that this assumes that the compiler uses external
2093         * thunks for indirect calls. Both clang and GCC use the same
2094         * naming convention for external thunks.
2095         */
2096        err = emit_jump(&prog, __x86_indirect_thunk_rdx, prog);
2097#else
2098        int cnt = 0;
2099
2100        EMIT2(0xFF, 0xE2);      /* jmp rdx */
2101#endif
2102        *pprog = prog;
2103        return err;
2104}
2105
2106static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs)
2107{
2108        u8 *jg_reloc, *prog = *pprog;
2109        int pivot, err, jg_bytes = 1, cnt = 0;
2110        s64 jg_offset;
2111
2112        if (a == b) {
2113                /* Leaf node of recursion, i.e. not a range of indices
2114                 * anymore.
2115                 */
2116                EMIT1(add_1mod(0x48, BPF_REG_3));       /* cmp rdx,func */
2117                if (!is_simm32(progs[a]))
2118                        return -1;
2119                EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3),
2120                            progs[a]);
2121                err = emit_cond_near_jump(&prog,        /* je func */
2122                                          (void *)progs[a], prog,
2123                                          X86_JE);
2124                if (err)
2125                        return err;
2126
2127                err = emit_fallback_jump(&prog);        /* jmp thunk/indirect */
2128                if (err)
2129                        return err;
2130
2131                *pprog = prog;
2132                return 0;
2133        }
2134
2135        /* Not a leaf node, so we pivot, and recursively descend into
2136         * the lower and upper ranges.
2137         */
2138        pivot = (b - a) / 2;
2139        EMIT1(add_1mod(0x48, BPF_REG_3));               /* cmp rdx,func */
2140        if (!is_simm32(progs[a + pivot]))
2141                return -1;
2142        EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]);
2143
2144        if (pivot > 2) {                                /* jg upper_part */
2145                /* Require near jump. */
2146                jg_bytes = 4;
2147                EMIT2_off32(0x0F, X86_JG + 0x10, 0);
2148        } else {
2149                EMIT2(X86_JG, 0);
2150        }
2151        jg_reloc = prog;
2152
2153        err = emit_bpf_dispatcher(&prog, a, a + pivot,  /* emit lower_part */
2154                                  progs);
2155        if (err)
2156                return err;
2157
2158        /* From Intel 64 and IA-32 Architectures Optimization
2159         * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
2160         * Coding Rule 11: All branch targets should be 16-byte
2161         * aligned.
2162         */
2163        emit_align(&prog, 16);
2164        jg_offset = prog - jg_reloc;
2165        emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);
2166
2167        err = emit_bpf_dispatcher(&prog, a + pivot + 1, /* emit upper_part */
2168                                  b, progs);
2169        if (err)
2170                return err;
2171
2172        *pprog = prog;
2173        return 0;
2174}
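
/*
 * What the emitted dispatcher implements at run time, as a rough C sketch
 * ("ip" is the target address held in rdx/BPF_REG_3, jump_to() is notional):
 *
 *      static void dispatch(s64 *progs, int a, int b, s64 ip)
 *      {
 *              if (a == b) {
 *                      if (ip == progs[a])
 *                              jump_to(progs[a]);      // direct: je func
 *                      jump_to(ip);                    // fallback: jmp thunk / jmp rdx
 *              }
 *              int pivot = (b - a) / 2;
 *              if (ip <= progs[a + pivot])
 *                      dispatch(progs, a, a + pivot, ip);      // lower_part
 *              else
 *                      dispatch(progs, a + pivot + 1, b, ip);  // upper_part
 *      }
 *
 * progs[] must be sorted, see arch_prepare_bpf_dispatcher() below.
 */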
2175
2176static int cmp_ips(const void *a, const void *b)
2177{
2178        const s64 *ipa = a;
2179        const s64 *ipb = b;
2180
2181        if (*ipa > *ipb)
2182                return 1;
2183        if (*ipa < *ipb)
2184                return -1;
2185        return 0;
2186}
2187
2188int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs)
2189{
2190        u8 *prog = image;
2191
2192        sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL);
2193        return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs);
2194}
2195
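/*
 * JIT state kept in prog->aux->jit_data between the initial compile and the
 * extra pass that runs once subprogram addresses are known, so call sites
 * can be patched without repeating the whole convergence loop.
 */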
2196struct x64_jit_data {
2197        struct bpf_binary_header *header;
2198        int *addrs;
2199        u8 *image;
2200        int proglen;
2201        struct jit_context ctx;
2202};
2203
2204#define MAX_PASSES 20
2205#define PADDING_PASSES (MAX_PASSES - 5)
2206
2207struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
2208{
2209        struct bpf_binary_header *header = NULL;
2210        struct bpf_prog *tmp, *orig_prog = prog;
2211        struct x64_jit_data *jit_data;
2212        int proglen, oldproglen = 0;
2213        struct jit_context ctx = {};
2214        bool tmp_blinded = false;
2215        bool extra_pass = false;
2216        bool padding = false;
2217        u8 *image = NULL;
2218        int *addrs;
2219        int pass;
2220        int i;
2221
2222        if (!prog->jit_requested)
2223                return orig_prog;
2224
2225        tmp = bpf_jit_blind_constants(prog);
2226        /*
2227         * If blinding was requested and we failed during blinding,
2228         * we must fall back to the interpreter.
2229         */
2230        if (IS_ERR(tmp))
2231                return orig_prog;
2232        if (tmp != prog) {
2233                tmp_blinded = true;
2234                prog = tmp;
2235        }
2236
2237        jit_data = prog->aux->jit_data;
2238        if (!jit_data) {
2239                jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
2240                if (!jit_data) {
2241                        prog = orig_prog;
2242                        goto out;
2243                }
2244                prog->aux->jit_data = jit_data;
2245        }
2246        addrs = jit_data->addrs;
2247        if (addrs) {
2248                ctx = jit_data->ctx;
2249                oldproglen = jit_data->proglen;
2250                image = jit_data->image;
2251                header = jit_data->header;
2252                extra_pass = true;
2253                padding = true;
2254                goto skip_init_addrs;
2255        }
2256        addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
2257        if (!addrs) {
2258                prog = orig_prog;
2259                goto out_addrs;
2260        }
2261
2262        /*
2263         * Before the first pass, make a rough estimate of addrs[]:
2264         * each BPF instruction is translated to less than 64 bytes.
2265         */
2266        for (proglen = 0, i = 0; i <= prog->len; i++) {
2267                proglen += 64;
2268                addrs[i] = proglen;
2269        }
2270        ctx.cleanup_addr = proglen;
2271skip_init_addrs:
2272
2273        /*
2274         * JITed image shrinks with every pass and the loop iterates
2275         * until the image stops shrinking. Very large BPF programs
2276         * may converge on the last pass. In such a case, do one more
2277         * pass to emit the final image.
2278         */
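        /*
         * Pass structure, roughly:
         *
         *      do_jit(image == NULL)  repeatedly, until proglen stops
         *          changing, then allocate an image of exactly proglen bytes;
         *      do_jit(image != NULL)  once more to write the final bytes;
         *          this pass must not change proglen or any addrs[i].
         *
         * Padding (jmp_padding in do_jit()) kicks in after PADDING_PASSES to
         * force convergence for programs that have not settled by then.
         */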
2279        for (pass = 0; pass < MAX_PASSES || image; pass++) {
2280                if (!padding && pass >= PADDING_PASSES)
2281                        padding = true;
2282                proglen = do_jit(prog, addrs, image, oldproglen, &ctx, padding);
2283                if (proglen <= 0) {
2284out_image:
2285                        image = NULL;
2286                        if (header)
2287                                bpf_jit_binary_free(header);
2288                        prog = orig_prog;
2289                        goto out_addrs;
2290                }
2291                if (image) {
2292                        if (proglen != oldproglen) {
2293                                pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
2294                                       proglen, oldproglen);
2295                                goto out_image;
2296                        }
2297                        break;
2298                }
2299                if (proglen == oldproglen) {
2300                        /*
2301                         * The number of entries in extable is the number of BPF_LDX
2302                         * insns that access kernel memory via "pointer to BTF type".
2303                         * The verifier changed their opcode from LDX|MEM|size
2304                         * to LDX|PROBE_MEM|size to make JITing easier.
2305                         */
2306                        u32 align = __alignof__(struct exception_table_entry);
2307                        u32 extable_size = prog->aux->num_exentries *
2308                                sizeof(struct exception_table_entry);
2309
2310                        /* allocate module memory for x86 insns and extable */
2311                        header = bpf_jit_binary_alloc(roundup(proglen, align) + extable_size,
2312                                                      &image, align, jit_fill_hole);
2313                        if (!header) {
2314                                prog = orig_prog;
2315                                goto out_addrs;
2316                        }
2317                        prog->aux->extable = (void *) image + roundup(proglen, align);
2318                }
2319                oldproglen = proglen;
2320                cond_resched();
2321        }
2322
2323        if (bpf_jit_enable > 1)
2324                bpf_jit_dump(prog->len, proglen, pass + 1, image);
2325
2326        if (image) {
2327                if (!prog->is_func || extra_pass) {
2328                        bpf_tail_call_direct_fixup(prog);
2329                        bpf_jit_binary_lock_ro(header);
2330                } else {
2331                        jit_data->addrs = addrs;
2332                        jit_data->ctx = ctx;
2333                        jit_data->proglen = proglen;
2334                        jit_data->image = image;
2335                        jit_data->header = header;
2336                }
2337                prog->bpf_func = (void *)image;
2338                prog->jited = 1;
2339                prog->jited_len = proglen;
2340        } else {
2341                prog = orig_prog;
2342        }
2343
2344        if (!image || !prog->is_func || extra_pass) {
2345                if (image)
2346                        bpf_prog_fill_jited_linfo(prog, addrs + 1);
2347out_addrs:
2348                kvfree(addrs);
2349                kfree(jit_data);
2350                prog->aux->jit_data = NULL;
2351        }
2352out:
2353        if (tmp_blinded)
2354                bpf_jit_prog_release_other(prog, prog == orig_prog ?
2355                                           tmp : orig_prog);
2356        return prog;
2357}
2358