linux/arch/x86/net/bpf_jit_comp.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * bpf_jit_comp.c: BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/memory.h>
#include <linux/sort.h>
#include <asm/extable.h>
#include <asm/set_memory.h>
#include <asm/nospec-branch.h>
#include <asm/text-patching.h>
#include <asm/asm-prototypes.h>

static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
        if (len == 1)
                *ptr = bytes;
        else if (len == 2)
                *(u16 *)ptr = bytes;
        else {
                *(u32 *)ptr = bytes;
                barrier();
        }
        return ptr + len;
}

#define EMIT(bytes, len) \
        do { prog = emit_code(prog, bytes, len); cnt += len; } while (0)

#define EMIT1(b1)               EMIT(b1, 1)
#define EMIT2(b1, b2)           EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)       EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)   EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)

#define EMIT1_off32(b1, off) \
        do { EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
        do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
        do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
        do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
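
/*
 * For example, EMIT3(0x48, 0x89, 0xE5) packs its arguments into the
 * little-endian value 0x00E58948; emit_code() stores it as a u32 but
 * advances the pointer by 3, so the bytes 48 89 e5 ("mov rbp, rsp")
 * land in the image and the spare fourth byte is overwritten by the
 * next EMIT. The first argument is therefore always the first opcode
 * byte emitted.
 */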

static bool is_imm8(int value)
{
        return value <= 127 && value >= -128;
}

static bool is_simm32(s64 value)
{
        return value == (s64)(s32)value;
}

static bool is_uimm32(u64 value)
{
        return value == (u64)(u32)value;
}
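
/*
 * These predicates drive the encoding-size choices below: is_imm8()
 * selects the short disp8/imm8 instruction forms (e.g. the 0x83 ALU
 * opcodes and one-byte ModRM displacements), while is_simm32() guards
 * rel32 call/jump offsets that must fit in a sign-extended 32-bit
 * field.
 */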

/* mov dst, src */
#define EMIT_mov(DST, SRC)                                                               \
        do {                                                                             \
                if (DST != SRC)                                                          \
                        EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
        } while (0)

static int bpf_size_to_x86_bytes(int bpf_size)
{
        if (bpf_size == BPF_W)
                return 4;
        else if (bpf_size == BPF_H)
                return 2;
        else if (bpf_size == BPF_B)
                return 1;
        else if (bpf_size == BPF_DW)
                return 4; /* imm32 */
        else
                return 0;
}

/*
 * List of x86 conditional jump opcodes (. + s8)
 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
 */
#define X86_JB  0x72
#define X86_JAE 0x73
#define X86_JE  0x74
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA  0x77
#define X86_JL  0x7C
#define X86_JGE 0x7D
#define X86_JLE 0x7E
#define X86_JG  0x7F

/* Pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_JIT_REG + 1)
#define X86_REG_R9 (MAX_BPF_JIT_REG + 2)

/*
 * The following table maps BPF registers to x86-64 registers.
 *
 * x86-64 register R12 is unused, since if used as base address
 * register in load/store instructions, it always needs an
 * extra byte of encoding and is callee saved.
 *
 * x86-64 register R9 is not used by BPF programs, but can be used by BPF
 * trampoline. x86-64 register R10 is used for blinding (if enabled).
 */
static const int reg2hex[] = {
        [BPF_REG_0] = 0,  /* RAX */
        [BPF_REG_1] = 7,  /* RDI */
        [BPF_REG_2] = 6,  /* RSI */
        [BPF_REG_3] = 2,  /* RDX */
        [BPF_REG_4] = 1,  /* RCX */
        [BPF_REG_5] = 0,  /* R8  */
        [BPF_REG_6] = 3,  /* RBX callee saved */
        [BPF_REG_7] = 5,  /* R13 callee saved */
        [BPF_REG_8] = 6,  /* R14 callee saved */
        [BPF_REG_9] = 7,  /* R15 callee saved */
        [BPF_REG_FP] = 5, /* RBP readonly */
        [BPF_REG_AX] = 2, /* R10 temp register */
        [AUX_REG] = 3,    /* R11 temp register */
        [X86_REG_R9] = 1, /* R9 register, 6th function argument */
};

static const int reg2pt_regs[] = {
        [BPF_REG_0] = offsetof(struct pt_regs, ax),
        [BPF_REG_1] = offsetof(struct pt_regs, di),
        [BPF_REG_2] = offsetof(struct pt_regs, si),
        [BPF_REG_3] = offsetof(struct pt_regs, dx),
        [BPF_REG_4] = offsetof(struct pt_regs, cx),
        [BPF_REG_5] = offsetof(struct pt_regs, r8),
        [BPF_REG_6] = offsetof(struct pt_regs, bx),
        [BPF_REG_7] = offsetof(struct pt_regs, r13),
        [BPF_REG_8] = offsetof(struct pt_regs, r14),
        [BPF_REG_9] = offsetof(struct pt_regs, r15),
};

/*
 * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15,
 * which need an extra byte of encoding.
 * rax,rcx,...,rbp have a simpler encoding.
 */
static bool is_ereg(u32 reg)
{
        return (1 << reg) & (BIT(BPF_REG_5) |
                             BIT(AUX_REG) |
                             BIT(BPF_REG_7) |
                             BIT(BPF_REG_8) |
                             BIT(BPF_REG_9) |
                             BIT(X86_REG_R9) |
                             BIT(BPF_REG_AX));
}

/*
 * is_ereg_8l() == true if BPF register 'reg' is mapped to access the x86-64
 * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need an extra byte
 * of encoding. al,cl,dl,bl have a simpler encoding.
 */
static bool is_ereg_8l(u32 reg)
{
        return is_ereg(reg) ||
            (1 << reg) & (BIT(BPF_REG_1) |
                          BIT(BPF_REG_2) |
                          BIT(BPF_REG_FP));
}

static bool is_axreg(u32 reg)
{
        return reg == BPF_REG_0;
}

/* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
static u8 add_1mod(u8 byte, u32 reg)
{
        if (is_ereg(reg))
                byte |= 1;
        return byte;
}

static u8 add_2mod(u8 byte, u32 r1, u32 r2)
{
        if (is_ereg(r1))
                byte |= 1;
        if (is_ereg(r2))
                byte |= 4;
        return byte;
}

/* Encode 'dst_reg' register into x86-64 opcode 'byte' */
static u8 add_1reg(u8 byte, u32 dst_reg)
{
        return byte + reg2hex[dst_reg];
}

/* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
        return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}
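
/*
 * Worked example: EMIT_mov(BPF_REG_1, BPF_REG_0) expands to
 * EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)).
 * Neither register is an ereg, so the REX prefix stays 0x48 and the
 * ModRM byte is 0xC0 + 7 + (0 << 3) = 0xC7, i.e. 48 89 c7,
 * "mov rdi, rax". With an ereg source such as BPF_REG_8 (r14),
 * add_2mod() sets REX.R: EMIT_mov(BPF_REG_6, BPF_REG_8) becomes
 * 4c 89 f3, "mov rbx, r14".
 */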

static void jit_fill_hole(void *area, unsigned int size)
{
        /* Fill whole space with INT3 instructions */
        memset(area, 0xcc, size);
}

struct jit_context {
        int cleanup_addr; /* Epilogue code offset */
};

/* Maximum number of bytes emitted while JITing one eBPF insn */
#define BPF_MAX_INSN_SIZE       128
#define BPF_INSN_SAFETY         64

/* Number of bytes emit_patch() needs to generate instructions */
#define X86_PATCH_SIZE          5

#define PROLOGUE_SIZE           25

/*
 * Emit x86-64 prologue code for BPF program and check its size.
 * bpf_tail_call helper will skip it while jumping into another program
 */
static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf)
{
        u8 *prog = *pprog;
        int cnt = X86_PATCH_SIZE;

        /* BPF trampoline can be made to work without these nops,
         * but let's waste 5 bytes for now and optimize later
         */
        memcpy(prog, ideal_nops[NOP_ATOMIC5], cnt);
        prog += cnt;
        EMIT1(0x55);             /* push rbp */
        EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
        /* sub rsp, rounded_stack_depth */
        EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
        EMIT1(0x53);             /* push rbx */
        EMIT2(0x41, 0x55);       /* push r13 */
        EMIT2(0x41, 0x56);       /* push r14 */
        EMIT2(0x41, 0x57);       /* push r15 */
        if (!ebpf_from_cbpf) {
                /* zero init tail_call_cnt */
                EMIT2(0x6a, 0x00);
                BUILD_BUG_ON(cnt != PROLOGUE_SIZE);
        }
        *pprog = prog;
}
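
/*
 * Byte accounting for the above: 5 (nops) + 1 (push rbp) +
 * 3 (mov rbp, rsp) + 7 (sub rsp, imm32) + 1 (push rbx) +
 * 3 * 2 (push r13/r14/r15) + 2 (push 0) = 25 = PROLOGUE_SIZE,
 * which the BUILD_BUG_ON() pins down and which tail calls skip
 * by jumping to prog->bpf_func + PROLOGUE_SIZE.
 */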

static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
{
        u8 *prog = *pprog;
        int cnt = 0;
        s64 offset;

        offset = func - (ip + X86_PATCH_SIZE);
        if (!is_simm32(offset)) {
                pr_err("Target call %p is out of range\n", func);
                return -ERANGE;
        }
        EMIT1_off32(opcode, offset);
        *pprog = prog;
        return 0;
}

static int emit_call(u8 **pprog, void *func, void *ip)
{
        return emit_patch(pprog, func, ip, 0xE8);
}

static int emit_jump(u8 **pprog, void *func, void *ip)
{
        return emit_patch(pprog, func, ip, 0xE9);
}
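
/*
 * The rel32 displacement is relative to the end of the 5-byte
 * instruction: a call whose target immediately follows the call site
 * encodes as e8 00 00 00 00, and a jump back to its own first byte
 * as e9 fb ff ff ff (-5).
 */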

static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
                                void *old_addr, void *new_addr,
                                const bool text_live)
{
        const u8 *nop_insn = ideal_nops[NOP_ATOMIC5];
        u8 old_insn[X86_PATCH_SIZE];
        u8 new_insn[X86_PATCH_SIZE];
        u8 *prog;
        int ret;

        memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
        if (old_addr) {
                prog = old_insn;
                ret = t == BPF_MOD_CALL ?
                      emit_call(&prog, old_addr, ip) :
                      emit_jump(&prog, old_addr, ip);
                if (ret)
                        return ret;
        }

        memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
        if (new_addr) {
                prog = new_insn;
                ret = t == BPF_MOD_CALL ?
                      emit_call(&prog, new_addr, ip) :
                      emit_jump(&prog, new_addr, ip);
                if (ret)
                        return ret;
        }

        ret = -EBUSY;
        mutex_lock(&text_mutex);
        if (memcmp(ip, old_insn, X86_PATCH_SIZE))
                goto out;
        if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
                if (text_live)
                        text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
                else
                        memcpy(ip, new_insn, X86_PATCH_SIZE);
        }
        ret = 0;
out:
        mutex_unlock(&text_mutex);
        return ret;
}

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
                       void *old_addr, void *new_addr)
{
        if (!is_kernel_text((long)ip) &&
            !is_bpf_text_address((long)ip))
                /* BPF poking in modules is not supported */
                return -EINVAL;

        return __bpf_arch_text_poke(ip, t, old_addr, new_addr, true);
}
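
/*
 * Typical use is flipping the 5-byte nop emitted at a direct tail-call
 * site into a jump, e.g. bpf_arch_text_poke(ip, BPF_MOD_JUMP, NULL,
 * target). A NULL old_addr or new_addr stands for the 5-byte nop on
 * that side, and the memcmp() against the expected old bytes turns a
 * lost patching race into -EBUSY rather than a silently clobbered
 * instruction.
 */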

/*
 * Generate the following code:
 *
 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
 *   if (index >= array->map.max_entries)
 *     goto out;
 *   if (tail_call_cnt > MAX_TAIL_CALL_CNT)
 *     goto out;
 *   tail_call_cnt++;
 *   prog = array->ptrs[index];
 *   if (prog == NULL)
 *     goto out;
 *   goto *(prog->bpf_func + prologue_size);
 * out:
 */
static void emit_bpf_tail_call_indirect(u8 **pprog)
{
        u8 *prog = *pprog;
        int label1, label2, label3;
        int cnt = 0;

        /*
         * rdi - pointer to ctx
         * rsi - pointer to bpf_array
         * rdx - index in bpf_array
         */

        /*
         * if (index >= array->map.max_entries)
         *      goto out;
         */
        EMIT2(0x89, 0xD2);                        /* mov edx, edx */
        EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
              offsetof(struct bpf_array, map.max_entries));
#define OFFSET1 (41 + RETPOLINE_RAX_BPF_JIT_SIZE) /* Number of bytes to jump */
        EMIT2(X86_JBE, OFFSET1);                  /* jbe out */
        label1 = cnt;

        /*
         * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
         *      goto out;
         */
        EMIT2_off32(0x8B, 0x85, -36 - MAX_BPF_STACK); /* mov eax, dword ptr [rbp - 548] */
        EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
#define OFFSET2 (30 + RETPOLINE_RAX_BPF_JIT_SIZE)
        EMIT2(X86_JA, OFFSET2);                   /* ja out */
        label2 = cnt;
        EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
        EMIT2_off32(0x89, 0x85, -36 - MAX_BPF_STACK); /* mov dword ptr [rbp - 548], eax */

        /* prog = array->ptrs[index]; */
        EMIT4_off32(0x48, 0x8B, 0x84, 0xD6,       /* mov rax, [rsi + rdx * 8 + offsetof(...)] */
                    offsetof(struct bpf_array, ptrs));

        /*
         * if (prog == NULL)
         *      goto out;
         */
        EMIT3(0x48, 0x85, 0xC0);                  /* test rax,rax */
#define OFFSET3 (8 + RETPOLINE_RAX_BPF_JIT_SIZE)
        EMIT2(X86_JE, OFFSET3);                   /* je out */
        label3 = cnt;

        /* goto *(prog->bpf_func + prologue_size); */
        EMIT4(0x48, 0x8B, 0x40,                   /* mov rax, qword ptr [rax + 32] */
              offsetof(struct bpf_prog, bpf_func));
        EMIT4(0x48, 0x83, 0xC0, PROLOGUE_SIZE);   /* add rax, prologue_size */

        /*
         * Now we're ready to jump into the next BPF program:
         * rdi == ctx (1st arg)
         * rax == prog->bpf_func + prologue_size
         */
        RETPOLINE_RAX_BPF_JIT();

        /* out: */
        BUILD_BUG_ON(cnt - label1 != OFFSET1);
        BUILD_BUG_ON(cnt - label2 != OFFSET2);
        BUILD_BUG_ON(cnt - label3 != OFFSET3);
        *pprog = prog;
}
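
/*
 * The OFFSETn constants are the byte distances from the end of each
 * conditional jump to the out: label, and the BUILD_BUG_ON()s verify
 * them. For example, OFFSET3 must cover "mov rax, qword ptr [rax + 32]"
 * (4 bytes) and "add rax, PROLOGUE_SIZE" (4 bytes) plus the retpoline
 * thunk, hence 8 + RETPOLINE_RAX_BPF_JIT_SIZE.
 */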

static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
                                      u8 **pprog, int addr, u8 *image)
{
        u8 *prog = *pprog;
        int cnt = 0;

        /*
         * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
         *      goto out;
         */
        EMIT2_off32(0x8B, 0x85, -36 - MAX_BPF_STACK); /* mov eax, dword ptr [rbp - 548] */
        EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);         /* cmp eax, MAX_TAIL_CALL_CNT */
        EMIT2(X86_JA, 14);                            /* ja out */
        EMIT3(0x83, 0xC0, 0x01);                      /* add eax, 1 */
        EMIT2_off32(0x89, 0x85, -36 - MAX_BPF_STACK); /* mov dword ptr [rbp - 548], eax */

        poke->ip = image + (addr - X86_PATCH_SIZE);
        poke->adj_off = PROLOGUE_SIZE;

        memcpy(prog, ideal_nops[NOP_ATOMIC5], X86_PATCH_SIZE);
        prog += X86_PATCH_SIZE;
        /* out: */

        *pprog = prog;
}
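
/*
 * Here "ja out" skips exactly 14 bytes: the add (3), the store of the
 * counter (6) and the 5-byte nop. That nop is the patch site recorded
 * in poke->ip; bpf_tail_call_direct_fixup() later turns it into
 * "jmp target + PROLOGUE_SIZE", with poke->adj_off remembering the
 * prologue skip for the target program.
 */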

static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
{
        struct bpf_jit_poke_descriptor *poke;
        struct bpf_array *array;
        struct bpf_prog *target;
        int i, ret;

        for (i = 0; i < prog->aux->size_poke_tab; i++) {
                poke = &prog->aux->poke_tab[i];
                WARN_ON_ONCE(READ_ONCE(poke->ip_stable));

                if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
                        continue;

                array = container_of(poke->tail_call.map, struct bpf_array, map);
                mutex_lock(&array->aux->poke_mutex);
                target = array->ptrs[poke->tail_call.key];
                if (target) {
                        /* Plain memcpy is used when the image is not live yet
                         * and still not locked as read-only. Once the poke
                         * location is active (poke->ip_stable), any parallel
                         * bpf_arch_text_poke() might still occur on the
                         * read-write image until we finally lock it as
                         * read-only. Both modifications of the given image
                         * are under text_mutex to avoid interference.
                         */
                        ret = __bpf_arch_text_poke(poke->ip, BPF_MOD_JUMP, NULL,
                                                   (u8 *)target->bpf_func +
                                                   poke->adj_off, false);
                        BUG_ON(ret < 0);
                }
                WRITE_ONCE(poke->ip_stable, true);
                mutex_unlock(&array->aux->poke_mutex);
        }
}

static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
                           u32 dst_reg, const u32 imm32)
{
        u8 *prog = *pprog;
        u8 b1, b2, b3;
        int cnt = 0;

        /*
         * Optimization: if imm32 is positive, use 'mov %eax, imm32'
         * (which zero-extends imm32) to save 2 bytes.
         */
        if (sign_propagate && (s32)imm32 < 0) {
                /* 'mov %rax, imm32' sign extends imm32 */
                b1 = add_1mod(0x48, dst_reg);
                b2 = 0xC7;
                b3 = 0xC0;
                EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
                goto done;
        }

        /*
         * Optimization: if imm32 is zero, use 'xor %eax, %eax'
         * to save 3 bytes.
         */
        if (imm32 == 0) {
                if (is_ereg(dst_reg))
                        EMIT1(add_2mod(0x40, dst_reg, dst_reg));
                b2 = 0x31; /* xor */
                b3 = 0xC0;
                EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
                goto done;
        }

        /* mov %eax, imm32 */
        if (is_ereg(dst_reg))
                EMIT1(add_1mod(0x40, dst_reg));
        EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
done:
        *pprog = prog;
}
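
/*
 * Resulting sizes: "xor eax, eax" is 2 bytes (31 c0),
 * "mov eax, imm32" is 5 bytes (b8 + imm32) and the sign-extending
 * "mov rax, imm32" is 7 bytes (48 c7 c0 + imm32) - hence the
 * "save 2/3 bytes" notes above.
 */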

static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
                           const u32 imm32_hi, const u32 imm32_lo)
{
        u8 *prog = *pprog;
        int cnt = 0;

        if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
                /*
                 * For emitting a plain u32, where the sign bit must not be
                 * propagated, LLVM tends to load an imm64 rather than a
                 * mov32, so save a couple of bytes by just doing
                 * 'mov %eax, imm32' instead.
                 */
                emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
        } else {
                /* movabsq %rax, imm64 */
                EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
                EMIT(imm32_lo, 4);
                EMIT(imm32_hi, 4);
        }

        *pprog = prog;
}
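
/*
 * "movabs rax, imm64" (48 b8 + 8 immediate bytes) is 10 bytes, while
 * the mov32 fallback is at most 5, so constants that fit in an
 * unsigned 32 bits (the upper half is zeroed implicitly on x86-64)
 * are worth the special case.
 */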

static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
{
        u8 *prog = *pprog;
        int cnt = 0;

        if (is64) {
                /* mov dst, src */
                EMIT_mov(dst_reg, src_reg);
        } else {
                /* mov32 dst, src */
                if (is_ereg(dst_reg) || is_ereg(src_reg))
                        EMIT1(add_2mod(0x40, dst_reg, src_reg));
                EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
        }

        *pprog = prog;
}

/* LDX: dst_reg = *(u8*)(src_reg + off) */
static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
        u8 *prog = *pprog;
        int cnt = 0;

        switch (size) {
        case BPF_B:
                /* Emit 'movzx rax, byte ptr [rax + off]' */
                EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
                break;
        case BPF_H:
                /* Emit 'movzx rax, word ptr [rax + off]' */
                EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
                break;
        case BPF_W:
                /* Emit 'mov eax, dword ptr [rax+0x14]' */
                if (is_ereg(dst_reg) || is_ereg(src_reg))
                        EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
                else
                        EMIT1(0x8B);
                break;
        case BPF_DW:
                /* Emit 'mov rax, qword ptr [rax+0x14]' */
                EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
                break;
        }
        /*
         * If insn->off == 0 we could save one extra byte, but the
         * special case of x86 R13, which always needs an offset,
         * is not worth the hassle
         */
        if (is_imm8(off))
                EMIT2(add_2reg(0x40, src_reg, dst_reg), off);
        else
                EMIT1_off32(add_2reg(0x80, src_reg, dst_reg), off);
        *pprog = prog;
}
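
/*
 * Example: emit_ldx(&prog, BPF_B, BPF_REG_6, BPF_REG_0, 0x14) emits
 * 48 0f b6 58 14, "movzx rbx, byte ptr [rax + 0x14]". The movzx forms
 * zero-extend sub-word loads, so the upper bits of the 64-bit
 * destination are always well defined.
 */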

/* STX: *(u8*)(dst_reg + off) = src_reg */
static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
        u8 *prog = *pprog;
        int cnt = 0;

        switch (size) {
        case BPF_B:
                /* Emit 'mov byte ptr [rax + off], al' */
                if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
                        /* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
                        EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
                else
                        EMIT1(0x88);
                break;
        case BPF_H:
                if (is_ereg(dst_reg) || is_ereg(src_reg))
                        EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
                else
                        EMIT2(0x66, 0x89);
                break;
        case BPF_W:
                if (is_ereg(dst_reg) || is_ereg(src_reg))
                        EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
                else
                        EMIT1(0x89);
                break;
        case BPF_DW:
                EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
                break;
        }
        if (is_imm8(off))
                EMIT2(add_2reg(0x40, dst_reg, src_reg), off);
        else
                EMIT1_off32(add_2reg(0x80, dst_reg, src_reg), off);
        *pprog = prog;
}

static bool ex_handler_bpf(const struct exception_table_entry *x,
                           struct pt_regs *regs, int trapnr,
                           unsigned long error_code, unsigned long fault_addr)
{
        u32 reg = x->fixup >> 8;

        /* jump over faulting load and clear dest register */
        *(unsigned long *)((void *)regs + reg) = 0;
        regs->ip += x->fixup & 0xff;
        return true;
}
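
/*
 * The fixup word is assembled in do_jit() below as
 * insn_size | (offsetof(struct pt_regs, <dst>) << 8): the low byte
 * advances regs->ip past the faulting load, and the upper bits locate
 * the destination register inside pt_regs so it can be zeroed - this
 * is what gives BPF_PROBE_MEM loads their "result is 0 on fault"
 * semantics.
 */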

static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
                  int oldproglen, struct jit_context *ctx)
{
        struct bpf_insn *insn = bpf_prog->insnsi;
        int insn_cnt = bpf_prog->len;
        bool seen_exit = false;
        u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
        int i, cnt = 0, excnt = 0;
        int proglen = 0;
        u8 *prog = temp;

        emit_prologue(&prog, bpf_prog->aux->stack_depth,
                      bpf_prog_was_classic(bpf_prog));
        addrs[0] = prog - temp;

        for (i = 1; i <= insn_cnt; i++, insn++) {
                const s32 imm32 = insn->imm;
                u32 dst_reg = insn->dst_reg;
                u32 src_reg = insn->src_reg;
                u8 b2 = 0, b3 = 0;
                s64 jmp_offset;
                u8 jmp_cond;
                int ilen;
                u8 *func;

                switch (insn->code) {
                        /* ALU */
                case BPF_ALU | BPF_ADD | BPF_X:
                case BPF_ALU | BPF_SUB | BPF_X:
                case BPF_ALU | BPF_AND | BPF_X:
                case BPF_ALU | BPF_OR | BPF_X:
                case BPF_ALU | BPF_XOR | BPF_X:
                case BPF_ALU64 | BPF_ADD | BPF_X:
                case BPF_ALU64 | BPF_SUB | BPF_X:
                case BPF_ALU64 | BPF_AND | BPF_X:
                case BPF_ALU64 | BPF_OR | BPF_X:
                case BPF_ALU64 | BPF_XOR | BPF_X:
                        switch (BPF_OP(insn->code)) {
                        case BPF_ADD: b2 = 0x01; break;
                        case BPF_SUB: b2 = 0x29; break;
                        case BPF_AND: b2 = 0x21; break;
                        case BPF_OR: b2 = 0x09; break;
                        case BPF_XOR: b2 = 0x31; break;
                        }
                        if (BPF_CLASS(insn->code) == BPF_ALU64)
                                EMIT1(add_2mod(0x48, dst_reg, src_reg));
                        else if (is_ereg(dst_reg) || is_ereg(src_reg))
                                EMIT1(add_2mod(0x40, dst_reg, src_reg));
                        EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
                        break;

                case BPF_ALU64 | BPF_MOV | BPF_X:
                case BPF_ALU | BPF_MOV | BPF_X:
                        emit_mov_reg(&prog,
                                     BPF_CLASS(insn->code) == BPF_ALU64,
                                     dst_reg, src_reg);
                        break;

                        /* neg dst */
                case BPF_ALU | BPF_NEG:
                case BPF_ALU64 | BPF_NEG:
                        if (BPF_CLASS(insn->code) == BPF_ALU64)
                                EMIT1(add_1mod(0x48, dst_reg));
                        else if (is_ereg(dst_reg))
                                EMIT1(add_1mod(0x40, dst_reg));
                        EMIT2(0xF7, add_1reg(0xD8, dst_reg));
                        break;

                case BPF_ALU | BPF_ADD | BPF_K:
                case BPF_ALU | BPF_SUB | BPF_K:
                case BPF_ALU | BPF_AND | BPF_K:
                case BPF_ALU | BPF_OR | BPF_K:
                case BPF_ALU | BPF_XOR | BPF_K:
                case BPF_ALU64 | BPF_ADD | BPF_K:
                case BPF_ALU64 | BPF_SUB | BPF_K:
                case BPF_ALU64 | BPF_AND | BPF_K:
                case BPF_ALU64 | BPF_OR | BPF_K:
                case BPF_ALU64 | BPF_XOR | BPF_K:
                        if (BPF_CLASS(insn->code) == BPF_ALU64)
                                EMIT1(add_1mod(0x48, dst_reg));
                        else if (is_ereg(dst_reg))
                                EMIT1(add_1mod(0x40, dst_reg));

                        /*
                         * b3 holds the 'normal' opcode; b2 is the short
                         * form, only valid when dst is eax/rax.
                         */
                        switch (BPF_OP(insn->code)) {
                        case BPF_ADD:
                                b3 = 0xC0;
                                b2 = 0x05;
                                break;
                        case BPF_SUB:
                                b3 = 0xE8;
                                b2 = 0x2D;
                                break;
                        case BPF_AND:
                                b3 = 0xE0;
                                b2 = 0x25;
                                break;
                        case BPF_OR:
                                b3 = 0xC8;
                                b2 = 0x0D;
                                break;
                        case BPF_XOR:
                                b3 = 0xF0;
                                b2 = 0x35;
                                break;
                        }

                        if (is_imm8(imm32))
                                EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
                        else if (is_axreg(dst_reg))
                                EMIT1_off32(b2, imm32);
                        else
                                EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
                        break;

                case BPF_ALU64 | BPF_MOV | BPF_K:
                case BPF_ALU | BPF_MOV | BPF_K:
                        emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
                                       dst_reg, imm32);
                        break;

                case BPF_LD | BPF_IMM | BPF_DW:
                        emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
                        insn++;
                        i++;
                        break;

                        /* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
                case BPF_ALU | BPF_MOD | BPF_X:
                case BPF_ALU | BPF_DIV | BPF_X:
                case BPF_ALU | BPF_MOD | BPF_K:
                case BPF_ALU | BPF_DIV | BPF_K:
                case BPF_ALU64 | BPF_MOD | BPF_X:
                case BPF_ALU64 | BPF_DIV | BPF_X:
                case BPF_ALU64 | BPF_MOD | BPF_K:
                case BPF_ALU64 | BPF_DIV | BPF_K:
                        EMIT1(0x50); /* push rax */
                        EMIT1(0x52); /* push rdx */

                        if (BPF_SRC(insn->code) == BPF_X)
                                /* mov r11, src_reg */
                                EMIT_mov(AUX_REG, src_reg);
                        else
                                /* mov r11, imm32 */
                                EMIT3_off32(0x49, 0xC7, 0xC3, imm32);

                        /* mov rax, dst_reg */
                        EMIT_mov(BPF_REG_0, dst_reg);

                        /*
                         * xor edx, edx
                         * equivalent to 'xor rdx, rdx', but one byte less
                         */
                        EMIT2(0x31, 0xd2);

                        if (BPF_CLASS(insn->code) == BPF_ALU64)
                                /* div r11 */
                                EMIT3(0x49, 0xF7, 0xF3);
                        else
                                /* div r11d */
                                EMIT3(0x41, 0xF7, 0xF3);

                        if (BPF_OP(insn->code) == BPF_MOD)
                                /* mov r11, rdx */
                                EMIT3(0x49, 0x89, 0xD3);
                        else
                                /* mov r11, rax */
                                EMIT3(0x49, 0x89, 0xC3);

                        EMIT1(0x5A); /* pop rdx */
                        EMIT1(0x58); /* pop rax */

                        /* mov dst_reg, r11 */
                        EMIT_mov(dst_reg, AUX_REG);
                        break;
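
                /*
                 * The division sequence above saves rax and rdx because
                 * x86 div uses them implicitly: "xor edx, edx" clears the
                 * upper half of the dividend, div then computes
                 * rdx:rax / r11, leaving the quotient in rax and the
                 * remainder in rdx - which is why BPF_MOD copies rdx and
                 * BPF_DIV copies rax back into r11.
                 */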

                case BPF_ALU | BPF_MUL | BPF_K:
                case BPF_ALU | BPF_MUL | BPF_X:
                case BPF_ALU64 | BPF_MUL | BPF_K:
                case BPF_ALU64 | BPF_MUL | BPF_X:
                {
                        bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;

                        if (dst_reg != BPF_REG_0)
                                EMIT1(0x50); /* push rax */
                        if (dst_reg != BPF_REG_3)
                                EMIT1(0x52); /* push rdx */

                        /* mov r11, dst_reg */
                        EMIT_mov(AUX_REG, dst_reg);

                        if (BPF_SRC(insn->code) == BPF_X)
                                emit_mov_reg(&prog, is64, BPF_REG_0, src_reg);
                        else
                                emit_mov_imm32(&prog, is64, BPF_REG_0, imm32);

                        if (is64)
                                EMIT1(add_1mod(0x48, AUX_REG));
                        else if (is_ereg(AUX_REG))
                                EMIT1(add_1mod(0x40, AUX_REG));
                        /* mul(q) r11 */
                        EMIT2(0xF7, add_1reg(0xE0, AUX_REG));

                        if (dst_reg != BPF_REG_3)
                                EMIT1(0x5A); /* pop rdx */
                        if (dst_reg != BPF_REG_0) {
                                /* mov dst_reg, rax */
                                EMIT_mov(dst_reg, BPF_REG_0);
                                EMIT1(0x58); /* pop rax */
                        }
                        break;
                }
                        /* Shifts */
                case BPF_ALU | BPF_LSH | BPF_K:
                case BPF_ALU | BPF_RSH | BPF_K:
                case BPF_ALU | BPF_ARSH | BPF_K:
                case BPF_ALU64 | BPF_LSH | BPF_K:
                case BPF_ALU64 | BPF_RSH | BPF_K:
                case BPF_ALU64 | BPF_ARSH | BPF_K:
                        if (BPF_CLASS(insn->code) == BPF_ALU64)
                                EMIT1(add_1mod(0x48, dst_reg));
                        else if (is_ereg(dst_reg))
                                EMIT1(add_1mod(0x40, dst_reg));

                        switch (BPF_OP(insn->code)) {
                        case BPF_LSH: b3 = 0xE0; break;
                        case BPF_RSH: b3 = 0xE8; break;
                        case BPF_ARSH: b3 = 0xF8; break;
                        }

                        if (imm32 == 1)
                                EMIT2(0xD1, add_1reg(b3, dst_reg));
                        else
                                EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
                        break;

                case BPF_ALU | BPF_LSH | BPF_X:
                case BPF_ALU | BPF_RSH | BPF_X:
                case BPF_ALU | BPF_ARSH | BPF_X:
                case BPF_ALU64 | BPF_LSH | BPF_X:
                case BPF_ALU64 | BPF_RSH | BPF_X:
                case BPF_ALU64 | BPF_ARSH | BPF_X:

                        /* Check for bad case when dst_reg == rcx */
                        if (dst_reg == BPF_REG_4) {
                                /* mov r11, dst_reg */
                                EMIT_mov(AUX_REG, dst_reg);
                                dst_reg = AUX_REG;
                        }

                        if (src_reg != BPF_REG_4) { /* common case */
                                EMIT1(0x51); /* push rcx */

                                /* mov rcx, src_reg */
                                EMIT_mov(BPF_REG_4, src_reg);
                        }

                        /* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
                        if (BPF_CLASS(insn->code) == BPF_ALU64)
                                EMIT1(add_1mod(0x48, dst_reg));
                        else if (is_ereg(dst_reg))
                                EMIT1(add_1mod(0x40, dst_reg));

                        switch (BPF_OP(insn->code)) {
                        case BPF_LSH: b3 = 0xE0; break;
                        case BPF_RSH: b3 = 0xE8; break;
                        case BPF_ARSH: b3 = 0xF8; break;
                        }
                        EMIT2(0xD3, add_1reg(b3, dst_reg));

                        if (src_reg != BPF_REG_4)
                                EMIT1(0x59); /* pop rcx */

                        if (insn->dst_reg == BPF_REG_4)
                                /* mov dst_reg, r11 */
                                EMIT_mov(insn->dst_reg, AUX_REG);
                        break;

                case BPF_ALU | BPF_END | BPF_FROM_BE:
                        switch (imm32) {
                        case 16:
                                /* Emit 'ror %ax, 8' to swap lower 2 bytes */
                                EMIT1(0x66);
                                if (is_ereg(dst_reg))
                                        EMIT1(0x41);
                                EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);

                                /* Emit 'movzwl eax, ax' */
                                if (is_ereg(dst_reg))
                                        EMIT3(0x45, 0x0F, 0xB7);
                                else
                                        EMIT2(0x0F, 0xB7);
                                EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
                                break;
                        case 32:
                                /* Emit 'bswap eax' to swap lower 4 bytes */
                                if (is_ereg(dst_reg))
                                        EMIT2(0x41, 0x0F);
                                else
                                        EMIT1(0x0F);
                                EMIT1(add_1reg(0xC8, dst_reg));
                                break;
                        case 64:
                                /* Emit 'bswap rax' to swap 8 bytes */
                                EMIT3(add_1mod(0x48, dst_reg), 0x0F,
                                      add_1reg(0xC8, dst_reg));
                                break;
                        }
                        break;

                case BPF_ALU | BPF_END | BPF_FROM_LE:
                        switch (imm32) {
                        case 16:
                                /*
                                 * Emit 'movzwl eax, ax' to zero-extend the
                                 * lower 16 bits into 64 bits
                                 */
                                if (is_ereg(dst_reg))
                                        EMIT3(0x45, 0x0F, 0xB7);
                                else
                                        EMIT2(0x0F, 0xB7);
                                EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
                                break;
                        case 32:
                                /* Emit 'mov eax, eax' to clear the upper 32 bits */
                                if (is_ereg(dst_reg))
                                        EMIT1(0x45);
                                EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
                                break;
                        case 64:
                                /* nop */
                                break;
                        }
                        break;

                        /* ST: *(u8*)(dst_reg + off) = imm */
                case BPF_ST | BPF_MEM | BPF_B:
                        if (is_ereg(dst_reg))
                                EMIT2(0x41, 0xC6);
                        else
                                EMIT1(0xC6);
                        goto st;
                case BPF_ST | BPF_MEM | BPF_H:
                        if (is_ereg(dst_reg))
                                EMIT3(0x66, 0x41, 0xC7);
                        else
                                EMIT2(0x66, 0xC7);
                        goto st;
                case BPF_ST | BPF_MEM | BPF_W:
                        if (is_ereg(dst_reg))
                                EMIT2(0x41, 0xC7);
                        else
                                EMIT1(0xC7);
                        goto st;
                case BPF_ST | BPF_MEM | BPF_DW:
                        EMIT2(add_1mod(0x48, dst_reg), 0xC7);

st:                     if (is_imm8(insn->off))
                                EMIT2(add_1reg(0x40, dst_reg), insn->off);
                        else
                                EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);

                        EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
                        break;

                        /* STX: *(u8*)(dst_reg + off) = src_reg */
                case BPF_STX | BPF_MEM | BPF_B:
                case BPF_STX | BPF_MEM | BPF_H:
                case BPF_STX | BPF_MEM | BPF_W:
                case BPF_STX | BPF_MEM | BPF_DW:
                        emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
                        break;

                        /* LDX: dst_reg = *(u8*)(src_reg + off) */
                case BPF_LDX | BPF_MEM | BPF_B:
                case BPF_LDX | BPF_PROBE_MEM | BPF_B:
                case BPF_LDX | BPF_MEM | BPF_H:
                case BPF_LDX | BPF_PROBE_MEM | BPF_H:
                case BPF_LDX | BPF_MEM | BPF_W:
                case BPF_LDX | BPF_PROBE_MEM | BPF_W:
                case BPF_LDX | BPF_MEM | BPF_DW:
                case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
                        emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
                        if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
                                struct exception_table_entry *ex;
                                u8 *_insn = image + proglen;
                                s64 delta;

                                if (!bpf_prog->aux->extable)
                                        break;

                                if (excnt >= bpf_prog->aux->num_exentries) {
                                        pr_err("ex gen bug\n");
                                        return -EFAULT;
                                }
                                ex = &bpf_prog->aux->extable[excnt++];

                                delta = _insn - (u8 *)&ex->insn;
                                if (!is_simm32(delta)) {
                                        pr_err("extable->insn doesn't fit into 32-bit\n");
                                        return -EFAULT;
                                }
                                ex->insn = delta;

                                delta = (u8 *)ex_handler_bpf - (u8 *)&ex->handler;
                                if (!is_simm32(delta)) {
                                        pr_err("extable->handler doesn't fit into 32-bit\n");
                                        return -EFAULT;
                                }
                                ex->handler = delta;

                                if (dst_reg > BPF_REG_9) {
                                        pr_err("verifier error\n");
                                        return -EFAULT;
                                }
                                /*
                                 * Compute size of x86 insn and its target dest x86 register.
                                 * ex_handler_bpf() will use the lower 8 bits to adjust
                                 * pt_regs->ip to jump over this x86 instruction
                                 * and the upper bits to figure out which pt_regs to zero out.
                                 * End result: an x86 insn "mov rbx, qword ptr [rax+0x14]"
                                 * of 4 bytes will be skipped and rbx will be zero-initialized.
                                 */
                                ex->fixup = (prog - temp) | (reg2pt_regs[dst_reg] << 8);
                        }
                        break;

                        /* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
                case BPF_STX | BPF_XADD | BPF_W:
                        /* Emit 'lock add dword ptr [rax + off], eax' */
                        if (is_ereg(dst_reg) || is_ereg(src_reg))
                                EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
                        else
                                EMIT2(0xF0, 0x01);
                        goto xadd;
                case BPF_STX | BPF_XADD | BPF_DW:
                        EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
xadd:                   if (is_imm8(insn->off))
                                EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
                        else
                                EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
                                            insn->off);
                        break;
1101
1102                        /* call */
1103                case BPF_JMP | BPF_CALL:
1104                        func = (u8 *) __bpf_call_base + imm32;
1105                        if (!imm32 || emit_call(&prog, func, image + addrs[i - 1]))
1106                                return -EINVAL;
1107                        break;
1108
1109                case BPF_JMP | BPF_TAIL_CALL:
1110                        if (imm32)
1111                                emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1],
1112                                                          &prog, addrs[i], image);
1113                        else
1114                                emit_bpf_tail_call_indirect(&prog);
1115                        break;
1116
1117                        /* cond jump */
1118                case BPF_JMP | BPF_JEQ | BPF_X:
1119                case BPF_JMP | BPF_JNE | BPF_X:
1120                case BPF_JMP | BPF_JGT | BPF_X:
1121                case BPF_JMP | BPF_JLT | BPF_X:
1122                case BPF_JMP | BPF_JGE | BPF_X:
1123                case BPF_JMP | BPF_JLE | BPF_X:
1124                case BPF_JMP | BPF_JSGT | BPF_X:
1125                case BPF_JMP | BPF_JSLT | BPF_X:
1126                case BPF_JMP | BPF_JSGE | BPF_X:
1127                case BPF_JMP | BPF_JSLE | BPF_X:
1128                case BPF_JMP32 | BPF_JEQ | BPF_X:
1129                case BPF_JMP32 | BPF_JNE | BPF_X:
1130                case BPF_JMP32 | BPF_JGT | BPF_X:
1131                case BPF_JMP32 | BPF_JLT | BPF_X:
1132                case BPF_JMP32 | BPF_JGE | BPF_X:
1133                case BPF_JMP32 | BPF_JLE | BPF_X:
1134                case BPF_JMP32 | BPF_JSGT | BPF_X:
1135                case BPF_JMP32 | BPF_JSLT | BPF_X:
1136                case BPF_JMP32 | BPF_JSGE | BPF_X:
1137                case BPF_JMP32 | BPF_JSLE | BPF_X:
1138                        /* cmp dst_reg, src_reg */
1139                        if (BPF_CLASS(insn->code) == BPF_JMP)
1140                                EMIT1(add_2mod(0x48, dst_reg, src_reg));
1141                        else if (is_ereg(dst_reg) || is_ereg(src_reg))
1142                                EMIT1(add_2mod(0x40, dst_reg, src_reg));
1143                        EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
1144                        goto emit_cond_jmp;
1145
1146                case BPF_JMP | BPF_JSET | BPF_X:
1147                case BPF_JMP32 | BPF_JSET | BPF_X:
1148                        /* test dst_reg, src_reg */
1149                        if (BPF_CLASS(insn->code) == BPF_JMP)
1150                                EMIT1(add_2mod(0x48, dst_reg, src_reg));
1151                        else if (is_ereg(dst_reg) || is_ereg(src_reg))
1152                                EMIT1(add_2mod(0x40, dst_reg, src_reg));
1153                        EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
1154                        goto emit_cond_jmp;
1155
1156                case BPF_JMP | BPF_JSET | BPF_K:
1157                case BPF_JMP32 | BPF_JSET | BPF_K:
1158                        /* test dst_reg, imm32 */
1159                        if (BPF_CLASS(insn->code) == BPF_JMP)
1160                                EMIT1(add_1mod(0x48, dst_reg));
1161                        else if (is_ereg(dst_reg))
1162                                EMIT1(add_1mod(0x40, dst_reg));
1163                        EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
1164                        goto emit_cond_jmp;
1165
1166                case BPF_JMP | BPF_JEQ | BPF_K:
1167                case BPF_JMP | BPF_JNE | BPF_K:
1168                case BPF_JMP | BPF_JGT | BPF_K:
1169                case BPF_JMP | BPF_JLT | BPF_K:
1170                case BPF_JMP | BPF_JGE | BPF_K:
1171                case BPF_JMP | BPF_JLE | BPF_K:
1172                case BPF_JMP | BPF_JSGT | BPF_K:
1173                case BPF_JMP | BPF_JSLT | BPF_K:
1174                case BPF_JMP | BPF_JSGE | BPF_K:
1175                case BPF_JMP | BPF_JSLE | BPF_K:
1176                case BPF_JMP32 | BPF_JEQ | BPF_K:
1177                case BPF_JMP32 | BPF_JNE | BPF_K:
1178                case BPF_JMP32 | BPF_JGT | BPF_K:
1179                case BPF_JMP32 | BPF_JLT | BPF_K:
1180                case BPF_JMP32 | BPF_JGE | BPF_K:
1181                case BPF_JMP32 | BPF_JLE | BPF_K:
1182                case BPF_JMP32 | BPF_JSGT | BPF_K:
1183                case BPF_JMP32 | BPF_JSLT | BPF_K:
1184                case BPF_JMP32 | BPF_JSGE | BPF_K:
1185                case BPF_JMP32 | BPF_JSLE | BPF_K:
1186                        /* test dst_reg, dst_reg to save one extra byte */
1187                        if (imm32 == 0) {
1188                                if (BPF_CLASS(insn->code) == BPF_JMP)
1189                                        EMIT1(add_2mod(0x48, dst_reg, dst_reg));
1190                                else if (is_ereg(dst_reg))
1191                                        EMIT1(add_2mod(0x40, dst_reg, dst_reg));
1192                                EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
1193                                goto emit_cond_jmp;
1194                        }
1195
1196                        /* cmp dst_reg, imm8/32 */
1197                        if (BPF_CLASS(insn->code) == BPF_JMP)
1198                                EMIT1(add_1mod(0x48, dst_reg));
1199                        else if (is_ereg(dst_reg))
1200                                EMIT1(add_1mod(0x40, dst_reg));
1201
1202                        if (is_imm8(imm32))
1203                                EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
1204                        else
1205                                EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);
1206
1207emit_cond_jmp:          /* Convert BPF opcode to x86 */
1208                        switch (BPF_OP(insn->code)) {
1209                        case BPF_JEQ:
1210                                jmp_cond = X86_JE;
1211                                break;
1212                        case BPF_JSET:
1213                        case BPF_JNE:
1214                                jmp_cond = X86_JNE;
1215                                break;
1216                        case BPF_JGT:
1217                                /* GT is unsigned '>', JA in x86 */
1218                                jmp_cond = X86_JA;
1219                                break;
1220                        case BPF_JLT:
1221                                /* LT is unsigned '<', JB in x86 */
1222                                jmp_cond = X86_JB;
1223                                break;
1224                        case BPF_JGE:
1225                                /* GE is unsigned '>=', JAE in x86 */
1226                                jmp_cond = X86_JAE;
1227                                break;
1228                        case BPF_JLE:
1229                                /* LE is unsigned '<=', JBE in x86 */
1230                                jmp_cond = X86_JBE;
1231                                break;
1232                        case BPF_JSGT:
1233                                /* Signed '>', GT in x86 */
1234                                jmp_cond = X86_JG;
1235                                break;
1236                        case BPF_JSLT:
1237                                /* Signed '<', LT in x86 */
1238                                jmp_cond = X86_JL;
1239                                break;
1240                        case BPF_JSGE:
1241                                /* Signed '>=', GE in x86 */
1242                                jmp_cond = X86_JGE;
1243                                break;
1244                        case BPF_JSLE:
1245                                /* Signed '<=', LE in x86 */
1246                                jmp_cond = X86_JLE;
1247                                break;
1248                        default: /* to silence GCC warning */
1249                                return -EFAULT;
1250                        }
1251                        jmp_offset = addrs[i + insn->off] - addrs[i];
1252                        if (is_imm8(jmp_offset)) {
1253                                EMIT2(jmp_cond, jmp_offset);
1254                        } else if (is_simm32(jmp_offset)) {
1255                                EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
1256                        } else {
1257                                pr_err("cond_jmp gen bug %llx\n", jmp_offset);
1258                                return -EFAULT;
1259                        }
1260
1261                        break;
1262
1263                case BPF_JMP | BPF_JA:
1264                        if (insn->off == -1)
1265                                /* -1 jmp instructions will always jump
1266                                 * backwards two bytes. Explicitly handling
1267                                 * this case avoids wasting too many passes
1268                                 * when there are long sequences of replaced
1269                                 * dead code.
1270                                 */
1271                                jmp_offset = -2;
1272                        else
1273                                jmp_offset = addrs[i + insn->off] - addrs[i];
1274
1275                        if (!jmp_offset)
1276                                /* Optimize out nop jumps */
1277                                break;
1278emit_jmp:
1279                        if (is_imm8(jmp_offset)) {
1280                                EMIT2(0xEB, jmp_offset);
1281                        } else if (is_simm32(jmp_offset)) {
1282                                EMIT1_off32(0xE9, jmp_offset);
1283                        } else {
1284                                pr_err("jmp gen bug %llx\n", jmp_offset);
1285                                return -EFAULT;
1286                        }
1287                        break;
1288
1289                case BPF_JMP | BPF_EXIT:
1290                        if (seen_exit) {
1291                                jmp_offset = ctx->cleanup_addr - addrs[i];
1292                                goto emit_jmp;
1293                        }
1294                        seen_exit = true;
1295                        /* Update cleanup_addr */
1296                        ctx->cleanup_addr = proglen;
1297                        if (!bpf_prog_was_classic(bpf_prog))
1298                                EMIT1(0x5B); /* get rid of tail_call_cnt */
1299                        EMIT2(0x41, 0x5F);   /* pop r15 */
1300                        EMIT2(0x41, 0x5E);   /* pop r14 */
1301                        EMIT2(0x41, 0x5D);   /* pop r13 */
1302                        EMIT1(0x5B);         /* pop rbx */
1303                        EMIT1(0xC9);         /* leave */
1304                        EMIT1(0xC3);         /* ret */
1305                        break;
1306
1307                default:
1308                        /*
1309                         * By design the x86-64 JIT should support all BPF instructions.
1310                         * This error will be seen if a new instruction was added
1311                         * to the interpreter but not to the JIT, or if there is
1312                         * junk in bpf_prog.
1313                         */
1314                        pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
1315                        return -EINVAL;
1316                }
1317
1318                ilen = prog - temp;
1319                if (ilen > BPF_MAX_INSN_SIZE) {
1320                        pr_err("bpf_jit: fatal insn size error\n");
1321                        return -EFAULT;
1322                }
1323
1324                if (image) {
1325                        if (unlikely(proglen + ilen > oldproglen)) {
1326                                pr_err("bpf_jit: fatal error\n");
1327                                return -EFAULT;
1328                        }
1329                        memcpy(image + proglen, temp, ilen);
1330                }
1331                proglen += ilen;
1332                addrs[i] = proglen;
1333                prog = temp;
1334        }
1335
1336        if (image && excnt != bpf_prog->aux->num_exentries) {
1337                pr_err("extable is not populated\n");
1338                return -EFAULT;
1339        }
1340        return proglen;
1341}
1342
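    /* do_jit() above is driven by bpf_int_jit_compile() below: it is run
     * with image == NULL until addrs[] and proglen stop changing, then
     * once more with image set to emit the final bytes.
     */
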
1343static void save_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
1344                      int stack_size)
1345{
1346        int i;
1347        /* Store function arguments to stack.
1348         * For a function that accepts two pointers the sequence will be:
1349         * mov QWORD PTR [rbp-0x10],rdi
1350         * mov QWORD PTR [rbp-0x8],rsi
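             * The first five arguments arrive in rdi, rsi, rdx, rcx and r8
             * (BPF_REG_1..BPF_REG_5 in the JIT's register map); a sixth
             * arrives in r9, which has no BPF alias, hence X86_REG_R9.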
1351         */
1352        for (i = 0; i < min(nr_args, 6); i++)
1353                emit_stx(prog, bytes_to_bpf_size(m->arg_size[i]),
1354                         BPF_REG_FP,
1355                         i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
1356                         -(stack_size - i * 8));
1357}
1358
1359static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
1360                         int stack_size)
1361{
1362        int i;
1363
1364        /* Restore function arguments from stack.
1365         * For a function that accepts two pointers the sequence will be:
1366         * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
1367         * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
1368         */
1369        for (i = 0; i < min(nr_args, 6); i++)
1370                emit_ldx(prog, bytes_to_bpf_size(m->arg_size[i]),
1371                         i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
1372                         BPF_REG_FP,
1373                         -(stack_size - i * 8));
1374}
1375
1376static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
1377                           struct bpf_prog *p, int stack_size, bool mod_ret)
1378{
1379        u8 *prog = *pprog;
1380        int cnt = 0;
1381
1382        if (emit_call(&prog, __bpf_prog_enter, prog))
1383                return -EINVAL;
1384        /* remember prog start time returned by __bpf_prog_enter */
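            /* BPF_REG_6 is rbx in this JIT, a callee-saved register, so the
             * start time survives the program call below.
             */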
1385        emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
1386
1387        /* arg1: lea rdi, [rbp - stack_size] */
1388        EMIT4(0x48, 0x8D, 0x7D, -stack_size);
1389        /* arg2: progs[i]->insnsi for interpreter */
1390        if (!p->jited)
1391                emit_mov_imm64(&prog, BPF_REG_2,
1392                               (long) p->insnsi >> 32,
1393                               (u32) (long) p->insnsi);
1394        /* call JITed bpf program or interpreter */
1395        if (emit_call(&prog, p->bpf_func, prog))
1396                return -EINVAL;
1397
1398        /* BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
1399         * value of the previous call, which is then passed on the stack
1400         * to the next BPF program.
1401         */
1402        if (mod_ret)
1403                emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
1404
1405        /* arg1: mov rdi, progs[i] */
1406        emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32,
1407                       (u32) (long) p);
1408        /* arg2: mov rsi, rbx <- start time in nsec */
1409        emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
1410        if (emit_call(&prog, __bpf_prog_exit, prog))
1411                return -EINVAL;
1412
1413        *pprog = prog;
1414        return 0;
1415}
1416
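    /* Pad with the kernel's optimal multi-byte nops, at most ASM_NOP_MAX
     * bytes per instruction; e.g., assuming ASM_NOP_MAX == 8, a len of 11
     * becomes one 8-byte nop followed by a 3-byte nop.
     */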
1417static void emit_nops(u8 **pprog, unsigned int len)
1418{
1419        unsigned int i, noplen;
1420        u8 *prog = *pprog;
1421        int cnt = 0;
1422
1423        while (len > 0) {
1424                noplen = len;
1425
1426                if (noplen > ASM_NOP_MAX)
1427                        noplen = ASM_NOP_MAX;
1428
1429                for (i = 0; i < noplen; i++)
1430                        EMIT1(ideal_nops[noplen][i]);
1431                len -= noplen;
1432        }
1433
1434        *pprog = prog;
1435}
1436
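    /* Advance prog to the next align-byte boundary by emitting nops;
     * align must be a power of two, as PTR_ALIGN() requires.
     */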
1437static void emit_align(u8 **pprog, u32 align)
1438{
1439        u8 *target, *prog = *pprog;
1440
1441        target = PTR_ALIGN(prog, align);
1442        if (target != prog)
1443                emit_nops(&prog, target - prog);
1444
1445        *pprog = prog;
1446}
1447
1448static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
1449{
1450        u8 *prog = *pprog;
1451        int cnt = 0;
1452        s64 offset;
1453
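            /* A near conditional jump is 6 bytes (0x0f, opcode, imm32) and
             * its displacement is relative to the next instruction, hence
             * ip + 2 + 4.
             */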
1454        offset = func - (ip + 2 + 4);
1455        if (!is_simm32(offset)) {
1456                pr_err("Target %p is out of range\n", func);
1457                return -EINVAL;
1458        }
1459        EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
1460        *pprog = prog;
1461        return 0;
1462}
1463
1464static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
1465                      struct bpf_tramp_progs *tp, int stack_size)
1466{
1467        int i;
1468        u8 *prog = *pprog;
1469
1470        for (i = 0; i < tp->nr_progs; i++) {
1471                if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, false))
1472                        return -EINVAL;
1473        }
1474        *pprog = prog;
1475        return 0;
1476}
1477
1478static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
1479                              struct bpf_tramp_progs *tp, int stack_size,
1480                              u8 **branches)
1481{
1482        u8 *prog = *pprog;
1483        int i, cnt = 0;
1484
1485        /* The first fmod_ret program will receive a garbage return value.
1486         * Set this to 0 to avoid confusing the program.
1487         */
1488        emit_mov_imm32(&prog, false, BPF_REG_0, 0);
1489        emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
1490        for (i = 0; i < tp->nr_progs; i++) {
1491                if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, true))
1492                        return -EINVAL;
1493
1494                /* The mod_ret prog stored its return value into [rbp - 8]. Emit:
1495                 * if (*(u64 *)(rbp - 8) != 0)
1496                 *      goto do_fexit;
1497                 */
1498                /* cmp QWORD PTR [rbp - 0x8], 0x0 */
1499                EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);
1500
1501                /* Save the location of the branch, and generate 6 nops
1502                 * (2 bytes for the jump opcode, 4 for its offset). These nops
1503                 * are replaced with a conditional jump once do_fexit (i.e. the
1504                 * start of the fexit invocation) is finalized.
1505                 */
1506                branches[i] = prog;
1507                emit_nops(&prog, 4 + 2);
1508        }
1509
1510        *pprog = prog;
1511        return 0;
1512}
1513
1514/* Example:
1515 * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
1516 * its 'struct btf_func_model' will have nr_args=2.
1517 * The assembly code when eth_type_trans is executing after the trampoline:
1518 *
1519 * push rbp
1520 * mov rbp, rsp
1521 * sub rsp, 16                     // space for skb and dev
1522 * push rbx                        // temp regs to pass start time
1523 * mov qword ptr [rbp - 16], rdi   // save skb pointer to stack
1524 * mov qword ptr [rbp - 8], rsi    // save dev pointer to stack
1525 * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
1526 * mov rbx, rax                    // remember start time if bpf stats are enabled
1527 * lea rdi, [rbp - 16]             // R1==ctx of bpf prog
1528 * call addr_of_jited_FENTRY_prog
1529 * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
1530 * mov rsi, rbx                    // prog start time
1531 * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
1532 * mov rdi, qword ptr [rbp - 16]   // restore skb pointer from stack
1533 * mov rsi, qword ptr [rbp - 8]    // restore dev pointer from stack
1534 * pop rbx
1535 * leave
1536 * ret
1537 *
1538 * eth_type_trans has a 5-byte nop at the beginning. These 5 bytes will be
1539 * replaced with 'call generated_bpf_trampoline'. When it returns,
1540 * eth_type_trans will continue executing with the original skb and dev pointers.
1541 *
1542 * The assembly code when eth_type_trans is called from trampoline:
1543 *
1544 * push rbp
1545 * mov rbp, rsp
1546 * sub rsp, 24                     // space for skb, dev, return value
1547 * push rbx                        // temp regs to pass start time
1548 * mov qword ptr [rbp - 24], rdi   // save skb pointer to stack
1549 * mov qword ptr [rbp - 16], rsi   // save dev pointer to stack
1550 * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
1551 * mov rbx, rax                    // remember start time if bpf stats are enabled
1552 * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
1553 * call addr_of_jited_FENTRY_prog  // bpf prog can access skb and dev
1554 * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
1555 * mov rsi, rbx                    // prog start time
1556 * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
1557 * mov rdi, qword ptr [rbp - 24]   // restore skb pointer from stack
1558 * mov rsi, qword ptr [rbp - 16]   // restore dev pointer from stack
1559 * call eth_type_trans+5           // execute body of eth_type_trans
1560 * mov qword ptr [rbp - 8], rax    // save return value
1561 * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
1562 * mov rbx, rax                    // remember start time if bpf stats are enabled
1563 * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
1564 * call addr_of_jited_FEXIT_prog   // bpf prog can access skb, dev, return value
1565 * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
1566 * mov rsi, rbx                    // prog start time
1567 * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
1568 * mov rax, qword ptr [rbp - 8]    // restore eth_type_trans's return value
1569 * pop rbx
1570 * leave
1571 * add rsp, 8                      // skip eth_type_trans's frame
1572 * ret                             // return to its caller
1573 */
1574int arch_prepare_bpf_trampoline(void *image, void *image_end,
1575                                const struct btf_func_model *m, u32 flags,
1576                                struct bpf_tramp_progs *tprogs,
1577                                void *orig_call)
1578{
1579        int ret, i, cnt = 0, nr_args = m->nr_args;
1580        int stack_size = nr_args * 8;
1581        struct bpf_tramp_progs *fentry = &tprogs[BPF_TRAMP_FENTRY];
1582        struct bpf_tramp_progs *fexit = &tprogs[BPF_TRAMP_FEXIT];
1583        struct bpf_tramp_progs *fmod_ret = &tprogs[BPF_TRAMP_MODIFY_RETURN];
1584        u8 **branches = NULL;
1585        u8 *prog;
1586
1587        /* x86-64 passes up to 6 function arguments in registers. 7+ can be added in the future */
1588        if (nr_args > 6)
1589                return -ENOTSUPP;
1590
1591        if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&
1592            (flags & BPF_TRAMP_F_SKIP_FRAME))
1593                return -EINVAL;
1594
1595        if (flags & BPF_TRAMP_F_CALL_ORIG)
1596                stack_size += 8; /* room for return value of orig_call */
1597
1598        if (flags & BPF_TRAMP_F_SKIP_FRAME)
1599                /* skip patched call instruction and point orig_call to actual
1600                 * body of the kernel function.
1601                 */
1602                orig_call += X86_PATCH_SIZE;
1603
1604        prog = image;
1605
1606        EMIT1(0x55);             /* push rbp */
1607        EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
1608        EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */
1609        EMIT1(0x53);             /* push rbx */
1610
1611        save_regs(m, &prog, nr_args, stack_size);
1612
1613        if (fentry->nr_progs)
1614                if (invoke_bpf(m, &prog, fentry, stack_size))
1615                        return -EINVAL;
1616
1617        if (fmod_ret->nr_progs) {
1618                branches = kcalloc(fmod_ret->nr_progs, sizeof(u8 *),
1619                                   GFP_KERNEL);
1620                if (!branches)
1621                        return -ENOMEM;
1622
1623                if (invoke_bpf_mod_ret(m, &prog, fmod_ret, stack_size,
1624                                       branches)) {
1625                        ret = -EINVAL;
1626                        goto cleanup;
1627                }
1628        }
1629
1630        if (flags & BPF_TRAMP_F_CALL_ORIG) {
1631                if (fentry->nr_progs || fmod_ret->nr_progs)
1632                        restore_regs(m, &prog, nr_args, stack_size);
1633
1634                /* call original function */
1635                if (emit_call(&prog, orig_call, prog)) {
1636                        ret = -EINVAL;
1637                        goto cleanup;
1638                }
1639                /* remember the return value on the stack for bpf progs to access */
1640                emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
1641        }
1642
1643        if (fmod_ret->nr_progs) {
1644                /* From Intel 64 and IA-32 Architectures Optimization
1645                 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
1646                 * Coding Rule 11: All branch targets should be 16-byte
1647                 * aligned.
1648                 */
1649                emit_align(&prog, 16);
1650                /* Update the branches saved in invoke_bpf_mod_ret with the
1651                 * aligned address of do_fexit.
1652                 */
1653                for (i = 0; i < fmod_ret->nr_progs; i++)
1654                        emit_cond_near_jump(&branches[i], prog, branches[i],
1655                                            X86_JNE);
1656        }
1657
1658        if (fexit->nr_progs)
1659                if (invoke_bpf(m, &prog, fexit, stack_size)) {
1660                        ret = -EINVAL;
1661                        goto cleanup;
1662                }
1663
1664        if (flags & BPF_TRAMP_F_RESTORE_REGS)
1665                restore_regs(m, &prog, nr_args, stack_size);
1666
1667        /* This needs to be done regardless of BPF_TRAMP_F_RESTORE_REGS: if there
1668         * were fmod_ret programs, the return value is only updated on the stack
1669         * and still needs to be restored to R0 (rax).
1670         */
1671        if (flags & BPF_TRAMP_F_CALL_ORIG)
1672                /* restore original return value back into RAX */
1673                emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
1674
1675        EMIT1(0x5B); /* pop rbx */
1676        EMIT1(0xC9); /* leave */
1677        if (flags & BPF_TRAMP_F_SKIP_FRAME)
1678                /* skip our return address and return to parent */
1679                EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
1680        EMIT1(0xC3); /* ret */
1681        /* Make sure the trampoline generation logic doesn't overflow */
1682        if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) {
1683                ret = -EFAULT;
1684                goto cleanup;
1685        }
1686        ret = prog - (u8 *)image;
1687
1688cleanup:
1689        kfree(branches);
1690        return ret;
1691}
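
    /* For the CALL_ORIG case with nr_args == 2, the resulting stack layout
     * matches the eth_type_trans example above: the first argument sits at
     * [rbp - 24], the second at [rbp - 16], and orig_call's return value at
     * [rbp - 8].
     */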
1692
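    /* Tail of the dispatcher below: an indirect jump through rdx, routed
     * via a retpoline thunk when CONFIG_RETPOLINE is enabled.
     */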
1693static int emit_fallback_jump(u8 **pprog)
1694{
1695        u8 *prog = *pprog;
1696        int err = 0;
1697
1698#ifdef CONFIG_RETPOLINE
1699        /* Note that this assumes that the compiler uses external
1700         * thunks for indirect calls. Both clang and GCC use the same
1701         * naming convention for external thunks.
1702         */
1703        err = emit_jump(&prog, __x86_indirect_thunk_rdx, prog);
1704#else
1705        int cnt = 0;
1706
1707        EMIT2(0xFF, 0xE2);      /* jmp rdx */
1708#endif
1709        *pprog = prog;
1710        return err;
1711}
1712
1713static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs)
1714{
1715        u8 *jg_reloc, *prog = *pprog;
1716        int pivot, err, jg_bytes = 1, cnt = 0;
1717        s64 jg_offset;
1718
1719        if (a == b) {
1720                /* Leaf node of recursion, i.e. not a range of indices
1721                 * anymore.
1722                 */
1723                EMIT1(add_1mod(0x48, BPF_REG_3));       /* cmp rdx,func */
1724                if (!is_simm32(progs[a]))
1725                        return -1;
1726                EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3),
1727                            progs[a]);
1728                err = emit_cond_near_jump(&prog,        /* je func */
1729                                          (void *)progs[a], prog,
1730                                          X86_JE);
1731                if (err)
1732                        return err;
1733
1734                err = emit_fallback_jump(&prog);        /* jmp thunk/indirect */
1735                if (err)
1736                        return err;
1737
1738                *pprog = prog;
1739                return 0;
1740        }
1741
1742        /* Not a leaf node, so we pivot, and recursively descend into
1743         * the lower and upper ranges.
1744         */
1745        pivot = (b - a) / 2;
1746        EMIT1(add_1mod(0x48, BPF_REG_3));               /* cmp rdx,func */
1747        if (!is_simm32(progs[a + pivot]))
1748                return -1;
1749        EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]);
1750
1751        if (pivot > 2) {                                /* jg upper_part */
1752                /* Require near jump. */
1753                jg_bytes = 4;
1754                EMIT2_off32(0x0F, X86_JG + 0x10, 0);
1755        } else {
1756                EMIT2(X86_JG, 0);
1757        }
1758        jg_reloc = prog;
1759
1760        err = emit_bpf_dispatcher(&prog, a, a + pivot,  /* emit lower_part */
1761                                  progs);
1762        if (err)
1763                return err;
1764
1765        /* From Intel 64 and IA-32 Architectures Optimization
1766         * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
1767         * Coding Rule 11: All branch targets should be 16-byte
1768         * aligned.
1769         */
1770        emit_align(&prog, 16);
1771        jg_offset = prog - jg_reloc;
1772        emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);
1773
1774        err = emit_bpf_dispatcher(&prog, a + pivot + 1, /* emit upper_part */
1775                                  b, progs);
1776        if (err)
1777                return err;
1778
1779        *pprog = prog;
1780        return 0;
1781}
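
    /* Illustrative shape of the emitted search tree for two sorted targets
     * f0 < f1 (rdx holds the prog address being dispatched):
     *
     *    cmp rdx, f0
     *    jg  1f
     *    cmp rdx, f0
     *    je  f0
     *    jmp fallback
     * 1: cmp rdx, f1          // 16-byte aligned by emit_align()
     *    je  f1
     *    jmp fallback
     */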
1782
1783static int cmp_ips(const void *a, const void *b)
1784{
1785        const s64 *ipa = a;
1786        const s64 *ipb = b;
1787
1788        if (*ipa > *ipb)
1789                return 1;
1790        if (*ipa < *ipb)
1791                return -1;
1792        return 0;
1793}
1794
1795int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs)
1796{
1797        u8 *prog = image;
1798
1799        sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL);
1800        return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs);
1801}
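
    /* With a single function (num_funcs == 1) the tree above degenerates to
     * one cmp/je pair plus the fallback jump; funcs[] is sorted first so
     * the jg-based binary search stays consistent.
     */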
1802
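    /* JIT state kept in prog->aux->jit_data across bpf_int_jit_compile()
     * invocations, so an extra pass (see extra_pass below) can reuse the
     * image and addrs[] computed by the earlier passes.
     */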
1803struct x64_jit_data {
1804        struct bpf_binary_header *header;
1805        int *addrs;
1806        u8 *image;
1807        int proglen;
1808        struct jit_context ctx;
1809};
1810
1811struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
1812{
1813        struct bpf_binary_header *header = NULL;
1814        struct bpf_prog *tmp, *orig_prog = prog;
1815        struct x64_jit_data *jit_data;
1816        int proglen, oldproglen = 0;
1817        struct jit_context ctx = {};
1818        bool tmp_blinded = false;
1819        bool extra_pass = false;
1820        u8 *image = NULL;
1821        int *addrs;
1822        int pass;
1823        int i;
1824
1825        if (!prog->jit_requested)
1826                return orig_prog;
1827
1828        tmp = bpf_jit_blind_constants(prog);
1829        /*
1830         * If blinding was requested and we failed during blinding,
1831         * we must fall back to the interpreter.
1832         */
1833        if (IS_ERR(tmp))
1834                return orig_prog;
1835        if (tmp != prog) {
1836                tmp_blinded = true;
1837                prog = tmp;
1838        }
1839
1840        jit_data = prog->aux->jit_data;
1841        if (!jit_data) {
1842                jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
1843                if (!jit_data) {
1844                        prog = orig_prog;
1845                        goto out;
1846                }
1847                prog->aux->jit_data = jit_data;
1848        }
1849        addrs = jit_data->addrs;
1850        if (addrs) {
1851                ctx = jit_data->ctx;
1852                oldproglen = jit_data->proglen;
1853                image = jit_data->image;
1854                header = jit_data->header;
1855                extra_pass = true;
1856                goto skip_init_addrs;
1857        }
1858        addrs = kmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
1859        if (!addrs) {
1860                prog = orig_prog;
1861                goto out_addrs;
1862        }
1863
1864        /*
1865         * Before the first pass, make a rough estimate of addrs[]:
1866         * each BPF instruction is translated to fewer than 64 bytes.
1867         */
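            /* e.g. for prog->len == 2: addrs[] = { 64, 128, 192 } */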
1868        for (proglen = 0, i = 0; i <= prog->len; i++) {
1869                proglen += 64;
1870                addrs[i] = proglen;
1871        }
1872        ctx.cleanup_addr = proglen;
1873skip_init_addrs:
1874
1875        /*
1876         * JITed image shrinks with every pass and the loop iterates
1877         * until the image stops shrinking. Very large BPF programs
1878         * may converge on the last pass. In such a case, do one more
1879         * pass to emit the final image.
1880         */
1881        for (pass = 0; pass < 20 || image; pass++) {
1882                proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
1883                if (proglen <= 0) {
1884out_image:
1885                        image = NULL;
1886                        if (header)
1887                                bpf_jit_binary_free(header);
1888                        prog = orig_prog;
1889                        goto out_addrs;
1890                }
1891                if (image) {
1892                        if (proglen != oldproglen) {
1893                                pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
1894                                       proglen, oldproglen);
1895                                goto out_image;
1896                        }
1897                        break;
1898                }
1899                if (proglen == oldproglen) {
1900                        /*
1901                         * The number of entries in extable is the number of BPF_LDX
1902                         * insns that access kernel memory via "pointer to BTF type".
1903                         * The verifier changed their opcode from LDX|MEM|size
1904                         * to LDX|PROBE_MEM|size to make JITing easier.
1905                         */
1906                        u32 align = __alignof__(struct exception_table_entry);
1907                        u32 extable_size = prog->aux->num_exentries *
1908                                sizeof(struct exception_table_entry);
1909
1910                        /* allocate module memory for x86 insns and extable */
1911                        header = bpf_jit_binary_alloc(roundup(proglen, align) + extable_size,
1912                                                      &image, align, jit_fill_hole);
1913                        if (!header) {
1914                                prog = orig_prog;
1915                                goto out_addrs;
1916                        }
1917                        prog->aux->extable = (void *) image + roundup(proglen, align);
1918                }
1919                oldproglen = proglen;
1920                cond_resched();
1921        }
1922
1923        if (bpf_jit_enable > 1)
1924                bpf_jit_dump(prog->len, proglen, pass + 1, image);
1925
1926        if (image) {
1927                if (!prog->is_func || extra_pass) {
1928                        bpf_tail_call_direct_fixup(prog);
1929                        bpf_jit_binary_lock_ro(header);
1930                } else {
1931                        jit_data->addrs = addrs;
1932                        jit_data->ctx = ctx;
1933                        jit_data->proglen = proglen;
1934                        jit_data->image = image;
1935                        jit_data->header = header;
1936                }
1937                prog->bpf_func = (void *)image;
1938                prog->jited = 1;
1939                prog->jited_len = proglen;
1940        } else {
1941                prog = orig_prog;
1942        }
1943
1944        if (!image || !prog->is_func || extra_pass) {
1945                if (image)
1946                        bpf_prog_fill_jited_linfo(prog, addrs + 1);
1947out_addrs:
1948                kfree(addrs);
1949                kfree(jit_data);
1950                prog->aux->jit_data = NULL;
1951        }
1952out:
1953        if (tmp_blinded)
1954                bpf_jit_prog_release_other(prog, prog == orig_prog ?
1955                                           tmp : orig_prog);
1956        return prog;
1957}
1958