linux/kernel/bpf/core.c
   1/*
   2 * Linux Socket Filter - Kernel level socket filtering
   3 *
   4 * Based on the design of the Berkeley Packet Filter. The new
   5 * internal format has been designed by PLUMgrid:
   6 *
   7 *      Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
   8 *
   9 * Authors:
  10 *
  11 *      Jay Schulist <jschlst@samba.org>
  12 *      Alexei Starovoitov <ast@plumgrid.com>
  13 *      Daniel Borkmann <dborkman@redhat.com>
  14 *
  15 * This program is free software; you can redistribute it and/or
  16 * modify it under the terms of the GNU General Public License
  17 * as published by the Free Software Foundation; either version
  18 * 2 of the License, or (at your option) any later version.
  19 *
  20 * Andi Kleen - Fix a few bad bugs and races.
  21 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
  22 */
  23
  24#include <linux/filter.h>
  25#include <linux/skbuff.h>
  26#include <linux/vmalloc.h>
  27#include <linux/random.h>
  28#include <linux/moduleloader.h>
  29#include <linux/bpf.h>
  30#include <linux/frame.h>
  31#include <linux/rbtree_latch.h>
  32#include <linux/kallsyms.h>
  33#include <linux/rcupdate.h>
  34
  35#include <asm/unaligned.h>
  36
  37/* Registers */
  38#define BPF_R0  regs[BPF_REG_0]
  39#define BPF_R1  regs[BPF_REG_1]
  40#define BPF_R2  regs[BPF_REG_2]
  41#define BPF_R3  regs[BPF_REG_3]
  42#define BPF_R4  regs[BPF_REG_4]
  43#define BPF_R5  regs[BPF_REG_5]
  44#define BPF_R6  regs[BPF_REG_6]
  45#define BPF_R7  regs[BPF_REG_7]
  46#define BPF_R8  regs[BPF_REG_8]
  47#define BPF_R9  regs[BPF_REG_9]
  48#define BPF_R10 regs[BPF_REG_10]
  49
  50/* Named registers */
  51#define DST     regs[insn->dst_reg]
  52#define SRC     regs[insn->src_reg]
  53#define FP      regs[BPF_REG_FP]
  54#define ARG1    regs[BPF_REG_ARG1]
  55#define CTX     regs[BPF_REG_CTX]
  56#define IMM     insn->imm
  57
  58/* No hurry in this branch
  59 *
  60 * Exported for the bpf jit load helper.
  61 */
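    /* This resolves the negative offsets used by classic BPF ancillary
     * loads: k relative to SKF_NET_OFF addresses the network header, k
     * relative to SKF_LL_OFF the link-layer header. Illustrative example:
     * a filter reading the IP protocol field regardless of VLAN tags can
     * use k = SKF_NET_OFF + offsetof(struct iphdr, protocol); a pointer is
     * only returned when the access stays within the linear skb data.
     */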
  62void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
  63{
  64        u8 *ptr = NULL;
  65
  66        if (k >= SKF_NET_OFF)
  67                ptr = skb_network_header(skb) + k - SKF_NET_OFF;
  68        else if (k >= SKF_LL_OFF)
  69                ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
  70
  71        if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
  72                return ptr;
  73
  74        return NULL;
  75}
  76
  77struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
  78{
  79        gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
  80        struct bpf_prog_aux *aux;
  81        struct bpf_prog *fp;
  82
  83        size = round_up(size, PAGE_SIZE);
  84        fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
  85        if (fp == NULL)
  86                return NULL;
  87
  88        kmemcheck_annotate_bitfield(fp, meta);
  89
  90        aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
  91        if (aux == NULL) {
  92                vfree(fp);
  93                return NULL;
  94        }
  95
  96        fp->pages = size / PAGE_SIZE;
  97        fp->aux = aux;
  98        fp->aux->prog = fp;
  99
 100        INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);
 101
 102        return fp;
 103}
 104EXPORT_SYMBOL_GPL(bpf_prog_alloc);
 105
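    /* Grow an existing program allocation. If the rounded-up size still
     * fits into the old allocation, the old program is returned unchanged.
     * Otherwise the extra pages are charged against the user's memlock
     * limit, a new area is allocated and the old one is freed, with
     * fp->aux carried over. Returns NULL on failure (the charge is then
     * reverted).
     */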
 106struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
 107                                  gfp_t gfp_extra_flags)
 108{
 109        gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
 110        struct bpf_prog *fp;
 111        u32 pages, delta;
 112        int ret;
 113
 114        BUG_ON(fp_old == NULL);
 115
 116        size = round_up(size, PAGE_SIZE);
 117        pages = size / PAGE_SIZE;
 118        if (pages <= fp_old->pages)
 119                return fp_old;
 120
 121        delta = pages - fp_old->pages;
 122        ret = __bpf_prog_charge(fp_old->aux->user, delta);
 123        if (ret)
 124                return NULL;
 125
 126        fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
 127        if (fp == NULL) {
 128                __bpf_prog_uncharge(fp_old->aux->user, delta);
 129        } else {
 130                kmemcheck_annotate_bitfield(fp, meta);
 131
 132                memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
 133                fp->pages = pages;
 134                fp->aux->prog = fp;
 135
 136                /* We keep fp->aux from fp_old around in the new
 137                 * reallocated structure.
 138                 */
 139                fp_old->aux = NULL;
 140                __bpf_prog_free(fp_old);
 141        }
 142
 143        return fp;
 144}
 145
 146void __bpf_prog_free(struct bpf_prog *fp)
 147{
 148        kfree(fp->aux);
 149        vfree(fp);
 150}
 151
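    /* Compute fp->tag: a truncated SHA-1 digest over the instruction
     * image, with the map fd immediates of pseudo map-fd loads zeroed out
     * so that the tag stays stable across loads of the same program.
     */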
 152int bpf_prog_calc_tag(struct bpf_prog *fp)
 153{
 154        const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
 155        u32 raw_size = bpf_prog_tag_scratch_size(fp);
 156        u32 digest[SHA_DIGEST_WORDS];
 157        u32 ws[SHA_WORKSPACE_WORDS];
 158        u32 i, bsize, psize, blocks;
 159        struct bpf_insn *dst;
 160        bool was_ld_map;
 161        u8 *raw, *todo;
 162        __be32 *result;
 163        __be64 *bits;
 164
 165        raw = vmalloc(raw_size);
 166        if (!raw)
 167                return -ENOMEM;
 168
 169        sha_init(digest);
 170        memset(ws, 0, sizeof(ws));
 171
 172        /* We need to take out the map fds for the digest calculation
 173         * since they are unstable from the user space side.
 174         */
 175        dst = (void *)raw;
 176        for (i = 0, was_ld_map = false; i < fp->len; i++) {
 177                dst[i] = fp->insnsi[i];
 178                if (!was_ld_map &&
 179                    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
 180                    dst[i].src_reg == BPF_PSEUDO_MAP_FD) {
 181                        was_ld_map = true;
 182                        dst[i].imm = 0;
 183                } else if (was_ld_map &&
 184                           dst[i].code == 0 &&
 185                           dst[i].dst_reg == 0 &&
 186                           dst[i].src_reg == 0 &&
 187                           dst[i].off == 0) {
 188                        was_ld_map = false;
 189                        dst[i].imm = 0;
 190                } else {
 191                        was_ld_map = false;
 192                }
 193        }
 194
 195        psize = bpf_prog_insn_size(fp);
 196        memset(&raw[psize], 0, raw_size - psize);
 197        raw[psize++] = 0x80;
 198
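            /* Standard SHA-1 message padding: the 0x80 terminator byte was
             * appended above and the message length in bits goes into the
             * last 64 bits of the final block. If the terminator left no
             * room for the length, one extra (already zeroed) block is used.
             */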
 199        bsize  = round_up(psize, SHA_MESSAGE_BYTES);
 200        blocks = bsize / SHA_MESSAGE_BYTES;
 201        todo   = raw;
 202        if (bsize - psize >= sizeof(__be64)) {
 203                bits = (__be64 *)(todo + bsize - sizeof(__be64));
 204        } else {
 205                bits = (__be64 *)(todo + bsize + bits_offset);
 206                blocks++;
 207        }
 208        *bits = cpu_to_be64((psize - 1) << 3);
 209
 210        while (blocks--) {
 211                sha_transform(digest, todo, ws);
 212                todo += SHA_MESSAGE_BYTES;
 213        }
 214
 215        result = (__force __be32 *)digest;
 216        for (i = 0; i < SHA_DIGEST_WORDS; i++)
 217                result[i] = cpu_to_be32(digest[i]);
 218        memcpy(fp->tag, result, sizeof(fp->tag));
 219
 220        vfree(raw);
 221        return 0;
 222}
 223
 224static bool bpf_is_jmp_and_has_target(const struct bpf_insn *insn)
 225{
 226        return BPF_CLASS(insn->code) == BPF_JMP  &&
 227               /* Call and Exit are both special jumps with no
 228                * target inside the BPF instruction image.
 229                */
 230               BPF_OP(insn->code) != BPF_CALL &&
 231               BPF_OP(insn->code) != BPF_EXIT;
 232}
 233
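    /* Relocate relative jump offsets after @delta instructions have been
     * inserted at position @pos: a jump from before the patch site whose
     * target lies beyond it needs @delta more, and a backward jump from
     * behind the patched region to a target at or before it needs @delta
     * less (i.e. a more negative offset).
     */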
 234static void bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta)
 235{
 236        struct bpf_insn *insn = prog->insnsi;
 237        u32 i, insn_cnt = prog->len;
 238
 239        for (i = 0; i < insn_cnt; i++, insn++) {
 240                if (!bpf_is_jmp_and_has_target(insn))
 241                        continue;
 242
 243                /* Adjust offset of jmps if we cross boundaries. */
 244                if (i < pos && i + insn->off + 1 > pos)
 245                        insn->off += delta;
 246                else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
 247                        insn->off -= delta;
 248        }
 249}
 250
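    /* Replace the single instruction at @off with the @len instructions in
     * @patch. For len == 1 the patch is done in place; otherwise the
     * program is reallocated, the tail is moved and branch offsets are
     * fixed up. On success the (possibly reallocated) program is returned
     * and the old pointer must no longer be used; NULL means the enlarged
     * image could not be allocated.
     */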
 251struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
 252                                       const struct bpf_insn *patch, u32 len)
 253{
 254        u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
 255        struct bpf_prog *prog_adj;
 256
 257        /* Since our patchlet doesn't expand the image, we're done. */
 258        if (insn_delta == 0) {
 259                memcpy(prog->insnsi + off, patch, sizeof(*patch));
 260                return prog;
 261        }
 262
 263        insn_adj_cnt = prog->len + insn_delta;
 264
 265        /* Several new instructions need to be inserted. Make room
 266         * for them. Likely, there's no need for a new allocation, as
 267         * the last page could have enough tailroom.
 268         */
 269        prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
 270                                    GFP_USER);
 271        if (!prog_adj)
 272                return NULL;
 273
 274        prog_adj->len = insn_adj_cnt;
 275
 276        /* Patching happens in 3 steps:
 277         *
 278         * 1) Move over tail of insnsi from next instruction onwards,
 279         *    so we can patch the single target insn with one or more
 280         *    new ones (patching is always from 1 to n insns, n > 0).
 281         * 2) Inject new instructions at the target location.
 282         * 3) Adjust branch offsets if necessary.
 283         */
 284        insn_rest = insn_adj_cnt - off - len;
 285
 286        memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
 287                sizeof(*patch) * insn_rest);
 288        memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
 289
 290        bpf_adj_branches(prog_adj, off, insn_delta);
 291
 292        return prog_adj;
 293}
 294
 295#ifdef CONFIG_BPF_JIT
 296static __always_inline void
 297bpf_get_prog_addr_region(const struct bpf_prog *prog,
 298                         unsigned long *symbol_start,
 299                         unsigned long *symbol_end)
 300{
 301        const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
 302        unsigned long addr = (unsigned long)hdr;
 303
 304        WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
 305
 306        *symbol_start = addr;
 307        *symbol_end   = addr + hdr->pages * PAGE_SIZE;
 308}
 309
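    /* Build the kallsyms name of a JITed image: "bpf_prog_" followed by
     * the program tag in hex, e.g. bpf_prog_8815a7c7e4f14b38 (tag shown
     * here is purely illustrative).
     */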
 310static void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
 311{
 312        BUILD_BUG_ON(sizeof("bpf_prog_") +
 313                     sizeof(prog->tag) * 2 + 1 > KSYM_NAME_LEN);
 314
 315        sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
 316        sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));
 317        *sym = 0;
 318}
 319
 320static __always_inline unsigned long
 321bpf_get_prog_addr_start(struct latch_tree_node *n)
 322{
 323        unsigned long symbol_start, symbol_end;
 324        const struct bpf_prog_aux *aux;
 325
 326        aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
 327        bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
 328
 329        return symbol_start;
 330}
 331
 332static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
 333                                          struct latch_tree_node *b)
 334{
 335        return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);
 336}
 337
 338static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
 339{
 340        unsigned long val = (unsigned long)key;
 341        unsigned long symbol_start, symbol_end;
 342        const struct bpf_prog_aux *aux;
 343
 344        aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
 345        bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
 346
 347        if (val < symbol_start)
 348                return -1;
 349        if (val >= symbol_end)
 350                return  1;
 351
 352        return 0;
 353}
 354
 355static const struct latch_tree_ops bpf_tree_ops = {
 356        .less   = bpf_tree_less,
 357        .comp   = bpf_tree_comp,
 358};
 359
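    /* JITed programs are kept on a plain list for symbol enumeration and
     * in a latch tree for lock-free address lookups (e.g. from the stack
     * unwinder). Writers serialize on bpf_lock; readers only need
     * rcu_read_lock().
     */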
 360static DEFINE_SPINLOCK(bpf_lock);
 361static LIST_HEAD(bpf_kallsyms);
 362static struct latch_tree_root bpf_tree __cacheline_aligned;
 363
 364int bpf_jit_kallsyms __read_mostly;
 365
 366static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
 367{
 368        WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
 369        list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms);
 370        latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
 371}
 372
 373static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
 374{
 375        if (list_empty(&aux->ksym_lnode))
 376                return;
 377
 378        latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
 379        list_del_rcu(&aux->ksym_lnode);
 380}
 381
 382static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
 383{
 384        return fp->jited && !bpf_prog_was_classic(fp);
 385}
 386
 387static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
 388{
 389        return list_empty(&fp->aux->ksym_lnode) ||
 390               fp->aux->ksym_lnode.prev == LIST_POISON2;
 391}
 392
 393void bpf_prog_kallsyms_add(struct bpf_prog *fp)
 394{
 395        if (!bpf_prog_kallsyms_candidate(fp) ||
 396            !capable(CAP_SYS_ADMIN))
 397                return;
 398
 399        spin_lock_bh(&bpf_lock);
 400        bpf_prog_ksym_node_add(fp->aux);
 401        spin_unlock_bh(&bpf_lock);
 402}
 403
 404void bpf_prog_kallsyms_del(struct bpf_prog *fp)
 405{
 406        if (!bpf_prog_kallsyms_candidate(fp))
 407                return;
 408
 409        spin_lock_bh(&bpf_lock);
 410        bpf_prog_ksym_node_del(fp->aux);
 411        spin_unlock_bh(&bpf_lock);
 412}
 413
 414static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
 415{
 416        struct latch_tree_node *n;
 417
 418        if (!bpf_jit_kallsyms_enabled())
 419                return NULL;
 420
 421        n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
 422        return n ?
 423               container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
 424               NULL;
 425}
 426
 427const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
 428                                 unsigned long *off, char *sym)
 429{
 430        unsigned long symbol_start, symbol_end;
 431        struct bpf_prog *prog;
 432        char *ret = NULL;
 433
 434        rcu_read_lock();
 435        prog = bpf_prog_kallsyms_find(addr);
 436        if (prog) {
 437                bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end);
 438                bpf_get_prog_name(prog, sym);
 439
 440                ret = sym;
 441                if (size)
 442                        *size = symbol_end - symbol_start;
 443                if (off)
 444                        *off  = addr - symbol_start;
 445        }
 446        rcu_read_unlock();
 447
 448        return ret;
 449}
 450
 451bool is_bpf_text_address(unsigned long addr)
 452{
 453        bool ret;
 454
 455        rcu_read_lock();
 456        ret = bpf_prog_kallsyms_find(addr) != NULL;
 457        rcu_read_unlock();
 458
 459        return ret;
 460}
 461
 462int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
 463                    char *sym)
 464{
 465        unsigned long symbol_start, symbol_end;
 466        struct bpf_prog_aux *aux;
 467        unsigned int it = 0;
 468        int ret = -ERANGE;
 469
 470        if (!bpf_jit_kallsyms_enabled())
 471                return ret;
 472
 473        rcu_read_lock();
 474        list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) {
 475                if (it++ != symnum)
 476                        continue;
 477
 478                bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
 479                bpf_get_prog_name(aux->prog, sym);
 480
 481                *value = symbol_start;
 482                *type  = BPF_SYM_ELF_TYPE;
 483
 484                ret = 0;
 485                break;
 486        }
 487        rcu_read_unlock();
 488
 489        return ret;
 490}
 491
 492struct bpf_binary_header *
 493bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
 494                     unsigned int alignment,
 495                     bpf_jit_fill_hole_t bpf_fill_ill_insns)
 496{
 497        struct bpf_binary_header *hdr;
 498        unsigned int size, hole, start;
 499
 500        /* Most BPF filters are really small, but if some of them
 501         * fill a page, allow at least 128 extra bytes to insert a
 502         * random section of illegal instructions.
 503         */
 504        size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
 505        hdr = module_alloc(size);
 506        if (hdr == NULL)
 507                return NULL;
 508
 509        /* Fill space with illegal/arch-dep instructions. */
 510        bpf_fill_ill_insns(hdr, size);
 511
 512        hdr->pages = size / PAGE_SIZE;
 513        hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
 514                     PAGE_SIZE - sizeof(*hdr));
 515        start = (get_random_int() % hole) & ~(alignment - 1);
 516
 517        /* Leave a random number of instructions before BPF code. */
 518        *image_ptr = &hdr->image[start];
 519
 520        return hdr;
 521}
 522
 523void bpf_jit_binary_free(struct bpf_binary_header *hdr)
 524{
 525        module_memfree(hdr);
 526}
 527
 528/* This symbol is only overridden by archs that have different
 529 * requirements than the usual eBPF JITs, f.e. when they only
 530 * implement cBPF JIT, do not set images read-only, etc.
 531 */
 532void __weak bpf_jit_free(struct bpf_prog *fp)
 533{
 534        if (fp->jited) {
 535                struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
 536
 537                bpf_jit_binary_unlock_ro(hdr);
 538                bpf_jit_binary_free(hdr);
 539
 540                WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
 541        }
 542
 543        bpf_prog_unlock_free(fp);
 544}
 545
 546int bpf_jit_harden __read_mostly;
 547
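    /* Rewrite a single insn carrying a user-controlled 32-bit immediate so
     * that the constant never appears verbatim in the JITed image. As an
     * illustration, with rnd = get_random_int(), an ALU op like
     *
     *   BPF_ALU64_IMM(BPF_ADD, R1, imm)
     *
     * is emitted as
     *
     *   BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm ^ rnd)
     *   BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, rnd)
     *   BPF_ALU64_REG(BPF_ADD, R1, BPF_REG_AX)
     *
     * Returns the number of insns written to @to_buff, or 0 if the insn
     * does not need blinding.
     */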
 548static int bpf_jit_blind_insn(const struct bpf_insn *from,
 549                              const struct bpf_insn *aux,
 550                              struct bpf_insn *to_buff)
 551{
 552        struct bpf_insn *to = to_buff;
 553        u32 imm_rnd = get_random_int();
 554        s16 off;
 555
 556        BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
 557        BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
 558
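            /* A mov with imm == 0 carries nothing worth blinding; emit the
             * idiomatic xor dst, dst instead.
             */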
 559        if (from->imm == 0 &&
 560            (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
 561             from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
 562                *to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
 563                goto out;
 564        }
 565
 566        switch (from->code) {
 567        case BPF_ALU | BPF_ADD | BPF_K:
 568        case BPF_ALU | BPF_SUB | BPF_K:
 569        case BPF_ALU | BPF_AND | BPF_K:
 570        case BPF_ALU | BPF_OR  | BPF_K:
 571        case BPF_ALU | BPF_XOR | BPF_K:
 572        case BPF_ALU | BPF_MUL | BPF_K:
 573        case BPF_ALU | BPF_MOV | BPF_K:
 574        case BPF_ALU | BPF_DIV | BPF_K:
 575        case BPF_ALU | BPF_MOD | BPF_K:
 576                *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
 577                *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
 578                *to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
 579                break;
 580
 581        case BPF_ALU64 | BPF_ADD | BPF_K:
 582        case BPF_ALU64 | BPF_SUB | BPF_K:
 583        case BPF_ALU64 | BPF_AND | BPF_K:
 584        case BPF_ALU64 | BPF_OR  | BPF_K:
 585        case BPF_ALU64 | BPF_XOR | BPF_K:
 586        case BPF_ALU64 | BPF_MUL | BPF_K:
 587        case BPF_ALU64 | BPF_MOV | BPF_K:
 588        case BPF_ALU64 | BPF_DIV | BPF_K:
 589        case BPF_ALU64 | BPF_MOD | BPF_K:
 590                *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
 591                *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
 592                *to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
 593                break;
 594
 595        case BPF_JMP | BPF_JEQ  | BPF_K:
 596        case BPF_JMP | BPF_JNE  | BPF_K:
 597        case BPF_JMP | BPF_JGT  | BPF_K:
 598        case BPF_JMP | BPF_JGE  | BPF_K:
 599        case BPF_JMP | BPF_JSGT | BPF_K:
 600        case BPF_JMP | BPF_JSGE | BPF_K:
 601        case BPF_JMP | BPF_JSET | BPF_K:
 602                /* A backjump must also skip the two insns emitted above it. */
 603                off = from->off;
 604                if (off < 0)
 605                        off -= 2;
 606                *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
 607                *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
 608                *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
 609                break;
 610
 611        case BPF_LD | BPF_ABS | BPF_W:
 612        case BPF_LD | BPF_ABS | BPF_H:
 613        case BPF_LD | BPF_ABS | BPF_B:
 614                *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
 615                *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
 616                *to++ = BPF_LD_IND(from->code, BPF_REG_AX, 0);
 617                break;
 618
 619        case BPF_LD | BPF_IND | BPF_W:
 620        case BPF_LD | BPF_IND | BPF_H:
 621        case BPF_LD | BPF_IND | BPF_B:
 622                *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
 623                *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
 624                *to++ = BPF_ALU32_REG(BPF_ADD, BPF_REG_AX, from->src_reg);
 625                *to++ = BPF_LD_IND(from->code, BPF_REG_AX, 0);
 626                break;
 627
 628        case BPF_LD | BPF_IMM | BPF_DW:
 629                *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
 630                *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
 631                *to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
 632                *to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
 633                break;
 634        case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
 635                *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
 636                *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
 637                *to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
 638                break;
 639
 640        case BPF_ST | BPF_MEM | BPF_DW:
 641        case BPF_ST | BPF_MEM | BPF_W:
 642        case BPF_ST | BPF_MEM | BPF_H:
 643        case BPF_ST | BPF_MEM | BPF_B:
 644                *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
 645                *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
 646                *to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
 647                break;
 648        }
 649out:
 650        return to - to_buff;
 651}
 652
 653static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
 654                                              gfp_t gfp_extra_flags)
 655{
 656        gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
 657        struct bpf_prog *fp;
 658
 659        fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
 660        if (fp != NULL) {
 661                kmemcheck_annotate_bitfield(fp, meta);
 662
 663                /* aux->prog still points to the fp_other one, so
 664                 * when promoting the clone to the real program,
 665                 * this still needs to be adapted.
 666                 */
 667                memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
 668        }
 669
 670        return fp;
 671}
 672
 673static void bpf_prog_clone_free(struct bpf_prog *fp)
 674{
 675        /* aux was stolen by the other clone, so we cannot free
 676         * it from this path! It will be freed eventually by the
 677         * other program on release.
 678         *
 679         * At this point, we don't need a deferred release since
 680         * clone is guaranteed to not be locked.
 681         */
 682        fp->aux = NULL;
 683        __bpf_prog_free(fp);
 684}
 685
 686void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
 687{
 688        /* We have to repoint aux->prog to self, as we don't
 689         * know whether fp here is the clone or the original.
 690         */
 691        fp->aux->prog = fp;
 692        bpf_prog_clone_free(fp_other);
 693}
 694
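    /* Clone @prog and blind every constant-carrying insn via
     * bpf_jit_blind_insn() before the result is handed to the JIT. The
     * original program stays untouched; ERR_PTR(-ENOMEM) is returned on
     * allocation failure, and the original program is returned as-is when
     * blinding is not enabled.
     */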
 695struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
 696{
 697        struct bpf_insn insn_buff[16], aux[2];
 698        struct bpf_prog *clone, *tmp;
 699        int insn_delta, insn_cnt;
 700        struct bpf_insn *insn;
 701        int i, rewritten;
 702
 703        if (!bpf_jit_blinding_enabled())
 704                return prog;
 705
 706        clone = bpf_prog_clone_create(prog, GFP_USER);
 707        if (!clone)
 708                return ERR_PTR(-ENOMEM);
 709
 710        insn_cnt = clone->len;
 711        insn = clone->insnsi;
 712
 713        for (i = 0; i < insn_cnt; i++, insn++) {
 714                /* We temporarily need to hold the original ld64 insn
 715                 * so that we can still access the first part in the
 716                 * second blinding run.
 717                 */
 718                if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
 719                    insn[1].code == 0)
 720                        memcpy(aux, insn, sizeof(aux));
 721
 722                rewritten = bpf_jit_blind_insn(insn, aux, insn_buff);
 723                if (!rewritten)
 724                        continue;
 725
 726                tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
 727                if (!tmp) {
 728                        /* Patching may have repointed aux->prog during
 729                         * realloc from the original one, so we need to
 730                         * fix it up here on error.
 731                         */
 732                        bpf_jit_prog_release_other(prog, clone);
 733                        return ERR_PTR(-ENOMEM);
 734                }
 735
 736                clone = tmp;
 737                insn_delta = rewritten - 1;
 738
 739                /* Walk new program and skip insns we just inserted. */
 740                insn = clone->insnsi + i + insn_delta;
 741                insn_cnt += insn_delta;
 742                i        += insn_delta;
 743        }
 744
 745        return clone;
 746}
 747#endif /* CONFIG_BPF_JIT */
 748
 749/* Base function for offset calculation. Needs to go into .text section,
 750 * therefore keeping it non-static as well; will also be used by JITs
 751 * anyway later on, so do not let the compiler omit it.
 752 */
 753noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 754{
 755        return 0;
 756}
 757EXPORT_SYMBOL_GPL(__bpf_call_base);
 758
 759/**
 760 *      ___bpf_prog_run - run eBPF program on a given context
 761 *      @regs: is the array of eBPF pseudo-registers; ARG1 holds the context
 762 *      @insn: is the array of eBPF instructions
 763 *      @stack: is the program's BPF stack area, referenced by FP
 764 * Decode and execute eBPF instructions.
 765 */
 766static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
 767                                    u64 *stack)
 768{
 769        u64 tmp;
 770        static const void *jumptable[256] = {
 771                [0 ... 255] = &&default_label,
 772                /* Now overwrite non-defaults ... */
 773                /* 32 bit ALU operations */
 774                [BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
 775                [BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
 776                [BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
 777                [BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
 778                [BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
 779                [BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
 780                [BPF_ALU | BPF_OR | BPF_X]  = &&ALU_OR_X,
 781                [BPF_ALU | BPF_OR | BPF_K]  = &&ALU_OR_K,
 782                [BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
 783                [BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
 784                [BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
 785                [BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
 786                [BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
 787                [BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
 788                [BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
 789                [BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
 790                [BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
 791                [BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
 792                [BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
 793                [BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
 794                [BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
 795                [BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
 796                [BPF_ALU | BPF_NEG] = &&ALU_NEG,
 797                [BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
 798                [BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
 799                /* 64 bit ALU operations */
 800                [BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
 801                [BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
 802                [BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
 803                [BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
 804                [BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
 805                [BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
 806                [BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
 807                [BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
 808                [BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
 809                [BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
 810                [BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
 811                [BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
 812                [BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
 813                [BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
 814                [BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
 815                [BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
 816                [BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
 817                [BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
 818                [BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
 819                [BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
 820                [BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
 821                [BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
 822                [BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
 823                [BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
 824                [BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
 825                /* Call instruction */
 826                [BPF_JMP | BPF_CALL] = &&JMP_CALL,
 827                [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
 828                /* Jumps */
 829                [BPF_JMP | BPF_JA] = &&JMP_JA,
 830                [BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
 831                [BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
 832                [BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
 833                [BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
 834                [BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
 835                [BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
 836                [BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
 837                [BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
 838                [BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
 839                [BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
 840                [BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
 841                [BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
 842                [BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
 843                [BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
 844                /* Program return */
 845                [BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
 846                /* Store instructions */
 847                [BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
 848                [BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
 849                [BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
 850                [BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
 851                [BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
 852                [BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
 853                [BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
 854                [BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
 855                [BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
 856                [BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
 857                /* Load instructions */
 858                [BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
 859                [BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
 860                [BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
 861                [BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
 862                [BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
 863                [BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
 864                [BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
 865                [BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
 866                [BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
 867                [BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
 868                [BPF_LD | BPF_IMM | BPF_DW] = &&LD_IMM_DW,
 869        };
 870        u32 tail_call_cnt = 0;
 871        void *ptr;
 872        int off;
 873
 874#define CONT     ({ insn++; goto select_insn; })
 875#define CONT_JMP ({ insn++; goto select_insn; })
 876
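            /* Dispatch via computed goto: insn->code indexes straight into
             * the jumptable above, so each handler jumps to the next one
             * without going through a central switch statement.
             */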
 877select_insn:
 878        goto *jumptable[insn->code];
 879
 880        /* ALU */
 881#define ALU(OPCODE, OP)                 \
 882        ALU64_##OPCODE##_X:             \
 883                DST = DST OP SRC;       \
 884                CONT;                   \
 885        ALU_##OPCODE##_X:               \
 886                DST = (u32) DST OP (u32) SRC;   \
 887                CONT;                   \
 888        ALU64_##OPCODE##_K:             \
 889                DST = DST OP IMM;               \
 890                CONT;                   \
 891        ALU_##OPCODE##_K:               \
 892                DST = (u32) DST OP (u32) IMM;   \
 893                CONT;
 894
 895        ALU(ADD,  +)
 896        ALU(SUB,  -)
 897        ALU(AND,  &)
 898        ALU(OR,   |)
 899        ALU(LSH, <<)
 900        ALU(RSH, >>)
 901        ALU(XOR,  ^)
 902        ALU(MUL,  *)
 903#undef ALU
 904        ALU_NEG:
 905                DST = (u32) -DST;
 906                CONT;
 907        ALU64_NEG:
 908                DST = -DST;
 909                CONT;
 910        ALU_MOV_X:
 911                DST = (u32) SRC;
 912                CONT;
 913        ALU_MOV_K:
 914                DST = (u32) IMM;
 915                CONT;
 916        ALU64_MOV_X:
 917                DST = SRC;
 918                CONT;
 919        ALU64_MOV_K:
 920                DST = IMM;
 921                CONT;
 922        LD_IMM_DW:
 923                DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
 924                insn++;
 925                CONT;
 926        ALU64_ARSH_X:
 927                (*(s64 *) &DST) >>= SRC;
 928                CONT;
 929        ALU64_ARSH_K:
 930                (*(s64 *) &DST) >>= IMM;
 931                CONT;
 932        ALU64_MOD_X:
 933                if (unlikely(SRC == 0))
 934                        return 0;
 935                div64_u64_rem(DST, SRC, &tmp);
 936                DST = tmp;
 937                CONT;
 938        ALU_MOD_X:
 939                if (unlikely(SRC == 0))
 940                        return 0;
 941                tmp = (u32) DST;
 942                DST = do_div(tmp, (u32) SRC);
 943                CONT;
 944        ALU64_MOD_K:
 945                div64_u64_rem(DST, IMM, &tmp);
 946                DST = tmp;
 947                CONT;
 948        ALU_MOD_K:
 949                tmp = (u32) DST;
 950                DST = do_div(tmp, (u32) IMM);
 951                CONT;
 952        ALU64_DIV_X:
 953                if (unlikely(SRC == 0))
 954                        return 0;
 955                DST = div64_u64(DST, SRC);
 956                CONT;
 957        ALU_DIV_X:
 958                if (unlikely(SRC == 0))
 959                        return 0;
 960                tmp = (u32) DST;
 961                do_div(tmp, (u32) SRC);
 962                DST = (u32) tmp;
 963                CONT;
 964        ALU64_DIV_K:
 965                DST = div64_u64(DST, IMM);
 966                CONT;
 967        ALU_DIV_K:
 968                tmp = (u32) DST;
 969                do_div(tmp, (u32) IMM);
 970                DST = (u32) tmp;
 971                CONT;
 972        ALU_END_TO_BE:
 973                switch (IMM) {
 974                case 16:
 975                        DST = (__force u16) cpu_to_be16(DST);
 976                        break;
 977                case 32:
 978                        DST = (__force u32) cpu_to_be32(DST);
 979                        break;
 980                case 64:
 981                        DST = (__force u64) cpu_to_be64(DST);
 982                        break;
 983                }
 984                CONT;
 985        ALU_END_TO_LE:
 986                switch (IMM) {
 987                case 16:
 988                        DST = (__force u16) cpu_to_le16(DST);
 989                        break;
 990                case 32:
 991                        DST = (__force u32) cpu_to_le32(DST);
 992                        break;
 993                case 64:
 994                        DST = (__force u64) cpu_to_le64(DST);
 995                        break;
 996                }
 997                CONT;
 998
 999        /* CALL */
1000        JMP_CALL:
1001                /* Function call scratches BPF_R1-BPF_R5 registers,
1002                 * preserves BPF_R6-BPF_R9, and stores return value
1003                 * into BPF_R0.
1004                 */
1005                BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
1006                                                       BPF_R4, BPF_R5);
1007                CONT;
1008
1009        JMP_TAIL_CALL: {
1010                struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
1011                struct bpf_array *array = container_of(map, struct bpf_array, map);
1012                struct bpf_prog *prog;
1013                u64 index = BPF_R3;
1014
1015                if (unlikely(index >= array->map.max_entries))
1016                        goto out;
1017                if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
1018                        goto out;
1019
1020                tail_call_cnt++;
1021
1022                prog = READ_ONCE(array->ptrs[index]);
1023                if (!prog)
1024                        goto out;
1025
1026                /* ARG1 at this point is guaranteed to point to CTX from
1027                 * the verifier side, because the tail call is
1028                 * handled like a helper, that is, bpf_tail_call_proto,
1029                 * where arg1_type is ARG_PTR_TO_CTX.
1030                 */
1031                insn = prog->insnsi;
1032                goto select_insn;
1033out:
1034                CONT;
1035        }
1036        /* JMP */
1037        JMP_JA:
1038                insn += insn->off;
1039                CONT;
1040        JMP_JEQ_X:
1041                if (DST == SRC) {
1042                        insn += insn->off;
1043                        CONT_JMP;
1044                }
1045                CONT;
1046        JMP_JEQ_K:
1047                if (DST == IMM) {
1048                        insn += insn->off;
1049                        CONT_JMP;
1050                }
1051                CONT;
1052        JMP_JNE_X:
1053                if (DST != SRC) {
1054                        insn += insn->off;
1055                        CONT_JMP;
1056                }
1057                CONT;
1058        JMP_JNE_K:
1059                if (DST != IMM) {
1060                        insn += insn->off;
1061                        CONT_JMP;
1062                }
1063                CONT;
1064        JMP_JGT_X:
1065                if (DST > SRC) {
1066                        insn += insn->off;
1067                        CONT_JMP;
1068                }
1069                CONT;
1070        JMP_JGT_K:
1071                if (DST > IMM) {
1072                        insn += insn->off;
1073                        CONT_JMP;
1074                }
1075                CONT;
1076        JMP_JGE_X:
1077                if (DST >= SRC) {
1078                        insn += insn->off;
1079                        CONT_JMP;
1080                }
1081                CONT;
1082        JMP_JGE_K:
1083                if (DST >= IMM) {
1084                        insn += insn->off;
1085                        CONT_JMP;
1086                }
1087                CONT;
1088        JMP_JSGT_X:
1089                if (((s64) DST) > ((s64) SRC)) {
1090                        insn += insn->off;
1091                        CONT_JMP;
1092                }
1093                CONT;
1094        JMP_JSGT_K:
1095                if (((s64) DST) > ((s64) IMM)) {
1096                        insn += insn->off;
1097                        CONT_JMP;
1098                }
1099                CONT;
1100        JMP_JSGE_X:
1101                if (((s64) DST) >= ((s64) SRC)) {
1102                        insn += insn->off;
1103                        CONT_JMP;
1104                }
1105                CONT;
1106        JMP_JSGE_K:
1107                if (((s64) DST) >= ((s64) IMM)) {
1108                        insn += insn->off;
1109                        CONT_JMP;
1110                }
1111                CONT;
1112        JMP_JSET_X:
1113                if (DST & SRC) {
1114                        insn += insn->off;
1115                        CONT_JMP;
1116                }
1117                CONT;
1118        JMP_JSET_K:
1119                if (DST & IMM) {
1120                        insn += insn->off;
1121                        CONT_JMP;
1122                }
1123                CONT;
1124        JMP_EXIT:
1125                return BPF_R0;
1126
1127        /* STX and ST and LDX*/
1128#define LDST(SIZEOP, SIZE)                                              \
1129        STX_MEM_##SIZEOP:                                               \
1130                *(SIZE *)(unsigned long) (DST + insn->off) = SRC;       \
1131                CONT;                                                   \
1132        ST_MEM_##SIZEOP:                                                \
1133                *(SIZE *)(unsigned long) (DST + insn->off) = IMM;       \
1134                CONT;                                                   \
1135        LDX_MEM_##SIZEOP:                                               \
1136                DST = *(SIZE *)(unsigned long) (SRC + insn->off);       \
1137                CONT;
1138
1139        LDST(B,   u8)
1140        LDST(H,  u16)
1141        LDST(W,  u32)
1142        LDST(DW, u64)
1143#undef LDST
1144        STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
1145                atomic_add((u32) SRC, (atomic_t *)(unsigned long)
1146                           (DST + insn->off));
1147                CONT;
1148        STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
1149                atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
1150                             (DST + insn->off));
1151                CONT;
1152        LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
1153                off = IMM;
1154load_word:
1155                /* BPF_LD + BPF_ABS and BPF_LD + BPF_IND insns only appear
1156                 * in programs where ctx == skb
1157                 * (see may_access_skb() in the verifier). All programs
1158                 * keep 'ctx' in regs[BPF_REG_CTX] == BPF_R6,
1159                 * bpf_convert_filter() saves it in BPF_R6, and the eBPF
1160                 * verifier checks that BPF_R6 == ctx.
1161                 *
1162                 * BPF_ABS and BPF_IND are wrappers of function calls,
1163                 * so they scratch BPF_R1-BPF_R5 registers, preserve
1164                 * BPF_R6-BPF_R9, and store return value into BPF_R0.
1165                 *
1166                 * Implicit input:
1167                 *   ctx == skb == BPF_R6 == CTX
1168                 *
1169                 * Explicit input:
1170                 *   SRC == any register
1171                 *   IMM == 32-bit immediate
1172                 *
1173                 * Output:
1174                 *   BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
1175                 */
1176
1177                ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
1178                if (likely(ptr != NULL)) {
1179                        BPF_R0 = get_unaligned_be32(ptr);
1180                        CONT;
1181                }
1182
1183                return 0;
1184        LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
1185                off = IMM;
1186load_half:
1187                ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
1188                if (likely(ptr != NULL)) {
1189                        BPF_R0 = get_unaligned_be16(ptr);
1190                        CONT;
1191                }
1192
1193                return 0;
1194        LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
1195                off = IMM;
1196load_byte:
1197                ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
1198                if (likely(ptr != NULL)) {
1199                        BPF_R0 = *(u8 *)ptr;
1200                        CONT;
1201                }
1202
1203                return 0;
1204        LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + src_reg + imm32)) */
1205                off = IMM + SRC;
1206                goto load_word;
1207        LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + src_reg + imm32)) */
1208                off = IMM + SRC;
1209                goto load_half;
1210        LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + src_reg + imm32) */
1211                off = IMM + SRC;
1212                goto load_byte;
1213
1214        default_label:
1215                /* If we ever reach this, we have a bug somewhere. */
1216                WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
1217                return 0;
1218}
1219STACK_FRAME_NON_STANDARD(___bpf_prog_run); /* jump table */
1220
1221#define PROG_NAME(stack_size) __bpf_prog_run##stack_size
1222#define DEFINE_BPF_PROG_RUN(stack_size) \
1223static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
1224{ \
1225        u64 stack[stack_size / sizeof(u64)]; \
1226        u64 regs[MAX_BPF_REG]; \
1227\
1228        FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1229        ARG1 = (u64) (unsigned long) ctx; \
1230        return ___bpf_prog_run(regs, insn, stack); \
1231}
1232
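    /* Instantiate the interpreter once per supported stack size: the
     * EVALn() helpers below expand DEFINE_BPF_PROG_RUN() for 32, 64, ...,
     * 512 bytes, so a program only reserves the stack depth the verifier
     * computed for it.
     */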
1233#define EVAL1(FN, X) FN(X)
1234#define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
1235#define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
1236#define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
1237#define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
1238#define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
1239
1240EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
1241EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
1242EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
1243
1244#define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
1245
1246static unsigned int (*interpreters[])(const void *ctx,
1247                                      const struct bpf_insn *insn) = {
1248EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1249EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1250EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1251};
1252
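    /* A prog array (tail call map) may only hold programs of a single type
     * and JIT state: the first program inserted becomes the owner, and
     * later insertions must match its type and jited flag.
     */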
1253bool bpf_prog_array_compatible(struct bpf_array *array,
1254                               const struct bpf_prog *fp)
1255{
1256        if (!array->owner_prog_type) {
1257                /* There's no owner yet where we could check for
1258                 * compatibility.
1259                 */
1260                array->owner_prog_type = fp->type;
1261                array->owner_jited = fp->jited;
1262
1263                return true;
1264        }
1265
1266        return array->owner_prog_type == fp->type &&
1267               array->owner_jited == fp->jited;
1268}
1269
1270static int bpf_check_tail_call(const struct bpf_prog *fp)
1271{
1272        struct bpf_prog_aux *aux = fp->aux;
1273        int i;
1274
1275        for (i = 0; i < aux->used_map_cnt; i++) {
1276                struct bpf_map *map = aux->used_maps[i];
1277                struct bpf_array *array;
1278
1279                if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
1280                        continue;
1281
1282                array = container_of(map, struct bpf_array, map);
1283                if (!bpf_prog_array_compatible(array, fp))
1284                        return -EINVAL;
1285        }
1286
1287        return 0;
1288}
1289
1290/**
1291 *      bpf_prog_select_runtime - select exec runtime for BPF program
1292 *      @fp: bpf_prog populated with internal BPF program
1293 *      @err: pointer to error variable
1294 *
1295 * Try to JIT eBPF program, if JIT is not available, use interpreter.
1296 * The BPF program will be executed via BPF_PROG_RUN() macro.
1297 */
1298struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
1299{
1300        u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
1301
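            /* Pick the interpreter flavour whose fixed stack size covers
             * the verifier-computed stack depth, rounded up to the next
             * 32 bytes.
             */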
1302        fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
1303
1304        /* eBPF JITs can rewrite the program in case constant
1305         * blinding is active. However, in case of error during
1306         * blinding, bpf_int_jit_compile() must always return a
1307         * valid program, which in this case would simply not
1308         * be JITed, but fall back to the interpreter.
1309         */
1310        fp = bpf_int_jit_compile(fp);
1311        bpf_prog_lock_ro(fp);
1312
1313        /* The tail call compatibility check can only be done at this
1314         * late stage, as we need to determine whether we deal with JITed
1315         * or non-JITed program concatenations, and not all eBPF JITs
1316         * might immediately support all features.
1317         */
1318        *err = bpf_check_tail_call(fp);
1319
1320        return fp;
1321}
1322EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
1323
1324static void bpf_prog_free_deferred(struct work_struct *work)
1325{
1326        struct bpf_prog_aux *aux;
1327
1328        aux = container_of(work, struct bpf_prog_aux, work);
1329        bpf_jit_free(aux->prog);
1330}
1331
1332/* Free internal BPF program */
1333void bpf_prog_free(struct bpf_prog *fp)
1334{
1335        struct bpf_prog_aux *aux = fp->aux;
1336
1337        INIT_WORK(&aux->work, bpf_prog_free_deferred);
1338        schedule_work(&aux->work);
1339}
1340EXPORT_SYMBOL_GPL(bpf_prog_free);
1341
1342/* RNG for unprivileged user space, with state separate from prandom_u32(). */
1343static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
1344
1345void bpf_user_rnd_init_once(void)
1346{
1347        prandom_init_once(&bpf_user_rnd_state);
1348}
1349
1350BPF_CALL_0(bpf_user_rnd_u32)
1351{
1352        /* Should someone ever have the rather unwise idea to use some
1353         * of the registers passed into this function, then note that
1354         * this function is called from native eBPF and classic-to-eBPF
1355         * transformations. Register assignments from both sides are
1356         * different, f.e. classic always sets fn(ctx, A, X) here.
1357         */
1358        struct rnd_state *state;
1359        u32 res;
1360
1361        state = &get_cpu_var(bpf_user_rnd_state);
1362        res = prandom_u32_state(state);
1363        put_cpu_var(bpf_user_rnd_state);
1364
1365        return res;
1366}
1367
1368/* Weak definitions of helper functions in case we don't have bpf syscall. */
1369const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
1370const struct bpf_func_proto bpf_map_update_elem_proto __weak;
1371const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
1372
1373const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
1374const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
1375const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
1376const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
1377
1378const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
1379const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
1380const struct bpf_func_proto bpf_get_current_comm_proto __weak;
1381
1382const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
1383{
1384        return NULL;
1385}
1386
1387u64 __weak
1388bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
1389                 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
1390{
1391        return -ENOTSUPP;
1392}
1393
1394/* Always built-in helper functions. */
1395const struct bpf_func_proto bpf_tail_call_proto = {
1396        .func           = NULL,
1397        .gpl_only       = false,
1398        .ret_type       = RET_VOID,
1399        .arg1_type      = ARG_PTR_TO_CTX,
1400        .arg2_type      = ARG_CONST_MAP_PTR,
1401        .arg3_type      = ARG_ANYTHING,
1402};
1403
1404/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
1405 * It is encouraged to implement bpf_int_jit_compile() instead, so that
1406 * eBPF and implicitly also cBPF can get JITed!
1407 */
1408struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
1409{
1410        return prog;
1411}
1412
1413/* Stub for JITs that support eBPF. All cBPF code gets transformed into
1414 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
1415 */
1416void __weak bpf_jit_compile(struct bpf_prog *prog)
1417{
1418}
1419
1420bool __weak bpf_helper_changes_pkt_data(void *func)
1421{
1422        return false;
1423}
1424
1425/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
1426 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
1427 */
1428int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
1429                         int len)
1430{
1431        return -EFAULT;
1432}
1433
1434/* All definitions of tracepoints related to BPF. */
1435#define CREATE_TRACE_POINTS
1436#include <linux/bpf_trace.h>
1437
1438EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
1439
1440EXPORT_TRACEPOINT_SYMBOL_GPL(bpf_prog_get_type);
1441EXPORT_TRACEPOINT_SYMBOL_GPL(bpf_prog_put_rcu);
1442