linux/kernel/bpf/core.c
   1/*
   2 * Linux Socket Filter - Kernel level socket filtering
   3 *
   4 * Based on the design of the Berkeley Packet Filter. The new
   5 * internal format has been designed by PLUMgrid:
   6 *
   7 *      Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
   8 *
   9 * Authors:
  10 *
  11 *      Jay Schulist <jschlst@samba.org>
  12 *      Alexei Starovoitov <ast@plumgrid.com>
  13 *      Daniel Borkmann <dborkman@redhat.com>
  14 *
  15 * This program is free software; you can redistribute it and/or
  16 * modify it under the terms of the GNU General Public License
  17 * as published by the Free Software Foundation; either version
  18 * 2 of the License, or (at your option) any later version.
  19 *
  20 * Andi Kleen - Fix a few bad bugs and races.
  21 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
  22 */
  23
  24#include <linux/filter.h>
  25#include <linux/skbuff.h>
  26#include <linux/vmalloc.h>
  27#include <linux/random.h>
  28#include <linux/moduleloader.h>
  29#include <linux/bpf.h>
  30#include <linux/frame.h>
  31#include <linux/rbtree_latch.h>
  32#include <linux/kallsyms.h>
  33#include <linux/rcupdate.h>
  34#include <linux/perf_event.h>
  35
  36#include <asm/unaligned.h>
  37
  38/* Registers */
  39#define BPF_R0  regs[BPF_REG_0]
  40#define BPF_R1  regs[BPF_REG_1]
  41#define BPF_R2  regs[BPF_REG_2]
  42#define BPF_R3  regs[BPF_REG_3]
  43#define BPF_R4  regs[BPF_REG_4]
  44#define BPF_R5  regs[BPF_REG_5]
  45#define BPF_R6  regs[BPF_REG_6]
  46#define BPF_R7  regs[BPF_REG_7]
  47#define BPF_R8  regs[BPF_REG_8]
  48#define BPF_R9  regs[BPF_REG_9]
  49#define BPF_R10 regs[BPF_REG_10]
  50
  51/* Named registers */
  52#define DST     regs[insn->dst_reg]
  53#define SRC     regs[insn->src_reg]
  54#define FP      regs[BPF_REG_FP]
  55#define ARG1    regs[BPF_REG_ARG1]
  56#define CTX     regs[BPF_REG_CTX]
  57#define IMM     insn->imm
  58
  59/* No hurry in this branch
  60 *
  61 * Exported for the bpf jit load helper.
  62 */
  63void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
  64{
  65        u8 *ptr = NULL;
  66
  67        if (k >= SKF_NET_OFF)
  68                ptr = skb_network_header(skb) + k - SKF_NET_OFF;
  69        else if (k >= SKF_LL_OFF)
  70                ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
  71
  72        if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
  73                return ptr;
  74
  75        return NULL;
  76}
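/* For illustration: the negative offsets come from classic BPF, where
 * SKF_NET_OFF and SKF_LL_OFF (<uapi/linux/filter.h>) mark loads relative
 * to the network or link layer header. A cBPF filter doing e.g.
 *
 *	BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 0)
 *
 * wants the first byte of the network header; a JIT would then call this
 * helper roughly as
 *
 *	u8 *p = bpf_internal_load_pointer_neg_helper(skb, SKF_NET_OFF + 0, 1);
 *
 * and take its error path when NULL comes back, i.e. when the requested
 * bytes are not in the linear skb area.
 */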
  77
  78struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
  79{
  80        gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
  81        struct bpf_prog_aux *aux;
  82        struct bpf_prog *fp;
  83
  84        size = round_up(size, PAGE_SIZE);
  85        fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
  86        if (fp == NULL)
  87                return NULL;
  88
  89        aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
  90        if (aux == NULL) {
  91                vfree(fp);
  92                return NULL;
  93        }
  94
  95        fp->pages = size / PAGE_SIZE;
  96        fp->aux = aux;
  97        fp->aux->prog = fp;
  98        fp->jit_requested = ebpf_jit_enabled();
  99
 100        INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);
 101
 102        return fp;
 103}
 104EXPORT_SYMBOL_GPL(bpf_prog_alloc);
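/* Sketch of typical use, loosely modelled on the program load path in
 * kernel/bpf/syscall.c (which may differ in detail):
 *
 *	fp = bpf_prog_alloc(bpf_prog_size(insn_cnt), GFP_USER);
 *	if (!fp)
 *		return -ENOMEM;
 *	fp->len = insn_cnt;
 *	memcpy(fp->insnsi, insns, bpf_prog_insn_size(fp));
 *
 * The size is rounded up to whole pages and recorded in fp->pages, which
 * the memlock accounting uses later on.
 */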
 105
 106struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
 107                                  gfp_t gfp_extra_flags)
 108{
 109        gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
 110        struct bpf_prog *fp;
 111        u32 pages, delta;
 112        int ret;
 113
 114        BUG_ON(fp_old == NULL);
 115
 116        size = round_up(size, PAGE_SIZE);
 117        pages = size / PAGE_SIZE;
 118        if (pages <= fp_old->pages)
 119                return fp_old;
 120
 121        delta = pages - fp_old->pages;
 122        ret = __bpf_prog_charge(fp_old->aux->user, delta);
 123        if (ret)
 124                return NULL;
 125
 126        fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
 127        if (fp == NULL) {
 128                __bpf_prog_uncharge(fp_old->aux->user, delta);
 129        } else {
 130                memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
 131                fp->pages = pages;
 132                fp->aux->prog = fp;
 133
 134                /* We keep fp->aux from fp_old around in the new
 135                 * reallocated structure.
 136                 */
 137                fp_old->aux = NULL;
 138                __bpf_prog_free(fp_old);
 139        }
 140
 141        return fp;
 142}
 143
 144void __bpf_prog_free(struct bpf_prog *fp)
 145{
 146        kfree(fp->aux);
 147        vfree(fp);
 148}
 149
 150int bpf_prog_calc_tag(struct bpf_prog *fp)
 151{
 152        const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
 153        u32 raw_size = bpf_prog_tag_scratch_size(fp);
 154        u32 digest[SHA_DIGEST_WORDS];
 155        u32 ws[SHA_WORKSPACE_WORDS];
 156        u32 i, bsize, psize, blocks;
 157        struct bpf_insn *dst;
 158        bool was_ld_map;
 159        u8 *raw, *todo;
 160        __be32 *result;
 161        __be64 *bits;
 162
 163        raw = vmalloc(raw_size);
 164        if (!raw)
 165                return -ENOMEM;
 166
 167        sha_init(digest);
 168        memset(ws, 0, sizeof(ws));
 169
  170        /* We need to take out the map fds for the digest calculation
  171         * since they are unstable from the user space side.
  172         */
 173        dst = (void *)raw;
 174        for (i = 0, was_ld_map = false; i < fp->len; i++) {
 175                dst[i] = fp->insnsi[i];
 176                if (!was_ld_map &&
 177                    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
 178                    dst[i].src_reg == BPF_PSEUDO_MAP_FD) {
 179                        was_ld_map = true;
 180                        dst[i].imm = 0;
 181                } else if (was_ld_map &&
 182                           dst[i].code == 0 &&
 183                           dst[i].dst_reg == 0 &&
 184                           dst[i].src_reg == 0 &&
 185                           dst[i].off == 0) {
 186                        was_ld_map = false;
 187                        dst[i].imm = 0;
 188                } else {
 189                        was_ld_map = false;
 190                }
 191        }
 192
 193        psize = bpf_prog_insn_size(fp);
 194        memset(&raw[psize], 0, raw_size - psize);
 195        raw[psize++] = 0x80;
 196
 197        bsize  = round_up(psize, SHA_MESSAGE_BYTES);
 198        blocks = bsize / SHA_MESSAGE_BYTES;
 199        todo   = raw;
 200        if (bsize - psize >= sizeof(__be64)) {
 201                bits = (__be64 *)(todo + bsize - sizeof(__be64));
 202        } else {
 203                bits = (__be64 *)(todo + bsize + bits_offset);
 204                blocks++;
 205        }
 206        *bits = cpu_to_be64((psize - 1) << 3);
 207
 208        while (blocks--) {
 209                sha_transform(digest, todo, ws);
 210                todo += SHA_MESSAGE_BYTES;
 211        }
 212
 213        result = (__force __be32 *)digest;
 214        for (i = 0; i < SHA_DIGEST_WORDS; i++)
 215                result[i] = cpu_to_be32(digest[i]);
 216        memcpy(fp->tag, result, sizeof(fp->tag));
 217
 218        vfree(raw);
 219        return 0;
 220}
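/* Worked example of the padding above (standard SHA-1 message padding):
 * a two-instruction program has psize = 2 * sizeof(struct bpf_insn) = 16.
 * Appending the 0x80 terminator makes psize 17, bsize rounds up to 64 and
 * blocks = 1. Since 64 - 17 >= sizeof(__be64), the big-endian bit length
 * (17 - 1) * 8 = 128 lands in the last 8 bytes of that single block and
 * one sha_transform() round produces the tag. Only when the message runs
 * into the final 8 bytes of a block does an extra all-padding block get
 * appended.
 */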
 221
 222static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, u32 delta,
 223                                u32 curr, const bool probe_pass)
 224{
 225        const s64 imm_min = S32_MIN, imm_max = S32_MAX;
 226        s64 imm = insn->imm;
 227
 228        if (curr < pos && curr + imm + 1 > pos)
 229                imm += delta;
 230        else if (curr > pos + delta && curr + imm + 1 <= pos + delta)
 231                imm -= delta;
 232        if (imm < imm_min || imm > imm_max)
 233                return -ERANGE;
 234        if (!probe_pass)
 235                insn->imm = imm;
 236        return 0;
 237}
 238
 239static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, u32 delta,
 240                                u32 curr, const bool probe_pass)
 241{
 242        const s32 off_min = S16_MIN, off_max = S16_MAX;
 243        s32 off = insn->off;
 244
 245        if (curr < pos && curr + off + 1 > pos)
 246                off += delta;
 247        else if (curr > pos + delta && curr + off + 1 <= pos + delta)
 248                off -= delta;
 249        if (off < off_min || off > off_max)
 250                return -ERANGE;
 251        if (!probe_pass)
 252                insn->off = off;
 253        return 0;
 254}
 255
 256static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta,
 257                            const bool probe_pass)
 258{
 259        u32 i, insn_cnt = prog->len + (probe_pass ? delta : 0);
 260        struct bpf_insn *insn = prog->insnsi;
 261        int ret = 0;
 262
 263        for (i = 0; i < insn_cnt; i++, insn++) {
 264                u8 code;
 265
 266                /* In the probing pass we still operate on the original,
 267                 * unpatched image in order to check overflows before we
 268                 * do any other adjustments. Therefore skip the patchlet.
 269                 */
 270                if (probe_pass && i == pos) {
 271                        i += delta + 1;
 272                        insn++;
 273                }
 274                code = insn->code;
 275                if (BPF_CLASS(code) != BPF_JMP ||
 276                    BPF_OP(code) == BPF_EXIT)
 277                        continue;
 278                /* Adjust offset of jmps if we cross patch boundaries. */
 279                if (BPF_OP(code) == BPF_CALL) {
 280                        if (insn->src_reg != BPF_PSEUDO_CALL)
 281                                continue;
 282                        ret = bpf_adj_delta_to_imm(insn, pos, delta, i,
 283                                                   probe_pass);
 284                } else {
 285                        ret = bpf_adj_delta_to_off(insn, pos, delta, i,
 286                                                   probe_pass);
 287                }
 288                if (ret)
 289                        break;
 290        }
 291
 292        return ret;
 293}
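/* Example of the adjustment done above: a jump at index 2 with off = +3
 * targets insn 2 + 3 + 1 = 6. If a patch at pos = 4 grows the image by
 * delta = 2, that target slides to index 8, so the offset becomes +5.
 * Backward jumps sitting behind the patched area that target something in
 * front of it get delta subtracted instead. The probe pass runs the same
 * arithmetic on the still unpatched image purely to catch s16 (off) or
 * s32 (imm) overflow before any reallocation has happened.
 */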
 294
 295struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
 296                                       const struct bpf_insn *patch, u32 len)
 297{
 298        u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
 299        const u32 cnt_max = S16_MAX;
 300        struct bpf_prog *prog_adj;
 301
 302        /* Since our patchlet doesn't expand the image, we're done. */
 303        if (insn_delta == 0) {
 304                memcpy(prog->insnsi + off, patch, sizeof(*patch));
 305                return prog;
 306        }
 307
 308        insn_adj_cnt = prog->len + insn_delta;
 309
 310        /* Reject anything that would potentially let the insn->off
 311         * target overflow when we have excessive program expansions.
 312         * We need to probe here before we do any reallocation where
 313         * we afterwards may not fail anymore.
 314         */
 315        if (insn_adj_cnt > cnt_max &&
 316            bpf_adj_branches(prog, off, insn_delta, true))
 317                return NULL;
 318
 319        /* Several new instructions need to be inserted. Make room
 320         * for them. Likely, there's no need for a new allocation as
 321         * last page could have large enough tailroom.
 322         */
 323        prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
 324                                    GFP_USER);
 325        if (!prog_adj)
 326                return NULL;
 327
 328        prog_adj->len = insn_adj_cnt;
 329
 330        /* Patching happens in 3 steps:
 331         *
 332         * 1) Move over tail of insnsi from next instruction onwards,
 333         *    so we can patch the single target insn with one or more
 334         *    new ones (patching is always from 1 to n insns, n > 0).
 335         * 2) Inject new instructions at the target location.
 336         * 3) Adjust branch offsets if necessary.
 337         */
 338        insn_rest = insn_adj_cnt - off - len;
 339
 340        memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
 341                sizeof(*patch) * insn_rest);
 342        memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
 343
  344        /* We are guaranteed not to fail at this point, otherwise
  345         * the ship has sailed and we cannot revert to the original
  346         * state. An overflow cannot happen at this point.
  347         */
 348        BUG_ON(bpf_adj_branches(prog_adj, off, insn_delta, false));
 349
 350        return prog_adj;
 351}
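/* Usage note: callers such as the verifier and bpf_jit_blind_constants()
 * below replace the single instruction at @off with @len new ones and must
 * continue with the returned program, since bpf_prog_realloc() may have
 * freed the old struct bpf_prog when the image grew. A NULL return means
 * the patch was not applied and the original program is untouched.
 */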
 352
 353void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
 354{
 355        int i;
 356
 357        for (i = 0; i < fp->aux->func_cnt; i++)
 358                bpf_prog_kallsyms_del(fp->aux->func[i]);
 359}
 360
 361void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
 362{
 363        bpf_prog_kallsyms_del_subprogs(fp);
 364        bpf_prog_kallsyms_del(fp);
 365}
 366
 367#ifdef CONFIG_BPF_JIT
 368/* All BPF JIT sysctl knobs here. */
 369int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
 370int bpf_jit_harden   __read_mostly;
 371int bpf_jit_kallsyms __read_mostly;
 372
 373static __always_inline void
 374bpf_get_prog_addr_region(const struct bpf_prog *prog,
 375                         unsigned long *symbol_start,
 376                         unsigned long *symbol_end)
 377{
 378        const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
 379        unsigned long addr = (unsigned long)hdr;
 380
 381        WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
 382
 383        *symbol_start = addr;
 384        *symbol_end   = addr + hdr->pages * PAGE_SIZE;
 385}
 386
 387static void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
 388{
 389        const char *end = sym + KSYM_NAME_LEN;
 390
 391        BUILD_BUG_ON(sizeof("bpf_prog_") +
 392                     sizeof(prog->tag) * 2 +
 393                     /* name has been null terminated.
  394                      * We would need +1 for the '_' preceding
 395                      * the name.  However, the null character
 396                      * is double counted between the name and the
 397                      * sizeof("bpf_prog_") above, so we omit
 398                      * the +1 here.
 399                      */
 400                     sizeof(prog->aux->name) > KSYM_NAME_LEN);
 401
 402        sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
 403        sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));
 404        if (prog->aux->name[0])
 405                snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
 406        else
 407                *sym = 0;
 408}
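/* The resulting symbol is "bpf_prog_<tag>" or "bpf_prog_<tag>_<name>",
 * with the 8-byte program tag printed as 16 hex characters, e.g. (with a
 * made-up tag) "bpf_prog_5a2bc61e5c6f2c55_my_prog".
 */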
 409
 410static __always_inline unsigned long
 411bpf_get_prog_addr_start(struct latch_tree_node *n)
 412{
 413        unsigned long symbol_start, symbol_end;
 414        const struct bpf_prog_aux *aux;
 415
 416        aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
 417        bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
 418
 419        return symbol_start;
 420}
 421
 422static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
 423                                          struct latch_tree_node *b)
 424{
 425        return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);
 426}
 427
 428static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
 429{
 430        unsigned long val = (unsigned long)key;
 431        unsigned long symbol_start, symbol_end;
 432        const struct bpf_prog_aux *aux;
 433
 434        aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
 435        bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
 436
 437        if (val < symbol_start)
 438                return -1;
 439        if (val >= symbol_end)
 440                return  1;
 441
 442        return 0;
 443}
 444
 445static const struct latch_tree_ops bpf_tree_ops = {
 446        .less   = bpf_tree_less,
 447        .comp   = bpf_tree_comp,
 448};
 449
 450static DEFINE_SPINLOCK(bpf_lock);
 451static LIST_HEAD(bpf_kallsyms);
 452static struct latch_tree_root bpf_tree __cacheline_aligned;
 453
 454static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
 455{
 456        WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
 457        list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms);
 458        latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
 459}
 460
 461static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
 462{
 463        if (list_empty(&aux->ksym_lnode))
 464                return;
 465
 466        latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
 467        list_del_rcu(&aux->ksym_lnode);
 468}
 469
 470static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
 471{
 472        return fp->jited && !bpf_prog_was_classic(fp);
 473}
 474
 475static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
 476{
 477        return list_empty(&fp->aux->ksym_lnode) ||
 478               fp->aux->ksym_lnode.prev == LIST_POISON2;
 479}
 480
 481void bpf_prog_kallsyms_add(struct bpf_prog *fp)
 482{
 483        if (!bpf_prog_kallsyms_candidate(fp) ||
 484            !capable(CAP_SYS_ADMIN))
 485                return;
 486
 487        spin_lock_bh(&bpf_lock);
 488        bpf_prog_ksym_node_add(fp->aux);
 489        spin_unlock_bh(&bpf_lock);
 490}
 491
 492void bpf_prog_kallsyms_del(struct bpf_prog *fp)
 493{
 494        if (!bpf_prog_kallsyms_candidate(fp))
 495                return;
 496
 497        spin_lock_bh(&bpf_lock);
 498        bpf_prog_ksym_node_del(fp->aux);
 499        spin_unlock_bh(&bpf_lock);
 500}
 501
 502static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
 503{
 504        struct latch_tree_node *n;
 505
 506        if (!bpf_jit_kallsyms_enabled())
 507                return NULL;
 508
 509        n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
 510        return n ?
 511               container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
 512               NULL;
 513}
 514
 515const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
 516                                 unsigned long *off, char *sym)
 517{
 518        unsigned long symbol_start, symbol_end;
 519        struct bpf_prog *prog;
 520        char *ret = NULL;
 521
 522        rcu_read_lock();
 523        prog = bpf_prog_kallsyms_find(addr);
 524        if (prog) {
 525                bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end);
 526                bpf_get_prog_name(prog, sym);
 527
 528                ret = sym;
 529                if (size)
 530                        *size = symbol_end - symbol_start;
 531                if (off)
 532                        *off  = addr - symbol_start;
 533        }
 534        rcu_read_unlock();
 535
 536        return ret;
 537}
 538
 539bool is_bpf_text_address(unsigned long addr)
 540{
 541        bool ret;
 542
 543        rcu_read_lock();
 544        ret = bpf_prog_kallsyms_find(addr) != NULL;
 545        rcu_read_unlock();
 546
 547        return ret;
 548}
 549
 550int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
 551                    char *sym)
 552{
 553        unsigned long symbol_start, symbol_end;
 554        struct bpf_prog_aux *aux;
 555        unsigned int it = 0;
 556        int ret = -ERANGE;
 557
 558        if (!bpf_jit_kallsyms_enabled())
 559                return ret;
 560
 561        rcu_read_lock();
 562        list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) {
 563                if (it++ != symnum)
 564                        continue;
 565
 566                bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
 567                bpf_get_prog_name(aux->prog, sym);
 568
 569                *value = symbol_start;
 570                *type  = BPF_SYM_ELF_TYPE;
 571
 572                ret = 0;
 573                break;
 574        }
 575        rcu_read_unlock();
 576
 577        return ret;
 578}
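/* With bpf_jit_kallsyms enabled, the symbols exported here appear in
 * /proc/kallsyms next to regular kernel symbols, e.g. (made-up address
 * and tag):
 *
 *	ffffffffc02a4cd4 t bpf_prog_5a2bc61e5c6f2c55_my_prog
 *
 * BPF_SYM_ELF_TYPE is 't', so JITed programs are listed as text symbols.
 */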
 579
 580struct bpf_binary_header *
 581bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
 582                     unsigned int alignment,
 583                     bpf_jit_fill_hole_t bpf_fill_ill_insns)
 584{
 585        struct bpf_binary_header *hdr;
 586        unsigned int size, hole, start;
 587
  588        /* Most BPF filters are really small, but if some of them
 589         * fill a page, allow at least 128 extra bytes to insert a
 590         * random section of illegal instructions.
 591         */
 592        size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
 593        hdr = module_alloc(size);
 594        if (hdr == NULL)
 595                return NULL;
 596
 597        /* Fill space with illegal/arch-dep instructions. */
 598        bpf_fill_ill_insns(hdr, size);
 599
 600        hdr->pages = size / PAGE_SIZE;
 601        hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
 602                     PAGE_SIZE - sizeof(*hdr));
 603        start = (get_random_int() % hole) & ~(alignment - 1);
 604
 605        /* Leave a random number of instructions before BPF code. */
 606        *image_ptr = &hdr->image[start];
 607
 608        return hdr;
 609}
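/* Rough sizing example: for proglen = 3000 and the small header struct,
 * 3000 + sizeof(*hdr) + 128 rounds up to a single 4K page, so hdr->pages
 * is 1. The hole is then roughly 1K and the image starts at a random,
 * alignment-masked offset inside it, with everything else left as the
 * illegal instructions written by bpf_fill_ill_insns(). That randomizes
 * where the executable bytes sit within the allocation.
 */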
 610
 611void bpf_jit_binary_free(struct bpf_binary_header *hdr)
 612{
 613        module_memfree(hdr);
 614}
 615
 616/* This symbol is only overridden by archs that have different
  617 * requirements than the usual eBPF JITs, e.g. when they only
  618 * implement a cBPF JIT, do not set images read-only, etc.
 619 */
 620void __weak bpf_jit_free(struct bpf_prog *fp)
 621{
 622        if (fp->jited) {
 623                struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
 624
 625                bpf_jit_binary_unlock_ro(hdr);
 626                bpf_jit_binary_free(hdr);
 627
 628                WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
 629        }
 630
 631        bpf_prog_unlock_free(fp);
 632}
 633
 634static int bpf_jit_blind_insn(const struct bpf_insn *from,
 635                              const struct bpf_insn *aux,
 636                              struct bpf_insn *to_buff)
 637{
 638        struct bpf_insn *to = to_buff;
 639        u32 imm_rnd = get_random_int();
 640        s16 off;
 641
 642        BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
 643        BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
 644
 645        if (from->imm == 0 &&
 646            (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
 647             from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
 648                *to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
 649                goto out;
 650        }
 651
 652        switch (from->code) {
 653        case BPF_ALU | BPF_ADD | BPF_K:
 654        case BPF_ALU | BPF_SUB | BPF_K:
 655        case BPF_ALU | BPF_AND | BPF_K:
 656        case BPF_ALU | BPF_OR  | BPF_K:
 657        case BPF_ALU | BPF_XOR | BPF_K:
 658        case BPF_ALU | BPF_MUL | BPF_K:
 659        case BPF_ALU | BPF_MOV | BPF_K:
 660        case BPF_ALU | BPF_DIV | BPF_K:
 661        case BPF_ALU | BPF_MOD | BPF_K:
 662                *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
 663                *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
 664                *to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
 665                break;
 666
 667        case BPF_ALU64 | BPF_ADD | BPF_K:
 668        case BPF_ALU64 | BPF_SUB | BPF_K:
 669        case BPF_ALU64 | BPF_AND | BPF_K:
 670        case BPF_ALU64 | BPF_OR  | BPF_K:
 671        case BPF_ALU64 | BPF_XOR | BPF_K:
 672        case BPF_ALU64 | BPF_MUL | BPF_K:
 673        case BPF_ALU64 | BPF_MOV | BPF_K:
 674        case BPF_ALU64 | BPF_DIV | BPF_K:
 675        case BPF_ALU64 | BPF_MOD | BPF_K:
 676                *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
 677                *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
 678                *to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
 679                break;
 680
 681        case BPF_JMP | BPF_JEQ  | BPF_K:
 682        case BPF_JMP | BPF_JNE  | BPF_K:
 683        case BPF_JMP | BPF_JGT  | BPF_K:
 684        case BPF_JMP | BPF_JLT  | BPF_K:
 685        case BPF_JMP | BPF_JGE  | BPF_K:
 686        case BPF_JMP | BPF_JLE  | BPF_K:
 687        case BPF_JMP | BPF_JSGT | BPF_K:
 688        case BPF_JMP | BPF_JSLT | BPF_K:
 689        case BPF_JMP | BPF_JSGE | BPF_K:
 690        case BPF_JMP | BPF_JSLE | BPF_K:
 691        case BPF_JMP | BPF_JSET | BPF_K:
  692                /* Account for the extra offset in case of a backjump. */
 693                off = from->off;
 694                if (off < 0)
 695                        off -= 2;
 696                *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
 697                *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
 698                *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
 699                break;
 700
 701        case BPF_LD | BPF_IMM | BPF_DW:
 702                *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
 703                *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
 704                *to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
 705                *to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
 706                break;
 707        case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
 708                *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
 709                *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
 710                *to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
 711                break;
 712
 713        case BPF_ST | BPF_MEM | BPF_DW:
 714        case BPF_ST | BPF_MEM | BPF_W:
 715        case BPF_ST | BPF_MEM | BPF_H:
 716        case BPF_ST | BPF_MEM | BPF_B:
 717                *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
 718                *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
 719                *to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
 720                break;
 721        }
 722out:
 723        return to - to_buff;
 724}
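/* Example of the rewrite above: with blinding active, an instruction like
 *
 *	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x12345678)
 *
 * is emitted as
 *
 *	BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0x12345678 ^ imm_rnd);
 *	BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
 *	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_AX);
 *
 * so a user-controlled constant never appears literally in the JITed
 * image, which defeats JIT spraying of crafted immediates. imm_rnd is a
 * fresh random value for every rewritten instruction.
 */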
 725
 726static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
 727                                              gfp_t gfp_extra_flags)
 728{
 729        gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
 730        struct bpf_prog *fp;
 731
 732        fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
 733        if (fp != NULL) {
 734                /* aux->prog still points to the fp_other one, so
 735                 * when promoting the clone to the real program,
 736                 * this still needs to be adapted.
 737                 */
 738                memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
 739        }
 740
 741        return fp;
 742}
 743
 744static void bpf_prog_clone_free(struct bpf_prog *fp)
 745{
 746        /* aux was stolen by the other clone, so we cannot free
 747         * it from this path! It will be freed eventually by the
 748         * other program on release.
 749         *
 750         * At this point, we don't need a deferred release since
 751         * clone is guaranteed to not be locked.
 752         */
 753        fp->aux = NULL;
 754        __bpf_prog_free(fp);
 755}
 756
 757void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
 758{
 759        /* We have to repoint aux->prog to self, as we don't
 760         * know whether fp here is the clone or the original.
 761         */
 762        fp->aux->prog = fp;
 763        bpf_prog_clone_free(fp_other);
 764}
 765
 766struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
 767{
 768        struct bpf_insn insn_buff[16], aux[2];
 769        struct bpf_prog *clone, *tmp;
 770        int insn_delta, insn_cnt;
 771        struct bpf_insn *insn;
 772        int i, rewritten;
 773
 774        if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
 775                return prog;
 776
 777        clone = bpf_prog_clone_create(prog, GFP_USER);
 778        if (!clone)
 779                return ERR_PTR(-ENOMEM);
 780
 781        insn_cnt = clone->len;
 782        insn = clone->insnsi;
 783
 784        for (i = 0; i < insn_cnt; i++, insn++) {
 785                /* We temporarily need to hold the original ld64 insn
 786                 * so that we can still access the first part in the
 787                 * second blinding run.
 788                 */
 789                if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
 790                    insn[1].code == 0)
 791                        memcpy(aux, insn, sizeof(aux));
 792
 793                rewritten = bpf_jit_blind_insn(insn, aux, insn_buff);
 794                if (!rewritten)
 795                        continue;
 796
 797                tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
 798                if (!tmp) {
 799                        /* Patching may have repointed aux->prog during
 800                         * realloc from the original one, so we need to
 801                         * fix it up here on error.
 802                         */
 803                        bpf_jit_prog_release_other(prog, clone);
 804                        return ERR_PTR(-ENOMEM);
 805                }
 806
 807                clone = tmp;
 808                insn_delta = rewritten - 1;
 809
 810                /* Walk new program and skip insns we just inserted. */
 811                insn = clone->insnsi + i + insn_delta;
 812                insn_cnt += insn_delta;
 813                i        += insn_delta;
 814        }
 815
 816        clone->blinded = 1;
 817        return clone;
 818}
 819#endif /* CONFIG_BPF_JIT */
 820
 821/* Base function for offset calculation. Needs to go into .text section,
 822 * therefore keeping it non-static as well; will also be used by JITs
 823 * anyway later on, so do not let the compiler omit it. This also needs
 824 * to go into kallsyms for correlation from e.g. bpftool, so naming
 825 * must not change.
 826 */
 827noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 828{
 829        return 0;
 830}
 831EXPORT_SYMBOL_GPL(__bpf_call_base);
 832
 833/* All UAPI available opcodes. */
 834#define BPF_INSN_MAP(INSN_2, INSN_3)            \
 835        /* 32 bit ALU operations. */            \
 836        /*   Register based. */                 \
 837        INSN_3(ALU, ADD, X),                    \
 838        INSN_3(ALU, SUB, X),                    \
 839        INSN_3(ALU, AND, X),                    \
 840        INSN_3(ALU, OR,  X),                    \
 841        INSN_3(ALU, LSH, X),                    \
 842        INSN_3(ALU, RSH, X),                    \
 843        INSN_3(ALU, XOR, X),                    \
 844        INSN_3(ALU, MUL, X),                    \
 845        INSN_3(ALU, MOV, X),                    \
 846        INSN_3(ALU, DIV, X),                    \
 847        INSN_3(ALU, MOD, X),                    \
 848        INSN_2(ALU, NEG),                       \
 849        INSN_3(ALU, END, TO_BE),                \
 850        INSN_3(ALU, END, TO_LE),                \
 851        /*   Immediate based. */                \
 852        INSN_3(ALU, ADD, K),                    \
 853        INSN_3(ALU, SUB, K),                    \
 854        INSN_3(ALU, AND, K),                    \
 855        INSN_3(ALU, OR,  K),                    \
 856        INSN_3(ALU, LSH, K),                    \
 857        INSN_3(ALU, RSH, K),                    \
 858        INSN_3(ALU, XOR, K),                    \
 859        INSN_3(ALU, MUL, K),                    \
 860        INSN_3(ALU, MOV, K),                    \
 861        INSN_3(ALU, DIV, K),                    \
 862        INSN_3(ALU, MOD, K),                    \
 863        /* 64 bit ALU operations. */            \
 864        /*   Register based. */                 \
 865        INSN_3(ALU64, ADD,  X),                 \
 866        INSN_3(ALU64, SUB,  X),                 \
 867        INSN_3(ALU64, AND,  X),                 \
 868        INSN_3(ALU64, OR,   X),                 \
 869        INSN_3(ALU64, LSH,  X),                 \
 870        INSN_3(ALU64, RSH,  X),                 \
 871        INSN_3(ALU64, XOR,  X),                 \
 872        INSN_3(ALU64, MUL,  X),                 \
 873        INSN_3(ALU64, MOV,  X),                 \
 874        INSN_3(ALU64, ARSH, X),                 \
 875        INSN_3(ALU64, DIV,  X),                 \
 876        INSN_3(ALU64, MOD,  X),                 \
 877        INSN_2(ALU64, NEG),                     \
 878        /*   Immediate based. */                \
 879        INSN_3(ALU64, ADD,  K),                 \
 880        INSN_3(ALU64, SUB,  K),                 \
 881        INSN_3(ALU64, AND,  K),                 \
 882        INSN_3(ALU64, OR,   K),                 \
 883        INSN_3(ALU64, LSH,  K),                 \
 884        INSN_3(ALU64, RSH,  K),                 \
 885        INSN_3(ALU64, XOR,  K),                 \
 886        INSN_3(ALU64, MUL,  K),                 \
 887        INSN_3(ALU64, MOV,  K),                 \
 888        INSN_3(ALU64, ARSH, K),                 \
 889        INSN_3(ALU64, DIV,  K),                 \
 890        INSN_3(ALU64, MOD,  K),                 \
 891        /* Call instruction. */                 \
 892        INSN_2(JMP, CALL),                      \
 893        /* Exit instruction. */                 \
 894        INSN_2(JMP, EXIT),                      \
 895        /* Jump instructions. */                \
 896        /*   Register based. */                 \
 897        INSN_3(JMP, JEQ,  X),                   \
 898        INSN_3(JMP, JNE,  X),                   \
 899        INSN_3(JMP, JGT,  X),                   \
 900        INSN_3(JMP, JLT,  X),                   \
 901        INSN_3(JMP, JGE,  X),                   \
 902        INSN_3(JMP, JLE,  X),                   \
 903        INSN_3(JMP, JSGT, X),                   \
 904        INSN_3(JMP, JSLT, X),                   \
 905        INSN_3(JMP, JSGE, X),                   \
 906        INSN_3(JMP, JSLE, X),                   \
 907        INSN_3(JMP, JSET, X),                   \
 908        /*   Immediate based. */                \
 909        INSN_3(JMP, JEQ,  K),                   \
 910        INSN_3(JMP, JNE,  K),                   \
 911        INSN_3(JMP, JGT,  K),                   \
 912        INSN_3(JMP, JLT,  K),                   \
 913        INSN_3(JMP, JGE,  K),                   \
 914        INSN_3(JMP, JLE,  K),                   \
 915        INSN_3(JMP, JSGT, K),                   \
 916        INSN_3(JMP, JSLT, K),                   \
 917        INSN_3(JMP, JSGE, K),                   \
 918        INSN_3(JMP, JSLE, K),                   \
 919        INSN_3(JMP, JSET, K),                   \
 920        INSN_2(JMP, JA),                        \
 921        /* Store instructions. */               \
 922        /*   Register based. */                 \
 923        INSN_3(STX, MEM,  B),                   \
 924        INSN_3(STX, MEM,  H),                   \
 925        INSN_3(STX, MEM,  W),                   \
 926        INSN_3(STX, MEM,  DW),                  \
 927        INSN_3(STX, XADD, W),                   \
 928        INSN_3(STX, XADD, DW),                  \
 929        /*   Immediate based. */                \
 930        INSN_3(ST, MEM, B),                     \
 931        INSN_3(ST, MEM, H),                     \
 932        INSN_3(ST, MEM, W),                     \
 933        INSN_3(ST, MEM, DW),                    \
 934        /* Load instructions. */                \
 935        /*   Register based. */                 \
 936        INSN_3(LDX, MEM, B),                    \
 937        INSN_3(LDX, MEM, H),                    \
 938        INSN_3(LDX, MEM, W),                    \
 939        INSN_3(LDX, MEM, DW),                   \
 940        /*   Immediate based. */                \
 941        INSN_3(LD, IMM, DW)
 942
 943bool bpf_opcode_in_insntable(u8 code)
 944{
 945#define BPF_INSN_2_TBL(x, y)    [BPF_##x | BPF_##y] = true
 946#define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
 947        static const bool public_insntable[256] = {
 948                [0 ... 255] = false,
 949                /* Now overwrite non-defaults ... */
 950                BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
 951                /* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
 952                [BPF_LD | BPF_ABS | BPF_B] = true,
 953                [BPF_LD | BPF_ABS | BPF_H] = true,
 954                [BPF_LD | BPF_ABS | BPF_W] = true,
 955                [BPF_LD | BPF_IND | BPF_B] = true,
 956                [BPF_LD | BPF_IND | BPF_H] = true,
 957                [BPF_LD | BPF_IND | BPF_W] = true,
 958        };
 959#undef BPF_INSN_3_TBL
 960#undef BPF_INSN_2_TBL
 961        return public_insntable[code];
 962}
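/* The LD_ABS/LD_IND opcodes listed explicitly above are accepted from
 * user space but never executed in this form: the verifier rewrites them
 * into native eBPF sequences, which is why they appear in
 * public_insntable[] yet have no handler in the interpreter's jumptable
 * below.
 */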
 963
 964#ifndef CONFIG_BPF_JIT_ALWAYS_ON
  965/**
  966 *      ___bpf_prog_run - run eBPF program on a given context
  967 *      @regs: is the array of MAX_BPF_REG eBPF pseudo-registers
  968 *      @insn: is the array of eBPF instructions
  969 *      @stack: is the eBPF storage stack
  970 * Decode and execute eBPF instructions.
  971 */
 972static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
 973{
 974        u64 tmp;
 975#define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
 976#define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
 977        static const void *jumptable[256] = {
 978                [0 ... 255] = &&default_label,
 979                /* Now overwrite non-defaults ... */
 980                BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
 981                /* Non-UAPI available opcodes. */
 982                [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
 983                [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
 984        };
 985#undef BPF_INSN_3_LBL
 986#undef BPF_INSN_2_LBL
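        /* Dispatch uses the GCC labels-as-values extension: each opcode
         * byte indexes jumptable[] to its handler label, select_insn does
         * a computed goto through it, and any opcode not filled in from
         * BPF_INSN_MAP() ends up at default_label. The jump table is also
         * why this function is marked STACK_FRAME_NON_STANDARD() further
         * down.
         */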
 987        u32 tail_call_cnt = 0;
 988
 989#define CONT     ({ insn++; goto select_insn; })
 990#define CONT_JMP ({ insn++; goto select_insn; })
 991
 992select_insn:
 993        goto *jumptable[insn->code];
 994
 995        /* ALU */
 996#define ALU(OPCODE, OP)                 \
 997        ALU64_##OPCODE##_X:             \
 998                DST = DST OP SRC;       \
 999                CONT;                   \
1000        ALU_##OPCODE##_X:               \
1001                DST = (u32) DST OP (u32) SRC;   \
1002                CONT;                   \
1003        ALU64_##OPCODE##_K:             \
1004                DST = DST OP IMM;               \
1005                CONT;                   \
1006        ALU_##OPCODE##_K:               \
1007                DST = (u32) DST OP (u32) IMM;   \
1008                CONT;
1009
1010        ALU(ADD,  +)
1011        ALU(SUB,  -)
1012        ALU(AND,  &)
1013        ALU(OR,   |)
1014        ALU(LSH, <<)
1015        ALU(RSH, >>)
1016        ALU(XOR,  ^)
1017        ALU(MUL,  *)
1018#undef ALU
1019        ALU_NEG:
1020                DST = (u32) -DST;
1021                CONT;
1022        ALU64_NEG:
1023                DST = -DST;
1024                CONT;
1025        ALU_MOV_X:
1026                DST = (u32) SRC;
1027                CONT;
1028        ALU_MOV_K:
1029                DST = (u32) IMM;
1030                CONT;
1031        ALU64_MOV_X:
1032                DST = SRC;
1033                CONT;
1034        ALU64_MOV_K:
1035                DST = IMM;
1036                CONT;
1037        LD_IMM_DW:
1038                DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
1039                insn++;
1040                CONT;
1041        ALU64_ARSH_X:
1042                (*(s64 *) &DST) >>= SRC;
1043                CONT;
1044        ALU64_ARSH_K:
1045                (*(s64 *) &DST) >>= IMM;
1046                CONT;
1047        ALU64_MOD_X:
1048                div64_u64_rem(DST, SRC, &tmp);
1049                DST = tmp;
1050                CONT;
1051        ALU_MOD_X:
1052                tmp = (u32) DST;
1053                DST = do_div(tmp, (u32) SRC);
1054                CONT;
1055        ALU64_MOD_K:
1056                div64_u64_rem(DST, IMM, &tmp);
1057                DST = tmp;
1058                CONT;
1059        ALU_MOD_K:
1060                tmp = (u32) DST;
1061                DST = do_div(tmp, (u32) IMM);
1062                CONT;
1063        ALU64_DIV_X:
1064                DST = div64_u64(DST, SRC);
1065                CONT;
1066        ALU_DIV_X:
1067                tmp = (u32) DST;
1068                do_div(tmp, (u32) SRC);
1069                DST = (u32) tmp;
1070                CONT;
1071        ALU64_DIV_K:
1072                DST = div64_u64(DST, IMM);
1073                CONT;
1074        ALU_DIV_K:
1075                tmp = (u32) DST;
1076                do_div(tmp, (u32) IMM);
1077                DST = (u32) tmp;
1078                CONT;
1079        ALU_END_TO_BE:
1080                switch (IMM) {
1081                case 16:
1082                        DST = (__force u16) cpu_to_be16(DST);
1083                        break;
1084                case 32:
1085                        DST = (__force u32) cpu_to_be32(DST);
1086                        break;
1087                case 64:
1088                        DST = (__force u64) cpu_to_be64(DST);
1089                        break;
1090                }
1091                CONT;
1092        ALU_END_TO_LE:
1093                switch (IMM) {
1094                case 16:
1095                        DST = (__force u16) cpu_to_le16(DST);
1096                        break;
1097                case 32:
1098                        DST = (__force u32) cpu_to_le32(DST);
1099                        break;
1100                case 64:
1101                        DST = (__force u64) cpu_to_le64(DST);
1102                        break;
1103                }
1104                CONT;
1105
1106        /* CALL */
1107        JMP_CALL:
1108                /* Function call scratches BPF_R1-BPF_R5 registers,
1109                 * preserves BPF_R6-BPF_R9, and stores return value
1110                 * into BPF_R0.
1111                 */
1112                BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
1113                                                       BPF_R4, BPF_R5);
1114                CONT;
1115
1116        JMP_CALL_ARGS:
1117                BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
1118                                                            BPF_R3, BPF_R4,
1119                                                            BPF_R5,
1120                                                            insn + insn->off + 1);
1121                CONT;
1122
1123        JMP_TAIL_CALL: {
1124                struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
1125                struct bpf_array *array = container_of(map, struct bpf_array, map);
1126                struct bpf_prog *prog;
1127                u32 index = BPF_R3;
1128
1129                if (unlikely(index >= array->map.max_entries))
1130                        goto out;
1131                if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
1132                        goto out;
1133
1134                tail_call_cnt++;
1135
1136                prog = READ_ONCE(array->ptrs[index]);
1137                if (!prog)
1138                        goto out;
1139
1140                /* ARG1 at this point is guaranteed to point to CTX from
1141                 * the verifier side due to the fact that the tail call is
 1142                 * handled like a helper, that is, bpf_tail_call_proto,
1143                 * where arg1_type is ARG_PTR_TO_CTX.
1144                 */
1145                insn = prog->insnsi;
1146                goto select_insn;
1147out:
1148                CONT;
1149        }
1150        /* JMP */
1151        JMP_JA:
1152                insn += insn->off;
1153                CONT;
1154        JMP_JEQ_X:
1155                if (DST == SRC) {
1156                        insn += insn->off;
1157                        CONT_JMP;
1158                }
1159                CONT;
1160        JMP_JEQ_K:
1161                if (DST == IMM) {
1162                        insn += insn->off;
1163                        CONT_JMP;
1164                }
1165                CONT;
1166        JMP_JNE_X:
1167                if (DST != SRC) {
1168                        insn += insn->off;
1169                        CONT_JMP;
1170                }
1171                CONT;
1172        JMP_JNE_K:
1173                if (DST != IMM) {
1174                        insn += insn->off;
1175                        CONT_JMP;
1176                }
1177                CONT;
1178        JMP_JGT_X:
1179                if (DST > SRC) {
1180                        insn += insn->off;
1181                        CONT_JMP;
1182                }
1183                CONT;
1184        JMP_JGT_K:
1185                if (DST > IMM) {
1186                        insn += insn->off;
1187                        CONT_JMP;
1188                }
1189                CONT;
1190        JMP_JLT_X:
1191                if (DST < SRC) {
1192                        insn += insn->off;
1193                        CONT_JMP;
1194                }
1195                CONT;
1196        JMP_JLT_K:
1197                if (DST < IMM) {
1198                        insn += insn->off;
1199                        CONT_JMP;
1200                }
1201                CONT;
1202        JMP_JGE_X:
1203                if (DST >= SRC) {
1204                        insn += insn->off;
1205                        CONT_JMP;
1206                }
1207                CONT;
1208        JMP_JGE_K:
1209                if (DST >= IMM) {
1210                        insn += insn->off;
1211                        CONT_JMP;
1212                }
1213                CONT;
1214        JMP_JLE_X:
1215                if (DST <= SRC) {
1216                        insn += insn->off;
1217                        CONT_JMP;
1218                }
1219                CONT;
1220        JMP_JLE_K:
1221                if (DST <= IMM) {
1222                        insn += insn->off;
1223                        CONT_JMP;
1224                }
1225                CONT;
1226        JMP_JSGT_X:
1227                if (((s64) DST) > ((s64) SRC)) {
1228                        insn += insn->off;
1229                        CONT_JMP;
1230                }
1231                CONT;
1232        JMP_JSGT_K:
1233                if (((s64) DST) > ((s64) IMM)) {
1234                        insn += insn->off;
1235                        CONT_JMP;
1236                }
1237                CONT;
1238        JMP_JSLT_X:
1239                if (((s64) DST) < ((s64) SRC)) {
1240                        insn += insn->off;
1241                        CONT_JMP;
1242                }
1243                CONT;
1244        JMP_JSLT_K:
1245                if (((s64) DST) < ((s64) IMM)) {
1246                        insn += insn->off;
1247                        CONT_JMP;
1248                }
1249                CONT;
1250        JMP_JSGE_X:
1251                if (((s64) DST) >= ((s64) SRC)) {
1252                        insn += insn->off;
1253                        CONT_JMP;
1254                }
1255                CONT;
1256        JMP_JSGE_K:
1257                if (((s64) DST) >= ((s64) IMM)) {
1258                        insn += insn->off;
1259                        CONT_JMP;
1260                }
1261                CONT;
1262        JMP_JSLE_X:
1263                if (((s64) DST) <= ((s64) SRC)) {
1264                        insn += insn->off;
1265                        CONT_JMP;
1266                }
1267                CONT;
1268        JMP_JSLE_K:
1269                if (((s64) DST) <= ((s64) IMM)) {
1270                        insn += insn->off;
1271                        CONT_JMP;
1272                }
1273                CONT;
1274        JMP_JSET_X:
1275                if (DST & SRC) {
1276                        insn += insn->off;
1277                        CONT_JMP;
1278                }
1279                CONT;
1280        JMP_JSET_K:
1281                if (DST & IMM) {
1282                        insn += insn->off;
1283                        CONT_JMP;
1284                }
1285                CONT;
1286        JMP_EXIT:
1287                return BPF_R0;
1288
 1289        /* STX, ST and LDX */
1290#define LDST(SIZEOP, SIZE)                                              \
1291        STX_MEM_##SIZEOP:                                               \
1292                *(SIZE *)(unsigned long) (DST + insn->off) = SRC;       \
1293                CONT;                                                   \
1294        ST_MEM_##SIZEOP:                                                \
1295                *(SIZE *)(unsigned long) (DST + insn->off) = IMM;       \
1296                CONT;                                                   \
1297        LDX_MEM_##SIZEOP:                                               \
1298                DST = *(SIZE *)(unsigned long) (SRC + insn->off);       \
1299                CONT;
1300
1301        LDST(B,   u8)
1302        LDST(H,  u16)
1303        LDST(W,  u32)
1304        LDST(DW, u64)
1305#undef LDST
1306        STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
1307                atomic_add((u32) SRC, (atomic_t *)(unsigned long)
1308                           (DST + insn->off));
1309                CONT;
1310        STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
1311                atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
1312                             (DST + insn->off));
1313                CONT;
1314
1315        default_label:
1316                /* If we ever reach this, we have a bug somewhere. Die hard here
1317                 * instead of just returning 0; we could be somewhere in a subprog,
1318                 * so execution could continue otherwise which we do /not/ want.
1319                 *
1320                 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
1321                 */
1322                pr_warn("BPF interpreter: unknown opcode %02x\n", insn->code);
1323                BUG_ON(1);
1324                return 0;
1325}
1326STACK_FRAME_NON_STANDARD(___bpf_prog_run); /* jump table */
1327
1328#define PROG_NAME(stack_size) __bpf_prog_run##stack_size
1329#define DEFINE_BPF_PROG_RUN(stack_size) \
1330static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
1331{ \
1332        u64 stack[stack_size / sizeof(u64)]; \
1333        u64 regs[MAX_BPF_REG]; \
1334\
1335        FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1336        ARG1 = (u64) (unsigned long) ctx; \
1337        return ___bpf_prog_run(regs, insn, stack); \
1338}
1339
1340#define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
1341#define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
1342static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
1343                                      const struct bpf_insn *insn) \
1344{ \
1345        u64 stack[stack_size / sizeof(u64)]; \
1346        u64 regs[MAX_BPF_REG]; \
1347\
1348        FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1349        BPF_R1 = r1; \
1350        BPF_R2 = r2; \
1351        BPF_R3 = r3; \
1352        BPF_R4 = r4; \
1353        BPF_R5 = r5; \
1354        return ___bpf_prog_run(regs, insn, stack); \
1355}
1356
1357#define EVAL1(FN, X) FN(X)
1358#define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
1359#define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
1360#define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
1361#define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
1362#define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
1363
1364EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
1365EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
1366EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
1367
1368EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
1369EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
1370EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
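/* The EVAL chains above stamp out one interpreter per 32-byte step of
 * stack size: __bpf_prog_run32() ... __bpf_prog_run512() plus the
 * __bpf_prog_run_args<N>() variants used for BPF-to-BPF calls. They
 * differ only in the size of their on-stack BPF stack array; a program
 * whose verifier-computed stack_depth is, say, 40 bytes ends up on the
 * 64-byte variant.
 */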
1371
1372#define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
1373
1374static unsigned int (*interpreters[])(const void *ctx,
1375                                      const struct bpf_insn *insn) = {
1376EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1377EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1378EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1379};
1380#undef PROG_NAME_LIST
1381#define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
1382static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
1383                                  const struct bpf_insn *insn) = {
1384EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1385EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1386EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1387};
1388#undef PROG_NAME_LIST
1389
1390void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
1391{
1392        stack_depth = max_t(u32, stack_depth, 1);
1393        insn->off = (s16) insn->imm;
1394        insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
1395                __bpf_call_base_args;
1396        insn->code = BPF_JMP | BPF_CALL_ARGS;
1397}
1398
1399#else
1400static unsigned int __bpf_prog_ret0_warn(const void *ctx,
1401                                         const struct bpf_insn *insn)
1402{
1403        /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
1404         * is not working properly, so warn about it!
1405         */
1406        WARN_ON_ONCE(1);
1407        return 0;
1408}
1409#endif
1410
1411bool bpf_prog_array_compatible(struct bpf_array *array,
1412                               const struct bpf_prog *fp)
1413{
1414        if (fp->kprobe_override)
1415                return false;
1416
1417        if (!array->owner_prog_type) {
1418                /* There's no owner yet where we could check for
1419                 * compatibility.
1420                 */
1421                array->owner_prog_type = fp->type;
1422                array->owner_jited = fp->jited;
1423
1424                return true;
1425        }
1426
1427        return array->owner_prog_type == fp->type &&
1428               array->owner_jited == fp->jited;
1429}
1430
1431static int bpf_check_tail_call(const struct bpf_prog *fp)
1432{
1433        struct bpf_prog_aux *aux = fp->aux;
1434        int i;
1435
1436        for (i = 0; i < aux->used_map_cnt; i++) {
1437                struct bpf_map *map = aux->used_maps[i];
1438                struct bpf_array *array;
1439
1440                if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
1441                        continue;
1442
1443                array = container_of(map, struct bpf_array, map);
1444                if (!bpf_prog_array_compatible(array, fp))
1445                        return -EINVAL;
1446        }
1447
1448        return 0;
1449}
1450
1451static void bpf_prog_select_func(struct bpf_prog *fp)
1452{
1453#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1454        u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
1455
1456        fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
1457#else
1458        fp->bpf_func = __bpf_prog_ret0_warn;
1459#endif
1460}
1461
1462/**
1463 *      bpf_prog_select_runtime - select exec runtime for BPF program
1464 *      @fp: bpf_prog populated with internal BPF program
1465 *      @err: pointer to error variable
1466 *
1467 * Try to JIT eBPF program, if JIT is not available, use interpreter.
1468 * The BPF program will be executed via BPF_PROG_RUN() macro.
1469 */
1470struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
1471{
1472        /* In case of BPF to BPF calls, verifier did all the prep
 1473         * work with regard to JITing, etc.
1474         */
1475        if (fp->bpf_func)
1476                goto finalize;
1477
1478        bpf_prog_select_func(fp);
1479
1480        /* eBPF JITs can rewrite the program in case constant
1481         * blinding is active. However, in case of error during
1482         * blinding, bpf_int_jit_compile() must always return a
1483         * valid program, which in this case would simply not
 1484         * be JITed, but fall back to the interpreter.
1485         */
1486        if (!bpf_prog_is_dev_bound(fp->aux)) {
1487                fp = bpf_int_jit_compile(fp);
1488#ifdef CONFIG_BPF_JIT_ALWAYS_ON
1489                if (!fp->jited) {
1490                        *err = -ENOTSUPP;
1491                        return fp;
1492                }
1493#endif
1494        } else {
1495                *err = bpf_prog_offload_compile(fp);
1496                if (*err)
1497                        return fp;
1498        }
1499
1500finalize:
1501        bpf_prog_lock_ro(fp);
1502
1503        /* The tail call compatibility check can only be done at
1504         * this late stage as we need to determine whether we deal
1505         * with JITed or non-JITed program concatenations, and not
1506         * all eBPF JITs might immediately support all features.
1507         */
1508        *err = bpf_check_tail_call(fp);
1509
1510        return fp;
1511}
1512EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
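
/* Typical usage, as sketched from the BPF_PROG_LOAD path (illustrative,
 * not part of this file):
 *
 *	prog = bpf_prog_select_runtime(prog, &err);
 *	if (err < 0)
 *		goto free_used_maps;
 *
 * Note that the returned prog may differ from the one passed in, e.g.
 * when the JIT had to clone the program for constant blinding.
 */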
1513
1514static unsigned int __bpf_prog_ret1(const void *ctx,
1515                                    const struct bpf_insn *insn)
1516{
1517        return 1;
1518}
1519
1520static struct bpf_prog_dummy {
1521        struct bpf_prog prog;
1522} dummy_bpf_prog = {
1523        .prog = {
1524                .bpf_func = __bpf_prog_ret1,
1525        },
1526};
1527
1528/* To avoid allocating an empty bpf_prog_array for cgroups that don't
1529 * have any bpf program attached, use one global 'empty_prog_array'.
1530 * It will not be modified by the caller of bpf_prog_array_alloc()
1531 * (since the caller requested prog_cnt == 0), but that pointer should
1532 * still be 'freed' via bpf_prog_array_free().
1533 */
1534static struct {
1535        struct bpf_prog_array hdr;
1536        struct bpf_prog *null_prog;
1537} empty_prog_array = {
1538        .null_prog = NULL,
1539};
1540
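/* Allocate a prog_array with room for 'prog_cnt' programs. One extra
 * item is reserved so that ->items is always terminated by a NULL prog,
 * which is what every iterator over the array relies on.
 */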
1541struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
1542{
1543        if (prog_cnt)
1544                return kzalloc(sizeof(struct bpf_prog_array) +
1545                               sizeof(struct bpf_prog_array_item) *
1546                               (prog_cnt + 1),
1547                               flags);
1548
1549        return &empty_prog_array.hdr;
1550}
1551
1552void bpf_prog_array_free(struct bpf_prog_array __rcu *progs)
1553{
1554        if (!progs ||
1555            progs == (struct bpf_prog_array __rcu *)&empty_prog_array.hdr)
1556                return;
1557        kfree_rcu(progs, rcu);
1558}
1559
1560int bpf_prog_array_length(struct bpf_prog_array __rcu *array)
1561{
1562        struct bpf_prog_array_item *item;
1563        u32 cnt = 0;
1564
1565        rcu_read_lock();
1566        item = rcu_dereference(array)->items;
1567        for (; item->prog; item++)
1568                if (item->prog != &dummy_bpf_prog.prog)
1569                        cnt++;
1570        rcu_read_unlock();
1571        return cnt;
1572}
1573
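/* Copy up to 'request_cnt' program ids into 'prog_ids', skipping dummy
 * entries. Returns true when more entries remain than could be copied,
 * which callers translate into -ENOSPC.
 */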
1575static bool bpf_prog_array_copy_core(struct bpf_prog_array __rcu *array,
1576                                     u32 *prog_ids,
1577                                     u32 request_cnt)
1578{
1579        struct bpf_prog_array_item *item;
1580        int i = 0;
1581
1582        item = rcu_dereference_check(array, 1)->items;
1583        for (; item->prog; item++) {
1584                if (item->prog == &dummy_bpf_prog.prog)
1585                        continue;
1586                prog_ids[i] = item->prog->aux->id;
1587                if (++i == request_cnt) {
1588                        item++;
1589                        break;
1590                }
1591        }
1592
1593        return !!(item->prog);
1594}
1595
1596int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *array,
1597                                __u32 __user *prog_ids, u32 cnt)
1598{
1599        unsigned long err = 0;
1600        bool nospc;
1601        u32 *ids;
1602
1603        /* Users of this function are doing:
1604         * cnt = bpf_prog_array_length();
1605         * if (cnt > 0)
1606         *     bpf_prog_array_copy_to_user(..., cnt);
1607         * so the kcalloc below doesn't need an extra cnt > 0 check.
1608         * However, bpf_prog_array_length() releases the RCU lock, and
1609         * the prog array could have been swapped with an empty or larger
1610         * array in the meantime, so always copy 'cnt' prog_ids to the
1611         * user. In a rare race the user will see zero prog_ids.
1612         */
1613        ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
1614        if (!ids)
1615                return -ENOMEM;
1616        rcu_read_lock();
1617        nospc = bpf_prog_array_copy_core(array, ids, cnt);
1618        rcu_read_unlock();
1619        err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
1620        kfree(ids);
1621        if (err)
1622                return -EFAULT;
1623        if (nospc)
1624                return -ENOSPC;
1625        return 0;
1626}
1627
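/* Instead of shrinking the array, replace old_prog with the dummy prog so
 * that concurrent RCU readers keep iterating over a NULL-terminated list
 * of unchanged length.
 */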
1628void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *array,
1629                                struct bpf_prog *old_prog)
1630{
1631        struct bpf_prog_array_item *item = array->items;
1632
1633        for (; item->prog; item++)
1634                if (item->prog == old_prog) {
1635                        WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
1636                        break;
1637                }
1638}
1639
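/* Build a new prog_array from 'old_array', dropping 'exclude_prog' and any
 * dummy entries and appending 'include_prog' at the end. Returns -EEXIST
 * if include_prog is already present, -ENOENT if exclude_prog was given
 * but not found, and sets *new_array to NULL (returning 0) when the result
 * would be empty.
 */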
1640int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
1641                        struct bpf_prog *exclude_prog,
1642                        struct bpf_prog *include_prog,
1643                        struct bpf_prog_array **new_array)
1644{
1645        int new_prog_cnt, carry_prog_cnt = 0;
1646        struct bpf_prog_array_item *existing;
1647        struct bpf_prog_array *array;
1648        bool found_exclude = false;
1649        int new_prog_idx = 0;
1650
1651        /* Figure out how many existing progs we need to carry over to
1652         * the new array.
1653         */
1654        if (old_array) {
1655                existing = old_array->items;
1656                for (; existing->prog; existing++) {
1657                        if (existing->prog == exclude_prog) {
1658                                found_exclude = true;
1659                                continue;
1660                        }
1661                        if (existing->prog != &dummy_bpf_prog.prog)
1662                                carry_prog_cnt++;
1663                        if (existing->prog == include_prog)
1664                                return -EEXIST;
1665                }
1666        }
1667
1668        if (exclude_prog && !found_exclude)
1669                return -ENOENT;
1670
1671        /* How many progs (not NULL) will be in the new array? */
1672        new_prog_cnt = carry_prog_cnt;
1673        if (include_prog)
1674                new_prog_cnt += 1;
1675
1676        /* Do we have any prog (not NULL) in the new array? */
1677        if (!new_prog_cnt) {
1678                *new_array = NULL;
1679                return 0;
1680        }
1681
1682        /* +1 as the end of prog_array is marked with NULL */
1683        array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
1684        if (!array)
1685                return -ENOMEM;
1686
1687        /* Fill in the new prog array */
1688        if (carry_prog_cnt) {
1689                existing = old_array->items;
1690                for (; existing->prog; existing++)
1691                        if (existing->prog != exclude_prog &&
1692                            existing->prog != &dummy_bpf_prog.prog) {
1693                                array->items[new_prog_idx++].prog =
1694                                        existing->prog;
1695                        }
1696        }
1697        if (include_prog)
1698                array->items[new_prog_idx++].prog = include_prog;
1699        array->items[new_prog_idx].prog = NULL;
1700        *new_array = array;
1701        return 0;
1702}
1703
1704int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
1705                             u32 *prog_ids, u32 request_cnt,
1706                             u32 *prog_cnt)
1707{
1708        u32 cnt = 0;
1709
1710        if (array)
1711                cnt = bpf_prog_array_length(array);
1712
1713        *prog_cnt = cnt;
1714
1715        /* return early if user requested only program count or nothing to copy */
1716        if (!request_cnt || !cnt)
1717                return 0;
1718
1719        /* this function is called under trace/bpf_trace.c: bpf_event_mutex */
1720        return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
1721                                                                     : 0;
1722}
1723
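/* Deferred half of bpf_prog_free(): runs from a workqueue so the final
 * teardown happens in process context. For multi-function programs each
 * JITed sub-program image is freed individually; the main program is then
 * released without going through bpf_jit_free() a second time.
 */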
1724static void bpf_prog_free_deferred(struct work_struct *work)
1725{
1726        struct bpf_prog_aux *aux;
1727        int i;
1728
1729        aux = container_of(work, struct bpf_prog_aux, work);
1730        if (bpf_prog_is_dev_bound(aux))
1731                bpf_prog_offload_destroy(aux->prog);
1732#ifdef CONFIG_PERF_EVENTS
1733        if (aux->prog->has_callchain_buf)
1734                put_callchain_buffers();
1735#endif
1736        for (i = 0; i < aux->func_cnt; i++)
1737                bpf_jit_free(aux->func[i]);
1738        if (aux->func_cnt) {
1739                kfree(aux->func);
1740                bpf_prog_unlock_free(aux->prog);
1741        } else {
1742                bpf_jit_free(aux->prog);
1743        }
1744}
1745
1746/* Free internal BPF program */
1747void bpf_prog_free(struct bpf_prog *fp)
1748{
1749        struct bpf_prog_aux *aux = fp->aux;
1750
1751        INIT_WORK(&aux->work, bpf_prog_free_deferred);
1752        schedule_work(&aux->work);
1753}
1754EXPORT_SYMBOL_GPL(bpf_prog_free);
1755
1756/* RNG for unprivileged user space, with state separate from prandom_u32(). */
1757static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
1758
1759void bpf_user_rnd_init_once(void)
1760{
1761        prandom_init_once(&bpf_user_rnd_state);
1762}
1763
1764BPF_CALL_0(bpf_user_rnd_u32)
1765{
1766        /* Should someone ever have the rather unwise idea to use some
1767         * of the registers passed into this function, then note that
1768         * this function is called from native eBPF and classic-to-eBPF
1769         * transformations. Register assignments from both sides differ,
1770         * e.g. classic BPF always sets up fn(ctx, A, X) here.
1771         */
1772        struct rnd_state *state;
1773        u32 res;
1774
1775        state = &get_cpu_var(bpf_user_rnd_state);
1776        res = prandom_u32_state(state);
1777        put_cpu_var(bpf_user_rnd_state);
1778
1779        return res;
1780}
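
/* bpf_user_rnd_u32() backs the BPF pseudo-random helper as well as the
 * classic BPF SKF_AD_RANDOM conversion. Its per-CPU state is kept apart
 * from the generic prandom_u32() pool, presumably so BPF users cannot
 * observe or influence the state used elsewhere in the kernel.
 */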
1781
1782/* Weak definitions of helper functions in case we don't have bpf syscall. */
1783const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
1784const struct bpf_func_proto bpf_map_update_elem_proto __weak;
1785const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
1786
1787const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
1788const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
1789const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
1790const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
1791
1792const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
1793const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
1794const struct bpf_func_proto bpf_get_current_comm_proto __weak;
1795const struct bpf_func_proto bpf_sock_map_update_proto __weak;
1796const struct bpf_func_proto bpf_sock_hash_update_proto __weak;
1797const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
1798const struct bpf_func_proto bpf_get_local_storage_proto __weak;
1799
1800const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
1801{
1802        return NULL;
1803}
1804
1805u64 __weak
1806bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
1807                 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
1808{
1809        return -ENOTSUPP;
1810}
1811EXPORT_SYMBOL_GPL(bpf_event_output);
1812
1813/* Always built-in helper functions. */
1814const struct bpf_func_proto bpf_tail_call_proto = {
1815        .func           = NULL,
1816        .gpl_only       = false,
1817        .ret_type       = RET_VOID,
1818        .arg1_type      = ARG_PTR_TO_CTX,
1819        .arg2_type      = ARG_CONST_MAP_PTR,
1820        .arg3_type      = ARG_ANYTHING,
1821};
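
/* Note: bpf_tail_call() intentionally has no .func above. The verifier
 * rewrites calls to this helper into a dedicated tail call instruction,
 * which the interpreter and the JITs handle directly.
 */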
1822
1823/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
1824 * JIT authors are encouraged to implement bpf_int_jit_compile() instead,
1825 * so that eBPF (and implicitly also cBPF) can get JITed.
1826 */
1827struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
1828{
1829        return prog;
1830}
1831
1832/* Stub for JITs that support eBPF. All cBPF code gets transformed into
1833 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
1834 */
1835void __weak bpf_jit_compile(struct bpf_prog *prog)
1836{
1837}
1838
1839bool __weak bpf_helper_changes_pkt_data(void *func)
1840{
1841        return false;
1842}
1843
1844/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
1845 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
1846 */
1847int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
1848                         int len)
1849{
1850        return -EFAULT;
1851}
1852
1853/* All definitions of tracepoints related to BPF. */
1854#define CREATE_TRACE_POINTS
1855#include <linux/bpf_trace.h>
1856
1857EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
1858