linux/kernel/bpf/core.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Linux Socket Filter - Kernel level socket filtering
   4 *
   5 * Based on the design of the Berkeley Packet Filter. The new
   6 * internal format has been designed by PLUMgrid:
   7 *
   8 *      Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
   9 *
  10 * Authors:
  11 *
  12 *      Jay Schulist <jschlst@samba.org>
  13 *      Alexei Starovoitov <ast@plumgrid.com>
  14 *      Daniel Borkmann <dborkman@redhat.com>
  15 *
  16 * Andi Kleen - Fix a few bad bugs and races.
  17 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
  18 */
  19
  20#include <uapi/linux/btf.h>
  21#include <linux/filter.h>
  22#include <linux/skbuff.h>
  23#include <linux/vmalloc.h>
  24#include <linux/random.h>
  25#include <linux/moduleloader.h>
  26#include <linux/bpf.h>
  27#include <linux/btf.h>
  28#include <linux/objtool.h>
  29#include <linux/rbtree_latch.h>
  30#include <linux/kallsyms.h>
  31#include <linux/rcupdate.h>
  32#include <linux/perf_event.h>
  33#include <linux/extable.h>
  34#include <linux/log2.h>
  35
  36#include <asm/barrier.h>
  37#include <asm/unaligned.h>
  38
  39/* Registers */
  40#define BPF_R0  regs[BPF_REG_0]
  41#define BPF_R1  regs[BPF_REG_1]
  42#define BPF_R2  regs[BPF_REG_2]
  43#define BPF_R3  regs[BPF_REG_3]
  44#define BPF_R4  regs[BPF_REG_4]
  45#define BPF_R5  regs[BPF_REG_5]
  46#define BPF_R6  regs[BPF_REG_6]
  47#define BPF_R7  regs[BPF_REG_7]
  48#define BPF_R8  regs[BPF_REG_8]
  49#define BPF_R9  regs[BPF_REG_9]
  50#define BPF_R10 regs[BPF_REG_10]
  51
  52/* Named registers */
  53#define DST     regs[insn->dst_reg]
  54#define SRC     regs[insn->src_reg]
  55#define FP      regs[BPF_REG_FP]
  56#define AX      regs[BPF_REG_AX]
  57#define ARG1    regs[BPF_REG_ARG1]
  58#define CTX     regs[BPF_REG_CTX]
  59#define IMM     insn->imm
  60
  61/* No hurry in this branch
  62 *
  63 * Exported for the bpf jit load helper.
  64 */
  65void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
  66{
  67        u8 *ptr = NULL;
  68
  69        if (k >= SKF_NET_OFF)
  70                ptr = skb_network_header(skb) + k - SKF_NET_OFF;
  71        else if (k >= SKF_LL_OFF)
  72                ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
  73
  74        if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
  75                return ptr;
  76
  77        return NULL;
  78}
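
/* Illustrative sketch (not part of the original file): how a classic-BPF
 * style negative offset maps onto the helper above.  The function name is
 * hypothetical; SKF_NET_OFF + 8 selects byte 8 relative to the network
 * header, i.e. the TTL field of an IPv4 packet.
 */
static inline int example_load_ipv4_ttl(const struct sk_buff *skb, u8 *ttl)
{
        u8 *p;

        p = bpf_internal_load_pointer_neg_helper(skb, SKF_NET_OFF + 8, 1);
        if (!p)
                return -EFAULT; /* offset not within the linear skb data */

        *ttl = *p;
        return 0;
}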
  79
  80struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
  81{
  82        gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
  83        struct bpf_prog_aux *aux;
  84        struct bpf_prog *fp;
  85
  86        size = round_up(size, PAGE_SIZE);
  87        fp = __vmalloc(size, gfp_flags);
  88        if (fp == NULL)
  89                return NULL;
  90
  91        aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT | gfp_extra_flags);
  92        if (aux == NULL) {
  93                vfree(fp);
  94                return NULL;
  95        }
  96        fp->active = alloc_percpu_gfp(int, GFP_KERNEL_ACCOUNT | gfp_extra_flags);
  97        if (!fp->active) {
  98                vfree(fp);
  99                kfree(aux);
 100                return NULL;
 101        }
 102
 103        fp->pages = size / PAGE_SIZE;
 104        fp->aux = aux;
 105        fp->aux->prog = fp;
 106        fp->jit_requested = ebpf_jit_enabled();
 107
 108        INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
 109        mutex_init(&fp->aux->used_maps_mutex);
 110        mutex_init(&fp->aux->dst_mutex);
 111
 112        return fp;
 113}
 114
 115struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
 116{
 117        gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
 118        struct bpf_prog *prog;
 119        int cpu;
 120
 121        prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
 122        if (!prog)
 123                return NULL;
 124
 125        prog->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
 126        if (!prog->stats) {
 127                free_percpu(prog->active);
 128                kfree(prog->aux);
 129                vfree(prog);
 130                return NULL;
 131        }
 132
 133        for_each_possible_cpu(cpu) {
 134                struct bpf_prog_stats *pstats;
 135
 136                pstats = per_cpu_ptr(prog->stats, cpu);
 137                u64_stats_init(&pstats->syncp);
 138        }
 139        return prog;
 140}
 141EXPORT_SYMBOL_GPL(bpf_prog_alloc);
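
/* Illustrative sketch (not part of the original file): the typical
 * allocate/populate pairing, similar to what the BPF_PROG_LOAD path does.
 * The function name and the insn source are hypothetical; on a later
 * failure the caller would undo the allocation via __bpf_prog_free().
 */
static inline struct bpf_prog *example_prog_from_insns(const struct bpf_insn *insns,
                                                       u32 len)
{
        struct bpf_prog *prog;

        prog = bpf_prog_alloc(bpf_prog_size(len), GFP_USER);
        if (!prog)
                return NULL;

        prog->len = len;
        memcpy(prog->insnsi, insns, bpf_prog_insn_size(prog));

        return prog;
}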
 142
 143int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
 144{
 145        if (!prog->aux->nr_linfo || !prog->jit_requested)
 146                return 0;
 147
 148        prog->aux->jited_linfo = kvcalloc(prog->aux->nr_linfo,
 149                                          sizeof(*prog->aux->jited_linfo),
 150                                          GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
 151        if (!prog->aux->jited_linfo)
 152                return -ENOMEM;
 153
 154        return 0;
 155}
 156
 157void bpf_prog_jit_attempt_done(struct bpf_prog *prog)
 158{
 159        if (prog->aux->jited_linfo &&
 160            (!prog->jited || !prog->aux->jited_linfo[0])) {
 161                kvfree(prog->aux->jited_linfo);
 162                prog->aux->jited_linfo = NULL;
 163        }
 164
 165        kfree(prog->aux->kfunc_tab);
 166        prog->aux->kfunc_tab = NULL;
 167}
 168
 169/* The JIT engine is responsible for providing an array
 170 * for the insn_off to jited_off mapping (insn_to_jit_off).
 171 *
 172 * The idx to this array is the insn_off.  Hence, the insn_off
 173 * here is relative to the prog itself instead of the main prog.
 174 * This array has one entry for each xlated bpf insn.
 175 *
 176 * jited_off is the byte off to the last byte of the jited insn.
 177 *
 178 * Hence, with
 179 * insn_start:
 180 *      The first bpf insn off of the prog.  The insn off
 181 *      here is relative to the main prog.
 182 *      e.g. if prog is a subprog, insn_start > 0
 183 * linfo_idx:
 184 *      The prog's idx to prog->aux->linfo and jited_linfo
 185 *
 186 * jited_linfo[linfo_idx] = prog->bpf_func
 187 *
 188 * For i > linfo_idx,
 189 *
 190 * jited_linfo[i] = prog->bpf_func +
 191 *      insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
 192 */
 193void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
 194                               const u32 *insn_to_jit_off)
 195{
 196        u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
 197        const struct bpf_line_info *linfo;
 198        void **jited_linfo;
 199
 200        if (!prog->aux->jited_linfo)
 201                /* Userspace did not provide linfo */
 202                return;
 203
 204        linfo_idx = prog->aux->linfo_idx;
 205        linfo = &prog->aux->linfo[linfo_idx];
 206        insn_start = linfo[0].insn_off;
 207        insn_end = insn_start + prog->len;
 208
 209        jited_linfo = &prog->aux->jited_linfo[linfo_idx];
 210        jited_linfo[0] = prog->bpf_func;
 211
 212        nr_linfo = prog->aux->nr_linfo - linfo_idx;
 213
 214        for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
 215                /* The verifier ensures that linfo[i].insn_off is
 216                 * strictly increasing
 217                 */
 218                jited_linfo[i] = prog->bpf_func +
 219                        insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
 220}
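
/* Worked example (illustrative, not part of the original source) of the
 * mapping above, with made-up numbers: a subprog whose first insn sits at
 * insn_off 10 of the main prog (insn_start = 10) and which has line info at
 * insn_off 10, 12 and 15, with insn_to_jit_off[] = { 4, 9, 13, 20, 24, ... }:
 *
 *   jited_linfo[0] = prog->bpf_func
 *   jited_linfo[1] = prog->bpf_func + insn_to_jit_off[12 - 10 - 1]
 *                  = prog->bpf_func + 9
 *   jited_linfo[2] = prog->bpf_func + insn_to_jit_off[15 - 10 - 1]
 *                  = prog->bpf_func + 24
 *
 * i.e. each entry is bpf_func plus the jited offset recorded for the insn
 * preceding the one that the line info refers to.
 */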
 221
 222struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
 223                                  gfp_t gfp_extra_flags)
 224{
 225        gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
 226        struct bpf_prog *fp;
 227        u32 pages;
 228
 229        size = round_up(size, PAGE_SIZE);
 230        pages = size / PAGE_SIZE;
 231        if (pages <= fp_old->pages)
 232                return fp_old;
 233
 234        fp = __vmalloc(size, gfp_flags);
 235        if (fp) {
 236                memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
 237                fp->pages = pages;
 238                fp->aux->prog = fp;
 239
 240                /* We keep fp->aux from fp_old around in the new
 241                 * reallocated structure.
 242                 */
 243                fp_old->aux = NULL;
 244                fp_old->stats = NULL;
 245                fp_old->active = NULL;
 246                __bpf_prog_free(fp_old);
 247        }
 248
 249        return fp;
 250}
 251
 252void __bpf_prog_free(struct bpf_prog *fp)
 253{
 254        if (fp->aux) {
 255                mutex_destroy(&fp->aux->used_maps_mutex);
 256                mutex_destroy(&fp->aux->dst_mutex);
 257                kfree(fp->aux->poke_tab);
 258                kfree(fp->aux);
 259        }
 260        free_percpu(fp->stats);
 261        free_percpu(fp->active);
 262        vfree(fp);
 263}
 264
 265int bpf_prog_calc_tag(struct bpf_prog *fp)
 266{
 267        const u32 bits_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
 268        u32 raw_size = bpf_prog_tag_scratch_size(fp);
 269        u32 digest[SHA1_DIGEST_WORDS];
 270        u32 ws[SHA1_WORKSPACE_WORDS];
 271        u32 i, bsize, psize, blocks;
 272        struct bpf_insn *dst;
 273        bool was_ld_map;
 274        u8 *raw, *todo;
 275        __be32 *result;
 276        __be64 *bits;
 277
 278        raw = vmalloc(raw_size);
 279        if (!raw)
 280                return -ENOMEM;
 281
 282        sha1_init(digest);
 283        memset(ws, 0, sizeof(ws));
 284
 285        /* We need to take out the map fds for the digest calculation,
 286         * since they are unstable from the user space side.
 287         */
 288        dst = (void *)raw;
 289        for (i = 0, was_ld_map = false; i < fp->len; i++) {
 290                dst[i] = fp->insnsi[i];
 291                if (!was_ld_map &&
 292                    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
 293                    (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
 294                     dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
 295                        was_ld_map = true;
 296                        dst[i].imm = 0;
 297                } else if (was_ld_map &&
 298                           dst[i].code == 0 &&
 299                           dst[i].dst_reg == 0 &&
 300                           dst[i].src_reg == 0 &&
 301                           dst[i].off == 0) {
 302                        was_ld_map = false;
 303                        dst[i].imm = 0;
 304                } else {
 305                        was_ld_map = false;
 306                }
 307        }
 308
 309        psize = bpf_prog_insn_size(fp);
 310        memset(&raw[psize], 0, raw_size - psize);
 311        raw[psize++] = 0x80;
 312
 313        bsize  = round_up(psize, SHA1_BLOCK_SIZE);
 314        blocks = bsize / SHA1_BLOCK_SIZE;
 315        todo   = raw;
 316        if (bsize - psize >= sizeof(__be64)) {
 317                bits = (__be64 *)(todo + bsize - sizeof(__be64));
 318        } else {
 319                bits = (__be64 *)(todo + bsize + bits_offset);
 320                blocks++;
 321        }
 322        *bits = cpu_to_be64((psize - 1) << 3);
 323
 324        while (blocks--) {
 325                sha1_transform(digest, todo, ws);
 326                todo += SHA1_BLOCK_SIZE;
 327        }
 328
 329        result = (__force __be32 *)digest;
 330        for (i = 0; i < SHA1_DIGEST_WORDS; i++)
 331                result[i] = cpu_to_be32(digest[i]);
 332        memcpy(fp->tag, result, sizeof(fp->tag));
 333
 334        vfree(raw);
 335        return 0;
 336}
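
/* Worked example (illustrative, not part of the original source) of the
 * padding done above: for a prog of two insns, psize = bpf_prog_insn_size(fp)
 * = 16 bytes.  After appending the 0x80 terminator psize is 17,
 * bsize = round_up(17, SHA1_BLOCK_SIZE) = 64, so a single 64-byte block is
 * hashed.  The last 8 bytes of that block hold the big-endian bit length of
 * the message, (17 - 1) << 3 = 128 bits, i.e. only the 16 insn bytes count
 * towards the length, not the padding.
 */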
 337
 338static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
 339                                s32 end_new, s32 curr, const bool probe_pass)
 340{
 341        const s64 imm_min = S32_MIN, imm_max = S32_MAX;
 342        s32 delta = end_new - end_old;
 343        s64 imm = insn->imm;
 344
 345        if (curr < pos && curr + imm + 1 >= end_old)
 346                imm += delta;
 347        else if (curr >= end_new && curr + imm + 1 < end_new)
 348                imm -= delta;
 349        if (imm < imm_min || imm > imm_max)
 350                return -ERANGE;
 351        if (!probe_pass)
 352                insn->imm = imm;
 353        return 0;
 354}
 355
 356static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
 357                                s32 end_new, s32 curr, const bool probe_pass)
 358{
 359        const s32 off_min = S16_MIN, off_max = S16_MAX;
 360        s32 delta = end_new - end_old;
 361        s32 off = insn->off;
 362
 363        if (curr < pos && curr + off + 1 >= end_old)
 364                off += delta;
 365        else if (curr >= end_new && curr + off + 1 < end_new)
 366                off -= delta;
 367        if (off < off_min || off > off_max)
 368                return -ERANGE;
 369        if (!probe_pass)
 370                insn->off = off;
 371        return 0;
 372}
 373
 374static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
 375                            s32 end_new, const bool probe_pass)
 376{
 377        u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
 378        struct bpf_insn *insn = prog->insnsi;
 379        int ret = 0;
 380
 381        for (i = 0; i < insn_cnt; i++, insn++) {
 382                u8 code;
 383
 384                /* In the probing pass we still operate on the original,
 385                 * unpatched image in order to check overflows before we
 386                 * do any other adjustments. Therefore skip the patchlet.
 387                 */
 388                if (probe_pass && i == pos) {
 389                        i = end_new;
 390                        insn = prog->insnsi + end_old;
 391                }
 392                code = insn->code;
 393                if ((BPF_CLASS(code) != BPF_JMP &&
 394                     BPF_CLASS(code) != BPF_JMP32) ||
 395                    BPF_OP(code) == BPF_EXIT)
 396                        continue;
 397                /* Adjust offset of jmps if we cross patch boundaries. */
 398                if (BPF_OP(code) == BPF_CALL) {
 399                        if (insn->src_reg != BPF_PSEUDO_CALL)
 400                                continue;
 401                        ret = bpf_adj_delta_to_imm(insn, pos, end_old,
 402                                                   end_new, i, probe_pass);
 403                } else {
 404                        ret = bpf_adj_delta_to_off(insn, pos, end_old,
 405                                                   end_new, i, probe_pass);
 406                }
 407                if (ret)
 408                        break;
 409        }
 410
 411        return ret;
 412}
 413
 414static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
 415{
 416        struct bpf_line_info *linfo;
 417        u32 i, nr_linfo;
 418
 419        nr_linfo = prog->aux->nr_linfo;
 420        if (!nr_linfo || !delta)
 421                return;
 422
 423        linfo = prog->aux->linfo;
 424
 425        for (i = 0; i < nr_linfo; i++)
 426                if (off < linfo[i].insn_off)
 427                        break;
 428
 429        /* Push all off < linfo[i].insn_off by delta */
 430        for (; i < nr_linfo; i++)
 431                linfo[i].insn_off += delta;
 432}
 433
 434struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
 435                                       const struct bpf_insn *patch, u32 len)
 436{
 437        u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
 438        const u32 cnt_max = S16_MAX;
 439        struct bpf_prog *prog_adj;
 440        int err;
 441
 442        /* Since our patchlet doesn't expand the image, we're done. */
 443        if (insn_delta == 0) {
 444                memcpy(prog->insnsi + off, patch, sizeof(*patch));
 445                return prog;
 446        }
 447
 448        insn_adj_cnt = prog->len + insn_delta;
 449
 450        /* Reject anything that would potentially let the insn->off
 451         * target overflow when we have excessive program expansions.
 452         * We need to probe for this before any reallocation, since
 453         * afterwards we may no longer be able to fail.
 454         */
 455        if (insn_adj_cnt > cnt_max &&
 456            (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))
 457                return ERR_PTR(err);
 458
 459        /* Several new instructions need to be inserted. Make room
 460         * for them. Likely, there's no need for a new allocation as the
 461         * last page could have large enough tailroom.
 462         */
 463        prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
 464                                    GFP_USER);
 465        if (!prog_adj)
 466                return ERR_PTR(-ENOMEM);
 467
 468        prog_adj->len = insn_adj_cnt;
 469
 470        /* Patching happens in 3 steps:
 471         *
 472         * 1) Move over tail of insnsi from next instruction onwards,
 473         *    so we can patch the single target insn with one or more
 474         *    new ones (patching is always from 1 to n insns, n > 0).
 475         * 2) Inject new instructions at the target location.
 476         * 3) Adjust branch offsets if necessary.
 477         */
 478        insn_rest = insn_adj_cnt - off - len;
 479
 480        memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
 481                sizeof(*patch) * insn_rest);
 482        memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
 483
 484        /* We are guaranteed not to fail at this point; otherwise the
 485         * ship has sailed and we could not revert to the original state
 486         * anyway. An offset overflow cannot happen at this point.
 487         */
 488        BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));
 489
 490        bpf_adj_linfo(prog_adj, off, insn_delta);
 491
 492        return prog_adj;
 493}
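
/* Illustrative sketch (not part of the original file): how a caller such as
 * the verifier expands a single insn at @off into a longer sequence.  The
 * patchlet itself and the function name are made up; the important part is
 * re-checking the returned prog, since the realloc may have moved it.
 */
static inline struct bpf_prog *example_expand_insn(struct bpf_prog *prog, u32 off)
{
        struct bpf_insn patch[] = {
                BPF_MOV64_REG(BPF_REG_AX, BPF_REG_1),
                BPF_ALU64_IMM(BPF_ADD, BPF_REG_AX, 8),
        };
        struct bpf_prog *new_prog;

        new_prog = bpf_patch_insn_single(prog, off, patch, ARRAY_SIZE(patch));
        if (IS_ERR(new_prog))
                return new_prog; /* the original prog is left untouched */

        /* On success the old prog may already have been freed. */
        return new_prog;
}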
 494
 495int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
 496{
 497        /* Branch offsets can't overflow when the program is shrinking, no need
 498         * to call bpf_adj_branches(..., true) here
 499         */
 500        memmove(prog->insnsi + off, prog->insnsi + off + cnt,
 501                sizeof(struct bpf_insn) * (prog->len - off - cnt));
 502        prog->len -= cnt;
 503
 504        return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
 505}
 506
 507static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
 508{
 509        int i;
 510
 511        for (i = 0; i < fp->aux->func_cnt; i++)
 512                bpf_prog_kallsyms_del(fp->aux->func[i]);
 513}
 514
 515void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
 516{
 517        bpf_prog_kallsyms_del_subprogs(fp);
 518        bpf_prog_kallsyms_del(fp);
 519}
 520
 521#ifdef CONFIG_BPF_JIT
 522/* All BPF JIT sysctl knobs here. */
 523int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
 524int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
 525int bpf_jit_harden   __read_mostly;
 526long bpf_jit_limit   __read_mostly;
 527
 528static void
 529bpf_prog_ksym_set_addr(struct bpf_prog *prog)
 530{
 531        const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
 532        unsigned long addr = (unsigned long)hdr;
 533
 534        WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
 535
 536        prog->aux->ksym.start = (unsigned long) prog->bpf_func;
 537        prog->aux->ksym.end   = addr + hdr->pages * PAGE_SIZE;
 538}
 539
 540static void
 541bpf_prog_ksym_set_name(struct bpf_prog *prog)
 542{
 543        char *sym = prog->aux->ksym.name;
 544        const char *end = sym + KSYM_NAME_LEN;
 545        const struct btf_type *type;
 546        const char *func_name;
 547
 548        BUILD_BUG_ON(sizeof("bpf_prog_") +
 549                     sizeof(prog->tag) * 2 +
 550                     /* name has been null terminated.
 551                      * We would need +1 for the '_' preceding
 552                      * the name.  However, the null character
 553                      * is double counted between the name and the
 554                      * sizeof("bpf_prog_") above, so we omit
 555                      * the +1 here.
 556                      */
 557                     sizeof(prog->aux->name) > KSYM_NAME_LEN);
 558
 559        sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
 560        sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));
 561
 562        /* prog->aux->name will be ignored if full btf name is available */
 563        if (prog->aux->func_info_cnt) {
 564                type = btf_type_by_id(prog->aux->btf,
 565                                      prog->aux->func_info[prog->aux->func_idx].type_id);
 566                func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
 567                snprintf(sym, (size_t)(end - sym), "_%s", func_name);
 568                return;
 569        }
 570
 571        if (prog->aux->name[0])
 572                snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
 573        else
 574                *sym = 0;
 575}
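
/* Example (illustrative, not part of the original source) of the resulting
 * ksym name: the 8-byte prog tag is rendered as 16 hex characters, optionally
 * followed by the BTF function name or prog->aux->name.  With a made-up tag:
 *
 *   bpf_prog_6deef7357e7b4530              (anonymous prog)
 *   bpf_prog_6deef7357e7b4530_my_filter    (prog named "my_filter")
 */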
 576
 577static unsigned long bpf_get_ksym_start(struct latch_tree_node *n)
 578{
 579        return container_of(n, struct bpf_ksym, tnode)->start;
 580}
 581
 582static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
 583                                          struct latch_tree_node *b)
 584{
 585        return bpf_get_ksym_start(a) < bpf_get_ksym_start(b);
 586}
 587
 588static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
 589{
 590        unsigned long val = (unsigned long)key;
 591        const struct bpf_ksym *ksym;
 592
 593        ksym = container_of(n, struct bpf_ksym, tnode);
 594
 595        if (val < ksym->start)
 596                return -1;
 597        if (val >= ksym->end)
 598                return  1;
 599
 600        return 0;
 601}
 602
 603static const struct latch_tree_ops bpf_tree_ops = {
 604        .less   = bpf_tree_less,
 605        .comp   = bpf_tree_comp,
 606};
 607
 608static DEFINE_SPINLOCK(bpf_lock);
 609static LIST_HEAD(bpf_kallsyms);
 610static struct latch_tree_root bpf_tree __cacheline_aligned;
 611
 612void bpf_ksym_add(struct bpf_ksym *ksym)
 613{
 614        spin_lock_bh(&bpf_lock);
 615        WARN_ON_ONCE(!list_empty(&ksym->lnode));
 616        list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms);
 617        latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
 618        spin_unlock_bh(&bpf_lock);
 619}
 620
 621static void __bpf_ksym_del(struct bpf_ksym *ksym)
 622{
 623        if (list_empty(&ksym->lnode))
 624                return;
 625
 626        latch_tree_erase(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
 627        list_del_rcu(&ksym->lnode);
 628}
 629
 630void bpf_ksym_del(struct bpf_ksym *ksym)
 631{
 632        spin_lock_bh(&bpf_lock);
 633        __bpf_ksym_del(ksym);
 634        spin_unlock_bh(&bpf_lock);
 635}
 636
 637static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
 638{
 639        return fp->jited && !bpf_prog_was_classic(fp);
 640}
 641
 642static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
 643{
 644        return list_empty(&fp->aux->ksym.lnode) ||
 645               fp->aux->ksym.lnode.prev == LIST_POISON2;
 646}
 647
 648void bpf_prog_kallsyms_add(struct bpf_prog *fp)
 649{
 650        if (!bpf_prog_kallsyms_candidate(fp) ||
 651            !bpf_capable())
 652                return;
 653
 654        bpf_prog_ksym_set_addr(fp);
 655        bpf_prog_ksym_set_name(fp);
 656        fp->aux->ksym.prog = true;
 657
 658        bpf_ksym_add(&fp->aux->ksym);
 659}
 660
 661void bpf_prog_kallsyms_del(struct bpf_prog *fp)
 662{
 663        if (!bpf_prog_kallsyms_candidate(fp))
 664                return;
 665
 666        bpf_ksym_del(&fp->aux->ksym);
 667}
 668
 669static struct bpf_ksym *bpf_ksym_find(unsigned long addr)
 670{
 671        struct latch_tree_node *n;
 672
 673        n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
 674        return n ? container_of(n, struct bpf_ksym, tnode) : NULL;
 675}
 676
 677const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
 678                                 unsigned long *off, char *sym)
 679{
 680        struct bpf_ksym *ksym;
 681        char *ret = NULL;
 682
 683        rcu_read_lock();
 684        ksym = bpf_ksym_find(addr);
 685        if (ksym) {
 686                unsigned long symbol_start = ksym->start;
 687                unsigned long symbol_end = ksym->end;
 688
 689                strncpy(sym, ksym->name, KSYM_NAME_LEN);
 690
 691                ret = sym;
 692                if (size)
 693                        *size = symbol_end - symbol_start;
 694                if (off)
 695                        *off  = addr - symbol_start;
 696        }
 697        rcu_read_unlock();
 698
 699        return ret;
 700}
 701
 702bool is_bpf_text_address(unsigned long addr)
 703{
 704        bool ret;
 705
 706        rcu_read_lock();
 707        ret = bpf_ksym_find(addr) != NULL;
 708        rcu_read_unlock();
 709
 710        return ret;
 711}
 712
 713static struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
 714{
 715        struct bpf_ksym *ksym = bpf_ksym_find(addr);
 716
 717        return ksym && ksym->prog ?
 718               container_of(ksym, struct bpf_prog_aux, ksym)->prog :
 719               NULL;
 720}
 721
 722const struct exception_table_entry *search_bpf_extables(unsigned long addr)
 723{
 724        const struct exception_table_entry *e = NULL;
 725        struct bpf_prog *prog;
 726
 727        rcu_read_lock();
 728        prog = bpf_prog_ksym_find(addr);
 729        if (!prog)
 730                goto out;
 731        if (!prog->aux->num_exentries)
 732                goto out;
 733
 734        e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr);
 735out:
 736        rcu_read_unlock();
 737        return e;
 738}
 739
 740int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
 741                    char *sym)
 742{
 743        struct bpf_ksym *ksym;
 744        unsigned int it = 0;
 745        int ret = -ERANGE;
 746
 747        if (!bpf_jit_kallsyms_enabled())
 748                return ret;
 749
 750        rcu_read_lock();
 751        list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) {
 752                if (it++ != symnum)
 753                        continue;
 754
 755                strncpy(sym, ksym->name, KSYM_NAME_LEN);
 756
 757                *value = ksym->start;
 758                *type  = BPF_SYM_ELF_TYPE;
 759
 760                ret = 0;
 761                break;
 762        }
 763        rcu_read_unlock();
 764
 765        return ret;
 766}
 767
 768int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
 769                                struct bpf_jit_poke_descriptor *poke)
 770{
 771        struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
 772        static const u32 poke_tab_max = 1024;
 773        u32 slot = prog->aux->size_poke_tab;
 774        u32 size = slot + 1;
 775
 776        if (size > poke_tab_max)
 777                return -ENOSPC;
 778        if (poke->tailcall_target || poke->tailcall_target_stable ||
 779            poke->tailcall_bypass || poke->adj_off || poke->bypass_addr)
 780                return -EINVAL;
 781
 782        switch (poke->reason) {
 783        case BPF_POKE_REASON_TAIL_CALL:
 784                if (!poke->tail_call.map)
 785                        return -EINVAL;
 786                break;
 787        default:
 788                return -EINVAL;
 789        }
 790
 791        tab = krealloc(tab, size * sizeof(*poke), GFP_KERNEL);
 792        if (!tab)
 793                return -ENOMEM;
 794
 795        memcpy(&tab[slot], poke, sizeof(*poke));
 796        prog->aux->size_poke_tab = size;
 797        prog->aux->poke_tab = tab;
 798
 799        return slot;
 800}
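
/* Illustrative sketch (not part of the original file): how a caller (in
 * practice the verifier) registers a poke descriptor for a direct tail call.
 * "map", "key" and "insn_idx" stand for values the caller already has;
 * everything else about the surrounding fixup is omitted.
 */
static inline int example_add_tail_call_poke(struct bpf_prog *prog,
                                             struct bpf_map *map, u32 key,
                                             u32 insn_idx)
{
        struct bpf_jit_poke_descriptor poke = {};

        poke.reason        = BPF_POKE_REASON_TAIL_CALL;
        poke.tail_call.map = map;
        poke.tail_call.key = key;
        poke.insn_idx      = insn_idx;

        /* Returns the slot index on success, or a negative errno. */
        return bpf_jit_add_poke_descriptor(prog, &poke);
}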
 801
 802static atomic_long_t bpf_jit_current;
 803
 804/* Can be overridden by an arch's JIT compiler if it has a custom,
 805 * dedicated BPF backend memory area, or if neither of the two
 806 * below apply.
 807 */
 808u64 __weak bpf_jit_alloc_exec_limit(void)
 809{
 810#if defined(MODULES_VADDR)
 811        return MODULES_END - MODULES_VADDR;
 812#else
 813        return VMALLOC_END - VMALLOC_START;
 814#endif
 815}
 816
 817static int __init bpf_jit_charge_init(void)
 818{
 819        /* Only used as heuristic here to derive limit. */
 820        bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2,
 821                                            PAGE_SIZE), LONG_MAX);
 822        return 0;
 823}
 824pure_initcall(bpf_jit_charge_init);
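
/* Example (illustrative, not part of the original source) of the heuristic
 * above: on an arch whose module area (MODULES_END - MODULES_VADDR) spans
 * 1 GiB, the default bpf_jit_limit is round_up(1 GiB >> 2, PAGE_SIZE) =
 * 256 MiB of JIT image memory; unprivileged users are capped at that, while
 * CAP_SYS_ADMIN may exceed it (see below).
 */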
 825
 826int bpf_jit_charge_modmem(u32 pages)
 827{
 828        if (atomic_long_add_return(pages, &bpf_jit_current) >
 829            (bpf_jit_limit >> PAGE_SHIFT)) {
 830                if (!capable(CAP_SYS_ADMIN)) {
 831                        atomic_long_sub(pages, &bpf_jit_current);
 832                        return -EPERM;
 833                }
 834        }
 835
 836        return 0;
 837}
 838
 839void bpf_jit_uncharge_modmem(u32 pages)
 840{
 841        atomic_long_sub(pages, &bpf_jit_current);
 842}
 843
 844void *__weak bpf_jit_alloc_exec(unsigned long size)
 845{
 846        return module_alloc(size);
 847}
 848
 849void __weak bpf_jit_free_exec(void *addr)
 850{
 851        module_memfree(addr);
 852}
 853
 854struct bpf_binary_header *
 855bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
 856                     unsigned int alignment,
 857                     bpf_jit_fill_hole_t bpf_fill_ill_insns)
 858{
 859        struct bpf_binary_header *hdr;
 860        u32 size, hole, start, pages;
 861
 862        WARN_ON_ONCE(!is_power_of_2(alignment) ||
 863                     alignment > BPF_IMAGE_ALIGNMENT);
 864
 865        /* Most BPF filters are really small, but if some of them
 866         * fill a page, allow at least 128 extra bytes to insert a
 867         * random section of illegal instructions.
 868         */
 869        size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
 870        pages = size / PAGE_SIZE;
 871
 872        if (bpf_jit_charge_modmem(pages))
 873                return NULL;
 874        hdr = bpf_jit_alloc_exec(size);
 875        if (!hdr) {
 876                bpf_jit_uncharge_modmem(pages);
 877                return NULL;
 878        }
 879
 880        /* Fill space with illegal/arch-dep instructions. */
 881        bpf_fill_ill_insns(hdr, size);
 882
 883        hdr->pages = pages;
 884        hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
 885                     PAGE_SIZE - sizeof(*hdr));
 886        start = (get_random_int() % hole) & ~(alignment - 1);
 887
 888        /* Leave a random number of instructions before BPF code. */
 889        *image_ptr = &hdr->image[start];
 890
 891        return hdr;
 892}
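
/* Worked example (illustrative, not part of the original source), assuming
 * an 8-byte header and a 4K page: for proglen = 100,
 * size = round_up(100 + 8 + 128, 4096) = 4096 and pages = 1.  The hole
 * available for randomization is min(4096 - 108, 4096 - 8) = 3988 bytes, so
 * the image starts at a random, alignment-masked offset in [0, 3988) past
 * the header, with the rest of the page left filled with illegal
 * instructions.
 */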
 893
 894void bpf_jit_binary_free(struct bpf_binary_header *hdr)
 895{
 896        u32 pages = hdr->pages;
 897
 898        bpf_jit_free_exec(hdr);
 899        bpf_jit_uncharge_modmem(pages);
 900}
 901
 902/* This symbol is only overridden by archs that have different
 903 * requirements from the usual eBPF JITs, e.g. when they only
 904 * implement a cBPF JIT, do not set images read-only, etc.
 905 */
 906void __weak bpf_jit_free(struct bpf_prog *fp)
 907{
 908        if (fp->jited) {
 909                struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
 910
 911                bpf_jit_binary_free(hdr);
 912
 913                WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
 914        }
 915
 916        bpf_prog_unlock_free(fp);
 917}
 918
 919int bpf_jit_get_func_addr(const struct bpf_prog *prog,
 920                          const struct bpf_insn *insn, bool extra_pass,
 921                          u64 *func_addr, bool *func_addr_fixed)
 922{
 923        s16 off = insn->off;
 924        s32 imm = insn->imm;
 925        u8 *addr;
 926
 927        *func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
 928        if (!*func_addr_fixed) {
 929                /* Place-holder address till the last pass has collected
 930                 * all addresses for JITed subprograms in which case we
 931                 * can pick them up from prog->aux.
 932                 */
 933                if (!extra_pass)
 934                        addr = NULL;
 935                else if (prog->aux->func &&
 936                         off >= 0 && off < prog->aux->func_cnt)
 937                        addr = (u8 *)prog->aux->func[off]->bpf_func;
 938                else
 939                        return -EINVAL;
 940        } else {
 941                /* Address of a BPF helper call. Since part of the core
 942                 * kernel, it's always at a fixed location. __bpf_call_base
 943                 * and the helper with imm relative to it are both in core
 944                 * kernel.
 945                 */
 946                addr = (u8 *)__bpf_call_base + imm;
 947        }
 948
 949        *func_addr = (unsigned long)addr;
 950        return 0;
 951}
 952
 953static int bpf_jit_blind_insn(const struct bpf_insn *from,
 954                              const struct bpf_insn *aux,
 955                              struct bpf_insn *to_buff,
 956                              bool emit_zext)
 957{
 958        struct bpf_insn *to = to_buff;
 959        u32 imm_rnd = get_random_int();
 960        s16 off;
 961
 962        BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
 963        BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
 964
 965        /* Constraints on AX register:
 966         *
 967         * AX register is inaccessible from user space. It is mapped in
 968         * all JITs, and used here for constant blinding rewrites. It is
 969         * typically "stateless" meaning its contents are only valid within
 970         * the executed instruction, but not across several instructions.
 971         * There are a few exceptions however which are further detailed
 972         * below.
 973         *
 974         * Constant blinding is only used by JITs, not in the interpreter.
 975         * The interpreter uses AX on some occasions as a local temporary
 976         * register e.g. in DIV or MOD instructions.
 977         *
 978         * In restricted circumstances, the verifier can also use the AX
 979         * register for rewrites as long as they do not interfere with
 980         * the above cases!
 981         */
 982        if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
 983                goto out;
 984
 985        if (from->imm == 0 &&
 986            (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
 987             from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
 988                *to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
 989                goto out;
 990        }
 991
 992        switch (from->code) {
 993        case BPF_ALU | BPF_ADD | BPF_K:
 994        case BPF_ALU | BPF_SUB | BPF_K:
 995        case BPF_ALU | BPF_AND | BPF_K:
 996        case BPF_ALU | BPF_OR  | BPF_K:
 997        case BPF_ALU | BPF_XOR | BPF_K:
 998        case BPF_ALU | BPF_MUL | BPF_K:
 999        case BPF_ALU | BPF_MOV | BPF_K:
1000        case BPF_ALU | BPF_DIV | BPF_K:
1001        case BPF_ALU | BPF_MOD | BPF_K:
1002                *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1003                *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1004                *to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
1005                break;
1006
1007        case BPF_ALU64 | BPF_ADD | BPF_K:
1008        case BPF_ALU64 | BPF_SUB | BPF_K:
1009        case BPF_ALU64 | BPF_AND | BPF_K:
1010        case BPF_ALU64 | BPF_OR  | BPF_K:
1011        case BPF_ALU64 | BPF_XOR | BPF_K:
1012        case BPF_ALU64 | BPF_MUL | BPF_K:
1013        case BPF_ALU64 | BPF_MOV | BPF_K:
1014        case BPF_ALU64 | BPF_DIV | BPF_K:
1015        case BPF_ALU64 | BPF_MOD | BPF_K:
1016                *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1017                *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1018                *to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
1019                break;
1020
1021        case BPF_JMP | BPF_JEQ  | BPF_K:
1022        case BPF_JMP | BPF_JNE  | BPF_K:
1023        case BPF_JMP | BPF_JGT  | BPF_K:
1024        case BPF_JMP | BPF_JLT  | BPF_K:
1025        case BPF_JMP | BPF_JGE  | BPF_K:
1026        case BPF_JMP | BPF_JLE  | BPF_K:
1027        case BPF_JMP | BPF_JSGT | BPF_K:
1028        case BPF_JMP | BPF_JSLT | BPF_K:
1029        case BPF_JMP | BPF_JSGE | BPF_K:
1030        case BPF_JMP | BPF_JSLE | BPF_K:
1031        case BPF_JMP | BPF_JSET | BPF_K:
1032                /* Account for the extra offset in case of a backjump. */
1033                off = from->off;
1034                if (off < 0)
1035                        off -= 2;
1036                *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1037                *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1038                *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
1039                break;
1040
1041        case BPF_JMP32 | BPF_JEQ  | BPF_K:
1042        case BPF_JMP32 | BPF_JNE  | BPF_K:
1043        case BPF_JMP32 | BPF_JGT  | BPF_K:
1044        case BPF_JMP32 | BPF_JLT  | BPF_K:
1045        case BPF_JMP32 | BPF_JGE  | BPF_K:
1046        case BPF_JMP32 | BPF_JLE  | BPF_K:
1047        case BPF_JMP32 | BPF_JSGT | BPF_K:
1048        case BPF_JMP32 | BPF_JSLT | BPF_K:
1049        case BPF_JMP32 | BPF_JSGE | BPF_K:
1050        case BPF_JMP32 | BPF_JSLE | BPF_K:
1051        case BPF_JMP32 | BPF_JSET | BPF_K:
1052                /* Account for the extra offset in case of a backjump. */
1053                off = from->off;
1054                if (off < 0)
1055                        off -= 2;
1056                *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1057                *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1058                *to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
1059                                      off);
1060                break;
1061
1062        case BPF_LD | BPF_IMM | BPF_DW:
1063                *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
1064                *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1065                *to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
1066                *to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
1067                break;
1068        case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
1069                *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
1070                *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1071                if (emit_zext)
1072                        *to++ = BPF_ZEXT_REG(BPF_REG_AX);
1073                *to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
1074                break;
1075
1076        case BPF_ST | BPF_MEM | BPF_DW:
1077        case BPF_ST | BPF_MEM | BPF_W:
1078        case BPF_ST | BPF_MEM | BPF_H:
1079        case BPF_ST | BPF_MEM | BPF_B:
1080                *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1081                *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1082                *to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
1083                break;
1084        }
1085out:
1086        return to - to_buff;
1087}
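
/* Worked example (illustrative, not part of the original source) of the
 * rewrite above, with a made-up imm_rnd = 0xdeadbeef.  The single insn
 *
 *   BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1234)
 *
 * is emitted into to_buff as the three-insn sequence
 *
 *   BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0x1234 ^ 0xdeadbeef)
 *   BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, 0xdeadbeef)
 *   BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_AX)
 *
 * so the user-controlled constant 0x1234 never appears verbatim in the JITed
 * image, while AX still holds the original value at run time.
 */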
1088
1089static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
1090                                              gfp_t gfp_extra_flags)
1091{
1092        gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
1093        struct bpf_prog *fp;
1094
1095        fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags);
1096        if (fp != NULL) {
1097                /* aux->prog still points to the fp_other one, so
1098                 * when promoting the clone to the real program,
1099                 * this still needs to be adapted.
1100                 */
1101                memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
1102        }
1103
1104        return fp;
1105}
1106
1107static void bpf_prog_clone_free(struct bpf_prog *fp)
1108{
1109        /* aux was stolen by the other clone, so we cannot free
1110         * it from this path! It will be freed eventually by the
1111         * other program on release.
1112         *
1113         * At this point, we don't need a deferred release since
1114         * clone is guaranteed to not be locked.
1115         */
1116        fp->aux = NULL;
1117        fp->stats = NULL;
1118        fp->active = NULL;
1119        __bpf_prog_free(fp);
1120}
1121
1122void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
1123{
1124        /* We have to repoint aux->prog to self, as we don't
1125         * know whether fp here is the clone or the original.
1126         */
1127        fp->aux->prog = fp;
1128        bpf_prog_clone_free(fp_other);
1129}
1130
1131struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
1132{
1133        struct bpf_insn insn_buff[16], aux[2];
1134        struct bpf_prog *clone, *tmp;
1135        int insn_delta, insn_cnt;
1136        struct bpf_insn *insn;
1137        int i, rewritten;
1138
1139        if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
1140                return prog;
1141
1142        clone = bpf_prog_clone_create(prog, GFP_USER);
1143        if (!clone)
1144                return ERR_PTR(-ENOMEM);
1145
1146        insn_cnt = clone->len;
1147        insn = clone->insnsi;
1148
1149        for (i = 0; i < insn_cnt; i++, insn++) {
1150                /* We temporarily need to hold the original ld64 insn
1151                 * so that we can still access the first part in the
1152                 * second blinding run.
1153                 */
1154                if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
1155                    insn[1].code == 0)
1156                        memcpy(aux, insn, sizeof(aux));
1157
1158                rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
1159                                                clone->aux->verifier_zext);
1160                if (!rewritten)
1161                        continue;
1162
1163                tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
1164                if (IS_ERR(tmp)) {
1165                        /* Patching may have repointed aux->prog during
1166                         * realloc from the original one, so we need to
1167                         * fix it up here on error.
1168                         */
1169                        bpf_jit_prog_release_other(prog, clone);
1170                        return tmp;
1171                }
1172
1173                clone = tmp;
1174                insn_delta = rewritten - 1;
1175
1176                /* Walk new program and skip insns we just inserted. */
1177                insn = clone->insnsi + i + insn_delta;
1178                insn_cnt += insn_delta;
1179                i        += insn_delta;
1180        }
1181
1182        clone->blinded = 1;
1183        return clone;
1184}
1185#endif /* CONFIG_BPF_JIT */
1186
1187/* Base function for offset calculation. Needs to go into .text section,
1188 * therefore keeping it non-static as well; will also be used by JITs
1189 * anyway later on, so do not let the compiler omit it. This also needs
1190 * to go into kallsyms for correlation from e.g. bpftool, so naming
1191 * must not change.
1192 */
1193noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
1194{
1195        return 0;
1196}
1197EXPORT_SYMBOL_GPL(__bpf_call_base);
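
/* Illustration (not part of the original source) of the offset scheme: for a
 * helper call, the verifier's fixup pass stores the helper's distance from
 * this base in the insn (roughly insn->imm = (long)fn->func -
 * (long)__bpf_call_base), so both the interpreter (see JMP_CALL below) and
 * bpf_jit_get_func_addr() above recover the absolute address as:
 *
 *   addr = (u8 *)__bpf_call_base + insn->imm;
 */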
1198
1199/* All UAPI available opcodes. */
1200#define BPF_INSN_MAP(INSN_2, INSN_3)            \
1201        /* 32 bit ALU operations. */            \
1202        /*   Register based. */                 \
1203        INSN_3(ALU, ADD,  X),                   \
1204        INSN_3(ALU, SUB,  X),                   \
1205        INSN_3(ALU, AND,  X),                   \
1206        INSN_3(ALU, OR,   X),                   \
1207        INSN_3(ALU, LSH,  X),                   \
1208        INSN_3(ALU, RSH,  X),                   \
1209        INSN_3(ALU, XOR,  X),                   \
1210        INSN_3(ALU, MUL,  X),                   \
1211        INSN_3(ALU, MOV,  X),                   \
1212        INSN_3(ALU, ARSH, X),                   \
1213        INSN_3(ALU, DIV,  X),                   \
1214        INSN_3(ALU, MOD,  X),                   \
1215        INSN_2(ALU, NEG),                       \
1216        INSN_3(ALU, END, TO_BE),                \
1217        INSN_3(ALU, END, TO_LE),                \
1218        /*   Immediate based. */                \
1219        INSN_3(ALU, ADD,  K),                   \
1220        INSN_3(ALU, SUB,  K),                   \
1221        INSN_3(ALU, AND,  K),                   \
1222        INSN_3(ALU, OR,   K),                   \
1223        INSN_3(ALU, LSH,  K),                   \
1224        INSN_3(ALU, RSH,  K),                   \
1225        INSN_3(ALU, XOR,  K),                   \
1226        INSN_3(ALU, MUL,  K),                   \
1227        INSN_3(ALU, MOV,  K),                   \
1228        INSN_3(ALU, ARSH, K),                   \
1229        INSN_3(ALU, DIV,  K),                   \
1230        INSN_3(ALU, MOD,  K),                   \
1231        /* 64 bit ALU operations. */            \
1232        /*   Register based. */                 \
1233        INSN_3(ALU64, ADD,  X),                 \
1234        INSN_3(ALU64, SUB,  X),                 \
1235        INSN_3(ALU64, AND,  X),                 \
1236        INSN_3(ALU64, OR,   X),                 \
1237        INSN_3(ALU64, LSH,  X),                 \
1238        INSN_3(ALU64, RSH,  X),                 \
1239        INSN_3(ALU64, XOR,  X),                 \
1240        INSN_3(ALU64, MUL,  X),                 \
1241        INSN_3(ALU64, MOV,  X),                 \
1242        INSN_3(ALU64, ARSH, X),                 \
1243        INSN_3(ALU64, DIV,  X),                 \
1244        INSN_3(ALU64, MOD,  X),                 \
1245        INSN_2(ALU64, NEG),                     \
1246        /*   Immediate based. */                \
1247        INSN_3(ALU64, ADD,  K),                 \
1248        INSN_3(ALU64, SUB,  K),                 \
1249        INSN_3(ALU64, AND,  K),                 \
1250        INSN_3(ALU64, OR,   K),                 \
1251        INSN_3(ALU64, LSH,  K),                 \
1252        INSN_3(ALU64, RSH,  K),                 \
1253        INSN_3(ALU64, XOR,  K),                 \
1254        INSN_3(ALU64, MUL,  K),                 \
1255        INSN_3(ALU64, MOV,  K),                 \
1256        INSN_3(ALU64, ARSH, K),                 \
1257        INSN_3(ALU64, DIV,  K),                 \
1258        INSN_3(ALU64, MOD,  K),                 \
1259        /* Call instruction. */                 \
1260        INSN_2(JMP, CALL),                      \
1261        /* Exit instruction. */                 \
1262        INSN_2(JMP, EXIT),                      \
1263        /* 32-bit Jump instructions. */         \
1264        /*   Register based. */                 \
1265        INSN_3(JMP32, JEQ,  X),                 \
1266        INSN_3(JMP32, JNE,  X),                 \
1267        INSN_3(JMP32, JGT,  X),                 \
1268        INSN_3(JMP32, JLT,  X),                 \
1269        INSN_3(JMP32, JGE,  X),                 \
1270        INSN_3(JMP32, JLE,  X),                 \
1271        INSN_3(JMP32, JSGT, X),                 \
1272        INSN_3(JMP32, JSLT, X),                 \
1273        INSN_3(JMP32, JSGE, X),                 \
1274        INSN_3(JMP32, JSLE, X),                 \
1275        INSN_3(JMP32, JSET, X),                 \
1276        /*   Immediate based. */                \
1277        INSN_3(JMP32, JEQ,  K),                 \
1278        INSN_3(JMP32, JNE,  K),                 \
1279        INSN_3(JMP32, JGT,  K),                 \
1280        INSN_3(JMP32, JLT,  K),                 \
1281        INSN_3(JMP32, JGE,  K),                 \
1282        INSN_3(JMP32, JLE,  K),                 \
1283        INSN_3(JMP32, JSGT, K),                 \
1284        INSN_3(JMP32, JSLT, K),                 \
1285        INSN_3(JMP32, JSGE, K),                 \
1286        INSN_3(JMP32, JSLE, K),                 \
1287        INSN_3(JMP32, JSET, K),                 \
1288        /* Jump instructions. */                \
1289        /*   Register based. */                 \
1290        INSN_3(JMP, JEQ,  X),                   \
1291        INSN_3(JMP, JNE,  X),                   \
1292        INSN_3(JMP, JGT,  X),                   \
1293        INSN_3(JMP, JLT,  X),                   \
1294        INSN_3(JMP, JGE,  X),                   \
1295        INSN_3(JMP, JLE,  X),                   \
1296        INSN_3(JMP, JSGT, X),                   \
1297        INSN_3(JMP, JSLT, X),                   \
1298        INSN_3(JMP, JSGE, X),                   \
1299        INSN_3(JMP, JSLE, X),                   \
1300        INSN_3(JMP, JSET, X),                   \
1301        /*   Immediate based. */                \
1302        INSN_3(JMP, JEQ,  K),                   \
1303        INSN_3(JMP, JNE,  K),                   \
1304        INSN_3(JMP, JGT,  K),                   \
1305        INSN_3(JMP, JLT,  K),                   \
1306        INSN_3(JMP, JGE,  K),                   \
1307        INSN_3(JMP, JLE,  K),                   \
1308        INSN_3(JMP, JSGT, K),                   \
1309        INSN_3(JMP, JSLT, K),                   \
1310        INSN_3(JMP, JSGE, K),                   \
1311        INSN_3(JMP, JSLE, K),                   \
1312        INSN_3(JMP, JSET, K),                   \
1313        INSN_2(JMP, JA),                        \
1314        /* Store instructions. */               \
1315        /*   Register based. */                 \
1316        INSN_3(STX, MEM,  B),                   \
1317        INSN_3(STX, MEM,  H),                   \
1318        INSN_3(STX, MEM,  W),                   \
1319        INSN_3(STX, MEM,  DW),                  \
1320        INSN_3(STX, ATOMIC, W),                 \
1321        INSN_3(STX, ATOMIC, DW),                \
1322        /*   Immediate based. */                \
1323        INSN_3(ST, MEM, B),                     \
1324        INSN_3(ST, MEM, H),                     \
1325        INSN_3(ST, MEM, W),                     \
1326        INSN_3(ST, MEM, DW),                    \
1327        /* Load instructions. */                \
1328        /*   Register based. */                 \
1329        INSN_3(LDX, MEM, B),                    \
1330        INSN_3(LDX, MEM, H),                    \
1331        INSN_3(LDX, MEM, W),                    \
1332        INSN_3(LDX, MEM, DW),                   \
1333        /*   Immediate based. */                \
1334        INSN_3(LD, IMM, DW)
1335
1336bool bpf_opcode_in_insntable(u8 code)
1337{
1338#define BPF_INSN_2_TBL(x, y)    [BPF_##x | BPF_##y] = true
1339#define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
1340        static const bool public_insntable[256] = {
1341                [0 ... 255] = false,
1342                /* Now overwrite non-defaults ... */
1343                BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
1344                /* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
1345                [BPF_LD | BPF_ABS | BPF_B] = true,
1346                [BPF_LD | BPF_ABS | BPF_H] = true,
1347                [BPF_LD | BPF_ABS | BPF_W] = true,
1348                [BPF_LD | BPF_IND | BPF_B] = true,
1349                [BPF_LD | BPF_IND | BPF_H] = true,
1350                [BPF_LD | BPF_IND | BPF_W] = true,
1351        };
1352#undef BPF_INSN_3_TBL
1353#undef BPF_INSN_2_TBL
1354        return public_insntable[code];
1355}
1356
1357#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1358u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
1359{
1360        memset(dst, 0, size);
1361        return -EFAULT;
1362}
1363
1364/**
1365 *      ___bpf_prog_run - run eBPF program on a given context
1366 *      @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
1367 *      @insn: is the array of eBPF instructions
1368 *
1369 * Decode and execute eBPF instructions.
1370 *
1371 * Return: whatever value is in %BPF_R0 at program exit
1372 */
1373static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
1374{
1375#define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
1376#define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
1377        static const void * const jumptable[256] __annotate_jump_table = {
1378                [0 ... 255] = &&default_label,
1379                /* Now overwrite non-defaults ... */
1380                BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
1381                /* Non-UAPI available opcodes. */
1382                [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
1383                [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
1384                [BPF_ST  | BPF_NOSPEC] = &&ST_NOSPEC,
1385                [BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
1386                [BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
1387                [BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
1388                [BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
1389        };
1390#undef BPF_INSN_3_LBL
1391#undef BPF_INSN_2_LBL
1392        u32 tail_call_cnt = 0;
1393
1394#define CONT     ({ insn++; goto select_insn; })
1395#define CONT_JMP ({ insn++; goto select_insn; })
1396
1397select_insn:
1398        goto *jumptable[insn->code];
1399
1400        /* Explicitly mask the register-based shift amounts with 63 or 31
1401         * to avoid undefined behavior. Normally this won't affect the
1402         * generated code, for example, in case of native 64 bit archs such
1403         * as x86-64 or arm64, the compiler is optimizing the AND away for
1404         * the interpreter. In case of JITs, each of the JIT backends compiles
1405         * the BPF shift operations to machine instructions which produce
1406         * implementation-defined results in such a case; the resulting
1407         * contents of the register may be arbitrary, but program behaviour
1408         * as a whole remains defined. In other words, in case of JIT backends,
1409         * the AND must /not/ be added to the emitted LSH/RSH/ARSH translation.
1410         */
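        /* Example (illustrative, not part of the original source) of the
         * masking above: for ALU64_LSH_X with SRC = 70, the interpreter
         * computes DST << (70 & 63) = DST << 6, whereas a JIT emits the
         * plain native shift and the hardware defines the out-of-range
         * result.
         */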
1411        /* ALU (shifts) */
1412#define SHT(OPCODE, OP)                                 \
1413        ALU64_##OPCODE##_X:                             \
1414                DST = DST OP (SRC & 63);                \
1415                CONT;                                   \
1416        ALU_##OPCODE##_X:                               \
1417                DST = (u32) DST OP ((u32) SRC & 31);    \
1418                CONT;                                   \
1419        ALU64_##OPCODE##_K:                             \
1420                DST = DST OP IMM;                       \
1421                CONT;                                   \
1422        ALU_##OPCODE##_K:                               \
1423                DST = (u32) DST OP (u32) IMM;           \
1424                CONT;
1425        /* ALU (rest) */
1426#define ALU(OPCODE, OP)                                 \
1427        ALU64_##OPCODE##_X:                             \
1428                DST = DST OP SRC;                       \
1429                CONT;                                   \
1430        ALU_##OPCODE##_X:                               \
1431                DST = (u32) DST OP (u32) SRC;           \
1432                CONT;                                   \
1433        ALU64_##OPCODE##_K:                             \
1434                DST = DST OP IMM;                       \
1435                CONT;                                   \
1436        ALU_##OPCODE##_K:                               \
1437                DST = (u32) DST OP (u32) IMM;           \
1438                CONT;
1439        ALU(ADD,  +)
1440        ALU(SUB,  -)
1441        ALU(AND,  &)
1442        ALU(OR,   |)
1443        ALU(XOR,  ^)
1444        ALU(MUL,  *)
1445        SHT(LSH, <<)
1446        SHT(RSH, >>)
1447#undef SHT
1448#undef ALU
1449        ALU_NEG:
1450                DST = (u32) -DST;
1451                CONT;
1452        ALU64_NEG:
1453                DST = -DST;
1454                CONT;
1455        ALU_MOV_X:
1456                DST = (u32) SRC;
1457                CONT;
1458        ALU_MOV_K:
1459                DST = (u32) IMM;
1460                CONT;
1461        ALU64_MOV_X:
1462                DST = SRC;
1463                CONT;
1464        ALU64_MOV_K:
1465                DST = IMM;
1466                CONT;
1467        LD_IMM_DW:
1468                DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
1469                insn++;
1470                CONT;
1471        ALU_ARSH_X:
1472                DST = (u64) (u32) (((s32) DST) >> (SRC & 31));
1473                CONT;
1474        ALU_ARSH_K:
1475                DST = (u64) (u32) (((s32) DST) >> IMM);
1476                CONT;
1477        ALU64_ARSH_X:
1478                (*(s64 *) &DST) >>= (SRC & 63);
1479                CONT;
1480        ALU64_ARSH_K:
1481                (*(s64 *) &DST) >>= IMM;
1482                CONT;
1483        ALU64_MOD_X:
1484                div64_u64_rem(DST, SRC, &AX);
1485                DST = AX;
1486                CONT;
1487        ALU_MOD_X:
1488                AX = (u32) DST;
1489                DST = do_div(AX, (u32) SRC);
1490                CONT;
1491        ALU64_MOD_K:
1492                div64_u64_rem(DST, IMM, &AX);
1493                DST = AX;
1494                CONT;
1495        ALU_MOD_K:
1496                AX = (u32) DST;
1497                DST = do_div(AX, (u32) IMM);
1498                CONT;
1499        ALU64_DIV_X:
1500                DST = div64_u64(DST, SRC);
1501                CONT;
1502        ALU_DIV_X:
1503                AX = (u32) DST;
1504                do_div(AX, (u32) SRC);
1505                DST = (u32) AX;
1506                CONT;
1507        ALU64_DIV_K:
1508                DST = div64_u64(DST, IMM);
1509                CONT;
1510        ALU_DIV_K:
1511                AX = (u32) DST;
1512                do_div(AX, (u32) IMM);
1513                DST = (u32) AX;
1514                CONT;
1515        ALU_END_TO_BE:
1516                switch (IMM) {
1517                case 16:
1518                        DST = (__force u16) cpu_to_be16(DST);
1519                        break;
1520                case 32:
1521                        DST = (__force u32) cpu_to_be32(DST);
1522                        break;
1523                case 64:
1524                        DST = (__force u64) cpu_to_be64(DST);
1525                        break;
1526                }
1527                CONT;
1528        ALU_END_TO_LE:
1529                switch (IMM) {
1530                case 16:
1531                        DST = (__force u16) cpu_to_le16(DST);
1532                        break;
1533                case 32:
1534                        DST = (__force u32) cpu_to_le32(DST);
1535                        break;
1536                case 64:
1537                        DST = (__force u64) cpu_to_le64(DST);
1538                        break;
1539                }
1540                CONT;
1541
1542        /* CALL */
1543        JMP_CALL:
1544                /* Function call scratches BPF_R1-BPF_R5 registers,
1545                 * preserves BPF_R6-BPF_R9, and stores return value
1546                 * into BPF_R0.
1547                 */
1548                BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
1549                                                       BPF_R4, BPF_R5);
1550                CONT;
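        /* For illustration: insn->imm is not an absolute address but the
         * helper's offset relative to __bpf_call_base, i.e. roughly
         * imm = helper_func - __bpf_call_base, patched in by the verifier
         * when it fixes up helper calls, so the addition above recovers the
         * actual function pointer.
         */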
1551
1552        JMP_CALL_ARGS:
1553                BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
1554                                                            BPF_R3, BPF_R4,
1555                                                            BPF_R5,
1556                                                            insn + insn->off + 1);
1557                CONT;
1558
1559        JMP_TAIL_CALL: {
1560                struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
1561                struct bpf_array *array = container_of(map, struct bpf_array, map);
1562                struct bpf_prog *prog;
1563                u32 index = BPF_R3;
1564
1565                if (unlikely(index >= array->map.max_entries))
1566                        goto out;
1567                if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
1568                        goto out;
1569
1570                tail_call_cnt++;
1571
1572                prog = READ_ONCE(array->ptrs[index]);
1573                if (!prog)
1574                        goto out;
1575
1576                /* ARG1 at this point is guaranteed to point to CTX from
1577                 * the verifier side due to the fact that the tail call is
1578                 * handled like a helper, that is, bpf_tail_call_proto,
1579                 * where arg1_type is ARG_PTR_TO_CTX.
1580                 */
1581                insn = prog->insnsi;
1582                goto select_insn;
1583out:
1584                CONT;
1585        }
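        /* From the program's point of view this is reached via the
         * bpf_tail_call(ctx, &prog_array_map, index) helper; the verifier
         * rewrites that call into BPF_JMP | BPF_TAIL_CALL, so by the time we
         * get here R1 holds ctx, R2 the prog array map and R3 the index (see
         * bpf_tail_call_proto below), and on success execution restarts at
         * the target program's first instruction without returning here.
         */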
1586        JMP_JA:
1587                insn += insn->off;
1588                CONT;
1589        JMP_EXIT:
1590                return BPF_R0;
1591        /* JMP */
1592#define COND_JMP(SIGN, OPCODE, CMP_OP)                          \
1593        JMP_##OPCODE##_X:                                       \
1594                if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) {     \
1595                        insn += insn->off;                      \
1596                        CONT_JMP;                               \
1597                }                                               \
1598                CONT;                                           \
1599        JMP32_##OPCODE##_X:                                     \
1600                if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) {     \
1601                        insn += insn->off;                      \
1602                        CONT_JMP;                               \
1603                }                                               \
1604                CONT;                                           \
1605        JMP_##OPCODE##_K:                                       \
1606                if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) {     \
1607                        insn += insn->off;                      \
1608                        CONT_JMP;                               \
1609                }                                               \
1610                CONT;                                           \
1611        JMP32_##OPCODE##_K:                                     \
1612                if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) {     \
1613                        insn += insn->off;                      \
1614                        CONT_JMP;                               \
1615                }                                               \
1616                CONT;
1617        COND_JMP(u, JEQ, ==)
1618        COND_JMP(u, JNE, !=)
1619        COND_JMP(u, JGT, >)
1620        COND_JMP(u, JLT, <)
1621        COND_JMP(u, JGE, >=)
1622        COND_JMP(u, JLE, <=)
1623        COND_JMP(u, JSET, &)
1624        COND_JMP(s, JSGT, >)
1625        COND_JMP(s, JSLT, <)
1626        COND_JMP(s, JSGE, >=)
1627        COND_JMP(s, JSLE, <=)
1628#undef COND_JMP
1629        /* ST, STX and LDX */
1630        ST_NOSPEC:
1631                /* Speculation barrier for mitigating Speculative Store Bypass.
1632                 * In case of arm64, we rely on the firmware mitigation as
1633                 * controlled via the ssbd kernel parameter. Whenever the
1634                 * mitigation is enabled, it works for all of the kernel code
1635                 * with no need to provide any additional instructions here.
1636                 * In case of x86, we use 'lfence' insn for mitigation. We
1637                 * reuse preexisting logic from Spectre v1 mitigation that
1638                 * happens to produce the required code on x86 for v4 as well.
1639                 */
1640#ifdef CONFIG_X86
1641                barrier_nospec();
1642#endif
1643                CONT;
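        /* This instruction is never written by user space; the verifier
         * inserts it (opcode BPF_ST | BPF_NOSPEC with all other fields zero,
         * see BPF_ST_NOSPEC()) wherever a Spectre v4 barrier is needed, and
         * each JIT either emits its arch specific barrier (x86: lfence) or
         * nothing if the architecture does not require one.
         */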
1644#define LDST(SIZEOP, SIZE)                                              \
1645        STX_MEM_##SIZEOP:                                               \
1646                *(SIZE *)(unsigned long) (DST + insn->off) = SRC;       \
1647                CONT;                                                   \
1648        ST_MEM_##SIZEOP:                                                \
1649                *(SIZE *)(unsigned long) (DST + insn->off) = IMM;       \
1650                CONT;                                                   \
1651        LDX_MEM_##SIZEOP:                                               \
1652                DST = *(SIZE *)(unsigned long) (SRC + insn->off);       \
1653                CONT;
1654
1655        LDST(B,   u8)
1656        LDST(H,  u16)
1657        LDST(W,  u32)
1658        LDST(DW, u64)
1659#undef LDST
1660#define LDX_PROBE(SIZEOP, SIZE)                                                 \
1661        LDX_PROBE_MEM_##SIZEOP:                                                 \
1662                bpf_probe_read_kernel(&DST, SIZE, (const void *)(long) (SRC + insn->off));      \
1663                CONT;
1664        LDX_PROBE(B,  1)
1665        LDX_PROBE(H,  2)
1666        LDX_PROBE(W,  4)
1667        LDX_PROBE(DW, 8)
1668#undef LDX_PROBE
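        /* BPF_PROBE_MEM loads are likewise not written by programs directly;
         * the verifier converts ordinary BPF_LDX loads from potentially
         * faulting kernel memory (e.g. PTR_TO_BTF_ID pointer chasing) into
         * this form. On a faulting access, bpf_probe_read_kernel() returns an
         * error and zero-fills the requested bytes instead of crashing.
         */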
1669
1670#define ATOMIC_ALU_OP(BOP, KOP)                                         \
1671                case BOP:                                               \
1672                        if (BPF_SIZE(insn->code) == BPF_W)              \
1673                                atomic_##KOP((u32) SRC, (atomic_t *)(unsigned long) \
1674                                             (DST + insn->off));        \
1675                        else                                            \
1676                                atomic64_##KOP((u64) SRC, (atomic64_t *)(unsigned long) \
1677                                               (DST + insn->off));      \
1678                        break;                                          \
1679                case BOP | BPF_FETCH:                                   \
1680                        if (BPF_SIZE(insn->code) == BPF_W)              \
1681                                SRC = (u32) atomic_fetch_##KOP(         \
1682                                        (u32) SRC,                      \
1683                                        (atomic_t *)(unsigned long) (DST + insn->off)); \
1684                        else                                            \
1685                                SRC = (u64) atomic64_fetch_##KOP(       \
1686                                        (u64) SRC,                      \
1687                                        (atomic64_t *)(unsigned long) (DST + insn->off)); \
1688                        break;
1689
1690        STX_ATOMIC_DW:
1691        STX_ATOMIC_W:
1692                switch (IMM) {
1693                ATOMIC_ALU_OP(BPF_ADD, add)
1694                ATOMIC_ALU_OP(BPF_AND, and)
1695                ATOMIC_ALU_OP(BPF_OR, or)
1696                ATOMIC_ALU_OP(BPF_XOR, xor)
1697#undef ATOMIC_ALU_OP
1698
1699                case BPF_XCHG:
1700                        if (BPF_SIZE(insn->code) == BPF_W)
1701                                SRC = (u32) atomic_xchg(
1702                                        (atomic_t *)(unsigned long) (DST + insn->off),
1703                                        (u32) SRC);
1704                        else
1705                                SRC = (u64) atomic64_xchg(
1706                                        (atomic64_t *)(unsigned long) (DST + insn->off),
1707                                        (u64) SRC);
1708                        break;
1709                case BPF_CMPXCHG:
1710                        if (BPF_SIZE(insn->code) == BPF_W)
1711                                BPF_R0 = (u32) atomic_cmpxchg(
1712                                        (atomic_t *)(unsigned long) (DST + insn->off),
1713                                        (u32) BPF_R0, (u32) SRC);
1714                        else
1715                                BPF_R0 = (u64) atomic64_cmpxchg(
1716                                        (atomic64_t *)(unsigned long) (DST + insn->off),
1717                                        (u64) BPF_R0, (u64) SRC);
1718                        break;
1719
1720                default:
1721                        goto default_label;
1722                }
1723                CONT;
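        /* For illustration, the BPF_ATOMIC encodings handled above all use
         * code BPF_STX | BPF_ATOMIC | BPF_W/BPF_DW with the operation encoded
         * in the immediate, e.g.:
         *
         *   imm = BPF_ADD:              lock *(u64 *)(dst + off) += src
         *   imm = BPF_ADD | BPF_FETCH:  src = atomic_fetch_add(src, dst + off)
         *   imm = BPF_XCHG:             src = xchg(dst + off, src)
         *   imm = BPF_CMPXCHG:          r0 = cmpxchg(dst + off, r0, src)
         */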
1724
1725        default_label:
1726                /* If we ever reach this, we have a bug somewhere. Die hard here
1727                 * instead of just returning 0; we could be somewhere in a subprog,
1728                 * so execution could otherwise continue, which we do /not/ want.
1729                 *
1730                 * Note that the verifier allows only opcodes listed in bpf_opcode_in_insntable().
1731                 */
1732                pr_warn("BPF interpreter: unknown opcode %02x (imm: 0x%x)\n",
1733                        insn->code, insn->imm);
1734                BUG_ON(1);
1735                return 0;
1736}
1737
1738#define PROG_NAME(stack_size) __bpf_prog_run##stack_size
1739#define DEFINE_BPF_PROG_RUN(stack_size) \
1740static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
1741{ \
1742        u64 stack[stack_size / sizeof(u64)]; \
1743        u64 regs[MAX_BPF_EXT_REG]; \
1744\
1745        FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1746        ARG1 = (u64) (unsigned long) ctx; \
1747        return ___bpf_prog_run(regs, insn); \
1748}
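/* For illustration, DEFINE_BPF_PROG_RUN(512) roughly expands to:
 *
 *   static unsigned int __bpf_prog_run512(const void *ctx,
 *                                         const struct bpf_insn *insn)
 *   {
 *           u64 stack[512 / sizeof(u64)];
 *           u64 regs[MAX_BPF_EXT_REG];
 *           ...
 *   }
 *
 * i.e. one interpreter entry point per supported stack size (instantiated
 * below), so the on-stack scratch area matches the program's verified
 * stack depth.
 */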
1749
1750#define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
1751#define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
1752static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
1753                                      const struct bpf_insn *insn) \
1754{ \
1755        u64 stack[stack_size / sizeof(u64)]; \
1756        u64 regs[MAX_BPF_EXT_REG]; \
1757\
1758        FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1759        BPF_R1 = r1; \
1760        BPF_R2 = r2; \
1761        BPF_R3 = r3; \
1762        BPF_R4 = r4; \
1763        BPF_R5 = r5; \
1764        return ___bpf_prog_run(regs, insn); \
1765}
1766
1767#define EVAL1(FN, X) FN(X)
1768#define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
1769#define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
1770#define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
1771#define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
1772#define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
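/* EVAL6(FN, 32, 64, 96, 128, 160, 192) simply expands to
 * FN(32) FN(64) FN(96) FN(128) FN(160) FN(192); together with the EVAL6 and
 * EVAL4 invocations below, this stamps out the 16 interpreter variants for
 * stack sizes 32..512 in steps of 32 bytes.
 */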
1773
1774EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
1775EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
1776EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
1777
1778EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
1779EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
1780EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
1781
1782#define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
1783
1784static unsigned int (*interpreters[])(const void *ctx,
1785                                      const struct bpf_insn *insn) = {
1786EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1787EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1788EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1789};
1790#undef PROG_NAME_LIST
1791#define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
1792static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
1793                                  const struct bpf_insn *insn) = {
1794EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1795EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1796EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1797};
1798#undef PROG_NAME_LIST
1799
1800void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
1801{
1802        stack_depth = max_t(u32, stack_depth, 1);
1803        insn->off = (s16) insn->imm;
1804        insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
1805                __bpf_call_base_args;
1806        insn->code = BPF_JMP | BPF_CALL_ARGS;
1807}
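/* Used when a program with BPF-to-BPF calls must run in the interpreter:
 * the original BPF_CALL insn carried the callee's relative insn offset in
 * insn->imm; it is moved into insn->off, imm is repointed at the matching
 * __bpf_prog_run_args<N>() entry (relative to __bpf_call_base_args), and the
 * opcode becomes BPF_CALL_ARGS, which the JMP_CALL_ARGS handler above
 * dispatches with 'insn + insn->off + 1' as the callee's first instruction.
 */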
1808
1809#else
1810static unsigned int __bpf_prog_ret0_warn(const void *ctx,
1811                                         const struct bpf_insn *insn)
1812{
1813        /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
1814         * is not working properly, so warn about it!
1815         */
1816        WARN_ON_ONCE(1);
1817        return 0;
1818}
1819#endif
1820
1821bool bpf_prog_array_compatible(struct bpf_array *array,
1822                               const struct bpf_prog *fp)
1823{
1824        if (fp->kprobe_override)
1825                return false;
1826
1827        if (!array->aux->type) {
1828                /* There's no owner yet where we could check for
1829                 * compatibility.
1830                 */
1831                array->aux->type  = fp->type;
1832                array->aux->jited = fp->jited;
1833                return true;
1834        }
1835
1836        return array->aux->type  == fp->type &&
1837               array->aux->jited == fp->jited;
1838}
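/* For illustration: the first program put into a prog array becomes its
 * "owner" and fixes the array's program type and JITed-ness; mixing, say, a
 * JITed and a non-JITed program, or two different program types, in the
 * same tail-call map is then rejected here.
 */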
1839
1840static int bpf_check_tail_call(const struct bpf_prog *fp)
1841{
1842        struct bpf_prog_aux *aux = fp->aux;
1843        int i, ret = 0;
1844
1845        mutex_lock(&aux->used_maps_mutex);
1846        for (i = 0; i < aux->used_map_cnt; i++) {
1847                struct bpf_map *map = aux->used_maps[i];
1848                struct bpf_array *array;
1849
1850                if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
1851                        continue;
1852
1853                array = container_of(map, struct bpf_array, map);
1854                if (!bpf_prog_array_compatible(array, fp)) {
1855                        ret = -EINVAL;
1856                        goto out;
1857                }
1858        }
1859
1860out:
1861        mutex_unlock(&aux->used_maps_mutex);
1862        return ret;
1863}
1864
1865static void bpf_prog_select_func(struct bpf_prog *fp)
1866{
1867#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1868        u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
1869
1870        fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
1871#else
1872        fp->bpf_func = __bpf_prog_ret0_warn;
1873#endif
1874}
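/* For illustration (hypothetical depth): a program whose verified
 * stack_depth is 65 bytes gets round_up(65, 32) == 96, so index 2 is used
 * and fp->bpf_func ends up pointing at __bpf_prog_run96().
 */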
1875
1876/**
1877 *      bpf_prog_select_runtime - select exec runtime for BPF program
1878 *      @fp: bpf_prog populated with internal BPF program
1879 *      @err: pointer to error variable
1880 *
1881 * Try to JIT eBPF program, if JIT is not available, use interpreter.
1882 * The BPF program will be executed via BPF_PROG_RUN() macro.
1883 *
1884 * Return: the &fp argument along with &err set to 0 for success or
1885 * a negative errno code on failure
1886 */
1887struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
1888{
1889        /* In case of BPF to BPF calls, verifier did all the prep
1890         * work with regards to JITing, etc.
1891         */
1892        bool jit_needed = false;
1893
1894        if (fp->bpf_func)
1895                goto finalize;
1896
1897        if (IS_ENABLED(CONFIG_BPF_JIT_ALWAYS_ON) ||
1898            bpf_prog_has_kfunc_call(fp))
1899                jit_needed = true;
1900
1901        bpf_prog_select_func(fp);
1902
1903        /* eBPF JITs can rewrite the program in case constant
1904         * blinding is active. However, in case of error during
1905         * blinding, bpf_int_jit_compile() must always return a
1906         * valid program, which in this case would simply not
1907         * be JITed, but fall back to the interpreter.
1908         */
1909        if (!bpf_prog_is_dev_bound(fp->aux)) {
1910                *err = bpf_prog_alloc_jited_linfo(fp);
1911                if (*err)
1912                        return fp;
1913
1914                fp = bpf_int_jit_compile(fp);
1915                bpf_prog_jit_attempt_done(fp);
1916                if (!fp->jited && jit_needed) {
1917                        *err = -ENOTSUPP;
1918                        return fp;
1919                }
1920        } else {
1921                *err = bpf_prog_offload_compile(fp);
1922                if (*err)
1923                        return fp;
1924        }
1925
1926finalize:
1927        bpf_prog_lock_ro(fp);
1928
1929        /* The tail call compatibility check can only be done at
1930         * this late stage, as we need to determine whether we deal
1931         * with JITed or non-JITed program concatenations, and not
1932         * all eBPF JITs might immediately support all features.
1933         */
1934        *err = bpf_check_tail_call(fp);
1935
1936        return fp;
1937}
1938EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
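/* A minimal usage sketch (assuming the usual program load path, as in
 * bpf_prog_load() in syscall.c):
 *
 *   prog = bpf_prog_select_runtime(prog, &err);
 *   if (err < 0)
 *           goto free_used_maps;
 *
 * i.e. the returned prog must be used (constant blinding may have cloned
 * it) and err checked separately.
 */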
1939
1940static unsigned int __bpf_prog_ret1(const void *ctx,
1941                                    const struct bpf_insn *insn)
1942{
1943        return 1;
1944}
1945
1946static struct bpf_prog_dummy {
1947        struct bpf_prog prog;
1948} dummy_bpf_prog = {
1949        .prog = {
1950                .bpf_func = __bpf_prog_ret1,
1951        },
1952};
1953
1954/* To avoid allocating an empty bpf_prog_array for cgroups that
1955 * don't have a bpf program attached, use one global 'empty_prog_array'.
1956 * It will not be modified by the caller of bpf_prog_array_alloc()
1957 * (since the caller requested prog_cnt == 0), and that pointer
1958 * should still be 'freed' via bpf_prog_array_free().
1959 */
1960static struct {
1961        struct bpf_prog_array hdr;
1962        struct bpf_prog *null_prog;
1963} empty_prog_array = {
1964        .null_prog = NULL,
1965};
1966
1967struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
1968{
1969        if (prog_cnt)
1970                return kzalloc(sizeof(struct bpf_prog_array) +
1971                               sizeof(struct bpf_prog_array_item) *
1972                               (prog_cnt + 1),
1973                               flags);
1974
1975        return &empty_prog_array.hdr;
1976}
1977
1978void bpf_prog_array_free(struct bpf_prog_array *progs)
1979{
1980        if (!progs || progs == &empty_prog_array.hdr)
1981                return;
1982        kfree_rcu(progs, rcu);
1983}
1984
1985int bpf_prog_array_length(struct bpf_prog_array *array)
1986{
1987        struct bpf_prog_array_item *item;
1988        u32 cnt = 0;
1989
1990        for (item = array->items; item->prog; item++)
1991                if (item->prog != &dummy_bpf_prog.prog)
1992                        cnt++;
1993        return cnt;
1994}
1995
1996bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
1997{
1998        struct bpf_prog_array_item *item;
1999
2000        for (item = array->items; item->prog; item++)
2001                if (item->prog != &dummy_bpf_prog.prog)
2002                        return false;
2003        return true;
2004}
2005
2006static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
2007                                     u32 *prog_ids,
2008                                     u32 request_cnt)
2009{
2010        struct bpf_prog_array_item *item;
2011        int i = 0;
2012
2013        for (item = array->items; item->prog; item++) {
2014                if (item->prog == &dummy_bpf_prog.prog)
2015                        continue;
2016                prog_ids[i] = item->prog->aux->id;
2017                if (++i == request_cnt) {
2018                        item++;
2019                        break;
2020                }
2021        }
2022
2023        return !!(item->prog);
2024}
2025
2026int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
2027                                __u32 __user *prog_ids, u32 cnt)
2028{
2029        unsigned long err = 0;
2030        bool nospc;
2031        u32 *ids;
2032
2033        /* users of this function are doing:
2034         * cnt = bpf_prog_array_length();
2035         * if (cnt > 0)
2036         *     bpf_prog_array_copy_to_user(..., cnt);
2037         * so below kcalloc doesn't need extra cnt > 0 check.
2038         */
2039        ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
2040        if (!ids)
2041                return -ENOMEM;
2042        nospc = bpf_prog_array_copy_core(array, ids, cnt);
2043        err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
2044        kfree(ids);
2045        if (err)
2046                return -EFAULT;
2047        if (nospc)
2048                return -ENOSPC;
2049        return 0;
2050}
2051
2052void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
2053                                struct bpf_prog *old_prog)
2054{
2055        struct bpf_prog_array_item *item;
2056
2057        for (item = array->items; item->prog; item++)
2058                if (item->prog == old_prog) {
2059                        WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
2060                        break;
2061                }
2062}
2063
2064/**
2065 * bpf_prog_array_delete_safe_at() - Replaces the program at the given
2066 *                                   index into the program array with
2067 *                                   a dummy no-op program.
2068 * @array: a bpf_prog_array
2069 * @index: the index of the program to replace
2070 *
2071 * Skips over dummy programs, by not counting them, when calculating
2072 * the position of the program to replace.
2073 *
2074 * Return:
2075 * * 0          - Success
2076 * * -EINVAL    - Invalid index value. Must be a non-negative integer.
2077 * * -ENOENT    - Index out of range
2078 */
2079int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index)
2080{
2081        return bpf_prog_array_update_at(array, index, &dummy_bpf_prog.prog);
2082}
2083
2084/**
2085 * bpf_prog_array_update_at() - Updates the program at the given index
2086 *                              into the program array.
2087 * @array: a bpf_prog_array
2088 * @index: the index of the program to update
2089 * @prog: the program to insert into the array
2090 *
2091 * Skips over dummy programs, by not counting them, when calculating
2092 * the position of the program to update.
2093 *
2094 * Return:
2095 * * 0          - Success
2096 * * -EINVAL    - Invalid index value. Must be a non-negative integer.
2097 * * -ENOENT    - Index out of range
2098 */
2099int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
2100                             struct bpf_prog *prog)
2101{
2102        struct bpf_prog_array_item *item;
2103
2104        if (unlikely(index < 0))
2105                return -EINVAL;
2106
2107        for (item = array->items; item->prog; item++) {
2108                if (item->prog == &dummy_bpf_prog.prog)
2109                        continue;
2110                if (!index) {
2111                        WRITE_ONCE(item->prog, prog);
2112                        return 0;
2113                }
2114                index--;
2115        }
2116        return -ENOENT;
2117}
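/* For illustration: with items {progA, dummy, progB, NULL}, index 1 refers
 * to progB, because the dummy entry is skipped and not counted.
 */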
2118
2119int bpf_prog_array_copy(struct bpf_prog_array *old_array,
2120                        struct bpf_prog *exclude_prog,
2121                        struct bpf_prog *include_prog,
2122                        struct bpf_prog_array **new_array)
2123{
2124        int new_prog_cnt, carry_prog_cnt = 0;
2125        struct bpf_prog_array_item *existing;
2126        struct bpf_prog_array *array;
2127        bool found_exclude = false;
2128        int new_prog_idx = 0;
2129
2130        /* Figure out how many existing progs we need to carry over to
2131         * the new array.
2132         */
2133        if (old_array) {
2134                existing = old_array->items;
2135                for (; existing->prog; existing++) {
2136                        if (existing->prog == exclude_prog) {
2137                                found_exclude = true;
2138                                continue;
2139                        }
2140                        if (existing->prog != &dummy_bpf_prog.prog)
2141                                carry_prog_cnt++;
2142                        if (existing->prog == include_prog)
2143                                return -EEXIST;
2144                }
2145        }
2146
2147        if (exclude_prog && !found_exclude)
2148                return -ENOENT;
2149
2150        /* How many progs (not NULL) will be in the new array? */
2151        new_prog_cnt = carry_prog_cnt;
2152        if (include_prog)
2153                new_prog_cnt += 1;
2154
2155        /* Do we have any prog (not NULL) in the new array? */
2156        if (!new_prog_cnt) {
2157                *new_array = NULL;
2158                return 0;
2159        }
2160
2161        /* +1 as the end of prog_array is marked with NULL */
2162        array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
2163        if (!array)
2164                return -ENOMEM;
2165
2166        /* Fill in the new prog array */
2167        if (carry_prog_cnt) {
2168                existing = old_array->items;
2169                for (; existing->prog; existing++)
2170                        if (existing->prog != exclude_prog &&
2171                            existing->prog != &dummy_bpf_prog.prog) {
2172                                array->items[new_prog_idx++].prog =
2173                                        existing->prog;
2174                        }
2175        }
2176        if (include_prog)
2177                array->items[new_prog_idx++].prog = include_prog;
2178        array->items[new_prog_idx].prog = NULL;
2179        *new_array = array;
2180        return 0;
2181}
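/* A typical use (e.g. when attaching or detaching perf event programs) is
 * to build a new array that drops one program and/or appends another:
 *
 *   ret = bpf_prog_array_copy(old_array, old_prog, new_prog, &new_array);
 *
 * with NULL passed for whichever of exclude_prog/include_prog is not needed.
 */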
2182
2183int bpf_prog_array_copy_info(struct bpf_prog_array *array,
2184                             u32 *prog_ids, u32 request_cnt,
2185                             u32 *prog_cnt)
2186{
2187        u32 cnt = 0;
2188
2189        if (array)
2190                cnt = bpf_prog_array_length(array);
2191
2192        *prog_cnt = cnt;
2193
2194        /* return early if user requested only program count or nothing to copy */
2195        if (!request_cnt || !cnt)
2196                return 0;
2197
2198        /* this function is called under trace/bpf_trace.c: bpf_event_mutex */
2199        return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
2200                                                                     : 0;
2201}
2202
2203void __bpf_free_used_maps(struct bpf_prog_aux *aux,
2204                          struct bpf_map **used_maps, u32 len)
2205{
2206        struct bpf_map *map;
2207        u32 i;
2208
2209        for (i = 0; i < len; i++) {
2210                map = used_maps[i];
2211                if (map->ops->map_poke_untrack)
2212                        map->ops->map_poke_untrack(map, aux);
2213                bpf_map_put(map);
2214        }
2215}
2216
2217static void bpf_free_used_maps(struct bpf_prog_aux *aux)
2218{
2219        __bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt);
2220        kfree(aux->used_maps);
2221}
2222
2223void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
2224                          struct btf_mod_pair *used_btfs, u32 len)
2225{
2226#ifdef CONFIG_BPF_SYSCALL
2227        struct btf_mod_pair *btf_mod;
2228        u32 i;
2229
2230        for (i = 0; i < len; i++) {
2231                btf_mod = &used_btfs[i];
2232                if (btf_mod->module)
2233                        module_put(btf_mod->module);
2234                btf_put(btf_mod->btf);
2235        }
2236#endif
2237}
2238
2239static void bpf_free_used_btfs(struct bpf_prog_aux *aux)
2240{
2241        __bpf_free_used_btfs(aux, aux->used_btfs, aux->used_btf_cnt);
2242        kfree(aux->used_btfs);
2243}
2244
2245static void bpf_prog_free_deferred(struct work_struct *work)
2246{
2247        struct bpf_prog_aux *aux;
2248        int i;
2249
2250        aux = container_of(work, struct bpf_prog_aux, work);
2251        bpf_free_used_maps(aux);
2252        bpf_free_used_btfs(aux);
2253        if (bpf_prog_is_dev_bound(aux))
2254                bpf_prog_offload_destroy(aux->prog);
2255#ifdef CONFIG_PERF_EVENTS
2256        if (aux->prog->has_callchain_buf)
2257                put_callchain_buffers();
2258#endif
2259        if (aux->dst_trampoline)
2260                bpf_trampoline_put(aux->dst_trampoline);
2261        for (i = 0; i < aux->func_cnt; i++) {
2262                /* We can just unlink the subprog poke descriptor table as
2263                 * it was originally linked to the main program and is also
2264                 * released along with it.
2265                 */
2266                aux->func[i]->aux->poke_tab = NULL;
2267                bpf_jit_free(aux->func[i]);
2268        }
2269        if (aux->func_cnt) {
2270                kfree(aux->func);
2271                bpf_prog_unlock_free(aux->prog);
2272        } else {
2273                bpf_jit_free(aux->prog);
2274        }
2275}
2276
2277/* Free internal BPF program */
2278void bpf_prog_free(struct bpf_prog *fp)
2279{
2280        struct bpf_prog_aux *aux = fp->aux;
2281
2282        if (aux->dst_prog)
2283                bpf_prog_put(aux->dst_prog);
2284        INIT_WORK(&aux->work, bpf_prog_free_deferred);
2285        schedule_work(&aux->work);
2286}
2287EXPORT_SYMBOL_GPL(bpf_prog_free);
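/* Note: the actual teardown is deferred to a workqueue above, presumably
 * because bpf_prog_free() may be reached from contexts (e.g. RCU callbacks)
 * where tearing down JIT images and vfree()ing the program is not allowed.
 */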
2288
2289/* RNG for unprivileged user space with separated state from prandom_u32(). */
2290static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
2291
2292void bpf_user_rnd_init_once(void)
2293{
2294        prandom_init_once(&bpf_user_rnd_state);
2295}
2296
2297BPF_CALL_0(bpf_user_rnd_u32)
2298{
2299        /* Should someone ever have the rather unwise idea to use some
2300         * of the registers passed into this function, then note that
2301         * this function is called from native eBPF and classic-to-eBPF
2302         * transformations. Register assignments from both sides are
2303         * different, e.g. classic always sets fn(ctx, A, X) here.
2304         */
2305        struct rnd_state *state;
2306        u32 res;
2307
2308        state = &get_cpu_var(bpf_user_rnd_state);
2309        res = prandom_u32_state(state);
2310        put_cpu_var(bpf_user_rnd_state);
2311
2312        return res;
2313}
2314
2315BPF_CALL_0(bpf_get_raw_cpu_id)
2316{
2317        return raw_smp_processor_id();
2318}
2319
2320/* Weak definitions of helper functions in case we don't have bpf syscall. */
2321const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
2322const struct bpf_func_proto bpf_map_update_elem_proto __weak;
2323const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
2324const struct bpf_func_proto bpf_map_push_elem_proto __weak;
2325const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
2326const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
2327const struct bpf_func_proto bpf_spin_lock_proto __weak;
2328const struct bpf_func_proto bpf_spin_unlock_proto __weak;
2329const struct bpf_func_proto bpf_jiffies64_proto __weak;
2330
2331const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
2332const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
2333const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
2334const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
2335const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak;
2336const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto __weak;
2337
2338const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
2339const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
2340const struct bpf_func_proto bpf_get_current_comm_proto __weak;
2341const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
2342const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak;
2343const struct bpf_func_proto bpf_get_local_storage_proto __weak;
2344const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak;
2345const struct bpf_func_proto bpf_snprintf_btf_proto __weak;
2346const struct bpf_func_proto bpf_seq_printf_btf_proto __weak;
2347
2348const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
2349{
2350        return NULL;
2351}
2352
2353u64 __weak
2354bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
2355                 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
2356{
2357        return -ENOTSUPP;
2358}
2359EXPORT_SYMBOL_GPL(bpf_event_output);
2360
2361/* Always built-in helper functions. */
2362const struct bpf_func_proto bpf_tail_call_proto = {
2363        .func           = NULL,
2364        .gpl_only       = false,
2365        .ret_type       = RET_VOID,
2366        .arg1_type      = ARG_PTR_TO_CTX,
2367        .arg2_type      = ARG_CONST_MAP_PTR,
2368        .arg3_type      = ARG_ANYTHING,
2369};
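/* .func is intentionally NULL: tail calls are never dispatched through a
 * helper address. The verifier recognizes calls to this proto and rewrites
 * them into BPF_JMP | BPF_TAIL_CALL, handled by JMP_TAIL_CALL in the
 * interpreter above or emitted directly by the JITs.
 */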
2370
2371/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
2372 * It is encouraged to implement bpf_int_jit_compile() instead, so that
2373 * eBPF and implicitly also cBPF can get JITed!
2374 */
2375struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
2376{
2377        return prog;
2378}
2379
2380/* Stub for JITs that support eBPF. All cBPF code gets transformed into
2381 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
2382 */
2383void __weak bpf_jit_compile(struct bpf_prog *prog)
2384{
2385}
2386
2387bool __weak bpf_helper_changes_pkt_data(void *func)
2388{
2389        return false;
2390}
2391
2392/* Return TRUE if the JIT backend wants the verifier to enable sub-register
2393 * usage analysis and wants explicit zero extension inserted by the verifier.
2394 * Otherwise, return FALSE.
2395 *
2396 * The verifier inserts an explicit zero extension after BPF_CMPXCHGs even if
2397 * you don't override this. JITs that don't want these extra insns can detect
2398 * them using insn_is_zext.
2399 */
2400bool __weak bpf_jit_needs_zext(void)
2401{
2402        return false;
2403}
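/* When a JIT opts in here, the verifier patches an explicit zero-extending
 * 32 bit mov after sub-register writes whose upper 32 bits are later relied
 * upon; insn_is_zext() (filter.h) lets the JIT recognize those patched
 * instructions, as the comment above notes for BPF_CMPXCHG.
 */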
2404
2405bool __weak bpf_jit_supports_kfunc_call(void)
2406{
2407        return false;
2408}
2409
2410/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
2411 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
2412 */
2413int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
2414                         int len)
2415{
2416        return -EFAULT;
2417}
2418
2419int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
2420                              void *addr1, void *addr2)
2421{
2422        return -ENOTSUPP;
2423}
2424
2425DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
2426EXPORT_SYMBOL(bpf_stats_enabled_key);
2427
2428/* All definitions of tracepoints related to BPF. */
2429#define CREATE_TRACE_POINTS
2430#include <linux/bpf_trace.h>
2431
2432EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
2433EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);
2434