linux/kernel/bpf/core.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Linux Socket Filter - Kernel level socket filtering
   4 *
   5 * Based on the design of the Berkeley Packet Filter. The new
   6 * internal format has been designed by PLUMgrid:
   7 *
   8 *      Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
   9 *
  10 * Authors:
  11 *
  12 *      Jay Schulist <jschlst@samba.org>
  13 *      Alexei Starovoitov <ast@plumgrid.com>
  14 *      Daniel Borkmann <dborkman@redhat.com>
  15 *
  16 * Andi Kleen - Fix a few bad bugs and races.
  17 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
  18 */
  19
  20#include <uapi/linux/btf.h>
  21#include <linux/filter.h>
  22#include <linux/skbuff.h>
  23#include <linux/vmalloc.h>
  24#include <linux/random.h>
  25#include <linux/moduleloader.h>
  26#include <linux/bpf.h>
  27#include <linux/btf.h>
  28#include <linux/frame.h>
  29#include <linux/rbtree_latch.h>
  30#include <linux/kallsyms.h>
  31#include <linux/rcupdate.h>
  32#include <linux/perf_event.h>
  33#include <linux/extable.h>
  34#include <linux/log2.h>
  35#include <asm/unaligned.h>
  36
  37/* Registers */
  38#define BPF_R0  regs[BPF_REG_0]
  39#define BPF_R1  regs[BPF_REG_1]
  40#define BPF_R2  regs[BPF_REG_2]
  41#define BPF_R3  regs[BPF_REG_3]
  42#define BPF_R4  regs[BPF_REG_4]
  43#define BPF_R5  regs[BPF_REG_5]
  44#define BPF_R6  regs[BPF_REG_6]
  45#define BPF_R7  regs[BPF_REG_7]
  46#define BPF_R8  regs[BPF_REG_8]
  47#define BPF_R9  regs[BPF_REG_9]
  48#define BPF_R10 regs[BPF_REG_10]
  49
  50/* Named registers */
  51#define DST     regs[insn->dst_reg]
  52#define SRC     regs[insn->src_reg]
  53#define FP      regs[BPF_REG_FP]
  54#define AX      regs[BPF_REG_AX]
  55#define ARG1    regs[BPF_REG_ARG1]
  56#define CTX     regs[BPF_REG_CTX]
  57#define IMM     insn->imm
  58
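/* For illustration: given an instruction like
 *
 *	{ .code = BPF_ALU64 | BPF_ADD | BPF_X,
 *	  .dst_reg = BPF_REG_1, .src_reg = BPF_REG_2 }
 *
 * the interpreter's "DST = DST + SRC" below expands through these macros
 * to regs[insn->dst_reg] += regs[insn->src_reg], i.e.
 * regs[BPF_REG_1] += regs[BPF_REG_2].
 */
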
  59/* No hurry in this branch
  60 *
  61 * Exported for the bpf jit load helper.
  62 */
  63void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
  64{
  65        u8 *ptr = NULL;
  66
  67        if (k >= SKF_NET_OFF)
  68                ptr = skb_network_header(skb) + k - SKF_NET_OFF;
  69        else if (k >= SKF_LL_OFF)
  70                ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
  71
  72        if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
  73                return ptr;
  74
  75        return NULL;
  76}
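
/* For illustration: a classic BPF filter that loads one byte at
 * SKF_NET_OFF + 9 (the IPv4 protocol field) reaches this helper with
 * k = SKF_NET_OFF + 9, so ptr = skb_network_header(skb) + 9, provided the
 * resulting pointer still lies within [skb->head, skb_tail_pointer(skb)).
 */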
  77
  78struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
  79{
  80        gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
  81        struct bpf_prog_aux *aux;
  82        struct bpf_prog *fp;
  83
  84        size = round_up(size, PAGE_SIZE);
  85        fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
  86        if (fp == NULL)
  87                return NULL;
  88
  89        aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
  90        if (aux == NULL) {
  91                vfree(fp);
  92                return NULL;
  93        }
  94
  95        fp->pages = size / PAGE_SIZE;
  96        fp->aux = aux;
  97        fp->aux->prog = fp;
  98        fp->jit_requested = ebpf_jit_enabled();
  99
 100        INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
 101
 102        return fp;
 103}
 104
 105struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
 106{
 107        gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
 108        struct bpf_prog *prog;
 109        int cpu;
 110
 111        prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
 112        if (!prog)
 113                return NULL;
 114
 115        prog->aux->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
 116        if (!prog->aux->stats) {
 117                kfree(prog->aux);
 118                vfree(prog);
 119                return NULL;
 120        }
 121
 122        for_each_possible_cpu(cpu) {
 123                struct bpf_prog_stats *pstats;
 124
 125                pstats = per_cpu_ptr(prog->aux->stats, cpu);
 126                u64_stats_init(&pstats->syncp);
 127        }
 128        return prog;
 129}
 130EXPORT_SYMBOL_GPL(bpf_prog_alloc);
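
/* A minimal usage sketch (hypothetical caller, assuming the helpers from
 * linux/filter.h): allocate room for one instruction, fill it in and
 * release it again:
 *
 *	struct bpf_prog *fp = bpf_prog_alloc(bpf_prog_size(1), 0);
 *
 *	if (fp) {
 *		fp->len = 1;
 *		fp->insnsi[0] = BPF_EXIT_INSN();
 *		...
 *		bpf_prog_free(fp);
 *	}
 */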
 131
 132int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
 133{
 134        if (!prog->aux->nr_linfo || !prog->jit_requested)
 135                return 0;
 136
 137        prog->aux->jited_linfo = kcalloc(prog->aux->nr_linfo,
 138                                         sizeof(*prog->aux->jited_linfo),
 139                                         GFP_KERNEL | __GFP_NOWARN);
 140        if (!prog->aux->jited_linfo)
 141                return -ENOMEM;
 142
 143        return 0;
 144}
 145
 146void bpf_prog_free_jited_linfo(struct bpf_prog *prog)
 147{
 148        kfree(prog->aux->jited_linfo);
 149        prog->aux->jited_linfo = NULL;
 150}
 151
 152void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog)
 153{
 154        if (prog->aux->jited_linfo && !prog->aux->jited_linfo[0])
 155                bpf_prog_free_jited_linfo(prog);
 156}
 157
  158/* The jit engine is responsible for providing an array
 159 * for insn_off to the jited_off mapping (insn_to_jit_off).
 160 *
 161 * The idx to this array is the insn_off.  Hence, the insn_off
 162 * here is relative to the prog itself instead of the main prog.
 163 * This array has one entry for each xlated bpf insn.
 164 *
 165 * jited_off is the byte off to the last byte of the jited insn.
 166 *
 167 * Hence, with
 168 * insn_start:
 169 *      The first bpf insn off of the prog.  The insn off
 170 *      here is relative to the main prog.
 171 *      e.g. if prog is a subprog, insn_start > 0
 172 * linfo_idx:
 173 *      The prog's idx to prog->aux->linfo and jited_linfo
 174 *
 175 * jited_linfo[linfo_idx] = prog->bpf_func
 176 *
 177 * For i > linfo_idx,
 178 *
 179 * jited_linfo[i] = prog->bpf_func +
 180 *      insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
 181 */
 182void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
 183                               const u32 *insn_to_jit_off)
 184{
 185        u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
 186        const struct bpf_line_info *linfo;
 187        void **jited_linfo;
 188
 189        if (!prog->aux->jited_linfo)
 190                /* Userspace did not provide linfo */
 191                return;
 192
 193        linfo_idx = prog->aux->linfo_idx;
 194        linfo = &prog->aux->linfo[linfo_idx];
 195        insn_start = linfo[0].insn_off;
 196        insn_end = insn_start + prog->len;
 197
 198        jited_linfo = &prog->aux->jited_linfo[linfo_idx];
 199        jited_linfo[0] = prog->bpf_func;
 200
 201        nr_linfo = prog->aux->nr_linfo - linfo_idx;
 202
 203        for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
 204                /* The verifier ensures that linfo[i].insn_off is
 205                 * strictly increasing
 206                 */
 207                jited_linfo[i] = prog->bpf_func +
 208                        insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
 209}
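
/* Worked example of the mapping above (illustrative numbers): for a
 * subprog whose first line info entry sits at main-prog insn 10
 * (insn_start == 10) and whose next entry is at insn_off == 12,
 * jited_linfo[1] = prog->bpf_func + insn_to_jit_off[12 - 10 - 1], i.e.
 * the jited address right behind the subprog's second xlated insn,
 * which is where the code for insn_off 12 starts.
 */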
 210
 211void bpf_prog_free_linfo(struct bpf_prog *prog)
 212{
 213        bpf_prog_free_jited_linfo(prog);
 214        kvfree(prog->aux->linfo);
 215}
 216
 217struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
 218                                  gfp_t gfp_extra_flags)
 219{
 220        gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
 221        struct bpf_prog *fp;
 222        u32 pages, delta;
 223        int ret;
 224
 225        size = round_up(size, PAGE_SIZE);
 226        pages = size / PAGE_SIZE;
 227        if (pages <= fp_old->pages)
 228                return fp_old;
 229
 230        delta = pages - fp_old->pages;
 231        ret = __bpf_prog_charge(fp_old->aux->user, delta);
 232        if (ret)
 233                return NULL;
 234
 235        fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
 236        if (fp == NULL) {
 237                __bpf_prog_uncharge(fp_old->aux->user, delta);
 238        } else {
 239                memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
 240                fp->pages = pages;
 241                fp->aux->prog = fp;
 242
 243                /* We keep fp->aux from fp_old around in the new
 244                 * reallocated structure.
 245                 */
 246                fp_old->aux = NULL;
 247                __bpf_prog_free(fp_old);
 248        }
 249
 250        return fp;
 251}
 252
 253void __bpf_prog_free(struct bpf_prog *fp)
 254{
 255        if (fp->aux) {
 256                free_percpu(fp->aux->stats);
 257                kfree(fp->aux->poke_tab);
 258                kfree(fp->aux);
 259        }
 260        vfree(fp);
 261}
 262
 263int bpf_prog_calc_tag(struct bpf_prog *fp)
 264{
 265        const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
 266        u32 raw_size = bpf_prog_tag_scratch_size(fp);
 267        u32 digest[SHA_DIGEST_WORDS];
 268        u32 ws[SHA_WORKSPACE_WORDS];
 269        u32 i, bsize, psize, blocks;
 270        struct bpf_insn *dst;
 271        bool was_ld_map;
 272        u8 *raw, *todo;
 273        __be32 *result;
 274        __be64 *bits;
 275
 276        raw = vmalloc(raw_size);
 277        if (!raw)
 278                return -ENOMEM;
 279
 280        sha_init(digest);
 281        memset(ws, 0, sizeof(ws));
 282
  283        /* We need to take out the map fds for the digest calculation
  284         * since they are unstable from the user space side.
 285         */
 286        dst = (void *)raw;
 287        for (i = 0, was_ld_map = false; i < fp->len; i++) {
 288                dst[i] = fp->insnsi[i];
 289                if (!was_ld_map &&
 290                    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
 291                    (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
 292                     dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
 293                        was_ld_map = true;
 294                        dst[i].imm = 0;
 295                } else if (was_ld_map &&
 296                           dst[i].code == 0 &&
 297                           dst[i].dst_reg == 0 &&
 298                           dst[i].src_reg == 0 &&
 299                           dst[i].off == 0) {
 300                        was_ld_map = false;
 301                        dst[i].imm = 0;
 302                } else {
 303                        was_ld_map = false;
 304                }
 305        }
 306
 307        psize = bpf_prog_insn_size(fp);
 308        memset(&raw[psize], 0, raw_size - psize);
 309        raw[psize++] = 0x80;
 310
 311        bsize  = round_up(psize, SHA_MESSAGE_BYTES);
 312        blocks = bsize / SHA_MESSAGE_BYTES;
 313        todo   = raw;
 314        if (bsize - psize >= sizeof(__be64)) {
 315                bits = (__be64 *)(todo + bsize - sizeof(__be64));
 316        } else {
 317                bits = (__be64 *)(todo + bsize + bits_offset);
 318                blocks++;
 319        }
 320        *bits = cpu_to_be64((psize - 1) << 3);
 321
 322        while (blocks--) {
 323                sha_transform(digest, todo, ws);
 324                todo += SHA_MESSAGE_BYTES;
 325        }
 326
 327        result = (__force __be32 *)digest;
 328        for (i = 0; i < SHA_DIGEST_WORDS; i++)
 329                result[i] = cpu_to_be32(digest[i]);
 330        memcpy(fp->tag, result, sizeof(fp->tag));
 331
 332        vfree(raw);
 333        return 0;
 334}
 335
 336static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
 337                                s32 end_new, s32 curr, const bool probe_pass)
 338{
 339        const s64 imm_min = S32_MIN, imm_max = S32_MAX;
 340        s32 delta = end_new - end_old;
 341        s64 imm = insn->imm;
 342
 343        if (curr < pos && curr + imm + 1 >= end_old)
 344                imm += delta;
 345        else if (curr >= end_new && curr + imm + 1 < end_new)
 346                imm -= delta;
 347        if (imm < imm_min || imm > imm_max)
 348                return -ERANGE;
 349        if (!probe_pass)
 350                insn->imm = imm;
 351        return 0;
 352}
 353
 354static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
 355                                s32 end_new, s32 curr, const bool probe_pass)
 356{
 357        const s32 off_min = S16_MIN, off_max = S16_MAX;
 358        s32 delta = end_new - end_old;
 359        s32 off = insn->off;
 360
 361        if (curr < pos && curr + off + 1 >= end_old)
 362                off += delta;
 363        else if (curr >= end_new && curr + off + 1 < end_new)
 364                off -= delta;
 365        if (off < off_min || off > off_max)
 366                return -ERANGE;
 367        if (!probe_pass)
 368                insn->off = off;
 369        return 0;
 370}
 371
 372static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
 373                            s32 end_new, const bool probe_pass)
 374{
 375        u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
 376        struct bpf_insn *insn = prog->insnsi;
 377        int ret = 0;
 378
 379        for (i = 0; i < insn_cnt; i++, insn++) {
 380                u8 code;
 381
 382                /* In the probing pass we still operate on the original,
 383                 * unpatched image in order to check overflows before we
 384                 * do any other adjustments. Therefore skip the patchlet.
 385                 */
 386                if (probe_pass && i == pos) {
 387                        i = end_new;
 388                        insn = prog->insnsi + end_old;
 389                }
 390                code = insn->code;
 391                if ((BPF_CLASS(code) != BPF_JMP &&
 392                     BPF_CLASS(code) != BPF_JMP32) ||
 393                    BPF_OP(code) == BPF_EXIT)
 394                        continue;
 395                /* Adjust offset of jmps if we cross patch boundaries. */
 396                if (BPF_OP(code) == BPF_CALL) {
 397                        if (insn->src_reg != BPF_PSEUDO_CALL)
 398                                continue;
 399                        ret = bpf_adj_delta_to_imm(insn, pos, end_old,
 400                                                   end_new, i, probe_pass);
 401                } else {
 402                        ret = bpf_adj_delta_to_off(insn, pos, end_old,
 403                                                   end_new, i, probe_pass);
 404                }
 405                if (ret)
 406                        break;
 407        }
 408
 409        return ret;
 410}
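
/* Worked example (illustrative numbers): if the single insn at pos 5 is
 * replaced by a 3-insn patchlet (end_old == 6, end_new == 8, delta == 2),
 * then a jump at insn 2 with off == 4, which targeted insn 7 behind the
 * patched region, is rewritten to off == 6 so that it still lands on the
 * same (now shifted) instruction.
 */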
 411
 412static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
 413{
 414        struct bpf_line_info *linfo;
 415        u32 i, nr_linfo;
 416
 417        nr_linfo = prog->aux->nr_linfo;
 418        if (!nr_linfo || !delta)
 419                return;
 420
 421        linfo = prog->aux->linfo;
 422
 423        for (i = 0; i < nr_linfo; i++)
 424                if (off < linfo[i].insn_off)
 425                        break;
 426
 427        /* Push all off < linfo[i].insn_off by delta */
 428        for (; i < nr_linfo; i++)
 429                linfo[i].insn_off += delta;
 430}
 431
 432struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
 433                                       const struct bpf_insn *patch, u32 len)
 434{
 435        u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
 436        const u32 cnt_max = S16_MAX;
 437        struct bpf_prog *prog_adj;
 438        int err;
 439
 440        /* Since our patchlet doesn't expand the image, we're done. */
 441        if (insn_delta == 0) {
 442                memcpy(prog->insnsi + off, patch, sizeof(*patch));
 443                return prog;
 444        }
 445
 446        insn_adj_cnt = prog->len + insn_delta;
 447
 448        /* Reject anything that would potentially let the insn->off
 449         * target overflow when we have excessive program expansions.
  450         * We need to probe here before we do any reallocation, since
  451         * afterwards we may no longer be able to fail.
 452         */
 453        if (insn_adj_cnt > cnt_max &&
 454            (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))
 455                return ERR_PTR(err);
 456
 457        /* Several new instructions need to be inserted. Make room
 458         * for them. Likely, there's no need for a new allocation as
  459         * the last page could have large enough tailroom.
 460         */
 461        prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
 462                                    GFP_USER);
 463        if (!prog_adj)
 464                return ERR_PTR(-ENOMEM);
 465
 466        prog_adj->len = insn_adj_cnt;
 467
 468        /* Patching happens in 3 steps:
 469         *
 470         * 1) Move over tail of insnsi from next instruction onwards,
 471         *    so we can patch the single target insn with one or more
 472         *    new ones (patching is always from 1 to n insns, n > 0).
 473         * 2) Inject new instructions at the target location.
 474         * 3) Adjust branch offsets if necessary.
 475         */
 476        insn_rest = insn_adj_cnt - off - len;
 477
 478        memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
 479                sizeof(*patch) * insn_rest);
 480        memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
 481
  482        /* We are guaranteed not to fail at this point; otherwise the
  483         * ship has sailed and we cannot revert to the original state. An
 484         * overflow cannot happen at this point.
 485         */
 486        BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));
 487
 488        bpf_adj_linfo(prog_adj, off, insn_delta);
 489
 490        return prog_adj;
 491}
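
/* Typical caller pattern (sketch; the verifier wraps this in its own
 * helpers): the old image may be reallocated, so the returned pointer
 * must always replace the old one:
 *
 *	new_prog = bpf_patch_insn_single(prog, off, patch, cnt);
 *	if (IS_ERR(new_prog))
 *		return PTR_ERR(new_prog);
 *	prog = new_prog;
 */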
 492
 493int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
 494{
  495        /* Branch offsets can't overflow when the program is shrinking, so
  496         * there is no need to call bpf_adj_branches(..., true) here
 497         */
 498        memmove(prog->insnsi + off, prog->insnsi + off + cnt,
 499                sizeof(struct bpf_insn) * (prog->len - off - cnt));
 500        prog->len -= cnt;
 501
 502        return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
 503}
 504
 505static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
 506{
 507        int i;
 508
 509        for (i = 0; i < fp->aux->func_cnt; i++)
 510                bpf_prog_kallsyms_del(fp->aux->func[i]);
 511}
 512
 513void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
 514{
 515        bpf_prog_kallsyms_del_subprogs(fp);
 516        bpf_prog_kallsyms_del(fp);
 517}
 518
 519#ifdef CONFIG_BPF_JIT
 520/* All BPF JIT sysctl knobs here. */
 521int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
 522int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
 523int bpf_jit_harden   __read_mostly;
 524long bpf_jit_limit   __read_mostly;
 525
 526static void
 527bpf_prog_ksym_set_addr(struct bpf_prog *prog)
 528{
 529        const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
 530        unsigned long addr = (unsigned long)hdr;
 531
 532        WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
 533
 534        prog->aux->ksym.start = (unsigned long) prog->bpf_func;
 535        prog->aux->ksym.end   = addr + hdr->pages * PAGE_SIZE;
 536}
 537
 538static void
 539bpf_prog_ksym_set_name(struct bpf_prog *prog)
 540{
 541        char *sym = prog->aux->ksym.name;
 542        const char *end = sym + KSYM_NAME_LEN;
 543        const struct btf_type *type;
 544        const char *func_name;
 545
 546        BUILD_BUG_ON(sizeof("bpf_prog_") +
 547                     sizeof(prog->tag) * 2 +
 548                     /* name has been null terminated.
  549                      * We would normally need +1 for the '_' preceding
 550                      * the name.  However, the null character
 551                      * is double counted between the name and the
 552                      * sizeof("bpf_prog_") above, so we omit
 553                      * the +1 here.
 554                      */
 555                     sizeof(prog->aux->name) > KSYM_NAME_LEN);
 556
 557        sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
 558        sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));
 559
 560        /* prog->aux->name will be ignored if full btf name is available */
 561        if (prog->aux->func_info_cnt) {
 562                type = btf_type_by_id(prog->aux->btf,
 563                                      prog->aux->func_info[prog->aux->func_idx].type_id);
 564                func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
 565                snprintf(sym, (size_t)(end - sym), "_%s", func_name);
 566                return;
 567        }
 568
 569        if (prog->aux->name[0])
 570                snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
 571        else
 572                *sym = 0;
 573}
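
/* For illustration: the resulting symbol is "bpf_prog_" followed by the
 * 8-byte tag as 16 hex characters, optionally followed by "_<name>",
 * where <name> comes from the BTF function name if one is available and
 * from prog->aux->name otherwise, e.g. "bpf_prog_<tag>_my_prog".
 */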
 574
 575static unsigned long bpf_get_ksym_start(struct latch_tree_node *n)
 576{
 577        return container_of(n, struct bpf_ksym, tnode)->start;
 578}
 579
 580static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
 581                                          struct latch_tree_node *b)
 582{
 583        return bpf_get_ksym_start(a) < bpf_get_ksym_start(b);
 584}
 585
 586static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
 587{
 588        unsigned long val = (unsigned long)key;
 589        const struct bpf_ksym *ksym;
 590
 591        ksym = container_of(n, struct bpf_ksym, tnode);
 592
 593        if (val < ksym->start)
 594                return -1;
 595        if (val >= ksym->end)
 596                return  1;
 597
 598        return 0;
 599}
 600
 601static const struct latch_tree_ops bpf_tree_ops = {
 602        .less   = bpf_tree_less,
 603        .comp   = bpf_tree_comp,
 604};
 605
 606static DEFINE_SPINLOCK(bpf_lock);
 607static LIST_HEAD(bpf_kallsyms);
 608static struct latch_tree_root bpf_tree __cacheline_aligned;
 609
 610void bpf_ksym_add(struct bpf_ksym *ksym)
 611{
 612        spin_lock_bh(&bpf_lock);
 613        WARN_ON_ONCE(!list_empty(&ksym->lnode));
 614        list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms);
 615        latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
 616        spin_unlock_bh(&bpf_lock);
 617}
 618
 619static void __bpf_ksym_del(struct bpf_ksym *ksym)
 620{
 621        if (list_empty(&ksym->lnode))
 622                return;
 623
 624        latch_tree_erase(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
 625        list_del_rcu(&ksym->lnode);
 626}
 627
 628void bpf_ksym_del(struct bpf_ksym *ksym)
 629{
 630        spin_lock_bh(&bpf_lock);
 631        __bpf_ksym_del(ksym);
 632        spin_unlock_bh(&bpf_lock);
 633}
 634
 635static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
 636{
 637        return fp->jited && !bpf_prog_was_classic(fp);
 638}
 639
 640static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
 641{
 642        return list_empty(&fp->aux->ksym.lnode) ||
 643               fp->aux->ksym.lnode.prev == LIST_POISON2;
 644}
 645
 646void bpf_prog_kallsyms_add(struct bpf_prog *fp)
 647{
 648        if (!bpf_prog_kallsyms_candidate(fp) ||
 649            !capable(CAP_SYS_ADMIN))
 650                return;
 651
 652        bpf_prog_ksym_set_addr(fp);
 653        bpf_prog_ksym_set_name(fp);
 654        fp->aux->ksym.prog = true;
 655
 656        bpf_ksym_add(&fp->aux->ksym);
 657}
 658
 659void bpf_prog_kallsyms_del(struct bpf_prog *fp)
 660{
 661        if (!bpf_prog_kallsyms_candidate(fp))
 662                return;
 663
 664        bpf_ksym_del(&fp->aux->ksym);
 665}
 666
 667static struct bpf_ksym *bpf_ksym_find(unsigned long addr)
 668{
 669        struct latch_tree_node *n;
 670
 671        n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
 672        return n ? container_of(n, struct bpf_ksym, tnode) : NULL;
 673}
 674
 675const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
 676                                 unsigned long *off, char *sym)
 677{
 678        struct bpf_ksym *ksym;
 679        char *ret = NULL;
 680
 681        rcu_read_lock();
 682        ksym = bpf_ksym_find(addr);
 683        if (ksym) {
 684                unsigned long symbol_start = ksym->start;
 685                unsigned long symbol_end = ksym->end;
 686
 687                strncpy(sym, ksym->name, KSYM_NAME_LEN);
 688
 689                ret = sym;
 690                if (size)
 691                        *size = symbol_end - symbol_start;
 692                if (off)
 693                        *off  = addr - symbol_start;
 694        }
 695        rcu_read_unlock();
 696
 697        return ret;
 698}
 699
 700bool is_bpf_text_address(unsigned long addr)
 701{
 702        bool ret;
 703
 704        rcu_read_lock();
 705        ret = bpf_ksym_find(addr) != NULL;
 706        rcu_read_unlock();
 707
 708        return ret;
 709}
 710
 711static struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
 712{
 713        struct bpf_ksym *ksym = bpf_ksym_find(addr);
 714
 715        return ksym && ksym->prog ?
 716               container_of(ksym, struct bpf_prog_aux, ksym)->prog :
 717               NULL;
 718}
 719
 720const struct exception_table_entry *search_bpf_extables(unsigned long addr)
 721{
 722        const struct exception_table_entry *e = NULL;
 723        struct bpf_prog *prog;
 724
 725        rcu_read_lock();
 726        prog = bpf_prog_ksym_find(addr);
 727        if (!prog)
 728                goto out;
 729        if (!prog->aux->num_exentries)
 730                goto out;
 731
 732        e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr);
 733out:
 734        rcu_read_unlock();
 735        return e;
 736}
 737
 738int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
 739                    char *sym)
 740{
 741        struct bpf_ksym *ksym;
 742        unsigned int it = 0;
 743        int ret = -ERANGE;
 744
 745        if (!bpf_jit_kallsyms_enabled())
 746                return ret;
 747
 748        rcu_read_lock();
 749        list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) {
 750                if (it++ != symnum)
 751                        continue;
 752
 753                strncpy(sym, ksym->name, KSYM_NAME_LEN);
 754
 755                *value = ksym->start;
 756                *type  = BPF_SYM_ELF_TYPE;
 757
 758                ret = 0;
 759                break;
 760        }
 761        rcu_read_unlock();
 762
 763        return ret;
 764}
 765
 766int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
 767                                struct bpf_jit_poke_descriptor *poke)
 768{
 769        struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
 770        static const u32 poke_tab_max = 1024;
 771        u32 slot = prog->aux->size_poke_tab;
 772        u32 size = slot + 1;
 773
 774        if (size > poke_tab_max)
 775                return -ENOSPC;
 776        if (poke->ip || poke->ip_stable || poke->adj_off)
 777                return -EINVAL;
 778
 779        switch (poke->reason) {
 780        case BPF_POKE_REASON_TAIL_CALL:
 781                if (!poke->tail_call.map)
 782                        return -EINVAL;
 783                break;
 784        default:
 785                return -EINVAL;
 786        }
 787
 788        tab = krealloc(tab, size * sizeof(*poke), GFP_KERNEL);
 789        if (!tab)
 790                return -ENOMEM;
 791
 792        memcpy(&tab[slot], poke, sizeof(*poke));
 793        prog->aux->size_poke_tab = size;
 794        prog->aux->poke_tab = tab;
 795
 796        return slot;
 797}
 798
 799static atomic_long_t bpf_jit_current;
 800
 801/* Can be overridden by an arch's JIT compiler if it has a custom,
 802 * dedicated BPF backend memory area, or if neither of the two
 803 * below apply.
 804 */
 805u64 __weak bpf_jit_alloc_exec_limit(void)
 806{
 807#if defined(MODULES_VADDR)
 808        return MODULES_END - MODULES_VADDR;
 809#else
 810        return VMALLOC_END - VMALLOC_START;
 811#endif
 812}
 813
 814static int __init bpf_jit_charge_init(void)
 815{
 816        /* Only used as heuristic here to derive limit. */
 817        bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2,
 818                                            PAGE_SIZE), LONG_MAX);
 819        return 0;
 820}
 821pure_initcall(bpf_jit_charge_init);
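
/* For illustration (hypothetical numbers): with a 1 GiB JIT area reported
 * by bpf_jit_alloc_exec_limit(), the default bpf_jit_limit works out to
 * 1 GiB >> 2 = 256 MiB, rounded up to a PAGE_SIZE multiple and capped at
 * LONG_MAX.
 */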
 822
 823static int bpf_jit_charge_modmem(u32 pages)
 824{
 825        if (atomic_long_add_return(pages, &bpf_jit_current) >
 826            (bpf_jit_limit >> PAGE_SHIFT)) {
 827                if (!capable(CAP_SYS_ADMIN)) {
 828                        atomic_long_sub(pages, &bpf_jit_current);
 829                        return -EPERM;
 830                }
 831        }
 832
 833        return 0;
 834}
 835
 836static void bpf_jit_uncharge_modmem(u32 pages)
 837{
 838        atomic_long_sub(pages, &bpf_jit_current);
 839}
 840
 841void *__weak bpf_jit_alloc_exec(unsigned long size)
 842{
 843        return module_alloc(size);
 844}
 845
 846void __weak bpf_jit_free_exec(void *addr)
 847{
 848        module_memfree(addr);
 849}
 850
 851struct bpf_binary_header *
 852bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
 853                     unsigned int alignment,
 854                     bpf_jit_fill_hole_t bpf_fill_ill_insns)
 855{
 856        struct bpf_binary_header *hdr;
 857        u32 size, hole, start, pages;
 858
 859        WARN_ON_ONCE(!is_power_of_2(alignment) ||
 860                     alignment > BPF_IMAGE_ALIGNMENT);
 861
  862        /* Most BPF filters are really small, but if some of them
 863         * fill a page, allow at least 128 extra bytes to insert a
 864         * random section of illegal instructions.
 865         */
 866        size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
 867        pages = size / PAGE_SIZE;
 868
 869        if (bpf_jit_charge_modmem(pages))
 870                return NULL;
 871        hdr = bpf_jit_alloc_exec(size);
 872        if (!hdr) {
 873                bpf_jit_uncharge_modmem(pages);
 874                return NULL;
 875        }
 876
 877        /* Fill space with illegal/arch-dep instructions. */
 878        bpf_fill_ill_insns(hdr, size);
 879
 880        hdr->pages = pages;
 881        hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
 882                     PAGE_SIZE - sizeof(*hdr));
 883        start = (get_random_int() % hole) & ~(alignment - 1);
 884
 885        /* Leave a random number of instructions before BPF code. */
 886        *image_ptr = &hdr->image[start];
 887
 888        return hdr;
 889}
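
/* Layout sketch (assuming PAGE_SIZE == 4096 and a small header): for
 * proglen == 1000 the allocation is a single page, hole == 4096 - 1000 -
 * sizeof(*hdr), and the image starts at a random, alignment-rounded
 * offset within [hdr->image, hdr->image + hole), leaving random illegal
 * instructions before and after the JITed code.
 */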
 890
 891void bpf_jit_binary_free(struct bpf_binary_header *hdr)
 892{
 893        u32 pages = hdr->pages;
 894
 895        bpf_jit_free_exec(hdr);
 896        bpf_jit_uncharge_modmem(pages);
 897}
 898
 899/* This symbol is only overridden by archs that have different
  900 * requirements than the usual eBPF JITs, e.g. when they only
 901 * implement cBPF JIT, do not set images read-only, etc.
 902 */
 903void __weak bpf_jit_free(struct bpf_prog *fp)
 904{
 905        if (fp->jited) {
 906                struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
 907
 908                bpf_jit_binary_free(hdr);
 909
 910                WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
 911        }
 912
 913        bpf_prog_unlock_free(fp);
 914}
 915
 916int bpf_jit_get_func_addr(const struct bpf_prog *prog,
 917                          const struct bpf_insn *insn, bool extra_pass,
 918                          u64 *func_addr, bool *func_addr_fixed)
 919{
 920        s16 off = insn->off;
 921        s32 imm = insn->imm;
 922        u8 *addr;
 923
 924        *func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
 925        if (!*func_addr_fixed) {
  926                /* Placeholder address until the last pass has collected
  927                 * all addresses for JITed subprograms, at which point we
 928                 * can pick them up from prog->aux.
 929                 */
 930                if (!extra_pass)
 931                        addr = NULL;
 932                else if (prog->aux->func &&
 933                         off >= 0 && off < prog->aux->func_cnt)
 934                        addr = (u8 *)prog->aux->func[off]->bpf_func;
 935                else
 936                        return -EINVAL;
 937        } else {
  938                /* Address of a BPF helper call. Since it is part of the core
 939                 * kernel, it's always at a fixed location. __bpf_call_base
 940                 * and the helper with imm relative to it are both in core
 941                 * kernel.
 942                 */
 943                addr = (u8 *)__bpf_call_base + imm;
 944        }
 945
 946        *func_addr = (unsigned long)addr;
 947        return 0;
 948}
 949
 950static int bpf_jit_blind_insn(const struct bpf_insn *from,
 951                              const struct bpf_insn *aux,
 952                              struct bpf_insn *to_buff,
 953                              bool emit_zext)
 954{
 955        struct bpf_insn *to = to_buff;
 956        u32 imm_rnd = get_random_int();
 957        s16 off;
 958
 959        BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
 960        BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
 961
 962        /* Constraints on AX register:
 963         *
 964         * AX register is inaccessible from user space. It is mapped in
 965         * all JITs, and used here for constant blinding rewrites. It is
 966         * typically "stateless" meaning its contents are only valid within
 967         * the executed instruction, but not across several instructions.
 968         * There are a few exceptions however which are further detailed
 969         * below.
 970         *
 971         * Constant blinding is only used by JITs, not in the interpreter.
  972         * The interpreter uses AX on some occasions as a local temporary
 973         * register e.g. in DIV or MOD instructions.
 974         *
 975         * In restricted circumstances, the verifier can also use the AX
 976         * register for rewrites as long as they do not interfere with
 977         * the above cases!
 978         */
 979        if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
 980                goto out;
 981
 982        if (from->imm == 0 &&
 983            (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
 984             from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
 985                *to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
 986                goto out;
 987        }
 988
 989        switch (from->code) {
 990        case BPF_ALU | BPF_ADD | BPF_K:
 991        case BPF_ALU | BPF_SUB | BPF_K:
 992        case BPF_ALU | BPF_AND | BPF_K:
 993        case BPF_ALU | BPF_OR  | BPF_K:
 994        case BPF_ALU | BPF_XOR | BPF_K:
 995        case BPF_ALU | BPF_MUL | BPF_K:
 996        case BPF_ALU | BPF_MOV | BPF_K:
 997        case BPF_ALU | BPF_DIV | BPF_K:
 998        case BPF_ALU | BPF_MOD | BPF_K:
 999                *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1000                *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1001                *to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
1002                break;
1003
1004        case BPF_ALU64 | BPF_ADD | BPF_K:
1005        case BPF_ALU64 | BPF_SUB | BPF_K:
1006        case BPF_ALU64 | BPF_AND | BPF_K:
1007        case BPF_ALU64 | BPF_OR  | BPF_K:
1008        case BPF_ALU64 | BPF_XOR | BPF_K:
1009        case BPF_ALU64 | BPF_MUL | BPF_K:
1010        case BPF_ALU64 | BPF_MOV | BPF_K:
1011        case BPF_ALU64 | BPF_DIV | BPF_K:
1012        case BPF_ALU64 | BPF_MOD | BPF_K:
1013                *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1014                *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1015                *to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
1016                break;
1017
1018        case BPF_JMP | BPF_JEQ  | BPF_K:
1019        case BPF_JMP | BPF_JNE  | BPF_K:
1020        case BPF_JMP | BPF_JGT  | BPF_K:
1021        case BPF_JMP | BPF_JLT  | BPF_K:
1022        case BPF_JMP | BPF_JGE  | BPF_K:
1023        case BPF_JMP | BPF_JLE  | BPF_K:
1024        case BPF_JMP | BPF_JSGT | BPF_K:
1025        case BPF_JMP | BPF_JSLT | BPF_K:
1026        case BPF_JMP | BPF_JSGE | BPF_K:
1027        case BPF_JMP | BPF_JSLE | BPF_K:
1028        case BPF_JMP | BPF_JSET | BPF_K:
 1029                /* Account for the extra offset in case of a backjump. */
1030                off = from->off;
1031                if (off < 0)
1032                        off -= 2;
1033                *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1034                *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1035                *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
1036                break;
1037
1038        case BPF_JMP32 | BPF_JEQ  | BPF_K:
1039        case BPF_JMP32 | BPF_JNE  | BPF_K:
1040        case BPF_JMP32 | BPF_JGT  | BPF_K:
1041        case BPF_JMP32 | BPF_JLT  | BPF_K:
1042        case BPF_JMP32 | BPF_JGE  | BPF_K:
1043        case BPF_JMP32 | BPF_JLE  | BPF_K:
1044        case BPF_JMP32 | BPF_JSGT | BPF_K:
1045        case BPF_JMP32 | BPF_JSLT | BPF_K:
1046        case BPF_JMP32 | BPF_JSGE | BPF_K:
1047        case BPF_JMP32 | BPF_JSLE | BPF_K:
1048        case BPF_JMP32 | BPF_JSET | BPF_K:
 1049                /* Account for the extra offset in case of a backjump. */
1050                off = from->off;
1051                if (off < 0)
1052                        off -= 2;
1053                *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1054                *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1055                *to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
1056                                      off);
1057                break;
1058
1059        case BPF_LD | BPF_IMM | BPF_DW:
1060                *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
1061                *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1062                *to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
1063                *to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
1064                break;
1065        case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
1066                *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
1067                *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1068                if (emit_zext)
1069                        *to++ = BPF_ZEXT_REG(BPF_REG_AX);
1070                *to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
1071                break;
1072
1073        case BPF_ST | BPF_MEM | BPF_DW:
1074        case BPF_ST | BPF_MEM | BPF_W:
1075        case BPF_ST | BPF_MEM | BPF_H:
1076        case BPF_ST | BPF_MEM | BPF_B:
1077                *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1078                *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1079                *to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
1080                break;
1081        }
1082out:
1083        return to - to_buff;
1084}
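
/* For illustration: with blinding enabled, an immediate-based insn such
 * as BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 42) is rewritten into the
 * three-insn sequence
 *
 *	BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ 42);
 *	BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
 *	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_AX);
 *
 * so the user-supplied constant 42 never appears literally in the JITed
 * image.
 */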
1085
1086static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
1087                                              gfp_t gfp_extra_flags)
1088{
1089        gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
1090        struct bpf_prog *fp;
1091
1092        fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
1093        if (fp != NULL) {
1094                /* aux->prog still points to the fp_other one, so
1095                 * when promoting the clone to the real program,
1096                 * this still needs to be adapted.
1097                 */
1098                memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
1099        }
1100
1101        return fp;
1102}
1103
1104static void bpf_prog_clone_free(struct bpf_prog *fp)
1105{
1106        /* aux was stolen by the other clone, so we cannot free
1107         * it from this path! It will be freed eventually by the
1108         * other program on release.
1109         *
1110         * At this point, we don't need a deferred release since
1111         * clone is guaranteed to not be locked.
1112         */
1113        fp->aux = NULL;
1114        __bpf_prog_free(fp);
1115}
1116
1117void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
1118{
1119        /* We have to repoint aux->prog to self, as we don't
1120         * know whether fp here is the clone or the original.
1121         */
1122        fp->aux->prog = fp;
1123        bpf_prog_clone_free(fp_other);
1124}
1125
1126struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
1127{
1128        struct bpf_insn insn_buff[16], aux[2];
1129        struct bpf_prog *clone, *tmp;
1130        int insn_delta, insn_cnt;
1131        struct bpf_insn *insn;
1132        int i, rewritten;
1133
1134        if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
1135                return prog;
1136
1137        clone = bpf_prog_clone_create(prog, GFP_USER);
1138        if (!clone)
1139                return ERR_PTR(-ENOMEM);
1140
1141        insn_cnt = clone->len;
1142        insn = clone->insnsi;
1143
1144        for (i = 0; i < insn_cnt; i++, insn++) {
1145                /* We temporarily need to hold the original ld64 insn
1146                 * so that we can still access the first part in the
1147                 * second blinding run.
1148                 */
1149                if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
1150                    insn[1].code == 0)
1151                        memcpy(aux, insn, sizeof(aux));
1152
1153                rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
1154                                                clone->aux->verifier_zext);
1155                if (!rewritten)
1156                        continue;
1157
1158                tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
1159                if (IS_ERR(tmp)) {
1160                        /* Patching may have repointed aux->prog during
1161                         * realloc from the original one, so we need to
1162                         * fix it up here on error.
1163                         */
1164                        bpf_jit_prog_release_other(prog, clone);
1165                        return tmp;
1166                }
1167
1168                clone = tmp;
1169                insn_delta = rewritten - 1;
1170
1171                /* Walk new program and skip insns we just inserted. */
1172                insn = clone->insnsi + i + insn_delta;
1173                insn_cnt += insn_delta;
1174                i        += insn_delta;
1175        }
1176
1177        clone->blinded = 1;
1178        return clone;
1179}
1180#endif /* CONFIG_BPF_JIT */
1181
1182/* Base function for offset calculation. Needs to go into .text section,
1183 * therefore keeping it non-static as well; will also be used by JITs
1184 * anyway later on, so do not let the compiler omit it. This also needs
1185 * to go into kallsyms for correlation from e.g. bpftool, so naming
1186 * must not change.
1187 */
1188noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
1189{
1190        return 0;
1191}
1192EXPORT_SYMBOL_GPL(__bpf_call_base);
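
/* For illustration: helper calls are encoded relative to this base, i.e.
 * the verifier stores insn->imm = helper_address - __bpf_call_base, and
 * both the interpreter (JMP_CALL below) and bpf_jit_get_func_addr()
 * recover the target as __bpf_call_base + insn->imm.
 */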
1193
1194/* All UAPI available opcodes. */
1195#define BPF_INSN_MAP(INSN_2, INSN_3)            \
1196        /* 32 bit ALU operations. */            \
1197        /*   Register based. */                 \
1198        INSN_3(ALU, ADD,  X),                   \
1199        INSN_3(ALU, SUB,  X),                   \
1200        INSN_3(ALU, AND,  X),                   \
1201        INSN_3(ALU, OR,   X),                   \
1202        INSN_3(ALU, LSH,  X),                   \
1203        INSN_3(ALU, RSH,  X),                   \
1204        INSN_3(ALU, XOR,  X),                   \
1205        INSN_3(ALU, MUL,  X),                   \
1206        INSN_3(ALU, MOV,  X),                   \
1207        INSN_3(ALU, ARSH, X),                   \
1208        INSN_3(ALU, DIV,  X),                   \
1209        INSN_3(ALU, MOD,  X),                   \
1210        INSN_2(ALU, NEG),                       \
1211        INSN_3(ALU, END, TO_BE),                \
1212        INSN_3(ALU, END, TO_LE),                \
1213        /*   Immediate based. */                \
1214        INSN_3(ALU, ADD,  K),                   \
1215        INSN_3(ALU, SUB,  K),                   \
1216        INSN_3(ALU, AND,  K),                   \
1217        INSN_3(ALU, OR,   K),                   \
1218        INSN_3(ALU, LSH,  K),                   \
1219        INSN_3(ALU, RSH,  K),                   \
1220        INSN_3(ALU, XOR,  K),                   \
1221        INSN_3(ALU, MUL,  K),                   \
1222        INSN_3(ALU, MOV,  K),                   \
1223        INSN_3(ALU, ARSH, K),                   \
1224        INSN_3(ALU, DIV,  K),                   \
1225        INSN_3(ALU, MOD,  K),                   \
1226        /* 64 bit ALU operations. */            \
1227        /*   Register based. */                 \
1228        INSN_3(ALU64, ADD,  X),                 \
1229        INSN_3(ALU64, SUB,  X),                 \
1230        INSN_3(ALU64, AND,  X),                 \
1231        INSN_3(ALU64, OR,   X),                 \
1232        INSN_3(ALU64, LSH,  X),                 \
1233        INSN_3(ALU64, RSH,  X),                 \
1234        INSN_3(ALU64, XOR,  X),                 \
1235        INSN_3(ALU64, MUL,  X),                 \
1236        INSN_3(ALU64, MOV,  X),                 \
1237        INSN_3(ALU64, ARSH, X),                 \
1238        INSN_3(ALU64, DIV,  X),                 \
1239        INSN_3(ALU64, MOD,  X),                 \
1240        INSN_2(ALU64, NEG),                     \
1241        /*   Immediate based. */                \
1242        INSN_3(ALU64, ADD,  K),                 \
1243        INSN_3(ALU64, SUB,  K),                 \
1244        INSN_3(ALU64, AND,  K),                 \
1245        INSN_3(ALU64, OR,   K),                 \
1246        INSN_3(ALU64, LSH,  K),                 \
1247        INSN_3(ALU64, RSH,  K),                 \
1248        INSN_3(ALU64, XOR,  K),                 \
1249        INSN_3(ALU64, MUL,  K),                 \
1250        INSN_3(ALU64, MOV,  K),                 \
1251        INSN_3(ALU64, ARSH, K),                 \
1252        INSN_3(ALU64, DIV,  K),                 \
1253        INSN_3(ALU64, MOD,  K),                 \
1254        /* Call instruction. */                 \
1255        INSN_2(JMP, CALL),                      \
1256        /* Exit instruction. */                 \
1257        INSN_2(JMP, EXIT),                      \
1258        /* 32-bit Jump instructions. */         \
1259        /*   Register based. */                 \
1260        INSN_3(JMP32, JEQ,  X),                 \
1261        INSN_3(JMP32, JNE,  X),                 \
1262        INSN_3(JMP32, JGT,  X),                 \
1263        INSN_3(JMP32, JLT,  X),                 \
1264        INSN_3(JMP32, JGE,  X),                 \
1265        INSN_3(JMP32, JLE,  X),                 \
1266        INSN_3(JMP32, JSGT, X),                 \
1267        INSN_3(JMP32, JSLT, X),                 \
1268        INSN_3(JMP32, JSGE, X),                 \
1269        INSN_3(JMP32, JSLE, X),                 \
1270        INSN_3(JMP32, JSET, X),                 \
1271        /*   Immediate based. */                \
1272        INSN_3(JMP32, JEQ,  K),                 \
1273        INSN_3(JMP32, JNE,  K),                 \
1274        INSN_3(JMP32, JGT,  K),                 \
1275        INSN_3(JMP32, JLT,  K),                 \
1276        INSN_3(JMP32, JGE,  K),                 \
1277        INSN_3(JMP32, JLE,  K),                 \
1278        INSN_3(JMP32, JSGT, K),                 \
1279        INSN_3(JMP32, JSLT, K),                 \
1280        INSN_3(JMP32, JSGE, K),                 \
1281        INSN_3(JMP32, JSLE, K),                 \
1282        INSN_3(JMP32, JSET, K),                 \
1283        /* Jump instructions. */                \
1284        /*   Register based. */                 \
1285        INSN_3(JMP, JEQ,  X),                   \
1286        INSN_3(JMP, JNE,  X),                   \
1287        INSN_3(JMP, JGT,  X),                   \
1288        INSN_3(JMP, JLT,  X),                   \
1289        INSN_3(JMP, JGE,  X),                   \
1290        INSN_3(JMP, JLE,  X),                   \
1291        INSN_3(JMP, JSGT, X),                   \
1292        INSN_3(JMP, JSLT, X),                   \
1293        INSN_3(JMP, JSGE, X),                   \
1294        INSN_3(JMP, JSLE, X),                   \
1295        INSN_3(JMP, JSET, X),                   \
1296        /*   Immediate based. */                \
1297        INSN_3(JMP, JEQ,  K),                   \
1298        INSN_3(JMP, JNE,  K),                   \
1299        INSN_3(JMP, JGT,  K),                   \
1300        INSN_3(JMP, JLT,  K),                   \
1301        INSN_3(JMP, JGE,  K),                   \
1302        INSN_3(JMP, JLE,  K),                   \
1303        INSN_3(JMP, JSGT, K),                   \
1304        INSN_3(JMP, JSLT, K),                   \
1305        INSN_3(JMP, JSGE, K),                   \
1306        INSN_3(JMP, JSLE, K),                   \
1307        INSN_3(JMP, JSET, K),                   \
1308        INSN_2(JMP, JA),                        \
1309        /* Store instructions. */               \
1310        /*   Register based. */                 \
1311        INSN_3(STX, MEM,  B),                   \
1312        INSN_3(STX, MEM,  H),                   \
1313        INSN_3(STX, MEM,  W),                   \
1314        INSN_3(STX, MEM,  DW),                  \
1315        INSN_3(STX, XADD, W),                   \
1316        INSN_3(STX, XADD, DW),                  \
1317        /*   Immediate based. */                \
1318        INSN_3(ST, MEM, B),                     \
1319        INSN_3(ST, MEM, H),                     \
1320        INSN_3(ST, MEM, W),                     \
1321        INSN_3(ST, MEM, DW),                    \
1322        /* Load instructions. */                \
1323        /*   Register based. */                 \
1324        INSN_3(LDX, MEM, B),                    \
1325        INSN_3(LDX, MEM, H),                    \
1326        INSN_3(LDX, MEM, W),                    \
1327        INSN_3(LDX, MEM, DW),                   \
1328        /*   Immediate based. */                \
1329        INSN_3(LD, IMM, DW)
1330
1331bool bpf_opcode_in_insntable(u8 code)
1332{
1333#define BPF_INSN_2_TBL(x, y)    [BPF_##x | BPF_##y] = true
1334#define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
1335        static const bool public_insntable[256] = {
1336                [0 ... 255] = false,
1337                /* Now overwrite non-defaults ... */
1338                BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
1339                /* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
1340                [BPF_LD | BPF_ABS | BPF_B] = true,
1341                [BPF_LD | BPF_ABS | BPF_H] = true,
1342                [BPF_LD | BPF_ABS | BPF_W] = true,
1343                [BPF_LD | BPF_IND | BPF_B] = true,
1344                [BPF_LD | BPF_IND | BPF_H] = true,
1345                [BPF_LD | BPF_IND | BPF_W] = true,
1346        };
1347#undef BPF_INSN_3_TBL
1348#undef BPF_INSN_2_TBL
1349        return public_insntable[code];
1350}
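
/* For illustration: bpf_opcode_in_insntable(BPF_ALU64 | BPF_ADD | BPF_X)
 * is true, while bpf_opcode_in_insntable(BPF_JMP | BPF_TAIL_CALL) is
 * false, since tail calls are an internal rewrite of a helper call and
 * not part of the UAPI opcode space.
 */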
1351
1352#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1353u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
1354{
1355        memset(dst, 0, size);
1356        return -EFAULT;
1357}
1358
1359/**
 1360 *      ___bpf_prog_run - run eBPF program on a given context
1361 *      @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
1362 *      @insn: is the array of eBPF instructions
1363 *      @stack: is the eBPF storage stack
1364 *
1365 * Decode and execute eBPF instructions.
1366 */
1367static u64 __no_fgcse ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
1368{
1369#define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
1370#define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
1371        static const void * const jumptable[256] __annotate_jump_table = {
1372                [0 ... 255] = &&default_label,
1373                /* Now overwrite non-defaults ... */
1374                BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
1375                /* Non-UAPI available opcodes. */
1376                [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
1377                [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
1378                [BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
1379                [BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
1380                [BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
1381                [BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
1382        };
1383#undef BPF_INSN_3_LBL
1384#undef BPF_INSN_2_LBL
1385        u32 tail_call_cnt = 0;
1386
1387#define CONT     ({ insn++; goto select_insn; })
1388#define CONT_JMP ({ insn++; goto select_insn; })
1389
1390select_insn:
1391        goto *jumptable[insn->code];
1392
1393        /* ALU */
1394#define ALU(OPCODE, OP)                 \
1395        ALU64_##OPCODE##_X:             \
1396                DST = DST OP SRC;       \
1397                CONT;                   \
1398        ALU_##OPCODE##_X:               \
1399                DST = (u32) DST OP (u32) SRC;   \
1400                CONT;                   \
1401        ALU64_##OPCODE##_K:             \
1402                DST = DST OP IMM;               \
1403                CONT;                   \
1404        ALU_##OPCODE##_K:               \
1405                DST = (u32) DST OP (u32) IMM;   \
1406                CONT;
1407
1408        ALU(ADD,  +)
1409        ALU(SUB,  -)
1410        ALU(AND,  &)
1411        ALU(OR,   |)
1412        ALU(LSH, <<)
1413        ALU(RSH, >>)
1414        ALU(XOR,  ^)
1415        ALU(MUL,  *)
1416#undef ALU
1417        ALU_NEG:
1418                DST = (u32) -DST;
1419                CONT;
1420        ALU64_NEG:
1421                DST = -DST;
1422                CONT;
1423        ALU_MOV_X:
1424                DST = (u32) SRC;
1425                CONT;
1426        ALU_MOV_K:
1427                DST = (u32) IMM;
1428                CONT;
1429        ALU64_MOV_X:
1430                DST = SRC;
1431                CONT;
1432        ALU64_MOV_K:
1433                DST = IMM;
1434                CONT;
1435        LD_IMM_DW:
1436                DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
1437                insn++;
1438                CONT;
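                /* For illustration: a 64-bit load like r1 = 0x1122334455667788
                 * is encoded as two insns, with insn[0].imm = 0x55667788 (low
                 * 32 bits) and insn[1].imm = 0x11223344 (high 32 bits), which
                 * the expression above stitches back together.
                 */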
1439        ALU_ARSH_X:
1440                DST = (u64) (u32) (((s32) DST) >> SRC);
1441                CONT;
1442        ALU_ARSH_K:
1443                DST = (u64) (u32) (((s32) DST) >> IMM);
1444                CONT;
1445        ALU64_ARSH_X:
1446                (*(s64 *) &DST) >>= SRC;
1447                CONT;
1448        ALU64_ARSH_K:
1449                (*(s64 *) &DST) >>= IMM;
1450                CONT;
1451        ALU64_MOD_X:
1452                div64_u64_rem(DST, SRC, &AX);
1453                DST = AX;
1454                CONT;
1455        ALU_MOD_X:
1456                AX = (u32) DST;
1457                DST = do_div(AX, (u32) SRC);
1458                CONT;
1459        ALU64_MOD_K:
1460                div64_u64_rem(DST, IMM, &AX);
1461                DST = AX;
1462                CONT;
1463        ALU_MOD_K:
1464                AX = (u32) DST;
1465                DST = do_div(AX, (u32) IMM);
1466                CONT;
1467        ALU64_DIV_X:
1468                DST = div64_u64(DST, SRC);
1469                CONT;
1470        ALU_DIV_X:
1471                AX = (u32) DST;
1472                do_div(AX, (u32) SRC);
1473                DST = (u32) AX;
1474                CONT;
1475        ALU64_DIV_K:
1476                DST = div64_u64(DST, IMM);
1477                CONT;
1478        ALU_DIV_K:
1479                AX = (u32) DST;
1480                do_div(AX, (u32) IMM);
1481                DST = (u32) AX;
1482                CONT;
1483        ALU_END_TO_BE:
1484                switch (IMM) {
1485                case 16:
1486                        DST = (__force u16) cpu_to_be16(DST);
1487                        break;
1488                case 32:
1489                        DST = (__force u32) cpu_to_be32(DST);
1490                        break;
1491                case 64:
1492                        DST = (__force u64) cpu_to_be64(DST);
1493                        break;
1494                }
1495                CONT;
1496        ALU_END_TO_LE:
1497                switch (IMM) {
1498                case 16:
1499                        DST = (__force u16) cpu_to_le16(DST);
1500                        break;
1501                case 32:
1502                        DST = (__force u32) cpu_to_le32(DST);
1503                        break;
1504                case 64:
1505                        DST = (__force u64) cpu_to_le64(DST);
1506                        break;
1507                }
1508                CONT;
1509
1510        /* CALL */
1511        JMP_CALL:
1512                /* Function call scratches BPF_R1-BPF_R5 registers,
1513                 * preserves BPF_R6-BPF_R9, and stores return value
1514                 * into BPF_R0.
1515                 */
1516                BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
1517                                                       BPF_R4, BPF_R5);
1518                CONT;
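        /* Note on the call above: insn->imm is not an absolute address
         * but the helper's offset from __bpf_call_base; the verifier
         * rewrites imm to (helper - __bpf_call_base) at load time, so
         * adding __bpf_call_base back recovers the function pointer.
         */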
1519
1520        JMP_CALL_ARGS:
1521                BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
1522                                                            BPF_R3, BPF_R4,
1523                                                            BPF_R5,
1524                                                            insn + insn->off + 1);
1525                CONT;
1526
1527        JMP_TAIL_CALL: {
1528                struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
1529                struct bpf_array *array = container_of(map, struct bpf_array, map);
1530                struct bpf_prog *prog;
1531                u32 index = BPF_R3;
1532
1533                if (unlikely(index >= array->map.max_entries))
1534                        goto out;
1535                if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
1536                        goto out;
1537
1538                tail_call_cnt++;
1539
1540                prog = READ_ONCE(array->ptrs[index]);
1541                if (!prog)
1542                        goto out;
1543
1544                /* ARG1 at this point is guaranteed to point to CTX from
1545                 * the verifier side due to the fact that the tail call is
1546                 * handled like a helper, that is, bpf_tail_call_proto,
1547                 * where arg1_type is ARG_PTR_TO_CTX.
1548                 */
1549                insn = prog->insnsi;
1550                goto select_insn;
1551out:
1552                CONT;
1553        }
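        /* From the program side this path is reached via the
         * bpf_tail_call() helper; a minimal sketch, assuming a
         * BPF_MAP_TYPE_PROG_ARRAY map called 'jmp_table':
         *
         *      bpf_tail_call(ctx, &jmp_table, index);
         *      // only reached if the tail call failed: bad index,
         *      // empty slot, or the MAX_TAIL_CALL_CNT limit hit
         *
         * On success control does not return; the interpreter above
         * simply continues at the target program's first instruction.
         */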
1554        JMP_JA:
1555                insn += insn->off;
1556                CONT;
1557        JMP_EXIT:
1558                return BPF_R0;
1559        /* JMP */
1560#define COND_JMP(SIGN, OPCODE, CMP_OP)                          \
1561        JMP_##OPCODE##_X:                                       \
1562                if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) {     \
1563                        insn += insn->off;                      \
1564                        CONT_JMP;                               \
1565                }                                               \
1566                CONT;                                           \
1567        JMP32_##OPCODE##_X:                                     \
1568                if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) {     \
1569                        insn += insn->off;                      \
1570                        CONT_JMP;                               \
1571                }                                               \
1572                CONT;                                           \
1573        JMP_##OPCODE##_K:                                       \
1574                if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) {     \
1575                        insn += insn->off;                      \
1576                        CONT_JMP;                               \
1577                }                                               \
1578                CONT;                                           \
1579        JMP32_##OPCODE##_K:                                     \
1580                if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) {     \
1581                        insn += insn->off;                      \
1582                        CONT_JMP;                               \
1583                }                                               \
1584                CONT;
1585        COND_JMP(u, JEQ, ==)
1586        COND_JMP(u, JNE, !=)
1587        COND_JMP(u, JGT, >)
1588        COND_JMP(u, JLT, <)
1589        COND_JMP(u, JGE, >=)
1590        COND_JMP(u, JLE, <=)
1591        COND_JMP(u, JSET, &)
1592        COND_JMP(s, JSGT, >)
1593        COND_JMP(s, JSLT, <)
1594        COND_JMP(s, JSGE, >=)
1595        COND_JMP(s, JSLE, <=)
1596#undef COND_JMP
1597        /* STX and ST and LDX */
1598#define LDST(SIZEOP, SIZE)                                              \
1599        STX_MEM_##SIZEOP:                                               \
1600                *(SIZE *)(unsigned long) (DST + insn->off) = SRC;       \
1601                CONT;                                                   \
1602        ST_MEM_##SIZEOP:                                                \
1603                *(SIZE *)(unsigned long) (DST + insn->off) = IMM;       \
1604                CONT;                                                   \
1605        LDX_MEM_##SIZEOP:                                               \
1606                DST = *(SIZE *)(unsigned long) (SRC + insn->off);       \
1607                CONT;
1608
1609        LDST(B,   u8)
1610        LDST(H,  u16)
1611        LDST(W,  u32)
1612        LDST(DW, u64)
1613#undef LDST
1614#define LDX_PROBE(SIZEOP, SIZE)                                                 \
1615        LDX_PROBE_MEM_##SIZEOP:                                                 \
1616                bpf_probe_read_kernel(&DST, SIZE, (const void *)(long) (SRC + insn->off));      \
1617                CONT;
1618        LDX_PROBE(B,  1)
1619        LDX_PROBE(H,  2)
1620        LDX_PROBE(W,  4)
1621        LDX_PROBE(DW, 8)
1622#undef LDX_PROBE
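        /* LDX_PROBE_MEM_* backs the BPF_PROBE_MEM load variant: loads
         * whose pointer the verifier could not prove safe (e.g. pointer
         * walks in BTF-enabled tracing programs) go through
         * bpf_probe_read_kernel() so that a faulting address is handled
         * gracefully instead of crashing the kernel.
         */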
1623
1624        STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
1625                atomic_add((u32) SRC, (atomic_t *)(unsigned long)
1626                           (DST + insn->off));
1627                CONT;
1628        STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
1629                atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
1630                             (DST + insn->off));
1631                CONT;
1632
1633        default_label:
1634                /* If we ever reach this, we have a bug somewhere. Die hard here
1635                 * instead of just returning 0; we could be somewhere in a subprog,
1636                 * so execution could otherwise continue, which we do /not/ want.
1637                 *
1638                 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
1639                 */
1640                pr_warn("BPF interpreter: unknown opcode %02x\n", insn->code);
1641                BUG_ON(1);
1642                return 0;
1643}
1644
1645#define PROG_NAME(stack_size) __bpf_prog_run##stack_size
1646#define DEFINE_BPF_PROG_RUN(stack_size) \
1647static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
1648{ \
1649        u64 stack[stack_size / sizeof(u64)]; \
1650        u64 regs[MAX_BPF_EXT_REG]; \
1651\
1652        FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1653        ARG1 = (u64) (unsigned long) ctx; \
1654        return ___bpf_prog_run(regs, insn, stack); \
1655}
1656
1657#define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
1658#define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
1659static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
1660                                      const struct bpf_insn *insn) \
1661{ \
1662        u64 stack[stack_size / sizeof(u64)]; \
1663        u64 regs[MAX_BPF_EXT_REG]; \
1664\
1665        FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1666        BPF_R1 = r1; \
1667        BPF_R2 = r2; \
1668        BPF_R3 = r3; \
1669        BPF_R4 = r4; \
1670        BPF_R5 = r5; \
1671        return ___bpf_prog_run(regs, insn, stack); \
1672}
1673
1674#define EVAL1(FN, X) FN(X)
1675#define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
1676#define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
1677#define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
1678#define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
1679#define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
1680
1681EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
1682EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
1683EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
1684
1685EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
1686EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
1687EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
1688
1689#define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
1690
1691static unsigned int (*interpreters[])(const void *ctx,
1692                                      const struct bpf_insn *insn) = {
1693EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1694EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1695EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1696};
1697#undef PROG_NAME_LIST
1698#define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
1699static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
1700                                  const struct bpf_insn *insn) = {
1701EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1702EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1703EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1704};
1705#undef PROG_NAME_LIST
1706
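/* For interpreted bpf-to-bpf calls the verifier patches the pseudo call
 * into BPF_JMP | BPF_CALL_ARGS: insn->off keeps the original callee
 * offset and insn->imm becomes the stack-size-specific
 * __bpf_prog_run_args*() interpreter, encoded as an offset from
 * __bpf_call_base_args, which JMP_CALL_ARGS above adds back before
 * jumping into the callee.
 */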
1707void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
1708{
1709        stack_depth = max_t(u32, stack_depth, 1);
1710        insn->off = (s16) insn->imm;
1711        insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
1712                __bpf_call_base_args;
1713        insn->code = BPF_JMP | BPF_CALL_ARGS;
1714}
1715
1716#else
1717static unsigned int __bpf_prog_ret0_warn(const void *ctx,
1718                                         const struct bpf_insn *insn)
1719{
1720        /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
1721         * is not working properly, so warn about it!
1722         */
1723        WARN_ON_ONCE(1);
1724        return 0;
1725}
1726#endif
1727
1728bool bpf_prog_array_compatible(struct bpf_array *array,
1729                               const struct bpf_prog *fp)
1730{
1731        if (fp->kprobe_override)
1732                return false;
1733
1734        if (!array->aux->type) {
1735                /* There's no owner yet where we could check for
1736                 * compatibility.
1737                 */
1738                array->aux->type  = fp->type;
1739                array->aux->jited = fp->jited;
1740                return true;
1741        }
1742
1743        return array->aux->type  == fp->type &&
1744               array->aux->jited == fp->jited;
1745}
1746
1747static int bpf_check_tail_call(const struct bpf_prog *fp)
1748{
1749        struct bpf_prog_aux *aux = fp->aux;
1750        int i;
1751
1752        for (i = 0; i < aux->used_map_cnt; i++) {
1753                struct bpf_map *map = aux->used_maps[i];
1754                struct bpf_array *array;
1755
1756                if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
1757                        continue;
1758
1759                array = container_of(map, struct bpf_array, map);
1760                if (!bpf_prog_array_compatible(array, fp))
1761                        return -EINVAL;
1762        }
1763
1764        return 0;
1765}
1766
1767static void bpf_prog_select_func(struct bpf_prog *fp)
1768{
1769#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1770        u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
1771
1772        fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
1773#else
1774        fp->bpf_func = __bpf_prog_ret0_warn;
1775#endif
1776}
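/* Worked example for the selection above: stack_depth == 100 rounds up
 * to 128, giving index 128 / 32 - 1 == 3, i.e. __bpf_prog_run128(), the
 * smallest generated interpreter whose scratch stack covers the program.
 */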
1777
1778/**
1779 *      bpf_prog_select_runtime - select exec runtime for BPF program
1780 *      @fp: bpf_prog populated with internal BPF program
1781 *      @err: pointer to error variable
1782 *
1783 * Try to JIT the eBPF program; if the JIT is not available, use the interpreter.
1784 * The BPF program will be executed via BPF_PROG_RUN() macro.
1785 */
1786struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
1787{
1788        /* In case of BPF-to-BPF calls, the verifier did all the prep
1789         * work with regard to JITing, etc.
1790         */
1791        if (fp->bpf_func)
1792                goto finalize;
1793
1794        bpf_prog_select_func(fp);
1795
1796        /* eBPF JITs can rewrite the program in case constant
1797         * blinding is active. However, in case of error during
1798         * blinding, bpf_int_jit_compile() must always return a
1799         * valid program, which in this case would simply not
1800         * be JITed, but falls back to the interpreter.
1801         * be JITed, but fall back to the interpreter.
1802        if (!bpf_prog_is_dev_bound(fp->aux)) {
1803                *err = bpf_prog_alloc_jited_linfo(fp);
1804                if (*err)
1805                        return fp;
1806
1807                fp = bpf_int_jit_compile(fp);
1808                if (!fp->jited) {
1809                        bpf_prog_free_jited_linfo(fp);
1810#ifdef CONFIG_BPF_JIT_ALWAYS_ON
1811                        *err = -ENOTSUPP;
1812                        return fp;
1813#endif
1814                } else {
1815                        bpf_prog_free_unused_jited_linfo(fp);
1816                }
1817        } else {
1818                *err = bpf_prog_offload_compile(fp);
1819                if (*err)
1820                        return fp;
1821        }
1822
1823finalize:
1824        bpf_prog_lock_ro(fp);
1825
1826        /* The tail call compatibility check can only be done at
1827         * this late stage as we need to determine whether we deal
1828         * with JITed or non-JITed program concatenations, and not
1829         * all eBPF JITs might immediately support all features.
1830         */
1831        *err = bpf_check_tail_call(fp);
1832
1833        return fp;
1834}
1835EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
1836
1837static unsigned int __bpf_prog_ret1(const void *ctx,
1838                                    const struct bpf_insn *insn)
1839{
1840        return 1;
1841}
1842
1843static struct bpf_prog_dummy {
1844        struct bpf_prog prog;
1845} dummy_bpf_prog = {
1846        .prog = {
1847                .bpf_func = __bpf_prog_ret1,
1848        },
1849};
1850
1851/* To avoid allocating an empty bpf_prog_array for cgroups that don't
1852 * have any bpf program attached, use one global 'empty_prog_array'.
1853 * It will not be modified by the caller of bpf_prog_array_alloc()
1854 * (since the caller requested prog_cnt == 0), and that pointer
1855 * should still be 'freed' by bpf_prog_array_free().
1856 */
1857static struct {
1858        struct bpf_prog_array hdr;
1859        struct bpf_prog *null_prog;
1860} empty_prog_array = {
1861        .null_prog = NULL,
1862};
1863
1864struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
1865{
1866        if (prog_cnt)
1867                return kzalloc(sizeof(struct bpf_prog_array) +
1868                               sizeof(struct bpf_prog_array_item) *
1869                               (prog_cnt + 1),
1870                               flags);
1871
1872        return &empty_prog_array.hdr;
1873}
1874
1875void bpf_prog_array_free(struct bpf_prog_array *progs)
1876{
1877        if (!progs || progs == &empty_prog_array.hdr)
1878                return;
1879        kfree_rcu(progs, rcu);
1880}
1881
1882int bpf_prog_array_length(struct bpf_prog_array *array)
1883{
1884        struct bpf_prog_array_item *item;
1885        u32 cnt = 0;
1886
1887        for (item = array->items; item->prog; item++)
1888                if (item->prog != &dummy_bpf_prog.prog)
1889                        cnt++;
1890        return cnt;
1891}
1892
1893bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
1894{
1895        struct bpf_prog_array_item *item;
1896
1897        for (item = array->items; item->prog; item++)
1898                if (item->prog != &dummy_bpf_prog.prog)
1899                        return false;
1900        return true;
1901}
1902
1903static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
1904                                     u32 *prog_ids,
1905                                     u32 request_cnt)
1906{
1907        struct bpf_prog_array_item *item;
1908        int i = 0;
1909
1910        for (item = array->items; item->prog; item++) {
1911                if (item->prog == &dummy_bpf_prog.prog)
1912                        continue;
1913                prog_ids[i] = item->prog->aux->id;
1914                if (++i == request_cnt) {
1915                        item++;
1916                        break;
1917                }
1918        }
1919
1920        return !!(item->prog);
1921}
1922
1923int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
1924                                __u32 __user *prog_ids, u32 cnt)
1925{
1926        unsigned long err = 0;
1927        bool nospc;
1928        u32 *ids;
1929
1930        /* users of this function are doing:
1931         * cnt = bpf_prog_array_length();
1932         * if (cnt > 0)
1933         *     bpf_prog_array_copy_to_user(..., cnt);
1934         * so the kcalloc below doesn't need an extra cnt > 0 check.
1935         */
1936        ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
1937        if (!ids)
1938                return -ENOMEM;
1939        nospc = bpf_prog_array_copy_core(array, ids, cnt);
1940        err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
1941        kfree(ids);
1942        if (err)
1943                return -EFAULT;
1944        if (nospc)
1945                return -ENOSPC;
1946        return 0;
1947}
1948
1949void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
1950                                struct bpf_prog *old_prog)
1951{
1952        struct bpf_prog_array_item *item;
1953
1954        for (item = array->items; item->prog; item++)
1955                if (item->prog == old_prog) {
1956                        WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
1957                        break;
1958                }
1959}
1960
1961int bpf_prog_array_copy(struct bpf_prog_array *old_array,
1962                        struct bpf_prog *exclude_prog,
1963                        struct bpf_prog *include_prog,
1964                        struct bpf_prog_array **new_array)
1965{
1966        int new_prog_cnt, carry_prog_cnt = 0;
1967        struct bpf_prog_array_item *existing;
1968        struct bpf_prog_array *array;
1969        bool found_exclude = false;
1970        int new_prog_idx = 0;
1971
1972        /* Figure out how many existing progs we need to carry over to
1973         * the new array.
1974         */
1975        if (old_array) {
1976                existing = old_array->items;
1977                for (; existing->prog; existing++) {
1978                        if (existing->prog == exclude_prog) {
1979                                found_exclude = true;
1980                                continue;
1981                        }
1982                        if (existing->prog != &dummy_bpf_prog.prog)
1983                                carry_prog_cnt++;
1984                        if (existing->prog == include_prog)
1985                                return -EEXIST;
1986                }
1987        }
1988
1989        if (exclude_prog && !found_exclude)
1990                return -ENOENT;
1991
1992        /* How many progs (not NULL) will be in the new array? */
1993        new_prog_cnt = carry_prog_cnt;
1994        if (include_prog)
1995                new_prog_cnt += 1;
1996
1997        /* Do we have any prog (not NULL) in the new array? */
1998        if (!new_prog_cnt) {
1999                *new_array = NULL;
2000                return 0;
2001        }
2002
2003        /* +1 as the end of prog_array is marked with NULL */
2004        array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
2005        if (!array)
2006                return -ENOMEM;
2007
2008        /* Fill in the new prog array */
2009        if (carry_prog_cnt) {
2010                existing = old_array->items;
2011                for (; existing->prog; existing++)
2012                        if (existing->prog != exclude_prog &&
2013                            existing->prog != &dummy_bpf_prog.prog) {
2014                                array->items[new_prog_idx++].prog =
2015                                        existing->prog;
2016                        }
2017        }
2018        if (include_prog)
2019                array->items[new_prog_idx++].prog = include_prog;
2020        array->items[new_prog_idx].prog = NULL;
2021        *new_array = array;
2022        return 0;
2023}
2024
2025int bpf_prog_array_copy_info(struct bpf_prog_array *array,
2026                             u32 *prog_ids, u32 request_cnt,
2027                             u32 *prog_cnt)
2028{
2029        u32 cnt = 0;
2030
2031        if (array)
2032                cnt = bpf_prog_array_length(array);
2033
2034        *prog_cnt = cnt;
2035
2036        /* return early if user requested only program count or nothing to copy */
2037        if (!request_cnt || !cnt)
2038                return 0;
2039
2040        /* this function is called under trace/bpf_trace.c: bpf_event_mutex */
2041        return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
2042                                                                     : 0;
2043}
2044
2045static void bpf_free_cgroup_storage(struct bpf_prog_aux *aux)
2046{
2047        enum bpf_cgroup_storage_type stype;
2048
2049        for_each_cgroup_storage_type(stype) {
2050                if (!aux->cgroup_storage[stype])
2051                        continue;
2052                bpf_cgroup_storage_release(aux, aux->cgroup_storage[stype]);
2053        }
2054}
2055
2056void __bpf_free_used_maps(struct bpf_prog_aux *aux,
2057                          struct bpf_map **used_maps, u32 len)
2058{
2059        struct bpf_map *map;
2060        u32 i;
2061
2062        bpf_free_cgroup_storage(aux);
2063        for (i = 0; i < len; i++) {
2064                map = used_maps[i];
2065                if (map->ops->map_poke_untrack)
2066                        map->ops->map_poke_untrack(map, aux);
2067                bpf_map_put(map);
2068        }
2069}
2070
2071static void bpf_free_used_maps(struct bpf_prog_aux *aux)
2072{
2073        __bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt);
2074        kfree(aux->used_maps);
2075}
2076
2077static void bpf_prog_free_deferred(struct work_struct *work)
2078{
2079        struct bpf_prog_aux *aux;
2080        int i;
2081
2082        aux = container_of(work, struct bpf_prog_aux, work);
2083        bpf_free_used_maps(aux);
2084        if (bpf_prog_is_dev_bound(aux))
2085                bpf_prog_offload_destroy(aux->prog);
2086#ifdef CONFIG_PERF_EVENTS
2087        if (aux->prog->has_callchain_buf)
2088                put_callchain_buffers();
2089#endif
2090        bpf_trampoline_put(aux->trampoline);
2091        for (i = 0; i < aux->func_cnt; i++)
2092                bpf_jit_free(aux->func[i]);
2093        if (aux->func_cnt) {
2094                kfree(aux->func);
2095                bpf_prog_unlock_free(aux->prog);
2096        } else {
2097                bpf_jit_free(aux->prog);
2098        }
2099}
2100
2101/* Free internal BPF program */
2102void bpf_prog_free(struct bpf_prog *fp)
2103{
2104        struct bpf_prog_aux *aux = fp->aux;
2105
2106        if (aux->linked_prog)
2107                bpf_prog_put(aux->linked_prog);
2108        INIT_WORK(&aux->work, bpf_prog_free_deferred);
2109        schedule_work(&aux->work);
2110}
2111EXPORT_SYMBOL_GPL(bpf_prog_free);
2112
2113/* RNG for unprivileged user space with separated state from prandom_u32(). */
2114static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
2115
2116void bpf_user_rnd_init_once(void)
2117{
2118        prandom_init_once(&bpf_user_rnd_state);
2119}
2120
2121BPF_CALL_0(bpf_user_rnd_u32)
2122{
2123        /* Should someone ever have the rather unwise idea to use some
2124         * of the registers passed into this function, then note that
2125         * this function is called from native eBPF and classic-to-eBPF
2126         * transformations. Register assignments from both sides are
2127         * different, e.g. classic always sets fn(ctx, A, X) here.
2128         */
2129        struct rnd_state *state;
2130        u32 res;
2131
2132        state = &get_cpu_var(bpf_user_rnd_state);
2133        res = prandom_u32_state(state);
2134        put_cpu_var(bpf_user_rnd_state);
2135
2136        return res;
2137}
2138
2139/* Weak definitions of helper functions in case we don't have bpf syscall. */
2140const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
2141const struct bpf_func_proto bpf_map_update_elem_proto __weak;
2142const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
2143const struct bpf_func_proto bpf_map_push_elem_proto __weak;
2144const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
2145const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
2146const struct bpf_func_proto bpf_spin_lock_proto __weak;
2147const struct bpf_func_proto bpf_spin_unlock_proto __weak;
2148const struct bpf_func_proto bpf_jiffies64_proto __weak;
2149
2150const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
2151const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
2152const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
2153const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
2154
2155const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
2156const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
2157const struct bpf_func_proto bpf_get_current_comm_proto __weak;
2158const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
2159const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak;
2160const struct bpf_func_proto bpf_get_local_storage_proto __weak;
2161const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak;
2162
2163const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
2164{
2165        return NULL;
2166}
2167
2168u64 __weak
2169bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
2170                 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
2171{
2172        return -ENOTSUPP;
2173}
2174EXPORT_SYMBOL_GPL(bpf_event_output);
2175
2176/* Always built-in helper functions. */
2177const struct bpf_func_proto bpf_tail_call_proto = {
2178        .func           = NULL,
2179        .gpl_only       = false,
2180        .ret_type       = RET_VOID,
2181        .arg1_type      = ARG_PTR_TO_CTX,
2182        .arg2_type      = ARG_CONST_MAP_PTR,
2183        .arg3_type      = ARG_ANYTHING,
2184};
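/* Note: .func is NULL above on purpose. bpf_tail_call() is never called
 * as an ordinary helper; the verifier recognizes it and lowers the call
 * to the dedicated tail call path (JMP_TAIL_CALL in the interpreter, or
 * a direct jump emitted by the JIT).
 */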
2185
2186/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
2187 * It is encouraged to implement bpf_int_jit_compile() instead, so that
2188 * eBPF and implicitly also cBPF can get JITed!
2189 */
2190struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
2191{
2192        return prog;
2193}
2194
2195/* Stub for JITs that support eBPF. All cBPF code gets transformed into
2196 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
2197 */
2198void __weak bpf_jit_compile(struct bpf_prog *prog)
2199{
2200}
2201
2202bool __weak bpf_helper_changes_pkt_data(void *func)
2203{
2204        return false;
2205}
2206
2207/* Return TRUE if the JIT backend wants the verifier to enable sub-register
2208 * usage analysis code and wants explicit zero extension inserted by the verifier.
2209 * Otherwise, return FALSE.
2210 */
2211bool __weak bpf_jit_needs_zext(void)
2212{
2213        return false;
2214}
2215
2216/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
2217 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
2218 */
2219int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
2220                         int len)
2221{
2222        return -EFAULT;
2223}
2224
2225int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
2226                              void *addr1, void *addr2)
2227{
2228        return -ENOTSUPP;
2229}
2230
2231DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
2232EXPORT_SYMBOL(bpf_stats_enabled_key);
2233
2234/* All definitions of tracepoints related to BPF. */
2235#define CREATE_TRACE_POINTS
2236#include <linux/bpf_trace.h>
2237
2238EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
2239EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);
2240