linux/kernel/bpf/core.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *      Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *      Jay Schulist <jschlst@samba.org>
 *      Alexei Starovoitov <ast@plumgrid.com>
 *      Daniel Borkmann <dborkman@redhat.com>
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <uapi/linux/btf.h>
#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/moduleloader.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/frame.h>
#include <linux/rbtree_latch.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/perf_event.h>

#include <asm/unaligned.h>

/* Registers */
#define BPF_R0  regs[BPF_REG_0]
#define BPF_R1  regs[BPF_REG_1]
#define BPF_R2  regs[BPF_REG_2]
#define BPF_R3  regs[BPF_REG_3]
#define BPF_R4  regs[BPF_REG_4]
#define BPF_R5  regs[BPF_REG_5]
#define BPF_R6  regs[BPF_REG_6]
#define BPF_R7  regs[BPF_REG_7]
#define BPF_R8  regs[BPF_REG_8]
#define BPF_R9  regs[BPF_REG_9]
#define BPF_R10 regs[BPF_REG_10]

/* Named registers */
#define DST     regs[insn->dst_reg]
#define SRC     regs[insn->src_reg]
#define FP      regs[BPF_REG_FP]
#define AX      regs[BPF_REG_AX]
#define ARG1    regs[BPF_REG_ARG1]
#define CTX     regs[BPF_REG_CTX]
#define IMM     insn->imm

/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
        u8 *ptr = NULL;

        if (k >= SKF_NET_OFF)
                ptr = skb_network_header(skb) + k - SKF_NET_OFF;
        else if (k >= SKF_LL_OFF)
                ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

        if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
                return ptr;

        return NULL;
}
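
/* Illustration (not part of the original source): a classic BPF load with a
 * "negative" offset such as k = SKF_NET_OFF + 8 resolves to byte 8 of the
 * network header, i.e. skb_network_header(skb) + 8, and is only returned
 * when the whole [ptr, ptr + size) range lies within the linear skb data,
 * between skb->head and skb_tail_pointer(skb).
 */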

struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
{
        gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
        struct bpf_prog_aux *aux;
        struct bpf_prog *fp;

        size = round_up(size, PAGE_SIZE);
        fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
        if (fp == NULL)
                return NULL;

        aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
        if (aux == NULL) {
                vfree(fp);
                return NULL;
        }

        fp->pages = size / PAGE_SIZE;
        fp->aux = aux;
        fp->aux->prog = fp;
        fp->jit_requested = ebpf_jit_enabled();

        INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);

        return fp;
}

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
{
        gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
        struct bpf_prog *prog;
        int cpu;

        prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
        if (!prog)
                return NULL;

        prog->aux->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
        if (!prog->aux->stats) {
                kfree(prog->aux);
                vfree(prog);
                return NULL;
        }

        for_each_possible_cpu(cpu) {
                struct bpf_prog_stats *pstats;

                pstats = per_cpu_ptr(prog->aux->stats, cpu);
                u64_stats_init(&pstats->syncp);
        }
        return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_alloc);

int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
{
        if (!prog->aux->nr_linfo || !prog->jit_requested)
                return 0;

        prog->aux->jited_linfo = kcalloc(prog->aux->nr_linfo,
                                         sizeof(*prog->aux->jited_linfo),
                                         GFP_KERNEL | __GFP_NOWARN);
        if (!prog->aux->jited_linfo)
                return -ENOMEM;

        return 0;
}

void bpf_prog_free_jited_linfo(struct bpf_prog *prog)
{
        kfree(prog->aux->jited_linfo);
        prog->aux->jited_linfo = NULL;
}

void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog)
{
        if (prog->aux->jited_linfo && !prog->aux->jited_linfo[0])
                bpf_prog_free_jited_linfo(prog);
}

/* The jit engine is responsible to provide an array
 * for insn_off to the jited_off mapping (insn_to_jit_off).
 *
 * The idx to this array is the insn_off.  Hence, the insn_off
 * here is relative to the prog itself instead of the main prog.
 * This array has one entry for each xlated bpf insn.
 *
 * jited_off is the byte off to the last byte of the jited insn.
 *
 * Hence, with
 * insn_start:
 *      The first bpf insn off of the prog.  The insn off
 *      here is relative to the main prog.
 *      e.g. if prog is a subprog, insn_start > 0
 * linfo_idx:
 *      The prog's idx to prog->aux->linfo and jited_linfo
 *
 * jited_linfo[linfo_idx] = prog->bpf_func
 *
 * For i > linfo_idx,
 *
 * jited_linfo[i] = prog->bpf_func +
 *      insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
 */
void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
                               const u32 *insn_to_jit_off)
{
        u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
        const struct bpf_line_info *linfo;
        void **jited_linfo;

        if (!prog->aux->jited_linfo)
                /* Userspace did not provide linfo */
                return;

        linfo_idx = prog->aux->linfo_idx;
        linfo = &prog->aux->linfo[linfo_idx];
        insn_start = linfo[0].insn_off;
        insn_end = insn_start + prog->len;

        jited_linfo = &prog->aux->jited_linfo[linfo_idx];
        jited_linfo[0] = prog->bpf_func;

        nr_linfo = prog->aux->nr_linfo - linfo_idx;

        for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
                /* The verifier ensures that linfo[i].insn_off is
                 * strictly increasing
                 */
                jited_linfo[i] = prog->bpf_func +
                        insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
}
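
/* Worked example (values purely illustrative, not from the original source):
 * for a subprog whose linfo insn_offs are {4, 6, 9} relative to the main
 * prog, insn_start is 4 and the fill above yields
 *
 *   jited_linfo[0] = prog->bpf_func
 *   jited_linfo[1] = prog->bpf_func + insn_to_jit_off[6 - 4 - 1]
 *   jited_linfo[2] = prog->bpf_func + insn_to_jit_off[9 - 4 - 1]
 *
 * i.e. each entry points past the jited image of the insn preceding that
 * line's first insn, which is where the code for the line begins.
 */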

void bpf_prog_free_linfo(struct bpf_prog *prog)
{
        bpf_prog_free_jited_linfo(prog);
        kvfree(prog->aux->linfo);
}

struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
                                  gfp_t gfp_extra_flags)
{
        gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
        struct bpf_prog *fp;
        u32 pages, delta;
        int ret;

        BUG_ON(fp_old == NULL);

        size = round_up(size, PAGE_SIZE);
        pages = size / PAGE_SIZE;
        if (pages <= fp_old->pages)
                return fp_old;

        delta = pages - fp_old->pages;
        ret = __bpf_prog_charge(fp_old->aux->user, delta);
        if (ret)
                return NULL;

        fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
        if (fp == NULL) {
                __bpf_prog_uncharge(fp_old->aux->user, delta);
        } else {
                memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
                fp->pages = pages;
                fp->aux->prog = fp;

                /* We keep fp->aux from fp_old around in the new
                 * reallocated structure.
                 */
                fp_old->aux = NULL;
                __bpf_prog_free(fp_old);
        }

        return fp;
}

void __bpf_prog_free(struct bpf_prog *fp)
{
        if (fp->aux) {
                free_percpu(fp->aux->stats);
                kfree(fp->aux);
        }
        vfree(fp);
}

int bpf_prog_calc_tag(struct bpf_prog *fp)
{
        const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
        u32 raw_size = bpf_prog_tag_scratch_size(fp);
        u32 digest[SHA_DIGEST_WORDS];
        u32 ws[SHA_WORKSPACE_WORDS];
        u32 i, bsize, psize, blocks;
        struct bpf_insn *dst;
        bool was_ld_map;
        u8 *raw, *todo;
        __be32 *result;
        __be64 *bits;

        raw = vmalloc(raw_size);
        if (!raw)
                return -ENOMEM;

        sha_init(digest);
        memset(ws, 0, sizeof(ws));

        /* We need to take the map fd out of the digest calculation
         * since map fds are unstable from the user space side.
         */
        dst = (void *)raw;
        for (i = 0, was_ld_map = false; i < fp->len; i++) {
                dst[i] = fp->insnsi[i];
                if (!was_ld_map &&
                    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
                    (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
                     dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
                        was_ld_map = true;
                        dst[i].imm = 0;
                } else if (was_ld_map &&
                           dst[i].code == 0 &&
                           dst[i].dst_reg == 0 &&
                           dst[i].src_reg == 0 &&
                           dst[i].off == 0) {
                        was_ld_map = false;
                        dst[i].imm = 0;
                } else {
                        was_ld_map = false;
                }
        }

        psize = bpf_prog_insn_size(fp);
        memset(&raw[psize], 0, raw_size - psize);
        raw[psize++] = 0x80;

        bsize  = round_up(psize, SHA_MESSAGE_BYTES);
        blocks = bsize / SHA_MESSAGE_BYTES;
        todo   = raw;
        if (bsize - psize >= sizeof(__be64)) {
                bits = (__be64 *)(todo + bsize - sizeof(__be64));
        } else {
                bits = (__be64 *)(todo + bsize + bits_offset);
                blocks++;
        }
        *bits = cpu_to_be64((psize - 1) << 3);

        while (blocks--) {
                sha_transform(digest, todo, ws);
                todo += SHA_MESSAGE_BYTES;
        }

        result = (__force __be32 *)digest;
        for (i = 0; i < SHA_DIGEST_WORDS; i++)
                result[i] = cpu_to_be32(digest[i]);
        memcpy(fp->tag, result, sizeof(fp->tag));

        vfree(raw);
        return 0;
}
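
/* For reference, a sketch of the buffer the padding code above builds,
 * following the standard SHA-1 message layout: the psize instruction
 * bytes, then a single 0x80 byte, then zero padding, with the final
 * 8 bytes of the last 64-byte block holding the big-endian bit length
 * of the original message ((psize - 1) << 3, since psize was already
 * incremented past the 0x80 marker). If the 0x80 byte leaves no room
 * for the length field, one extra block is appended.
 */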

static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
                                s32 end_new, s32 curr, const bool probe_pass)
{
        const s64 imm_min = S32_MIN, imm_max = S32_MAX;
        s32 delta = end_new - end_old;
        s64 imm = insn->imm;

        if (curr < pos && curr + imm + 1 >= end_old)
                imm += delta;
        else if (curr >= end_new && curr + imm + 1 < end_new)
                imm -= delta;
        if (imm < imm_min || imm > imm_max)
                return -ERANGE;
        if (!probe_pass)
                insn->imm = imm;
        return 0;
}

static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
                                s32 end_new, s32 curr, const bool probe_pass)
{
        const s32 off_min = S16_MIN, off_max = S16_MAX;
        s32 delta = end_new - end_old;
        s32 off = insn->off;

        if (curr < pos && curr + off + 1 >= end_old)
                off += delta;
        else if (curr >= end_new && curr + off + 1 < end_new)
                off -= delta;
        if (off < off_min || off > off_max)
                return -ERANGE;
        if (!probe_pass)
                insn->off = off;
        return 0;
}

static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
                            s32 end_new, const bool probe_pass)
{
        u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
        struct bpf_insn *insn = prog->insnsi;
        int ret = 0;

        for (i = 0; i < insn_cnt; i++, insn++) {
                u8 code;

                /* In the probing pass we still operate on the original,
                 * unpatched image in order to check overflows before we
                 * do any other adjustments. Therefore skip the patchlet.
                 */
                if (probe_pass && i == pos) {
                        i = end_new;
                        insn = prog->insnsi + end_old;
                }
                code = insn->code;
                if ((BPF_CLASS(code) != BPF_JMP &&
                     BPF_CLASS(code) != BPF_JMP32) ||
                    BPF_OP(code) == BPF_EXIT)
                        continue;
                /* Adjust offset of jmps if we cross patch boundaries. */
                if (BPF_OP(code) == BPF_CALL) {
                        if (insn->src_reg != BPF_PSEUDO_CALL)
                                continue;
                        ret = bpf_adj_delta_to_imm(insn, pos, end_old,
                                                   end_new, i, probe_pass);
                } else {
                        ret = bpf_adj_delta_to_off(insn, pos, end_old,
                                                   end_new, i, probe_pass);
                }
                if (ret)
                        break;
        }

        return ret;
}
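
/* Example (values illustrative, not from the original source): patching
 * the single insn at pos = 10 into 3 insns gives end_old = 11,
 * end_new = 13 and delta = 2. A jump at insn 5 with off = 7 originally
 * lands on insn 13 (5 + 7 + 1 >= end_old), so its offset grows by delta
 * to 9 and still reaches the same, now shifted, target (5 + 9 + 1 = 15).
 * Jumps that stay entirely on one side of the patched region are left
 * untouched.
 */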

static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
{
        struct bpf_line_info *linfo;
        u32 i, nr_linfo;

        nr_linfo = prog->aux->nr_linfo;
        if (!nr_linfo || !delta)
                return;

        linfo = prog->aux->linfo;

        for (i = 0; i < nr_linfo; i++)
                if (off < linfo[i].insn_off)
                        break;

        /* Push all off < linfo[i].insn_off by delta */
        for (; i < nr_linfo; i++)
                linfo[i].insn_off += delta;
}

struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
                                       const struct bpf_insn *patch, u32 len)
{
        u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
        const u32 cnt_max = S16_MAX;
        struct bpf_prog *prog_adj;
        int err;

        /* Since our patchlet doesn't expand the image, we're done. */
        if (insn_delta == 0) {
                memcpy(prog->insnsi + off, patch, sizeof(*patch));
                return prog;
        }

        insn_adj_cnt = prog->len + insn_delta;

        /* Reject anything that would potentially let the insn->off
         * target overflow when we have excessive program expansions.
         * We need to probe here before we do any reallocation where
         * we afterwards may not fail anymore.
         */
        if (insn_adj_cnt > cnt_max &&
            (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))
                return ERR_PTR(err);

        /* Several new instructions need to be inserted. Make room
         * for them. Likely, there's no need for a new allocation as
         * last page could have large enough tailroom.
         */
        prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
                                    GFP_USER);
        if (!prog_adj)
                return ERR_PTR(-ENOMEM);

        prog_adj->len = insn_adj_cnt;

        /* Patching happens in 3 steps:
         *
         * 1) Move over tail of insnsi from next instruction onwards,
         *    so we can patch the single target insn with one or more
         *    new ones (patching is always from 1 to n insns, n > 0).
         * 2) Inject new instructions at the target location.
         * 3) Adjust branch offsets if necessary.
         */
        insn_rest = insn_adj_cnt - off - len;

        memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
                sizeof(*patch) * insn_rest);
        memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);

        /* We are guaranteed not to fail at this point; the ship has
         * sailed and there is no way back to the original state. An
         * overflow cannot happen at this point.
         */
        BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));

        bpf_adj_linfo(prog_adj, off, insn_delta);

        return prog_adj;
}
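
/* Hypothetical usage sketch (names and insns illustrative, not from this
 * file): expand the single insn at 'off' into a two-insn sequence, the
 * way the verifier rewrites instructions:
 *
 *   struct bpf_insn patch[] = {
 *           BPF_MOV64_REG(BPF_REG_AX, BPF_REG_1),
 *           BPF_ALU64_IMM(BPF_ADD, BPF_REG_AX, 8),
 *   };
 *
 *   prog = bpf_patch_insn_single(prog, off, patch, ARRAY_SIZE(patch));
 *   if (IS_ERR(prog))
 *           return PTR_ERR(prog);
 *
 * On success the returned program may be a reallocated copy, so the old
 * pointer must not be used afterwards.
 */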

int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
{
        /* Branch offsets can't overflow when program is shrinking, no need
         * to call bpf_adj_branches(..., true) here
         */
        memmove(prog->insnsi + off, prog->insnsi + off + cnt,
                sizeof(struct bpf_insn) * (prog->len - off - cnt));
        prog->len -= cnt;

        return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
}

void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
{
        int i;

        for (i = 0; i < fp->aux->func_cnt; i++)
                bpf_prog_kallsyms_del(fp->aux->func[i]);
}

void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
{
        bpf_prog_kallsyms_del_subprogs(fp);
        bpf_prog_kallsyms_del(fp);
}

#ifdef CONFIG_BPF_JIT
/* All BPF JIT sysctl knobs here. */
int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
int bpf_jit_harden   __read_mostly;
int bpf_jit_kallsyms __read_mostly;
long bpf_jit_limit   __read_mostly;

static __always_inline void
bpf_get_prog_addr_region(const struct bpf_prog *prog,
                         unsigned long *symbol_start,
                         unsigned long *symbol_end)
{
        const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
        unsigned long addr = (unsigned long)hdr;

        WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));

        *symbol_start = addr;
        *symbol_end   = addr + hdr->pages * PAGE_SIZE;
}

void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
{
        const char *end = sym + KSYM_NAME_LEN;
        const struct btf_type *type;
        const char *func_name;

        BUILD_BUG_ON(sizeof("bpf_prog_") +
                     sizeof(prog->tag) * 2 +
                     /* name has been null terminated.
                      * We would need +1 for the '_' preceding
                      * the name.  However, the null character
                      * is double counted between the name and the
                      * sizeof("bpf_prog_") above, so we omit
                      * the +1 here.
                      */
                     sizeof(prog->aux->name) > KSYM_NAME_LEN);

        sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
        sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));

        /* prog->aux->name will be ignored if full btf name is available */
        if (prog->aux->func_info_cnt) {
                type = btf_type_by_id(prog->aux->btf,
                                      prog->aux->func_info[prog->aux->func_idx].type_id);
                func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
                snprintf(sym, (size_t)(end - sym), "_%s", func_name);
                return;
        }

        if (prog->aux->name[0])
                snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
        else
                *sym = 0;
}
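
/* The resulting symbol has the form bpf_prog_<tag>[_<name>], e.g.
 * (tag value purely illustrative):
 *
 *   bpf_prog_5a8e299464424462_my_prog
 *
 * which is the name that shows up for JITed programs in kallsyms-based
 * tooling such as perf.
 */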

static __always_inline unsigned long
bpf_get_prog_addr_start(struct latch_tree_node *n)
{
        unsigned long symbol_start, symbol_end;
        const struct bpf_prog_aux *aux;

        aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
        bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);

        return symbol_start;
}

static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
                                          struct latch_tree_node *b)
{
        return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);
}

static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
{
        unsigned long val = (unsigned long)key;
        unsigned long symbol_start, symbol_end;
        const struct bpf_prog_aux *aux;

        aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
        bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);

        if (val < symbol_start)
                return -1;
        if (val >= symbol_end)
                return  1;

        return 0;
}

static const struct latch_tree_ops bpf_tree_ops = {
        .less   = bpf_tree_less,
        .comp   = bpf_tree_comp,
};

static DEFINE_SPINLOCK(bpf_lock);
static LIST_HEAD(bpf_kallsyms);
static struct latch_tree_root bpf_tree __cacheline_aligned;

static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
{
        WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
        list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms);
        latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
}

static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
{
        if (list_empty(&aux->ksym_lnode))
                return;

        latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
        list_del_rcu(&aux->ksym_lnode);
}

static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
{
        return fp->jited && !bpf_prog_was_classic(fp);
}

static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
{
        return list_empty(&fp->aux->ksym_lnode) ||
               fp->aux->ksym_lnode.prev == LIST_POISON2;
}

void bpf_prog_kallsyms_add(struct bpf_prog *fp)
{
        if (!bpf_prog_kallsyms_candidate(fp) ||
            !capable(CAP_SYS_ADMIN))
                return;

        spin_lock_bh(&bpf_lock);
        bpf_prog_ksym_node_add(fp->aux);
        spin_unlock_bh(&bpf_lock);
}

void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{
        if (!bpf_prog_kallsyms_candidate(fp))
                return;

        spin_lock_bh(&bpf_lock);
        bpf_prog_ksym_node_del(fp->aux);
        spin_unlock_bh(&bpf_lock);
}

static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
{
        struct latch_tree_node *n;

        if (!bpf_jit_kallsyms_enabled())
                return NULL;

        n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
        return n ?
               container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
               NULL;
}

const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
                                 unsigned long *off, char *sym)
{
        unsigned long symbol_start, symbol_end;
        struct bpf_prog *prog;
        char *ret = NULL;

        rcu_read_lock();
        prog = bpf_prog_kallsyms_find(addr);
        if (prog) {
                bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end);
                bpf_get_prog_name(prog, sym);

                ret = sym;
                if (size)
                        *size = symbol_end - symbol_start;
                if (off)
                        *off  = addr - symbol_start;
        }
        rcu_read_unlock();

        return ret;
}

bool is_bpf_text_address(unsigned long addr)
{
        bool ret;

        rcu_read_lock();
        ret = bpf_prog_kallsyms_find(addr) != NULL;
        rcu_read_unlock();

        return ret;
}

int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
                    char *sym)
{
        struct bpf_prog_aux *aux;
        unsigned int it = 0;
        int ret = -ERANGE;

        if (!bpf_jit_kallsyms_enabled())
                return ret;

        rcu_read_lock();
        list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) {
                if (it++ != symnum)
                        continue;

                bpf_get_prog_name(aux->prog, sym);

                *value = (unsigned long)aux->prog->bpf_func;
                *type  = BPF_SYM_ELF_TYPE;

                ret = 0;
                break;
        }
        rcu_read_unlock();

        return ret;
}

static atomic_long_t bpf_jit_current;

/* Can be overridden by an arch's JIT compiler if it has a custom,
 * dedicated BPF backend memory area, or if neither of the two
 * below apply.
 */
u64 __weak bpf_jit_alloc_exec_limit(void)
{
#if defined(MODULES_VADDR)
        return MODULES_END - MODULES_VADDR;
#else
        return VMALLOC_END - VMALLOC_START;
#endif
}

static int __init bpf_jit_charge_init(void)
{
        /* Only used as heuristic here to derive limit. */
        bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2,
                                            PAGE_SIZE), LONG_MAX);
        return 0;
}
pure_initcall(bpf_jit_charge_init);

static int bpf_jit_charge_modmem(u32 pages)
{
        if (atomic_long_add_return(pages, &bpf_jit_current) >
            (bpf_jit_limit >> PAGE_SHIFT)) {
                if (!capable(CAP_SYS_ADMIN)) {
                        atomic_long_sub(pages, &bpf_jit_current);
                        return -EPERM;
                }
        }

        return 0;
}
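
/* Illustration (the numbers are an assumption; the exec area size is arch
 * dependent): with a 1 GiB exec area, bpf_jit_charge_init() sets
 * bpf_jit_limit to a quarter of it, i.e. 256 MiB. Unprivileged JIT
 * allocations then fail with -EPERM in bpf_jit_charge_modmem() once the
 * total charged in bpf_jit_current would exceed that many pages.
 */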

static void bpf_jit_uncharge_modmem(u32 pages)
{
        atomic_long_sub(pages, &bpf_jit_current);
}

void *__weak bpf_jit_alloc_exec(unsigned long size)
{
        return module_alloc(size);
}

void __weak bpf_jit_free_exec(void *addr)
{
        module_memfree(addr);
}

struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
                     unsigned int alignment,
                     bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
        struct bpf_binary_header *hdr;
        u32 size, hole, start, pages;

        /* Most BPF filters are really small, but if some of them
         * fill a page, allow at least 128 extra bytes to insert a
         * random section of illegal instructions.
         */
        size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
        pages = size / PAGE_SIZE;

        if (bpf_jit_charge_modmem(pages))
                return NULL;
        hdr = bpf_jit_alloc_exec(size);
        if (!hdr) {
                bpf_jit_uncharge_modmem(pages);
                return NULL;
        }

        /* Fill space with illegal/arch-dep instructions. */
        bpf_fill_ill_insns(hdr, size);

        hdr->pages = pages;
        hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
                     PAGE_SIZE - sizeof(*hdr));
        start = (get_random_int() % hole) & ~(alignment - 1);

        /* Leave a random number of instructions before BPF code. */
        *image_ptr = &hdr->image[start];

        return hdr;
}
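
/* A sketch of the image layout produced above:
 *
 *   +--------------------------+  <- hdr (page aligned)
 *   | struct bpf_binary_header |
 *   +--------------------------+
 *   | illegal insns (random    |
 *   | sized hole before start) |
 *   +--------------------------+  <- *image_ptr = &hdr->image[start]
 *   | JITed program image      |
 *   +--------------------------+
 *   | illegal insns (fill)     |
 *   +--------------------------+
 *
 * The random start offset makes the exact address of the emitted code
 * harder to guess for an attacker.
 */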

void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
        u32 pages = hdr->pages;

        bpf_jit_free_exec(hdr);
        bpf_jit_uncharge_modmem(pages);
}

/* This symbol is only overridden by archs that have different
 * requirements than the usual eBPF JITs, f.e. when they only
 * implement cBPF JIT, do not set images read-only, etc.
 */
void __weak bpf_jit_free(struct bpf_prog *fp)
{
        if (fp->jited) {
                struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);

                bpf_jit_binary_free(hdr);

                WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
        }

        bpf_prog_unlock_free(fp);
}

int bpf_jit_get_func_addr(const struct bpf_prog *prog,
                          const struct bpf_insn *insn, bool extra_pass,
                          u64 *func_addr, bool *func_addr_fixed)
{
        s16 off = insn->off;
        s32 imm = insn->imm;
        u8 *addr;

        *func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
        if (!*func_addr_fixed) {
                /* Place-holder address till the last pass has collected
                 * all addresses for JITed subprograms in which case we
                 * can pick them up from prog->aux.
                 */
                if (!extra_pass)
                        addr = NULL;
                else if (prog->aux->func &&
                         off >= 0 && off < prog->aux->func_cnt)
                        addr = (u8 *)prog->aux->func[off]->bpf_func;
                else
                        return -EINVAL;
        } else {
                /* Address of a BPF helper call. Since part of the core
                 * kernel, it's always at a fixed location. __bpf_call_base
                 * and the helper with imm relative to it are both in core
                 * kernel.
                 */
                addr = (u8 *)__bpf_call_base + imm;
        }

        *func_addr = (unsigned long)addr;
        return 0;
}
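
/* For the fixed-address case above, the verifier has previously stored
 * the helper's displacement in insn->imm, conceptually (a sketch):
 *
 *   insn->imm = (u8 *)helper_func - (u8 *)__bpf_call_base;
 *
 * so adding imm back onto __bpf_call_base recovers the helper address.
 */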

static int bpf_jit_blind_insn(const struct bpf_insn *from,
                              const struct bpf_insn *aux,
                              struct bpf_insn *to_buff)
{
        struct bpf_insn *to = to_buff;
        u32 imm_rnd = get_random_int();
        s16 off;

        BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
        BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);

        /* Constraints on AX register:
         *
         * AX register is inaccessible from user space. It is mapped in
         * all JITs, and used here for constant blinding rewrites. It is
         * typically "stateless" meaning its contents are only valid within
         * the executed instruction, but not across several instructions.
         * There are a few exceptions however which are further detailed
         * below.
         *
         * Constant blinding is only used by JITs, not in the interpreter.
         * The interpreter uses AX in some occasions as a local temporary
         * register e.g. in DIV or MOD instructions.
         *
         * In restricted circumstances, the verifier can also use the AX
         * register for rewrites as long as they do not interfere with
         * the above cases!
         */
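
        /* Example of the blinding rewrite performed below (imm_rnd is the
         * per-invocation random value):
         *
         *   dst = dst + imm
         *
         * becomes
         *
         *   AX  = imm ^ imm_rnd
         *   AX ^= imm_rnd        AX now holds imm again, but the
         *   dst = dst + AX       constant never appears verbatim in
         *                        the emitted image.
         */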
        if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
                goto out;

        if (from->imm == 0 &&
            (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
             from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
                *to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
                goto out;
        }

        switch (from->code) {
        case BPF_ALU | BPF_ADD | BPF_K:
        case BPF_ALU | BPF_SUB | BPF_K:
        case BPF_ALU | BPF_AND | BPF_K:
        case BPF_ALU | BPF_OR  | BPF_K:
        case BPF_ALU | BPF_XOR | BPF_K:
        case BPF_ALU | BPF_MUL | BPF_K:
        case BPF_ALU | BPF_MOV | BPF_K:
        case BPF_ALU | BPF_DIV | BPF_K:
        case BPF_ALU | BPF_MOD | BPF_K:
                *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
                *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
                *to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
                break;

        case BPF_ALU64 | BPF_ADD | BPF_K:
        case BPF_ALU64 | BPF_SUB | BPF_K:
        case BPF_ALU64 | BPF_AND | BPF_K:
        case BPF_ALU64 | BPF_OR  | BPF_K:
        case BPF_ALU64 | BPF_XOR | BPF_K:
        case BPF_ALU64 | BPF_MUL | BPF_K:
        case BPF_ALU64 | BPF_MOV | BPF_K:
        case BPF_ALU64 | BPF_DIV | BPF_K:
        case BPF_ALU64 | BPF_MOD | BPF_K:
                *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
                *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
                *to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
                break;

        case BPF_JMP | BPF_JEQ  | BPF_K:
        case BPF_JMP | BPF_JNE  | BPF_K:
        case BPF_JMP | BPF_JGT  | BPF_K:
        case BPF_JMP | BPF_JLT  | BPF_K:
        case BPF_JMP | BPF_JGE  | BPF_K:
        case BPF_JMP | BPF_JLE  | BPF_K:
        case BPF_JMP | BPF_JSGT | BPF_K:
        case BPF_JMP | BPF_JSLT | BPF_K:
        case BPF_JMP | BPF_JSGE | BPF_K:
        case BPF_JMP | BPF_JSLE | BPF_K:
        case BPF_JMP | BPF_JSET | BPF_K:
                /* Account for the two insns added above in case of a backjump. */
                off = from->off;
                if (off < 0)
                        off -= 2;
                *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
                *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
                *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
                break;

        case BPF_JMP32 | BPF_JEQ  | BPF_K:
        case BPF_JMP32 | BPF_JNE  | BPF_K:
        case BPF_JMP32 | BPF_JGT  | BPF_K:
        case BPF_JMP32 | BPF_JLT  | BPF_K:
        case BPF_JMP32 | BPF_JGE  | BPF_K:
        case BPF_JMP32 | BPF_JLE  | BPF_K:
        case BPF_JMP32 | BPF_JSGT | BPF_K:
        case BPF_JMP32 | BPF_JSLT | BPF_K:
        case BPF_JMP32 | BPF_JSGE | BPF_K:
        case BPF_JMP32 | BPF_JSLE | BPF_K:
        case BPF_JMP32 | BPF_JSET | BPF_K:
                /* Account for the two insns added above in case of a backjump. */
                off = from->off;
                if (off < 0)
                        off -= 2;
                *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
                *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
                *to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
                                      off);
                break;

        case BPF_LD | BPF_IMM | BPF_DW:
                *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
                *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
                *to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
                *to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
                break;
        case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
                *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
                *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
                *to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
                break;

        case BPF_ST | BPF_MEM | BPF_DW:
        case BPF_ST | BPF_MEM | BPF_W:
        case BPF_ST | BPF_MEM | BPF_H:
        case BPF_ST | BPF_MEM | BPF_B:
                *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
                *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
                *to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
                break;
        }
out:
        return to - to_buff;
}

static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
                                              gfp_t gfp_extra_flags)
{
        gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
        struct bpf_prog *fp;

        fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
        if (fp != NULL) {
                /* aux->prog still points to the fp_other one, so
                 * when promoting the clone to the real program,
                 * this still needs to be adapted.
                 */
                memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
        }

        return fp;
}

static void bpf_prog_clone_free(struct bpf_prog *fp)
{
        /* aux was stolen by the other clone, so we cannot free
         * it from this path! It will be freed eventually by the
         * other program on release.
         *
         * At this point, we don't need a deferred release since
         * clone is guaranteed to not be locked.
         */
        fp->aux = NULL;
        __bpf_prog_free(fp);
}

void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
{
        /* We have to repoint aux->prog to self, as we don't
         * know whether fp here is the clone or the original.
         */
        fp->aux->prog = fp;
        bpf_prog_clone_free(fp_other);
}

struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
{
        struct bpf_insn insn_buff[16], aux[2];
        struct bpf_prog *clone, *tmp;
        int insn_delta, insn_cnt;
        struct bpf_insn *insn;
        int i, rewritten;

        if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
                return prog;

        clone = bpf_prog_clone_create(prog, GFP_USER);
        if (!clone)
                return ERR_PTR(-ENOMEM);

        insn_cnt = clone->len;
        insn = clone->insnsi;

        for (i = 0; i < insn_cnt; i++, insn++) {
                /* We temporarily need to hold the original ld64 insn
                 * so that we can still access the first part in the
                 * second blinding run.
                 */
                if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
                    insn[1].code == 0)
                        memcpy(aux, insn, sizeof(aux));

                rewritten = bpf_jit_blind_insn(insn, aux, insn_buff);
                if (!rewritten)
                        continue;

                tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
                if (IS_ERR(tmp)) {
                        /* Patching may have repointed aux->prog during
                         * realloc from the original one, so we need to
                         * fix it up here on error.
                         */
                        bpf_jit_prog_release_other(prog, clone);
                        return tmp;
                }

                clone = tmp;
                insn_delta = rewritten - 1;

                /* Walk new program and skip insns we just inserted. */
                insn = clone->insnsi + i + insn_delta;
                insn_cnt += insn_delta;
                i        += insn_delta;
        }

        clone->blinded = 1;
        return clone;
}
#endif /* CONFIG_BPF_JIT */

/* Base function for offset calculation. Needs to go into .text section,
 * therefore keeping it non-static as well; will also be used by JITs
 * anyway later on, so do not let the compiler omit it. This also needs
 * to go into kallsyms for correlation from e.g. bpftool, so naming
 * must not change.
 */
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
        return 0;
}
EXPORT_SYMBOL_GPL(__bpf_call_base);

/* All UAPI available opcodes. */
#define BPF_INSN_MAP(INSN_2, INSN_3)            \
        /* 32 bit ALU operations. */            \
        /*   Register based. */                 \
        INSN_3(ALU, ADD,  X),                   \
        INSN_3(ALU, SUB,  X),                   \
        INSN_3(ALU, AND,  X),                   \
        INSN_3(ALU, OR,   X),                   \
        INSN_3(ALU, LSH,  X),                   \
        INSN_3(ALU, RSH,  X),                   \
        INSN_3(ALU, XOR,  X),                   \
        INSN_3(ALU, MUL,  X),                   \
        INSN_3(ALU, MOV,  X),                   \
        INSN_3(ALU, ARSH, X),                   \
        INSN_3(ALU, DIV,  X),                   \
        INSN_3(ALU, MOD,  X),                   \
        INSN_2(ALU, NEG),                       \
        INSN_3(ALU, END, TO_BE),                \
        INSN_3(ALU, END, TO_LE),                \
        /*   Immediate based. */                \
        INSN_3(ALU, ADD,  K),                   \
        INSN_3(ALU, SUB,  K),                   \
        INSN_3(ALU, AND,  K),                   \
        INSN_3(ALU, OR,   K),                   \
        INSN_3(ALU, LSH,  K),                   \
        INSN_3(ALU, RSH,  K),                   \
        INSN_3(ALU, XOR,  K),                   \
        INSN_3(ALU, MUL,  K),                   \
        INSN_3(ALU, MOV,  K),                   \
        INSN_3(ALU, ARSH, K),                   \
        INSN_3(ALU, DIV,  K),                   \
        INSN_3(ALU, MOD,  K),                   \
        /* 64 bit ALU operations. */            \
        /*   Register based. */                 \
        INSN_3(ALU64, ADD,  X),                 \
        INSN_3(ALU64, SUB,  X),                 \
        INSN_3(ALU64, AND,  X),                 \
        INSN_3(ALU64, OR,   X),                 \
        INSN_3(ALU64, LSH,  X),                 \
        INSN_3(ALU64, RSH,  X),                 \
        INSN_3(ALU64, XOR,  X),                 \
        INSN_3(ALU64, MUL,  X),                 \
        INSN_3(ALU64, MOV,  X),                 \
        INSN_3(ALU64, ARSH, X),                 \
        INSN_3(ALU64, DIV,  X),                 \
        INSN_3(ALU64, MOD,  X),                 \
        INSN_2(ALU64, NEG),                     \
        /*   Immediate based. */                \
        INSN_3(ALU64, ADD,  K),                 \
        INSN_3(ALU64, SUB,  K),                 \
        INSN_3(ALU64, AND,  K),                 \
        INSN_3(ALU64, OR,   K),                 \
        INSN_3(ALU64, LSH,  K),                 \
        INSN_3(ALU64, RSH,  K),                 \
        INSN_3(ALU64, XOR,  K),                 \
        INSN_3(ALU64, MUL,  K),                 \
        INSN_3(ALU64, MOV,  K),                 \
        INSN_3(ALU64, ARSH, K),                 \
        INSN_3(ALU64, DIV,  K),                 \
        INSN_3(ALU64, MOD,  K),                 \
        /* Call instruction. */                 \
        INSN_2(JMP, CALL),                      \
        /* Exit instruction. */                 \
        INSN_2(JMP, EXIT),                      \
        /* 32-bit Jump instructions. */         \
        /*   Register based. */                 \
        INSN_3(JMP32, JEQ,  X),                 \
        INSN_3(JMP32, JNE,  X),                 \
        INSN_3(JMP32, JGT,  X),                 \
        INSN_3(JMP32, JLT,  X),                 \
        INSN_3(JMP32, JGE,  X),                 \
        INSN_3(JMP32, JLE,  X),                 \
        INSN_3(JMP32, JSGT, X),                 \
        INSN_3(JMP32, JSLT, X),                 \
        INSN_3(JMP32, JSGE, X),                 \
        INSN_3(JMP32, JSLE, X),                 \
        INSN_3(JMP32, JSET, X),                 \
        /*   Immediate based. */                \
        INSN_3(JMP32, JEQ,  K),                 \
        INSN_3(JMP32, JNE,  K),                 \
        INSN_3(JMP32, JGT,  K),                 \
        INSN_3(JMP32, JLT,  K),                 \
        INSN_3(JMP32, JGE,  K),                 \
        INSN_3(JMP32, JLE,  K),                 \
        INSN_3(JMP32, JSGT, K),                 \
        INSN_3(JMP32, JSLT, K),                 \
        INSN_3(JMP32, JSGE, K),                 \
        INSN_3(JMP32, JSLE, K),                 \
        INSN_3(JMP32, JSET, K),                 \
        /* Jump instructions. */                \
        /*   Register based. */                 \
        INSN_3(JMP, JEQ,  X),                   \
        INSN_3(JMP, JNE,  X),                   \
        INSN_3(JMP, JGT,  X),                   \
        INSN_3(JMP, JLT,  X),                   \
        INSN_3(JMP, JGE,  X),                   \
        INSN_3(JMP, JLE,  X),                   \
        INSN_3(JMP, JSGT, X),                   \
        INSN_3(JMP, JSLT, X),                   \
        INSN_3(JMP, JSGE, X),                   \
        INSN_3(JMP, JSLE, X),                   \
        INSN_3(JMP, JSET, X),                   \
        /*   Immediate based. */                \
        INSN_3(JMP, JEQ,  K),                   \
        INSN_3(JMP, JNE,  K),                   \
        INSN_3(JMP, JGT,  K),                   \
        INSN_3(JMP, JLT,  K),                   \
        INSN_3(JMP, JGE,  K),                   \
        INSN_3(JMP, JLE,  K),                   \
        INSN_3(JMP, JSGT, K),                   \
        INSN_3(JMP, JSLT, K),                   \
        INSN_3(JMP, JSGE, K),                   \
        INSN_3(JMP, JSLE, K),                   \
        INSN_3(JMP, JSET, K),                   \
        INSN_2(JMP, JA),                        \
        /* Store instructions. */               \
        /*   Register based. */                 \
        INSN_3(STX, MEM,  B),                   \
        INSN_3(STX, MEM,  H),                   \
        INSN_3(STX, MEM,  W),                   \
        INSN_3(STX, MEM,  DW),                  \
        INSN_3(STX, XADD, W),                   \
        INSN_3(STX, XADD, DW),                  \
        /*   Immediate based. */                \
        INSN_3(ST, MEM, B),                     \
        INSN_3(ST, MEM, H),                     \
        INSN_3(ST, MEM, W),                     \
        INSN_3(ST, MEM, DW),                    \
        /* Load instructions. */                \
        /*   Register based. */                 \
        INSN_3(LDX, MEM, B),                    \
        INSN_3(LDX, MEM, H),                    \
        INSN_3(LDX, MEM, W),                    \
        INSN_3(LDX, MEM, DW),                   \
        /*   Immediate based. */                \
        INSN_3(LD, IMM, DW)

bool bpf_opcode_in_insntable(u8 code)
{
#define BPF_INSN_2_TBL(x, y)    [BPF_##x | BPF_##y] = true
#define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
        static const bool public_insntable[256] = {
                [0 ... 255] = false,
                /* Now overwrite non-defaults ... */
                BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
                /* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
                [BPF_LD | BPF_ABS | BPF_B] = true,
                [BPF_LD | BPF_ABS | BPF_H] = true,
                [BPF_LD | BPF_ABS | BPF_W] = true,
                [BPF_LD | BPF_IND | BPF_B] = true,
                [BPF_LD | BPF_IND | BPF_H] = true,
                [BPF_LD | BPF_IND | BPF_W] = true,
        };
#undef BPF_INSN_3_TBL
#undef BPF_INSN_2_TBL
        return public_insntable[code];
}

#ifndef CONFIG_BPF_JIT_ALWAYS_ON
/**
 *      __bpf_prog_run - run eBPF program on a given context
 *      @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
 *      @insn: is the array of eBPF instructions
 *      @stack: is the eBPF storage stack
 *
 * Decode and execute eBPF instructions.
 */
static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
{
#define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
#define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
        static const void *jumptable[256] = {
                [0 ... 255] = &&default_label,
                /* Now overwrite non-defaults ... */
                BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
                /* Non-UAPI available opcodes. */
                [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
                [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
        };
#undef BPF_INSN_3_LBL
#undef BPF_INSN_2_LBL
        u32 tail_call_cnt = 0;

#define CONT     ({ insn++; goto select_insn; })
#define CONT_JMP ({ insn++; goto select_insn; })
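
        /* Dispatch uses the compiler's computed-goto extension: each
         * opcode directly indexes the jumptable above and control jumps
         * to the matching label, avoiding a central switch per insn.
         */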

select_insn:
        goto *jumptable[insn->code];

        /* ALU */
#define ALU(OPCODE, OP)                 \
        ALU64_##OPCODE##_X:             \
                DST = DST OP SRC;       \
                CONT;                   \
        ALU_##OPCODE##_X:               \
                DST = (u32) DST OP (u32) SRC;   \
                CONT;                   \
        ALU64_##OPCODE##_K:             \
                DST = DST OP IMM;               \
                CONT;                   \
        ALU_##OPCODE##_K:               \
                DST = (u32) DST OP (u32) IMM;   \
                CONT;

        ALU(ADD,  +)
        ALU(SUB,  -)
        ALU(AND,  &)
        ALU(OR,   |)
        ALU(LSH, <<)
        ALU(RSH, >>)
        ALU(XOR,  ^)
        ALU(MUL,  *)
#undef ALU
        ALU_NEG:
                DST = (u32) -DST;
                CONT;
        ALU64_NEG:
                DST = -DST;
                CONT;
        ALU_MOV_X:
                DST = (u32) SRC;
                CONT;
        ALU_MOV_K:
                DST = (u32) IMM;
                CONT;
        ALU64_MOV_X:
                DST = SRC;
                CONT;
        ALU64_MOV_K:
                DST = IMM;
                CONT;
        LD_IMM_DW:
                DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
                insn++;
                CONT;
        ALU_ARSH_X:
                DST = (u64) (u32) ((*(s32 *) &DST) >> SRC);
                CONT;
        ALU_ARSH_K:
                DST = (u64) (u32) ((*(s32 *) &DST) >> IMM);
                CONT;
        ALU64_ARSH_X:
                (*(s64 *) &DST) >>= SRC;
                CONT;
        ALU64_ARSH_K:
                (*(s64 *) &DST) >>= IMM;
                CONT;
        ALU64_MOD_X:
                div64_u64_rem(DST, SRC, &AX);
                DST = AX;
                CONT;
        ALU_MOD_X:
                AX = (u32) DST;
                DST = do_div(AX, (u32) SRC);
                CONT;
        ALU64_MOD_K:
                div64_u64_rem(DST, IMM, &AX);
                DST = AX;
                CONT;
        ALU_MOD_K:
                AX = (u32) DST;
                DST = do_div(AX, (u32) IMM);
                CONT;
        ALU64_DIV_X:
                DST = div64_u64(DST, SRC);
                CONT;
        ALU_DIV_X:
                AX = (u32) DST;
                do_div(AX, (u32) SRC);
                DST = (u32) AX;
                CONT;
        ALU64_DIV_K:
                DST = div64_u64(DST, IMM);
                CONT;
        ALU_DIV_K:
                AX = (u32) DST;
                do_div(AX, (u32) IMM);
                DST = (u32) AX;
                CONT;
        ALU_END_TO_BE:
                switch (IMM) {
                case 16:
                        DST = (__force u16) cpu_to_be16(DST);
                        break;
                case 32:
                        DST = (__force u32) cpu_to_be32(DST);
                        break;
                case 64:
                        DST = (__force u64) cpu_to_be64(DST);
                        break;
                }
                CONT;
        ALU_END_TO_LE:
                switch (IMM) {
                case 16:
                        DST = (__force u16) cpu_to_le16(DST);
                        break;
                case 32:
                        DST = (__force u32) cpu_to_le32(DST);
                        break;
                case 64:
                        DST = (__force u64) cpu_to_le64(DST);
                        break;
                }
                CONT;

        /* CALL */
        JMP_CALL:
                /* Function call scratches BPF_R1-BPF_R5 registers,
                 * preserves BPF_R6-BPF_R9, and stores return value
                 * into BPF_R0.
                 */
                BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
                                                       BPF_R4, BPF_R5);
                CONT;

        JMP_CALL_ARGS:
                BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
                                                            BPF_R3, BPF_R4,
                                                            BPF_R5,
                                                            insn + insn->off + 1);
                CONT;

        JMP_TAIL_CALL: {
                struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
                struct bpf_array *array = container_of(map, struct bpf_array, map);
                struct bpf_prog *prog;
                u32 index = BPF_R3;

                if (unlikely(index >= array->map.max_entries))
                        goto out;
                if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
                        goto out;

                tail_call_cnt++;

                prog = READ_ONCE(array->ptrs[index]);
                if (!prog)
                        goto out;

                /* ARG1 at this point is guaranteed to point to CTX from
                 * the verifier side due to the fact that the tail call is
1473                 * handeled like a helper, that is, bpf_tail_call_proto,
1474                 * where arg1_type is ARG_PTR_TO_CTX.
1475                 */
1476                insn = prog->insnsi;
1477                goto select_insn;
1478out:
1479                CONT;
1480        }
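        /* From the program side the above corresponds to (sketch; the
         * prog-array map name 'jmp_table' is purely illustrative):
         *
         *   bpf_tail_call(ctx, &jmp_table, index);
         *
         * where R1 = ctx, R2 = &jmp_table and R3 = index at the call
         * site, which is why BPF_R2/BPF_R3 are consumed above.
         */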
1481        JMP_JA:
1482                insn += insn->off;
1483                CONT;
1484        JMP_EXIT:
1485                return BPF_R0;
1486        /* JMP */
1487#define COND_JMP(SIGN, OPCODE, CMP_OP)                          \
1488        JMP_##OPCODE##_X:                                       \
1489                if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) {     \
1490                        insn += insn->off;                      \
1491                        CONT_JMP;                               \
1492                }                                               \
1493                CONT;                                           \
1494        JMP32_##OPCODE##_X:                                     \
1495                if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) {     \
1496                        insn += insn->off;                      \
1497                        CONT_JMP;                               \
1498                }                                               \
1499                CONT;                                           \
1500        JMP_##OPCODE##_K:                                       \
1501                if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) {     \
1502                        insn += insn->off;                      \
1503                        CONT_JMP;                               \
1504                }                                               \
1505                CONT;                                           \
1506        JMP32_##OPCODE##_K:                                     \
1507                if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) {     \
1508                        insn += insn->off;                      \
1509                        CONT_JMP;                               \
1510                }                                               \
1511                CONT;
1512        COND_JMP(u, JEQ, ==)
1513        COND_JMP(u, JNE, !=)
1514        COND_JMP(u, JGT, >)
1515        COND_JMP(u, JLT, <)
1516        COND_JMP(u, JGE, >=)
1517        COND_JMP(u, JLE, <=)
1518        COND_JMP(u, JSET, &)
1519        COND_JMP(s, JSGT, >)
1520        COND_JMP(s, JSLT, <)
1521        COND_JMP(s, JSGE, >=)
1522        COND_JMP(s, JSLE, <=)
1523#undef COND_JMP
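        /* For reference, COND_JMP(u, JEQ, ==) expands to four labels;
         * the 64-bit reg/reg case alone becomes:
         *
         *   JMP_JEQ_X:
         *           if ((u64) DST == (u64) SRC) {
         *                   insn += insn->off;
         *                   CONT_JMP;
         *           }
         *           CONT;
         */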
1524        /* STX and ST and LDX */
1525#define LDST(SIZEOP, SIZE)                                              \
1526        STX_MEM_##SIZEOP:                                               \
1527                *(SIZE *)(unsigned long) (DST + insn->off) = SRC;       \
1528                CONT;                                                   \
1529        ST_MEM_##SIZEOP:                                                \
1530                *(SIZE *)(unsigned long) (DST + insn->off) = IMM;       \
1531                CONT;                                                   \
1532        LDX_MEM_##SIZEOP:                                               \
1533                DST = *(SIZE *)(unsigned long) (SRC + insn->off);       \
1534                CONT;
1535
1536        LDST(B,   u8)
1537        LDST(H,  u16)
1538        LDST(W,  u32)
1539        LDST(DW, u64)
1540#undef LDST
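        /* E.g. LDST(W, u32) expands to:
         *
         *   STX_MEM_W: *(u32 *)(unsigned long) (DST + insn->off) = SRC; CONT;
         *   ST_MEM_W:  *(u32 *)(unsigned long) (DST + insn->off) = IMM; CONT;
         *   LDX_MEM_W: DST = *(u32 *)(unsigned long) (SRC + insn->off); CONT;
         */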
1541        STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
1542                atomic_add((u32) SRC, (atomic_t *)(unsigned long)
1543                           (DST + insn->off));
1544                CONT;
1545        STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
1546                atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
1547                             (DST + insn->off));
1548                CONT;
1549
1550        default_label:
1551                /* If we ever reach this, we have a bug somewhere. Die hard here
1552                 * instead of just returning 0; we could be somewhere in a subprog,
1553                 * so execution could otherwise continue, which we do /not/ want.
1554                 *
1555                 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
1556                 */
1557                pr_warn("BPF interpreter: unknown opcode %02x\n", insn->code);
1558                BUG_ON(1);
1559                return 0;
1560}
1561STACK_FRAME_NON_STANDARD(___bpf_prog_run); /* jump table */
1562
1563#define PROG_NAME(stack_size) __bpf_prog_run##stack_size
1564#define DEFINE_BPF_PROG_RUN(stack_size) \
1565static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
1566{ \
1567        u64 stack[stack_size / sizeof(u64)]; \
1568        u64 regs[MAX_BPF_EXT_REG]; \
1569\
1570        FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1571        ARG1 = (u64) (unsigned long) ctx; \
1572        return ___bpf_prog_run(regs, insn, stack); \
1573}
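/* E.g. DEFINE_BPF_PROG_RUN(512) defines __bpf_prog_run512() with a
 * 512-byte (64 * sizeof(u64)) stack; FP points one past the end of
 * that array since the BPF stack grows downwards from the frame
 * pointer.
 */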
1574
1575#define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
1576#define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
1577static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
1578                                      const struct bpf_insn *insn) \
1579{ \
1580        u64 stack[stack_size / sizeof(u64)]; \
1581        u64 regs[MAX_BPF_EXT_REG]; \
1582\
1583        FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1584        BPF_R1 = r1; \
1585        BPF_R2 = r2; \
1586        BPF_R3 = r3; \
1587        BPF_R4 = r4; \
1588        BPF_R5 = r5; \
1589        return ___bpf_prog_run(regs, insn, stack); \
1590}
1591
1592#define EVAL1(FN, X) FN(X)
1593#define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
1594#define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
1595#define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
1596#define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
1597#define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
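/* The EVAL* helpers simply apply FN to each argument in turn, e.g.
 * EVAL3(FN, a, b, c) expands to FN(a) FN(b) FN(c); the lines below use
 * them to stamp out one interpreter per 32-byte stack-size bucket.
 */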
1598
1599EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
1600EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
1601EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
1602
1603EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
1604EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
1605EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
1606
1607#define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
1608
1609static unsigned int (*interpreters[])(const void *ctx,
1610                                      const struct bpf_insn *insn) = {
1611EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1612EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1613EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1614};
1615#undef PROG_NAME_LIST
1616#define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
1617static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
1618                                  const struct bpf_insn *insn) = {
1619EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1620EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1621EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1622};
1623#undef PROG_NAME_LIST
1624
1625void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
1626{
1627        stack_depth = max_t(u32, stack_depth, 1);
1628        insn->off = (s16) insn->imm;
1629        insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
1630                __bpf_call_base_args;
1631        insn->code = BPF_JMP | BPF_CALL_ARGS;
1632}
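/* Illustrative mapping for the index computation above: a stack depth
 * of 1..32 bytes rounds up to 32 and selects interpreters_args[0],
 * i.e. __bpf_prog_run_args32(); a depth of 100 rounds up to 128 and
 * selects __bpf_prog_run_args128().
 */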
1633
1634#else
1635static unsigned int __bpf_prog_ret0_warn(const void *ctx,
1636                                         const struct bpf_insn *insn)
1637{
1638        /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
1639         * is not working properly, so warn about it!
1640         */
1641        WARN_ON_ONCE(1);
1642        return 0;
1643}
1644#endif
1645
1646bool bpf_prog_array_compatible(struct bpf_array *array,
1647                               const struct bpf_prog *fp)
1648{
1649        if (fp->kprobe_override)
1650                return false;
1651
1652        if (!array->owner_prog_type) {
1653                /* There's no owner yet where we could check for
1654                 * compatibility.
1655                 */
1656                array->owner_prog_type = fp->type;
1657                array->owner_jited = fp->jited;
1658
1659                return true;
1660        }
1661
1662        return array->owner_prog_type == fp->type &&
1663               array->owner_jited == fp->jited;
1664}
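/* E.g. the first program stored into a prog array makes it the owner:
 * a BPF_PROG_TYPE_XDP program claims the array for XDP, and a later
 * attempt to store a socket filter, or a program with a different
 * jited state, is rejected by the comparison above.
 */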
1665
1666static int bpf_check_tail_call(const struct bpf_prog *fp)
1667{
1668        struct bpf_prog_aux *aux = fp->aux;
1669        int i;
1670
1671        for (i = 0; i < aux->used_map_cnt; i++) {
1672                struct bpf_map *map = aux->used_maps[i];
1673                struct bpf_array *array;
1674
1675                if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
1676                        continue;
1677
1678                array = container_of(map, struct bpf_array, map);
1679                if (!bpf_prog_array_compatible(array, fp))
1680                        return -EINVAL;
1681        }
1682
1683        return 0;
1684}
1685
1686static void bpf_prog_select_func(struct bpf_prog *fp)
1687{
1688#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1689        u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
1690
1691        fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
1692#else
1693        fp->bpf_func = __bpf_prog_ret0_warn;
1694#endif
1695}
1696
1697/**
1698 *      bpf_prog_select_runtime - select exec runtime for BPF program
1699 *      @fp: bpf_prog populated with internal BPF program
1700 *      @err: pointer to error variable
1701 *
1702 * Try to JIT eBPF program, if JIT is not available, use interpreter.
1703 * The BPF program will be executed via BPF_PROG_RUN() macro.
1704 */
1705struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
1706{
1707        /* In case of BPF-to-BPF calls, the verifier already did all
1708         * the prep work with regard to JITing, etc.
1709         */
1710        if (fp->bpf_func)
1711                goto finalize;
1712
1713        bpf_prog_select_func(fp);
1714
1715        /* eBPF JITs can rewrite the program in case constant
1716         * blinding is active. However, in case of error during
1717         * blinding, bpf_int_jit_compile() must always return a
1718         * valid program, which in this case would simply not
1719         * be JITed, but falls back to the interpreter.
1720         */
1721        if (!bpf_prog_is_dev_bound(fp->aux)) {
1722                *err = bpf_prog_alloc_jited_linfo(fp);
1723                if (*err)
1724                        return fp;
1725
1726                fp = bpf_int_jit_compile(fp);
1727                if (!fp->jited) {
1728                        bpf_prog_free_jited_linfo(fp);
1729#ifdef CONFIG_BPF_JIT_ALWAYS_ON
1730                        *err = -ENOTSUPP;
1731                        return fp;
1732#endif
1733                } else {
1734                        bpf_prog_free_unused_jited_linfo(fp);
1735                }
1736        } else {
1737                *err = bpf_prog_offload_compile(fp);
1738                if (*err)
1739                        return fp;
1740        }
1741
1742finalize:
1743        bpf_prog_lock_ro(fp);
1744
1745        /* The tail call compatibility check can only be done at
1746         * this late stage, as we need to determine whether we deal
1747         * with JITed or non-JITed program concatenations, and not
1748         * all eBPF JITs might immediately support all features.
1749         */
1750        *err = bpf_check_tail_call(fp);
1751
1752        return fp;
1753}
1754EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
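/* Sketch of the caller's side in the bpf syscall load path (error
 * handling elided): once the verifier has accepted the program,
 *
 *   prog = bpf_prog_select_runtime(prog, &err);
 *   if (err < 0)
 *           goto free_used_maps;
 *
 * after which prog->bpf_func points at either the JIT image or one of
 * the interpreters above.
 */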
1755
1756static unsigned int __bpf_prog_ret1(const void *ctx,
1757                                    const struct bpf_insn *insn)
1758{
1759        return 1;
1760}
1761
1762static struct bpf_prog_dummy {
1763        struct bpf_prog prog;
1764} dummy_bpf_prog = {
1765        .prog = {
1766                .bpf_func = __bpf_prog_ret1,
1767        },
1768};
1769
1770/* To avoid allocating an empty bpf_prog_array for cgroups that don't
1771 * have a bpf program attached, use one global 'empty_prog_array'.
1772 * It will not be modified by the caller of bpf_prog_array_alloc()
1773 * (since the caller requested prog_cnt == 0), and the returned
1774 * pointer should still be 'freed' via bpf_prog_array_free().
1775 */
1776static struct {
1777        struct bpf_prog_array hdr;
1778        struct bpf_prog *null_prog;
1779} empty_prog_array = {
1780        .null_prog = NULL,
1781};
1782
1783struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
1784{
1785        if (prog_cnt)
1786                return kzalloc(sizeof(struct bpf_prog_array) +
1787                               sizeof(struct bpf_prog_array_item) *
1788                               (prog_cnt + 1),
1789                               flags);
1790
1791        return &empty_prog_array.hdr;
1792}
1793
1794void bpf_prog_array_free(struct bpf_prog_array __rcu *progs)
1795{
1796        if (!progs ||
1797            progs == (struct bpf_prog_array __rcu *)&empty_prog_array.hdr)
1798                return;
1799        kfree_rcu(progs, rcu);
1800}
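/* Note the pairing: bpf_prog_array_alloc(0, GFP_KERNEL) returns
 * &empty_prog_array.hdr, and bpf_prog_array_free() recognizes that
 * pointer and skips kfree_rcu(), so alloc/free always pair up
 * regardless of prog_cnt.
 */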
1801
1802int bpf_prog_array_length(struct bpf_prog_array __rcu *array)
1803{
1804        struct bpf_prog_array_item *item;
1805        u32 cnt = 0;
1806
1807        rcu_read_lock();
1808        item = rcu_dereference(array)->items;
1809        for (; item->prog; item++)
1810                if (item->prog != &dummy_bpf_prog.prog)
1811                        cnt++;
1812        rcu_read_unlock();
1813        return cnt;
1814}
1815
1817static bool bpf_prog_array_copy_core(struct bpf_prog_array __rcu *array,
1818                                     u32 *prog_ids,
1819                                     u32 request_cnt)
1820{
1821        struct bpf_prog_array_item *item;
1822        int i = 0;
1823
1824        item = rcu_dereference_check(array, 1)->items;
1825        for (; item->prog; item++) {
1826                if (item->prog == &dummy_bpf_prog.prog)
1827                        continue;
1828                prog_ids[i] = item->prog->aux->id;
1829                if (++i == request_cnt) {
1830                        item++;
1831                        break;
1832                }
1833        }
1834
1835        return !!(item->prog);
1836}
1837
1838int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *array,
1839                                __u32 __user *prog_ids, u32 cnt)
1840{
1841        unsigned long err = 0;
1842        bool nospc;
1843        u32 *ids;
1844
1845        /* users of this function are doing:
1846         * cnt = bpf_prog_array_length();
1847         * if (cnt > 0)
1848         *     bpf_prog_array_copy_to_user(..., cnt);
1849         * so the kcalloc below doesn't need an extra cnt > 0 check.
1850         * However, bpf_prog_array_length() releases the rcu lock, and
1851         * the prog array could have been swapped with an empty or
1852         * larger array in the meantime, so always copy 'cnt' prog_ids
1853         * to the user; in a rare race the user will see zero prog_ids.
1854         */
1855        ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
1856        if (!ids)
1857                return -ENOMEM;
1858        rcu_read_lock();
1859        nospc = bpf_prog_array_copy_core(array, ids, cnt);
1860        rcu_read_unlock();
1861        err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
1862        kfree(ids);
1863        if (err)
1864                return -EFAULT;
1865        if (nospc)
1866                return -ENOSPC;
1867        return 0;
1868}
1869
1870void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *array,
1871                                struct bpf_prog *old_prog)
1872{
1873        struct bpf_prog_array_item *item = array->items;
1874
1875        for (; item->prog; item++)
1876                if (item->prog == old_prog) {
1877                        WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
1878                        break;
1879                }
1880}
1881
1882int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
1883                        struct bpf_prog *exclude_prog,
1884                        struct bpf_prog *include_prog,
1885                        struct bpf_prog_array **new_array)
1886{
1887        int new_prog_cnt, carry_prog_cnt = 0;
1888        struct bpf_prog_array_item *existing;
1889        struct bpf_prog_array *array;
1890        bool found_exclude = false;
1891        int new_prog_idx = 0;
1892
1893        /* Figure out how many existing progs we need to carry over to
1894         * the new array.
1895         */
1896        if (old_array) {
1897                existing = old_array->items;
1898                for (; existing->prog; existing++) {
1899                        if (existing->prog == exclude_prog) {
1900                                found_exclude = true;
1901                                continue;
1902                        }
1903                        if (existing->prog != &dummy_bpf_prog.prog)
1904                                carry_prog_cnt++;
1905                        if (existing->prog == include_prog)
1906                                return -EEXIST;
1907                }
1908        }
1909
1910        if (exclude_prog && !found_exclude)
1911                return -ENOENT;
1912
1913        /* How many progs (not NULL) will be in the new array? */
1914        new_prog_cnt = carry_prog_cnt;
1915        if (include_prog)
1916                new_prog_cnt += 1;
1917
1918        /* Do we have any prog (not NULL) in the new array? */
1919        if (!new_prog_cnt) {
1920                *new_array = NULL;
1921                return 0;
1922        }
1923
1924        /* +1 as the end of prog_array is marked with NULL */
1925        array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
1926        if (!array)
1927                return -ENOMEM;
1928
1929        /* Fill in the new prog array */
1930        if (carry_prog_cnt) {
1931                existing = old_array->items;
1932                for (; existing->prog; existing++)
1933                        if (existing->prog != exclude_prog &&
1934                            existing->prog != &dummy_bpf_prog.prog) {
1935                                array->items[new_prog_idx++].prog =
1936                                        existing->prog;
1937                        }
1938        }
1939        if (include_prog)
1940                array->items[new_prog_idx++].prog = include_prog;
1941        array->items[new_prog_idx].prog = NULL;
1942        *new_array = array;
1943        return 0;
1944}
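/* Sketch of the usual replace pattern built on top of this (as in the
 * perf event attach path): construct a new array that drops old_prog
 * and appends new_prog, publish it, then free the old one:
 *
 *   ret = bpf_prog_array_copy(old_array, old_prog, new_prog, &new_array);
 *   if (!ret) {
 *           rcu_assign_pointer(*array_ptr, new_array);
 *           bpf_prog_array_free(old_array);
 *   }
 *
 * where 'array_ptr' stands for the caller's RCU-protected slot.
 */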
1945
1946int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
1947                             u32 *prog_ids, u32 request_cnt,
1948                             u32 *prog_cnt)
1949{
1950        u32 cnt = 0;
1951
1952        if (array)
1953                cnt = bpf_prog_array_length(array);
1954
1955        *prog_cnt = cnt;
1956
1957        /* return early if user requested only program count or nothing to copy */
1958        if (!request_cnt || !cnt)
1959                return 0;
1960
1961        /* this function is called under trace/bpf_trace.c: bpf_event_mutex */
1962        return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
1963                                                                     : 0;
1964}
1965
1966static void bpf_prog_free_deferred(struct work_struct *work)
1967{
1968        struct bpf_prog_aux *aux;
1969        int i;
1970
1971        aux = container_of(work, struct bpf_prog_aux, work);
1972        if (bpf_prog_is_dev_bound(aux))
1973                bpf_prog_offload_destroy(aux->prog);
1974#ifdef CONFIG_PERF_EVENTS
1975        if (aux->prog->has_callchain_buf)
1976                put_callchain_buffers();
1977#endif
1978        for (i = 0; i < aux->func_cnt; i++)
1979                bpf_jit_free(aux->func[i]);
1980        if (aux->func_cnt) {
1981                kfree(aux->func);
1982                bpf_prog_unlock_free(aux->prog);
1983        } else {
1984                bpf_jit_free(aux->prog);
1985        }
1986}
1987
1988/* Free internal BPF program */
1989void bpf_prog_free(struct bpf_prog *fp)
1990{
1991        struct bpf_prog_aux *aux = fp->aux;
1992
1993        INIT_WORK(&aux->work, bpf_prog_free_deferred);
1994        schedule_work(&aux->work);
1995}
1996EXPORT_SYMBOL_GPL(bpf_prog_free);
1997
1998/* RNG for unprivileged user space, with state separated from prandom_u32(). */
1999static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
2000
2001void bpf_user_rnd_init_once(void)
2002{
2003        prandom_init_once(&bpf_user_rnd_state);
2004}
2005
2006BPF_CALL_0(bpf_user_rnd_u32)
2007{
2008        /* Should someone ever have the rather unwise idea to use some
2009         * of the registers passed into this function, then note that
2010         * this function is called from native eBPF and classic-to-eBPF
2011         * transformations. Register assignments from both sides are
2012         * different; e.g. classic BPF always sets fn(ctx, A, X) here.
2013         */
2014        struct rnd_state *state;
2015        u32 res;
2016
2017        state = &get_cpu_var(bpf_user_rnd_state);
2018        res = prandom_u32_state(state);
2019        put_cpu_var(bpf_user_rnd_state);
2020
2021        return res;
2022}
2023
2024/* Weak definitions of helper functions in case we don't have bpf syscall. */
2025const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
2026const struct bpf_func_proto bpf_map_update_elem_proto __weak;
2027const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
2028const struct bpf_func_proto bpf_map_push_elem_proto __weak;
2029const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
2030const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
2031const struct bpf_func_proto bpf_spin_lock_proto __weak;
2032const struct bpf_func_proto bpf_spin_unlock_proto __weak;
2033
2034const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
2035const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
2036const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
2037const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
2038
2039const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
2040const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
2041const struct bpf_func_proto bpf_get_current_comm_proto __weak;
2042const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
2043const struct bpf_func_proto bpf_get_local_storage_proto __weak;
2044
2045const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
2046{
2047        return NULL;
2048}
2049
2050u64 __weak
2051bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
2052                 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
2053{
2054        return -ENOTSUPP;
2055}
2056EXPORT_SYMBOL_GPL(bpf_event_output);
2057
2058/* Always built-in helper functions. */
2059const struct bpf_func_proto bpf_tail_call_proto = {
2060        .func           = NULL,
2061        .gpl_only       = false,
2062        .ret_type       = RET_VOID,
2063        .arg1_type      = ARG_PTR_TO_CTX,
2064        .arg2_type      = ARG_CONST_MAP_PTR,
2065        .arg3_type      = ARG_ANYTHING,
2066};
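/* .func is NULL on purpose: a tail call is never dispatched as an
 * ordinary helper. The interpreter handles it inline at JMP_TAIL_CALL
 * above, and JITs emit a direct jump sequence for it.
 */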
2067
2068/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
2069 * Implementing bpf_int_jit_compile() is encouraged instead, so that
2070 * eBPF and, implicitly, also cBPF can get JITed!
2071 */
2072struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
2073{
2074        return prog;
2075}
2076
2077/* Stub for JITs that support eBPF. All cBPF code gets transformed into
2078 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
2079 */
2080void __weak bpf_jit_compile(struct bpf_prog *prog)
2081{
2082}
2083
2084bool __weak bpf_helper_changes_pkt_data(void *func)
2085{
2086        return false;
2087}
2088
2089/* To execute LD_ABS/LD_IND instructions, __bpf_prog_run() may call
2090 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
2091 */
2092int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
2093                         int len)
2094{
2095        return -EFAULT;
2096}
2097
2098DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
2099EXPORT_SYMBOL(bpf_stats_enabled_key);
2100
2101/* All definitions of tracepoints related to BPF. */
2102#define CREATE_TRACE_POINTS
2103#include <linux/bpf_trace.h>
2104
2105EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
2106