linux/kernel/bpf/trampoline.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019 Facebook */
#include <linux/hash.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ftrace.h>
#include <linux/rbtree_latch.h>
#include <linux/perf_event.h>
#include <linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/rcupdate_wait.h>
#include <linux/module.h>
#include <linux/static_call.h>

/* dummy _ops. The verifier will operate on target program's ops. */
const struct bpf_verifier_ops bpf_extension_verifier_ops = {
};
const struct bpf_prog_ops bpf_extension_prog_ops = {
};

/* btf_vmlinux has ~22k attachable functions. 1k htab is enough. */
#define TRAMPOLINE_HASH_BITS 10
#define TRAMPOLINE_TABLE_SIZE (1 << TRAMPOLINE_HASH_BITS)

static struct hlist_head trampoline_table[TRAMPOLINE_TABLE_SIZE];

/* serializes access to trampoline_table */
static DEFINE_MUTEX(trampoline_mutex);

bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
{
	enum bpf_attach_type eatype = prog->expected_attach_type;

	return eatype == BPF_TRACE_FENTRY || eatype == BPF_TRACE_FEXIT ||
	       eatype == BPF_MODIFY_RETURN;
}

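/* Allocate one executable page for a BPF image (e.g. a trampoline) and mark
 * it executable. The page is left writable on purpose; see the comment below.
 */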
void *bpf_jit_alloc_exec_page(void)
{
	void *image;

	image = bpf_jit_alloc_exec(PAGE_SIZE);
	if (!image)
		return NULL;

	set_vm_flush_reset_perms(image);
	/* Keep the image writable. The alternative is to keep flipping ro/rw
	 * every time a new program is attached or detached.
	 */
	set_memory_x((long)image, 1);
	return image;
}

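/* Publish/remove the image page as a BPF kallsyms entry and emit the matching
 * perf KSYMBOL record so profilers can resolve trampoline addresses.
 */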
void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym)
{
	ksym->start = (unsigned long) data;
	ksym->end = ksym->start + PAGE_SIZE;
	bpf_ksym_add(ksym);
	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
			   PAGE_SIZE, false, ksym->name);
}

void bpf_image_ksym_del(struct bpf_ksym *ksym)
{
	bpf_ksym_del(ksym);
	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
			   PAGE_SIZE, true, ksym->name);
}

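/* Find the trampoline for @key or allocate a new one. Returns with the
 * refcount incremented; the caller pairs this with bpf_trampoline_put().
 */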
static struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
{
	struct bpf_trampoline *tr;
	struct hlist_head *head;
	int i;

	mutex_lock(&trampoline_mutex);
	head = &trampoline_table[hash_64(key, TRAMPOLINE_HASH_BITS)];
	hlist_for_each_entry(tr, head, hlist) {
		if (tr->key == key) {
			refcount_inc(&tr->refcnt);
			goto out;
		}
	}
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out;

	tr->key = key;
	INIT_HLIST_NODE(&tr->hlist);
	hlist_add_head(&tr->hlist, head);
	refcount_set(&tr->refcnt, 1);
	mutex_init(&tr->mutex);
	for (i = 0; i < BPF_TRAMP_MAX; i++)
		INIT_HLIST_HEAD(&tr->progs_hlist[i]);
out:
	mutex_unlock(&trampoline_mutex);
	return tr;
}

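/* Take a reference on the module (if any) that contains the traced function
 * so it cannot be unloaded while the trampoline is attached to it.
 */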
static int bpf_trampoline_module_get(struct bpf_trampoline *tr)
{
	struct module *mod;
	int err = 0;

	preempt_disable();
	mod = __module_text_address((unsigned long) tr->func.addr);
	if (mod && !try_module_get(mod))
		err = -ENOENT;
	preempt_enable();
	tr->mod = mod;
	return err;
}

static void bpf_trampoline_module_put(struct bpf_trampoline *tr)
{
	module_put(tr->mod);
	tr->mod = NULL;
}

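/* Returns 1 if @ip is patchable via ftrace, 0 if it is not, and -EFAULT if
 * ftrace reports a record at a different address than expected.
 */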
static int is_ftrace_location(void *ip)
{
	long addr;

	addr = ftrace_location((long)ip);
	if (!addr)
		return 0;
	if (WARN_ON_ONCE(addr != (long)ip))
		return -EFAULT;
	return 1;
}

static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr)
{
	void *ip = tr->func.addr;
	int ret;

	if (tr->func.ftrace_managed)
		ret = unregister_ftrace_direct((long)ip, (long)old_addr);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, NULL);

	if (!ret)
		bpf_trampoline_module_put(tr);
	return ret;
}

static int modify_fentry(struct bpf_trampoline *tr, void *old_addr, void *new_addr)
{
	void *ip = tr->func.addr;
	int ret;

	if (tr->func.ftrace_managed)
		ret = modify_ftrace_direct((long)ip, (long)old_addr, (long)new_addr);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, new_addr);
	return ret;
}

/* first time registering */
static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
{
	void *ip = tr->func.addr;
	int ret;

	ret = is_ftrace_location(ip);
	if (ret < 0)
		return ret;
	tr->func.ftrace_managed = ret;

	if (bpf_trampoline_module_get(tr))
		return -ENOENT;

	if (tr->func.ftrace_managed)
		ret = register_ftrace_direct((long)ip, (long)new_addr);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr);

	if (ret)
		bpf_trampoline_module_put(tr);
	return ret;
}

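/* Snapshot the programs currently attached to @tr into a freshly allocated
 * bpf_tramp_progs array, one slot per attach kind. *total is the overall
 * number of programs and *ip_arg is set if any of them needs the IP argument.
 */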
static struct bpf_tramp_progs *
bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total, bool *ip_arg)
{
	const struct bpf_prog_aux *aux;
	struct bpf_tramp_progs *tprogs;
	struct bpf_prog **progs;
	int kind;

	*total = 0;
	tprogs = kcalloc(BPF_TRAMP_MAX, sizeof(*tprogs), GFP_KERNEL);
	if (!tprogs)
		return ERR_PTR(-ENOMEM);

	for (kind = 0; kind < BPF_TRAMP_MAX; kind++) {
		tprogs[kind].nr_progs = tr->progs_cnt[kind];
		*total += tr->progs_cnt[kind];
		progs = tprogs[kind].progs;

		hlist_for_each_entry(aux, &tr->progs_hlist[kind], tramp_hlist) {
			*ip_arg |= aux->prog->call_get_func_ip;
			*progs++ = aux->prog;
		}
	}
	return tprogs;
}

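/* Final teardown step: runs from a workqueue once all the grace periods in
 * bpf_tramp_image_put() have elapsed, so the image page can be freed.
 */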
static void __bpf_tramp_image_put_deferred(struct work_struct *work)
{
	struct bpf_tramp_image *im;

	im = container_of(work, struct bpf_tramp_image, work);
	bpf_image_ksym_del(&im->ksym);
	bpf_jit_free_exec(im->image);
	bpf_jit_uncharge_modmem(1);
	percpu_ref_exit(&im->pcref);
	kfree_rcu(im, rcu);
}

/* callback, fexit step 3 or fentry step 2 */
static void __bpf_tramp_image_put_rcu(struct rcu_head *rcu)
{
	struct bpf_tramp_image *im;

	im = container_of(rcu, struct bpf_tramp_image, rcu);
	INIT_WORK(&im->work, __bpf_tramp_image_put_deferred);
	schedule_work(&im->work);
}

/* callback, fexit step 2. Called after percpu_ref_kill confirms. */
static void __bpf_tramp_image_release(struct percpu_ref *pcref)
{
	struct bpf_tramp_image *im;

	im = container_of(pcref, struct bpf_tramp_image, pcref);
	call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu);
}

/* callback, fexit or fentry step 1 */
static void __bpf_tramp_image_put_rcu_tasks(struct rcu_head *rcu)
{
	struct bpf_tramp_image *im;

	im = container_of(rcu, struct bpf_tramp_image, rcu);
	if (im->ip_after_call)
		/* the case of fmod_ret/fexit trampoline and CONFIG_PREEMPTION=y */
		percpu_ref_kill(&im->pcref);
	else
		/* the case of fentry trampoline */
		call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu);
}

static void bpf_tramp_image_put(struct bpf_tramp_image *im)
{
	/* The trampoline image that calls the original function is using:
	 * rcu_read_lock_trace to protect sleepable bpf progs
	 * rcu_read_lock to protect normal bpf progs
	 * percpu_ref to protect the trampoline itself
	 * rcu tasks to protect the trampoline asm not covered by percpu_ref
	 * (the few asm insns before __bpf_tramp_enter and
	 *  after __bpf_tramp_exit)
	 *
	 * The trampoline is unreachable before bpf_tramp_image_put().
	 *
	 * First, patch the trampoline to avoid calling into fexit progs.
	 * The progs will be freed even if the original function is still
	 * executing or sleeping.
	 * In case of CONFIG_PREEMPT=y use call_rcu_tasks() to wait for the
	 * first few asm instructions to execute and call into
	 * __bpf_tramp_enter->percpu_ref_get.
	 * Then use percpu_ref_kill to wait for the trampoline and the original
	 * function to finish.
	 * Then use call_rcu_tasks() to make sure the few asm insns in
	 * the trampoline epilogue are done as well.
	 *
	 * In the !PREEMPT case a task that got interrupted in the first asm
	 * insns won't go through an RCU quiescent state which the
	 * percpu_ref_kill will be waiting for. Hence the first
	 * call_rcu_tasks() is not necessary.
	 */
	if (im->ip_after_call) {
		int err = bpf_arch_text_poke(im->ip_after_call, BPF_MOD_JUMP,
					     NULL, im->ip_epilogue);
		WARN_ON(err);
		if (IS_ENABLED(CONFIG_PREEMPTION))
			call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
		else
			percpu_ref_kill(&im->pcref);
		return;
	}

	/* A trampoline without fexit and fmod_ret progs doesn't call the
	 * original function and doesn't use percpu_ref.
	 * Use call_rcu_tasks_trace() to wait for sleepable progs to finish.
	 * Then use call_rcu_tasks() to wait for the rest of the trampoline
	 * asm and normal progs.
	 */
	call_rcu_tasks_trace(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
}

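/* Allocate and charge one image page, initialize its percpu_ref and publish
 * it as a "bpf_trampoline_<key>_<idx>" kallsyms entry.
 */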
static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key, u32 idx)
{
	struct bpf_tramp_image *im;
	struct bpf_ksym *ksym;
	void *image;
	int err = -ENOMEM;

	im = kzalloc(sizeof(*im), GFP_KERNEL);
	if (!im)
		goto out;

	err = bpf_jit_charge_modmem(1);
	if (err)
		goto out_free_im;

	err = -ENOMEM;
	im->image = image = bpf_jit_alloc_exec_page();
	if (!image)
		goto out_uncharge;

	err = percpu_ref_init(&im->pcref, __bpf_tramp_image_release, 0, GFP_KERNEL);
	if (err)
		goto out_free_image;

	ksym = &im->ksym;
	INIT_LIST_HEAD_RCU(&ksym->lnode);
	snprintf(ksym->name, KSYM_NAME_LEN, "bpf_trampoline_%llu_%u", key, idx);
	bpf_image_ksym_add(image, ksym);
	return im;

out_free_image:
	bpf_jit_free_exec(im->image);
out_uncharge:
	bpf_jit_uncharge_modmem(1);
out_free_im:
	kfree(im);
out:
	return ERR_PTR(err);
}

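/* Regenerate the trampoline from the currently attached programs and switch
 * the fentry hook from the old image to the new one. When no programs are
 * left, the hook is removed and the current image is released.
 */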
static int bpf_trampoline_update(struct bpf_trampoline *tr)
{
	struct bpf_tramp_image *im;
	struct bpf_tramp_progs *tprogs;
	u32 flags = BPF_TRAMP_F_RESTORE_REGS;
	bool ip_arg = false;
	int err, total;

	tprogs = bpf_trampoline_get_progs(tr, &total, &ip_arg);
	if (IS_ERR(tprogs))
		return PTR_ERR(tprogs);

	if (total == 0) {
		err = unregister_fentry(tr, tr->cur_image->image);
		bpf_tramp_image_put(tr->cur_image);
		tr->cur_image = NULL;
		tr->selector = 0;
		goto out;
	}

	im = bpf_tramp_image_alloc(tr->key, tr->selector);
	if (IS_ERR(im)) {
		err = PTR_ERR(im);
		goto out;
	}

	if (tprogs[BPF_TRAMP_FEXIT].nr_progs ||
	    tprogs[BPF_TRAMP_MODIFY_RETURN].nr_progs)
		flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME;

	if (ip_arg)
		flags |= BPF_TRAMP_F_IP_ARG;

	err = arch_prepare_bpf_trampoline(im, im->image, im->image + PAGE_SIZE,
					  &tr->func.model, flags, tprogs,
					  tr->func.addr);
	if (err < 0)
		goto out;

	WARN_ON(tr->cur_image && tr->selector == 0);
	WARN_ON(!tr->cur_image && tr->selector);
	if (tr->cur_image)
		/* progs already running at this address */
		err = modify_fentry(tr, tr->cur_image->image, im->image);
	else
		/* first time registering */
		err = register_fentry(tr, im->image);
	if (err)
		goto out;
	if (tr->cur_image)
		bpf_tramp_image_put(tr->cur_image);
	tr->cur_image = im;
	tr->selector++;
out:
	kfree(tprogs);
	return err;
}

static enum bpf_tramp_prog_type bpf_attach_type_to_tramp(struct bpf_prog *prog)
{
	switch (prog->expected_attach_type) {
	case BPF_TRACE_FENTRY:
		return BPF_TRAMP_FENTRY;
	case BPF_MODIFY_RETURN:
		return BPF_TRAMP_MODIFY_RETURN;
	case BPF_TRACE_FEXIT:
		return BPF_TRAMP_FEXIT;
	case BPF_LSM_MAC:
		if (!prog->aux->attach_func_proto->type)
			/* The function returns void, so we cannot modify its
			 * return value.
			 */
			return BPF_TRAMP_FEXIT;
		else
			return BPF_TRAMP_MODIFY_RETURN;
	default:
		return BPF_TRAMP_REPLACE;
	}
}

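/* Attach @prog to @tr. Extension (freplace) programs are mutually exclusive
 * with fentry/fexit programs on the same target function.
 */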
int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
{
	enum bpf_tramp_prog_type kind;
	int err = 0;
	int cnt;

	kind = bpf_attach_type_to_tramp(prog);
	mutex_lock(&tr->mutex);
	if (tr->extension_prog) {
		/* Cannot attach fentry/fexit if an extension prog is attached.
		 * Cannot overwrite the extension prog either.
		 */
		err = -EBUSY;
		goto out;
	}
	cnt = tr->progs_cnt[BPF_TRAMP_FENTRY] + tr->progs_cnt[BPF_TRAMP_FEXIT];
	if (kind == BPF_TRAMP_REPLACE) {
		/* Cannot attach extension if fentry/fexit are in use. */
		if (cnt) {
			err = -EBUSY;
			goto out;
		}
		tr->extension_prog = prog;
		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP, NULL,
					 prog->bpf_func);
		goto out;
	}
	if (cnt >= BPF_MAX_TRAMP_PROGS) {
		err = -E2BIG;
		goto out;
	}
	if (!hlist_unhashed(&prog->aux->tramp_hlist)) {
		/* prog already linked */
		err = -EBUSY;
		goto out;
	}
	hlist_add_head(&prog->aux->tramp_hlist, &tr->progs_hlist[kind]);
	tr->progs_cnt[kind]++;
	err = bpf_trampoline_update(tr);
	if (err) {
		hlist_del_init(&prog->aux->tramp_hlist);
		tr->progs_cnt[kind]--;
	}
out:
	mutex_unlock(&tr->mutex);
	return err;
}

/* bpf_trampoline_unlink_prog() should never fail. */
int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
{
	enum bpf_tramp_prog_type kind;
	int err;

	kind = bpf_attach_type_to_tramp(prog);
	mutex_lock(&tr->mutex);
	if (kind == BPF_TRAMP_REPLACE) {
		WARN_ON_ONCE(!tr->extension_prog);
		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP,
					 tr->extension_prog->bpf_func, NULL);
		tr->extension_prog = NULL;
		goto out;
	}
	hlist_del_init(&prog->aux->tramp_hlist);
	tr->progs_cnt[kind]--;
	err = bpf_trampoline_update(tr);
out:
	mutex_unlock(&tr->mutex);
	return err;
}

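/* Look up (or create) the trampoline for @key and record the target function
 * model and address on first use.
 */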
struct bpf_trampoline *bpf_trampoline_get(u64 key,
					  struct bpf_attach_target_info *tgt_info)
{
	struct bpf_trampoline *tr;

	tr = bpf_trampoline_lookup(key);
	if (!tr)
		return NULL;

	mutex_lock(&tr->mutex);
	if (tr->func.addr)
		goto out;

	memcpy(&tr->func.model, &tgt_info->fmodel, sizeof(tgt_info->fmodel));
	tr->func.addr = (void *)tgt_info->tgt_addr;
out:
	mutex_unlock(&tr->mutex);
	return tr;
}

void bpf_trampoline_put(struct bpf_trampoline *tr)
{
	if (!tr)
		return;
	mutex_lock(&trampoline_mutex);
	if (!refcount_dec_and_test(&tr->refcnt))
		goto out;
	WARN_ON_ONCE(mutex_is_locked(&tr->mutex));
	if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FENTRY])))
		goto out;
	if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FEXIT])))
		goto out;
	/* This code will be executed even when the last bpf_tramp_image
	 * is alive. All progs are detached from the trampoline and the
	 * trampoline image is patched with jmp into epilogue to skip
	 * fexit progs. The fentry-only trampoline will be freed via
	 * multiple rcu callbacks.
	 */
	hlist_del(&tr->hlist);
	kfree(tr);
out:
	mutex_unlock(&trampoline_mutex);
}

#define NO_START_TIME 1
static __always_inline u64 notrace bpf_prog_start_time(void)
{
	u64 start = NO_START_TIME;

	if (static_branch_unlikely(&bpf_stats_enabled_key)) {
		start = sched_clock();
		if (unlikely(!start))
			start = NO_START_TIME;
	}
	return start;
}

static void notrace inc_misses_counter(struct bpf_prog *prog)
{
	struct bpf_prog_stats *stats;
	/* u64_stats_update_begin_irqsave() returns unsigned long; match the
	 * type used in update_prog_stats() below.
	 */
	unsigned long flags;

	stats = this_cpu_ptr(prog->stats);
	flags = u64_stats_update_begin_irqsave(&stats->syncp);
	u64_stats_inc(&stats->misses);
	u64_stats_update_end_irqrestore(&stats->syncp, flags);
}

/* The logic is similar to bpf_prog_run(), but with an explicit
 * rcu_read_lock() and migrate_disable() which are required
 * for the trampoline. The macro is split into
 * call __bpf_prog_enter
 * call prog->bpf_func
 * call __bpf_prog_exit
 *
 * __bpf_prog_enter returns:
 * 0 - skip execution of the bpf prog
 * 1 - execute bpf prog
 * [2..MAX_U64] - execute bpf prog and record execution time.
 *     This is start time.
 */
u64 notrace __bpf_prog_enter(struct bpf_prog *prog)
	__acquires(RCU)
{
	rcu_read_lock();
	migrate_disable();
	if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1)) {
		inc_misses_counter(prog);
		return 0;
	}
	return bpf_prog_start_time();
}

static void notrace update_prog_stats(struct bpf_prog *prog,
				      u64 start)
{
	struct bpf_prog_stats *stats;

	if (static_branch_unlikely(&bpf_stats_enabled_key) &&
	    /* static_key could be enabled in __bpf_prog_enter*
	     * and disabled in __bpf_prog_exit*.
	     * And vice versa.
	     * Hence check that 'start' is valid.
	     */
	    start > NO_START_TIME) {
		unsigned long flags;

		stats = this_cpu_ptr(prog->stats);
		flags = u64_stats_update_begin_irqsave(&stats->syncp);
		u64_stats_inc(&stats->cnt);
		u64_stats_add(&stats->nsecs, sched_clock() - start);
		u64_stats_update_end_irqrestore(&stats->syncp, flags);
	}
}

void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start)
	__releases(RCU)
{
	update_prog_stats(prog, start);
	__this_cpu_dec(*(prog->active));
	migrate_enable();
	rcu_read_unlock();
}

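/* Sleepable counterpart of __bpf_prog_enter()/__bpf_prog_exit(): it uses
 * rcu_read_lock_trace() instead of rcu_read_lock() so the prog may fault and
 * sleep, and it shares the same per-cpu 'active' recursion counter.
 */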
u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog)
{
	rcu_read_lock_trace();
	migrate_disable();
	might_fault();
	if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1)) {
		inc_misses_counter(prog);
		return 0;
	}
	return bpf_prog_start_time();
}

void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start)
{
	update_prog_stats(prog, start);
	__this_cpu_dec(*(prog->active));
	migrate_enable();
	rcu_read_unlock_trace();
}

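/* Called from the generated trampoline around the call to the original
 * function; the percpu_ref pins the image while that call is in progress.
 */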
void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr)
{
	percpu_ref_get(&tr->pcref);
}

void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr)
{
	percpu_ref_put(&tr->pcref);
}

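/* Weak fallback for architectures that do not implement BPF trampolines. */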
int __weak
arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
			    const struct btf_func_model *m, u32 flags,
			    struct bpf_tramp_progs *tprogs,
			    void *orig_call)
{
	return -ENOTSUPP;
}

static int __init init_trampolines(void)
{
	int i;

	for (i = 0; i < TRAMPOLINE_TABLE_SIZE; i++)
		INIT_HLIST_HEAD(&trampoline_table[i]);
	return 0;
}
late_initcall(init_trampolines);