linux/kernel/trace/bpf_trace.c
   1/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
   2 * Copyright (c) 2016 Facebook
   3 *
   4 * This program is free software; you can redistribute it and/or
   5 * modify it under the terms of version 2 of the GNU General Public
   6 * License as published by the Free Software Foundation.
   7 */
   8#include <linux/kernel.h>
   9#include <linux/types.h>
  10#include <linux/slab.h>
  11#include <linux/bpf.h>
  12#include <linux/bpf_perf_event.h>
  13#include <linux/filter.h>
  14#include <linux/uaccess.h>
  15#include <linux/ctype.h>
  16#include <linux/kprobes.h>
  17#include <linux/error-injection.h>
  18
  19#include "trace_probe.h"
  20#include "trace.h"
  21
  22u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
  23
  24/**
  25 * trace_call_bpf - invoke BPF program
  26 * @call: tracepoint event
  27 * @ctx: opaque context pointer
  28 *
  29 * kprobe handlers execute BPF programs via this helper.
  30 * It can also be used from static tracepoints in the future.
  31 *
  32 * Return: BPF programs always return an integer which is interpreted by
  33 * the kprobe handler as:
  34 * 0 - return from kprobe (event is filtered out)
  35 * 1 - store kprobe event into the ring buffer
  36 * Other values are reserved and currently alias to 1
  37 */
  38unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
  39{
  40        unsigned int ret;
  41
  42        if (in_nmi()) /* not supported yet */
  43                return 1;
  44
  45        preempt_disable();
  46
  47        if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
  48                /*
  49                 * Since some bpf program is already running on this cpu,
  50                 * don't call into another bpf program (same or different)
  51                 * and don't send a kprobe event into the ring buffer,
  52                 * so return zero here.
  53                 */
  54                ret = 0;
  55                goto out;
  56        }
  57
  58        /*
  59         * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
  60         * to all call sites, the call sites do a bpf_prog_array_valid()
  61         * check to see whether call->prog_array is empty or not, which is
  62         * a heuristic to speed up execution.
  63         *
  64         * If the prog_array fetched by bpf_prog_array_valid() was
  65         * non-NULL, we enter trace_call_bpf() and do the actual, proper
  66         * rcu_dereference() under the RCU lock.
  67         * If it turns out that prog_array is NULL, we bail out.
  68         * Conversely, if the pointer fetched by bpf_prog_array_valid()
  69         * was NULL, the prog_array is skipped, with the risk of missing
  70         * events if it was updated between that check and the
  71         * rcu_dereference(); this is an accepted risk.
  72         */
  73        ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);
  74
  75 out:
  76        __this_cpu_dec(bpf_prog_active);
  77        preempt_enable();
  78
  79        return ret;
  80}
  81EXPORT_SYMBOL_GPL(trace_call_bpf);
  82
  83#ifdef CONFIG_BPF_KPROBE_OVERRIDE
  84BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
  85{
  86        regs_set_return_value(regs, rc);
  87        override_function_with_return(regs);
  88        return 0;
  89}
  90
  91static const struct bpf_func_proto bpf_override_return_proto = {
  92        .func           = bpf_override_return,
  93        .gpl_only       = true,
  94        .ret_type       = RET_INTEGER,
  95        .arg1_type      = ARG_PTR_TO_CTX,
  96        .arg2_type      = ARG_ANYTHING,
  97};
  98#endif
  99
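/*
 * bpf_probe_read() copies @size bytes from an arbitrary, potentially
 * unsafe kernel address @unsafe_ptr into the program-supplied buffer
 * @dst, zeroing the destination on failure so that programs which
 * ignore the return value never see stale data.
 *
 * Illustrative BPF-side sketch (hypothetical program, not part of this
 * file), reading a field out of the current task:
 *
 *	struct task_struct *task = (struct task_struct *)bpf_get_current_task();
 *	u32 tgid = 0;
 *
 *	bpf_probe_read(&tgid, sizeof(tgid), &task->tgid);
 */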
 100BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
 101{
 102        int ret;
 103
 104        ret = probe_kernel_read(dst, unsafe_ptr, size);
 105        if (unlikely(ret < 0))
 106                memset(dst, 0, size);
 107
 108        return ret;
 109}
 110
 111static const struct bpf_func_proto bpf_probe_read_proto = {
 112        .func           = bpf_probe_read,
 113        .gpl_only       = true,
 114        .ret_type       = RET_INTEGER,
 115        .arg1_type      = ARG_PTR_TO_UNINIT_MEM,
 116        .arg2_type      = ARG_CONST_SIZE_OR_ZERO,
 117        .arg3_type      = ARG_ANYTHING,
 118};
 119
 120BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src,
 121           u32, size)
 122{
 123        /*
 124         * Ensure we're in user context which is safe for the helper to
 125         * run. This helper has no business in a kthread.
 126         *
 127         * access_ok() should prevent writing to non-user memory, but in
 128         * some situations (nommu, temporary switch, etc) access_ok() does
 129         * not provide enough validation, hence the check on KERNEL_DS.
 130         */
 131
 132        if (unlikely(in_interrupt() ||
 133                     current->flags & (PF_KTHREAD | PF_EXITING)))
 134                return -EPERM;
 135        if (unlikely(uaccess_kernel()))
 136                return -EPERM;
 137        if (!access_ok(VERIFY_WRITE, unsafe_ptr, size))
 138                return -EPERM;
 139
 140        return probe_kernel_write(unsafe_ptr, src, size);
 141}
 142
 143static const struct bpf_func_proto bpf_probe_write_user_proto = {
 144        .func           = bpf_probe_write_user,
 145        .gpl_only       = true,
 146        .ret_type       = RET_INTEGER,
 147        .arg1_type      = ARG_ANYTHING,
 148        .arg2_type      = ARG_PTR_TO_MEM,
 149        .arg3_type      = ARG_CONST_SIZE,
 150};
 151
 152static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
 153{
 154        pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
 155                            current->comm, task_pid_nr(current));
 156
 157        return &bpf_probe_write_user_proto;
 158}
 159
 160/*
 161 * Only limited trace_printk() conversion specifiers allowed:
 162 * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %s
 163 */
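/*
 * Illustrative BPF-side sketch (hypothetical program and variables, not
 * part of this file); the format string must live on the BPF stack and
 * be NUL-terminated:
 *
 *	char fmt[] = "map lookup failed: err %d key %u\n";
 *
 *	bpf_trace_printk(fmt, sizeof(fmt), err, key);
 */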
 164BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
 165           u64, arg2, u64, arg3)
 166{
 167        bool str_seen = false;
 168        int mod[3] = {};
 169        int fmt_cnt = 0;
 170        u64 unsafe_addr;
 171        char buf[64];
 172        int i;
 173
 174        /*
 175         * bpf_check()->check_func_arg()->check_stack_boundary()
 176         * guarantees that fmt points to the bpf program stack, that
 177         * fmt_size bytes of it were initialized, and that fmt_size > 0.
 178         */
 179        if (fmt[--fmt_size] != 0)
 180                return -EINVAL;
 181
 182        /* check format string for allowed specifiers */
 183        for (i = 0; i < fmt_size; i++) {
 184                if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
 185                        return -EINVAL;
 186
 187                if (fmt[i] != '%')
 188                        continue;
 189
 190                if (fmt_cnt >= 3)
 191                        return -EINVAL;
 192
 193                /* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
 194                i++;
 195                if (fmt[i] == 'l') {
 196                        mod[fmt_cnt]++;
 197                        i++;
 198                } else if (fmt[i] == 'p' || fmt[i] == 's') {
 199                        mod[fmt_cnt]++;
 200                        i++;
 201                        if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0)
 202                                return -EINVAL;
 203                        fmt_cnt++;
 204                        if (fmt[i - 1] == 's') {
 205                                if (str_seen)
 206                                        /* allow only one '%s' per fmt string */
 207                                        return -EINVAL;
 208                                str_seen = true;
 209
 210                                switch (fmt_cnt) {
 211                                case 1:
 212                                        unsafe_addr = arg1;
 213                                        arg1 = (long) buf;
 214                                        break;
 215                                case 2:
 216                                        unsafe_addr = arg2;
 217                                        arg2 = (long) buf;
 218                                        break;
 219                                case 3:
 220                                        unsafe_addr = arg3;
 221                                        arg3 = (long) buf;
 222                                        break;
 223                                }
 224                                buf[0] = 0;
 225                                strncpy_from_unsafe(buf,
 226                                                    (void *) (long) unsafe_addr,
 227                                                    sizeof(buf));
 228                        }
 229                        continue;
 230                }
 231
 232                if (fmt[i] == 'l') {
 233                        mod[fmt_cnt]++;
 234                        i++;
 235                }
 236
 237                if (fmt[i] != 'i' && fmt[i] != 'd' &&
 238                    fmt[i] != 'u' && fmt[i] != 'x')
 239                        return -EINVAL;
 240                fmt_cnt++;
 241        }
 242
 243/* Horrid workaround for getting va_list handling working with different
 244 * argument type combinations generically for 32 and 64 bit archs.
 245 */
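/*
 * mod[i] records the length modifier parsed above for argument i: 0 for
 * a plain %d/%i/%u/%x (or an unused slot), 1 for a single 'l' as well as
 * %p/%s, and 2 for 'll'. The nested macros below cast each argument
 * accordingly so that the variadic call into __trace_printk() matches
 * the format string on both 32-bit and 64-bit architectures.
 */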
 246#define __BPF_TP_EMIT() __BPF_ARG3_TP()
 247#define __BPF_TP(...)                                                   \
 248        __trace_printk(0 /* Fake ip */,                                 \
 249                       fmt, ##__VA_ARGS__)
 250
 251#define __BPF_ARG1_TP(...)                                              \
 252        ((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64))        \
 253          ? __BPF_TP(arg1, ##__VA_ARGS__)                               \
 254          : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32))    \
 255              ? __BPF_TP((long)arg1, ##__VA_ARGS__)                     \
 256              : __BPF_TP((u32)arg1, ##__VA_ARGS__)))
 257
 258#define __BPF_ARG2_TP(...)                                              \
 259        ((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64))        \
 260          ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__)                          \
 261          : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32))    \
 262              ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__)                \
 263              : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))
 264
 265#define __BPF_ARG3_TP(...)                                              \
 266        ((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64))        \
 267          ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__)                          \
 268          : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32))    \
 269              ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__)                \
 270              : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))
 271
 272        return __BPF_TP_EMIT();
 273}
 274
 275static const struct bpf_func_proto bpf_trace_printk_proto = {
 276        .func           = bpf_trace_printk,
 277        .gpl_only       = true,
 278        .ret_type       = RET_INTEGER,
 279        .arg1_type      = ARG_PTR_TO_MEM,
 280        .arg2_type      = ARG_CONST_SIZE,
 281};
 282
 283const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
 284{
 285        /*
 286         * this program might be calling bpf_trace_printk,
 287         * so allocate per-cpu printk buffers
 288         */
 289        trace_printk_init_buffers();
 290
 291        return &bpf_trace_printk_proto;
 292}
 293
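/*
 * Common backend for bpf_perf_event_read() and bpf_perf_event_read_value():
 * look up the perf event stored in the perf event array map slot selected
 * by (flags & BPF_F_INDEX_MASK), or in the current CPU's slot when
 * BPF_F_CURRENT_CPU is passed, and read its counter (and optionally the
 * enabled/running times) via perf_event_read_local().
 */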
 294static __always_inline int
 295get_map_perf_counter(struct bpf_map *map, u64 flags,
 296                     u64 *value, u64 *enabled, u64 *running)
 297{
 298        struct bpf_array *array = container_of(map, struct bpf_array, map);
 299        unsigned int cpu = smp_processor_id();
 300        u64 index = flags & BPF_F_INDEX_MASK;
 301        struct bpf_event_entry *ee;
 302
 303        if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
 304                return -EINVAL;
 305        if (index == BPF_F_CURRENT_CPU)
 306                index = cpu;
 307        if (unlikely(index >= array->map.max_entries))
 308                return -E2BIG;
 309
 310        ee = READ_ONCE(array->ptrs[index]);
 311        if (!ee)
 312                return -ENOENT;
 313
 314        return perf_event_read_local(ee->event, value, enabled, running);
 315}
 316
 317BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
 318{
 319        u64 value = 0;
 320        int err;
 321
 322        err = get_map_perf_counter(map, flags, &value, NULL, NULL);
 323        /*
 324         * This API is ugly since the [-22..-2] range of valid counter
 325         * values cannot be distinguished from error codes, but that's UAPI.
 326         */
 327        if (err)
 328                return err;
 329        return value;
 330}
 331
 332static const struct bpf_func_proto bpf_perf_event_read_proto = {
 333        .func           = bpf_perf_event_read,
 334        .gpl_only       = true,
 335        .ret_type       = RET_INTEGER,
 336        .arg1_type      = ARG_CONST_MAP_PTR,
 337        .arg2_type      = ARG_ANYTHING,
 338};
 339
 340BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
 341           struct bpf_perf_event_value *, buf, u32, size)
 342{
 343        int err = -EINVAL;
 344
 345        if (unlikely(size != sizeof(struct bpf_perf_event_value)))
 346                goto clear;
 347        err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
 348                                   &buf->running);
 349        if (unlikely(err))
 350                goto clear;
 351        return 0;
 352clear:
 353        memset(buf, 0, size);
 354        return err;
 355}
 356
 357static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
 358        .func           = bpf_perf_event_read_value,
 359        .gpl_only       = true,
 360        .ret_type       = RET_INTEGER,
 361        .arg1_type      = ARG_CONST_MAP_PTR,
 362        .arg2_type      = ARG_ANYTHING,
 363        .arg3_type      = ARG_PTR_TO_UNINIT_MEM,
 364        .arg4_type      = ARG_CONST_SIZE,
 365};
 366
 367static DEFINE_PER_CPU(struct perf_sample_data, bpf_trace_sd);
 368
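/*
 * Core of the perf_event_output helpers: submit the prepared sample data
 * @sd to the perf event stored in the array map slot selected by @flags.
 * The event must be a PERF_COUNT_SW_BPF_OUTPUT software event and must be
 * bound to the current CPU, otherwise the output is rejected.
 */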
 369static __always_inline u64
 370__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
 371                        u64 flags, struct perf_sample_data *sd)
 372{
 373        struct bpf_array *array = container_of(map, struct bpf_array, map);
 374        unsigned int cpu = smp_processor_id();
 375        u64 index = flags & BPF_F_INDEX_MASK;
 376        struct bpf_event_entry *ee;
 377        struct perf_event *event;
 378
 379        if (index == BPF_F_CURRENT_CPU)
 380                index = cpu;
 381        if (unlikely(index >= array->map.max_entries))
 382                return -E2BIG;
 383
 384        ee = READ_ONCE(array->ptrs[index]);
 385        if (!ee)
 386                return -ENOENT;
 387
 388        event = ee->event;
 389        if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
 390                     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
 391                return -EINVAL;
 392
 393        if (unlikely(event->oncpu != cpu))
 394                return -EOPNOTSUPP;
 395
 396        perf_event_output(event, sd, regs);
 397        return 0;
 398}
 399
 400BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
 401           u64, flags, void *, data, u64, size)
 402{
 403        struct perf_sample_data *sd = this_cpu_ptr(&bpf_trace_sd);
 404        struct perf_raw_record raw = {
 405                .frag = {
 406                        .size = size,
 407                        .data = data,
 408                },
 409        };
 410
 411        if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
 412                return -EINVAL;
 413
 414        perf_sample_data_init(sd, 0, 0);
 415        sd->raw = &raw;
 416
 417        return __bpf_perf_event_output(regs, map, flags, sd);
 418}
 419
 420static const struct bpf_func_proto bpf_perf_event_output_proto = {
 421        .func           = bpf_perf_event_output,
 422        .gpl_only       = true,
 423        .ret_type       = RET_INTEGER,
 424        .arg1_type      = ARG_PTR_TO_CTX,
 425        .arg2_type      = ARG_CONST_MAP_PTR,
 426        .arg3_type      = ARG_ANYTHING,
 427        .arg4_type      = ARG_PTR_TO_MEM,
 428        .arg5_type      = ARG_CONST_SIZE_OR_ZERO,
 429};
 430
 431static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);
 432static DEFINE_PER_CPU(struct perf_sample_data, bpf_misc_sd);
 433
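/*
 * Kernel-internal variant of the perf_event_output helper for callers
 * that have no pt_regs in their program context (e.g. the skb/xdp paths,
 * see the bpf_raw_tp_regs comment further down): fabricate regs via
 * perf_fetch_caller_regs() and emit a two-fragment raw record, @meta
 * followed by @ctx copied through @ctx_copy.
 */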
 434u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
 435                     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
 436{
 437        struct perf_sample_data *sd = this_cpu_ptr(&bpf_misc_sd);
 438        struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);
 439        struct perf_raw_frag frag = {
 440                .copy           = ctx_copy,
 441                .size           = ctx_size,
 442                .data           = ctx,
 443        };
 444        struct perf_raw_record raw = {
 445                .frag = {
 446                        {
 447                                .next   = ctx_size ? &frag : NULL,
 448                        },
 449                        .size   = meta_size,
 450                        .data   = meta,
 451                },
 452        };
 453
 454        perf_fetch_caller_regs(regs);
 455        perf_sample_data_init(sd, 0, 0);
 456        sd->raw = &raw;
 457
 458        return __bpf_perf_event_output(regs, map, flags, sd);
 459}
 460
 461BPF_CALL_0(bpf_get_current_task)
 462{
 463        return (long) current;
 464}
 465
 466static const struct bpf_func_proto bpf_get_current_task_proto = {
 467        .func           = bpf_get_current_task,
 468        .gpl_only       = true,
 469        .ret_type       = RET_INTEGER,
 470};
 471
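/*
 * Return whether current runs within the cgroup hierarchy rooted at the
 * cgroup stored at @idx in a cgroup array map (1 if so, 0 if not). Only
 * meaningful in process context, hence the in_interrupt() check below.
 */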
 472BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
 473{
 474        struct bpf_array *array = container_of(map, struct bpf_array, map);
 475        struct cgroup *cgrp;
 476
 477        if (unlikely(in_interrupt()))
 478                return -EINVAL;
 479        if (unlikely(idx >= array->map.max_entries))
 480                return -E2BIG;
 481
 482        cgrp = READ_ONCE(array->ptrs[idx]);
 483        if (unlikely(!cgrp))
 484                return -EAGAIN;
 485
 486        return task_under_cgroup_hierarchy(current, cgrp);
 487}
 488
 489static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
 490        .func           = bpf_current_task_under_cgroup,
 491        .gpl_only       = false,
 492        .ret_type       = RET_INTEGER,
 493        .arg1_type      = ARG_CONST_MAP_PTR,
 494        .arg2_type      = ARG_ANYTHING,
 495};
 496
 497BPF_CALL_3(bpf_probe_read_str, void *, dst, u32, size,
 498           const void *, unsafe_ptr)
 499{
 500        int ret;
 501
 502        /*
 503         * The strncpy_from_unsafe() call will likely not fill the entire
 504         * buffer, but that's okay in this circumstance as we're probing
 505         * arbitrary memory anyway, similar to bpf_probe_read(), and might
 506         * as well probe the stack. Thus, memory is explicitly cleared
 507         * only in the error case, so that improper users who ignore the
 508         * return code don't copy garbage; otherwise the length of the
 509         * string is returned, which can be used for bpf_perf_event_output() et al.
 510         */
 511        ret = strncpy_from_unsafe(dst, unsafe_ptr, size);
 512        if (unlikely(ret < 0))
 513                memset(dst, 0, size);
 514
 515        return ret;
 516}
 517
 518static const struct bpf_func_proto bpf_probe_read_str_proto = {
 519        .func           = bpf_probe_read_str,
 520        .gpl_only       = true,
 521        .ret_type       = RET_INTEGER,
 522        .arg1_type      = ARG_PTR_TO_UNINIT_MEM,
 523        .arg2_type      = ARG_CONST_SIZE_OR_ZERO,
 524        .arg3_type      = ARG_ANYTHING,
 525};
 526
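/*
 * Helpers available to every tracing program type. The per-type
 * get_func_proto callbacks below (kprobe, tracepoint, raw tracepoint,
 * perf event) add their own helpers and fall back to this set.
 */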
 527static const struct bpf_func_proto *
 528tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 529{
 530        switch (func_id) {
 531        case BPF_FUNC_map_lookup_elem:
 532                return &bpf_map_lookup_elem_proto;
 533        case BPF_FUNC_map_update_elem:
 534                return &bpf_map_update_elem_proto;
 535        case BPF_FUNC_map_delete_elem:
 536                return &bpf_map_delete_elem_proto;
 537        case BPF_FUNC_probe_read:
 538                return &bpf_probe_read_proto;
 539        case BPF_FUNC_ktime_get_ns:
 540                return &bpf_ktime_get_ns_proto;
 541        case BPF_FUNC_tail_call:
 542                return &bpf_tail_call_proto;
 543        case BPF_FUNC_get_current_pid_tgid:
 544                return &bpf_get_current_pid_tgid_proto;
 545        case BPF_FUNC_get_current_task:
 546                return &bpf_get_current_task_proto;
 547        case BPF_FUNC_get_current_uid_gid:
 548                return &bpf_get_current_uid_gid_proto;
 549        case BPF_FUNC_get_current_comm:
 550                return &bpf_get_current_comm_proto;
 551        case BPF_FUNC_trace_printk:
 552                return bpf_get_trace_printk_proto();
 553        case BPF_FUNC_get_smp_processor_id:
 554                return &bpf_get_smp_processor_id_proto;
 555        case BPF_FUNC_get_numa_node_id:
 556                return &bpf_get_numa_node_id_proto;
 557        case BPF_FUNC_perf_event_read:
 558                return &bpf_perf_event_read_proto;
 559        case BPF_FUNC_probe_write_user:
 560                return bpf_get_probe_write_proto();
 561        case BPF_FUNC_current_task_under_cgroup:
 562                return &bpf_current_task_under_cgroup_proto;
 563        case BPF_FUNC_get_prandom_u32:
 564                return &bpf_get_prandom_u32_proto;
 565        case BPF_FUNC_probe_read_str:
 566                return &bpf_probe_read_str_proto;
 567        default:
 568                return NULL;
 569        }
 570}
 571
 572static const struct bpf_func_proto *
 573kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 574{
 575        switch (func_id) {
 576        case BPF_FUNC_perf_event_output:
 577                return &bpf_perf_event_output_proto;
 578        case BPF_FUNC_get_stackid:
 579                return &bpf_get_stackid_proto;
 580        case BPF_FUNC_perf_event_read_value:
 581                return &bpf_perf_event_read_value_proto;
 582#ifdef CONFIG_BPF_KPROBE_OVERRIDE
 583        case BPF_FUNC_override_return:
 584                return &bpf_override_return_proto;
 585#endif
 586        default:
 587                return tracing_func_proto(func_id, prog);
 588        }
 589}
 590
 591/* bpf+kprobe programs can access fields of 'struct pt_regs' */
 592static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
 593                                        const struct bpf_prog *prog,
 594                                        struct bpf_insn_access_aux *info)
 595{
 596        if (off < 0 || off >= sizeof(struct pt_regs))
 597                return false;
 598        if (type != BPF_READ)
 599                return false;
 600        if (off % size != 0)
 601                return false;
 602        /*
 603         * Assertion for 32 bit to make sure last 8 byte access
 604         * (BPF_DW) to the last 4 byte member is disallowed.
 605         */
 606        if (off + size > sizeof(struct pt_regs))
 607                return false;
 608
 609        return true;
 610}
 611
 612const struct bpf_verifier_ops kprobe_verifier_ops = {
 613        .get_func_proto  = kprobe_prog_func_proto,
 614        .is_valid_access = kprobe_prog_is_valid_access,
 615};
 616
 617const struct bpf_prog_ops kprobe_prog_ops = {
 618};
 619
 620BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
 621           u64, flags, void *, data, u64, size)
 622{
 623        struct pt_regs *regs = *(struct pt_regs **)tp_buff;
 624
 625        /*
 626         * r1 points to the perf tracepoint buffer whose first 8 bytes are hidden
 627         * from the bpf program and contain a pointer to 'struct pt_regs'. Fetch it
 628         * from there and call the same bpf_perf_event_output() helper inline.
 629         */
 630        return ____bpf_perf_event_output(regs, map, flags, data, size);
 631}
 632
 633static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
 634        .func           = bpf_perf_event_output_tp,
 635        .gpl_only       = true,
 636        .ret_type       = RET_INTEGER,
 637        .arg1_type      = ARG_PTR_TO_CTX,
 638        .arg2_type      = ARG_CONST_MAP_PTR,
 639        .arg3_type      = ARG_ANYTHING,
 640        .arg4_type      = ARG_PTR_TO_MEM,
 641        .arg5_type      = ARG_CONST_SIZE_OR_ZERO,
 642};
 643
 644BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
 645           u64, flags)
 646{
 647        struct pt_regs *regs = *(struct pt_regs **)tp_buff;
 648
 649        /*
 650         * Same comment as in bpf_perf_event_output_tp(), only that this time
 651         * the other helper's function body cannot be inlined due to being
 652         * external, thus we need to call raw helper function.
 653         */
 654        return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
 655                               flags, 0, 0);
 656}
 657
 658static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
 659        .func           = bpf_get_stackid_tp,
 660        .gpl_only       = true,
 661        .ret_type       = RET_INTEGER,
 662        .arg1_type      = ARG_PTR_TO_CTX,
 663        .arg2_type      = ARG_CONST_MAP_PTR,
 664        .arg3_type      = ARG_ANYTHING,
 665};
 666
 667static const struct bpf_func_proto *
 668tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 669{
 670        switch (func_id) {
 671        case BPF_FUNC_perf_event_output:
 672                return &bpf_perf_event_output_proto_tp;
 673        case BPF_FUNC_get_stackid:
 674                return &bpf_get_stackid_proto_tp;
 675        default:
 676                return tracing_func_proto(func_id, prog);
 677        }
 678}
 679
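/*
 * Tracepoint programs may only read their ctx buffer. Offsets below
 * sizeof(void *) are rejected because the first 8 bytes of the perf
 * tracepoint buffer hold the hidden 'struct pt_regs' pointer consumed by
 * the helpers above; the upper bound is the perf trace record size.
 */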
 680static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
 681                                    const struct bpf_prog *prog,
 682                                    struct bpf_insn_access_aux *info)
 683{
 684        if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
 685                return false;
 686        if (type != BPF_READ)
 687                return false;
 688        if (off % size != 0)
 689                return false;
 690
 691        BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
 692        return true;
 693}
 694
 695const struct bpf_verifier_ops tracepoint_verifier_ops = {
 696        .get_func_proto  = tp_prog_func_proto,
 697        .is_valid_access = tp_prog_is_valid_access,
 698};
 699
 700const struct bpf_prog_ops tracepoint_prog_ops = {
 701};
 702
 703BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
 704           struct bpf_perf_event_value *, buf, u32, size)
 705{
 706        int err = -EINVAL;
 707
 708        if (unlikely(size != sizeof(struct bpf_perf_event_value)))
 709                goto clear;
 710        err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
 711                                    &buf->running);
 712        if (unlikely(err))
 713                goto clear;
 714        return 0;
 715clear:
 716        memset(buf, 0, size);
 717        return err;
 718}
 719
 720static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
 721         .func           = bpf_perf_prog_read_value,
 722         .gpl_only       = true,
 723         .ret_type       = RET_INTEGER,
 724         .arg1_type      = ARG_PTR_TO_CTX,
 725         .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
 726         .arg3_type      = ARG_CONST_SIZE,
 727};
 728
 729static const struct bpf_func_proto *
 730pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 731{
 732        switch (func_id) {
 733        case BPF_FUNC_perf_event_output:
 734                return &bpf_perf_event_output_proto_tp;
 735        case BPF_FUNC_get_stackid:
 736                return &bpf_get_stackid_proto_tp;
 737        case BPF_FUNC_perf_prog_read_value:
 738                return &bpf_perf_prog_read_value_proto;
 739        default:
 740                return tracing_func_proto(func_id, prog);
 741        }
 742}
 743
 744/*
 745 * bpf_raw_tp_regs are separate from the bpf_pt_regs used from skb/xdp
 746 * to avoid a potential recursive reuse issue when/if tracepoints are added
 747 * inside bpf_*_event_output and/or bpf_get_stackid
 748 */
 749static DEFINE_PER_CPU(struct pt_regs, bpf_raw_tp_regs);
 750BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
 751           struct bpf_map *, map, u64, flags, void *, data, u64, size)
 752{
 753        struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs);
 754
 755        perf_fetch_caller_regs(regs);
 756        return ____bpf_perf_event_output(regs, map, flags, data, size);
 757}
 758
 759static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
 760        .func           = bpf_perf_event_output_raw_tp,
 761        .gpl_only       = true,
 762        .ret_type       = RET_INTEGER,
 763        .arg1_type      = ARG_PTR_TO_CTX,
 764        .arg2_type      = ARG_CONST_MAP_PTR,
 765        .arg3_type      = ARG_ANYTHING,
 766        .arg4_type      = ARG_PTR_TO_MEM,
 767        .arg5_type      = ARG_CONST_SIZE_OR_ZERO,
 768};
 769
 770BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
 771           struct bpf_map *, map, u64, flags)
 772{
 773        struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs);
 774
 775        perf_fetch_caller_regs(regs);
 776        /* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
 777        return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
 778                               flags, 0, 0);
 779}
 780
 781static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
 782        .func           = bpf_get_stackid_raw_tp,
 783        .gpl_only       = true,
 784        .ret_type       = RET_INTEGER,
 785        .arg1_type      = ARG_PTR_TO_CTX,
 786        .arg2_type      = ARG_CONST_MAP_PTR,
 787        .arg3_type      = ARG_ANYTHING,
 788};
 789
 790static const struct bpf_func_proto *
 791raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 792{
 793        switch (func_id) {
 794        case BPF_FUNC_perf_event_output:
 795                return &bpf_perf_event_output_proto_raw_tp;
 796        case BPF_FUNC_get_stackid:
 797                return &bpf_get_stackid_proto_raw_tp;
 798        default:
 799                return tracing_func_proto(func_id, prog);
 800        }
 801}
 802
 803static bool raw_tp_prog_is_valid_access(int off, int size,
 804                                        enum bpf_access_type type,
 805                                        const struct bpf_prog *prog,
 806                                        struct bpf_insn_access_aux *info)
 807{
 808        /* largest tracepoint in the kernel has 12 args */
 809        if (off < 0 || off >= sizeof(__u64) * 12)
 810                return false;
 811        if (type != BPF_READ)
 812                return false;
 813        if (off % size != 0)
 814                return false;
 815        return true;
 816}
 817
 818const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
 819        .get_func_proto  = raw_tp_prog_func_proto,
 820        .is_valid_access = raw_tp_prog_is_valid_access,
 821};
 822
 823const struct bpf_prog_ops raw_tracepoint_prog_ops = {
 824};
 825
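/*
 * Perf event programs see 'struct bpf_perf_event_data'. The sample_period
 * and addr fields may be loaded with narrower accesses; any other offset
 * must be a register-sized load, since those offsets map onto the saved
 * register set.
 */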
 826static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
 827                                    const struct bpf_prog *prog,
 828                                    struct bpf_insn_access_aux *info)
 829{
 830        const int size_u64 = sizeof(u64);
 831
 832        if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
 833                return false;
 834        if (type != BPF_READ)
 835                return false;
 836        if (off % size != 0)
 837                return false;
 838
 839        switch (off) {
 840        case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
 841                bpf_ctx_record_field_size(info, size_u64);
 842                if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
 843                        return false;
 844                break;
 845        case bpf_ctx_range(struct bpf_perf_event_data, addr):
 846                bpf_ctx_record_field_size(info, size_u64);
 847                if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
 848                        return false;
 849                break;
 850        default:
 851                if (size != sizeof(long))
 852                        return false;
 853        }
 854
 855        return true;
 856}
 857
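/*
 * Rewrite loads from the user-visible 'struct bpf_perf_event_data' into
 * two loads through the in-kernel 'struct bpf_perf_event_data_kern':
 * first the data/regs pointer, then the requested field behind it.
 */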
 858static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
 859                                      const struct bpf_insn *si,
 860                                      struct bpf_insn *insn_buf,
 861                                      struct bpf_prog *prog, u32 *target_size)
 862{
 863        struct bpf_insn *insn = insn_buf;
 864
 865        switch (si->off) {
 866        case offsetof(struct bpf_perf_event_data, sample_period):
 867                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
 868                                                       data), si->dst_reg, si->src_reg,
 869                                      offsetof(struct bpf_perf_event_data_kern, data));
 870                *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
 871                                      bpf_target_off(struct perf_sample_data, period, 8,
 872                                                     target_size));
 873                break;
 874        case offsetof(struct bpf_perf_event_data, addr):
 875                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
 876                                                       data), si->dst_reg, si->src_reg,
 877                                      offsetof(struct bpf_perf_event_data_kern, data));
 878                *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
 879                                      bpf_target_off(struct perf_sample_data, addr, 8,
 880                                                     target_size));
 881                break;
 882        default:
 883                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
 884                                                       regs), si->dst_reg, si->src_reg,
 885                                      offsetof(struct bpf_perf_event_data_kern, regs));
 886                *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
 887                                      si->off);
 888                break;
 889        }
 890
 891        return insn - insn_buf;
 892}
 893
 894const struct bpf_verifier_ops perf_event_verifier_ops = {
 895        .get_func_proto         = pe_prog_func_proto,
 896        .is_valid_access        = pe_prog_is_valid_access,
 897        .convert_ctx_access     = pe_prog_convert_ctx_access,
 898};
 899
 900const struct bpf_prog_ops perf_event_prog_ops = {
 901};
 902
 903static DEFINE_MUTEX(bpf_event_mutex);
 904
 905#define BPF_TRACE_MAX_PROGS 64
 906
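/*
 * Attach @prog to the tracepoint event backing @event. At most
 * BPF_TRACE_MAX_PROGS programs may be attached to one tp_event, and a
 * given perf_event can carry only one program at a time.
 */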
 907int perf_event_attach_bpf_prog(struct perf_event *event,
 908                               struct bpf_prog *prog)
 909{
 910        struct bpf_prog_array __rcu *old_array;
 911        struct bpf_prog_array *new_array;
 912        int ret = -EEXIST;
 913
 914        /*
 915         * Kprobe override only works if the probe is on the function entry,
 916         * and only if the probed function is on the error-injection opt-in list.
 917         */
 918        if (prog->kprobe_override &&
 919            (!trace_kprobe_on_func_entry(event->tp_event) ||
 920             !trace_kprobe_error_injectable(event->tp_event)))
 921                return -EINVAL;
 922
 923        mutex_lock(&bpf_event_mutex);
 924
 925        if (event->prog)
 926                goto unlock;
 927
 928        old_array = event->tp_event->prog_array;
 929        if (old_array &&
 930            bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
 931                ret = -E2BIG;
 932                goto unlock;
 933        }
 934
 935        ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
 936        if (ret < 0)
 937                goto unlock;
 938
 939        /* set the new array to event->tp_event and set event->prog */
 940        event->prog = prog;
 941        rcu_assign_pointer(event->tp_event->prog_array, new_array);
 942        bpf_prog_array_free(old_array);
 943
 944unlock:
 945        mutex_unlock(&bpf_event_mutex);
 946        return ret;
 947}
 948
 949void perf_event_detach_bpf_prog(struct perf_event *event)
 950{
 951        struct bpf_prog_array __rcu *old_array;
 952        struct bpf_prog_array *new_array;
 953        int ret;
 954
 955        mutex_lock(&bpf_event_mutex);
 956
 957        if (!event->prog)
 958                goto unlock;
 959
 960        old_array = event->tp_event->prog_array;
 961        ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
 962        if (ret < 0) {
 963                bpf_prog_array_delete_safe(old_array, event->prog);
 964        } else {
 965                rcu_assign_pointer(event->tp_event->prog_array, new_array);
 966                bpf_prog_array_free(old_array);
 967        }
 968
 969        bpf_prog_put(event->prog);
 970        event->prog = NULL;
 971
 972unlock:
 973        mutex_unlock(&bpf_event_mutex);
 974}
 975
 976int perf_event_query_prog_array(struct perf_event *event, void __user *info)
 977{
 978        struct perf_event_query_bpf __user *uquery = info;
 979        struct perf_event_query_bpf query = {};
 980        u32 *ids, prog_cnt, ids_len;
 981        int ret;
 982
 983        if (!capable(CAP_SYS_ADMIN))
 984                return -EPERM;
 985        if (event->attr.type != PERF_TYPE_TRACEPOINT)
 986                return -EINVAL;
 987        if (copy_from_user(&query, uquery, sizeof(query)))
 988                return -EFAULT;
 989
 990        ids_len = query.ids_len;
 991        if (ids_len > BPF_TRACE_MAX_PROGS)
 992                return -E2BIG;
 993        ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
 994        if (!ids)
 995                return -ENOMEM;
 996        /*
 997         * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
 998         * is required when user only wants to check for uquery->prog_cnt.
 999         * There is no need to check for it since the case is handled
1000         * gracefully in bpf_prog_array_copy_info.
1001         */
1002
1003        mutex_lock(&bpf_event_mutex);
1004        ret = bpf_prog_array_copy_info(event->tp_event->prog_array,
1005                                       ids,
1006                                       ids_len,
1007                                       &prog_cnt);
1008        mutex_unlock(&bpf_event_mutex);
1009
1010        if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
1011            copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
1012                ret = -EFAULT;
1013
1014        kfree(ids);
1015        return ret;
1016}
1017
1018extern struct bpf_raw_event_map __start__bpf_raw_tp[];
1019extern struct bpf_raw_event_map __stop__bpf_raw_tp[];
1020
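/*
 * Linear scan of the __bpf_raw_tp section, delimited by the linker
 * symbols above, for a raw tracepoint whose name matches @name.
 */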
1021struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name)
1022{
1023        struct bpf_raw_event_map *btp = __start__bpf_raw_tp;
1024
1025        for (; btp < __stop__bpf_raw_tp; btp++) {
1026                if (!strcmp(btp->tp->name, name))
1027                        return btp;
1028        }
1029        return NULL;
1030}
1031
1032static __always_inline
1033void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
1034{
1035        rcu_read_lock();
1036        preempt_disable();
1037        (void) BPF_PROG_RUN(prog, args);
1038        preempt_enable();
1039        rcu_read_unlock();
1040}
1041
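/*
 * The macro block below stamps out bpf_trace_run1() through
 * bpf_trace_run12(): each takes that many u64 tracepoint arguments,
 * copies them into a local u64 array and hands that array to the
 * attached program as its context via __bpf_trace_run().
 */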
1042#define UNPACK(...)                     __VA_ARGS__
1043#define REPEAT_1(FN, DL, X, ...)        FN(X)
1044#define REPEAT_2(FN, DL, X, ...)        FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
1045#define REPEAT_3(FN, DL, X, ...)        FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
1046#define REPEAT_4(FN, DL, X, ...)        FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
1047#define REPEAT_5(FN, DL, X, ...)        FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
1048#define REPEAT_6(FN, DL, X, ...)        FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
1049#define REPEAT_7(FN, DL, X, ...)        FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
1050#define REPEAT_8(FN, DL, X, ...)        FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
1051#define REPEAT_9(FN, DL, X, ...)        FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
1052#define REPEAT_10(FN, DL, X, ...)       FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
1053#define REPEAT_11(FN, DL, X, ...)       FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
1054#define REPEAT_12(FN, DL, X, ...)       FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
1055#define REPEAT(X, FN, DL, ...)          REPEAT_##X(FN, DL, __VA_ARGS__)
1056
1057#define SARG(X)         u64 arg##X
1058#define COPY(X)         args[X] = arg##X
1059
1060#define __DL_COM        (,)
1061#define __DL_SEM        (;)
1062
1063#define __SEQ_0_11      0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
1064
1065#define BPF_TRACE_DEFN_x(x)                                             \
1066        void bpf_trace_run##x(struct bpf_prog *prog,                    \
1067                              REPEAT(x, SARG, __DL_COM, __SEQ_0_11))    \
1068        {                                                               \
1069                u64 args[x];                                            \
1070                REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);                  \
1071                __bpf_trace_run(prog, args);                            \
1072        }                                                               \
1073        EXPORT_SYMBOL_GPL(bpf_trace_run##x)
1074BPF_TRACE_DEFN_x(1);
1075BPF_TRACE_DEFN_x(2);
1076BPF_TRACE_DEFN_x(3);
1077BPF_TRACE_DEFN_x(4);
1078BPF_TRACE_DEFN_x(5);
1079BPF_TRACE_DEFN_x(6);
1080BPF_TRACE_DEFN_x(7);
1081BPF_TRACE_DEFN_x(8);
1082BPF_TRACE_DEFN_x(9);
1083BPF_TRACE_DEFN_x(10);
1084BPF_TRACE_DEFN_x(11);
1085BPF_TRACE_DEFN_x(12);
1086
1087static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1088{
1089        struct tracepoint *tp = btp->tp;
1090
1091        /*
1092         * check that program doesn't access arguments beyond what's
1093         * available in this tracepoint
1094         */
1095        if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
1096                return -EINVAL;
1097
1098        return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog);
1099}
1100
1101int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1102{
1103        int err;
1104
1105        mutex_lock(&bpf_event_mutex);
1106        err = __bpf_probe_register(btp, prog);
1107        mutex_unlock(&bpf_event_mutex);
1108        return err;
1109}
1110
1111int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1112{
1113        int err;
1114
1115        mutex_lock(&bpf_event_mutex);
1116        err = tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
1117        mutex_unlock(&bpf_event_mutex);
1118        return err;
1119}
1120