linux/tools/perf/util/bpf-event.c
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include "bpf-event.h"
#include "debug.h"
#include "symbol.h"
#include "machine.h"
#include "env.h"
#include "session.h"
#include "map.h"
#include "evlist.h"

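/* Convert a pointer to the __u64 representation used by BPF UAPI structs. */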
#define ptr_to_u64(ptr)    ((__u64)(unsigned long)(ptr))

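/* Format @len bytes of @data into @buf as lowercase hex, two characters per byte. */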
static int snprintf_hex(char *buf, size_t size, unsigned char *data, size_t len)
{
        int ret = 0;
        size_t i;

        for (i = 0; i < len; i++)
                ret += snprintf(buf + ret, size - ret, "%02x", data[i]);
        return ret;
}

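/*
 * Handle PERF_BPF_EVENT_PROG_LOAD: mark the kernel maps that cover the
 * program's jited ksyms as BPF-program DSOs, so that later annotation
 * can look up the bpf_prog_info saved in perf_env.
 */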
static int machine__process_bpf_event_load(struct machine *machine,
                                           union perf_event *event,
                                           struct perf_sample *sample __maybe_unused)
{
        struct bpf_prog_info_linear *info_linear;
        struct bpf_prog_info_node *info_node;
        struct perf_env *env = machine->env;
        int id = event->bpf_event.id;
        unsigned int i;

        /* perf-record, no need to handle bpf-event */
        if (env == NULL)
                return 0;

        info_node = perf_env__find_bpf_prog_info(env, id);
        if (!info_node)
                return 0;
        info_linear = info_node->info_linear;

        for (i = 0; i < info_linear->info.nr_jited_ksyms; i++) {
                u64 *addrs = (u64 *)(uintptr_t)(info_linear->info.jited_ksyms);
                u64 addr = addrs[i];
                struct map *map;

                map = map_groups__find(&machine->kmaps, addr);

                if (map) {
                        map->dso->binary_type = DSO_BINARY_TYPE__BPF_PROG_INFO;
                        map->dso->bpf_prog.id = id;
                        map->dso->bpf_prog.sub_id = i;
                        map->dso->bpf_prog.env = env;
                }
        }
        return 0;
}

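/* Dispatch a PERF_RECORD_BPF_EVENT according to its bpf_event type. */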
int machine__process_bpf_event(struct machine *machine __maybe_unused,
                               union perf_event *event,
                               struct perf_sample *sample __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_bpf_event(event, stdout);

        switch (event->bpf_event.type) {
        case PERF_BPF_EVENT_PROG_LOAD:
                return machine__process_bpf_event_load(machine, event, sample);

        case PERF_BPF_EVENT_PROG_UNLOAD:
                /*
                 * Do not free bpf_prog_info and btf of the program here,
                 * as annotation still needs them. They will be freed at
                 * the end of the session.
                 */
                break;
        default:
                pr_debug("unexpected bpf_event type of %d\n",
                         event->bpf_event.type);
                break;
        }
        return 0;
}

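/* Copy the raw BTF data for @btf_id into a btf_node and store it in @env. */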
static int perf_env__fetch_btf(struct perf_env *env,
                               u32 btf_id,
                               struct btf *btf)
{
        struct btf_node *node;
        u32 data_size;
        const void *data;

        data = btf__get_raw_data(btf, &data_size);

        node = malloc(data_size + sizeof(struct btf_node));
        if (!node)
                return -1;

        node->id = btf_id;
        node->data_size = data_size;
        memcpy(node->data, data, data_size);

        perf_env__insert_btf(env, node);
        return 0;
}

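/*
 * Build the ksym-style name "bpf_prog_<tag>[_<name>]" for sub program
 * @sub_id, preferring the function name from BTF when it is available.
 */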
static int synthesize_bpf_prog_name(char *buf, int size,
                                    struct bpf_prog_info *info,
                                    struct btf *btf,
                                    u32 sub_id)
{
        u8 (*prog_tags)[BPF_TAG_SIZE] = (void *)(uintptr_t)(info->prog_tags);
        void *func_infos = (void *)(uintptr_t)(info->func_info);
        u32 sub_prog_cnt = info->nr_jited_ksyms;
        const struct bpf_func_info *finfo;
        const char *short_name = NULL;
        const struct btf_type *t;
        int name_len;

        name_len = snprintf(buf, size, "bpf_prog_");
        name_len += snprintf_hex(buf + name_len, size - name_len,
                                 prog_tags[sub_id], BPF_TAG_SIZE);
        if (btf) {
                finfo = func_infos + sub_id * info->func_info_rec_size;
                t = btf__type_by_id(btf, finfo->type_id);
                short_name = btf__name_by_offset(btf, t->name_off);
        } else if (sub_id == 0 && sub_prog_cnt == 1) {
                /* no subprog */
                if (info->name[0])
                        short_name = info->name;
        } else
                short_name = "F";
        if (short_name)
                name_len += snprintf(buf + name_len, size - name_len,
                                     "_%s", short_name);
        return name_len;
}

/*
 * Synthesize PERF_RECORD_KSYMBOL and PERF_RECORD_BPF_EVENT for one bpf
 * program. One PERF_RECORD_BPF_EVENT is generated for the program, and
 * one PERF_RECORD_KSYMBOL is generated for each sub program.
 *
 * Returns:
 *    0 for success;
 *   -1 for failures;
 *   -2 for lack of kernel support.
 */
static int perf_event__synthesize_one_bpf_prog(struct perf_session *session,
                                               perf_event__handler_t process,
                                               struct machine *machine,
                                               int fd,
                                               union perf_event *event,
                                               struct record_opts *opts)
{
        struct ksymbol_event *ksymbol_event = &event->ksymbol_event;
        struct bpf_event *bpf_event = &event->bpf_event;
        struct bpf_prog_info_linear *info_linear;
        struct perf_tool *tool = session->tool;
        struct bpf_prog_info_node *info_node;
        struct bpf_prog_info *info;
        struct btf *btf = NULL;
        struct perf_env *env;
        u32 sub_prog_cnt, i;
        int err = 0;
        u64 arrays;

        /*
         * for perf-record and perf-report use header.env;
         * otherwise, use global perf_env.
         */
        env = session->data ? &session->header.env : &perf_env;

        arrays = 1UL << BPF_PROG_INFO_JITED_KSYMS;
        arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
        arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
        arrays |= 1UL << BPF_PROG_INFO_PROG_TAGS;
        arrays |= 1UL << BPF_PROG_INFO_JITED_INSNS;
        arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
        arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;

        info_linear = bpf_program__get_prog_info_linear(fd, arrays);
        if (IS_ERR_OR_NULL(info_linear)) {
                info_linear = NULL;
                pr_debug("%s: failed to get BPF program info. aborting\n", __func__);
                return -1;
        }

        if (info_linear->info_len < offsetof(struct bpf_prog_info, prog_tags)) {
                pr_debug("%s: the kernel is too old, aborting\n", __func__);
                free(info_linear);
                return -2;
        }

        info = &info_linear->info;

        /* number of ksyms, func_lengths, and tags should match */
        sub_prog_cnt = info->nr_jited_ksyms;
        if (sub_prog_cnt != info->nr_prog_tags ||
            sub_prog_cnt != info->nr_jited_func_lens) {
                err = -1;
                goto out;
        }

        /* check BTF func info support */
        if (info->btf_id && info->nr_func_info && info->func_info_rec_size) {
                /* btf func info number should be the same as sub_prog_cnt */
                if (sub_prog_cnt != info->nr_func_info) {
                        pr_debug("%s: mismatch in BPF sub program count and BTF function info count, aborting\n", __func__);
                        err = -1;
                        goto out;
                }
                if (btf__get_from_id(info->btf_id, &btf)) {
                        pr_debug("%s: failed to get BTF of id %u, aborting\n", __func__, info->btf_id);
                        err = -1;
                        btf = NULL;
                        goto out;
                }
                perf_env__fetch_btf(env, info->btf_id, btf);
        }

        /* Synthesize PERF_RECORD_KSYMBOL */
        for (i = 0; i < sub_prog_cnt; i++) {
                __u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
                __u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
                int name_len;

                *ksymbol_event = (struct ksymbol_event){
                        .header = {
                                .type = PERF_RECORD_KSYMBOL,
                                .size = offsetof(struct ksymbol_event, name),
                        },
                        .addr = prog_addrs[i],
                        .len = prog_lens[i],
                        .ksym_type = PERF_RECORD_KSYMBOL_TYPE_BPF,
                        .flags = 0,
                };

                name_len = synthesize_bpf_prog_name(ksymbol_event->name,
                                                    KSYM_NAME_LEN, info, btf, i);
                ksymbol_event->header.size += PERF_ALIGN(name_len + 1,
                                                         sizeof(u64));

                memset((void *)event + event->header.size, 0, machine->id_hdr_size);
                event->header.size += machine->id_hdr_size;
                err = perf_tool__process_synth_event(tool, event,
                                                     machine, process);
        }

        if (!opts->no_bpf_event) {
                /* Synthesize PERF_RECORD_BPF_EVENT */
                *bpf_event = (struct bpf_event){
                        .header = {
                                .type = PERF_RECORD_BPF_EVENT,
                                .size = sizeof(struct bpf_event),
                        },
                        .type = PERF_BPF_EVENT_PROG_LOAD,
                        .flags = 0,
                        .id = info->id,
                };
                memcpy(bpf_event->tag, info->tag, BPF_TAG_SIZE);
                memset((void *)event + event->header.size, 0, machine->id_hdr_size);
                event->header.size += machine->id_hdr_size;

                /* save bpf_prog_info to env */
                info_node = malloc(sizeof(struct bpf_prog_info_node));
                if (!info_node) {
                        err = -1;
                        goto out;
                }

                info_node->info_linear = info_linear;
                perf_env__insert_bpf_prog_info(env, info_node);
                info_linear = NULL;

                /*
                 * process after saving bpf_prog_info to env, so that
                 * required information is ready for look up
                 */
                err = perf_tool__process_synth_event(tool, event,
                                                     machine, process);
        }

out:
        free(info_linear);
        btf__free(btf);
        return err ? -1 : 0;
}

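/*
 * Walk all BPF programs currently loaded in the kernel and synthesize the
 * corresponding PERF_RECORD_KSYMBOL and PERF_RECORD_BPF_EVENT records.
 */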
int perf_event__synthesize_bpf_events(struct perf_session *session,
                                      perf_event__handler_t process,
                                      struct machine *machine,
                                      struct record_opts *opts)
{
        union perf_event *event;
        __u32 id = 0;
        int err;
        int fd;

        event = malloc(sizeof(event->bpf_event) + KSYM_NAME_LEN + machine->id_hdr_size);
        if (!event)
                return -1;
        while (true) {
                err = bpf_prog_get_next_id(id, &id);
                if (err) {
                        if (errno == ENOENT) {
                                err = 0;
                                break;
                        }
                        pr_debug("%s: can't get next program: %s%s\n",
                                 __func__, strerror(errno),
                                 errno == EINVAL ? " -- kernel too old?" : "");
                        /* don't report error on old kernel or EPERM */
                        err = (errno == EINVAL || errno == EPERM) ? 0 : -1;
                        break;
                }
                fd = bpf_prog_get_fd_by_id(id);
                if (fd < 0) {
                        pr_debug("%s: failed to get fd for prog_id %u\n",
                                 __func__, id);
                        continue;
                }

                err = perf_event__synthesize_one_bpf_prog(session, process,
                                                          machine, fd,
                                                          event, opts);
                close(fd);
                if (err) {
                        /* do not return error for old kernel */
                        if (err == -2)
                                err = 0;
                        break;
                }
        }
        free(event);
        return err;
}

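/*
 * Fetch bpf_prog_info and BTF for the program with the given id and save
 * both in @env for later reporting and annotation.
 */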
static void perf_env__add_bpf_info(struct perf_env *env, u32 id)
{
        struct bpf_prog_info_linear *info_linear;
        struct bpf_prog_info_node *info_node;
        struct btf *btf = NULL;
        u64 arrays;
        u32 btf_id;
        int fd;

        fd = bpf_prog_get_fd_by_id(id);
        if (fd < 0)
                return;

        arrays = 1UL << BPF_PROG_INFO_JITED_KSYMS;
        arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
        arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
        arrays |= 1UL << BPF_PROG_INFO_PROG_TAGS;
        arrays |= 1UL << BPF_PROG_INFO_JITED_INSNS;
        arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
        arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;

        info_linear = bpf_program__get_prog_info_linear(fd, arrays);
        if (IS_ERR_OR_NULL(info_linear)) {
                pr_debug("%s: failed to get BPF program info. aborting\n", __func__);
                goto out;
        }

        btf_id = info_linear->info.btf_id;

        info_node = malloc(sizeof(struct bpf_prog_info_node));
        if (info_node) {
                info_node->info_linear = info_linear;
                perf_env__insert_bpf_prog_info(env, info_node);
        } else
                free(info_linear);

        if (btf_id == 0)
                goto out;

        if (btf__get_from_id(btf_id, &btf)) {
                pr_debug("%s: failed to get BTF of id %u, aborting\n",
                         __func__, btf_id);
                goto out;
        }
        perf_env__fetch_btf(env, btf_id, btf);

out:
        btf__free(btf);
        close(fd);
}

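/*
 * Side-band event callback: save bpf_prog_info and BTF for programs that
 * are loaded while perf is running.
 */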
static int bpf_event__sb_cb(union perf_event *event, void *data)
{
        struct perf_env *env = data;

        if (event->header.type != PERF_RECORD_BPF_EVENT)
                return -1;

        switch (event->bpf_event.type) {
        case PERF_BPF_EVENT_PROG_LOAD:
                perf_env__add_bpf_info(env, event->bpf_event.id);

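                /* fall through: the unload case below only breaks */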
        case PERF_BPF_EVENT_PROG_UNLOAD:
                /*
                 * Do not free bpf_prog_info and btf of the program here,
                 * as annotation still needs them. They will be freed at
                 * the end of the session.
                 */
                break;
        default:
                pr_debug("unexpected bpf_event type of %d\n",
                         event->bpf_event.type);
                break;
        }

        return 0;
}

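/*
 * Attach a software dummy event with attr.bpf_event set, so the side-band
 * evlist receives PERF_RECORD_BPF_EVENT for programs loaded during the run.
 */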
int bpf_event__add_sb_event(struct perf_evlist **evlist,
                            struct perf_env *env)
{
        struct perf_event_attr attr = {
                .type             = PERF_TYPE_SOFTWARE,
                .config           = PERF_COUNT_SW_DUMMY,
                .sample_id_all    = 1,
                .watermark        = 1,
                .bpf_event        = 1,
                .size             = sizeof(attr), /* to capture ABI version */
        };

        /*
         * Older gcc versions don't support designated initializers, like above,
         * for unnamed union members, such as the following:
         */
        attr.wakeup_watermark = 1;

        return perf_evlist__add_sb_event(evlist, &attr, bpf_event__sb_cb, env);
}

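/*
 * Print the name, address and size of each sub program of @info, using
 * BTF from @env when it is available.
 */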
void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
                                    struct perf_env *env,
                                    FILE *fp)
{
        __u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
        __u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
        char name[KSYM_NAME_LEN];
        struct btf *btf = NULL;
        u32 sub_prog_cnt, i;

        sub_prog_cnt = info->nr_jited_ksyms;
        if (sub_prog_cnt != info->nr_prog_tags ||
            sub_prog_cnt != info->nr_jited_func_lens)
                return;

        if (info->btf_id) {
                struct btf_node *node;

                node = perf_env__find_btf(env, info->btf_id);
                if (node)
                        btf = btf__new((__u8 *)(node->data),
                                       node->data_size);
                /* btf__new() may return an ERR_PTR on failure */
                if (IS_ERR(btf))
                        btf = NULL;
        }

        if (sub_prog_cnt == 1) {
                synthesize_bpf_prog_name(name, KSYM_NAME_LEN, info, btf, 0);
                fprintf(fp, "# bpf_prog_info %u: %s addr 0x%llx size %u\n",
                        info->id, name, prog_addrs[0], prog_lens[0]);
                goto out;
        }

        fprintf(fp, "# bpf_prog_info %u:\n", info->id);
        for (i = 0; i < sub_prog_cnt; i++) {
                synthesize_bpf_prog_name(name, KSYM_NAME_LEN, info, btf, i);

                fprintf(fp, "# \tsub_prog %u: %s addr 0x%llx size %u\n",
                        i, name, prog_addrs[i], prog_lens[i]);
        }
out:
        btf__free(btf);
}