linux/tools/perf/util/bpf-event.c
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdlib.h>
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/string.h>
#include <internal/lib.h>
#include <symbol/kallsyms.h>
#include "bpf-event.h"
#include "bpf-utils.h"
#include "debug.h"
#include "dso.h"
#include "symbol.h"
#include "machine.h"
#include "env.h"
#include "session.h"
#include "map.h"
#include "evlist.h"
#include "record.h"
#include "util/synthetic-events.h"

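/*
 * Compatibility fallbacks: these __weak definitions are only used when the
 * libbpf that perf links against does not provide the newer APIs. Each one
 * forwards to the older, now-deprecated libbpf call with the deprecation
 * warning suppressed.
 */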
struct btf * __weak btf__load_from_kernel_by_id(__u32 id)
{
        struct btf *btf;
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
        int err = btf__get_from_id(id, &btf);
#pragma GCC diagnostic pop

        return err ? ERR_PTR(err) : btf;
}

struct bpf_program * __weak
bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prev)
{
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
        return bpf_program__next(prev, obj);
#pragma GCC diagnostic pop
}

struct bpf_map * __weak
bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
{
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
        return bpf_map__next(prev, obj);
#pragma GCC diagnostic pop
}

const void * __weak
btf__raw_data(const struct btf *btf_ro, __u32 *size)
{
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
        return btf__get_raw_data(btf_ro, size);
#pragma GCC diagnostic pop
}

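/* Format @len bytes of @data as hex into @buf; returns chars written. */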
static int snprintf_hex(char *buf, size_t size, unsigned char *data, size_t len)
{
        int ret = 0;
        size_t i;

        for (i = 0; i < len; i++)
                ret += snprintf(buf + ret, size - ret, "%02x", data[i]);
        return ret;
}

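/*
 * Handle PERF_BPF_EVENT_PROG_LOAD: look up the saved bpf_prog_info for the
 * program and mark the kernel maps at its jited ksym addresses as BPF
 * program DSOs, so they can be resolved and annotated later.
 */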
static int machine__process_bpf_event_load(struct machine *machine,
                                           union perf_event *event,
                                           struct perf_sample *sample __maybe_unused)
{
        struct bpf_prog_info_node *info_node;
        struct perf_env *env = machine->env;
        struct perf_bpil *info_linear;
        int id = event->bpf.id;
        unsigned int i;

        /* perf-record, no need to handle bpf-event */
        if (env == NULL)
                return 0;

        info_node = perf_env__find_bpf_prog_info(env, id);
        if (!info_node)
                return 0;
        info_linear = info_node->info_linear;

        for (i = 0; i < info_linear->info.nr_jited_ksyms; i++) {
                u64 *addrs = (u64 *)(uintptr_t)(info_linear->info.jited_ksyms);
                u64 addr = addrs[i];
                struct map *map = maps__find(&machine->kmaps, addr);

                if (map) {
                        map->dso->binary_type = DSO_BINARY_TYPE__BPF_PROG_INFO;
                        map->dso->bpf_prog.id = id;
                        map->dso->bpf_prog.sub_id = i;
                        map->dso->bpf_prog.env = env;
                }
        }
        return 0;
}

int machine__process_bpf(struct machine *machine, union perf_event *event,
                         struct perf_sample *sample)
{
        if (dump_trace)
                perf_event__fprintf_bpf(event, stdout);

        switch (event->bpf.type) {
        case PERF_BPF_EVENT_PROG_LOAD:
                return machine__process_bpf_event_load(machine, event, sample);

        case PERF_BPF_EVENT_PROG_UNLOAD:
                /*
                 * Do not free bpf_prog_info and btf of the program here,
                 * as annotation still needs them. They will be freed at
                 * the end of the session.
                 */
                break;
        default:
                pr_debug("unexpected bpf event type of %d\n", event->bpf.type);
                break;
        }
        return 0;
}

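/*
 * Copy the raw BTF data of @btf into a new btf_node and insert it into
 * @env under @btf_id. Returns -1 on allocation failure or duplicate id.
 */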
static int perf_env__fetch_btf(struct perf_env *env,
                               u32 btf_id,
                               struct btf *btf)
{
        struct btf_node *node;
        u32 data_size;
        const void *data;

        data = btf__raw_data(btf, &data_size);

        node = malloc(data_size + sizeof(struct btf_node));
        if (!node)
                return -1;

        node->id = btf_id;
        node->data_size = data_size;
        memcpy(node->data, data, data_size);

        if (!perf_env__insert_btf(env, node)) {
                /* Insertion failed because of a duplicate. */
                free(node);
                return -1;
        }
        return 0;
}

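/*
 * Build the ksym name "bpf_prog_<tag>[_<name>]" for sub program @sub_id,
 * using BTF func info for the sub program name when BTF is available.
 * Returns the number of characters written to @buf.
 */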
static int synthesize_bpf_prog_name(char *buf, int size,
                                    struct bpf_prog_info *info,
                                    struct btf *btf,
                                    u32 sub_id)
{
        u8 (*prog_tags)[BPF_TAG_SIZE] = (void *)(uintptr_t)(info->prog_tags);
        void *func_infos = (void *)(uintptr_t)(info->func_info);
        u32 sub_prog_cnt = info->nr_jited_ksyms;
        const struct bpf_func_info *finfo;
        const char *short_name = NULL;
        const struct btf_type *t;
        int name_len;

        name_len = snprintf(buf, size, "bpf_prog_");
        name_len += snprintf_hex(buf + name_len, size - name_len,
                                 prog_tags[sub_id], BPF_TAG_SIZE);
        if (btf) {
                finfo = func_infos + sub_id * info->func_info_rec_size;
                t = btf__type_by_id(btf, finfo->type_id);
                short_name = btf__name_by_offset(btf, t->name_off);
        } else if (sub_id == 0 && sub_prog_cnt == 1) {
                /* no subprog */
                if (info->name[0])
                        short_name = info->name;
        } else
                short_name = "F";
        if (short_name)
                name_len += snprintf(buf + name_len, size - name_len,
                                     "_%s", short_name);
        return name_len;
}

/*
 * Synthesize PERF_RECORD_KSYMBOL and PERF_RECORD_BPF_EVENT for one bpf
 * program. One PERF_RECORD_BPF_EVENT is generated for the program, and
 * one PERF_RECORD_KSYMBOL is generated for each sub program.
 *
 * Returns:
 *    0 for success;
 *   -1 for failures;
 *   -2 for lack of kernel support.
 */
static int perf_event__synthesize_one_bpf_prog(struct perf_session *session,
                                               perf_event__handler_t process,
                                               struct machine *machine,
                                               int fd,
                                               union perf_event *event,
                                               struct record_opts *opts)
{
        struct perf_record_ksymbol *ksymbol_event = &event->ksymbol;
        struct perf_record_bpf_event *bpf_event = &event->bpf;
        struct perf_tool *tool = session->tool;
        struct bpf_prog_info_node *info_node;
        struct perf_bpil *info_linear;
        struct bpf_prog_info *info;
        struct btf *btf = NULL;
        struct perf_env *env;
        u32 sub_prog_cnt, i;
        int err = 0;
        u64 arrays;

        /*
         * For perf-record and perf-report, use header.env;
         * otherwise, use the global perf_env.
         */
        env = session->data ? &session->header.env : &perf_env;

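        /* Select which optional bpf_prog_info arrays to fetch from the kernel. */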
        arrays = 1UL << PERF_BPIL_JITED_KSYMS;
        arrays |= 1UL << PERF_BPIL_JITED_FUNC_LENS;
        arrays |= 1UL << PERF_BPIL_FUNC_INFO;
        arrays |= 1UL << PERF_BPIL_PROG_TAGS;
        arrays |= 1UL << PERF_BPIL_JITED_INSNS;
        arrays |= 1UL << PERF_BPIL_LINE_INFO;
        arrays |= 1UL << PERF_BPIL_JITED_LINE_INFO;

        info_linear = get_bpf_prog_info_linear(fd, arrays);
        if (IS_ERR_OR_NULL(info_linear)) {
                info_linear = NULL;
                pr_debug("%s: failed to get BPF program info. aborting\n", __func__);
                return -1;
        }

        if (info_linear->info_len < offsetof(struct bpf_prog_info, prog_tags)) {
                free(info_linear);
                pr_debug("%s: the kernel is too old, aborting\n", __func__);
                return -2;
        }

        info = &info_linear->info;
        if (!info->jited_ksyms) {
                free(info_linear);
                return -1;
        }

        /* number of ksyms, func_lengths, and tags should match */
        sub_prog_cnt = info->nr_jited_ksyms;
        if (sub_prog_cnt != info->nr_prog_tags ||
            sub_prog_cnt != info->nr_jited_func_lens) {
                free(info_linear);
                return -1;
        }

        /* check BTF func info support */
        if (info->btf_id && info->nr_func_info && info->func_info_rec_size) {
                /* btf func info number should be the same as sub_prog_cnt */
                if (sub_prog_cnt != info->nr_func_info) {
                        pr_debug("%s: mismatch in BPF sub program count and BTF function info count, aborting\n", __func__);
                        free(info_linear);
                        return -1;
                }
                btf = btf__load_from_kernel_by_id(info->btf_id);
                if (libbpf_get_error(btf)) {
                        pr_debug("%s: failed to get BTF of id %u, aborting\n", __func__, info->btf_id);
                        err = -1;
                        goto out;
                }
                perf_env__fetch_btf(env, info->btf_id, btf);
        }

        /* Synthesize PERF_RECORD_KSYMBOL */
        for (i = 0; i < sub_prog_cnt; i++) {
                __u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
                __u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
                int name_len;

                *ksymbol_event = (struct perf_record_ksymbol) {
                        .header = {
                                .type = PERF_RECORD_KSYMBOL,
                                .size = offsetof(struct perf_record_ksymbol, name),
                        },
                        .addr = prog_addrs[i],
                        .len = prog_lens[i],
                        .ksym_type = PERF_RECORD_KSYMBOL_TYPE_BPF,
                        .flags = 0,
                };

                name_len = synthesize_bpf_prog_name(ksymbol_event->name,
                                                    KSYM_NAME_LEN, info, btf, i);
                ksymbol_event->header.size += PERF_ALIGN(name_len + 1,
                                                         sizeof(u64));

                memset((void *)event + event->header.size, 0, machine->id_hdr_size);
                event->header.size += machine->id_hdr_size;
                err = perf_tool__process_synth_event(tool, event,
                                                     machine, process);
        }

        if (!opts->no_bpf_event) {
                /* Synthesize PERF_RECORD_BPF_EVENT */
                *bpf_event = (struct perf_record_bpf_event) {
                        .header = {
                                .type = PERF_RECORD_BPF_EVENT,
                                .size = sizeof(struct perf_record_bpf_event),
                        },
                        .type = PERF_BPF_EVENT_PROG_LOAD,
                        .flags = 0,
                        .id = info->id,
                };
                memcpy(bpf_event->tag, info->tag, BPF_TAG_SIZE);
                memset((void *)event + event->header.size, 0, machine->id_hdr_size);
                event->header.size += machine->id_hdr_size;

                /* save bpf_prog_info to env */
                info_node = malloc(sizeof(struct bpf_prog_info_node));
                if (!info_node) {
                        err = -1;
                        goto out;
                }

                info_node->info_linear = info_linear;
                perf_env__insert_bpf_prog_info(env, info_node);
                info_linear = NULL;

                /*
                 * Process after saving bpf_prog_info to env, so that
                 * the required information is ready for lookup.
                 */
                err = perf_tool__process_synth_event(tool, event,
                                                     machine, process);
        }

out:
        free(info_linear);
        btf__free(btf);
        return err ? -1 : 0;
}

struct kallsyms_parse {
        union perf_event        *event;
        perf_event__handler_t    process;
        struct machine          *machine;
        struct perf_tool        *tool;
};

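/*
 * Synthesize one PERF_RECORD_KSYMBOL for a BPF image (trampoline or
 * dispatcher) found in kallsyms; its length is reported as one page.
 */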
static int
process_bpf_image(char *name, u64 addr, struct kallsyms_parse *data)
{
        struct machine *machine = data->machine;
        union perf_event *event = data->event;
        struct perf_record_ksymbol *ksymbol;
        int len;

        ksymbol = &event->ksymbol;

        *ksymbol = (struct perf_record_ksymbol) {
                .header = {
                        .type = PERF_RECORD_KSYMBOL,
                        .size = offsetof(struct perf_record_ksymbol, name),
                },
                .addr      = addr,
                .len       = page_size,
                .ksym_type = PERF_RECORD_KSYMBOL_TYPE_BPF,
                .flags     = 0,
        };

        len = scnprintf(ksymbol->name, KSYM_NAME_LEN, "%s", name);
        ksymbol->header.size += PERF_ALIGN(len + 1, sizeof(u64));
        memset((void *) event + event->header.size, 0, machine->id_hdr_size);
        event->header.size += machine->id_hdr_size;

        return perf_tool__process_synth_event(data->tool, event, machine,
                                              data->process);
}

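/*
 * kallsyms__parse() callback: pick "[bpf]" symbols named bpf_trampoline_<id>
 * or bpf_dispatcher_<name> and synthesize ksymbol events for them.
 */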
static int
kallsyms_process_symbol(void *data, const char *_name,
                        char type __maybe_unused, u64 start)
{
        char disp[KSYM_NAME_LEN];
        char *module, *name;
        unsigned long id;
        int err = 0;

        module = strchr(_name, '\t');
        if (!module)
                return 0;

        /* We are going after [bpf] module ... */
        if (strcmp(module + 1, "[bpf]"))
                return 0;

        name = memdup(_name, (module - _name) + 1);
        if (!name)
                return -ENOMEM;

        name[module - _name] = 0;

        /* .. and only for trampolines and dispatchers */
        if ((sscanf(name, "bpf_trampoline_%lu", &id) == 1) ||
            (sscanf(name, "bpf_dispatcher_%s", disp) == 1))
                err = process_bpf_image(name, start, data);

        free(name);
        return err;
}

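/*
 * Iterate over all BPF programs loaded in the kernel and synthesize events
 * for each, then synthesize ksymbol events for the BPF trampoline and
 * dispatcher images listed in kallsyms.
 */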
int perf_event__synthesize_bpf_events(struct perf_session *session,
                                      perf_event__handler_t process,
                                      struct machine *machine,
                                      struct record_opts *opts)
{
        const char *kallsyms_filename = "/proc/kallsyms";
        struct kallsyms_parse arg;
        union perf_event *event;
        __u32 id = 0;
        int err;
        int fd;

        event = malloc(sizeof(event->bpf) + KSYM_NAME_LEN + machine->id_hdr_size);
        if (!event)
                return -1;

        /* Synthesize all the bpf programs in the system. */
        while (true) {
                err = bpf_prog_get_next_id(id, &id);
                if (err) {
                        if (errno == ENOENT) {
                                err = 0;
                                break;
                        }
                        pr_debug("%s: can't get next program: %s%s\n",
                                 __func__, strerror(errno),
                                 errno == EINVAL ? " -- kernel too old?" : "");
                        /* don't report error on old kernel or EPERM */
                        err = (errno == EINVAL || errno == EPERM) ? 0 : -1;
                        break;
                }
                fd = bpf_prog_get_fd_by_id(id);
                if (fd < 0) {
                        pr_debug("%s: failed to get fd for prog_id %u\n",
                                 __func__, id);
                        continue;
                }

                err = perf_event__synthesize_one_bpf_prog(session, process,
                                                          machine, fd,
                                                          event, opts);
                close(fd);
                if (err) {
                        /* do not return error for old kernel */
                        if (err == -2)
                                err = 0;
                        break;
                }
        }

        /* Synthesize all the bpf images - trampolines/dispatchers. */
        if (symbol_conf.kallsyms_name != NULL)
                kallsyms_filename = symbol_conf.kallsyms_name;

        arg = (struct kallsyms_parse) {
                .event   = event,
                .process = process,
                .machine = machine,
                .tool    = session->tool,
        };

        if (kallsyms__parse(kallsyms_filename, &arg, kallsyms_process_symbol)) {
                pr_err("%s: failed to synthesize bpf images: %s\n",
                       __func__, strerror(errno));
        }

        free(event);
        return err;
}

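/*
 * Fetch bpf_prog_info (and its BTF, if any) for program @id from the
 * kernel and save both in @env.
 */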
static void perf_env__add_bpf_info(struct perf_env *env, u32 id)
{
        struct bpf_prog_info_node *info_node;
        struct perf_bpil *info_linear;
        struct btf *btf = NULL;
        u64 arrays;
        u32 btf_id;
        int fd;

        fd = bpf_prog_get_fd_by_id(id);
        if (fd < 0)
                return;

        arrays = 1UL << PERF_BPIL_JITED_KSYMS;
        arrays |= 1UL << PERF_BPIL_JITED_FUNC_LENS;
        arrays |= 1UL << PERF_BPIL_FUNC_INFO;
        arrays |= 1UL << PERF_BPIL_PROG_TAGS;
        arrays |= 1UL << PERF_BPIL_JITED_INSNS;
        arrays |= 1UL << PERF_BPIL_LINE_INFO;
        arrays |= 1UL << PERF_BPIL_JITED_LINE_INFO;

        info_linear = get_bpf_prog_info_linear(fd, arrays);
        if (IS_ERR_OR_NULL(info_linear)) {
                pr_debug("%s: failed to get BPF program info. aborting\n", __func__);
                goto out;
        }

        btf_id = info_linear->info.btf_id;

        info_node = malloc(sizeof(struct bpf_prog_info_node));
        if (info_node) {
                info_node->info_linear = info_linear;
                perf_env__insert_bpf_prog_info(env, info_node);
        } else
                free(info_linear);

        if (btf_id == 0)
                goto out;

        btf = btf__load_from_kernel_by_id(btf_id);
        if (libbpf_get_error(btf)) {
                pr_debug("%s: failed to get BTF of id %u, aborting\n",
                         __func__, btf_id);
                goto out;
        }
        perf_env__fetch_btf(env, btf_id, btf);

out:
        btf__free(btf);
        close(fd);
}

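/*
 * Side-band event callback: on PERF_BPF_EVENT_PROG_LOAD, save the new
 * program's info and BTF into the perf_env passed as @data.
 */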
static int bpf_event__sb_cb(union perf_event *event, void *data)
{
        struct perf_env *env = data;

        if (event->header.type != PERF_RECORD_BPF_EVENT)
                return -1;

        switch (event->bpf.type) {
        case PERF_BPF_EVENT_PROG_LOAD:
                perf_env__add_bpf_info(env, event->bpf.id);

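                /* fall through */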
        case PERF_BPF_EVENT_PROG_UNLOAD:
                /*
                 * Do not free bpf_prog_info and btf of the program here,
                 * as annotation still needs them. They will be freed at
                 * the end of the session.
                 */
                break;
        default:
                pr_debug("unexpected bpf event type of %d\n", event->bpf.type);
                break;
        }

        return 0;
}

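/*
 * Add a software dummy event with attr.bpf_event set to the side-band
 * evlist, with bpf_event__sb_cb() as its callback.
 */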
int evlist__add_bpf_sb_event(struct evlist *evlist, struct perf_env *env)
{
        struct perf_event_attr attr = {
                .type             = PERF_TYPE_SOFTWARE,
                .config           = PERF_COUNT_SW_DUMMY,
                .sample_id_all    = 1,
                .watermark        = 1,
                .bpf_event        = 1,
                .size             = sizeof(attr), /* to capture ABI version */
        };

        /*
         * Older gcc versions don't support designated initializers, like above,
         * for unnamed union members, such as the following:
         */
        attr.wakeup_watermark = 1;

        return evlist__add_sb_event(evlist, &attr, bpf_event__sb_cb, env);
}

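/*
 * Print a one-line summary per sub program of @info to @fp, resolving
 * names via the BTF saved in @env.
 */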
void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
                                    struct perf_env *env,
                                    FILE *fp)
{
        __u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
        __u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
        char name[KSYM_NAME_LEN];
        struct btf *btf = NULL;
        u32 sub_prog_cnt, i;

        sub_prog_cnt = info->nr_jited_ksyms;
        if (sub_prog_cnt != info->nr_prog_tags ||
            sub_prog_cnt != info->nr_jited_func_lens)
                return;

        if (info->btf_id) {
                struct btf_node *node;

                node = perf_env__find_btf(env, info->btf_id);
                if (node)
                        btf = btf__new((__u8 *)(node->data),
                                       node->data_size);
        }

        if (sub_prog_cnt == 1) {
                synthesize_bpf_prog_name(name, KSYM_NAME_LEN, info, btf, 0);
                fprintf(fp, "# bpf_prog_info %u: %s addr 0x%llx size %u\n",
                        info->id, name, prog_addrs[0], prog_lens[0]);
                goto out;
        }

        fprintf(fp, "# bpf_prog_info %u:\n", info->id);
        for (i = 0; i < sub_prog_cnt; i++) {
                synthesize_bpf_prog_name(name, KSYM_NAME_LEN, info, btf, i);

                fprintf(fp, "# \tsub_prog %u: %s addr 0x%llx size %u\n",
                        i, name, prog_addrs[i], prog_lens[i]);
        }
out:
        btf__free(btf);
}