linux/tools/perf/util/bpf-event.c
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdlib.h>
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/string.h>
#include <internal/lib.h>
#include <symbol/kallsyms.h>
#include "bpf-event.h"
#include "debug.h"
#include "dso.h"
#include "symbol.h"
#include "machine.h"
#include "env.h"
#include "session.h"
#include "map.h"
#include "evlist.h"
#include "record.h"
#include "util/synthetic-events.h"

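/*
 * Fallback for libbpf versions that do not provide
 * btf__load_from_kernel_by_id(): wrap the deprecated btf__get_from_id()
 * and convert its error code into an ERR_PTR.
 */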
struct btf * __weak btf__load_from_kernel_by_id(__u32 id)
{
        struct btf *btf;
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
        int err = btf__get_from_id(id, &btf);
#pragma GCC diagnostic pop

        return err ? ERR_PTR(err) : btf;
}

#define ptr_to_u64(ptr)    ((__u64)(unsigned long)(ptr))

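/* Format @len bytes of @data as lowercase hex into @buf (e.g. a BPF prog tag). */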
static int snprintf_hex(char *buf, size_t size, unsigned char *data, size_t len)
{
        int ret = 0;
        size_t i;

        for (i = 0; i < len; i++)
                ret += snprintf(buf + ret, size - ret, "%02x", data[i]);
        return ret;
}

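/*
 * On PERF_BPF_EVENT_PROG_LOAD, mark the kernel maps that back the program's
 * JITed sub programs so that annotation can later look up its bpf_prog_info
 * through the DSO.
 */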
static int machine__process_bpf_event_load(struct machine *machine,
                                           union perf_event *event,
                                           struct perf_sample *sample __maybe_unused)
{
        struct bpf_prog_info_linear *info_linear;
        struct bpf_prog_info_node *info_node;
        struct perf_env *env = machine->env;
        int id = event->bpf.id;
        unsigned int i;

        /* perf-record, no need to handle bpf-event */
        if (env == NULL)
                return 0;

        info_node = perf_env__find_bpf_prog_info(env, id);
        if (!info_node)
                return 0;
        info_linear = info_node->info_linear;

        for (i = 0; i < info_linear->info.nr_jited_ksyms; i++) {
                u64 *addrs = (u64 *)(uintptr_t)(info_linear->info.jited_ksyms);
                u64 addr = addrs[i];
                struct map *map = maps__find(&machine->kmaps, addr);

                if (map) {
                        map->dso->binary_type = DSO_BINARY_TYPE__BPF_PROG_INFO;
                        map->dso->bpf_prog.id = id;
                        map->dso->bpf_prog.sub_id = i;
                        map->dso->bpf_prog.env = env;
                }
        }
        return 0;
}

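/* Dispatch a PERF_RECORD_BPF_EVENT according to event->bpf.type. */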
int machine__process_bpf(struct machine *machine, union perf_event *event,
                         struct perf_sample *sample)
{
        if (dump_trace)
                perf_event__fprintf_bpf(event, stdout);

        switch (event->bpf.type) {
        case PERF_BPF_EVENT_PROG_LOAD:
                return machine__process_bpf_event_load(machine, event, sample);

        case PERF_BPF_EVENT_PROG_UNLOAD:
                /*
                 * Do not free bpf_prog_info and btf of the program here,
                 * as annotation still needs them. They will be freed at
                 * the end of the session.
                 */
                break;
        default:
                pr_debug("unexpected bpf event type of %d\n", event->bpf.type);
                break;
        }
        return 0;
}

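/* Copy the raw BTF data of @btf into a btf_node and store it in @env under @btf_id. */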
static int perf_env__fetch_btf(struct perf_env *env,
                               u32 btf_id,
                               struct btf *btf)
{
        struct btf_node *node;
        u32 data_size;
        const void *data;

        data = btf__get_raw_data(btf, &data_size);

        node = malloc(data_size + sizeof(struct btf_node));
        if (!node)
                return -1;

        node->id = btf_id;
        node->data_size = data_size;
        memcpy(node->data, data, data_size);

        perf_env__insert_btf(env, node);
        return 0;
}

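/*
 * Build the ksymbol name "bpf_prog_<tag>[_<name>]" for sub program @sub_id:
 * the short name comes from BTF func info when available, from info->name
 * when the program has a single sub program, or falls back to "F".
 */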
static int synthesize_bpf_prog_name(char *buf, int size,
                                    struct bpf_prog_info *info,
                                    struct btf *btf,
                                    u32 sub_id)
{
        u8 (*prog_tags)[BPF_TAG_SIZE] = (void *)(uintptr_t)(info->prog_tags);
        void *func_infos = (void *)(uintptr_t)(info->func_info);
        u32 sub_prog_cnt = info->nr_jited_ksyms;
        const struct bpf_func_info *finfo;
        const char *short_name = NULL;
        const struct btf_type *t;
        int name_len;

        name_len = snprintf(buf, size, "bpf_prog_");
        name_len += snprintf_hex(buf + name_len, size - name_len,
                                 prog_tags[sub_id], BPF_TAG_SIZE);
        if (btf) {
                finfo = func_infos + sub_id * info->func_info_rec_size;
                t = btf__type_by_id(btf, finfo->type_id);
                short_name = btf__name_by_offset(btf, t->name_off);
        } else if (sub_id == 0 && sub_prog_cnt == 1) {
                /* no subprog */
                if (info->name[0])
                        short_name = info->name;
        } else
                short_name = "F";
        if (short_name)
                name_len += snprintf(buf + name_len, size - name_len,
                                     "_%s", short_name);
        return name_len;
}

/*
 * Synthesize PERF_RECORD_KSYMBOL and PERF_RECORD_BPF_EVENT for one bpf
 * program. One PERF_RECORD_BPF_EVENT is generated for the program, and
 * one PERF_RECORD_KSYMBOL is generated for each sub program.
 *
 * Returns:
 *    0 for success;
 *   -1 for failures;
 *   -2 for lack of kernel support.
 */
static int perf_event__synthesize_one_bpf_prog(struct perf_session *session,
                                               perf_event__handler_t process,
                                               struct machine *machine,
                                               int fd,
                                               union perf_event *event,
                                               struct record_opts *opts)
{
        struct perf_record_ksymbol *ksymbol_event = &event->ksymbol;
        struct perf_record_bpf_event *bpf_event = &event->bpf;
        struct bpf_prog_info_linear *info_linear;
        struct perf_tool *tool = session->tool;
        struct bpf_prog_info_node *info_node;
        struct bpf_prog_info *info;
        struct btf *btf = NULL;
        struct perf_env *env;
        u32 sub_prog_cnt, i;
        int err = 0;
        u64 arrays;

        /*
         * For perf-record and perf-report, use header.env;
         * otherwise, use the global perf_env.
         */
        env = session->data ? &session->header.env : &perf_env;

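        /* Select which variable-length arrays of bpf_prog_info to fetch. */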
        arrays = 1UL << BPF_PROG_INFO_JITED_KSYMS;
        arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
        arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
        arrays |= 1UL << BPF_PROG_INFO_PROG_TAGS;
        arrays |= 1UL << BPF_PROG_INFO_JITED_INSNS;
        arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
        arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;

        info_linear = bpf_program__get_prog_info_linear(fd, arrays);
        if (IS_ERR_OR_NULL(info_linear)) {
                info_linear = NULL;
                pr_debug("%s: failed to get BPF program info. aborting\n", __func__);
                return -1;
        }

        if (info_linear->info_len < offsetof(struct bpf_prog_info, prog_tags)) {
                free(info_linear);
                pr_debug("%s: the kernel is too old, aborting\n", __func__);
                return -2;
        }

        info = &info_linear->info;
        if (!info->jited_ksyms) {
                free(info_linear);
                return -1;
        }

        /* the numbers of ksyms, func_lens, and tags should match */
        sub_prog_cnt = info->nr_jited_ksyms;
        if (sub_prog_cnt != info->nr_prog_tags ||
            sub_prog_cnt != info->nr_jited_func_lens) {
                free(info_linear);
                return -1;
        }

        /* check BTF func info support */
        if (info->btf_id && info->nr_func_info && info->func_info_rec_size) {
                /* the BTF func info count should be the same as sub_prog_cnt */
                if (sub_prog_cnt != info->nr_func_info) {
                        pr_debug("%s: mismatch in BPF sub program count and BTF function info count, aborting\n", __func__);
                        free(info_linear);
                        return -1;
                }
                btf = btf__load_from_kernel_by_id(info->btf_id);
                if (libbpf_get_error(btf)) {
                        pr_debug("%s: failed to get BTF of id %u, aborting\n", __func__, info->btf_id);
                        err = -1;
                        goto out;
                }
                perf_env__fetch_btf(env, info->btf_id, btf);
        }

        /* Synthesize PERF_RECORD_KSYMBOL */
        for (i = 0; i < sub_prog_cnt; i++) {
                __u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
                __u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
                int name_len;

                *ksymbol_event = (struct perf_record_ksymbol) {
                        .header = {
                                .type = PERF_RECORD_KSYMBOL,
                                .size = offsetof(struct perf_record_ksymbol, name),
                        },
                        .addr = prog_addrs[i],
                        .len = prog_lens[i],
                        .ksym_type = PERF_RECORD_KSYMBOL_TYPE_BPF,
                        .flags = 0,
                };

                name_len = synthesize_bpf_prog_name(ksymbol_event->name,
                                                    KSYM_NAME_LEN, info, btf, i);
                ksymbol_event->header.size += PERF_ALIGN(name_len + 1,
                                                         sizeof(u64));

                memset((void *)event + event->header.size, 0, machine->id_hdr_size);
                event->header.size += machine->id_hdr_size;
                err = perf_tool__process_synth_event(tool, event,
                                                     machine, process);
        }

        if (!opts->no_bpf_event) {
                /* Synthesize PERF_RECORD_BPF_EVENT */
                *bpf_event = (struct perf_record_bpf_event) {
                        .header = {
                                .type = PERF_RECORD_BPF_EVENT,
                                .size = sizeof(struct perf_record_bpf_event),
                        },
                        .type = PERF_BPF_EVENT_PROG_LOAD,
                        .flags = 0,
                        .id = info->id,
                };
                memcpy(bpf_event->tag, info->tag, BPF_TAG_SIZE);
                memset((void *)event + event->header.size, 0, machine->id_hdr_size);
                event->header.size += machine->id_hdr_size;

                /* save bpf_prog_info to env */
                info_node = malloc(sizeof(struct bpf_prog_info_node));
                if (!info_node) {
                        err = -1;
                        goto out;
                }

                info_node->info_linear = info_linear;
                perf_env__insert_bpf_prog_info(env, info_node);
                info_linear = NULL;

                /*
                 * Process the event only after saving bpf_prog_info to env,
                 * so that the required information is ready for lookup.
                 */
                err = perf_tool__process_synth_event(tool, event,
                                                     machine, process);
        }

out:
        free(info_linear);
        btf__free(btf);
        return err ? -1 : 0;
}

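/* Context passed to kallsyms__parse() when synthesizing events for BPF images. */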
struct kallsyms_parse {
        union perf_event        *event;
        perf_event__handler_t    process;
        struct machine          *machine;
        struct perf_tool        *tool;
};

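/*
 * Synthesize a PERF_RECORD_KSYMBOL for one BPF image (a trampoline or
 * dispatcher) found in kallsyms; its length is reported as one page.
 */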
static int
process_bpf_image(char *name, u64 addr, struct kallsyms_parse *data)
{
        struct machine *machine = data->machine;
        union perf_event *event = data->event;
        struct perf_record_ksymbol *ksymbol;
        int len;

        ksymbol = &event->ksymbol;

        *ksymbol = (struct perf_record_ksymbol) {
                .header = {
                        .type = PERF_RECORD_KSYMBOL,
                        .size = offsetof(struct perf_record_ksymbol, name),
                },
                .addr      = addr,
                .len       = page_size,
                .ksym_type = PERF_RECORD_KSYMBOL_TYPE_BPF,
                .flags     = 0,
        };

        len = scnprintf(ksymbol->name, KSYM_NAME_LEN, "%s", name);
        ksymbol->header.size += PERF_ALIGN(len + 1, sizeof(u64));
        memset((void *) event + event->header.size, 0, machine->id_hdr_size);
        event->header.size += machine->id_hdr_size;

        return perf_tool__process_synth_event(data->tool, event, machine,
                                              data->process);
}

static int
kallsyms_process_symbol(void *data, const char *_name,
                        char type __maybe_unused, u64 start)
{
        char disp[KSYM_NAME_LEN];
        char *module, *name;
        unsigned long id;
        int err = 0;

        module = strchr(_name, '\t');
        if (!module)
                return 0;

        /* We are going after [bpf] module ... */
        if (strcmp(module + 1, "[bpf]"))
                return 0;

        name = memdup(_name, (module - _name) + 1);
        if (!name)
                return -ENOMEM;

        name[module - _name] = 0;

        /* .. and only for trampolines and dispatchers */
        if ((sscanf(name, "bpf_trampoline_%lu", &id) == 1) ||
            (sscanf(name, "bpf_dispatcher_%s", disp) == 1))
                err = process_bpf_image(name, start, data);

        free(name);
        return err;
}

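/*
 * Synthesize PERF_RECORD_KSYMBOL/PERF_RECORD_BPF_EVENT for every BPF program
 * currently loaded (iterated via bpf_prog_get_next_id()), then walk kallsyms
 * to cover BPF trampolines and dispatchers as well.
 */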
int perf_event__synthesize_bpf_events(struct perf_session *session,
                                      perf_event__handler_t process,
                                      struct machine *machine,
                                      struct record_opts *opts)
{
        const char *kallsyms_filename = "/proc/kallsyms";
        struct kallsyms_parse arg;
        union perf_event *event;
        __u32 id = 0;
        int err;
        int fd;

        event = malloc(sizeof(event->bpf) + KSYM_NAME_LEN + machine->id_hdr_size);
        if (!event)
                return -1;

        /* Synthesize all the bpf programs in the system. */
        while (true) {
                err = bpf_prog_get_next_id(id, &id);
                if (err) {
                        if (errno == ENOENT) {
                                err = 0;
                                break;
                        }
                        pr_debug("%s: can't get next program: %s%s\n",
                                 __func__, strerror(errno),
                                 errno == EINVAL ? " -- kernel too old?" : "");
                        /* don't report an error on an old kernel or EPERM */
                        err = (errno == EINVAL || errno == EPERM) ? 0 : -1;
                        break;
                }
                fd = bpf_prog_get_fd_by_id(id);
                if (fd < 0) {
                        pr_debug("%s: failed to get fd for prog_id %u\n",
                                 __func__, id);
                        continue;
                }

                err = perf_event__synthesize_one_bpf_prog(session, process,
                                                          machine, fd,
                                                          event, opts);
                close(fd);
                if (err) {
                        /* do not return an error for an old kernel */
                        if (err == -2)
                                err = 0;
                        break;
                }
        }

        /* Synthesize all the bpf images - trampolines/dispatchers. */
        if (symbol_conf.kallsyms_name != NULL)
                kallsyms_filename = symbol_conf.kallsyms_name;

        arg = (struct kallsyms_parse) {
                .event   = event,
                .process = process,
                .machine = machine,
                .tool    = session->tool,
        };

        if (kallsyms__parse(kallsyms_filename, &arg, kallsyms_process_symbol)) {
                pr_err("%s: failed to synthesize bpf images: %s\n",
                       __func__, strerror(errno));
        }

        free(event);
        return err;
}

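/*
 * Fetch bpf_prog_info (and its BTF, if any) for program @id and store both
 * in @env so they can be used later (e.g. for annotation).
 */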
static void perf_env__add_bpf_info(struct perf_env *env, u32 id)
{
        struct bpf_prog_info_linear *info_linear;
        struct bpf_prog_info_node *info_node;
        struct btf *btf = NULL;
        u64 arrays;
        u32 btf_id;
        int fd;

        fd = bpf_prog_get_fd_by_id(id);
        if (fd < 0)
                return;

        arrays = 1UL << BPF_PROG_INFO_JITED_KSYMS;
        arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
        arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
        arrays |= 1UL << BPF_PROG_INFO_PROG_TAGS;
        arrays |= 1UL << BPF_PROG_INFO_JITED_INSNS;
        arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
        arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;

        info_linear = bpf_program__get_prog_info_linear(fd, arrays);
        if (IS_ERR_OR_NULL(info_linear)) {
                pr_debug("%s: failed to get BPF program info. aborting\n", __func__);
                goto out;
        }

        btf_id = info_linear->info.btf_id;

        info_node = malloc(sizeof(struct bpf_prog_info_node));
        if (info_node) {
                info_node->info_linear = info_linear;
                perf_env__insert_bpf_prog_info(env, info_node);
        } else
                free(info_linear);

        if (btf_id == 0)
                goto out;

        btf = btf__load_from_kernel_by_id(btf_id);
        if (libbpf_get_error(btf)) {
                pr_debug("%s: failed to get BTF of id %u, aborting\n",
                         __func__, btf_id);
                goto out;
        }
        perf_env__fetch_btf(env, btf_id, btf);

out:
        btf__free(btf);
        close(fd);
}

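/*
 * Side-band event callback: record the program info of newly loaded BPF
 * programs into the perf_env so they can be annotated later.
 */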
static int bpf_event__sb_cb(union perf_event *event, void *data)
{
        struct perf_env *env = data;

        if (event->header.type != PERF_RECORD_BPF_EVENT)
                return -1;

        switch (event->bpf.type) {
        case PERF_BPF_EVENT_PROG_LOAD:
                perf_env__add_bpf_info(env, event->bpf.id);
                /* fall through */

        case PERF_BPF_EVENT_PROG_UNLOAD:
                /*
                 * Do not free bpf_prog_info and btf of the program here,
                 * as annotation still needs them. They will be freed at
                 * the end of the session.
                 */
                break;
        default:
                pr_debug("unexpected bpf event type of %d\n", event->bpf.type);
                break;
        }

        return 0;
}

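/*
 * Add a software dummy event with attr.bpf_event set so that the side-band
 * thread receives PERF_RECORD_BPF_EVENTs for programs loaded while recording.
 */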
int evlist__add_bpf_sb_event(struct evlist *evlist, struct perf_env *env)
{
        struct perf_event_attr attr = {
                .type             = PERF_TYPE_SOFTWARE,
                .config           = PERF_COUNT_SW_DUMMY,
                .sample_id_all    = 1,
                .watermark        = 1,
                .bpf_event        = 1,
                .size             = sizeof(attr), /* to capture ABI version */
        };

        /*
         * Older gcc versions don't support designated initializers, like above,
         * for unnamed union members, such as the following:
         */
        attr.wakeup_watermark = 1;

        return evlist__add_sb_event(evlist, &attr, bpf_event__sb_cb, env);
}

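/*
 * Print one "# bpf_prog_info ..." line per (sub)program to @fp, resolving
 * names through the BTF data stashed in @env.
 */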
void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
                                    struct perf_env *env,
                                    FILE *fp)
{
        __u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
        __u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
        char name[KSYM_NAME_LEN];
        struct btf *btf = NULL;
        u32 sub_prog_cnt, i;

        sub_prog_cnt = info->nr_jited_ksyms;
        if (sub_prog_cnt != info->nr_prog_tags ||
            sub_prog_cnt != info->nr_jited_func_lens)
                return;

        if (info->btf_id) {
                struct btf_node *node;

                node = perf_env__find_btf(env, info->btf_id);
                if (node)
                        btf = btf__new((__u8 *)(node->data),
                                       node->data_size);
        }

        if (sub_prog_cnt == 1) {
                synthesize_bpf_prog_name(name, KSYM_NAME_LEN, info, btf, 0);
                fprintf(fp, "# bpf_prog_info %u: %s addr 0x%llx size %u\n",
                        info->id, name, prog_addrs[0], prog_lens[0]);
                goto out;
        }

        fprintf(fp, "# bpf_prog_info %u:\n", info->id);
        for (i = 0; i < sub_prog_cnt; i++) {
                synthesize_bpf_prog_name(name, KSYM_NAME_LEN, info, btf, i);

                fprintf(fp, "# \tsub_prog %u: %s addr 0x%llx size %u\n",
                        i, name, prog_addrs[i], prog_lens[i]);
        }
out:
        /* free the local BTF copy created above so it does not leak */
        btf__free(btf);
}