/* linux/tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c */
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (c) 2020 Facebook
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
   7/* map of perf event fds, num_cpu * num_metric entries */
   8struct {
   9        __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
  10        __uint(key_size, sizeof(__u32));
  11        __uint(value_size, sizeof(int));
  12} events SEC(".maps");
  13
  14/* readings at fentry */
  15struct {
  16        __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
  17        __uint(key_size, sizeof(__u32));
  18        __uint(value_size, sizeof(struct bpf_perf_event_value));
  19        __uint(max_entries, 1);
  20} fentry_readings SEC(".maps");
  21
  22/* accumulated readings */
  23struct {
  24        __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
  25        __uint(key_size, sizeof(__u32));
  26        __uint(value_size, sizeof(struct bpf_perf_event_value));
  27        __uint(max_entries, 1);
  28} accum_readings SEC(".maps");
  29
  30const volatile __u32 num_cpu = 1;
  31
  32SEC("fentry/XXX")
  33int BPF_PROG(fentry_XXX)
  34{
  35        __u32 key = bpf_get_smp_processor_id();
  36        struct bpf_perf_event_value *ptr;
  37        __u32 zero = 0;
  38        long err;
  39
  40        /* look up before reading, to reduce error */
  41        ptr = bpf_map_lookup_elem(&fentry_readings, &zero);
  42        if (!ptr)
  43                return 0;
  44
  45        err = bpf_perf_event_read_value(&events, key, ptr, sizeof(*ptr));
  46        if (err)
  47                return 0;
  48
  49        return 0;
  50}
  51
  52static inline void
  53fexit_update_maps(struct bpf_perf_event_value *after)
  54{
  55        struct bpf_perf_event_value *before, diff;
  56        __u32 zero = 0;
  57
  58        before = bpf_map_lookup_elem(&fentry_readings, &zero);
  59        /* only account samples with a valid fentry_reading */
  60        if (before && before->counter) {
  61                struct bpf_perf_event_value *accum;
  62
  63                diff.counter = after->counter - before->counter;
  64                diff.enabled = after->enabled - before->enabled;
  65                diff.running = after->running - before->running;
  66
  67                accum = bpf_map_lookup_elem(&accum_readings, &zero);
  68                if (accum) {
  69                        accum->counter += diff.counter;
  70                        accum->enabled += diff.enabled;
  71                        accum->running += diff.running;
  72                }
  73        }
  74}
  75
  76SEC("fexit/XXX")
  77int BPF_PROG(fexit_XXX)
  78{
  79        struct bpf_perf_event_value reading;
  80        __u32 cpu = bpf_get_smp_processor_id();
  81        int err;
  82
  83        /* read all events before updating the maps, to reduce error */
  84        err = bpf_perf_event_read_value(&events, cpu, &reading, sizeof(reading));
  85        if (err)
  86                return 0;
  87
  88        fexit_update_maps(&reading);
  89        return 0;
  90}
  91
  92char LICENSE[] SEC("license") = "Dual BSD/GPL";
  93