linux/include/trace/bpf_probe.h
/* SPDX-License-Identifier: GPL-2.0 */

#undef TRACE_SYSTEM_VAR

#ifdef CONFIG_BPF_EVENTS

#undef __entry
#define __entry entry

#undef __get_dynamic_array
#define __get_dynamic_array(field)      \
                ((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_dynamic_array_len
#define __get_dynamic_array_len(field)  \
                ((__entry->__data_loc_##field >> 16) & 0xffff)

#undef __get_str
#define __get_str(field) ((char *)__get_dynamic_array(field))

#undef __get_bitmask
#define __get_bitmask(field) (char *)__get_dynamic_array(field)

#undef __get_sockaddr
#define __get_sockaddr(field) ((struct sockaddr *)__get_dynamic_array(field))
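
/*
 * For illustration, assuming a hypothetical dynamic field "msg": a
 * __data_loc_<field> word packs the payload's offset from the start of
 * the trace entry into its low 16 bits and the payload length into the
 * next 16 bits, which is what the "& 0xffff" and ">> 16" above pick
 * apart.  Decoding it by hand would look roughly like:
 *
 *      u32 loc  = __entry->__data_loc_msg;
 *      void *msg = (void *)__entry + (loc & 0xffff);   payload start
 *      u16 len  = (loc >> 16) & 0xffff;                payload length
 */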

#undef __get_rel_dynamic_array
#define __get_rel_dynamic_array(field)  \
                ((void *)(&__entry->__rel_loc_##field) +        \
                 sizeof(__entry->__rel_loc_##field) +           \
                 (__entry->__rel_loc_##field & 0xffff))

#undef __get_rel_dynamic_array_len
#define __get_rel_dynamic_array_len(field)      \
                ((__entry->__rel_loc_##field >> 16) & 0xffff)

#undef __get_rel_str
#define __get_rel_str(field) ((char *)__get_rel_dynamic_array(field))

#undef __get_rel_bitmask
#define __get_rel_bitmask(field) (char *)__get_rel_dynamic_array(field)

#undef __get_rel_sockaddr
#define __get_rel_sockaddr(field) ((struct sockaddr *)__get_rel_dynamic_array(field))

#undef __perf_count
#define __perf_count(c) (c)

#undef __perf_task
#define __perf_task(t)  (t)

/* cast any integer, pointer, or small struct to u64 */
#define UINTTYPE(size) \
        __typeof__(__builtin_choose_expr(size == 1,  (u8)1, \
                   __builtin_choose_expr(size == 2, (u16)2, \
                   __builtin_choose_expr(size == 4, (u32)3, \
                   __builtin_choose_expr(size == 8, (u64)4, \
                                         (void)5)))))
#define __CAST_TO_U64(x) ({ \
        typeof(x) __src = (x); \
        UINTTYPE(sizeof(x)) __dst; \
        memcpy(&__dst, &__src, sizeof(__dst)); \
        (u64)__dst; })
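
/*
 * For illustration, with a hypothetical 2-byte argument: UINTTYPE(2)
 * selects u16, the memcpy() bit-copies the value into that same-sized
 * unsigned scratch variable, and the final (u64) widens it by zero
 * extension, so
 *
 *      short port = -1;
 *      u64 v = __CAST_TO_U64(port);
 *
 * yields v == 0xffff rather than a sign-extended value, and pointers
 * are passed through bit-for-bit without a pointer-to-integer cast.
 * A type whose size is not 1, 2, 4 or 8 makes UINTTYPE() resolve to
 * (void), so __CAST_TO_U64() refuses to compile for it.
 */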

#define __CAST1(a,...) __CAST_TO_U64(a)
#define __CAST2(a,...) __CAST_TO_U64(a), __CAST1(__VA_ARGS__)
#define __CAST3(a,...) __CAST_TO_U64(a), __CAST2(__VA_ARGS__)
#define __CAST4(a,...) __CAST_TO_U64(a), __CAST3(__VA_ARGS__)
#define __CAST5(a,...) __CAST_TO_U64(a), __CAST4(__VA_ARGS__)
#define __CAST6(a,...) __CAST_TO_U64(a), __CAST5(__VA_ARGS__)
#define __CAST7(a,...) __CAST_TO_U64(a), __CAST6(__VA_ARGS__)
#define __CAST8(a,...) __CAST_TO_U64(a), __CAST7(__VA_ARGS__)
#define __CAST9(a,...) __CAST_TO_U64(a), __CAST8(__VA_ARGS__)
#define __CAST10(a,...) __CAST_TO_U64(a), __CAST9(__VA_ARGS__)
#define __CAST11(a,...) __CAST_TO_U64(a), __CAST10(__VA_ARGS__)
#define __CAST12(a,...) __CAST_TO_U64(a), __CAST11(__VA_ARGS__)
/* tracepoints with more than 12 arguments will hit build error */
#define CAST_TO_U64(...) CONCATENATE(__CAST, COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__)
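
/*
 * Expansion sketch with hypothetical arguments:
 *
 *      CAST_TO_U64(dev, skb, len)
 *        -> __CAST3(dev, skb, len)
 *        -> __CAST_TO_U64(dev), __CAST_TO_U64(skb), __CAST_TO_U64(len)
 *
 * Anything past 12 arguments has no matching __CASTn() helper, so the
 * build fails, as the comment above says.
 */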

#define __BPF_DECLARE_TRACE(call, proto, args)                          \
static notrace void                                                     \
__bpf_trace_##call(void *__data, proto)                                 \
{                                                                       \
        struct bpf_prog *prog = __data;                                 \
        CONCATENATE(bpf_trace_run, COUNT_ARGS(args))(prog, CAST_TO_U64(args));  \
}
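
/*
 * Shape of the expansion for a hypothetical tracepoint "foo" whose proto
 * is (struct sk_buff *skb, unsigned int len) and whose args are
 * (skb, len); the names are made up, the structure is what the macro
 * emits:
 *
 *      static notrace void
 *      __bpf_trace_foo(void *__data, struct sk_buff *skb, unsigned int len)
 *      {
 *              struct bpf_prog *prog = __data;
 *              bpf_trace_run2(prog, __CAST_TO_U64(skb), __CAST_TO_U64(len));
 *      }
 *
 * __data is the attached bpf_prog, and bpf_trace_run2() runs it with the
 * two arguments flattened to u64.
 */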

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)  \
        __BPF_DECLARE_TRACE(call, PARAMS(proto), PARAMS(args))

/*
 * This part is compiled out, it is only here as a build time check
 * to make sure that if the tracepoint handling changes, the
 * bpf probe will fail to compile unless it too is updated.
 */
#define __DEFINE_EVENT(template, call, proto, args, size)               \
static inline void bpf_test_probe_##call(void)                          \
{                                                                       \
        check_trace_callback_type_##call(__bpf_trace_##template);       \
}                                                                       \
typedef void (*btf_trace_##call)(void *__data, proto);                  \
static union {                                                          \
        struct bpf_raw_event_map event;                                 \
        btf_trace_##call handler;                                       \
} __bpf_trace_tp_map_##call __used                                      \
__section("__bpf_raw_tp_map") = {                                       \
        .event = {                                                      \
                .tp             = &__tracepoint_##call,                 \
                .bpf_func       = __bpf_trace_##template,               \
                .num_args       = COUNT_ARGS(args),                     \
                .writable_size  = size,                                 \
        },                                                              \
};
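
/*
 * bpf_test_probe_##call() above is never called; it only exists so the
 * compiler checks that __bpf_trace_##template has the exact signature
 * check_trace_callback_type_##call() expects.  The union drops a
 * struct bpf_raw_event_map into the __bpf_raw_tp_map section, where the
 * raw-tracepoint attach path (bpf_get_raw_tracepoint()) looks the event
 * up by name, and the btf_trace_##call typedef gives BTF a typed
 * prototype for the handler.
 */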

#define FIRST(x, ...) x

#define __CHECK_WRITABLE_BUF_SIZE(call, proto, args, size)              \
static inline void bpf_test_buffer_##call(void)                         \
{                                                                       \
        /* BUILD_BUG_ON() is ignored if the code is completely eliminated, but \
         * BUILD_BUG_ON_ZERO() uses a different mechanism that is not   \
         * dead-code-eliminated.                                        \
         */                                                             \
        FIRST(proto);                                                   \
        (void)BUILD_BUG_ON_ZERO(size != sizeof(*FIRST(args)));          \
}
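
/*
 * Illustration with made-up names: if a writable tracepoint's proto is
 * (struct foo_ctx *ctx, u32 flags), its args are (ctx, flags) and the
 * declared writable size is sizeof(struct foo_ctx), then FIRST(proto)
 * re-declares "struct foo_ctx *ctx" as an unused local and the
 * BUILD_BUG_ON_ZERO() breaks the build unless size == sizeof(*ctx),
 * i.e. the buffer a BPF program may write to really is the first
 * argument's pointee.
 */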

#undef DEFINE_EVENT_WRITABLE
#define DEFINE_EVENT_WRITABLE(template, call, proto, args, size) \
        __CHECK_WRITABLE_BUF_SIZE(call, PARAMS(proto), PARAMS(args), size) \
        __DEFINE_EVENT(template, call, PARAMS(proto), PARAMS(args), size)

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)                       \
        __DEFINE_EVENT(template, call, PARAMS(proto), PARAMS(args), 0)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)  \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#undef DECLARE_TRACE
#define DECLARE_TRACE(call, proto, args)                                \
        __BPF_DECLARE_TRACE(call, PARAMS(proto), PARAMS(args))          \
        __DEFINE_EVENT(call, call, PARAMS(proto), PARAMS(args), 0)

#undef DECLARE_TRACE_WRITABLE
#define DECLARE_TRACE_WRITABLE(call, proto, args, size) \
        __CHECK_WRITABLE_BUF_SIZE(call, PARAMS(proto), PARAMS(args), size) \
        __BPF_DECLARE_TRACE(call, PARAMS(proto), PARAMS(args)) \
        __DEFINE_EVENT(call, call, PARAMS(proto), PARAMS(args), size)

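/*
 * Re-including the trace header here expands each DECLARE_EVENT_CLASS/
 * DEFINE_EVENT/DECLARE_TRACE in it once more with the definitions
 * above, so every tracepoint picks up a __bpf_trace_*() thunk and a
 * bpf_raw_event_map entry; the same multiple-inclusion trick is used
 * by trace_events.h and perf.h in this directory.
 */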
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef DECLARE_TRACE_WRITABLE
#undef DEFINE_EVENT_WRITABLE
#undef __CHECK_WRITABLE_BUF_SIZE
#undef __DEFINE_EVENT
#undef FIRST

#endif /* CONFIG_BPF_EVENTS */