linux/tools/lib/bpf/bpf_core_read.h
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
#ifndef __BPF_CORE_READ_H__
#define __BPF_CORE_READ_H__

/*
 * enum bpf_field_info_kind is passed as the second argument to the
 * __builtin_preserve_field_info() built-in to get a specific aspect of
 * the field captured as the first argument. __builtin_preserve_field_info(field,
 * info_kind) returns a __u32 integer and produces a BTF field relocation,
 * which is understood and processed by libbpf during BPF object loading.
 * See selftests/bpf for examples.
 */
enum bpf_field_info_kind {
        BPF_FIELD_BYTE_OFFSET = 0,      /* field byte offset */
        BPF_FIELD_BYTE_SIZE = 1,
        BPF_FIELD_EXISTS = 2,           /* field existence in target kernel */
        BPF_FIELD_SIGNED = 3,
        BPF_FIELD_LSHIFT_U64 = 4,
        BPF_FIELD_RSHIFT_U64 = 5,
};
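
/*
 * Usage sketch: the convenience macros further below wrap this built-in,
 * but it can also be invoked directly. Assuming task is a valid
 * struct task_struct pointer, this captures the byte offset of its pid
 * field as known to the target kernel:
 *
 *      __u32 off = __builtin_preserve_field_info(task->pid,
 *                                                BPF_FIELD_BYTE_OFFSET);
 */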

/* second argument to __builtin_btf_type_id() built-in */
enum bpf_type_id_kind {
        BPF_TYPE_ID_LOCAL = 0,          /* BTF type ID in local program */
        BPF_TYPE_ID_TARGET = 1,         /* BTF type ID in target kernel */
};

/* second argument to __builtin_preserve_type_info() built-in */
enum bpf_type_info_kind {
        BPF_TYPE_EXISTS = 0,            /* type existence in target kernel */
        BPF_TYPE_SIZE = 1,              /* type size in target kernel */
};

/* second argument to __builtin_preserve_enum_value() built-in */
enum bpf_enum_value_kind {
        BPF_ENUMVAL_EXISTS = 0,         /* enum value existence in kernel */
        BPF_ENUMVAL_VALUE = 1,          /* enum value integer value */
};
  39
  40#define __CORE_RELO(src, field, info)                                         \
  41        __builtin_preserve_field_info((src)->field, BPF_FIELD_##info)
  42
  43#if __BYTE_ORDER == __LITTLE_ENDIAN
  44#define __CORE_BITFIELD_PROBE_READ(dst, src, fld)                             \
  45        bpf_probe_read_kernel(                                                \
  46                        (void *)dst,                                  \
  47                        __CORE_RELO(src, fld, BYTE_SIZE),                     \
  48                        (const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET))
#else
/* semantics of LSHIFT_U64 assume loading values into low-ordered bytes, so
 * for big-endian we need to adjust the destination pointer accordingly,
 * based on field byte size: e.g., a 2-byte underlying storage read into an
 * 8-byte u64 must land at dst + 6
 */
#define __CORE_BITFIELD_PROBE_READ(dst, src, fld)                             \
        bpf_probe_read_kernel(                                                \
                        (void *)dst + (8 - __CORE_RELO(src, fld, BYTE_SIZE)), \
                        __CORE_RELO(src, fld, BYTE_SIZE),                     \
                        (const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET))
#endif

/*
 * Extract bitfield, identified by s->field, and return its value as u64.
 * All this is done in a relocatable manner, so bitfield changes such as
 * signedness, bit size, or byte offset are handled automatically.
 * This version of the macro uses bpf_probe_read_kernel() to read the
 * underlying integer storage. The macro functions as an expression and
 * evaluates to the extracted bitfield value; a failed probe read leaves
 * the initial zero value in place.
 */
#define BPF_CORE_READ_BITFIELD_PROBED(s, field) ({                            \
        unsigned long long val = 0;                                           \
                                                                              \
        __CORE_BITFIELD_PROBE_READ(&val, s, field);                           \
        val <<= __CORE_RELO(s, field, LSHIFT_U64);                            \
        if (__CORE_RELO(s, field, SIGNED))                                    \
                val = ((long long)val) >> __CORE_RELO(s, field, RSHIFT_U64);  \
        else                                                                  \
                val = val >> __CORE_RELO(s, field, RSHIFT_U64);               \
        val;                                                                  \
})
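
/*
 * Usage sketch, assuming skb is a struct sk_buff pointer obtained in
 * a kprobe context (pkt_type is a 3-bit bitfield in struct sk_buff):
 *
 *      __u64 pkt_type = BPF_CORE_READ_BITFIELD_PROBED(skb, pkt_type);
 */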

/*
 * Extract bitfield, identified by s->field, and return its value as u64.
 * This version of the macro uses direct memory reads and should be used from
 * BPF program types that support such functionality (e.g., typed raw
 * tracepoints).
 */
#define BPF_CORE_READ_BITFIELD(s, field) ({                                   \
        const void *p = (const void *)s + __CORE_RELO(s, field, BYTE_OFFSET); \
        unsigned long long val;                                               \
                                                                              \
        switch (__CORE_RELO(s, field, BYTE_SIZE)) {                           \
        case 1: val = *(const unsigned char *)p; break;                       \
        case 2: val = *(const unsigned short *)p; break;                      \
        case 4: val = *(const unsigned int *)p; break;                        \
        case 8: val = *(const unsigned long long *)p; break;                  \
        }                                                                     \
        val <<= __CORE_RELO(s, field, LSHIFT_U64);                            \
        if (__CORE_RELO(s, field, SIGNED))                                    \
                val = ((long long)val) >> __CORE_RELO(s, field, RSHIFT_U64);  \
        else                                                                  \
                val = val >> __CORE_RELO(s, field, RSHIFT_U64);               \
        val;                                                                  \
})
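
/*
 * Usage sketch for the direct-read variant, e.g. from a tp_btf program
 * where kernel memory is directly readable (BPF_PROG() is assumed to come
 * from bpf_tracing.h):
 *
 *      SEC("tp_btf/netif_receive_skb")
 *      int BPF_PROG(trace_skb, struct sk_buff *skb)
 *      {
 *              __u64 pkt_type = BPF_CORE_READ_BITFIELD(skb, pkt_type);
 *
 *              return 0;
 *      }
 */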

/*
 * Convenience macro to check that a field actually exists in a target kernel.
 * Returns:
 *    1, if matching field is present in target kernel;
 *    0, if no matching field found.
 */
#define bpf_core_field_exists(field)                                        \
        __builtin_preserve_field_info(field, BPF_FIELD_EXISTS)
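
/*
 * Usage sketch: struct task_struct renamed its state field to __state in
 * v5.14, so a portable probe can branch on field existence. Here
 * task_struct___old is a hypothetical local "flavor" definition that
 * carries the old field name:
 *
 *      long state;
 *
 *      if (bpf_core_field_exists(task->__state))
 *              state = BPF_CORE_READ(task, __state);
 *      else
 *              state = BPF_CORE_READ((struct task_struct___old *)task, state);
 */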

/*
 * Convenience macro to get the byte size of a field. Works for integers,
 * struct/unions, pointers, arrays, and enums.
 */
#define bpf_core_field_size(field)                                          \
        __builtin_preserve_field_info(field, BPF_FIELD_BYTE_SIZE)
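
/*
 * Usage sketch: capture how wide a field is on the running kernel instead
 * of hard-coding a size from local headers (assuming a valid task pointer;
 * task->comm is 16 bytes on typical kernels):
 *
 *      __u32 comm_sz = bpf_core_field_size(task->comm);
 */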

/*
 * Convenience macro to get the BTF type ID of a specified type, using local
 * BTF information. Returns a 32-bit unsigned integer with the type ID from
 * the program's own BTF. Always succeeds.
 */
#define bpf_core_type_id_local(type)                                        \
        __builtin_btf_type_id(*(typeof(type) *)0, BPF_TYPE_ID_LOCAL)

/*
 * Convenience macro to get the BTF type ID of a target kernel's type that
 * matches specified local type.
 * Returns:
 *    - valid 32-bit unsigned type ID in kernel BTF;
 *    - 0, if no matching type was found in a target kernel BTF.
 */
#define bpf_core_type_id_kernel(type)                                       \
        __builtin_btf_type_id(*(typeof(type) *)0, BPF_TYPE_ID_TARGET)
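
/*
 * Usage sketch: one consumer of kernel-side type IDs is the
 * bpf_snprintf_btf() helper, which pretty-prints a kernel object given its
 * BTF type ID (buf and task are assumed to be defined by the caller):
 *
 *      struct btf_ptr ptr = {
 *              .ptr = task,
 *              .type_id = bpf_core_type_id_kernel(struct task_struct),
 *      };
 *
 *      bpf_snprintf_btf(buf, sizeof(buf), &ptr, sizeof(ptr), 0);
 */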

/*
 * Convenience macro to check that provided named type
 * (struct/union/enum/typedef) exists in a target kernel.
 * Returns:
 *    1, if such type is present in target kernel's BTF;
 *    0, if no matching type is found.
 */
#define bpf_core_type_exists(type)                                          \
        __builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_EXISTS)
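
/*
 * Usage sketch: feature-gate logic on a type that only newer kernels
 * define, e.g. struct bpf_iter__task, which arrived with task iterator
 * support:
 *
 *      bool has_task_iter = bpf_core_type_exists(struct bpf_iter__task);
 */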

/*
 * Convenience macro to get the byte size of a provided named type
 * (struct/union/enum/typedef) in a target kernel.
 * Returns:
 *    >= 0 size (in bytes), if type is present in target kernel's BTF;
 *    0, if no matching type is found.
 */
#define bpf_core_type_size(type)                                            \
        __builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_SIZE)
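
/*
 * Usage sketch: struct task_struct's size varies with kernel version and
 * config, so query it at load time rather than trusting local headers:
 *
 *      __u32 task_sz = bpf_core_type_size(struct task_struct);
 */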

/*
 * Convenience macro to check that provided enumerator value is defined in
 * a target kernel.
 * Returns:
 *    1, if specified enum type and its enumerator value are present in target
 *    kernel's BTF;
 *    0, if no matching enum and/or enum value within that enum is found.
 */
#define bpf_core_enum_value_exists(enum_type, enum_value)                   \
        __builtin_preserve_enum_value(*(typeof(enum_type) *)enum_value, BPF_ENUMVAL_EXISTS)

/*
 * Convenience macro to get the integer value of an enumerator value in
 * a target kernel.
 * Returns:
 *    64-bit value, if specified enum type and its enumerator value are
 *    present in target kernel's BTF;
 *    0, if no matching enum and/or enum value within that enum is found.
 */
#define bpf_core_enum_value(enum_type, enum_value)                          \
        __builtin_preserve_enum_value(*(typeof(enum_type) *)enum_value, BPF_ENUMVAL_VALUE)
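
/*
 * Usage sketch: the two macros typically pair up, first checking that an
 * enumerator exists in the target kernel and then using its target-side
 * integer value, e.g. probing for a helper by its enum bpf_func_id entry:
 *
 *      int id = -1;
 *
 *      if (bpf_core_enum_value_exists(enum bpf_func_id, BPF_FUNC_ringbuf_output))
 *              id = bpf_core_enum_value(enum bpf_func_id, BPF_FUNC_ringbuf_output);
 */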

/*
 * bpf_core_read() abstracts away bpf_probe_read_kernel() call and captures
 * offset relocation for source address using __builtin_preserve_access_index()
 * built-in, provided by Clang.
 *
 * __builtin_preserve_access_index() takes as an argument an expression that
 * takes the address of a field within a struct/union. It makes the compiler
 * emit a relocation, which records the BTF type ID describing the root
 * struct/union and an accessor string describing the exact embedded field
 * that was used to take the address. See the detailed description of this
 * relocation format and its semantics in the comments to struct
 * bpf_field_reloc in libbpf_internal.h.
 *
 * This relocation allows libbpf to adjust the BPF instruction to use the
 * correct actual field offset, based on the target kernel BTF type that
 * matches the original (local) BTF used to record the relocation.
 */
#define bpf_core_read(dst, sz, src)                                         \
        bpf_probe_read_kernel(dst, sz,                                      \
                              (const void *)__builtin_preserve_access_index(src))
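
/*
 * Usage sketch: a single relocatable read of a scalar field, assuming task
 * is a valid struct task_struct pointer (e.g. from
 * bpf_get_current_task_btf()):
 *
 *      pid_t pid;
 *
 *      bpf_core_read(&pid, sizeof(pid), &task->pid);
 */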

/*
 * bpf_core_read_str() is a thin wrapper around bpf_probe_read_kernel_str()
 * additionally emitting BPF CO-RE field relocation for specified source
 * argument.
 */
#define bpf_core_read_str(dst, sz, src)                                     \
        bpf_probe_read_kernel_str(dst, sz,                                  \
                                  (const void *)__builtin_preserve_access_index(src))
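
/*
 * Usage sketch: copy a NUL-terminated string field in a relocatable way,
 * again assuming a valid task pointer:
 *
 *      char comm[16];
 *
 *      bpf_core_read_str(comm, sizeof(comm), &task->comm);
 */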

#define ___concat(a, b) a ## b
#define ___apply(fn, n) ___concat(fn, n)
#define ___nth(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, N, ...) N

/*
 * return number of provided arguments; used for switch-based variadic macro
 * definitions (see ___last, ___arrow, etc below)
 */
#define ___narg(...) ___nth(_, ##__VA_ARGS__, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
/*
 * return 0 if no arguments are passed, N - otherwise; used for
 * recursively-defined macros to specify termination (0) case, and generic
 * (N) case (e.g., ___read_ptrs, ___core_read)
 */
#define ___empty(...) ___nth(_, ##__VA_ARGS__, N, N, N, N, N, N, N, N, N, N, 0)
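
/*
 * Expansion sketch: ___narg(a, b, c) shifts the count list over so that 3
 * lands in the N slot and the macro expands to 3, while ___narg() expands
 * to 0. ___empty() similarly yields 0 or N, which ___apply() then pastes
 * onto a macro name to select a variant:
 *
 *      ___apply(___core_read, ___empty())     ->  ___core_read0
 *      ___apply(___core_read, ___empty(a, b)) ->  ___core_readN
 */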

#define ___last1(x) x
#define ___last2(a, x) x
#define ___last3(a, b, x) x
#define ___last4(a, b, c, x) x
#define ___last5(a, b, c, d, x) x
#define ___last6(a, b, c, d, e, x) x
#define ___last7(a, b, c, d, e, f, x) x
#define ___last8(a, b, c, d, e, f, g, x) x
#define ___last9(a, b, c, d, e, f, g, h, x) x
#define ___last10(a, b, c, d, e, f, g, h, i, x) x
#define ___last(...) ___apply(___last, ___narg(__VA_ARGS__))(__VA_ARGS__)

#define ___nolast2(a, _) a
#define ___nolast3(a, b, _) a, b
#define ___nolast4(a, b, c, _) a, b, c
#define ___nolast5(a, b, c, d, _) a, b, c, d
#define ___nolast6(a, b, c, d, e, _) a, b, c, d, e
#define ___nolast7(a, b, c, d, e, f, _) a, b, c, d, e, f
#define ___nolast8(a, b, c, d, e, f, g, _) a, b, c, d, e, f, g
#define ___nolast9(a, b, c, d, e, f, g, h, _) a, b, c, d, e, f, g, h
#define ___nolast10(a, b, c, d, e, f, g, h, i, _) a, b, c, d, e, f, g, h, i
#define ___nolast(...) ___apply(___nolast, ___narg(__VA_ARGS__))(__VA_ARGS__)

#define ___arrow1(a) a
#define ___arrow2(a, b) a->b
#define ___arrow3(a, b, c) a->b->c
#define ___arrow4(a, b, c, d) a->b->c->d
#define ___arrow5(a, b, c, d, e) a->b->c->d->e
#define ___arrow6(a, b, c, d, e, f) a->b->c->d->e->f
#define ___arrow7(a, b, c, d, e, f, g) a->b->c->d->e->f->g
#define ___arrow8(a, b, c, d, e, f, g, h) a->b->c->d->e->f->g->h
#define ___arrow9(a, b, c, d, e, f, g, h, i) a->b->c->d->e->f->g->h->i
#define ___arrow10(a, b, c, d, e, f, g, h, i, j) a->b->c->d->e->f->g->h->i->j
#define ___arrow(...) ___apply(___arrow, ___narg(__VA_ARGS__))(__VA_ARGS__)

#define ___type(...) typeof(___arrow(__VA_ARGS__))

#define ___read(read_fn, dst, src_type, src, accessor)                      \
        read_fn((void *)(dst), sizeof(*(dst)), &((src_type)(src))->accessor)

/* "recursively" read a sequence of inner pointers using local __t var */
#define ___rd_first(src, a) ___read(bpf_core_read, &__t, ___type(src), src, a);
#define ___rd_last(...)                                                     \
        ___read(bpf_core_read, &__t,                                        \
                ___type(___nolast(__VA_ARGS__)), __t, ___last(__VA_ARGS__));
#define ___rd_p1(...) const void *__t; ___rd_first(__VA_ARGS__)
#define ___rd_p2(...) ___rd_p1(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
#define ___rd_p3(...) ___rd_p2(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
#define ___rd_p4(...) ___rd_p3(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
#define ___rd_p5(...) ___rd_p4(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
#define ___rd_p6(...) ___rd_p5(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
#define ___rd_p7(...) ___rd_p6(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
#define ___rd_p8(...) ___rd_p7(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
#define ___rd_p9(...) ___rd_p8(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
#define ___read_ptrs(src, ...)                                              \
        ___apply(___rd_p, ___narg(__VA_ARGS__))(src, __VA_ARGS__)

#define ___core_read0(fn, dst, src, a)                                      \
        ___read(fn, dst, ___type(src), src, a);
#define ___core_readN(fn, dst, src, ...)                                    \
        ___read_ptrs(src, ___nolast(__VA_ARGS__))                           \
        ___read(fn, dst, ___type(src, ___nolast(__VA_ARGS__)), __t,         \
                ___last(__VA_ARGS__));
#define ___core_read(fn, dst, src, a, ...)                                  \
        ___apply(___core_read, ___empty(__VA_ARGS__))(fn, dst,              \
                                                      src, a, ##__VA_ARGS__)

/*
 * BPF_CORE_READ_INTO() is a more performance-conscious variant of
 * BPF_CORE_READ(), in which the final field is read into user-provided
 * storage. See BPF_CORE_READ() below for more details on general usage.
 */
#define BPF_CORE_READ_INTO(dst, src, a, ...)                                \
        ({                                                                  \
                ___core_read(bpf_core_read, dst, (src), a, ##__VA_ARGS__)   \
        })
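
/*
 * Usage sketch: read task->mm->arg_start into a local variable, with each
 * pointer hop performed as a separate relocatable probe read:
 *
 *      unsigned long arg_start = 0;
 *
 *      BPF_CORE_READ_INTO(&arg_start, task, mm, arg_start);
 */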
 302
 303/*
 304 * BPF_CORE_READ_STR_INTO() does same "pointer chasing" as
 305 * BPF_CORE_READ() for intermediate pointers, but then executes (and returns
 306 * corresponding error code) bpf_core_read_str() for final string read.
 307 */
 308#define BPF_CORE_READ_STR_INTO(dst, src, a, ...)                            \
 309        ({                                                                  \
 310                ___core_read(bpf_core_read_str, dst, (src), a, ##__VA_ARGS__)\
 311        })
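
/*
 * Usage sketch: chase one intermediate pointer, then string-read the final
 * inline char array:
 *
 *      char comm[16];
 *
 *      BPF_CORE_READ_STR_INTO(&comm, task, group_leader, comm);
 */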
 312
 313/*
 314 * BPF_CORE_READ() is used to simplify BPF CO-RE relocatable read, especially
 315 * when there are few pointer chasing steps.
 316 * E.g., what in non-BPF world (or in BPF w/ BCC) would be something like:
 317 *      int x = s->a.b.c->d.e->f->g;
 318 * can be succinctly achieved using BPF_CORE_READ as:
 319 *      int x = BPF_CORE_READ(s, a.b.c, d.e, f, g);
 320 *
 321 * BPF_CORE_READ will decompose above statement into 4 bpf_core_read (BPF
 322 * CO-RE relocatable bpf_probe_read_kernel() wrapper) calls, logically
 323 * equivalent to:
 324 * 1. const void *__t = s->a.b.c;
 325 * 2. __t = __t->d.e;
 326 * 3. __t = __t->f;
 327 * 4. return __t->g;
 328 *
 329 * Equivalence is logical, because there is a heavy type casting/preservation
 330 * involved, as well as all the reads are happening through
 331 * bpf_probe_read_kernel() calls using __builtin_preserve_access_index() to
 332 * emit CO-RE relocations.
 333 *
 334 * N.B. Only up to 9 "field accessors" are supported, which should be more
 335 * than enough for any practical purpose.
 336 */
 337#define BPF_CORE_READ(src, a, ...)                                          \
 338        ({                                                                  \
 339                ___type((src), a, ##__VA_ARGS__) __r;                       \
 340                BPF_CORE_READ_INTO(&__r, (src), a, ##__VA_ARGS__);          \
 341                __r;                                                        \
 342        })

#endif