linux/tools/lib/bpf/relo_core.c
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2019 Facebook */

#ifdef __KERNEL__
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/string.h>
#include <linux/bpf_verifier.h>
#include "relo_core.h"

static const char *btf_kind_str(const struct btf_type *t)
{
	return btf_type_str(t);
}
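
/* The BPF_LD | BPF_IMM | BPF_DW ("ldimm64") instruction occupies two
 * consecutive struct bpf_insn slots: insn[0].imm holds the low 32 bits of
 * the 64-bit immediate and insn[1].imm holds the high 32 bits. This is why
 * the poisoning and patching code below treats insn and insn + 1 together.
 */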
static bool is_ldimm64_insn(struct bpf_insn *insn)
{
	return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
}

static const struct btf_type *
skip_mods_and_typedefs(const struct btf *btf, u32 id, u32 *res_id)
{
	return btf_type_skip_modifiers(btf, id, res_id);
}

static const char *btf__name_by_offset(const struct btf *btf, u32 offset)
{
	return btf_name_by_offset(btf, offset);
}

static s64 btf__resolve_size(const struct btf *btf, u32 type_id)
{
	const struct btf_type *t;
	int size;

	t = btf_type_by_id(btf, type_id);
	t = btf_resolve_size(btf, t, &size);
	if (IS_ERR(t))
		return PTR_ERR(t);
	return size;
}

enum libbpf_print_level {
	LIBBPF_WARN,
	LIBBPF_INFO,
	LIBBPF_DEBUG,
};

#undef pr_warn
#undef pr_info
#undef pr_debug
#define pr_warn(fmt, log, ...)	bpf_log((void *)log, fmt, "", ##__VA_ARGS__)
#define pr_info(fmt, log, ...)	bpf_log((void *)log, fmt, "", ##__VA_ARGS__)
#define pr_debug(fmt, log, ...)	bpf_log((void *)log, fmt, "", ##__VA_ARGS__)
#define libbpf_print(level, fmt, ...)	bpf_log((void *)prog_name, fmt, ##__VA_ARGS__)
#else
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <ctype.h>
#include <linux/err.h>

#include "libbpf.h"
#include "bpf.h"
#include "btf.h"
#include "str_error.h"
#include "libbpf_internal.h"
#endif
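
/* E.g., for a classic C99 flexible array member (purely illustrative):
 *
 *   struct report { int len; char data[]; };
 *
 * an access like report->data[100] indexes past the declared bound
 * (nelems == 0), but is accepted below because 'data' is a named field and
 * the last member of its enclosing struct.
 */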
static bool is_flex_arr(const struct btf *btf,
			const struct bpf_core_accessor *acc,
			const struct btf_array *arr)
{
	const struct btf_type *t;

	/* not a flexible array if it's not inside a struct or has a non-zero size */
	if (!acc->name || arr->nelems > 0)
		return false;

	/* has to be the last member of the enclosing struct */
	t = btf_type_by_id(btf, acc->type_id);
	return acc->idx == btf_vlen(t) - 1;
}

static const char *core_relo_kind_str(enum bpf_core_relo_kind kind)
{
	switch (kind) {
	case BPF_CORE_FIELD_BYTE_OFFSET: return "byte_off";
	case BPF_CORE_FIELD_BYTE_SIZE: return "byte_sz";
	case BPF_CORE_FIELD_EXISTS: return "field_exists";
	case BPF_CORE_FIELD_SIGNED: return "signed";
	case BPF_CORE_FIELD_LSHIFT_U64: return "lshift_u64";
	case BPF_CORE_FIELD_RSHIFT_U64: return "rshift_u64";
	case BPF_CORE_TYPE_ID_LOCAL: return "local_type_id";
	case BPF_CORE_TYPE_ID_TARGET: return "target_type_id";
	case BPF_CORE_TYPE_EXISTS: return "type_exists";
	case BPF_CORE_TYPE_SIZE: return "type_size";
	case BPF_CORE_ENUMVAL_EXISTS: return "enumval_exists";
	case BPF_CORE_ENUMVAL_VALUE: return "enumval_value";
	default: return "unknown";
	}
}

static bool core_relo_is_field_based(enum bpf_core_relo_kind kind)
{
	switch (kind) {
	case BPF_CORE_FIELD_BYTE_OFFSET:
	case BPF_CORE_FIELD_BYTE_SIZE:
	case BPF_CORE_FIELD_EXISTS:
	case BPF_CORE_FIELD_SIGNED:
	case BPF_CORE_FIELD_LSHIFT_U64:
	case BPF_CORE_FIELD_RSHIFT_U64:
		return true;
	default:
		return false;
	}
}

static bool core_relo_is_type_based(enum bpf_core_relo_kind kind)
{
	switch (kind) {
	case BPF_CORE_TYPE_ID_LOCAL:
	case BPF_CORE_TYPE_ID_TARGET:
	case BPF_CORE_TYPE_EXISTS:
	case BPF_CORE_TYPE_SIZE:
		return true;
	default:
		return false;
	}
}

static bool core_relo_is_enumval_based(enum bpf_core_relo_kind kind)
{
	switch (kind) {
	case BPF_CORE_ENUMVAL_EXISTS:
	case BPF_CORE_ENUMVAL_VALUE:
		return true;
	default:
		return false;
	}
}

/*
 * Turn bpf_core_relo into a low- and high-level spec representation,
 * validating correctness along the way, as well as calculating the resulting
 * field bit offset specified by the accessor string. Low-level spec captures
 * every single level of nestedness, including traversing anonymous
 * struct/union members. High-level one only captures semantically meaningful
 * "turning points": named fields and array indices.
 * E.g., for this case:
 *
 *   struct sample {
 *       int __unimportant;
 *       struct {
 *           int __1;
 *           int __2;
 *           int a[7];
 *       };
 *   };
 *
 *   struct sample *s = ...;
 *
 *   int *x = &s->a[3]; // access string = '0:1:2:3'
 *
 * Low-level spec has 1:1 mapping with each element of access string (it's
 * just a parsed access string representation): [0, 1, 2, 3].
 *
 * High-level spec will capture only 3 points:
 *   - initial zero-index access by pointer (&s->... is the same as &s[0]...);
 *   - field 'a' access (corresponds to '2' in low-level spec);
 *   - array element #3 access (corresponds to '3' in low-level spec).
 *
 * Type-based relocations (TYPE_EXISTS/TYPE_SIZE,
 * TYPE_ID_LOCAL/TYPE_ID_TARGET) don't capture any field information. Their
 * spec and raw_spec are kept empty.
 *
 * Enum value-based relocations (ENUMVAL_EXISTS/ENUMVAL_VALUE) use the access
 * string to specify the enumerator's value index that needs to be relocated.
 */
static int bpf_core_parse_spec(const char *prog_name, const struct btf *btf,
			       __u32 type_id,
			       const char *spec_str,
			       enum bpf_core_relo_kind relo_kind,
			       struct bpf_core_spec *spec)
{
	int access_idx, parsed_len, i;
	struct bpf_core_accessor *acc;
	const struct btf_type *t;
	const char *name;
	__u32 id;
	__s64 sz;

	if (str_is_empty(spec_str) || *spec_str == ':')
		return -EINVAL;

	memset(spec, 0, sizeof(*spec));
	spec->btf = btf;
	spec->root_type_id = type_id;
	spec->relo_kind = relo_kind;

	/* type-based relocations don't have a field access string */
	if (core_relo_is_type_based(relo_kind)) {
		if (strcmp(spec_str, "0"))
			return -EINVAL;
		return 0;
	}

	/* parse spec_str="0:1:2:3:4" into array raw_spec=[0, 1, 2, 3, 4] */
	while (*spec_str) {
		if (*spec_str == ':')
			++spec_str;
		if (sscanf(spec_str, "%d%n", &access_idx, &parsed_len) != 1)
			return -EINVAL;
		if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
			return -E2BIG;
		spec_str += parsed_len;
		spec->raw_spec[spec->raw_len++] = access_idx;
	}

	if (spec->raw_len == 0)
		return -EINVAL;

	t = skip_mods_and_typedefs(btf, type_id, &id);
	if (!t)
		return -EINVAL;

	access_idx = spec->raw_spec[0];
	acc = &spec->spec[0];
	acc->type_id = id;
	acc->idx = access_idx;
	spec->len++;

	if (core_relo_is_enumval_based(relo_kind)) {
		if (!btf_is_enum(t) || spec->raw_len > 1 || access_idx >= btf_vlen(t))
			return -EINVAL;

		/* record enumerator name in the first accessor */
		acc->name = btf__name_by_offset(btf, btf_enum(t)[access_idx].name_off);
		return 0;
	}

	if (!core_relo_is_field_based(relo_kind))
		return -EINVAL;

	sz = btf__resolve_size(btf, id);
	if (sz < 0)
		return sz;
	spec->bit_offset = access_idx * sz * 8;

	for (i = 1; i < spec->raw_len; i++) {
		t = skip_mods_and_typedefs(btf, id, &id);
		if (!t)
			return -EINVAL;

		access_idx = spec->raw_spec[i];
		acc = &spec->spec[spec->len];

		if (btf_is_composite(t)) {
			const struct btf_member *m;
			__u32 bit_offset;

			if (access_idx >= btf_vlen(t))
				return -EINVAL;

			bit_offset = btf_member_bit_offset(t, access_idx);
			spec->bit_offset += bit_offset;

			m = btf_members(t) + access_idx;
			if (m->name_off) {
				name = btf__name_by_offset(btf, m->name_off);
				if (str_is_empty(name))
					return -EINVAL;

				acc->type_id = id;
				acc->idx = access_idx;
				acc->name = name;
				spec->len++;
			}

			id = m->type;
		} else if (btf_is_array(t)) {
			const struct btf_array *a = btf_array(t);
			bool flex;

			t = skip_mods_and_typedefs(btf, a->type, &id);
			if (!t)
				return -EINVAL;

			flex = is_flex_arr(btf, acc - 1, a);
			if (!flex && access_idx >= a->nelems)
				return -EINVAL;

			spec->spec[spec->len].type_id = id;
			spec->spec[spec->len].idx = access_idx;
			spec->len++;

			sz = btf__resolve_size(btf, id);
			if (sz < 0)
				return sz;
			spec->bit_offset += access_idx * sz * 8;
		} else {
			pr_warn("prog '%s': relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %s\n",
				prog_name, type_id, spec_str, i, id, btf_kind_str(t));
			return -EINVAL;
		}
	}

	return 0;
}
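
/* For the 'struct sample' example above, a sketch of what this produces
 * (assuming 4-byte ints): parsing "0:1:2:3" against 'struct sample' yields
 * raw_spec = [0, 1, 2, 3] (raw_len = 4), while the high-level spec keeps
 * only 3 accessors: the initial s[0] access, field 'a' (raw index 2 inside
 * the anonymous struct), and array element 3. The accumulated bit_offset is
 * 0 + 32 (anonymous struct at byte 4) + 64 ('a' at byte 8 within it) +
 * 3 * 32 = 192 bits, i.e. &s->a[3] is at byte offset 24.
 */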

/* Check two types for compatibility for the purpose of field access
 * relocation. const/volatile/restrict and typedefs are skipped to ensure we
 * are relocating semantically compatible entities:
 *   - any two STRUCTs/UNIONs are compatible and can be mixed;
 *   - any two FWDs are compatible, if their names match (modulo flavor suffix);
 *   - any two PTRs are always compatible;
 *   - for ENUMs, names should be the same (ignoring flavor suffix) or at
 *     least one of the enums should be anonymous;
 *   - for INT, size and signedness are ignored;
 *   - any two FLOATs are always compatible;
 *   - for ARRAY, dimensionality is ignored, element types are checked for
 *     compatibility recursively;
 *   - everything else shouldn't ever be a target of relocation.
 * These rules are not set in stone and probably will be adjusted as we get
 * more experience with using BPF CO-RE relocations.
 */
static int bpf_core_fields_are_compat(const struct btf *local_btf,
				      __u32 local_id,
				      const struct btf *targ_btf,
				      __u32 targ_id)
{
	const struct btf_type *local_type, *targ_type;

recur:
	local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
	targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
	if (!local_type || !targ_type)
		return -EINVAL;

	if (btf_is_composite(local_type) && btf_is_composite(targ_type))
		return 1;
	if (btf_kind(local_type) != btf_kind(targ_type))
		return 0;

	switch (btf_kind(local_type)) {
	case BTF_KIND_PTR:
	case BTF_KIND_FLOAT:
		return 1;
	case BTF_KIND_FWD:
	case BTF_KIND_ENUM: {
		const char *local_name, *targ_name;
		size_t local_len, targ_len;

		local_name = btf__name_by_offset(local_btf,
						 local_type->name_off);
		targ_name = btf__name_by_offset(targ_btf, targ_type->name_off);
		local_len = bpf_core_essential_name_len(local_name);
		targ_len = bpf_core_essential_name_len(targ_name);
		/* one of them is anonymous or both w/ same flavor-less names */
		return local_len == 0 || targ_len == 0 ||
		       (local_len == targ_len &&
			strncmp(local_name, targ_name, local_len) == 0);
	}
	case BTF_KIND_INT:
		/* just reject deprecated bitfield-like integers; all other
		 * integers are by default compatible between each other
		 */
		return btf_int_offset(local_type) == 0 &&
		       btf_int_offset(targ_type) == 0;
	case BTF_KIND_ARRAY:
		local_id = btf_array(local_type)->type;
		targ_id = btf_array(targ_type)->type;
		goto recur;
	default:
		return 0;
	}
}
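
/* A few illustrative outcomes under the rules above (all types hypothetical):
 *   - struct vs union                      -> compatible (any two composites);
 *   - enum E vs enum E___v2                -> compatible (flavor suffix ignored);
 *   - int vs unsigned long                 -> compatible (size/signedness ignored);
 *   - int[5] vs int[10]                    -> compatible (dimensionality ignored);
 *   - int vs a bitfield-like BTF integer
 *     (non-zero bit offset)                -> not compatible.
 */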

/*
 * Given a single high-level named field accessor in local type, find the
 * corresponding high-level accessor for a target type. Along the way,
 * maintain low-level spec for target as well. Also keep updating target
 * bit offset.
 *
 * Searching is performed through recursive exhaustive enumeration of all
 * fields of a struct/union. If there are any anonymous (embedded)
 * structs/unions, they are recursively searched as well. If a field with
 * the desired name is found, check compatibility between local and target
 * types before returning the result.
 *
 * 1 is returned if the field is found.
 * 0 is returned if no compatible field is found.
 * <0 is returned on error.
 */
static int bpf_core_match_member(const struct btf *local_btf,
				 const struct bpf_core_accessor *local_acc,
				 const struct btf *targ_btf,
				 __u32 targ_id,
				 struct bpf_core_spec *spec,
				 __u32 *next_targ_id)
{
	const struct btf_type *local_type, *targ_type;
	const struct btf_member *local_member, *m;
	const char *local_name, *targ_name;
	__u32 local_id;
	int i, n, found;

	targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
	if (!targ_type)
		return -EINVAL;
	if (!btf_is_composite(targ_type))
		return 0;

	local_id = local_acc->type_id;
	local_type = btf_type_by_id(local_btf, local_id);
	local_member = btf_members(local_type) + local_acc->idx;
	local_name = btf__name_by_offset(local_btf, local_member->name_off);

	n = btf_vlen(targ_type);
	m = btf_members(targ_type);
	for (i = 0; i < n; i++, m++) {
		__u32 bit_offset;

		bit_offset = btf_member_bit_offset(targ_type, i);

		/* too deep struct/union/array nesting */
		if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
			return -E2BIG;

		/* speculate this member will be the good one */
		spec->bit_offset += bit_offset;
		spec->raw_spec[spec->raw_len++] = i;

		targ_name = btf__name_by_offset(targ_btf, m->name_off);
		if (str_is_empty(targ_name)) {
			/* embedded struct/union, we need to go deeper */
			found = bpf_core_match_member(local_btf, local_acc,
						      targ_btf, m->type,
						      spec, next_targ_id);
			if (found) /* either found or error */
				return found;
		} else if (strcmp(local_name, targ_name) == 0) {
			/* matching named field */
			struct bpf_core_accessor *targ_acc;

			targ_acc = &spec->spec[spec->len++];
			targ_acc->type_id = targ_id;
			targ_acc->idx = i;
			targ_acc->name = targ_name;

			*next_targ_id = m->type;
			found = bpf_core_fields_are_compat(local_btf,
							   local_member->type,
							   targ_btf, m->type);
			if (!found)
				spec->len--; /* pop accessor */
			return found;
		}
		/* member turned out not to be what we looked for */
		spec->bit_offset -= bit_offset;
		spec->raw_len--;
	}

	return 0;
}
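
/* E.g., matching local accessor 'a' against a hypothetical target
 *
 *   struct sample { struct { int x; int a; }; };
 *
 * first descends into the anonymous member (recording raw index 0), then
 * matches 'a' by name at index 1 inside it: two raw spec entries are
 * appended, but only one high-level (named) accessor.
 */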

/*
 * Try to match local spec to a target type and, if successful, produce full
 * target spec (high-level, low-level + bit offset).
 */
static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
			       const struct btf *targ_btf, __u32 targ_id,
			       struct bpf_core_spec *targ_spec)
{
	const struct btf_type *targ_type;
	const struct bpf_core_accessor *local_acc;
	struct bpf_core_accessor *targ_acc;
	int i, sz, matched;

	memset(targ_spec, 0, sizeof(*targ_spec));
	targ_spec->btf = targ_btf;
	targ_spec->root_type_id = targ_id;
	targ_spec->relo_kind = local_spec->relo_kind;

	if (core_relo_is_type_based(local_spec->relo_kind)) {
		return bpf_core_types_are_compat(local_spec->btf,
						 local_spec->root_type_id,
						 targ_btf, targ_id);
	}

	local_acc = &local_spec->spec[0];
	targ_acc = &targ_spec->spec[0];

	if (core_relo_is_enumval_based(local_spec->relo_kind)) {
		size_t local_essent_len, targ_essent_len;
		const struct btf_enum *e;
		const char *targ_name;

		/* has to resolve to an enum */
		targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id, &targ_id);
		if (!btf_is_enum(targ_type))
			return 0;

		local_essent_len = bpf_core_essential_name_len(local_acc->name);

		for (i = 0, e = btf_enum(targ_type); i < btf_vlen(targ_type); i++, e++) {
			targ_name = btf__name_by_offset(targ_spec->btf, e->name_off);
			targ_essent_len = bpf_core_essential_name_len(targ_name);
			if (targ_essent_len != local_essent_len)
				continue;
			if (strncmp(local_acc->name, targ_name, local_essent_len) == 0) {
				targ_acc->type_id = targ_id;
				targ_acc->idx = i;
				targ_acc->name = targ_name;
				targ_spec->len++;
				targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
				targ_spec->raw_len++;
				return 1;
			}
		}
		return 0;
	}

	if (!core_relo_is_field_based(local_spec->relo_kind))
		return -EINVAL;

	for (i = 0; i < local_spec->len; i++, local_acc++, targ_acc++) {
		targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id,
						   &targ_id);
		if (!targ_type)
			return -EINVAL;

		if (local_acc->name) {
			matched = bpf_core_match_member(local_spec->btf,
							local_acc,
							targ_btf, targ_id,
							targ_spec, &targ_id);
			if (matched <= 0)
				return matched;
		} else {
			/* for i=0, targ_id is already an array element type
			 * (because it's the original struct); for others we
			 * need to find the array element type first
			 */
			if (i > 0) {
				const struct btf_array *a;
				bool flex;

				if (!btf_is_array(targ_type))
					return 0;

				a = btf_array(targ_type);
				flex = is_flex_arr(targ_btf, targ_acc - 1, a);
				if (!flex && local_acc->idx >= a->nelems)
					return 0;
				if (!skip_mods_and_typedefs(targ_btf, a->type,
							    &targ_id))
					return -EINVAL;
			}

			/* too deep struct/union/array nesting */
			if (targ_spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
				return -E2BIG;

			targ_acc->type_id = targ_id;
			targ_acc->idx = local_acc->idx;
			targ_acc->name = NULL;
			targ_spec->len++;
			targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
			targ_spec->raw_len++;

			sz = btf__resolve_size(targ_btf, targ_id);
			if (sz < 0)
				return sz;
			targ_spec->bit_offset += local_acc->idx * sz * 8;
		}
	}

	return 1;
}

static int bpf_core_calc_field_relo(const char *prog_name,
				    const struct bpf_core_relo *relo,
				    const struct bpf_core_spec *spec,
				    __u32 *val, __u32 *field_sz, __u32 *type_id,
				    bool *validate)
{
	const struct bpf_core_accessor *acc;
	const struct btf_type *t;
	__u32 byte_off, byte_sz, bit_off, bit_sz, field_type_id;
	const struct btf_member *m;
	const struct btf_type *mt;
	bool bitfield;
	__s64 sz;

	*field_sz = 0;

	if (relo->kind == BPF_CORE_FIELD_EXISTS) {
		*val = spec ? 1 : 0;
		return 0;
	}

	if (!spec)
		return -EUCLEAN; /* request instruction poisoning */

	acc = &spec->spec[spec->len - 1];
	t = btf_type_by_id(spec->btf, acc->type_id);

	/* a[n] accessor needs special handling */
	if (!acc->name) {
		if (relo->kind == BPF_CORE_FIELD_BYTE_OFFSET) {
			*val = spec->bit_offset / 8;
			/* remember field size for load/store mem size */
			sz = btf__resolve_size(spec->btf, acc->type_id);
			if (sz < 0)
				return -EINVAL;
			*field_sz = sz;
			*type_id = acc->type_id;
		} else if (relo->kind == BPF_CORE_FIELD_BYTE_SIZE) {
			sz = btf__resolve_size(spec->btf, acc->type_id);
			if (sz < 0)
				return -EINVAL;
			*val = sz;
		} else {
			pr_warn("prog '%s': relo %d at insn #%d can't be applied to array access\n",
				prog_name, relo->kind, relo->insn_off / 8);
			return -EINVAL;
		}
		if (validate)
			*validate = true;
		return 0;
	}

	m = btf_members(t) + acc->idx;
	mt = skip_mods_and_typedefs(spec->btf, m->type, &field_type_id);
	bit_off = spec->bit_offset;
	bit_sz = btf_member_bitfield_size(t, acc->idx);

	bitfield = bit_sz > 0;
	if (bitfield) {
		byte_sz = mt->size;
		byte_off = bit_off / 8 / byte_sz * byte_sz;
		/* figure out smallest int size necessary for bitfield load */
		while (bit_off + bit_sz - byte_off * 8 > byte_sz * 8) {
			if (byte_sz >= 8) {
				/* bitfield can't be read with 64-bit read */
				pr_warn("prog '%s': relo %d at insn #%d can't be satisfied for bitfield\n",
					prog_name, relo->kind, relo->insn_off / 8);
				return -E2BIG;
			}
			byte_sz *= 2;
			byte_off = bit_off / 8 / byte_sz * byte_sz;
		}
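		/* E.g. (hypothetical numbers): a 4-bit bitfield at bit_off 30
		 * inside a 4-byte int doesn't fit the aligned 4-byte load at
		 * byte_off 0 (30 + 4 > 32), so byte_sz is doubled to 8 and
		 * the field is fetched with a single aligned 8-byte load.
		 */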
	} else {
		sz = btf__resolve_size(spec->btf, field_type_id);
		if (sz < 0)
			return -EINVAL;
		byte_sz = sz;
		byte_off = spec->bit_offset / 8;
		bit_sz = byte_sz * 8;
	}

	/* for bitfields, all the relocatable aspects are ambiguous and we
	 * might disagree with compiler, so turn off validation of expected
	 * value, except for signedness
	 */
	if (validate)
		*validate = !bitfield;

	switch (relo->kind) {
	case BPF_CORE_FIELD_BYTE_OFFSET:
		*val = byte_off;
		if (!bitfield) {
			*field_sz = byte_sz;
			*type_id = field_type_id;
		}
		break;
	case BPF_CORE_FIELD_BYTE_SIZE:
		*val = byte_sz;
		break;
	case BPF_CORE_FIELD_SIGNED:
		/* enums will be assumed unsigned */
		*val = btf_is_enum(mt) ||
		       (btf_int_encoding(mt) & BTF_INT_SIGNED);
		if (validate)
			*validate = true; /* signedness is never ambiguous */
		break;
	case BPF_CORE_FIELD_LSHIFT_U64:
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
		*val = 64 - (bit_off + bit_sz - byte_off * 8);
#else
		*val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8);
#endif
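		/* Continuing the hypothetical example above (little-endian,
		 * byte_off = 0, byte_sz = 8, bit_off = 30, bit_sz = 4):
		 * lshift = 64 - 34 = 30 and rshift (below) = 64 - 4 = 60, so
		 * the loaded u64 v yields the bitfield as (v << 30) >> 60.
		 */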
		break;
	case BPF_CORE_FIELD_RSHIFT_U64:
		*val = 64 - bit_sz;
		if (validate)
			*validate = true; /* right shift is never ambiguous */
		break;
	case BPF_CORE_FIELD_EXISTS:
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int bpf_core_calc_type_relo(const struct bpf_core_relo *relo,
				   const struct bpf_core_spec *spec,
				   __u32 *val, bool *validate)
{
	__s64 sz;

	/* by default, always check expected value in bpf_insn */
	if (validate)
		*validate = true;

	/* type-based relos return zero when target type is not found */
	if (!spec) {
		*val = 0;
		return 0;
	}

	switch (relo->kind) {
	case BPF_CORE_TYPE_ID_TARGET:
		*val = spec->root_type_id;
		/* type ID, embedded in bpf_insn, might change during linking,
		 * so enforcing it is pointless
		 */
		if (validate)
			*validate = false;
		break;
	case BPF_CORE_TYPE_EXISTS:
		*val = 1;
		break;
	case BPF_CORE_TYPE_SIZE:
		sz = btf__resolve_size(spec->btf, spec->root_type_id);
		if (sz < 0)
			return -EINVAL;
		*val = sz;
		break;
	case BPF_CORE_TYPE_ID_LOCAL:
	/* BPF_CORE_TYPE_ID_LOCAL is handled specially and shouldn't get here */
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int bpf_core_calc_enumval_relo(const struct bpf_core_relo *relo,
				      const struct bpf_core_spec *spec,
				      __u32 *val)
{
	const struct btf_type *t;
	const struct btf_enum *e;

	switch (relo->kind) {
	case BPF_CORE_ENUMVAL_EXISTS:
		*val = spec ? 1 : 0;
		break;
	case BPF_CORE_ENUMVAL_VALUE:
		if (!spec)
			return -EUCLEAN; /* request instruction poisoning */
		t = btf_type_by_id(spec->btf, spec->spec[0].type_id);
		e = btf_enum(t) + spec->spec[0].idx;
		*val = e->val;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
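
/* E.g. (hypothetical): if local BTF defines 'enum policy { STRICT = 1 }' but
 * the target kernel's BTF has STRICT = 2, an ENUMVAL_VALUE relocation patches
 * the instruction's immediate from 1 to 2, while ENUMVAL_EXISTS resolves to
 * 1 or 0 depending on whether a matching enumerator was found at all.
 */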

/* Calculate original and target relocation values, given local and target
 * specs and relocation kind. These values are calculated for each candidate.
 * If there are multiple candidates, resulting values should all be consistent
 * with each other. Otherwise, libbpf will refuse to proceed due to ambiguity.
 * If instruction has to be poisoned, *poison will be set to true.
 */
static int bpf_core_calc_relo(const char *prog_name,
			      const struct bpf_core_relo *relo,
			      int relo_idx,
			      const struct bpf_core_spec *local_spec,
			      const struct bpf_core_spec *targ_spec,
			      struct bpf_core_relo_res *res)
{
	int err = -EOPNOTSUPP;

	res->orig_val = 0;
	res->new_val = 0;
	res->poison = false;
	res->validate = true;
	res->fail_memsz_adjust = false;
	res->orig_sz = res->new_sz = 0;
	res->orig_type_id = res->new_type_id = 0;

	if (core_relo_is_field_based(relo->kind)) {
		err = bpf_core_calc_field_relo(prog_name, relo, local_spec,
					       &res->orig_val, &res->orig_sz,
					       &res->orig_type_id, &res->validate);
		err = err ?: bpf_core_calc_field_relo(prog_name, relo, targ_spec,
						      &res->new_val, &res->new_sz,
						      &res->new_type_id, NULL);
		if (err)
			goto done;
		/* Validate if it's safe to adjust load/store memory size.
		 * Adjustments are performed only if original and new memory
		 * sizes differ.
		 */
		res->fail_memsz_adjust = false;
		if (res->orig_sz != res->new_sz) {
			const struct btf_type *orig_t, *new_t;

			orig_t = btf_type_by_id(local_spec->btf, res->orig_type_id);
			new_t = btf_type_by_id(targ_spec->btf, res->new_type_id);

			/* There are two use cases in which it's safe to
			 * adjust load/store's mem size:
			 *   - reading a 32-bit kernel pointer, while on the
			 *   BPF side pointers are always 64-bit; in this case
			 *   it's safe to "downsize" instruction size due to
			 *   pointer being treated as unsigned integer with
			 *   zero-extended upper 32-bits;
			 *   - reading unsigned integers, again because
			 *   zero-extension preserves the value correctly.
			 *
			 * In all other cases it's incorrect to attempt to
			 * load/store the field because the read value will be
			 * incorrect, so we poison the relocated instruction.
			 */
			if (btf_is_ptr(orig_t) && btf_is_ptr(new_t))
				goto done;
			if (btf_is_int(orig_t) && btf_is_int(new_t) &&
			    btf_int_encoding(orig_t) != BTF_INT_SIGNED &&
			    btf_int_encoding(new_t) != BTF_INT_SIGNED)
				goto done;

			/* mark as invalid mem size adjustment, but this will
			 * only be checked for LDX/STX/ST insns
			 */
			res->fail_memsz_adjust = true;
		}
	} else if (core_relo_is_type_based(relo->kind)) {
		err = bpf_core_calc_type_relo(relo, local_spec, &res->orig_val, &res->validate);
		err = err ?: bpf_core_calc_type_relo(relo, targ_spec, &res->new_val, NULL);
	} else if (core_relo_is_enumval_based(relo->kind)) {
		err = bpf_core_calc_enumval_relo(relo, local_spec, &res->orig_val);
		err = err ?: bpf_core_calc_enumval_relo(relo, targ_spec, &res->new_val);
	}

done:
	if (err == -EUCLEAN) {
		/* EUCLEAN is used to signal instruction poisoning request */
		res->poison = true;
		err = 0;
	} else if (err == -EOPNOTSUPP) {
		/* EOPNOTSUPP means unknown/unsupported relocation */
		pr_warn("prog '%s': relo #%d: unrecognized CO-RE relocation %s (%d) at insn #%d\n",
			prog_name, relo_idx, core_relo_kind_str(relo->kind),
			relo->kind, relo->insn_off / 8);
	}

	return err;
}

/*
 * Turn instruction for which CO-RE relocation failed into invalid one with
 * distinct signature.
 */
static void bpf_core_poison_insn(const char *prog_name, int relo_idx,
				 int insn_idx, struct bpf_insn *insn)
{
	pr_debug("prog '%s': relo #%d: substituting insn #%d w/ invalid insn\n",
		 prog_name, relo_idx, insn_idx);
	insn->code = BPF_JMP | BPF_CALL;
	insn->dst_reg = 0;
	insn->src_reg = 0;
	insn->off = 0;
	/* if this instruction is reachable (not dead code),
	 * verifier will complain with the following message:
	 * invalid func unknown#195896080
	 */
	insn->imm = 195896080; /* => 0xbad2310 => "bad relo" */
}

static int insn_bpf_size_to_bytes(struct bpf_insn *insn)
{
	switch (BPF_SIZE(insn->code)) {
	case BPF_DW: return 8;
	case BPF_W: return 4;
	case BPF_H: return 2;
	case BPF_B: return 1;
	default: return -1;
	}
}

static int insn_bytes_to_bpf_size(__u32 sz)
{
	switch (sz) {
	case 8: return BPF_DW;
	case 4: return BPF_W;
	case 2: return BPF_H;
	case 1: return BPF_B;
	default: return -1;
	}
}
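
/* E.g.: if the BPF-side struct declares a field as a 64-bit pointer but the
 * matching kernel field is a 32-bit pointer, bpf_core_patch_insn() below
 * rewrites rX = *(u64 *)(rY + off) into rX = *(u32 *)(rY + off) by swapping
 * BPF_DW for BPF_W in insn->code, keeping the mode and class bits intact.
 */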

/*
 * Patch relocatable BPF instruction.
 *
 * Patched value is determined by relocation kind and target specification.
 * For existence relocations target spec will be NULL if field/type is not found.
 * Expected insn->imm value is determined using relocation kind and local
 * spec, and is checked before patching instruction. If actual insn->imm value
 * is wrong, bail out with error.
 *
 * Currently supported classes of BPF instruction are:
 * 1. rX = <imm> (assignment with immediate operand);
 * 2. rX += <imm> (arithmetic operations with immediate operand);
 * 3. rX = <imm64> (load with 64-bit immediate value);
 * 4. rX = *(T *)(rY + <off>), where T is one of {u8, u16, u32, u64};
 * 5. *(T *)(rX + <off>) = rY, where T is one of {u8, u16, u32, u64};
 * 6. *(T *)(rX + <off>) = <imm>, where T is one of {u8, u16, u32, u64}.
 */
int bpf_core_patch_insn(const char *prog_name, struct bpf_insn *insn,
			int insn_idx, const struct bpf_core_relo *relo,
			int relo_idx, const struct bpf_core_relo_res *res)
{
	__u32 orig_val, new_val;
	__u8 class;

	class = BPF_CLASS(insn->code);

	if (res->poison) {
poison:
		/* poison second part of ldimm64 to avoid confusing error from
		 * verifier about "unknown opcode 00"
		 */
		if (is_ldimm64_insn(insn))
			bpf_core_poison_insn(prog_name, relo_idx, insn_idx + 1, insn + 1);
		bpf_core_poison_insn(prog_name, relo_idx, insn_idx, insn);
		return 0;
	}

	orig_val = res->orig_val;
	new_val = res->new_val;

	switch (class) {
	case BPF_ALU:
	case BPF_ALU64:
		if (BPF_SRC(insn->code) != BPF_K)
			return -EINVAL;
		if (res->validate && insn->imm != orig_val) {
			pr_warn("prog '%s': relo #%d: unexpected insn #%d (ALU/ALU64) value: got %u, exp %u -> %u\n",
				prog_name, relo_idx,
				insn_idx, insn->imm, orig_val, new_val);
			return -EINVAL;
		}
		orig_val = insn->imm;
		insn->imm = new_val;
		pr_debug("prog '%s': relo #%d: patched insn #%d (ALU/ALU64) imm %u -> %u\n",
			 prog_name, relo_idx, insn_idx,
			 orig_val, new_val);
		break;
	case BPF_LDX:
	case BPF_ST:
	case BPF_STX:
		if (res->validate && insn->off != orig_val) {
			pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDX/ST/STX) value: got %u, exp %u -> %u\n",
				prog_name, relo_idx, insn_idx, insn->off, orig_val, new_val);
			return -EINVAL;
		}
		if (new_val > SHRT_MAX) {
			pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) value too big: %u\n",
				prog_name, relo_idx, insn_idx, new_val);
			return -ERANGE;
		}
		if (res->fail_memsz_adjust) {
			pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) accesses field incorrectly. "
				"Make sure you are accessing pointers, unsigned integers, or fields of matching type and size.\n",
				prog_name, relo_idx, insn_idx);
			goto poison;
		}

		orig_val = insn->off;
		insn->off = new_val;
		pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) off %u -> %u\n",
			 prog_name, relo_idx, insn_idx, orig_val, new_val);

		if (res->new_sz != res->orig_sz) {
			int insn_bytes_sz, insn_bpf_sz;

			insn_bytes_sz = insn_bpf_size_to_bytes(insn);
			if (insn_bytes_sz != res->orig_sz) {
				pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) unexpected mem size: got %d, exp %u\n",
					prog_name, relo_idx, insn_idx, insn_bytes_sz, res->orig_sz);
				return -EINVAL;
			}

			insn_bpf_sz = insn_bytes_to_bpf_size(res->new_sz);
			if (insn_bpf_sz < 0) {
				pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) invalid new mem size: %u\n",
					prog_name, relo_idx, insn_idx, res->new_sz);
				return -EINVAL;
			}

			insn->code = BPF_MODE(insn->code) | insn_bpf_sz | BPF_CLASS(insn->code);
			pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) mem_sz %u -> %u\n",
				 prog_name, relo_idx, insn_idx, res->orig_sz, res->new_sz);
		}
		break;
	case BPF_LD: {
		__u64 imm;

		if (!is_ldimm64_insn(insn) ||
		    insn[0].src_reg != 0 || insn[0].off != 0 ||
		    insn[1].code != 0 || insn[1].dst_reg != 0 ||
		    insn[1].src_reg != 0 || insn[1].off != 0) {
			pr_warn("prog '%s': relo #%d: insn #%d (LDIMM64) has unexpected form\n",
				prog_name, relo_idx, insn_idx);
			return -EINVAL;
		}

		imm = insn[0].imm + ((__u64)insn[1].imm << 32);
		if (res->validate && imm != orig_val) {
			pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDIMM64) value: got %llu, exp %u -> %u\n",
				prog_name, relo_idx,
				insn_idx, (unsigned long long)imm,
				orig_val, new_val);
			return -EINVAL;
		}

		insn[0].imm = new_val;
		insn[1].imm = 0; /* currently only 32-bit values are supported */
		pr_debug("prog '%s': relo #%d: patched insn #%d (LDIMM64) imm64 %llu -> %u\n",
			 prog_name, relo_idx, insn_idx,
			 (unsigned long long)imm, new_val);
		break;
	}
	default:
		pr_warn("prog '%s': relo #%d: trying to relocate unrecognized insn #%d, code:0x%x, src:0x%x, dst:0x%x, off:0x%x, imm:0x%x\n",
			prog_name, relo_idx, insn_idx, insn->code,
			insn->src_reg, insn->dst_reg, insn->off, insn->imm);
		return -EINVAL;
	}

	return 0;
}

/* Output spec definition in the format:
 * [<type-id>] (<type-name>) + <raw-spec> => <offset>@<spec>,
 * where <spec> is a C-syntax view of recorded field access, e.g.: x.a[3].b
 */
static void bpf_core_dump_spec(const char *prog_name, int level, const struct bpf_core_spec *spec)
{
	const struct btf_type *t;
	const struct btf_enum *e;
	const char *s;
	__u32 type_id;
	int i;

	type_id = spec->root_type_id;
	t = btf_type_by_id(spec->btf, type_id);
	s = btf__name_by_offset(spec->btf, t->name_off);

	libbpf_print(level, "[%u] %s %s", type_id, btf_kind_str(t), str_is_empty(s) ? "<anon>" : s);

	if (core_relo_is_type_based(spec->relo_kind))
		return;

	if (core_relo_is_enumval_based(spec->relo_kind)) {
		t = skip_mods_and_typedefs(spec->btf, type_id, NULL);
		e = btf_enum(t) + spec->raw_spec[0];
		s = btf__name_by_offset(spec->btf, e->name_off);

		libbpf_print(level, "::%s = %u", s, e->val);
		return;
	}

	if (core_relo_is_field_based(spec->relo_kind)) {
		for (i = 0; i < spec->len; i++) {
			if (spec->spec[i].name)
				libbpf_print(level, ".%s", spec->spec[i].name);
			else if (i > 0 || spec->spec[i].idx > 0)
				libbpf_print(level, "[%u]", spec->spec[i].idx);
		}

		libbpf_print(level, " (");
		for (i = 0; i < spec->raw_len; i++)
			libbpf_print(level, "%s%d", i == 0 ? "" : ":", spec->raw_spec[i]);

		if (spec->bit_offset % 8)
			libbpf_print(level, " @ offset %u.%u)",
				     spec->bit_offset / 8, spec->bit_offset % 8);
		else
			libbpf_print(level, " @ offset %u)", spec->bit_offset / 8);
		return;
	}
}
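
/* E.g., for the 'struct sample' relocation used earlier in this file, the
 * dumped spec would look roughly like (the type ID is arbitrary):
 *
 *   [7] struct sample.a[3] (0:1:2:3 @ offset 24)
 */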

/*
 * Calculate CO-RE relocation target result.
 *
 * The outline and important points of the algorithm:
 * 1. For given local type, find corresponding candidate target types.
 *    Candidate type is a type with the same "essential" name, ignoring
 *    everything after the last triple underscore (___). E.g., `sample`,
 *    `sample___flavor_one`, `sample___flavor_another_one`, are all candidates
 *    for each other. Names with triple underscore are referred to as
 *    "flavors" and are useful, among other things, for specifying/supporting
 *    incompatible variations of the same kernel struct, which might differ
 *    between different kernel versions and/or build configurations (see the
 *    sketch after this comment).
 *
 *    N.B. Struct "flavors" could be generated by bpftool's BTF-to-C
 *    converter, when deduplicated BTF of a kernel still contains more than
 *    one distinct type with the same name. In that case, ___2, ___3, etc
 *    are appended starting from the second name conflict. But struct flavors
 *    are also useful when defined "locally", in a BPF program, to extract the
 *    same data despite incompatible changes between different kernel
 *    versions/configurations. For instance, to handle field renames between
 *    kernel versions, one can define two flavors sharing the same common
 *    name and use conditional relocations to extract that field, depending
 *    on the target kernel version.
 * 2. For each candidate type, try to match local specification to this
 *    candidate target type. Matching involves finding corresponding
 *    high-level spec accessors, meaning that all named fields should match,
 *    as well as all array accesses should be within the actual bounds. Also,
 *    types should be compatible (see bpf_core_fields_are_compat for details).
 * 3. It is supported and expected that there might be multiple flavors
 *    matching the spec. As long as all the specs resolve to the same set of
 *    offsets across all candidates, there is no error. If there is any
 *    ambiguity, CO-RE relocation will fail. This is necessary to accommodate
 *    imperfections of BTF deduplication, which can cause slight duplication
 *    of the same BTF type, if some directly or indirectly referenced (by
 *    pointer) type gets resolved to different actual types in different
 *    object files. If such a situation occurs, deduplicated BTF will end up
 *    with two (or more) structurally identical types, which differ only in
 *    the types they refer to through pointers. This should be OK in most
 *    cases and is not an error.
 * 4. Candidate types search is performed by linearly scanning through all
 *    types in target BTF. It is anticipated that this is overall more
 *    efficient memory-wise and not significantly worse (if not better)
 *    CPU-wise compared to prebuilding a map from all local type names to
 *    a list of candidate type names. It's also sped up by caching the
 *    resolved list of matching candidates for each local "root" type ID that
 *    has at least one bpf_core_relo associated with it. This list is shared
 *    between multiple relocations for the same type ID and is updated as some
 *    of the candidates are pruned due to structural incompatibility.
 */
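/* A sketch of the "flavor" technique from point 1 (names are illustrative,
 * though kernels around v4.20 really did rename thread_struct's 'fs' field
 * to 'fsbase'):
 *
 *   struct thread_struct___old { unsigned long fs; };
 *   struct thread_struct___new { unsigned long fsbase; };
 *
 *   if (bpf_core_field_exists(((struct thread_struct___old *)thr)->fs))
 *           base = BPF_CORE_READ((struct thread_struct___old *)thr, fs);
 *   else
 *           base = BPF_CORE_READ((struct thread_struct___new *)thr, fsbase);
 *
 * Both flavors are candidates for the kernel's 'struct thread_struct';
 * field existence checks then select whichever branch relocates successfully.
 */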
int bpf_core_calc_relo_insn(const char *prog_name,
			    const struct bpf_core_relo *relo,
			    int relo_idx,
			    const struct btf *local_btf,
			    struct bpf_core_cand_list *cands,
			    struct bpf_core_spec *specs_scratch,
			    struct bpf_core_relo_res *targ_res)
{
	struct bpf_core_spec *local_spec = &specs_scratch[0];
	struct bpf_core_spec *cand_spec = &specs_scratch[1];
	struct bpf_core_spec *targ_spec = &specs_scratch[2];
	struct bpf_core_relo_res cand_res;
	const struct btf_type *local_type;
	const char *local_name;
	__u32 local_id;
	const char *spec_str;
	int i, j, err;

	local_id = relo->type_id;
	local_type = btf_type_by_id(local_btf, local_id);
	local_name = btf__name_by_offset(local_btf, local_type->name_off);
	if (!local_name)
		return -EINVAL;

	spec_str = btf__name_by_offset(local_btf, relo->access_str_off);
	if (str_is_empty(spec_str))
		return -EINVAL;

	err = bpf_core_parse_spec(prog_name, local_btf, local_id, spec_str,
				  relo->kind, local_spec);
	if (err) {
		pr_warn("prog '%s': relo #%d: parsing [%d] %s %s + %s failed: %d\n",
			prog_name, relo_idx, local_id, btf_kind_str(local_type),
			str_is_empty(local_name) ? "<anon>" : local_name,
			spec_str, err);
		return -EINVAL;
	}

	pr_debug("prog '%s': relo #%d: kind <%s> (%d), spec is ", prog_name,
		 relo_idx, core_relo_kind_str(relo->kind), relo->kind);
	bpf_core_dump_spec(prog_name, LIBBPF_DEBUG, local_spec);
	libbpf_print(LIBBPF_DEBUG, "\n");

	/* TYPE_ID_LOCAL relo is special and doesn't need candidate search */
	if (relo->kind == BPF_CORE_TYPE_ID_LOCAL) {
		/* bpf_insn's imm value could get out of sync during linking */
		memset(targ_res, 0, sizeof(*targ_res));
		targ_res->validate = false;
		targ_res->poison = false;
		targ_res->orig_val = local_spec->root_type_id;
		targ_res->new_val = local_spec->root_type_id;
		return 0;
	}

	/* libbpf doesn't support candidate search for anonymous types */
	if (str_is_empty(spec_str)) {
		pr_warn("prog '%s': relo #%d: <%s> (%d) relocation doesn't support anonymous types\n",
			prog_name, relo_idx, core_relo_kind_str(relo->kind), relo->kind);
		return -EOPNOTSUPP;
	}

	for (i = 0, j = 0; i < cands->len; i++) {
		err = bpf_core_spec_match(local_spec, cands->cands[i].btf,
					  cands->cands[i].id, cand_spec);
		if (err < 0) {
			pr_warn("prog '%s': relo #%d: error matching candidate #%d ",
				prog_name, relo_idx, i);
			bpf_core_dump_spec(prog_name, LIBBPF_WARN, cand_spec);
			libbpf_print(LIBBPF_WARN, ": %d\n", err);
			return err;
		}

		pr_debug("prog '%s': relo #%d: %s candidate #%d ", prog_name,
			 relo_idx, err == 0 ? "non-matching" : "matching", i);
		bpf_core_dump_spec(prog_name, LIBBPF_DEBUG, cand_spec);
		libbpf_print(LIBBPF_DEBUG, "\n");

		if (err == 0)
			continue;

		err = bpf_core_calc_relo(prog_name, relo, relo_idx, local_spec, cand_spec, &cand_res);
		if (err)
			return err;

		if (j == 0) {
			*targ_res = cand_res;
			*targ_spec = *cand_spec;
		} else if (cand_spec->bit_offset != targ_spec->bit_offset) {
			/* if there are many field relo candidates, they
			 * should all resolve to the same bit offset
			 */
			pr_warn("prog '%s': relo #%d: field offset ambiguity: %u != %u\n",
				prog_name, relo_idx, cand_spec->bit_offset,
				targ_spec->bit_offset);
			return -EINVAL;
		} else if (cand_res.poison != targ_res->poison ||
			   cand_res.new_val != targ_res->new_val) {
			/* all candidates should result in the same relocation
			 * decision and value, otherwise it's dangerous to
			 * proceed due to ambiguity
			 */
			pr_warn("prog '%s': relo #%d: relocation decision ambiguity: %s %u != %s %u\n",
				prog_name, relo_idx,
				cand_res.poison ? "failure" : "success", cand_res.new_val,
				targ_res->poison ? "failure" : "success", targ_res->new_val);
			return -EINVAL;
		}

		cands->cands[j++] = cands->cands[i];
	}

	/*
	 * For BPF_CORE_FIELD_EXISTS relocations, or when the BPF program in
	 * use has field existence checks or kernel version/config checks,
	 * it's expected that we might not find any candidates. In this case,
	 * if the field wasn't found in any candidate, the list of candidates
	 * shouldn't change at all; we'll just handle the relocation
	 * appropriately, depending on the relo's kind.
	 */
	if (j > 0)
		cands->len = j;

	/*
	 * If no candidates were found, it might be either a programmer error
	 * or an expected case, depending on whether the instruction with the
	 * relocation is guarded in some way that makes it unreachable (dead
	 * code) if the relocation can't be resolved. This is handled in
	 * bpf_core_patch_insn() uniformly by replacing that instruction with
	 * a BPF helper call insn (using an invalid helper ID). If that
	 * instruction is indeed unreachable, then it will be ignored and
	 * eliminated by the verifier. If it was an error, then the verifier
	 * will complain and point to a specific instruction number in its
	 * log.
	 */
	if (j == 0) {
		pr_debug("prog '%s': relo #%d: no matching targets found\n",
			 prog_name, relo_idx);

		/* calculate single target relo result explicitly */
		err = bpf_core_calc_relo(prog_name, relo, relo_idx, local_spec, NULL, targ_res);
		if (err)
			return err;
	}

	return 0;
}