/* linux/tools/objtool/check.c */
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
   4 */
   5
   6#include <string.h>
   7#include <stdlib.h>
   8
   9#include <arch/elf.h>
  10#include <objtool/builtin.h>
  11#include <objtool/cfi.h>
  12#include <objtool/arch.h>
  13#include <objtool/check.h>
  14#include <objtool/special.h>
  15#include <objtool/warn.h>
  16#include <objtool/endianness.h>
  17
  18#include <linux/objtool.h>
  19#include <linux/hashtable.h>
  20#include <linux/kernel.h>
  21#include <linux/static_call_types.h>
  22
/*
 * One entry in an instruction's list of alternative code sequences
 * (e.g. from .altinstructions processing elsewhere in this file).
 */
struct alternative {
	struct list_head list;		/* node in the owning insn's ->alts list */
	struct instruction *insn;	/* first instruction of the alternative sequence */
	bool skip_orig;			/* if set, don't validate the original path — presumably for jump-label style alts; confirm at use site */
};

/* Arch-defined CFI state at function entry (filled in by arch code). */
struct cfi_init_state initial_func_cfi;
  30
  31struct instruction *find_insn(struct objtool_file *file,
  32                              struct section *sec, unsigned long offset)
  33{
  34        struct instruction *insn;
  35
  36        hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) {
  37                if (insn->sec == sec && insn->offset == offset)
  38                        return insn;
  39        }
  40
  41        return NULL;
  42}
  43
  44static struct instruction *next_insn_same_sec(struct objtool_file *file,
  45                                              struct instruction *insn)
  46{
  47        struct instruction *next = list_next_entry(insn, list);
  48
  49        if (!next || &next->list == &file->insn_list || next->sec != insn->sec)
  50                return NULL;
  51
  52        return next;
  53}
  54
  55static struct instruction *next_insn_same_func(struct objtool_file *file,
  56                                               struct instruction *insn)
  57{
  58        struct instruction *next = list_next_entry(insn, list);
  59        struct symbol *func = insn->func;
  60
  61        if (!func)
  62                return NULL;
  63
  64        if (&next->list != &file->insn_list && next->func == func)
  65                return next;
  66
  67        /* Check if we're already in the subfunction: */
  68        if (func == func->cfunc)
  69                return NULL;
  70
  71        /* Move to the subfunction: */
  72        return find_insn(file, func->cfunc->sec, func->cfunc->offset);
  73}
  74
  75static struct instruction *prev_insn_same_sym(struct objtool_file *file,
  76                                               struct instruction *insn)
  77{
  78        struct instruction *prev = list_prev_entry(insn, list);
  79
  80        if (&prev->list != &file->insn_list && prev->func == insn->func)
  81                return prev;
  82
  83        return NULL;
  84}
  85
/*
 * Iterate over every instruction of @func, continuing into its split-out
 * subfunction (func->cfunc) via next_insn_same_func().
 */
#define func_for_each_insn(file, func, insn)                            \
	for (insn = find_insn(file, func->sec, func->offset);           \
	     insn;                                                      \
	     insn = next_insn_same_func(file, insn))

/*
 * Iterate over the instructions inside @sym's byte range
 * [sym->offset, sym->offset + sym->len), without following the
 * subfunction link.
 */
#define sym_for_each_insn(file, sym, insn)                              \
	for (insn = find_insn(file, sym->sec, sym->offset);             \
	     insn && &insn->list != &file->insn_list &&                 \
		insn->sec == sym->sec &&                                \
		insn->offset < sym->offset + sym->len;                  \
	     insn = list_next_entry(insn, list))

/*
 * Iterate backwards starting from (but not including) the current @insn,
 * stopping at the first instruction of @sym.
 */
#define sym_for_each_insn_continue_reverse(file, sym, insn)             \
	for (insn = list_prev_entry(insn, list);                        \
	     &insn->list != &file->insn_list &&                         \
		insn->sec == sym->sec && insn->offset >= sym->offset;   \
	     insn = list_prev_entry(insn, list))

/* Iterate from @insn (inclusive) to the end of its section. */
#define sec_for_each_insn_from(file, insn)                              \
	for (; insn; insn = next_insn_same_sec(file, insn))

/* Iterate from the instruction after @insn to the end of its section. */
#define sec_for_each_insn_continue(file, insn)                          \
	for (insn = next_insn_same_sec(file, insn); insn;               \
	     insn = next_insn_same_sec(file, insn))
 110
 111static bool is_jump_table_jump(struct instruction *insn)
 112{
 113        struct alt_group *alt_group = insn->alt_group;
 114
 115        if (insn->jump_table)
 116                return true;
 117
 118        /* Retpoline alternative for a jump table? */
 119        return alt_group && alt_group->orig_group &&
 120               alt_group->orig_group->first_insn->jump_table;
 121}
 122
 123static bool is_sibling_call(struct instruction *insn)
 124{
 125        /*
 126         * Assume only ELF functions can make sibling calls.  This ensures
 127         * sibling call detection consistency between vmlinux.o and individual
 128         * objects.
 129         */
 130        if (!insn->func)
 131                return false;
 132
 133        /* An indirect jump is either a sibling call or a jump to a table. */
 134        if (insn->type == INSN_JUMP_DYNAMIC)
 135                return !is_jump_table_jump(insn);
 136
 137        /* add_jump_destinations() sets insn->call_dest for sibling calls. */
 138        return (is_static_jump(insn) && insn->call_dest);
 139}
 140
 141/*
 142 * This checks to see if the given function is a "noreturn" function.
 143 *
 144 * For global functions which are outside the scope of this object file, we
 145 * have to keep a manual list of them.
 146 *
 147 * For local functions, we have to detect them manually by simply looking for
 148 * the lack of a return instruction.
 149 */
 150static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
 151                                int recursion)
 152{
 153        int i;
 154        struct instruction *insn;
 155        bool empty = true;
 156
 157        /*
 158         * Unfortunately these have to be hard coded because the noreturn
 159         * attribute isn't provided in ELF data.
 160         */
 161        static const char * const global_noreturns[] = {
 162                "__stack_chk_fail",
 163                "panic",
 164                "do_exit",
 165                "do_task_dead",
 166                "__module_put_and_exit",
 167                "complete_and_exit",
 168                "__reiserfs_panic",
 169                "lbug_with_loc",
 170                "fortify_panic",
 171                "usercopy_abort",
 172                "machine_real_restart",
 173                "rewind_stack_do_exit",
 174                "kunit_try_catch_throw",
 175                "xen_start_kernel",
 176        };
 177
 178        if (!func)
 179                return false;
 180
 181        if (func->bind == STB_WEAK)
 182                return false;
 183
 184        if (func->bind == STB_GLOBAL)
 185                for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
 186                        if (!strcmp(func->name, global_noreturns[i]))
 187                                return true;
 188
 189        if (!func->len)
 190                return false;
 191
 192        insn = find_insn(file, func->sec, func->offset);
 193        if (!insn->func)
 194                return false;
 195
 196        func_for_each_insn(file, func, insn) {
 197                empty = false;
 198
 199                if (insn->type == INSN_RETURN)
 200                        return false;
 201        }
 202
 203        if (empty)
 204                return false;
 205
 206        /*
 207         * A function can have a sibling call instead of a return.  In that
 208         * case, the function's dead-end status depends on whether the target
 209         * of the sibling call returns.
 210         */
 211        func_for_each_insn(file, func, insn) {
 212                if (is_sibling_call(insn)) {
 213                        struct instruction *dest = insn->jump_dest;
 214
 215                        if (!dest)
 216                                /* sibling call to another file */
 217                                return false;
 218
 219                        /* local sibling call */
 220                        if (recursion == 5) {
 221                                /*
 222                                 * Infinite recursion: two functions have
 223                                 * sibling calls to each other.  This is a very
 224                                 * rare case.  It means they aren't dead ends.
 225                                 */
 226                                return false;
 227                        }
 228
 229                        return __dead_end_function(file, dest->func, recursion+1);
 230                }
 231        }
 232
 233        return true;
 234}
 235
/* Non-recursive entry point: is @func a "noreturn" (dead-end) function? */
static bool dead_end_function(struct objtool_file *file, struct symbol *func)
{
	return __dead_end_function(file, func, 0);
}
 240
 241static void init_cfi_state(struct cfi_state *cfi)
 242{
 243        int i;
 244
 245        for (i = 0; i < CFI_NUM_REGS; i++) {
 246                cfi->regs[i].base = CFI_UNDEFINED;
 247                cfi->vals[i].base = CFI_UNDEFINED;
 248        }
 249        cfi->cfa.base = CFI_UNDEFINED;
 250        cfi->drap_reg = CFI_UNDEFINED;
 251        cfi->drap_offset = -1;
 252}
 253
/* Reset @state for validating a fresh branch starting in section @sec. */
static void init_insn_state(struct insn_state *state, struct section *sec)
{
	memset(state, 0, sizeof(*state));
	init_cfi_state(&state->cfi);

	/*
	 * We need the full vmlinux for noinstr validation, otherwise we can
	 * not correctly determine insn->call_dest->sec (external symbols do
	 * not have a section).
	 */
	if (vmlinux && noinstr && sec)
		state->noinstr = sec->noinstr;
}
 267
 268/*
 269 * Call the arch-specific instruction decoder for all the instructions and add
 270 * them to the global instruction list.
 271 */
 272static int decode_instructions(struct objtool_file *file)
 273{
 274        struct section *sec;
 275        struct symbol *func;
 276        unsigned long offset;
 277        struct instruction *insn;
 278        unsigned long nr_insns = 0;
 279        int ret;
 280
 281        for_each_sec(file, sec) {
 282
 283                if (!(sec->sh.sh_flags & SHF_EXECINSTR))
 284                        continue;
 285
 286                if (strcmp(sec->name, ".altinstr_replacement") &&
 287                    strcmp(sec->name, ".altinstr_aux") &&
 288                    strncmp(sec->name, ".discard.", 9))
 289                        sec->text = true;
 290
 291                if (!strcmp(sec->name, ".noinstr.text") ||
 292                    !strcmp(sec->name, ".entry.text"))
 293                        sec->noinstr = true;
 294
 295                for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) {
 296                        insn = malloc(sizeof(*insn));
 297                        if (!insn) {
 298                                WARN("malloc failed");
 299                                return -1;
 300                        }
 301                        memset(insn, 0, sizeof(*insn));
 302                        INIT_LIST_HEAD(&insn->alts);
 303                        INIT_LIST_HEAD(&insn->stack_ops);
 304                        init_cfi_state(&insn->cfi);
 305
 306                        insn->sec = sec;
 307                        insn->offset = offset;
 308
 309                        ret = arch_decode_instruction(file->elf, sec, offset,
 310                                                      sec->sh.sh_size - offset,
 311                                                      &insn->len, &insn->type,
 312                                                      &insn->immediate,
 313                                                      &insn->stack_ops);
 314                        if (ret)
 315                                goto err;
 316
 317                        hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset));
 318                        list_add_tail(&insn->list, &file->insn_list);
 319                        nr_insns++;
 320                }
 321
 322                list_for_each_entry(func, &sec->symbol_list, list) {
 323                        if (func->type != STT_FUNC || func->alias != func)
 324                                continue;
 325
 326                        if (!find_insn(file, sec, func->offset)) {
 327                                WARN("%s(): can't find starting instruction",
 328                                     func->name);
 329                                return -1;
 330                        }
 331
 332                        sym_for_each_insn(file, func, insn)
 333                                insn->func = func;
 334                }
 335        }
 336
 337        if (stats)
 338                printf("nr_insns: %lu\n", nr_insns);
 339
 340        return 0;
 341
 342err:
 343        free(insn);
 344        return ret;
 345}
 346
 347static struct instruction *find_last_insn(struct objtool_file *file,
 348                                          struct section *sec)
 349{
 350        struct instruction *insn = NULL;
 351        unsigned int offset;
 352        unsigned int end = (sec->sh.sh_size > 10) ? sec->sh.sh_size - 10 : 0;
 353
 354        for (offset = sec->sh.sh_size - 1; offset >= end && !insn; offset--)
 355                insn = find_insn(file, sec, offset);
 356
 357        return insn;
 358}
 359
 360/*
 361 * Mark "ud2" instructions and manually annotated dead ends.
 362 */
 363static int add_dead_ends(struct objtool_file *file)
 364{
 365        struct section *sec;
 366        struct reloc *reloc;
 367        struct instruction *insn;
 368
 369        /*
 370         * By default, "ud2" is a dead end unless otherwise annotated, because
 371         * GCC 7 inserts it for certain divide-by-zero cases.
 372         */
 373        for_each_insn(file, insn)
 374                if (insn->type == INSN_BUG)
 375                        insn->dead_end = true;
 376
 377        /*
 378         * Check for manually annotated dead ends.
 379         */
 380        sec = find_section_by_name(file->elf, ".rela.discard.unreachable");
 381        if (!sec)
 382                goto reachable;
 383
 384        list_for_each_entry(reloc, &sec->reloc_list, list) {
 385                if (reloc->sym->type != STT_SECTION) {
 386                        WARN("unexpected relocation symbol type in %s", sec->name);
 387                        return -1;
 388                }
 389                insn = find_insn(file, reloc->sym->sec, reloc->addend);
 390                if (insn)
 391                        insn = list_prev_entry(insn, list);
 392                else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
 393                        insn = find_last_insn(file, reloc->sym->sec);
 394                        if (!insn) {
 395                                WARN("can't find unreachable insn at %s+0x%x",
 396                                     reloc->sym->sec->name, reloc->addend);
 397                                return -1;
 398                        }
 399                } else {
 400                        WARN("can't find unreachable insn at %s+0x%x",
 401                             reloc->sym->sec->name, reloc->addend);
 402                        return -1;
 403                }
 404
 405                insn->dead_end = true;
 406        }
 407
 408reachable:
 409        /*
 410         * These manually annotated reachable checks are needed for GCC 4.4,
 411         * where the Linux unreachable() macro isn't supported.  In that case
 412         * GCC doesn't know the "ud2" is fatal, so it generates code as if it's
 413         * not a dead end.
 414         */
 415        sec = find_section_by_name(file->elf, ".rela.discard.reachable");
 416        if (!sec)
 417                return 0;
 418
 419        list_for_each_entry(reloc, &sec->reloc_list, list) {
 420                if (reloc->sym->type != STT_SECTION) {
 421                        WARN("unexpected relocation symbol type in %s", sec->name);
 422                        return -1;
 423                }
 424                insn = find_insn(file, reloc->sym->sec, reloc->addend);
 425                if (insn)
 426                        insn = list_prev_entry(insn, list);
 427                else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
 428                        insn = find_last_insn(file, reloc->sym->sec);
 429                        if (!insn) {
 430                                WARN("can't find reachable insn at %s+0x%x",
 431                                     reloc->sym->sec->name, reloc->addend);
 432                                return -1;
 433                        }
 434                } else {
 435                        WARN("can't find reachable insn at %s+0x%x",
 436                             reloc->sym->sec->name, reloc->addend);
 437                        return -1;
 438                }
 439
 440                insn->dead_end = false;
 441        }
 442
 443        return 0;
 444}
 445
 446static int create_static_call_sections(struct objtool_file *file)
 447{
 448        struct section *sec;
 449        struct static_call_site *site;
 450        struct instruction *insn;
 451        struct symbol *key_sym;
 452        char *key_name, *tmp;
 453        int idx;
 454
 455        sec = find_section_by_name(file->elf, ".static_call_sites");
 456        if (sec) {
 457                INIT_LIST_HEAD(&file->static_call_list);
 458                WARN("file already has .static_call_sites section, skipping");
 459                return 0;
 460        }
 461
 462        if (list_empty(&file->static_call_list))
 463                return 0;
 464
 465        idx = 0;
 466        list_for_each_entry(insn, &file->static_call_list, call_node)
 467                idx++;
 468
 469        sec = elf_create_section(file->elf, ".static_call_sites", SHF_WRITE,
 470                                 sizeof(struct static_call_site), idx);
 471        if (!sec)
 472                return -1;
 473
 474        idx = 0;
 475        list_for_each_entry(insn, &file->static_call_list, call_node) {
 476
 477                site = (struct static_call_site *)sec->data->d_buf + idx;
 478                memset(site, 0, sizeof(struct static_call_site));
 479
 480                /* populate reloc for 'addr' */
 481                if (elf_add_reloc_to_insn(file->elf, sec,
 482                                          idx * sizeof(struct static_call_site),
 483                                          R_X86_64_PC32,
 484                                          insn->sec, insn->offset))
 485                        return -1;
 486
 487                /* find key symbol */
 488                key_name = strdup(insn->call_dest->name);
 489                if (!key_name) {
 490                        perror("strdup");
 491                        return -1;
 492                }
 493                if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR,
 494                            STATIC_CALL_TRAMP_PREFIX_LEN)) {
 495                        WARN("static_call: trampoline name malformed: %s", key_name);
 496                        return -1;
 497                }
 498                tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN;
 499                memcpy(tmp, STATIC_CALL_KEY_PREFIX_STR, STATIC_CALL_KEY_PREFIX_LEN);
 500
 501                key_sym = find_symbol_by_name(file->elf, tmp);
 502                if (!key_sym) {
 503                        if (!module) {
 504                                WARN("static_call: can't find static_call_key symbol: %s", tmp);
 505                                return -1;
 506                        }
 507
 508                        /*
 509                         * For modules(), the key might not be exported, which
 510                         * means the module can make static calls but isn't
 511                         * allowed to change them.
 512                         *
 513                         * In that case we temporarily set the key to be the
 514                         * trampoline address.  This is fixed up in
 515                         * static_call_add_module().
 516                         */
 517                        key_sym = insn->call_dest;
 518                }
 519                free(key_name);
 520
 521                /* populate reloc for 'key' */
 522                if (elf_add_reloc(file->elf, sec,
 523                                  idx * sizeof(struct static_call_site) + 4,
 524                                  R_X86_64_PC32, key_sym,
 525                                  is_sibling_call(insn) * STATIC_CALL_SITE_TAIL))
 526                        return -1;
 527
 528                idx++;
 529        }
 530
 531        return 0;
 532}
 533
 534static int create_mcount_loc_sections(struct objtool_file *file)
 535{
 536        struct section *sec;
 537        unsigned long *loc;
 538        struct instruction *insn;
 539        int idx;
 540
 541        sec = find_section_by_name(file->elf, "__mcount_loc");
 542        if (sec) {
 543                INIT_LIST_HEAD(&file->mcount_loc_list);
 544                WARN("file already has __mcount_loc section, skipping");
 545                return 0;
 546        }
 547
 548        if (list_empty(&file->mcount_loc_list))
 549                return 0;
 550
 551        idx = 0;
 552        list_for_each_entry(insn, &file->mcount_loc_list, mcount_loc_node)
 553                idx++;
 554
 555        sec = elf_create_section(file->elf, "__mcount_loc", 0, sizeof(unsigned long), idx);
 556        if (!sec)
 557                return -1;
 558
 559        idx = 0;
 560        list_for_each_entry(insn, &file->mcount_loc_list, mcount_loc_node) {
 561
 562                loc = (unsigned long *)sec->data->d_buf + idx;
 563                memset(loc, 0, sizeof(unsigned long));
 564
 565                if (elf_add_reloc_to_insn(file->elf, sec,
 566                                          idx * sizeof(unsigned long),
 567                                          R_X86_64_64,
 568                                          insn->sec, insn->offset))
 569                        return -1;
 570
 571                idx++;
 572        }
 573
 574        return 0;
 575}
 576
 577/*
 578 * Warnings shouldn't be reported for ignored functions.
 579 */
 580static void add_ignores(struct objtool_file *file)
 581{
 582        struct instruction *insn;
 583        struct section *sec;
 584        struct symbol *func;
 585        struct reloc *reloc;
 586
 587        sec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard");
 588        if (!sec)
 589                return;
 590
 591        list_for_each_entry(reloc, &sec->reloc_list, list) {
 592                switch (reloc->sym->type) {
 593                case STT_FUNC:
 594                        func = reloc->sym;
 595                        break;
 596
 597                case STT_SECTION:
 598                        func = find_func_by_offset(reloc->sym->sec, reloc->addend);
 599                        if (!func)
 600                                continue;
 601                        break;
 602
 603                default:
 604                        WARN("unexpected relocation symbol type in %s: %d", sec->name, reloc->sym->type);
 605                        continue;
 606                }
 607
 608                func_for_each_insn(file, func, insn)
 609                        insn->ignore = true;
 610        }
 611}
 612
 613/*
 614 * This is a whitelist of functions that is allowed to be called with AC set.
 615 * The list is meant to be minimal and only contains compiler instrumentation
 616 * ABI and a few functions used to implement *_{to,from}_user() functions.
 617 *
 618 * These functions must not directly change AC, but may PUSHF/POPF.
 619 */
static const char *uaccess_safe_builtin[] = {
	/* KASAN */
	"kasan_report",
	"kasan_check_range",
	/* KASAN out-of-line */
	"__asan_loadN_noabort",
	"__asan_load1_noabort",
	"__asan_load2_noabort",
	"__asan_load4_noabort",
	"__asan_load8_noabort",
	"__asan_load16_noabort",
	"__asan_storeN_noabort",
	"__asan_store1_noabort",
	"__asan_store2_noabort",
	"__asan_store4_noabort",
	"__asan_store8_noabort",
	"__asan_store16_noabort",
	"__kasan_check_read",
	"__kasan_check_write",
	/* KASAN in-line */
	"__asan_report_load_n_noabort",
	"__asan_report_load1_noabort",
	"__asan_report_load2_noabort",
	"__asan_report_load4_noabort",
	"__asan_report_load8_noabort",
	"__asan_report_load16_noabort",
	"__asan_report_store_n_noabort",
	"__asan_report_store1_noabort",
	"__asan_report_store2_noabort",
	"__asan_report_store4_noabort",
	"__asan_report_store8_noabort",
	"__asan_report_store16_noabort",
	/* KCSAN */
	"__kcsan_check_access",
	"kcsan_found_watchpoint",
	"kcsan_setup_watchpoint",
	"kcsan_check_scoped_accesses",
	"kcsan_disable_current",
	"kcsan_enable_current_nowarn",
	/* KCSAN/TSAN */
	"__tsan_func_entry",
	"__tsan_func_exit",
	"__tsan_read_range",
	"__tsan_write_range",
	"__tsan_read1",
	"__tsan_read2",
	"__tsan_read4",
	"__tsan_read8",
	"__tsan_read16",
	"__tsan_write1",
	"__tsan_write2",
	"__tsan_write4",
	"__tsan_write8",
	"__tsan_write16",
	"__tsan_read_write1",
	"__tsan_read_write2",
	"__tsan_read_write4",
	"__tsan_read_write8",
	"__tsan_read_write16",
	"__tsan_atomic8_load",
	"__tsan_atomic16_load",
	"__tsan_atomic32_load",
	"__tsan_atomic64_load",
	"__tsan_atomic8_store",
	"__tsan_atomic16_store",
	"__tsan_atomic32_store",
	"__tsan_atomic64_store",
	"__tsan_atomic8_exchange",
	"__tsan_atomic16_exchange",
	"__tsan_atomic32_exchange",
	"__tsan_atomic64_exchange",
	"__tsan_atomic8_fetch_add",
	"__tsan_atomic16_fetch_add",
	"__tsan_atomic32_fetch_add",
	"__tsan_atomic64_fetch_add",
	"__tsan_atomic8_fetch_sub",
	"__tsan_atomic16_fetch_sub",
	"__tsan_atomic32_fetch_sub",
	"__tsan_atomic64_fetch_sub",
	"__tsan_atomic8_fetch_and",
	"__tsan_atomic16_fetch_and",
	"__tsan_atomic32_fetch_and",
	"__tsan_atomic64_fetch_and",
	"__tsan_atomic8_fetch_or",
	"__tsan_atomic16_fetch_or",
	"__tsan_atomic32_fetch_or",
	"__tsan_atomic64_fetch_or",
	"__tsan_atomic8_fetch_xor",
	"__tsan_atomic16_fetch_xor",
	"__tsan_atomic32_fetch_xor",
	"__tsan_atomic64_fetch_xor",
	"__tsan_atomic8_fetch_nand",
	"__tsan_atomic16_fetch_nand",
	"__tsan_atomic32_fetch_nand",
	"__tsan_atomic64_fetch_nand",
	"__tsan_atomic8_compare_exchange_strong",
	"__tsan_atomic16_compare_exchange_strong",
	"__tsan_atomic32_compare_exchange_strong",
	"__tsan_atomic64_compare_exchange_strong",
	"__tsan_atomic8_compare_exchange_weak",
	"__tsan_atomic16_compare_exchange_weak",
	"__tsan_atomic32_compare_exchange_weak",
	"__tsan_atomic64_compare_exchange_weak",
	"__tsan_atomic8_compare_exchange_val",
	"__tsan_atomic16_compare_exchange_val",
	"__tsan_atomic32_compare_exchange_val",
	"__tsan_atomic64_compare_exchange_val",
	"__tsan_atomic_thread_fence",
	"__tsan_atomic_signal_fence",
	/* KCOV */
	"write_comp_data",
	"check_kcov_mode",
	"__sanitizer_cov_trace_pc",
	"__sanitizer_cov_trace_const_cmp1",
	"__sanitizer_cov_trace_const_cmp2",
	"__sanitizer_cov_trace_const_cmp4",
	"__sanitizer_cov_trace_const_cmp8",
	"__sanitizer_cov_trace_cmp1",
	"__sanitizer_cov_trace_cmp2",
	"__sanitizer_cov_trace_cmp4",
	"__sanitizer_cov_trace_cmp8",
	"__sanitizer_cov_trace_switch",
	/* UBSAN */
	"ubsan_type_mismatch_common",
	"__ubsan_handle_type_mismatch",
	"__ubsan_handle_type_mismatch_v1",
	"__ubsan_handle_shift_out_of_bounds",
	/* misc */
	"csum_partial_copy_generic",
	"copy_mc_fragile",
	"copy_mc_fragile_handle_tail",
	"copy_mc_enhanced_fast_string",
	"ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */
	NULL	/* sentinel: add_uaccess_safe() iterates until NULL */
};
 755
 756static void add_uaccess_safe(struct objtool_file *file)
 757{
 758        struct symbol *func;
 759        const char **name;
 760
 761        if (!uaccess)
 762                return;
 763
 764        for (name = uaccess_safe_builtin; *name; name++) {
 765                func = find_symbol_by_name(file->elf, *name);
 766                if (!func)
 767                        continue;
 768
 769                func->uaccess_safe = true;
 770        }
 771}
 772
 773/*
 774 * FIXME: For now, just ignore any alternatives which add retpolines.  This is
 775 * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline.
 776 * But it at least allows objtool to understand the control flow *around* the
 777 * retpoline.
 778 */
 779static int add_ignore_alternatives(struct objtool_file *file)
 780{
 781        struct section *sec;
 782        struct reloc *reloc;
 783        struct instruction *insn;
 784
 785        sec = find_section_by_name(file->elf, ".rela.discard.ignore_alts");
 786        if (!sec)
 787                return 0;
 788
 789        list_for_each_entry(reloc, &sec->reloc_list, list) {
 790                if (reloc->sym->type != STT_SECTION) {
 791                        WARN("unexpected relocation symbol type in %s", sec->name);
 792                        return -1;
 793                }
 794
 795                insn = find_insn(file, reloc->sym->sec, reloc->addend);
 796                if (!insn) {
 797                        WARN("bad .discard.ignore_alts entry");
 798                        return -1;
 799                }
 800
 801                insn->ignore_alts = true;
 802        }
 803
 804        return 0;
 805}
 806
/*
 * Weak default: architectures with retpolines override this to recognize
 * their retpoline thunk symbols.
 */
__weak bool arch_is_retpoline(struct symbol *sym)
{
	return false;
}
 811
/*
 * Sentinel cached in insn->reloc to record a negative lookup
 * (distinct from NULL, which means "not looked up yet").
 */
#define NEGATIVE_RELOC  ((void *)-1L)

/*
 * Return the relocation applying to @insn's byte range, or NULL if there
 * is none.  The result (including "none") is cached in insn->reloc so the
 * ELF search runs at most once per instruction.
 */
static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn)
{
	if (insn->reloc == NEGATIVE_RELOC)
		return NULL;

	if (!insn->reloc) {
		insn->reloc = find_reloc_by_dest_range(file->elf, insn->sec,
						       insn->offset, insn->len);
		if (!insn->reloc) {
			/* cache the negative result */
			insn->reloc = NEGATIVE_RELOC;
			return NULL;
		}
	}

	return insn->reloc;
}
 830
 831/*
 832 * Find the destination instructions for all jumps.
 833 */
 834static int add_jump_destinations(struct objtool_file *file)
 835{
 836        struct instruction *insn;
 837        struct reloc *reloc;
 838        struct section *dest_sec;
 839        unsigned long dest_off;
 840
 841        for_each_insn(file, insn) {
 842                if (!is_static_jump(insn))
 843                        continue;
 844
 845                reloc = insn_reloc(file, insn);
 846                if (!reloc) {
 847                        dest_sec = insn->sec;
 848                        dest_off = arch_jump_destination(insn);
 849                } else if (reloc->sym->type == STT_SECTION) {
 850                        dest_sec = reloc->sym->sec;
 851                        dest_off = arch_dest_reloc_offset(reloc->addend);
 852                } else if (arch_is_retpoline(reloc->sym)) {
 853                        /*
 854                         * Retpoline jumps are really dynamic jumps in
 855                         * disguise, so convert them accordingly.
 856                         */
 857                        if (insn->type == INSN_JUMP_UNCONDITIONAL)
 858                                insn->type = INSN_JUMP_DYNAMIC;
 859                        else
 860                                insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL;
 861
 862                        list_add_tail(&insn->call_node,
 863                                      &file->retpoline_call_list);
 864
 865                        insn->retpoline_safe = true;
 866                        continue;
 867                } else if (insn->func) {
 868                        /* internal or external sibling call (with reloc) */
 869                        insn->call_dest = reloc->sym;
 870                        if (insn->call_dest->static_call_tramp) {
 871                                list_add_tail(&insn->call_node,
 872                                              &file->static_call_list);
 873                        }
 874                        continue;
 875                } else if (reloc->sym->sec->idx) {
 876                        dest_sec = reloc->sym->sec;
 877                        dest_off = reloc->sym->sym.st_value +
 878                                   arch_dest_reloc_offset(reloc->addend);
 879                } else {
 880                        /* non-func asm code jumping to another file */
 881                        continue;
 882                }
 883
 884                insn->jump_dest = find_insn(file, dest_sec, dest_off);
 885                if (!insn->jump_dest) {
 886
 887                        /*
 888                         * This is a special case where an alt instruction
 889                         * jumps past the end of the section.  These are
 890                         * handled later in handle_group_alt().
 891                         */
 892                        if (!strcmp(insn->sec->name, ".altinstr_replacement"))
 893                                continue;
 894
 895                        WARN_FUNC("can't find jump dest instruction at %s+0x%lx",
 896                                  insn->sec, insn->offset, dest_sec->name,
 897                                  dest_off);
 898                        return -1;
 899                }
 900
 901                /*
 902                 * Cross-function jump.
 903                 */
 904                if (insn->func && insn->jump_dest->func &&
 905                    insn->func != insn->jump_dest->func) {
 906
 907                        /*
 908                         * For GCC 8+, create parent/child links for any cold
 909                         * subfunctions.  This is _mostly_ redundant with a
 910                         * similar initialization in read_symbols().
 911                         *
 912                         * If a function has aliases, we want the *first* such
 913                         * function in the symbol table to be the subfunction's
 914                         * parent.  In that case we overwrite the
 915                         * initialization done in read_symbols().
 916                         *
 917                         * However this code can't completely replace the
 918                         * read_symbols() code because this doesn't detect the
 919                         * case where the parent function's only reference to a
 920                         * subfunction is through a jump table.
 921                         */
 922                        if (!strstr(insn->func->name, ".cold") &&
 923                            strstr(insn->jump_dest->func->name, ".cold")) {
 924                                insn->func->cfunc = insn->jump_dest->func;
 925                                insn->jump_dest->func->pfunc = insn->func;
 926
 927                        } else if (insn->jump_dest->func->pfunc != insn->func->pfunc &&
 928                                   insn->jump_dest->offset == insn->jump_dest->func->offset) {
 929
 930                                /* internal sibling call (without reloc) */
 931                                insn->call_dest = insn->jump_dest->func;
 932                                if (insn->call_dest->static_call_tramp) {
 933                                        list_add_tail(&insn->call_node,
 934                                                      &file->static_call_list);
 935                                }
 936                        }
 937                }
 938        }
 939
 940        return 0;
 941}
 942
 943static void remove_insn_ops(struct instruction *insn)
 944{
 945        struct stack_op *op, *tmp;
 946
 947        list_for_each_entry_safe(op, tmp, &insn->stack_ops, list) {
 948                list_del(&op->list);
 949                free(op);
 950        }
 951}
 952
 953static struct symbol *find_call_destination(struct section *sec, unsigned long offset)
 954{
 955        struct symbol *call_dest;
 956
 957        call_dest = find_func_by_offset(sec, offset);
 958        if (!call_dest)
 959                call_dest = find_symbol_by_offset(sec, offset);
 960
 961        return call_dest;
 962}
 963
 964/*
 965 * Find the destination instructions for all calls.
 966 */
 967static int add_call_destinations(struct objtool_file *file)
 968{
 969        struct instruction *insn;
 970        unsigned long dest_off;
 971        struct reloc *reloc;
 972
 973        for_each_insn(file, insn) {
 974                if (insn->type != INSN_CALL)
 975                        continue;
 976
 977                reloc = insn_reloc(file, insn);
 978                if (!reloc) {
 979                        dest_off = arch_jump_destination(insn);
 980                        insn->call_dest = find_call_destination(insn->sec, dest_off);
 981
 982                        if (insn->ignore)
 983                                continue;
 984
 985                        if (!insn->call_dest) {
 986                                WARN_FUNC("unannotated intra-function call", insn->sec, insn->offset);
 987                                return -1;
 988                        }
 989
 990                        if (insn->func && insn->call_dest->type != STT_FUNC) {
 991                                WARN_FUNC("unsupported call to non-function",
 992                                          insn->sec, insn->offset);
 993                                return -1;
 994                        }
 995
 996                } else if (reloc->sym->type == STT_SECTION) {
 997                        dest_off = arch_dest_reloc_offset(reloc->addend);
 998                        insn->call_dest = find_call_destination(reloc->sym->sec,
 999                                                                dest_off);
1000                        if (!insn->call_dest) {
1001                                WARN_FUNC("can't find call dest symbol at %s+0x%lx",
1002                                          insn->sec, insn->offset,
1003                                          reloc->sym->sec->name,
1004                                          dest_off);
1005                                return -1;
1006                        }
1007
1008                } else if (arch_is_retpoline(reloc->sym)) {
1009                        /*
1010                         * Retpoline calls are really dynamic calls in
1011                         * disguise, so convert them accordingly.
1012                         */
1013                        insn->type = INSN_CALL_DYNAMIC;
1014                        insn->retpoline_safe = true;
1015
1016                        list_add_tail(&insn->call_node,
1017                                      &file->retpoline_call_list);
1018
1019                        remove_insn_ops(insn);
1020                        continue;
1021
1022                } else
1023                        insn->call_dest = reloc->sym;
1024
1025                if (insn->call_dest && insn->call_dest->static_call_tramp) {
1026                        list_add_tail(&insn->call_node,
1027                                      &file->static_call_list);
1028                }
1029
1030                /*
1031                 * Many compilers cannot disable KCOV with a function attribute
1032                 * so they need a little help, NOP out any KCOV calls from noinstr
1033                 * text.
1034                 */
1035                if (insn->sec->noinstr &&
1036                    !strncmp(insn->call_dest->name, "__sanitizer_cov_", 16)) {
1037                        if (reloc) {
1038                                reloc->type = R_NONE;
1039                                elf_write_reloc(file->elf, reloc);
1040                        }
1041
1042                        elf_write_insn(file->elf, insn->sec,
1043                                       insn->offset, insn->len,
1044                                       arch_nop_insn(insn->len));
1045                        insn->type = INSN_NOP;
1046                }
1047
1048                if (mcount && !strcmp(insn->call_dest->name, "__fentry__")) {
1049                        if (reloc) {
1050                                reloc->type = R_NONE;
1051                                elf_write_reloc(file->elf, reloc);
1052                        }
1053
1054                        elf_write_insn(file->elf, insn->sec,
1055                                       insn->offset, insn->len,
1056                                       arch_nop_insn(insn->len));
1057
1058                        insn->type = INSN_NOP;
1059
1060                        list_add_tail(&insn->mcount_loc_node,
1061                                      &file->mcount_loc_list);
1062                }
1063
1064                /*
1065                 * Whatever stack impact regular CALLs have, should be undone
1066                 * by the RETURN of the called function.
1067                 *
1068                 * Annotated intra-function calls retain the stack_ops but
1069                 * are converted to JUMP, see read_intra_function_calls().
1070                 */
1071                remove_insn_ops(insn);
1072        }
1073
1074        return 0;
1075}
1076
1077/*
1078 * The .alternatives section requires some extra special care over and above
1079 * other special sections because alternatives are patched in place.
1080 */
1081static int handle_group_alt(struct objtool_file *file,
1082                            struct special_alt *special_alt,
1083                            struct instruction *orig_insn,
1084                            struct instruction **new_insn)
1085{
1086        struct instruction *last_orig_insn, *last_new_insn = NULL, *insn, *nop = NULL;
1087        struct alt_group *orig_alt_group, *new_alt_group;
1088        unsigned long dest_off;
1089
1090
1091        orig_alt_group = malloc(sizeof(*orig_alt_group));
1092        if (!orig_alt_group) {
1093                WARN("malloc failed");
1094                return -1;
1095        }
1096        orig_alt_group->cfi = calloc(special_alt->orig_len,
1097                                     sizeof(struct cfi_state *));
1098        if (!orig_alt_group->cfi) {
1099                WARN("calloc failed");
1100                return -1;
1101        }
1102
1103        last_orig_insn = NULL;
1104        insn = orig_insn;
1105        sec_for_each_insn_from(file, insn) {
1106                if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
1107                        break;
1108
1109                insn->alt_group = orig_alt_group;
1110                last_orig_insn = insn;
1111        }
1112        orig_alt_group->orig_group = NULL;
1113        orig_alt_group->first_insn = orig_insn;
1114        orig_alt_group->last_insn = last_orig_insn;
1115
1116
1117        new_alt_group = malloc(sizeof(*new_alt_group));
1118        if (!new_alt_group) {
1119                WARN("malloc failed");
1120                return -1;
1121        }
1122
1123        if (special_alt->new_len < special_alt->orig_len) {
1124                /*
1125                 * Insert a fake nop at the end to make the replacement
1126                 * alt_group the same size as the original.  This is needed to
1127                 * allow propagate_alt_cfi() to do its magic.  When the last
1128                 * instruction affects the stack, the instruction after it (the
1129                 * nop) will propagate the new state to the shared CFI array.
1130                 */
1131                nop = malloc(sizeof(*nop));
1132                if (!nop) {
1133                        WARN("malloc failed");
1134                        return -1;
1135                }
1136                memset(nop, 0, sizeof(*nop));
1137                INIT_LIST_HEAD(&nop->alts);
1138                INIT_LIST_HEAD(&nop->stack_ops);
1139                init_cfi_state(&nop->cfi);
1140
1141                nop->sec = special_alt->new_sec;
1142                nop->offset = special_alt->new_off + special_alt->new_len;
1143                nop->len = special_alt->orig_len - special_alt->new_len;
1144                nop->type = INSN_NOP;
1145                nop->func = orig_insn->func;
1146                nop->alt_group = new_alt_group;
1147                nop->ignore = orig_insn->ignore_alts;
1148        }
1149
1150        if (!special_alt->new_len) {
1151                *new_insn = nop;
1152                goto end;
1153        }
1154
1155        insn = *new_insn;
1156        sec_for_each_insn_from(file, insn) {
1157                struct reloc *alt_reloc;
1158
1159                if (insn->offset >= special_alt->new_off + special_alt->new_len)
1160                        break;
1161
1162                last_new_insn = insn;
1163
1164                insn->ignore = orig_insn->ignore_alts;
1165                insn->func = orig_insn->func;
1166                insn->alt_group = new_alt_group;
1167
1168                /*
1169                 * Since alternative replacement code is copy/pasted by the
1170                 * kernel after applying relocations, generally such code can't
1171                 * have relative-address relocation references to outside the
1172                 * .altinstr_replacement section, unless the arch's
1173                 * alternatives code can adjust the relative offsets
1174                 * accordingly.
1175                 */
1176                alt_reloc = insn_reloc(file, insn);
1177                if (alt_reloc &&
1178                    !arch_support_alt_relocation(special_alt, insn, alt_reloc)) {
1179
1180                        WARN_FUNC("unsupported relocation in alternatives section",
1181                                  insn->sec, insn->offset);
1182                        return -1;
1183                }
1184
1185                if (!is_static_jump(insn))
1186                        continue;
1187
1188                if (!insn->immediate)
1189                        continue;
1190
1191                dest_off = arch_jump_destination(insn);
1192                if (dest_off == special_alt->new_off + special_alt->new_len)
1193                        insn->jump_dest = next_insn_same_sec(file, last_orig_insn);
1194
1195                if (!insn->jump_dest) {
1196                        WARN_FUNC("can't find alternative jump destination",
1197                                  insn->sec, insn->offset);
1198                        return -1;
1199                }
1200        }
1201
1202        if (!last_new_insn) {
1203                WARN_FUNC("can't find last new alternative instruction",
1204                          special_alt->new_sec, special_alt->new_off);
1205                return -1;
1206        }
1207
1208        if (nop)
1209                list_add(&nop->list, &last_new_insn->list);
1210end:
1211        new_alt_group->orig_group = orig_alt_group;
1212        new_alt_group->first_insn = *new_insn;
1213        new_alt_group->last_insn = nop ? : last_new_insn;
1214        new_alt_group->cfi = orig_alt_group->cfi;
1215        return 0;
1216}
1217
1218/*
1219 * A jump table entry can either convert a nop to a jump or a jump to a nop.
1220 * If the original instruction is a jump, make the alt entry an effective nop
1221 * by just skipping the original instruction.
1222 */
1223static int handle_jump_alt(struct objtool_file *file,
1224                           struct special_alt *special_alt,
1225                           struct instruction *orig_insn,
1226                           struct instruction **new_insn)
1227{
1228        if (orig_insn->type != INSN_JUMP_UNCONDITIONAL &&
1229            orig_insn->type != INSN_NOP) {
1230
1231                WARN_FUNC("unsupported instruction at jump label",
1232                          orig_insn->sec, orig_insn->offset);
1233                return -1;
1234        }
1235
1236        if (special_alt->key_addend & 2) {
1237                struct reloc *reloc = insn_reloc(file, orig_insn);
1238
1239                if (reloc) {
1240                        reloc->type = R_NONE;
1241                        elf_write_reloc(file->elf, reloc);
1242                }
1243                elf_write_insn(file->elf, orig_insn->sec,
1244                               orig_insn->offset, orig_insn->len,
1245                               arch_nop_insn(orig_insn->len));
1246                orig_insn->type = INSN_NOP;
1247        }
1248
1249        if (orig_insn->type == INSN_NOP) {
1250                if (orig_insn->len == 2)
1251                        file->jl_nop_short++;
1252                else
1253                        file->jl_nop_long++;
1254
1255                return 0;
1256        }
1257
1258        if (orig_insn->len == 2)
1259                file->jl_short++;
1260        else
1261                file->jl_long++;
1262
1263        *new_insn = list_next_entry(orig_insn, list);
1264        return 0;
1265}
1266
1267/*
1268 * Read all the special sections which have alternate instructions which can be
1269 * patched in or redirected to at runtime.  Each instruction having alternate
1270 * instruction(s) has them added to its insn->alts list, which will be
1271 * traversed in validate_branch().
1272 */
static int add_special_section_alts(struct objtool_file *file)
{
	struct list_head special_alts;
	struct instruction *orig_insn, *new_insn;
	struct special_alt *special_alt, *tmp;
	struct alternative *alt;
	int ret;

	/* Parse the special sections into a list of alternative entries. */
	ret = special_get_alts(file->elf, &special_alts);
	if (ret)
		return ret;

	list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {

		orig_insn = find_insn(file, special_alt->orig_sec,
				      special_alt->orig_off);
		if (!orig_insn) {
			WARN_FUNC("special: can't find orig instruction",
				  special_alt->orig_sec, special_alt->orig_off);
			ret = -1;
			goto out;
		}

		/* Zero-length group entries have no replacement insn. */
		new_insn = NULL;
		if (!special_alt->group || special_alt->new_len) {
			new_insn = find_insn(file, special_alt->new_sec,
					     special_alt->new_off);
			if (!new_insn) {
				WARN_FUNC("special: can't find new instruction",
					  special_alt->new_sec,
					  special_alt->new_off);
				ret = -1;
				goto out;
			}
		}

		if (special_alt->group) {
			if (!special_alt->orig_len) {
				WARN_FUNC("empty alternative entry",
					  orig_insn->sec, orig_insn->offset);
				continue;
			}

			/* In-place patched alternative (may update new_insn). */
			ret = handle_group_alt(file, special_alt, orig_insn,
					       &new_insn);
			if (ret)
				goto out;
		} else if (special_alt->jump_or_nop) {
			/* Jump label: nop<->jump toggle. */
			ret = handle_jump_alt(file, special_alt, orig_insn,
					      &new_insn);
			if (ret)
				goto out;
		}

		alt = malloc(sizeof(*alt));
		if (!alt) {
			WARN("malloc failed");
			ret = -1;
			goto out;
		}

		/* Attach the alternative to the original instruction. */
		alt->insn = new_insn;
		alt->skip_orig = special_alt->skip_orig;
		orig_insn->ignore_alts |= special_alt->skip_alt;
		list_add_tail(&alt->list, &orig_insn->alts);

		list_del(&special_alt->list);
		free(special_alt);
	}

	/* Jump label site statistics (--stats). */
	if (stats) {
		printf("jl\\\tNOP\tJMP\n");
		printf("short:\t%ld\t%ld\n", file->jl_nop_short, file->jl_short);
		printf("long:\t%ld\t%ld\n", file->jl_nop_long, file->jl_long);
	}

out:
	return ret;
}
1352
/*
 * Add each entry of the switch jump table starting at reloc @table as an
 * alternative branch target of the dynamic jump @insn, so that
 * validate_branch() explores every case of the switch.
 *
 * Returns 0 on success, -1 on failure (including when no valid entry at
 * all was found).
 */
static int add_jump_table(struct objtool_file *file, struct instruction *insn,
			    struct reloc *table)
{
	struct reloc *reloc = table;
	struct instruction *dest_insn;
	struct alternative *alt;
	struct symbol *pfunc = insn->func->pfunc;
	unsigned int prev_offset = 0;

	/*
	 * Each @reloc is a switch table relocation which points to the target
	 * instruction.
	 */
	list_for_each_entry_from(reloc, &table->sec->reloc_list, list) {

		/* Check for the end of the table: */
		if (reloc != table && reloc->jump_table_start)
			break;

		/*
		 * Make sure the table entries are consecutive.
		 * NOTE(review): the hard-coded 8 assumes 8-byte table
		 * entries -- confirm for arches with a different layout.
		 */
		if (prev_offset && reloc->offset != prev_offset + 8)
			break;

		/* Detect function pointers from contiguous objects: */
		if (reloc->sym->sec == pfunc->sec &&
		    reloc->addend == pfunc->offset)
			break;

		dest_insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!dest_insn)
			break;

		/* Make sure the destination is in the same function: */
		if (!dest_insn->func || dest_insn->func->pfunc != pfunc)
			break;

		alt = malloc(sizeof(*alt));
		if (!alt) {
			WARN("malloc failed");
			return -1;
		}

		alt->insn = dest_insn;
		list_add_tail(&alt->list, &insn->alts);
		prev_offset = reloc->offset;
	}

	/* Not a single usable entry means the table detection went wrong. */
	if (!prev_offset) {
		WARN_FUNC("can't find switch jump table",
			  insn->sec, insn->offset);
		return -1;
	}

	return 0;
}
1408
1409/*
1410 * find_jump_table() - Given a dynamic jump, find the switch jump table
1411 * associated with it.
1412 */
1413static struct reloc *find_jump_table(struct objtool_file *file,
1414                                      struct symbol *func,
1415                                      struct instruction *insn)
1416{
1417        struct reloc *table_reloc;
1418        struct instruction *dest_insn, *orig_insn = insn;
1419
1420        /*
1421         * Backward search using the @first_jump_src links, these help avoid
1422         * much of the 'in between' code. Which avoids us getting confused by
1423         * it.
1424         */
1425        for (;
1426             insn && insn->func && insn->func->pfunc == func;
1427             insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) {
1428
1429                if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
1430                        break;
1431
1432                /* allow small jumps within the range */
1433                if (insn->type == INSN_JUMP_UNCONDITIONAL &&
1434                    insn->jump_dest &&
1435                    (insn->jump_dest->offset <= insn->offset ||
1436                     insn->jump_dest->offset > orig_insn->offset))
1437                    break;
1438
1439                table_reloc = arch_find_switch_table(file, insn);
1440                if (!table_reloc)
1441                        continue;
1442                dest_insn = find_insn(file, table_reloc->sym->sec, table_reloc->addend);
1443                if (!dest_insn || !dest_insn->func || dest_insn->func->pfunc != func)
1444                        continue;
1445
1446                return table_reloc;
1447        }
1448
1449        return NULL;
1450}
1451
1452/*
1453 * First pass: Mark the head of each jump table so that in the next pass,
1454 * we know when a given jump table ends and the next one starts.
1455 */
1456static void mark_func_jump_tables(struct objtool_file *file,
1457                                    struct symbol *func)
1458{
1459        struct instruction *insn, *last = NULL;
1460        struct reloc *reloc;
1461
1462        func_for_each_insn(file, func, insn) {
1463                if (!last)
1464                        last = insn;
1465
1466                /*
1467                 * Store back-pointers for unconditional forward jumps such
1468                 * that find_jump_table() can back-track using those and
1469                 * avoid some potentially confusing code.
1470                 */
1471                if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest &&
1472                    insn->offset > last->offset &&
1473                    insn->jump_dest->offset > insn->offset &&
1474                    !insn->jump_dest->first_jump_src) {
1475
1476                        insn->jump_dest->first_jump_src = insn;
1477                        last = insn->jump_dest;
1478                }
1479
1480                if (insn->type != INSN_JUMP_DYNAMIC)
1481                        continue;
1482
1483                reloc = find_jump_table(file, func, insn);
1484                if (reloc) {
1485                        reloc->jump_table_start = true;
1486                        insn->jump_table = reloc;
1487                }
1488        }
1489}
1490
1491static int add_func_jump_tables(struct objtool_file *file,
1492                                  struct symbol *func)
1493{
1494        struct instruction *insn;
1495        int ret;
1496
1497        func_for_each_insn(file, func, insn) {
1498                if (!insn->jump_table)
1499                        continue;
1500
1501                ret = add_jump_table(file, insn, insn->jump_table);
1502                if (ret)
1503                        return ret;
1504        }
1505
1506        return 0;
1507}
1508
1509/*
1510 * For some switch statements, gcc generates a jump table in the .rodata
1511 * section which contains a list of addresses within the function to jump to.
1512 * This finds these jump tables and adds them to the insn->alts lists.
1513 */
1514static int add_jump_table_alts(struct objtool_file *file)
1515{
1516        struct section *sec;
1517        struct symbol *func;
1518        int ret;
1519
1520        if (!file->rodata)
1521                return 0;
1522
1523        for_each_sec(file, sec) {
1524                list_for_each_entry(func, &sec->symbol_list, list) {
1525                        if (func->type != STT_FUNC)
1526                                continue;
1527
1528                        mark_func_jump_tables(file, func);
1529                        ret = add_func_jump_tables(file, func);
1530                        if (ret)
1531                                return ret;
1532                }
1533        }
1534
1535        return 0;
1536}
1537
/*
 * Reset @state to the CFI state at function entry: the initial CFA and
 * register state captured in initial_func_cfi, with the stack size taken
 * from the entry CFA offset.
 */
static void set_func_state(struct cfi_state *state)
{
	state->cfa = initial_func_cfi.cfa;
	memcpy(&state->regs, &initial_func_cfi.regs,
	       CFI_NUM_REGS * sizeof(struct cfi_reg));
	state->stack_size = initial_func_cfi.cfa.offset;
}
1545
/*
 * Apply the UNWIND_HINT annotations from .discard.unwind_hints: for each
 * hint, locate the annotated instruction, mark it hinted, and seed its
 * CFI state (or reset it to the function-entry state for FUNC-type
 * hints).
 */
static int read_unwind_hints(struct objtool_file *file)
{
	struct section *sec, *relocsec;
	struct reloc *reloc;
	struct unwind_hint *hint;
	struct instruction *insn;
	int i;

	sec = find_section_by_name(file->elf, ".discard.unwind_hints");
	if (!sec)
		return 0;

	relocsec = sec->reloc;
	if (!relocsec) {
		WARN("missing .rela.discard.unwind_hints section");
		return -1;
	}

	/* The section must be an exact array of struct unwind_hint. */
	if (sec->sh.sh_size % sizeof(struct unwind_hint)) {
		WARN("struct unwind_hint size mismatch");
		return -1;
	}

	file->hints = true;

	for (i = 0; i < sec->sh.sh_size / sizeof(struct unwind_hint); i++) {
		hint = (struct unwind_hint *)sec->data->d_buf + i;

		/* The reloc at hint[i]'s offset identifies the target insn. */
		reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint));
		if (!reloc) {
			WARN("can't find reloc for unwind_hints[%d]", i);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("can't find insn for unwind_hints[%d]", i);
			return -1;
		}

		insn->hint = true;

		/* FUNC hint: reset to the default function-entry CFI state. */
		if (hint->type == UNWIND_HINT_TYPE_FUNC) {
			set_func_state(&insn->cfi);
			continue;
		}

		if (arch_decode_hint_reg(insn, hint->sp_reg)) {
			WARN_FUNC("unsupported unwind_hint sp base reg %d",
				  insn->sec, insn->offset, hint->sp_reg);
			return -1;
		}

		/* Hint data is target-endian; swap if the host differs. */
		insn->cfi.cfa.offset = bswap_if_needed(hint->sp_offset);
		insn->cfi.type = hint->type;
		insn->cfi.end = hint->end;
	}

	return 0;
}
1606
1607static int read_retpoline_hints(struct objtool_file *file)
1608{
1609        struct section *sec;
1610        struct instruction *insn;
1611        struct reloc *reloc;
1612
1613        sec = find_section_by_name(file->elf, ".rela.discard.retpoline_safe");
1614        if (!sec)
1615                return 0;
1616
1617        list_for_each_entry(reloc, &sec->reloc_list, list) {
1618                if (reloc->sym->type != STT_SECTION) {
1619                        WARN("unexpected relocation symbol type in %s", sec->name);
1620                        return -1;
1621                }
1622
1623                insn = find_insn(file, reloc->sym->sec, reloc->addend);
1624                if (!insn) {
1625                        WARN("bad .discard.retpoline_safe entry");
1626                        return -1;
1627                }
1628
1629                if (insn->type != INSN_JUMP_DYNAMIC &&
1630                    insn->type != INSN_CALL_DYNAMIC) {
1631                        WARN_FUNC("retpoline_safe hint not an indirect jump/call",
1632                                  insn->sec, insn->offset);
1633                        return -1;
1634                }
1635
1636                insn->retpoline_safe = true;
1637        }
1638
1639        return 0;
1640}
1641
1642static int read_instr_hints(struct objtool_file *file)
1643{
1644        struct section *sec;
1645        struct instruction *insn;
1646        struct reloc *reloc;
1647
1648        sec = find_section_by_name(file->elf, ".rela.discard.instr_end");
1649        if (!sec)
1650                return 0;
1651
1652        list_for_each_entry(reloc, &sec->reloc_list, list) {
1653                if (reloc->sym->type != STT_SECTION) {
1654                        WARN("unexpected relocation symbol type in %s", sec->name);
1655                        return -1;
1656                }
1657
1658                insn = find_insn(file, reloc->sym->sec, reloc->addend);
1659                if (!insn) {
1660                        WARN("bad .discard.instr_end entry");
1661                        return -1;
1662                }
1663
1664                insn->instr--;
1665        }
1666
1667        sec = find_section_by_name(file->elf, ".rela.discard.instr_begin");
1668        if (!sec)
1669                return 0;
1670
1671        list_for_each_entry(reloc, &sec->reloc_list, list) {
1672                if (reloc->sym->type != STT_SECTION) {
1673                        WARN("unexpected relocation symbol type in %s", sec->name);
1674                        return -1;
1675                }
1676
1677                insn = find_insn(file, reloc->sym->sec, reloc->addend);
1678                if (!insn) {
1679                        WARN("bad .discard.instr_begin entry");
1680                        return -1;
1681                }
1682
1683                insn->instr++;
1684        }
1685
1686        return 0;
1687}
1688
1689static int read_intra_function_calls(struct objtool_file *file)
1690{
1691        struct instruction *insn;
1692        struct section *sec;
1693        struct reloc *reloc;
1694
1695        sec = find_section_by_name(file->elf, ".rela.discard.intra_function_calls");
1696        if (!sec)
1697                return 0;
1698
1699        list_for_each_entry(reloc, &sec->reloc_list, list) {
1700                unsigned long dest_off;
1701
1702                if (reloc->sym->type != STT_SECTION) {
1703                        WARN("unexpected relocation symbol type in %s",
1704                             sec->name);
1705                        return -1;
1706                }
1707
1708                insn = find_insn(file, reloc->sym->sec, reloc->addend);
1709                if (!insn) {
1710                        WARN("bad .discard.intra_function_call entry");
1711                        return -1;
1712                }
1713
1714                if (insn->type != INSN_CALL) {
1715                        WARN_FUNC("intra_function_call not a direct call",
1716                                  insn->sec, insn->offset);
1717                        return -1;
1718                }
1719
1720                /*
1721                 * Treat intra-function CALLs as JMPs, but with a stack_op.
1722                 * See add_call_destinations(), which strips stack_ops from
1723                 * normal CALLs.
1724                 */
1725                insn->type = INSN_JUMP_UNCONDITIONAL;
1726
1727                dest_off = insn->offset + insn->len + insn->immediate;
1728                insn->jump_dest = find_insn(file, insn->sec, dest_off);
1729                if (!insn->jump_dest) {
1730                        WARN_FUNC("can't find call dest at %s+0x%lx",
1731                                  insn->sec, insn->offset,
1732                                  insn->sec->name, dest_off);
1733                        return -1;
1734                }
1735        }
1736
1737        return 0;
1738}
1739
1740static int read_static_call_tramps(struct objtool_file *file)
1741{
1742        struct section *sec;
1743        struct symbol *func;
1744
1745        for_each_sec(file, sec) {
1746                list_for_each_entry(func, &sec->symbol_list, list) {
1747                        if (func->bind == STB_GLOBAL &&
1748                            !strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR,
1749                                     strlen(STATIC_CALL_TRAMP_PREFIX_STR)))
1750                                func->static_call_tramp = true;
1751                }
1752        }
1753
1754        return 0;
1755}
1756
1757static void mark_rodata(struct objtool_file *file)
1758{
1759        struct section *sec;
1760        bool found = false;
1761
1762        /*
1763         * Search for the following rodata sections, each of which can
1764         * potentially contain jump tables:
1765         *
1766         * - .rodata: can contain GCC switch tables
1767         * - .rodata.<func>: same, if -fdata-sections is being used
1768         * - .rodata..c_jump_table: contains C annotated jump tables
1769         *
1770         * .rodata.str1.* sections are ignored; they don't contain jump tables.
1771         */
1772        for_each_sec(file, sec) {
1773                if (!strncmp(sec->name, ".rodata", 7) &&
1774                    !strstr(sec->name, ".str1.")) {
1775                        sec->rodata = true;
1776                        found = true;
1777                }
1778        }
1779
1780        file->rodata = found;
1781}
1782
/*
 * Weak default: no retpoline rewriting.  An architecture can override this
 * to rewrite retpoline sites; it is invoked from decode_sections() after
 * all jump/call destinations and alternatives have been set up.
 */
__weak int arch_rewrite_retpolines(struct objtool_file *file)
{
        return 0;
}
1787
/*
 * Run all the decode/annotation passes over the object file, in dependency
 * order.  Returns 0 on success, or the first pass's error code.
 */
static int decode_sections(struct objtool_file *file)
{
        int err;

        mark_rodata(file);

        err = decode_instructions(file);
        if (err)
                return err;

        err = add_dead_ends(file);
        if (err)
                return err;

        add_ignores(file);
        add_uaccess_safe(file);

        err = add_ignore_alternatives(file);
        if (err)
                return err;

        /* Must run before add_{jump,call}_destinations(). */
        err = read_static_call_tramps(file);
        if (err)
                return err;

        /*
         * Must run before add_special_section_alts(), which depends on
         * jump_dest being set.
         */
        err = add_jump_destinations(file);
        if (err)
                return err;

        err = add_special_section_alts(file);
        if (err)
                return err;

        /*
         * Must run before add_call_destinations(): it converts INSN_CALL
         * instructions to INSN_JUMP.
         */
        err = read_intra_function_calls(file);
        if (err)
                return err;

        err = add_call_destinations(file);
        if (err)
                return err;

        err = add_jump_table_alts(file);
        if (err)
                return err;

        err = read_unwind_hints(file);
        if (err)
                return err;

        err = read_retpoline_hints(file);
        if (err)
                return err;

        err = read_instr_hints(file);
        if (err)
                return err;

        /*
         * Must run after add_special_section_alts(), since this will emit
         * alternatives, and after add_{jump,call}_destinations(), since
         * those create the call insn lists.
         */
        err = arch_rewrite_retpolines(file);
        if (err)
                return err;

        return 0;
}
1867
1868static bool is_fentry_call(struct instruction *insn)
1869{
1870        if (insn->type == INSN_CALL && insn->call_dest &&
1871            insn->call_dest->type == STT_NOTYPE &&
1872            !strcmp(insn->call_dest->name, "__fentry__"))
1873                return true;
1874
1875        return false;
1876}
1877
1878static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state)
1879{
1880        struct cfi_state *cfi = &state->cfi;
1881        int i;
1882
1883        if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap)
1884                return true;
1885
1886        if (cfi->cfa.offset != initial_func_cfi.cfa.offset)
1887                return true;
1888
1889        if (cfi->stack_size != initial_func_cfi.cfa.offset)
1890                return true;
1891
1892        for (i = 0; i < CFI_NUM_REGS; i++) {
1893                if (cfi->regs[i].base != initial_func_cfi.regs[i].base ||
1894                    cfi->regs[i].offset != initial_func_cfi.regs[i].offset)
1895                        return true;
1896        }
1897
1898        return false;
1899}
1900
1901static bool check_reg_frame_pos(const struct cfi_reg *reg,
1902                                int expected_offset)
1903{
1904        return reg->base == CFI_CFA &&
1905               reg->offset == expected_offset;
1906}
1907
1908static bool has_valid_stack_frame(struct insn_state *state)
1909{
1910        struct cfi_state *cfi = &state->cfi;
1911
1912        if (cfi->cfa.base == CFI_BP &&
1913            check_reg_frame_pos(&cfi->regs[CFI_BP], -cfi->cfa.offset) &&
1914            check_reg_frame_pos(&cfi->regs[CFI_RA], -cfi->cfa.offset + 8))
1915                return true;
1916
1917        if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP)
1918                return true;
1919
1920        return false;
1921}
1922
1923static int update_cfi_state_regs(struct instruction *insn,
1924                                  struct cfi_state *cfi,
1925                                  struct stack_op *op)
1926{
1927        struct cfi_reg *cfa = &cfi->cfa;
1928
1929        if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT)
1930                return 0;
1931
1932        /* push */
1933        if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF)
1934                cfa->offset += 8;
1935
1936        /* pop */
1937        if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF)
1938                cfa->offset -= 8;
1939
1940        /* add immediate to sp */
1941        if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD &&
1942            op->dest.reg == CFI_SP && op->src.reg == CFI_SP)
1943                cfa->offset -= op->src.offset;
1944
1945        return 0;
1946}
1947
1948static void save_reg(struct cfi_state *cfi, unsigned char reg, int base, int offset)
1949{
1950        if (arch_callee_saved_reg(reg) &&
1951            cfi->regs[reg].base == CFI_UNDEFINED) {
1952                cfi->regs[reg].base = base;
1953                cfi->regs[reg].offset = offset;
1954        }
1955}
1956
1957static void restore_reg(struct cfi_state *cfi, unsigned char reg)
1958{
1959        cfi->regs[reg].base = initial_func_cfi.regs[reg].base;
1960        cfi->regs[reg].offset = initial_func_cfi.regs[reg].offset;
1961}
1962
1963/*
1964 * A note about DRAP stack alignment:
1965 *
1966 * GCC has the concept of a DRAP register, which is used to help keep track of
1967 * the stack pointer when aligning the stack.  r10 or r13 is used as the DRAP
1968 * register.  The typical DRAP pattern is:
1969 *
1970 *   4c 8d 54 24 08             lea    0x8(%rsp),%r10
1971 *   48 83 e4 c0                and    $0xffffffffffffffc0,%rsp
1972 *   41 ff 72 f8                pushq  -0x8(%r10)
1973 *   55                         push   %rbp
1974 *   48 89 e5                   mov    %rsp,%rbp
1975 *                              (more pushes)
1976 *   41 52                      push   %r10
1977 *                              ...
1978 *   41 5a                      pop    %r10
1979 *                              (more pops)
1980 *   5d                         pop    %rbp
1981 *   49 8d 62 f8                lea    -0x8(%r10),%rsp
1982 *   c3                         retq
1983 *
1984 * There are some variations in the epilogues, like:
1985 *
1986 *   5b                         pop    %rbx
1987 *   41 5a                      pop    %r10
1988 *   41 5c                      pop    %r12
1989 *   41 5d                      pop    %r13
1990 *   41 5e                      pop    %r14
1991 *   c9                         leaveq
1992 *   49 8d 62 f8                lea    -0x8(%r10),%rsp
1993 *   c3                         retq
1994 *
1995 * and:
1996 *
1997 *   4c 8b 55 e8                mov    -0x18(%rbp),%r10
1998 *   48 8b 5d e0                mov    -0x20(%rbp),%rbx
1999 *   4c 8b 65 f0                mov    -0x10(%rbp),%r12
2000 *   4c 8b 6d f8                mov    -0x8(%rbp),%r13
2001 *   c9                         leaveq
2002 *   49 8d 62 f8                lea    -0x8(%r10),%rsp
2003 *   c3                         retq
2004 *
2005 * Sometimes r13 is used as the DRAP register, in which case it's saved and
2006 * restored beforehand:
2007 *
2008 *   41 55                      push   %r13
2009 *   4c 8d 6c 24 10             lea    0x10(%rsp),%r13
2010 *   48 83 e4 f0                and    $0xfffffffffffffff0,%rsp
2011 *                              ...
2012 *   49 8d 65 f0                lea    -0x10(%r13),%rsp
2013 *   41 5d                      pop    %r13
2014 *   c3                         retq
2015 */
/*
 * Apply the effect of one decoded stack operation @op to the CFI state
 * @cfi, updating the CFA, the register save slots and the tracked stack
 * size.  Returns 0 on success, -1 on an unsupported or unknown stack
 * modification.
 */
static int update_cfi_state(struct instruction *insn,
                            struct instruction *next_insn,
                            struct cfi_state *cfi, struct stack_op *op)
{
        struct cfi_reg *cfa = &cfi->cfa;
        struct cfi_reg *regs = cfi->regs;

        /* stack operations don't make sense with an undefined CFA */
        if (cfa->base == CFI_UNDEFINED) {
                if (insn->func) {
                        WARN_FUNC("undefined stack state", insn->sec, insn->offset);
                        return -1;
                }
                return 0;
        }

        /* pt_regs hint regions use simplified SP-only tracking. */
        if (cfi->type == UNWIND_HINT_TYPE_REGS ||
            cfi->type == UNWIND_HINT_TYPE_REGS_PARTIAL)
                return update_cfi_state_regs(insn, cfi, op);

        /* Classify by the op's destination first, then by its source. */
        switch (op->dest.type) {

        case OP_DEST_REG:
                switch (op->src.type) {

                case OP_SRC_REG:
                        if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP &&
                            cfa->base == CFI_SP &&
                            check_reg_frame_pos(&regs[CFI_BP], -cfa->offset)) {

                                /* mov %rsp, %rbp */
                                cfa->base = op->dest.reg;
                                cfi->bp_scratch = false;
                        }

                        else if (op->src.reg == CFI_SP &&
                                 op->dest.reg == CFI_BP && cfi->drap) {

                                /* drap: mov %rsp, %rbp */
                                regs[CFI_BP].base = CFI_BP;
                                regs[CFI_BP].offset = -cfi->stack_size;
                                cfi->bp_scratch = false;
                        }

                        else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {

                                /*
                                 * mov %rsp, %reg
                                 *
                                 * This is needed for the rare case where GCC
                                 * does:
                                 *
                                 *   mov    %rsp, %rax
                                 *   ...
                                 *   mov    %rax, %rsp
                                 */
                                cfi->vals[op->dest.reg].base = CFI_CFA;
                                cfi->vals[op->dest.reg].offset = -cfi->stack_size;
                        }

                        else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP &&
                                 (cfa->base == CFI_BP || cfa->base == cfi->drap_reg)) {

                                /*
                                 * mov %rbp, %rsp
                                 *
                                 * Restore the original stack pointer (Clang).
                                 */
                                cfi->stack_size = -cfi->regs[CFI_BP].offset;
                        }

                        else if (op->dest.reg == cfa->base) {

                                /* mov %reg, %rsp */
                                if (cfa->base == CFI_SP &&
                                    cfi->vals[op->src.reg].base == CFI_CFA) {

                                        /*
                                         * This is needed for the rare case
                                         * where GCC does something dumb like:
                                         *
                                         *   lea    0x8(%rsp), %rcx
                                         *   ...
                                         *   mov    %rcx, %rsp
                                         */
                                        cfa->offset = -cfi->vals[op->src.reg].offset;
                                        cfi->stack_size = cfa->offset;

                                } else if (cfa->base == CFI_SP &&
                                           cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
                                           cfi->vals[op->src.reg].offset == cfa->offset) {

                                        /*
                                         * Stack swizzle:
                                         *
                                         * 1: mov %rsp, (%[tos])
                                         * 2: mov %[tos], %rsp
                                         *    ...
                                         * 3: pop %rsp
                                         *
                                         * Where:
                                         *
                                         * 1 - places a pointer to the previous
                                         *     stack at the Top-of-Stack of the
                                         *     new stack.
                                         *
                                         * 2 - switches to the new stack.
                                         *
                                         * 3 - pops the Top-of-Stack to restore
                                         *     the original stack.
                                         *
                                         * Note: we set base to SP_INDIRECT
                                         * here and preserve offset. Therefore
                                         * when the unwinder reaches ToS it
                                         * will dereference SP and then add the
                                         * offset to find the next frame, IOW:
                                         * (%rsp) + offset.
                                         */
                                        cfa->base = CFI_SP_INDIRECT;

                                } else {
                                        /* Any other write to the CFA base loses tracking. */
                                        cfa->base = CFI_UNDEFINED;
                                        cfa->offset = 0;
                                }
                        }

                        else if (op->dest.reg == CFI_SP &&
                                 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
                                 cfi->vals[op->src.reg].offset == cfa->offset) {

                                /*
                                 * The same stack swizzle case 2) as above. But
                                 * because we can't change cfa->base, case 3)
                                 * will become a regular POP. Pretend we're a
                                 * PUSH so things don't go unbalanced.
                                 */
                                cfi->stack_size += 8;
                        }


                        break;

                case OP_SRC_ADD:
                        if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) {

                                /* add imm, %rsp */
                                cfi->stack_size -= op->src.offset;
                                if (cfa->base == CFI_SP)
                                        cfa->offset -= op->src.offset;
                                break;
                        }

                        if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {

                                /* lea disp(%rbp), %rsp */
                                cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset);
                                break;
                        }

                        if (!cfi->drap && op->src.reg == CFI_SP &&
                            op->dest.reg == CFI_BP && cfa->base == CFI_SP &&
                            check_reg_frame_pos(&regs[CFI_BP], -cfa->offset + op->src.offset)) {

                                /* lea disp(%rsp), %rbp */
                                cfa->base = CFI_BP;
                                cfa->offset -= op->src.offset;
                                cfi->bp_scratch = false;
                                break;
                        }

                        if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {

                                /* drap: lea disp(%rsp), %drap */
                                cfi->drap_reg = op->dest.reg;

                                /*
                                 * lea disp(%rsp), %reg
                                 *
                                 * This is needed for the rare case where GCC
                                 * does something dumb like:
                                 *
                                 *   lea    0x8(%rsp), %rcx
                                 *   ...
                                 *   mov    %rcx, %rsp
                                 */
                                cfi->vals[op->dest.reg].base = CFI_CFA;
                                cfi->vals[op->dest.reg].offset = \
                                        -cfi->stack_size + op->src.offset;

                                break;
                        }

                        if (cfi->drap && op->dest.reg == CFI_SP &&
                            op->src.reg == cfi->drap_reg) {

                                 /* drap: lea disp(%drap), %rsp */
                                cfa->base = CFI_SP;
                                cfa->offset = cfi->stack_size = -op->src.offset;
                                cfi->drap_reg = CFI_UNDEFINED;
                                cfi->drap = false;
                                break;
                        }

                        /*
                         * Tolerate the modification if the next instruction
                         * carries an unwind hint, which redefines the state
                         * anyway.
                         */
                        if (op->dest.reg == cfi->cfa.base && !(next_insn && next_insn->hint)) {
                                WARN_FUNC("unsupported stack register modification",
                                          insn->sec, insn->offset);
                                return -1;
                        }

                        break;

                case OP_SRC_AND:
                        if (op->dest.reg != CFI_SP ||
                            (cfi->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) ||
                            (cfi->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) {
                                WARN_FUNC("unsupported stack pointer realignment",
                                          insn->sec, insn->offset);
                                return -1;
                        }

                        if (cfi->drap_reg != CFI_UNDEFINED) {
                                /* drap: and imm, %rsp */
                                cfa->base = cfi->drap_reg;
                                cfa->offset = cfi->stack_size = 0;
                                cfi->drap = true;
                        }

                        /*
                         * Older versions of GCC (4.8ish) realign the stack
                         * without DRAP, with a frame pointer.
                         */

                        break;

                case OP_SRC_POP:
                case OP_SRC_POPF:
                        if (op->dest.reg == CFI_SP && cfa->base == CFI_SP_INDIRECT) {

                                /* pop %rsp; # restore from a stack swizzle */
                                cfa->base = CFI_SP;
                                break;
                        }

                        if (!cfi->drap && op->dest.reg == cfa->base) {

                                /* pop %rbp */
                                cfa->base = CFI_SP;
                        }

                        if (cfi->drap && cfa->base == CFI_BP_INDIRECT &&
                            op->dest.reg == cfi->drap_reg &&
                            cfi->drap_offset == -cfi->stack_size) {

                                /* drap: pop %drap */
                                cfa->base = cfi->drap_reg;
                                cfa->offset = 0;
                                cfi->drap_offset = -1;

                        } else if (cfi->stack_size == -regs[op->dest.reg].offset) {

                                /* pop %reg */
                                restore_reg(cfi, op->dest.reg);
                        }

                        cfi->stack_size -= 8;
                        if (cfa->base == CFI_SP)
                                cfa->offset -= 8;

                        break;

                case OP_SRC_REG_INDIRECT:
                        if (!cfi->drap && op->dest.reg == cfa->base &&
                            op->dest.reg == CFI_BP) {

                                /* mov disp(%rsp), %rbp */
                                cfa->base = CFI_SP;
                                cfa->offset = cfi->stack_size;
                        }

                        if (cfi->drap && op->src.reg == CFI_BP &&
                            op->src.offset == cfi->drap_offset) {

                                /* drap: mov disp(%rbp), %drap */
                                cfa->base = cfi->drap_reg;
                                cfa->offset = 0;
                                cfi->drap_offset = -1;
                        }

                        if (cfi->drap && op->src.reg == CFI_BP &&
                            op->src.offset == regs[op->dest.reg].offset) {

                                /* drap: mov disp(%rbp), %reg */
                                restore_reg(cfi, op->dest.reg);

                        } else if (op->src.reg == cfa->base &&
                            op->src.offset == regs[op->dest.reg].offset + cfa->offset) {

                                /* mov disp(%rbp), %reg */
                                /* mov disp(%rsp), %reg */
                                restore_reg(cfi, op->dest.reg);

                        } else if (op->src.reg == CFI_SP &&
                                   op->src.offset == regs[op->dest.reg].offset + cfi->stack_size) {

                                /* mov disp(%rsp), %reg */
                                restore_reg(cfi, op->dest.reg);
                        }

                        break;

                default:
                        WARN_FUNC("unknown stack-related instruction",
                                  insn->sec, insn->offset);
                        return -1;
                }

                break;

        case OP_DEST_PUSH:
        case OP_DEST_PUSHF:
                cfi->stack_size += 8;
                if (cfa->base == CFI_SP)
                        cfa->offset += 8;

                if (op->src.type != OP_SRC_REG)
                        break;

                if (cfi->drap) {
                        if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {

                                /* drap: push %drap */
                                cfa->base = CFI_BP_INDIRECT;
                                cfa->offset = -cfi->stack_size;

                                /* save drap so we know when to restore it */
                                cfi->drap_offset = -cfi->stack_size;

                        } else if (op->src.reg == CFI_BP && cfa->base == cfi->drap_reg) {

                                /* drap: push %rbp */
                                cfi->stack_size = 0;

                        } else {

                                /* drap: push %reg */
                                save_reg(cfi, op->src.reg, CFI_BP, -cfi->stack_size);
                        }

                } else {

                        /* push %reg */
                        save_reg(cfi, op->src.reg, CFI_CFA, -cfi->stack_size);
                }

                /* detect when asm code uses rbp as a scratch register */
                if (!no_fp && insn->func && op->src.reg == CFI_BP &&
                    cfa->base != CFI_BP)
                        cfi->bp_scratch = true;
                break;

        case OP_DEST_REG_INDIRECT:

                if (cfi->drap) {
                        if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {

                                /* drap: mov %drap, disp(%rbp) */
                                cfa->base = CFI_BP_INDIRECT;
                                cfa->offset = op->dest.offset;

                                /* save drap offset so we know when to restore it */
                                cfi->drap_offset = op->dest.offset;
                        } else {

                                /* drap: mov reg, disp(%rbp) */
                                save_reg(cfi, op->src.reg, CFI_BP, op->dest.offset);
                        }

                } else if (op->dest.reg == cfa->base) {

                        /* mov reg, disp(%rbp) */
                        /* mov reg, disp(%rsp) */
                        save_reg(cfi, op->src.reg, CFI_CFA,
                                 op->dest.offset - cfi->cfa.offset);

                } else if (op->dest.reg == CFI_SP) {

                        /* mov reg, disp(%rsp) */
                        save_reg(cfi, op->src.reg, CFI_CFA,
                                 op->dest.offset - cfi->stack_size);

                } else if (op->src.reg == CFI_SP && op->dest.offset == 0) {

                        /* mov %rsp, (%reg); # setup a stack swizzle. */
                        cfi->vals[op->dest.reg].base = CFI_SP_INDIRECT;
                        cfi->vals[op->dest.reg].offset = cfa->offset;
                }

                break;

        case OP_DEST_MEM:
                if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) {
                        WARN_FUNC("unknown stack-related memory operation",
                                  insn->sec, insn->offset);
                        return -1;
                }

                /* pop mem */
                cfi->stack_size -= 8;
                if (cfa->base == CFI_SP)
                        cfa->offset -= 8;

                break;

        default:
                WARN_FUNC("unknown stack-related instruction",
                          insn->sec, insn->offset);
                return -1;
        }

        return 0;
}
2437
2438/*
2439 * The stack layouts of alternatives instructions can sometimes diverge when
2440 * they have stack modifications.  That's fine as long as the potential stack
2441 * layouts don't conflict at any given potential instruction boundary.
2442 *
2443 * Flatten the CFIs of the different alternative code streams (both original
2444 * and replacement) into a single shared CFI array which can be used to detect
2445 * conflicts and nicely feed a linear array of ORC entries to the unwinder.
2446 */
2447static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn)
2448{
2449        struct cfi_state **alt_cfi;
2450        int group_off;
2451
2452        if (!insn->alt_group)
2453                return 0;
2454
2455        alt_cfi = insn->alt_group->cfi;
2456        group_off = insn->offset - insn->alt_group->first_insn->offset;
2457
2458        if (!alt_cfi[group_off]) {
2459                alt_cfi[group_off] = &insn->cfi;
2460        } else {
2461                if (memcmp(alt_cfi[group_off], &insn->cfi, sizeof(struct cfi_state))) {
2462                        WARN_FUNC("stack layout conflict in alternatives",
2463                                  insn->sec, insn->offset);
2464                        return -1;
2465                }
2466        }
2467
2468        return 0;
2469}
2470
/*
 * Apply all of @insn's stack operations to the CFI in @state, and track the
 * uaccess (STAC/CLAC) state across PUSHF/POPF pairs inside alternatives.
 *
 * Returns 1 on error, 0 otherwise.
 */
static int handle_insn_ops(struct instruction *insn,
			   struct instruction *next_insn,
			   struct insn_state *state)
{
	struct stack_op *op;

	list_for_each_entry(op, &insn->stack_ops, list) {

		if (update_cfi_state(insn, next_insn, &state->cfi, op))
			return 1;

		/* The uaccess tracking below only applies within alternatives. */
		if (!insn->alt_group)
			continue;

		if (op->dest.type == OP_DEST_PUSHF) {
			if (!state->uaccess_stack) {
				/* seed the sentinel bit marking stack bottom */
				state->uaccess_stack = 1;
			} else if (state->uaccess_stack >> 31) {
				WARN_FUNC("PUSHF stack exhausted",
					  insn->sec, insn->offset);
				return 1;
			}
			/* push the current uaccess state onto the bit stack */
			state->uaccess_stack <<= 1;
			state->uaccess_stack  |= state->uaccess;
		}

		if (op->src.type == OP_SRC_POPF) {
			if (state->uaccess_stack) {
				/* pop the saved uaccess state */
				state->uaccess = state->uaccess_stack & 1;
				state->uaccess_stack >>= 1;
				/* only the sentinel bit left: stack is empty */
				if (state->uaccess_stack == 1)
					state->uaccess_stack = 0;
			}
		}
	}

	return 0;
}
2509
/*
 * Compare @insn's recorded CFI against @cfi2 and warn about the first
 * difference found: CFA, callee-saved register locations, unwind type,
 * or DRAP state.
 *
 * Returns true if the two states are equivalent.
 */
static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2)
{
	struct cfi_state *cfi1 = &insn->cfi;
	int i;

	if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) {

		WARN_FUNC("stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
			  insn->sec, insn->offset,
			  cfi1->cfa.base, cfi1->cfa.offset,
			  cfi2->cfa.base, cfi2->cfa.offset);

	} else if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) {
		/* Find and report the first register that differs. */
		for (i = 0; i < CFI_NUM_REGS; i++) {
			if (!memcmp(&cfi1->regs[i], &cfi2->regs[i],
				    sizeof(struct cfi_reg)))
				continue;

			WARN_FUNC("stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
				  insn->sec, insn->offset,
				  i, cfi1->regs[i].base, cfi1->regs[i].offset,
				  i, cfi2->regs[i].base, cfi2->regs[i].offset);
			break;
		}

	} else if (cfi1->type != cfi2->type) {

		WARN_FUNC("stack state mismatch: type1=%d type2=%d",
			  insn->sec, insn->offset, cfi1->type, cfi2->type);

	} else if (cfi1->drap != cfi2->drap ||
		   (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) ||
		   (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) {

		WARN_FUNC("stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
			  insn->sec, insn->offset,
			  cfi1->drap, cfi1->drap_reg, cfi1->drap_offset,
			  cfi2->drap, cfi2->drap_reg, cfi2->drap_offset);

	} else
		return true;

	return false;
}
2554
2555static inline bool func_uaccess_safe(struct symbol *func)
2556{
2557        if (func)
2558                return func->uaccess_safe;
2559
2560        return false;
2561}
2562
2563static inline const char *call_dest_name(struct instruction *insn)
2564{
2565        if (insn->call_dest)
2566                return insn->call_dest->name;
2567
2568        return "{dynamic}";
2569}
2570
2571static inline bool noinstr_call_dest(struct symbol *func)
2572{
2573        /*
2574         * We can't deal with indirect function calls at present;
2575         * assume they're instrumented.
2576         */
2577        if (!func)
2578                return false;
2579
2580        /*
2581         * If the symbol is from a noinstr section; we good.
2582         */
2583        if (func->sec->noinstr)
2584                return true;
2585
2586        /*
2587         * The __ubsan_handle_*() calls are like WARN(), they only happen when
2588         * something 'BAD' happened. At the risk of taking the machine down,
2589         * let them proceed to get the message out.
2590         */
2591        if (!strncmp(func->name, "__ubsan_handle_", 15))
2592                return true;
2593
2594        return false;
2595}
2596
2597static int validate_call(struct instruction *insn, struct insn_state *state)
2598{
2599        if (state->noinstr && state->instr <= 0 &&
2600            !noinstr_call_dest(insn->call_dest)) {
2601                WARN_FUNC("call to %s() leaves .noinstr.text section",
2602                                insn->sec, insn->offset, call_dest_name(insn));
2603                return 1;
2604        }
2605
2606        if (state->uaccess && !func_uaccess_safe(insn->call_dest)) {
2607                WARN_FUNC("call to %s() with UACCESS enabled",
2608                                insn->sec, insn->offset, call_dest_name(insn));
2609                return 1;
2610        }
2611
2612        if (state->df) {
2613                WARN_FUNC("call to %s() with DF set",
2614                                insn->sec, insn->offset, call_dest_name(insn));
2615                return 1;
2616        }
2617
2618        return 0;
2619}
2620
2621static int validate_sibling_call(struct instruction *insn, struct insn_state *state)
2622{
2623        if (has_modified_stack_frame(insn, state)) {
2624                WARN_FUNC("sibling call from callable instruction with modified stack frame",
2625                                insn->sec, insn->offset);
2626                return 1;
2627        }
2628
2629        return validate_call(insn, state);
2630}
2631
/*
 * Validate the state at a return instruction: no live instrumentation in
 * noinstr code, a consistent UACCESS state, DF cleared, an unmodified
 * stack frame, and no lingering use of BP as a scratch register.
 *
 * Returns 1 on warning, 0 otherwise.
 */
static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state)
{
	if (state->noinstr && state->instr > 0) {
		WARN_FUNC("return with instrumentation enabled",
			  insn->sec, insn->offset);
		return 1;
	}

	if (state->uaccess && !func_uaccess_safe(func)) {
		WARN_FUNC("return with UACCESS enabled",
			  insn->sec, insn->offset);
		return 1;
	}

	/* A uaccess-safe function must not return with UACCESS disabled. */
	if (!state->uaccess && func_uaccess_safe(func)) {
		WARN_FUNC("return with UACCESS disabled from a UACCESS-safe function",
			  insn->sec, insn->offset);
		return 1;
	}

	if (state->df) {
		WARN_FUNC("return with DF set",
			  insn->sec, insn->offset);
		return 1;
	}

	/* Frame checks only apply when returning from an actual function. */
	if (func && has_modified_stack_frame(insn, state)) {
		WARN_FUNC("return with modified stack frame",
			  insn->sec, insn->offset);
		return 1;
	}

	if (state->cfi.bp_scratch) {
		WARN_FUNC("BP used as a scratch register",
			  insn->sec, insn->offset);
		return 1;
	}

	return 0;
}
2672
2673static struct instruction *next_insn_to_validate(struct objtool_file *file,
2674                                                 struct instruction *insn)
2675{
2676        struct alt_group *alt_group = insn->alt_group;
2677
2678        /*
2679         * Simulate the fact that alternatives are patched in-place.  When the
2680         * end of a replacement alt_group is reached, redirect objtool flow to
2681         * the end of the original alt_group.
2682         */
2683        if (alt_group && insn == alt_group->last_insn && alt_group->orig_group)
2684                return next_insn_same_sec(file, alt_group->orig_group->last_insn);
2685
2686        return next_insn_same_sec(file, insn);
2687}
2688
2689/*
2690 * Follow the branch starting at the given instruction, and recursively follow
2691 * any other branches (jumps).  Meanwhile, track the frame pointer state at
2692 * each instruction and validate all the rules described in
2693 * tools/objtool/Documentation/stack-validation.txt.
2694 */
static int validate_branch(struct objtool_file *file, struct symbol *func,
			   struct instruction *insn, struct insn_state state)
{
	struct alternative *alt;
	struct instruction *next_insn;
	struct section *sec;
	u8 visited;
	int ret;

	sec = insn->sec;

	while (1) {
		next_insn = next_insn_to_validate(file, insn);

		if (file->c_file && func && insn->func && func != insn->func->pfunc) {
			WARN("%s() falls through to next function %s()",
			     func->name, insn->func->name);
			return 1;
		}

		if (func && insn->ignore) {
			WARN_FUNC("BUG: why am I validating an ignored function?",
				  sec, insn->offset);
			return 1;
		}

		/*
		 * 'visited' is a bitmask keyed on the uaccess state, so a
		 * path is re-validated when reached with a different STAC
		 * state than before.
		 */
		visited = 1 << state.uaccess;
		if (insn->visited) {
			/* Non-hinted revisits must agree on the CFI state. */
			if (!insn->hint && !insn_cfi_match(insn, &state.cfi))
				return 1;

			if (insn->visited & visited)
				return 0;
		}

		if (state.noinstr)
			state.instr += insn->instr;

		/* An unwind hint overrides the computed CFI; else record it. */
		if (insn->hint)
			state.cfi = insn->cfi;
		else
			insn->cfi = state.cfi;

		insn->visited |= visited;

		if (propagate_alt_cfi(file, insn))
			return 1;

		/* Recurse into each alternative code stream. */
		if (!insn->ignore_alts && !list_empty(&insn->alts)) {
			bool skip_orig = false;

			list_for_each_entry(alt, &insn->alts, list) {
				if (alt->skip_orig)
					skip_orig = true;

				ret = validate_branch(file, func, alt->insn, state);
				if (ret) {
					if (backtrace)
						BT_FUNC("(alt)", insn);
					return ret;
				}
			}

			if (skip_orig)
				return 0;
		}

		if (handle_insn_ops(insn, next_insn, &state))
			return 1;

		switch (insn->type) {

		case INSN_RETURN:
			return validate_return(func, insn, &state);

		case INSN_CALL:
		case INSN_CALL_DYNAMIC:
			ret = validate_call(insn, &state);
			if (ret)
				return ret;

			if (!no_fp && func && !is_fentry_call(insn) &&
			    !has_valid_stack_frame(&state)) {
				WARN_FUNC("call without frame pointer save/setup",
					  sec, insn->offset);
				return 1;
			}

			/* __noreturn callee: stop following this path. */
			if (dead_end_function(file, insn->call_dest))
				return 0;

			break;

		case INSN_JUMP_CONDITIONAL:
		case INSN_JUMP_UNCONDITIONAL:
			if (is_sibling_call(insn)) {
				ret = validate_sibling_call(insn, &state);
				if (ret)
					return ret;

			} else if (insn->jump_dest) {
				/* Follow the branch target recursively. */
				ret = validate_branch(file, func,
						      insn->jump_dest, state);
				if (ret) {
					if (backtrace)
						BT_FUNC("(branch)", insn);
					return ret;
				}
			}

			if (insn->type == INSN_JUMP_UNCONDITIONAL)
				return 0;

			break;

		case INSN_JUMP_DYNAMIC:
		case INSN_JUMP_DYNAMIC_CONDITIONAL:
			if (is_sibling_call(insn)) {
				ret = validate_sibling_call(insn, &state);
				if (ret)
					return ret;
			}

			if (insn->type == INSN_JUMP_DYNAMIC)
				return 0;

			break;

		case INSN_CONTEXT_SWITCH:
			/*
			 * Inside a function, a context switch is only allowed
			 * when validation resumes at a hinted instruction.
			 */
			if (func && (!next_insn || !next_insn->hint)) {
				WARN_FUNC("unsupported instruction in callable function",
					  sec, insn->offset);
				return 1;
			}
			return 0;

		case INSN_STAC:
			if (state.uaccess) {
				WARN_FUNC("recursive UACCESS enable", sec, insn->offset);
				return 1;
			}

			state.uaccess = true;
			break;

		case INSN_CLAC:
			if (!state.uaccess && func) {
				WARN_FUNC("redundant UACCESS disable", sec, insn->offset);
				return 1;
			}

			if (func_uaccess_safe(func) && !state.uaccess_stack) {
				WARN_FUNC("UACCESS-safe disables UACCESS", sec, insn->offset);
				return 1;
			}

			state.uaccess = false;
			break;

		case INSN_STD:
			if (state.df) {
				WARN_FUNC("recursive STD", sec, insn->offset);
				return 1;
			}

			state.df = true;
			break;

		case INSN_CLD:
			if (!state.df && func) {
				WARN_FUNC("redundant CLD", sec, insn->offset);
				return 1;
			}

			state.df = false;
			break;

		default:
			break;
		}

		if (insn->dead_end)
			return 0;

		if (!next_insn) {
			if (state.cfi.cfa.base == CFI_UNDEFINED)
				return 0;
			WARN("%s: unexpected end of section", sec->name);
			return 1;
		}

		insn = next_insn;
	}

	return 0;
}
2891
2892static int validate_unwind_hints(struct objtool_file *file, struct section *sec)
2893{
2894        struct instruction *insn;
2895        struct insn_state state;
2896        int ret, warnings = 0;
2897
2898        if (!file->hints)
2899                return 0;
2900
2901        init_insn_state(&state, sec);
2902
2903        if (sec) {
2904                insn = find_insn(file, sec, 0);
2905                if (!insn)
2906                        return 0;
2907        } else {
2908                insn = list_first_entry(&file->insn_list, typeof(*insn), list);
2909        }
2910
2911        while (&insn->list != &file->insn_list && (!sec || insn->sec == sec)) {
2912                if (insn->hint && !insn->visited) {
2913                        ret = validate_branch(file, insn->func, insn, state);
2914                        if (ret && backtrace)
2915                                BT_FUNC("<=== (hint)", insn);
2916                        warnings += ret;
2917                }
2918
2919                insn = list_next_entry(insn, list);
2920        }
2921
2922        return warnings;
2923}
2924
2925static int validate_retpoline(struct objtool_file *file)
2926{
2927        struct instruction *insn;
2928        int warnings = 0;
2929
2930        for_each_insn(file, insn) {
2931                if (insn->type != INSN_JUMP_DYNAMIC &&
2932                    insn->type != INSN_CALL_DYNAMIC)
2933                        continue;
2934
2935                if (insn->retpoline_safe)
2936                        continue;
2937
2938                /*
2939                 * .init.text code is ran before userspace and thus doesn't
2940                 * strictly need retpolines, except for modules which are
2941                 * loaded late, they very much do need retpoline in their
2942                 * .init.text
2943                 */
2944                if (!strcmp(insn->sec->name, ".init.text") && !module)
2945                        continue;
2946
2947                WARN_FUNC("indirect %s found in RETPOLINE build",
2948                          insn->sec, insn->offset,
2949                          insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
2950
2951                warnings++;
2952        }
2953
2954        return warnings;
2955}
2956
2957static bool is_kasan_insn(struct instruction *insn)
2958{
2959        return (insn->type == INSN_CALL &&
2960                !strcmp(insn->call_dest->name, "__asan_handle_no_return"));
2961}
2962
2963static bool is_ubsan_insn(struct instruction *insn)
2964{
2965        return (insn->type == INSN_CALL &&
2966                !strcmp(insn->call_dest->name,
2967                        "__ubsan_handle_builtin_unreachable"));
2968}
2969
/*
 * Decide whether an unvisited (unreachable) instruction should be ignored
 * rather than warned about: explicitly ignored/NOP instructions, unused
 * exception fixups, alternative replacements, compiler-emitted UD2s after
 * dead ends, and KASAN/UBSAN instrumentation all qualify.
 */
static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn)
{
	int i;
	struct instruction *prev_insn;

	if (insn->ignore || insn->type == INSN_NOP)
		return true;

	/*
	 * Ignore any unused exceptions.  This can happen when a whitelisted
	 * function has an exception table entry.
	 *
	 * Also ignore alternative replacement instructions.  This can happen
	 * when a whitelisted function uses one of the ALTERNATIVE macros.
	 */
	if (!strcmp(insn->sec->name, ".fixup") ||
	    !strcmp(insn->sec->name, ".altinstr_replacement") ||
	    !strcmp(insn->sec->name, ".altinstr_aux"))
		return true;

	/* The remaining heuristics only apply within a function. */
	if (!insn->func)
		return false;

	/*
	 * CONFIG_UBSAN_TRAP inserts a UD2 when it sees
	 * __builtin_unreachable().  The BUG() macro has an unreachable() after
	 * the UD2, which causes GCC's undefined trap logic to emit another UD2
	 * (or occasionally a JMP to UD2).
	 *
	 * It may also insert a UD2 after calling a __noreturn function.
	 */
	prev_insn = list_prev_entry(insn, list);
	if ((prev_insn->dead_end || dead_end_function(file, prev_insn->call_dest)) &&
	    (insn->type == INSN_BUG ||
	     (insn->type == INSN_JUMP_UNCONDITIONAL &&
	      insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
		return true;

	/*
	 * Check if this (or a subsequent) instruction is related to
	 * CONFIG_UBSAN or CONFIG_KASAN.
	 *
	 * End the search at 5 instructions to avoid going into the weeds.
	 */
	for (i = 0; i < 5; i++) {

		if (is_kasan_insn(insn) || is_ubsan_insn(insn))
			return true;

		if (insn->type == INSN_JUMP_UNCONDITIONAL) {
			/* Only follow jumps that stay within the function. */
			if (insn->jump_dest &&
			    insn->jump_dest->func == insn->func) {
				insn = insn->jump_dest;
				continue;
			}

			break;
		}

		/* Stop at the end of the enclosing function. */
		if (insn->offset + insn->len >= insn->func->offset + insn->func->len)
			break;

		insn = list_next_entry(insn, list);
	}

	return false;
}
3037
3038static int validate_symbol(struct objtool_file *file, struct section *sec,
3039                           struct symbol *sym, struct insn_state *state)
3040{
3041        struct instruction *insn;
3042        int ret;
3043
3044        if (!sym->len) {
3045                WARN("%s() is missing an ELF size annotation", sym->name);
3046                return 1;
3047        }
3048
3049        if (sym->pfunc != sym || sym->alias != sym)
3050                return 0;
3051
3052        insn = find_insn(file, sec, sym->offset);
3053        if (!insn || insn->ignore || insn->visited)
3054                return 0;
3055
3056        state->uaccess = sym->uaccess_safe;
3057
3058        ret = validate_branch(file, insn->func, insn, *state);
3059        if (ret && backtrace)
3060                BT_FUNC("<=== (sym)", insn);
3061        return ret;
3062}
3063
3064static int validate_section(struct objtool_file *file, struct section *sec)
3065{
3066        struct insn_state state;
3067        struct symbol *func;
3068        int warnings = 0;
3069
3070        list_for_each_entry(func, &sec->symbol_list, list) {
3071                if (func->type != STT_FUNC)
3072                        continue;
3073
3074                init_insn_state(&state, sec);
3075                set_func_state(&state.cfi);
3076
3077                warnings += validate_symbol(file, sec, func, &state);
3078        }
3079
3080        return warnings;
3081}
3082
3083static int validate_vmlinux_functions(struct objtool_file *file)
3084{
3085        struct section *sec;
3086        int warnings = 0;
3087
3088        sec = find_section_by_name(file->elf, ".noinstr.text");
3089        if (sec) {
3090                warnings += validate_section(file, sec);
3091                warnings += validate_unwind_hints(file, sec);
3092        }
3093
3094        sec = find_section_by_name(file->elf, ".entry.text");
3095        if (sec) {
3096                warnings += validate_section(file, sec);
3097                warnings += validate_unwind_hints(file, sec);
3098        }
3099
3100        return warnings;
3101}
3102
3103static int validate_functions(struct objtool_file *file)
3104{
3105        struct section *sec;
3106        int warnings = 0;
3107
3108        for_each_sec(file, sec) {
3109                if (!(sec->sh.sh_flags & SHF_EXECINSTR))
3110                        continue;
3111
3112                warnings += validate_section(file, sec);
3113        }
3114
3115        return warnings;
3116}
3117
3118static int validate_reachable_instructions(struct objtool_file *file)
3119{
3120        struct instruction *insn;
3121
3122        if (file->ignore_unreachables)
3123                return 0;
3124
3125        for_each_insn(file, insn) {
3126                if (insn->visited || ignore_unreachable_insn(file, insn))
3127                        continue;
3128
3129                WARN_FUNC("unreachable instruction", insn->sec, insn->offset);
3130                return 1;
3131        }
3132
3133        return 0;
3134}
3135
3136int check(struct objtool_file *file)
3137{
3138        int ret, warnings = 0;
3139
3140        arch_initial_func_cfi_state(&initial_func_cfi);
3141
3142        ret = decode_sections(file);
3143        if (ret < 0)
3144                goto out;
3145        warnings += ret;
3146
3147        if (list_empty(&file->insn_list))
3148                goto out;
3149
3150        if (vmlinux && !validate_dup) {
3151                ret = validate_vmlinux_functions(file);
3152                if (ret < 0)
3153                        goto out;
3154
3155                warnings += ret;
3156                goto out;
3157        }
3158
3159        if (retpoline) {
3160                ret = validate_retpoline(file);
3161                if (ret < 0)
3162                        return ret;
3163                warnings += ret;
3164        }
3165
3166        ret = validate_functions(file);
3167        if (ret < 0)
3168                goto out;
3169        warnings += ret;
3170
3171        ret = validate_unwind_hints(file, NULL);
3172        if (ret < 0)
3173                goto out;
3174        warnings += ret;
3175
3176        if (!warnings) {
3177                ret = validate_reachable_instructions(file);
3178                if (ret < 0)
3179                        goto out;
3180                warnings += ret;
3181        }
3182
3183        ret = create_static_call_sections(file);
3184        if (ret < 0)
3185                goto out;
3186        warnings += ret;
3187
3188        if (mcount) {
3189                ret = create_mcount_loc_sections(file);
3190                if (ret < 0)
3191                        goto out;
3192                warnings += ret;
3193        }
3194
3195out:
3196        /*
3197         *  For now, don't fail the kernel build on fatal warnings.  These
3198         *  errors are still fairly common due to the growing matrix of
3199         *  supported toolchains and their recent pace of change.
3200         */
3201        return 0;
3202}
3203