linux/arch/x86/kernel/unwind_orc.c
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/objtool.h>
#include <linux/module.h>
#include <linux/sort.h>
#include <asm/ptrace.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>
#include <asm/orc_types.h>
#include <asm/orc_lookup.h>

#define orc_warn(fmt, ...) \
        printk_deferred_once(KERN_WARNING "WARNING: " fmt, ##__VA_ARGS__)

#define orc_warn_current(args...)                                       \
({                                                                      \
        if (state->task == current)                                     \
                orc_warn(args);                                         \
})

extern int __start_orc_unwind_ip[];
extern int __stop_orc_unwind_ip[];
extern struct orc_entry __start_orc_unwind[];
extern struct orc_entry __stop_orc_unwind[];

static bool orc_init __ro_after_init;
static unsigned int lookup_num_blocks __ro_after_init;

static inline unsigned long orc_ip(const int *ip)
{
        return (unsigned long)ip + *ip;
}
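
/*
 * A worked example for orc_ip(): an .orc_unwind_ip slot at address
 * 0xffffffff81000100 holding the value 0x30 describes the text address
 * 0xffffffff81000130 (addresses are illustrative).  Storing each IP as
 * a 32-bit offset relative to its own slot halves the table size on
 * x86-64 and keeps the table valid wherever the kernel text ends up.
 */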

static struct orc_entry *__orc_find(int *ip_table, struct orc_entry *u_table,
                                    unsigned int num_entries, unsigned long ip)
{
        int *first = ip_table;
        int *last = ip_table + num_entries - 1;
        int *mid = first, *found = first;

        if (!num_entries)
                return NULL;

        /*
         * Do a binary range search to find the rightmost duplicate of a given
         * starting address.  Some entries are section terminators which are
         * "weak" entries for ensuring there are no gaps.  They should be
         * ignored when they conflict with a real entry.
         */
        while (first <= last) {
                mid = first + ((last - first) / 2);

                if (orc_ip(mid) <= ip) {
                        found = mid;
                        first = mid + 1;
                } else
                        last = mid - 1;
        }

        return u_table + (found - ip_table);
}
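
/*
 * Illustration: if the table's IPs decode to { 0x10, 0x20, 0x20, 0x30 }
 * and ip == 0x25, the loop converges on the *rightmost* entry with
 * orc_ip() <= ip, i.e. the second 0x20.  Picking the last duplicate is
 * what lets a real entry shadow a "weak" terminator sorted to its left.
 */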

#ifdef CONFIG_MODULES
static struct orc_entry *orc_module_find(unsigned long ip)
{
        struct module *mod;

        mod = __module_address(ip);
        if (!mod || !mod->arch.orc_unwind || !mod->arch.orc_unwind_ip)
                return NULL;
        return __orc_find(mod->arch.orc_unwind_ip, mod->arch.orc_unwind,
                          mod->arch.num_orcs, ip);
}
#else
static struct orc_entry *orc_module_find(unsigned long ip)
{
        return NULL;
}
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
static struct orc_entry *orc_find(unsigned long ip);

/*
 * Ftrace dynamic trampolines do not have orc entries of their own.
 * But they are copies of the ftrace entries that are static and
 * defined in ftrace_*.S, which do have orc entries.
 *
 * If the unwinder comes across a ftrace trampoline, then find the
 * ftrace function that was used to create it, and use that ftrace
 * function's orc entry, as the placement of the return code in
 * the stack will be identical.
 */
static struct orc_entry *orc_ftrace_find(unsigned long ip)
{
        struct ftrace_ops *ops;
        unsigned long caller;

        ops = ftrace_ops_trampoline(ip);
        if (!ops)
                return NULL;

        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
                caller = (unsigned long)ftrace_regs_call;
        else
                caller = (unsigned long)ftrace_call;

        /* Prevent unlikely recursion */
        if (ip == caller)
                return NULL;

        return orc_find(caller);
}
#else
static struct orc_entry *orc_ftrace_find(unsigned long ip)
{
        return NULL;
}
#endif

/*
 * If we crash with IP==0, the last successfully executed instruction
 * was probably an indirect function call with a NULL function pointer,
 * and we don't have unwind information for NULL.
 * This hardcoded ORC entry for IP==0 allows us to unwind from a NULL function
 * pointer into its parent and then continue normally from there.
 */
static struct orc_entry null_orc_entry = {
        .sp_offset = sizeof(long),
        .sp_reg = ORC_REG_SP,
        .bp_reg = ORC_REG_UNDEFINED,
        .type = UNWIND_HINT_TYPE_CALL
};
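
/*
 * With sp_reg == ORC_REG_SP and sp_offset == sizeof(long), the entry
 * above describes the stack at the first instruction of any function:
 * the return address is at the top of the stack and the previous
 * frame's SP is one word above it -- which is exactly the state left
 * behind by a call through a NULL pointer.
 */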

/* Fake frame pointer entry -- used as a fallback for generated code */
static struct orc_entry orc_fp_entry = {
        .type           = UNWIND_HINT_TYPE_CALL,
        .sp_reg         = ORC_REG_BP,
        .sp_offset      = 16,
        .bp_reg         = ORC_REG_PREV_SP,
        .bp_offset      = -16,
        .end            = 0,
};
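
/*
 * This fallback assumes the standard frame-pointer prologue
 * (push %rbp; mov %rsp, %rbp): the previous SP is RBP + 16 (saved RBP
 * plus return address), and the saved RBP sits at -16 from that
 * previous SP, i.e. at the address RBP currently points to.
 */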

static struct orc_entry *orc_find(unsigned long ip)
{
        static struct orc_entry *orc;

        if (ip == 0)
                return &null_orc_entry;

        /* For non-init vmlinux addresses, use the fast lookup table: */
        if (ip >= LOOKUP_START_IP && ip < LOOKUP_STOP_IP) {
                unsigned int idx, start, stop;

                idx = (ip - LOOKUP_START_IP) / LOOKUP_BLOCK_SIZE;

                if (unlikely((idx >= lookup_num_blocks-1))) {
                        orc_warn("WARNING: bad lookup idx: idx=%u num=%u ip=%pB\n",
                                 idx, lookup_num_blocks, (void *)ip);
                        return NULL;
                }

                start = orc_lookup[idx];
                stop = orc_lookup[idx + 1] + 1;

                if (unlikely((__start_orc_unwind + start >= __stop_orc_unwind) ||
                             (__start_orc_unwind + stop > __stop_orc_unwind))) {
                        orc_warn("WARNING: bad lookup value: idx=%u num=%u start=%u stop=%u ip=%pB\n",
                                 idx, lookup_num_blocks, start, stop, (void *)ip);
                        return NULL;
                }

                return __orc_find(__start_orc_unwind_ip + start,
                                  __start_orc_unwind + start, stop - start, ip);
        }

        /* vmlinux .init slow lookup: */
        if (init_kernel_text(ip))
                return __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
                                  __stop_orc_unwind_ip - __start_orc_unwind_ip, ip);

        /* Module lookup: */
        orc = orc_module_find(ip);
        if (orc)
                return orc;

        return orc_ftrace_find(ip);
}
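
/*
 * Fast-path illustration (block size per asm/orc_lookup.h, currently
 * 256 bytes): an ip 0x1050 bytes past LOOKUP_START_IP falls in block
 * idx == 16; orc_lookup[16] and orc_lookup[17] then bound the small
 * slice of the sorted table which can cover that block, so the binary
 * search touches a handful of entries instead of the whole section.
 */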

#ifdef CONFIG_MODULES

static DEFINE_MUTEX(sort_mutex);
static int *cur_orc_ip_table = __start_orc_unwind_ip;
static struct orc_entry *cur_orc_table = __start_orc_unwind;

static void orc_sort_swap(void *_a, void *_b, int size)
{
        struct orc_entry *orc_a, *orc_b;
        struct orc_entry orc_tmp;
        int *a = _a, *b = _b, tmp;
        int delta = _b - _a;

        /* Swap the .orc_unwind_ip entries: */
        tmp = *a;
        *a = *b + delta;
        *b = tmp - delta;

        /* Swap the corresponding .orc_unwind entries: */
        orc_a = cur_orc_table + (a - cur_orc_ip_table);
        orc_b = cur_orc_table + (b - cur_orc_ip_table);
        orc_tmp = *orc_a;
        *orc_a = *orc_b;
        *orc_b = orc_tmp;
}
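
/*
 * The +/- delta above compensates for the self-relative encoding: a
 * slot at address A holding value va targets A + va.  After the swap
 * that value lives at B = A + delta, so it must become va - delta to
 * keep targeting the same text address (and symmetrically the value
 * moving from B to A gains delta).
 */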

static int orc_sort_cmp(const void *_a, const void *_b)
{
        struct orc_entry *orc_a;
        const int *a = _a, *b = _b;
        unsigned long a_val = orc_ip(a);
        unsigned long b_val = orc_ip(b);

        if (a_val > b_val)
                return 1;
        if (a_val < b_val)
                return -1;

        /*
         * The "weak" section terminator entries need to always be on the left
         * to ensure the lookup code skips them in favor of real entries.
         * These terminator entries exist to handle any gaps created by
         * whitelisted .o files which didn't get objtool generation.
         */
        orc_a = cur_orc_table + (a - cur_orc_ip_table);
        return orc_a->sp_reg == ORC_REG_UNDEFINED && !orc_a->end ? -1 : 1;
}
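
/*
 * Note the comparator never returns 0 for entries with equal IPs:
 * terminators sort strictly left of real entries.  Together with
 * __orc_find() returning the rightmost duplicate, a real entry always
 * wins over a terminator at the same address.
 */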

void unwind_module_init(struct module *mod, void *_orc_ip, size_t orc_ip_size,
                        void *_orc, size_t orc_size)
{
        int *orc_ip = _orc_ip;
        struct orc_entry *orc = _orc;
        unsigned int num_entries = orc_ip_size / sizeof(int);

        WARN_ON_ONCE(orc_ip_size % sizeof(int) != 0 ||
                     orc_size % sizeof(*orc) != 0 ||
                     num_entries != orc_size / sizeof(*orc));

        /*
         * The 'cur_orc_*' globals allow the orc_sort_swap() callback to
         * associate an .orc_unwind_ip table entry with its corresponding
         * .orc_unwind entry so they can both be swapped.
         */
        mutex_lock(&sort_mutex);
        cur_orc_ip_table = orc_ip;
        cur_orc_table = orc;
        sort(orc_ip, num_entries, sizeof(int), orc_sort_cmp, orc_sort_swap);
        mutex_unlock(&sort_mutex);

        mod->arch.orc_unwind_ip = orc_ip;
        mod->arch.orc_unwind = orc;
        mod->arch.num_orcs = num_entries;
}
#endif

void __init unwind_init(void)
{
        size_t orc_ip_size = (void *)__stop_orc_unwind_ip - (void *)__start_orc_unwind_ip;
        size_t orc_size = (void *)__stop_orc_unwind - (void *)__start_orc_unwind;
        size_t num_entries = orc_ip_size / sizeof(int);
        struct orc_entry *orc;
        int i;

        if (!num_entries || orc_ip_size % sizeof(int) != 0 ||
            orc_size % sizeof(struct orc_entry) != 0 ||
            num_entries != orc_size / sizeof(struct orc_entry)) {
                orc_warn("WARNING: Bad or missing .orc_unwind table.  Disabling unwinder.\n");
                return;
        }

        /*
         * Note, the orc_unwind and orc_unwind_ip tables were already
         * sorted at build time via the 'sorttable' tool.
         * It's ready for binary search straight away, no need to sort it.
         */

        /* Initialize the fast lookup table: */
        lookup_num_blocks = orc_lookup_end - orc_lookup;
        for (i = 0; i < lookup_num_blocks-1; i++) {
                orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
                                 num_entries,
                                 LOOKUP_START_IP + (LOOKUP_BLOCK_SIZE * i));
                if (!orc) {
                        orc_warn("WARNING: Corrupt .orc_unwind table.  Disabling unwinder.\n");
                        return;
                }

                orc_lookup[i] = orc - __start_orc_unwind;
        }

        /* Initialize the ending block: */
        orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind, num_entries,
                         LOOKUP_STOP_IP);
        if (!orc) {
                orc_warn("WARNING: Corrupt .orc_unwind table.  Disabling unwinder.\n");
                return;
        }
        orc_lookup[lookup_num_blocks-1] = orc - __start_orc_unwind;

        orc_init = true;
}

unsigned long unwind_get_return_address(struct unwind_state *state)
{
        if (unwind_done(state))
                return 0;

        return __kernel_text_address(state->ip) ? state->ip : 0;
}
EXPORT_SYMBOL_GPL(unwind_get_return_address);

unsigned long *unwind_get_return_address_ptr(struct unwind_state *state)
{
        if (unwind_done(state))
                return NULL;

        if (state->regs)
                return &state->regs->ip;

        if (state->sp)
                return (unsigned long *)state->sp - 1;

        return NULL;
}
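
/*
 * For the no-regs case above: after a frame has been unwound,
 * state->sp holds the stack pointer value from *after* the return
 * address was popped, so the slot containing that return address sits
 * one word below -- hence (unsigned long *)state->sp - 1.
 */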

static bool stack_access_ok(struct unwind_state *state, unsigned long _addr,
                            size_t len)
{
        struct stack_info *info = &state->stack_info;
        void *addr = (void *)_addr;

        if (on_stack(info, addr, len))
                return true;

        /* The access must lie entirely within the stack that was found: */
        return !get_stack_info(addr, state->task, info, &state->stack_mask) &&
               on_stack(info, addr, len);
}

static bool deref_stack_reg(struct unwind_state *state, unsigned long addr,
                            unsigned long *val)
{
        if (!stack_access_ok(state, addr, sizeof(long)))
                return false;

        *val = READ_ONCE_NOCHECK(*(unsigned long *)addr);
        return true;
}

static bool deref_stack_regs(struct unwind_state *state, unsigned long addr,
                             unsigned long *ip, unsigned long *sp)
{
        struct pt_regs *regs = (struct pt_regs *)addr;

        /* x86-32 support will be more complicated due to the &regs->sp hack */
        BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_32));

        if (!stack_access_ok(state, addr, sizeof(struct pt_regs)))
                return false;

        *ip = regs->ip;
        *sp = regs->sp;
        return true;
}

static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr,
                                  unsigned long *ip, unsigned long *sp)
{
        struct pt_regs *regs = (void *)addr - IRET_FRAME_OFFSET;

        if (!stack_access_ok(state, addr, IRET_FRAME_SIZE))
                return false;

        *ip = regs->ip;
        *sp = regs->sp;
        return true;
}
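
/*
 * An iret frame holds only the tail of pt_regs (ip, cs, flags, sp,
 * ss).  Backing the pointer up by IRET_FRAME_OFFSET lets regs->ip and
 * regs->sp resolve to the right slots, while stack_access_ok() still
 * validates only the IRET_FRAME_SIZE bytes that actually exist.
 */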

/*
 * If state->regs is non-NULL, and points to a full pt_regs, just get the reg
 * value from state->regs.
 *
 * Otherwise, if state->regs just points to IRET regs, and the previous frame
 * had full regs, it's safe to get the value from the previous regs.  This can
 * happen when early/late IRQ entry code gets interrupted by an NMI.
 */
static bool get_reg(struct unwind_state *state, unsigned int reg_off,
                    unsigned long *val)
{
        unsigned int reg = reg_off/8;

        if (!state->regs)
                return false;

        if (state->full_regs) {
                *val = ((unsigned long *)state->regs)[reg];
                return true;
        }

        if (state->prev_regs) {
                *val = ((unsigned long *)state->prev_regs)[reg];
                return true;
        }

        return false;
}
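
/*
 * reg_off is a byte offset produced by offsetof() on struct pt_regs,
 * which on x86-64 is a sequence of 8-byte slots, so reg_off/8 turns it
 * into an index into the struct viewed as an array of longs.  For
 * example, offsetof(struct pt_regs, r10) == 56 selects slot 7.
 */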

bool unwind_next_frame(struct unwind_state *state)
{
        unsigned long ip_p, sp, tmp, orig_ip = state->ip, prev_sp = state->sp;
        enum stack_type prev_type = state->stack_info.type;
        struct orc_entry *orc;
        bool indirect = false;

        if (unwind_done(state))
                return false;

        /* Don't let modules unload while we're reading their ORC data. */
        preempt_disable();

        /* End-of-stack check for user tasks: */
        if (state->regs && user_mode(state->regs))
                goto the_end;

        /*
         * Find the orc_entry associated with the text address.
         *
         * For a call frame (as opposed to a signal frame), state->ip points to
         * the instruction after the call.  That instruction's stack layout
         * could be different from the call instruction's layout, for example
         * if the call was to a noreturn function.  So get the ORC data for the
         * call instruction itself.
         */
        orc = orc_find(state->signal ? state->ip : state->ip - 1);
        if (!orc) {
                /*
                 * As a fallback, try to assume this code uses a frame pointer.
                 * This is useful for generated code, like BPF, which ORC
                 * doesn't know about.  This is just a guess, so the rest of
                 * the unwind is no longer considered reliable.
                 */
                orc = &orc_fp_entry;
                state->error = true;
        }

        /* End-of-stack check for kernel threads: */
        if (orc->sp_reg == ORC_REG_UNDEFINED) {
                if (!orc->end)
                        goto err;

                goto the_end;
        }

        /* Find the previous frame's stack: */
        switch (orc->sp_reg) {
        case ORC_REG_SP:
                sp = state->sp + orc->sp_offset;
                break;

        case ORC_REG_BP:
                sp = state->bp + orc->sp_offset;
                break;

        case ORC_REG_SP_INDIRECT:
                sp = state->sp + orc->sp_offset;
                indirect = true;
                break;

        case ORC_REG_BP_INDIRECT:
                sp = state->bp + orc->sp_offset;
                indirect = true;
                break;

        case ORC_REG_R10:
                if (!get_reg(state, offsetof(struct pt_regs, r10), &sp)) {
                        orc_warn_current("missing R10 value at %pB\n",
                                         (void *)state->ip);
                        goto err;
                }
                break;

        case ORC_REG_R13:
                if (!get_reg(state, offsetof(struct pt_regs, r13), &sp)) {
                        orc_warn_current("missing R13 value at %pB\n",
                                         (void *)state->ip);
                        goto err;
                }
                break;

        case ORC_REG_DI:
                if (!get_reg(state, offsetof(struct pt_regs, di), &sp)) {
                        orc_warn_current("missing RDI value at %pB\n",
                                         (void *)state->ip);
                        goto err;
                }
                break;

        case ORC_REG_DX:
                if (!get_reg(state, offsetof(struct pt_regs, dx), &sp)) {
                        orc_warn_current("missing DX value at %pB\n",
                                         (void *)state->ip);
                        goto err;
                }
                break;

        default:
                orc_warn("unknown SP base reg %d at %pB\n",
                         orc->sp_reg, (void *)state->ip);
                goto err;
        }

        if (indirect) {
                if (!deref_stack_reg(state, sp, &sp))
                        goto err;
        }

        /* Find IP, SP and possibly regs: */
        switch (orc->type) {
        case UNWIND_HINT_TYPE_CALL:
                ip_p = sp - sizeof(long);

                if (!deref_stack_reg(state, ip_p, &state->ip))
                        goto err;

                state->ip = ftrace_graph_ret_addr(state->task, &state->graph_idx,
                                                  state->ip, (void *)ip_p);

                state->sp = sp;
                state->regs = NULL;
                state->prev_regs = NULL;
                state->signal = false;
                break;

        case UNWIND_HINT_TYPE_REGS:
                if (!deref_stack_regs(state, sp, &state->ip, &state->sp)) {
                        orc_warn_current("can't access registers at %pB\n",
                                         (void *)orig_ip);
                        goto err;
                }

                state->regs = (struct pt_regs *)sp;
                state->prev_regs = NULL;
                state->full_regs = true;
                state->signal = true;
                break;

        case UNWIND_HINT_TYPE_REGS_PARTIAL:
                if (!deref_stack_iret_regs(state, sp, &state->ip, &state->sp)) {
                        orc_warn_current("can't access iret registers at %pB\n",
                                         (void *)orig_ip);
                        goto err;
                }

                if (state->full_regs)
                        state->prev_regs = state->regs;
                state->regs = (void *)sp - IRET_FRAME_OFFSET;
                state->full_regs = false;
                state->signal = true;
                break;

        default:
                orc_warn("unknown .orc_unwind entry type %d at %pB\n",
                         orc->type, (void *)orig_ip);
                goto err;
        }

        /* Find BP: */
        switch (orc->bp_reg) {
        case ORC_REG_UNDEFINED:
                if (get_reg(state, offsetof(struct pt_regs, bp), &tmp))
                        state->bp = tmp;
                break;

        case ORC_REG_PREV_SP:
                if (!deref_stack_reg(state, sp + orc->bp_offset, &state->bp))
                        goto err;
                break;

        case ORC_REG_BP:
                if (!deref_stack_reg(state, state->bp + orc->bp_offset, &state->bp))
                        goto err;
                break;

        default:
                orc_warn("unknown BP base reg %d for ip %pB\n",
                         orc->bp_reg, (void *)orig_ip);
                goto err;
        }

        /* Prevent a recursive loop due to bad ORC data: */
        if (state->stack_info.type == prev_type &&
            on_stack(&state->stack_info, (void *)state->sp, sizeof(long)) &&
            state->sp <= prev_sp) {
                orc_warn_current("stack going in the wrong direction? at %pB\n",
                                 (void *)orig_ip);
                goto err;
        }

        preempt_enable();
        return true;

err:
        state->error = true;

the_end:
        preempt_enable();
        state->stack_info.type = STACK_TYPE_UNKNOWN;
        return false;
}
EXPORT_SYMBOL_GPL(unwind_next_frame);

void __unwind_start(struct unwind_state *state, struct task_struct *task,
                    struct pt_regs *regs, unsigned long *first_frame)
{
        memset(state, 0, sizeof(*state));
        state->task = task;

        if (!orc_init)
                goto err;

        /*
         * Refuse to unwind the stack of a task while it's executing on another
         * CPU.  This check is racy, but that's ok: the unwinder has other
         * checks to prevent it from going off the rails.
         */
        if (task_on_another_cpu(task))
                goto err;

        if (regs) {
                if (user_mode(regs))
                        goto the_end;

                state->ip = regs->ip;
                state->sp = regs->sp;
                state->bp = regs->bp;
                state->regs = regs;
                state->full_regs = true;
                state->signal = true;

        } else if (task == current) {
                asm volatile("lea (%%rip), %0\n\t"
                             "mov %%rsp, %1\n\t"
                             "mov %%rbp, %2\n\t"
                             : "=r" (state->ip), "=r" (state->sp),
                               "=r" (state->bp));

        } else {
                struct inactive_task_frame *frame = (void *)task->thread.sp;

                state->sp = task->thread.sp + sizeof(*frame);
                state->bp = READ_ONCE_NOCHECK(frame->bp);
                state->ip = READ_ONCE_NOCHECK(frame->ret_addr);
                state->signal = (void *)state->ip == ret_from_fork;
        }

        if (get_stack_info((unsigned long *)state->sp, state->task,
                           &state->stack_info, &state->stack_mask)) {
                /*
                 * We weren't on a valid stack.  It's possible that
                 * we overflowed a valid stack into a guard page.
                 * See if the next page up is valid so that we can
                 * generate some kind of backtrace if this happens.
                 */
                void *next_page = (void *)PAGE_ALIGN((unsigned long)state->sp);
                state->error = true;
                if (get_stack_info(next_page, state->task, &state->stack_info,
                                   &state->stack_mask))
                        return;
        }

        /*
         * The caller can provide the address of the first frame directly
         * (first_frame) or indirectly (regs->sp) to indicate which stack frame
         * to start unwinding at.  Skip ahead until we reach it.
         */

        /* When starting from regs, skip the regs frame: */
        if (regs) {
                unwind_next_frame(state);
                return;
        }

        /* Otherwise, skip ahead to the user-specified starting frame: */
        while (!unwind_done(state) &&
               (!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
                state->sp < (unsigned long)first_frame))
                unwind_next_frame(state);

        return;

err:
        state->error = true;
the_end:
        state->stack_info.type = STACK_TYPE_UNKNOWN;
}
EXPORT_SYMBOL_GPL(__unwind_start);
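
/*
 * Typical use of this API -- a minimal sketch of the loop used by
 * callers such as arch_stack_walk() in arch/x86/kernel/stacktrace.c
 * (error handling and entry filtering omitted):
 *
 *      struct unwind_state state;
 *      unsigned long addr;
 *
 *      for (unwind_start(&state, task, regs, NULL);
 *           !unwind_done(&state); unwind_next_frame(&state)) {
 *              addr = unwind_get_return_address(&state);
 *              if (!addr)
 *                      break;
 *              printk("%pB\n", (void *)addr);
 *      }
 */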