linux/arch/tile/kernel/stack.c
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/pfn.h>
#include <linux/kallsyms.h>
#include <linux/stacktrace.h>
#include <linux/uaccess.h>
#include <linux/mmzone.h>
#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/hardirq.h>
#include <linux/string.h>
#include <asm/backtrace.h>
#include <asm/page.h>
#include <asm/ucontext.h>
#include <asm/switch_to.h>
#include <asm/sigframe.h>
#include <asm/stack.h>
#include <asm/vdso.h>
#include <arch/abi.h>
#include <arch/interrupts.h>

#define KBT_ONGOING     0  /* Backtrace still ongoing */
#define KBT_DONE        1  /* Backtrace cleanly completed */
#define KBT_RUNNING     2  /* Can't run backtrace on a running task */
#define KBT_LOOP        3  /* Backtrace entered a loop */

/* Is address on the specified kernel stack? */
static int in_kernel_stack(struct KBacktraceIterator *kbt, unsigned long sp)
{
        ulong kstack_base = (ulong) kbt->task->stack;
        if (kstack_base == 0)  /* corrupt task pointer; just follow stack... */
                return sp >= PAGE_OFFSET && sp < (unsigned long)high_memory;
        return sp >= kstack_base && sp < kstack_base + THREAD_SIZE;
}

/* Callback for backtracer; basically a glorified memcpy */
static bool read_memory_func(void *result, unsigned long address,
                             unsigned int size, void *vkbt)
{
        int retval;
        struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt;

        if (address == 0)
                return 0;
        if (__kernel_text_address(address)) {
                /* OK to read kernel code. */
        } else if (address >= PAGE_OFFSET) {
                /* We only tolerate kernel-space reads of this task's stack */
                if (!in_kernel_stack(kbt, address))
                        return 0;
        } else if (!kbt->is_current) {
                return 0;       /* can't read from other user address spaces */
        }
        pagefault_disable();
        retval = __copy_from_user_inatomic(result,
                                           (void __user __force *)address,
                                           size);
        pagefault_enable();
        return (retval == 0);
}

/* Return a pt_regs pointer for a valid fault handler frame */
static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
{
        const char *fault = NULL;  /* happy compiler */
        char fault_buf[64];
        unsigned long sp = kbt->it.sp;
        struct pt_regs *p;

        if (sp % sizeof(long) != 0)
                return NULL;
        if (!in_kernel_stack(kbt, sp))
                return NULL;
        if (!in_kernel_stack(kbt, sp + C_ABI_SAVE_AREA_SIZE + PTREGS_SIZE-1))
                return NULL;
        p = (struct pt_regs *)(sp + C_ABI_SAVE_AREA_SIZE);
        if (p->faultnum == INT_SWINT_1 || p->faultnum == INT_SWINT_1_SIGRETURN)
                fault = "syscall";
        else {
                if (kbt->verbose) {     /* else we aren't going to use it */
                        snprintf(fault_buf, sizeof(fault_buf),
                                 "interrupt %ld", p->faultnum);
                        fault = fault_buf;
                }
        }
        if (EX1_PL(p->ex1) == KERNEL_PL &&
            __kernel_text_address(p->pc) &&
            in_kernel_stack(kbt, p->sp) &&
            p->sp >= sp) {
                if (kbt->verbose)
                        pr_err("  <%s while in kernel mode>\n", fault);
        } else if (user_mode(p) &&
                   p->sp < PAGE_OFFSET && p->sp != 0) {
                if (kbt->verbose)
                        pr_err("  <%s while in user mode>\n", fault);
        } else {
                if (kbt->verbose && (p->pc != 0 || p->sp != 0 || p->ex1 != 0))
                        pr_err("  (odd fault: pc %#lx, sp %#lx, ex1 %#lx?)\n",
                               p->pc, p->sp, p->ex1);
                return NULL;
        }
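        /*
         * When profiling, don't continue the backtrace into a context
         * that was interrupted by one of the queued interrupts.
         */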
        if (kbt->profile && ((1ULL << p->faultnum) & QUEUED_INTERRUPTS) != 0)
                return NULL;
        return p;
}

/* Is the iterator pointing to a sigreturn trampoline? */
static int is_sigreturn(struct KBacktraceIterator *kbt)
{
        return kbt->task->mm &&
                (kbt->it.pc == ((ulong)kbt->task->mm->context.vdso_base +
                                (ulong)&__vdso_rt_sigreturn));
}

/* Return a pt_regs pointer for a valid signal handler frame */
static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt,
                                      struct rt_sigframe* kframe)
{
        BacktraceIterator *b = &kbt->it;

        if (is_sigreturn(kbt) && b->sp < PAGE_OFFSET &&
            b->sp % sizeof(long) == 0) {
                int retval;
                pagefault_disable();
                retval = __copy_from_user_inatomic(
                        kframe, (void __user __force *)b->sp,
                        sizeof(*kframe));
                pagefault_enable();
                if (retval != 0 ||
                    (unsigned int)(kframe->info.si_signo) >= _NSIG)
                        return NULL;
                if (kbt->verbose) {
                        pr_err("  <received signal %d>\n",
                               kframe->info.si_signo);
                }
                return (struct pt_regs *)&kframe->uc.uc_mcontext;
        }
        return NULL;
}

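/*
 * Restart the backtrace at a new context found at the current stack
 * pointer: either a saved fault-handler register frame or a signal
 * frame.  Returns nonzero if a new context was found.
 */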
static int KBacktraceIterator_restart(struct KBacktraceIterator *kbt)
{
        struct pt_regs *p;
        struct rt_sigframe kframe;

        p = valid_fault_handler(kbt);
        if (p == NULL)
                p = valid_sigframe(kbt, &kframe);
        if (p == NULL)
                return 0;
        backtrace_init(&kbt->it, read_memory_func, kbt,
                       p->pc, p->lr, p->sp, p->regs[52]);
        kbt->new_context = 1;
        return 1;
}

/* Find a frame that isn't a sigreturn, if there is one. */
static int KBacktraceIterator_next_item_inclusive(
        struct KBacktraceIterator *kbt)
{
        for (;;) {
                do {
                        if (!is_sigreturn(kbt))
                                return KBT_ONGOING;
                } while (backtrace_next(&kbt->it));

                if (!KBacktraceIterator_restart(kbt))
                        return KBT_DONE;
        }
}

/*
 * If the current sp is on a different page from what we recorded as
 * the top of the kernel stack the last time we context switched, we
 * have probably blown the stack, and nothing is going to work out
 * well.  If we can at least get a warning out, that may help with
 * debugging, though we probably won't be able to backtrace into the
 * code that actually did the recursive damage.
 */
static void validate_stack(struct pt_regs *regs)
{
        int cpu = raw_smp_processor_id();
        unsigned long ksp0 = get_current_ksp0();
        unsigned long ksp0_base = ksp0 & -THREAD_SIZE;
        unsigned long sp = stack_pointer;

        if (EX1_PL(regs->ex1) == KERNEL_PL && regs->sp >= ksp0) {
                pr_err("WARNING: cpu %d: kernel stack %#lx..%#lx underrun!\n"
                       "  sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
                       cpu, ksp0_base, ksp0, sp, regs->sp, regs->pc, regs->lr);
        }

        else if (sp < ksp0_base + sizeof(struct thread_info)) {
                pr_err("WARNING: cpu %d: kernel stack %#lx..%#lx overrun!\n"
                       "  sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
                       cpu, ksp0_base, ksp0, sp, regs->sp, regs->pc, regs->lr);
        }
}

void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
                             struct task_struct *t, struct pt_regs *regs)
{
        unsigned long pc, lr, sp, r52;
        int is_current;

        /*
         * Set up callback information.  We grab the kernel stack base
         * so we will allow reads of that address range.
         */
        is_current = (t == NULL || t == current);
        kbt->is_current = is_current;
        if (is_current)
                t = validate_current();
        kbt->task = t;
        kbt->verbose = 0;   /* override in caller if desired */
        kbt->profile = 0;   /* override in caller if desired */
        kbt->end = KBT_ONGOING;
        kbt->new_context = 1;
        if (is_current)
                validate_stack(regs);

        if (regs == NULL) {
                if (is_current || t->state == TASK_RUNNING) {
                        /* Can't do this; we need registers */
                        kbt->end = KBT_RUNNING;
                        return;
                }
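                /*
                 * The task is not running, so start the backtrace from
                 * the register state saved at its last context switch.
                 */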
                pc = get_switch_to_pc();
                lr = t->thread.pc;
                sp = t->thread.ksp;
                r52 = 0;
        } else {
                pc = regs->pc;
                lr = regs->lr;
                sp = regs->sp;
                r52 = regs->regs[52];
        }

        backtrace_init(&kbt->it, read_memory_func, kbt, pc, lr, sp, r52);
        kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
}
EXPORT_SYMBOL(KBacktraceIterator_init);

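/* Return nonzero once the backtrace has terminated (for any reason). */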
int KBacktraceIterator_end(struct KBacktraceIterator *kbt)
{
        return kbt->end != KBT_ONGOING;
}
EXPORT_SYMBOL(KBacktraceIterator_end);

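/*
 * Advance to the next frame, restarting across fault or signal
 * contexts as needed, and give up if the backtrace loops.
 */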
void KBacktraceIterator_next(struct KBacktraceIterator *kbt)
{
        unsigned long old_pc = kbt->it.pc, old_sp = kbt->it.sp;
        kbt->new_context = 0;
        if (!backtrace_next(&kbt->it) && !KBacktraceIterator_restart(kbt)) {
                kbt->end = KBT_DONE;
                return;
        }
        kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
        if (old_pc == kbt->it.pc && old_sp == kbt->it.sp) {
                /* Trapped in a loop; give up. */
                kbt->end = KBT_LOOP;
        }
}
EXPORT_SYMBOL(KBacktraceIterator_next);

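/*
 * Write a symbolic description of 'address' into 'buf': the kernel
 * symbol (plus module name, if any) for kernel addresses, or the
 * backing file/vma range for user addresses if mmap_sem is held.
 */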
static void describe_addr(struct KBacktraceIterator *kbt,
                          unsigned long address,
                          int have_mmap_sem, char *buf, size_t bufsize)
{
        struct vm_area_struct *vma;
        size_t namelen, remaining;
        unsigned long size, offset, adjust;
        char *p, *modname;
        const char *name;
        int rc;

        /*
         * Look one byte back for every caller frame (i.e. those that
         * aren't a new context) so we look up symbol data for the
         * call itself, not the following instruction, which may be on
         * a different line (or in a different function).
         */
        adjust = !kbt->new_context;
        address -= adjust;

        if (address >= PAGE_OFFSET) {
                /* Handle kernel symbols. */
                BUG_ON(bufsize < KSYM_NAME_LEN);
                name = kallsyms_lookup(address, &size, &offset,
                                       &modname, buf);
                if (name == NULL) {
                        buf[0] = '\0';
                        return;
                }
                namelen = strlen(buf);
                remaining = (bufsize - 1) - namelen;
                p = buf + namelen;
                rc = snprintf(p, remaining, "+%#lx/%#lx ",
                              offset + adjust, size);
                if (modname && rc < remaining)
                        snprintf(p + rc, remaining - rc, "[%s] ", modname);
                buf[bufsize-1] = '\0';
                return;
        }

        /* If we don't have the mmap_sem, we can't show any more info. */
        buf[0] = '\0';
        if (!have_mmap_sem)
                return;

        /* Find vma info. */
        vma = find_vma(kbt->task->mm, address);
        if (vma == NULL || address < vma->vm_start) {
                snprintf(buf, bufsize, "[unmapped address] ");
                return;
        }

        if (vma->vm_file) {
                p = file_path(vma->vm_file, buf, bufsize);
                if (IS_ERR(p))
                        p = "?";
                name = kbasename(p);
        } else {
                name = "anon";
        }

        /* Generate a string description of the vma info. */
        namelen = strlen(name);
        remaining = (bufsize - 1) - namelen;
        memmove(buf, name, namelen);
        snprintf(buf + namelen, remaining, "[%lx+%lx] ",
                 vma->vm_start, vma->vm_end - vma->vm_start);
}

/*
 * Avoid possible crash recursion during backtrace.  If it happens, it
 * makes it easy to lose the actual root cause of the failure, so we
 * put a simple guard on all the backtrace loops.
 */
static bool start_backtrace(void)
{
        if (current_thread_info()->in_backtrace) {
                pr_err("Backtrace requested while in backtrace!\n");
                return false;
        }
        current_thread_info()->in_backtrace = true;
        return true;
}

static void end_backtrace(void)
{
        current_thread_info()->in_backtrace = false;
}

/*
 * This method wraps the backtracer's more generic support.
 * It is only invoked from the architecture-specific code; show_stack()
 * and dump_stack() are architecture-independent entry points.
 */
void tile_show_stack(struct KBacktraceIterator *kbt)
{
        int i;
        int have_mmap_sem = 0;

        if (!start_backtrace())
                return;
        kbt->verbose = 1;
        i = 0;
        for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) {
                char namebuf[KSYM_NAME_LEN+100];
                unsigned long address = kbt->it.pc;

                /*
                 * Try to acquire the mmap_sem as we pass into userspace.
                 * If we're in an interrupt context, don't even try: it's
                 * not safe to call e.g. d_path() from an interrupt, since
                 * it uses spin locks without disabling interrupts.
                 * Note we test "kbt->task == current", not "kbt->is_current",
                 * since we're checking that "current" will work in d_path().
                 */
                if (kbt->task == current && address < PAGE_OFFSET &&
                    !have_mmap_sem && kbt->task->mm && !in_interrupt()) {
                        have_mmap_sem =
                                down_read_trylock(&kbt->task->mm->mmap_sem);
                }

                describe_addr(kbt, address, have_mmap_sem,
                              namebuf, sizeof(namebuf));

                pr_err("  frame %d: 0x%lx %s(sp 0x%lx)\n",
                       i++, address, namebuf, (unsigned long)(kbt->it.sp));

                if (i >= 100) {
                        pr_err("Stack dump truncated (%d frames)\n", i);
                        break;
                }
        }
        if (kbt->end == KBT_LOOP)
                pr_err("Stack dump stopped; next frame identical to this one\n");
        if (have_mmap_sem)
                up_read(&kbt->task->mm->mmap_sem);
        end_backtrace();
}
EXPORT_SYMBOL(tile_show_stack);

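/* Fill in a pt_regs with just the state the backtracer needs. */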
static struct pt_regs *regs_to_pt_regs(struct pt_regs *regs,
                                       ulong pc, ulong lr, ulong sp, ulong r52)
{
        memset(regs, 0, sizeof(struct pt_regs));
        regs->pc = pc;
        regs->lr = lr;
        regs->sp = sp;
        regs->regs[52] = r52;
        return regs;
}

/* Deprecated function currently only used by kernel_double_fault(). */
void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52)
{
        struct KBacktraceIterator kbt;
        struct pt_regs regs;

        regs_to_pt_regs(&regs, pc, lr, sp, r52);
        KBacktraceIterator_init(&kbt, NULL, &regs);
        tile_show_stack(&kbt);
}

/* This is called from KBacktraceIterator_init_current() */
void _KBacktraceIterator_init_current(struct KBacktraceIterator *kbt, ulong pc,
                                      ulong lr, ulong sp, ulong r52)
{
        struct pt_regs regs;
        KBacktraceIterator_init(kbt, NULL,
                                regs_to_pt_regs(&regs, pc, lr, sp, r52));
}

/*
 * Called from sched_show_task() with task != NULL, or dump_stack()
 * with task == NULL.  The esp argument is always NULL.
 */
void show_stack(struct task_struct *task, unsigned long *esp)
{
        struct KBacktraceIterator kbt;
        if (task == NULL || task == current) {
                KBacktraceIterator_init_current(&kbt);
                KBacktraceIterator_next(&kbt);  /* don't show first frame */
        } else {
                KBacktraceIterator_init(&kbt, task, NULL);
        }
        tile_show_stack(&kbt);
}

#ifdef CONFIG_STACKTRACE

/* Support generic Linux stack API too */

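/*
 * Common helper for the generic stacktrace API: walk the stack with
 * the KBacktraceIterator and record each frame's pc in 'trace'.
 */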
static void save_stack_trace_common(struct task_struct *task,
                                    struct pt_regs *regs,
                                    bool user,
                                    struct stack_trace *trace)
{
        struct KBacktraceIterator kbt;
        int skip = trace->skip;
        int i = 0;

        if (!start_backtrace())
                goto done;
        if (regs != NULL) {
                KBacktraceIterator_init(&kbt, NULL, regs);
        } else if (task == NULL || task == current) {
                KBacktraceIterator_init_current(&kbt);
                skip++;  /* don't show KBacktraceIterator_init_current */
        } else {
                KBacktraceIterator_init(&kbt, task, NULL);
        }
        for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt)) {
                if (skip) {
                        --skip;
                        continue;
                }
                if (i >= trace->max_entries ||
                    (!user && kbt.it.pc < PAGE_OFFSET))
                        break;
                trace->entries[i++] = kbt.it.pc;
        }
        end_backtrace();
done:
        if (i < trace->max_entries)
                trace->entries[i++] = ULONG_MAX;
        trace->nr_entries = i;
}

void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
{
        save_stack_trace_common(task, NULL, false, trace);
}
EXPORT_SYMBOL(save_stack_trace_tsk);

void save_stack_trace(struct stack_trace *trace)
{
        save_stack_trace_common(NULL, NULL, false, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);

void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
        save_stack_trace_common(NULL, regs, false, trace);
}

void save_stack_trace_user(struct stack_trace *trace)
{
        /* Trace user stack if we are not a kernel thread. */
        if (current->mm)
                save_stack_trace_common(NULL, task_pt_regs(current),
                                        true, trace);
        else if (trace->nr_entries < trace->max_entries)
                trace->entries[trace->nr_entries++] = ULONG_MAX;
}
#endif

/* In entry.S */
EXPORT_SYMBOL(KBacktraceIterator_init_current);