linux/arch/nds32/kernel/traps.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/module.h>
#include <linux/personality.h>
#include <linux/kallsyms.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/sched/task_stack.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>

#include <asm/proc-fns.h>
#include <asm/unistd.h>
#include <asm/fpu.h>

#include <linux/ptrace.h>
#include <nds32_intrinsic.h>

extern void show_pte(struct mm_struct *mm, unsigned long addr);

/*
 * Dump out the contents of some memory nicely...
 */
void dump_mem(const char *lvl, unsigned long bottom, unsigned long top)
{
        unsigned long first;
        mm_segment_t fs;
        int i;

        /*
         * We need to switch to kernel mode so that we can use __get_user
         * to safely read from kernel space.  Note that we now dump the
         * code first, just in case the backtrace kills us.
         */
        fs = get_fs();
        set_fs(KERNEL_DS);

        pr_emerg("%s(0x%08lx to 0x%08lx)\n", lvl, bottom, top);

        for (first = bottom & ~31; first < top; first += 32) {
                unsigned long p;
                char str[sizeof(" 12345678") * 8 + 1];

                memset(str, ' ', sizeof(str));
                str[sizeof(str) - 1] = '\0';

                for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
                        if (p >= bottom && p < top) {
                                unsigned long val;
                                if (__get_user(val, (unsigned long *)p) == 0)
                                        sprintf(str + i * 9, " %08lx", val);
                                else
                                        sprintf(str + i * 9, " ????????");
                        }
                }
                pr_emerg("%s%04lx:%s\n", lvl, first & 0xffff, str);
        }

        set_fs(fs);
}

EXPORT_SYMBOL(dump_mem);

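/*
 * Dump the instruction words around the faulting PC for the "Code:"
 * line of an oops report.
 */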
static void dump_instr(struct pt_regs *regs)
{
        unsigned long addr = instruction_pointer(regs);
        mm_segment_t fs;
        char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
        int i;

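        /* Instruction dumping is currently disabled: return before touching memory. */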
        return;
        /*
         * We need to switch to kernel mode so that we can use __get_user
         * to safely read from kernel space.  Note that we now dump the
         * code first, just in case the backtrace kills us.
         */
        fs = get_fs();
        set_fs(KERNEL_DS);

        pr_emerg("Code: ");
        for (i = -4; i < 1; i++) {
                unsigned int val, bad;

                bad = __get_user(val, &((u32 *) addr)[i]);

                if (!bad) {
                        p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
                } else {
                        p += sprintf(p, "bad PC value");
                        break;
                }
        }
        pr_emerg("Code: %s\n", str);

        set_fs(fs);
}

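/*
 * Print a raw kernel call trace.  Without frame pointers the stack is
 * scanned for kernel text addresses; with frame pointers the $fp chain
 * is followed.  The walk is bounded by LOOP_TIMES iterations.
 */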
#define LOOP_TIMES (100)
static void __dump(struct task_struct *tsk, unsigned long *base_reg)
{
        unsigned long ret_addr;
        int cnt = LOOP_TIMES, graph = 0;
        pr_emerg("Call Trace:\n");
        if (!IS_ENABLED(CONFIG_FRAME_POINTER)) {
                while (!kstack_end(base_reg)) {
                        ret_addr = *base_reg++;
                        if (__kernel_text_address(ret_addr)) {
                                ret_addr = ftrace_graph_ret_addr(
                                                tsk, &graph, ret_addr, NULL);
                                print_ip_sym(ret_addr);
                        }
                        if (--cnt < 0)
                                break;
                }
        } else {
                while (!kstack_end((void *)base_reg) &&
                       !((unsigned long)base_reg & 0x3) &&
                       ((unsigned long)base_reg >= TASK_SIZE)) {
                        unsigned long next_fp;
                        ret_addr = base_reg[LP_OFFSET];
                        next_fp = base_reg[FP_OFFSET];
                        if (__kernel_text_address(ret_addr)) {
                                ret_addr = ftrace_graph_ret_addr(
                                                tsk, &graph, ret_addr, NULL);
                                print_ip_sym(ret_addr);
                        }
                        if (--cnt < 0)
                                break;
                        base_reg = (unsigned long *)next_fp;
                }
        }
        pr_emerg("\n");
}

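/*
 * Pick the starting stack or frame pointer for the given task (the
 * saved context for a sleeping task, the live $sp/$fp for the current
 * one) and hand it to __dump().
 */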
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
        unsigned long *base_reg;

        if (!tsk)
                tsk = current;
        if (!IS_ENABLED(CONFIG_FRAME_POINTER)) {
                if (tsk != current)
                        base_reg = (unsigned long *)(tsk->thread.cpu_context.sp);
                else
                        __asm__ __volatile__("\tori\t%0, $sp, #0\n":"=r"(base_reg));
        } else {
                if (tsk != current)
                        base_reg = (unsigned long *)(tsk->thread.cpu_context.fp);
                else
                        __asm__ __volatile__("\tori\t%0, $fp, #0\n":"=r"(base_reg));
        }
        __dump(tsk, base_reg);
        barrier();
}

DEFINE_SPINLOCK(die_lock);

/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
        struct task_struct *tsk = current;
        static int die_counter;

        console_verbose();
        spin_lock_irq(&die_lock);
        bust_spinlocks(1);

        pr_emerg("Internal error: %s: %x [#%d]\n", str, err, ++die_counter);
        print_modules();
        pr_emerg("CPU: %i\n", smp_processor_id());
        show_regs(regs);
        pr_emerg("Process %s (pid: %d, stack limit = 0x%p)\n",
                 tsk->comm, tsk->pid, end_of_stack(tsk));

        if (!user_mode(regs) || in_interrupt()) {
                dump_mem("Stack: ", regs->sp, (regs->sp + PAGE_SIZE) & PAGE_MASK);
                dump_instr(regs);
                dump_stack();
        }

        bust_spinlocks(0);
        spin_unlock_irq(&die_lock);
        do_exit(SIGSEGV);
}

EXPORT_SYMBOL(die);

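/* Like die(), but only if the trap happened in kernel mode. */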
void die_if_kernel(const char *str, struct pt_regs *regs, int err)
{
        if (user_mode(regs))
                return;

        die(str, regs, err);
}

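/*
 * Handle a syscall number the kernel does not recognize: non-Linux
 * personalities get SIGSEGV, otherwise SIGILL is raised at the trapping
 * instruction.
 */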
int bad_syscall(int n, struct pt_regs *regs)
{
        if (current->personality != PER_LINUX) {
                send_sig(SIGSEGV, current, 1);
                return regs->uregs[0];
        }

        force_sig_fault(SIGILL, ILL_ILLTRP,
                        (void __user *)instruction_pointer(regs) - 4);
        die_if_kernel("Oops - bad syscall", regs, n);
        return regs->uregs[0];
}

void __pte_error(const char *file, int line, unsigned long val)
{
        pr_emerg("%s:%d: bad pte %08lx.\n", file, line, val);
}

void __pmd_error(const char *file, int line, unsigned long val)
{
        pr_emerg("%s:%d: bad pmd %08lx.\n", file, line, val);
}

void __pgd_error(const char *file, int line, unsigned long val)
{
        pr_emerg("%s:%d: bad pgd %08lx.\n", file, line, val);
}

extern char *exception_vector, *exception_vector_end;
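/* Nothing to do here; the vector table is installed in early_trap_init(). */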
void __init trap_init(void)
{
        return;
}

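/*
 * Copy the exception vector table to its run-time base, switch the CPU
 * to it and set the initial interrupt mask.
 */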
void __init early_trap_init(void)
{
        unsigned long ivb = 0;
        unsigned long base = PAGE_OFFSET;

        memcpy((unsigned long *)base, (unsigned long *)&exception_vector,
               ((unsigned long)&exception_vector_end -
                (unsigned long)&exception_vector));
        ivb = __nds32__mfsr(NDS32_SR_IVB);
        /* Check platform support. */
        if (((ivb & IVB_mskNIVIC) >> IVB_offNIVIC) < 2)
                panic("IVIC mode is not allowed on the platform with interrupt controller\n");
        __nds32__mtsr((ivb & ~IVB_mskESZ) | (IVB_valESZ16 << IVB_offESZ) |
                      IVB_BASE, NDS32_SR_IVB);
        __nds32__mtsr(INT_MASK_INITAIAL_VAL, NDS32_SR_INT_MASK);

        /*
         * 0x800 = 128 vectors * 16 bytes; flushing a single page is
         * enough to cover the whole vector table.
         */
        cpu_cache_wbinval_page(base, true);
}

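/* Record the debug trap in the thread struct and deliver SIGTRAP. */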
static void send_sigtrap(struct pt_regs *regs, int error_code, int si_code)
{
        struct task_struct *tsk = current;

        tsk->thread.trap_no = ENTRY_DEBUG_RELATED;
        tsk->thread.error_code = error_code;

        force_sig_fault(SIGTRAP, si_code,
                        (void __user *)instruction_pointer(regs));
}

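/*
 * Debug exception entry: run the die notifier chain first, then either
 * signal the user task or try an exception fixup in kernel mode.
 */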
void do_debug_trap(unsigned long entry, unsigned long addr,
                   unsigned long type, struct pt_regs *regs)
{
        if (notify_die(DIE_OOPS, "Oops", regs, addr, type, SIGTRAP)
            == NOTIFY_STOP)
                return;

        if (user_mode(regs)) {
                /* trap_signal */
                send_sigtrap(regs, 0, TRAP_BRKPT);
        } else {
                /* kernel_trap */
                if (!fixup_exception(regs))
                        die("unexpected kernel_trap", regs, 0);
        }
}

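/*
 * Fallback paths for interrupts and exceptions that have no handler:
 * fatal in kernel mode, SIGKILL for user tasks.
 */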
void unhandled_interruption(struct pt_regs *regs)
{
        pr_emerg("unhandled_interruption\n");
        show_regs(regs);
        if (!user_mode(regs))
                do_exit(SIGKILL);
        force_sig(SIGKILL);
}

void unhandled_exceptions(unsigned long entry, unsigned long addr,
                          unsigned long type, struct pt_regs *regs)
{
        pr_emerg("Unhandled Exception: entry: %lx addr:%lx itype:%lx\n", entry,
                 addr, type);
        show_regs(regs);
        if (!user_mode(regs))
                do_exit(SIGKILL);
        force_sig(SIGKILL);
}

extern int do_page_fault(unsigned long entry, unsigned long addr,
                         unsigned int error_code, struct pt_regs *regs);

/*
 * 2:DEF dispatch for TLB MISC exception handler
 */

void do_dispatch_tlb_misc(unsigned long entry, unsigned long addr,
                          unsigned long type, struct pt_regs *regs)
{
        type = type & (ITYPE_mskINST | ITYPE_mskETYPE);
        if ((type & ITYPE_mskETYPE) < 5) {
                /* Permission exceptions */
                do_page_fault(entry, addr, type, regs);
        } else
                unhandled_exceptions(entry, addr, type, regs);
}

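/*
 * Reserved (undefined) instruction: fatal in kernel mode, SIGILL for
 * user tasks.
 */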
void do_revinsn(struct pt_regs *regs)
{
        pr_emerg("Reserved Instruction\n");
        show_regs(regs);
        if (!user_mode(regs))
                do_exit(SIGILL);
        force_sig(SIGILL);
}

#ifdef CONFIG_ALIGNMENT_TRAP
extern int unalign_access_mode;
extern int do_unaligned_access(unsigned long addr, struct pt_regs *regs);
#endif
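/*
 * General exception dispatcher: route alignment faults, reserved
 * instructions, coprocessor/FPU exceptions and debug traps to their
 * handlers; everything else is treated as unhandled.
 */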
void do_dispatch_general(unsigned long entry, unsigned long addr,
                         unsigned long itype, struct pt_regs *regs,
                         unsigned long oipc)
{
        unsigned int swid = itype >> ITYPE_offSWID;
        unsigned long type = itype & (ITYPE_mskINST | ITYPE_mskETYPE);
        if (type == ETYPE_ALIGNMENT_CHECK) {
#ifdef CONFIG_ALIGNMENT_TRAP
                /* Alignment check */
                if (user_mode(regs) && unalign_access_mode) {
                        int ret;
                        ret = do_unaligned_access(addr, regs);

                        if (ret == 0)
                                return;

                        if (ret == -EFAULT)
                                pr_emerg("Unhandled unaligned access exception\n");
                }
#endif
                do_page_fault(entry, addr, type, regs);
        } else if (type == ETYPE_RESERVED_INSTRUCTION) {
                /* Reserved instruction */
                do_revinsn(regs);
        } else if (type == ETYPE_COPROCESSOR) {
                /* Coprocessor */
#if IS_ENABLED(CONFIG_FPU)
                unsigned int fucop_exist = __nds32__mfsr(NDS32_SR_FUCOP_EXIST);
                unsigned int cpid = ((itype & ITYPE_mskCPID) >> ITYPE_offCPID);

                if ((cpid == FPU_CPID) &&
                    (fucop_exist & FUCOP_EXIST_mskCP0ISFPU)) {
                        unsigned int subtype = (itype & ITYPE_mskSTYPE);

                        if (do_fpu_exception(subtype, regs))
                                return;
                }
#endif
                unhandled_exceptions(entry, addr, type, regs);
        } else if (type == ETYPE_TRAP && swid == SWID_RAISE_INTERRUPT_LEVEL) {
                /* trap, used as a v3 EDM target debugging workaround */
                /*
                 * DIPC (OIPC) is passed as a parameter before interrupts
                 * are enabled, so DIPC is not corrupted even if an
                 * interrupt comes in.
                 */
                /*
                 * 1. update IPC
                 * 2. update pt_regs ipc with oipc
                 * 3. update pt_regs ipsw (clear DEX)
                 */
                __asm__ volatile ("mtsr %0, $IPC\n\t"::"r" (oipc));
                regs->ipc = oipc;
                if (regs->pipsw & PSW_mskDEX) {
                        pr_emerg("Nested debug exception may have occurred\n");
                        pr_emerg("ipc:%08x pipc:%08x\n",
                                 (unsigned int)regs->ipc,
                                 (unsigned int)regs->pipc);
                }
                do_debug_trap(entry, addr, itype, regs);
                regs->ipsw &= ~PSW_mskDEX;
        } else
                unhandled_exceptions(entry, addr, type, regs);
}