linux/arch/nds32/kernel/traps.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/module.h>
#include <linux/personality.h>
#include <linux/kallsyms.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/sched/task_stack.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>

#include <asm/proc-fns.h>
#include <asm/unistd.h>
#include <asm/fpu.h>

#include <linux/ptrace.h>
#include <nds32_intrinsic.h>

extern void show_pte(struct mm_struct *mm, unsigned long addr);

/*
 * Dump out the contents of some memory nicely...
 */
void dump_mem(const char *lvl, unsigned long bottom, unsigned long top)
{
        unsigned long first;
        mm_segment_t fs;
        int i;

        /*
         * We need to switch to kernel mode so that we can use __get_user
         * to safely read from kernel space.  Note that we now dump the
         * code first, just in case the backtrace kills us.
         */
        fs = get_fs();
        set_fs(KERNEL_DS);

        pr_emerg("%s(0x%08lx to 0x%08lx)\n", lvl, bottom, top);

        for (first = bottom & ~31; first < top; first += 32) {
                unsigned long p;
                char str[sizeof(" 12345678") * 8 + 1];

                memset(str, ' ', sizeof(str));
                str[sizeof(str) - 1] = '\0';

                for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
                        if (p >= bottom && p < top) {
                                unsigned long val;

                                if (__get_user(val, (unsigned long *)p) == 0)
                                        sprintf(str + i * 9, " %08lx", val);
                                else
                                        sprintf(str + i * 9, " ????????");
                        }
                }
                pr_emerg("%s%04lx:%s\n", lvl, first & 0xffff, str);
        }

        set_fs(fs);
}
EXPORT_SYMBOL(dump_mem);

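/*
 * Dump the instruction words around the faulting PC: the four words before
 * it and the word at the PC itself, the latter wrapped in parentheses.
 * Reads go through __get_user() under KERNEL_DS so a bad PC value is
 * reported instead of faulting again.
 */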
static void dump_instr(struct pt_regs *regs)
{
        unsigned long addr = instruction_pointer(regs);
        mm_segment_t fs;
        char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
        int i;

        /*
         * We need to switch to kernel mode so that we can use __get_user
         * to safely read from kernel space.  Note that we now dump the
         * code first, just in case the backtrace kills us.
         */
        fs = get_fs();
        set_fs(KERNEL_DS);

        for (i = -4; i < 1; i++) {
                unsigned int val, bad;

                bad = __get_user(val, &((u32 *) addr)[i]);

                if (!bad) {
                        p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
                } else {
                        p += sprintf(p, "bad PC value");
                        break;
                }
        }
        pr_emerg("Code: %s\n", str);

        set_fs(fs);
}

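/*
 * Core of the stack trace.  Without frame pointers, every word on the
 * stack that looks like a kernel text address is printed; with
 * CONFIG_FRAME_POINTER, the chain of saved frame pointers is followed and
 * the saved return address of each frame is printed.  LOOP_TIMES bounds
 * the walk so a corrupted stack cannot loop forever.
 */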
#define LOOP_TIMES (100)
static void __dump(struct task_struct *tsk, unsigned long *base_reg,
                   const char *loglvl)
{
        unsigned long ret_addr;
        int cnt = LOOP_TIMES, graph = 0;

        printk("%sCall Trace:\n", loglvl);
        if (!IS_ENABLED(CONFIG_FRAME_POINTER)) {
                while (!kstack_end(base_reg)) {
                        ret_addr = *base_reg++;
                        if (__kernel_text_address(ret_addr)) {
                                ret_addr = ftrace_graph_ret_addr(
                                                tsk, &graph, ret_addr, NULL);
                                print_ip_sym(loglvl, ret_addr);
                        }
                        if (--cnt < 0)
                                break;
                }
        } else {
                while (!kstack_end((void *)base_reg) &&
                       !((unsigned long)base_reg & 0x3) &&
                       ((unsigned long)base_reg >= TASK_SIZE)) {
                        unsigned long next_fp;

                        ret_addr = base_reg[LP_OFFSET];
                        next_fp = base_reg[FP_OFFSET];
                        if (__kernel_text_address(ret_addr)) {
                                ret_addr = ftrace_graph_ret_addr(
                                                tsk, &graph, ret_addr, NULL);
                                print_ip_sym(loglvl, ret_addr);
                        }
                        if (--cnt < 0)
                                break;
                        base_reg = (unsigned long *)next_fp;
                }
        }
        printk("%s\n", loglvl);
}

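/*
 * Arch implementation of show_stack(), used by dump_stack() and friends.
 * For the current task the live $sp (or $fp when frame pointers are
 * enabled) is read directly; for a sleeping task the values saved in its
 * cpu_context are used instead.
 */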
void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
        unsigned long *base_reg;

        if (!tsk)
                tsk = current;
        if (!IS_ENABLED(CONFIG_FRAME_POINTER)) {
                if (tsk != current)
                        base_reg = (unsigned long *)(tsk->thread.cpu_context.sp);
                else
                        __asm__ __volatile__("\tori\t%0, $sp, #0\n":"=r"(base_reg));
        } else {
                if (tsk != current)
                        base_reg = (unsigned long *)(tsk->thread.cpu_context.fp);
                else
                        __asm__ __volatile__("\tori\t%0, $fp, #0\n":"=r"(base_reg));
        }
        __dump(tsk, base_reg, loglvl);
        barrier();
}

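/* Serializes oops output from die() so concurrent failures do not interleave. */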
DEFINE_SPINLOCK(die_lock);

/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
        struct task_struct *tsk = current;
        static int die_counter;

        console_verbose();
        spin_lock_irq(&die_lock);
        bust_spinlocks(1);

        pr_emerg("Internal error: %s: %x [#%d]\n", str, err, ++die_counter);
        print_modules();
        pr_emerg("CPU: %i\n", smp_processor_id());
        show_regs(regs);
        pr_emerg("Process %s (pid: %d, stack limit = 0x%p)\n",
                 tsk->comm, tsk->pid, end_of_stack(tsk));

        if (!user_mode(regs) || in_interrupt()) {
                dump_mem("Stack: ", regs->sp, (regs->sp + PAGE_SIZE) & PAGE_MASK);
                dump_instr(regs);
                dump_stack();
        }

        bust_spinlocks(0);
        spin_unlock_irq(&die_lock);
        do_exit(SIGSEGV);
}
EXPORT_SYMBOL(die);

void die_if_kernel(const char *str, struct pt_regs *regs, int err)
{
        if (user_mode(regs))
                return;

        die(str, regs, err);
}

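/*
 * Called for syscall numbers the kernel does not recognize.  Non-Linux
 * personalities simply get SIGSEGV; otherwise the task receives SIGILL
 * with the signal address set to PC - 4 (the syscall instruction), and a
 * kernel-mode caller oopses via die_if_kernel().
 */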
int bad_syscall(int n, struct pt_regs *regs)
{
        if (current->personality != PER_LINUX) {
                send_sig(SIGSEGV, current, 1);
                return regs->uregs[0];
        }

        force_sig_fault(SIGILL, ILL_ILLTRP,
                        (void __user *)instruction_pointer(regs) - 4);
        die_if_kernel("Oops - bad syscall", regs, n);
        return regs->uregs[0];
}

void __pte_error(const char *file, int line, unsigned long val)
{
        pr_emerg("%s:%d: bad pte %08lx.\n", file, line, val);
}

void __pmd_error(const char *file, int line, unsigned long val)
{
        pr_emerg("%s:%d: bad pmd %08lx.\n", file, line, val);
}

void __pgd_error(const char *file, int line, unsigned long val)
{
        pr_emerg("%s:%d: bad pgd %08lx.\n", file, line, val);
}

extern char *exception_vector, *exception_vector_end;
void __init trap_init(void)
{
        return;
}

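/*
 * Install the exception vectors.  The vector code is copied to the start
 * of the kernel mapping (PAGE_OFFSET), the IVB system register is
 * programmed with the vector base and a 16-byte entry size, the initial
 * interrupt mask is set, and the page holding the vectors is written back
 * and invalidated from the caches.  The IVB NIVIC field is checked first
 * and the kernel panics if the platform does not support this setup.
 */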
void __init early_trap_init(void)
{
        unsigned long ivb = 0;
        unsigned long base = PAGE_OFFSET;

        memcpy((unsigned long *)base, (unsigned long *)&exception_vector,
               ((unsigned long)&exception_vector_end -
                (unsigned long)&exception_vector));
        ivb = __nds32__mfsr(NDS32_SR_IVB);
        /* Check platform support. */
        if (((ivb & IVB_mskNIVIC) >> IVB_offNIVIC) < 2)
                panic("IVIC mode is not supported on platforms with an interrupt controller\n");
        __nds32__mtsr((ivb & ~IVB_mskESZ) | (IVB_valESZ16 << IVB_offESZ) |
                      IVB_BASE, NDS32_SR_IVB);
        __nds32__mtsr(INT_MASK_INITAIAL_VAL, NDS32_SR_INT_MASK);

        /*
         * 0x800 = 128 vectors * 16 bytes.
         * It should be enough to flush a page.
         */
        cpu_cache_wbinval_page(base, true);
}

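/*
 * Record the debug trap in the task's thread struct and deliver SIGTRAP
 * with the faulting PC as the signal address.
 */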
static void send_sigtrap(struct pt_regs *regs, int error_code, int si_code)
{
        struct task_struct *tsk = current;

        tsk->thread.trap_no = ENTRY_DEBUG_RELATED;
        tsk->thread.error_code = error_code;

        force_sig_fault(SIGTRAP, si_code,
                        (void __user *)instruction_pointer(regs));
}

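/*
 * Debug exception dispatcher.  The die notifier chain gets the first
 * look; after that a user-mode trap turns into SIGTRAP/TRAP_BRKPT for the
 * current task, while a kernel-mode trap is either fixed up via the
 * exception tables or escalated to die().
 */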
void do_debug_trap(unsigned long entry, unsigned long addr,
                   unsigned long type, struct pt_regs *regs)
{
        if (notify_die(DIE_OOPS, "Oops", regs, addr, type, SIGTRAP)
            == NOTIFY_STOP)
                return;

        if (user_mode(regs)) {
                /* trap_signal */
                send_sigtrap(regs, 0, TRAP_BRKPT);
        } else {
                /* kernel_trap */
                if (!fixup_exception(regs))
                        die("unexpected kernel_trap", regs, 0);
        }
}

void unhandled_interruption(struct pt_regs *regs)
{
        pr_emerg("unhandled_interruption\n");
        show_regs(regs);
        if (!user_mode(regs))
                do_exit(SIGKILL);
        force_sig(SIGKILL);
}

void unhandled_exceptions(unsigned long entry, unsigned long addr,
                          unsigned long type, struct pt_regs *regs)
{
        pr_emerg("Unhandled Exception: entry: %lx addr:%lx itype:%lx\n", entry,
                 addr, type);
        show_regs(regs);
        if (!user_mode(regs))
                do_exit(SIGKILL);
        force_sig(SIGKILL);
}

extern int do_page_fault(unsigned long entry, unsigned long addr,
                         unsigned int error_code, struct pt_regs *regs);

/*
 * 2:DEF dispatch for TLB MISC exception handler
 */
void do_dispatch_tlb_misc(unsigned long entry, unsigned long addr,
                          unsigned long type, struct pt_regs *regs)
{
        type = type & (ITYPE_mskINST | ITYPE_mskETYPE);
        if ((type & ITYPE_mskETYPE) < 5) {
                /* Permission exceptions */
                do_page_fault(entry, addr, type, regs);
        } else
                unhandled_exceptions(entry, addr, type, regs);
}

void do_revinsn(struct pt_regs *regs)
{
        pr_emerg("Reserved Instruction\n");
        show_regs(regs);
        if (!user_mode(regs))
                do_exit(SIGILL);
        force_sig(SIGILL);
}

#ifdef CONFIG_ALIGNMENT_TRAP
extern int unalign_access_mode;
extern int do_unaligned_access(unsigned long addr, struct pt_regs *regs);
#endif
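/*
 * General exception dispatcher.  The exception type decides the path:
 * alignment checks may be emulated for user space (CONFIG_ALIGNMENT_TRAP)
 * before falling back to do_page_fault(), reserved instructions raise
 * SIGILL, coprocessor exceptions are offered to the FPU handler when
 * coprocessor 0 is the FPU, and the SWID_RAISE_INTERRUPT_LEVEL trap
 * reloads IPC from OIPC for the v3 EDM debugging workaround before
 * calling do_debug_trap().  Anything else is treated as unhandled.
 */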
void do_dispatch_general(unsigned long entry, unsigned long addr,
                         unsigned long itype, struct pt_regs *regs,
                         unsigned long oipc)
{
        unsigned int swid = itype >> ITYPE_offSWID;
        unsigned long type = itype & (ITYPE_mskINST | ITYPE_mskETYPE);

        if (type == ETYPE_ALIGNMENT_CHECK) {
#ifdef CONFIG_ALIGNMENT_TRAP
                /* Alignment check */
                if (user_mode(regs) && unalign_access_mode) {
                        int ret;

                        ret = do_unaligned_access(addr, regs);

                        if (ret == 0)
                                return;

                        if (ret == -EFAULT)
                                pr_emerg("Unhandled unaligned access exception\n");
                }
#endif
                do_page_fault(entry, addr, type, regs);
        } else if (type == ETYPE_RESERVED_INSTRUCTION) {
                /* Reserved instruction */
                do_revinsn(regs);
        } else if (type == ETYPE_COPROCESSOR) {
                /* Coprocessor */
#if IS_ENABLED(CONFIG_FPU)
                unsigned int fucop_exist = __nds32__mfsr(NDS32_SR_FUCOP_EXIST);
                unsigned int cpid = ((itype & ITYPE_mskCPID) >> ITYPE_offCPID);

                if ((cpid == FPU_CPID) &&
                    (fucop_exist & FUCOP_EXIST_mskCP0ISFPU)) {
                        unsigned int subtype = (itype & ITYPE_mskSTYPE);

                        if (do_fpu_exception(subtype, regs))
                                return;
                }
#endif
                unhandled_exceptions(entry, addr, type, regs);
        } else if (type == ETYPE_TRAP && swid == SWID_RAISE_INTERRUPT_LEVEL) {
                /* trap, used as a v3 EDM target debugging workaround */
                /*
                 * DIPC (OIPC) is passed as a parameter before interrupts
                 * are enabled, so DIPC will not be corrupted even if
                 * interrupts come in.
                 */
                /*
                 * 1. update ipc
                 * 2. update pt_regs ipc with oipc
                 * 3. update pt_regs ipsw (clear DEX)
                 */
                __asm__ volatile ("mtsr %0, $IPC\n\t"::"r" (oipc));
                regs->ipc = oipc;
                if (regs->pipsw & PSW_mskDEX) {
                        pr_emerg("Nested debug exception may have occurred\n");
                        pr_emerg("ipc:%08x pipc:%08x\n",
                                 (unsigned int)regs->ipc,
                                 (unsigned int)regs->pipc);
                }
                do_debug_trap(entry, addr, itype, regs);
                regs->ipsw &= ~PSW_mskDEX;
        } else
                unhandled_exceptions(entry, addr, type, regs);
}