linux/arch/mips/kernel/kgdb.c
/*
 *  Originally written by Glenn Engel, Lake Stevens Instrument Division
 *
 *  Contributed by HP Systems
 *
 *  Modified for Linux/MIPS (and MIPS in general) by Andreas Busse
 *  Send complaints, suggestions etc. to <andy@waldorf-gmbh.de>
 *
 *  Copyright (C) 1995 Andreas Busse
 *
 *  Copyright (C) 2003 MontaVista Software Inc.
 *  Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
 *
 *  Copyright (C) 2004-2005 MontaVista Software Inc.
 *  Author: Manish Lachwani, mlachwani@mvista.com or manish@koffee-break.com
 *
 *  Copyright (C) 2007-2008 Wind River Systems, Inc.
 *  Author/Maintainer: Jason Wessel, jason.wessel@windriver.com
 *
 *  This file is licensed under the terms of the GNU General Public License
 *  version 2. This program is licensed "as is" without any warranty of any
 *  kind, whether express or implied.
 */

#include <linux/ptrace.h>               /* for linux pt_regs struct */
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/inst.h>
#include <asm/fpu.h>
#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>

static struct hard_trap_info {
        unsigned char tt;       /* Trap type code for MIPS R3xxx and R4xxx */
        unsigned char signo;    /* Signal that we map this trap into */
} hard_trap_info[] = {
        { 6, SIGBUS },          /* instruction bus error */
        { 7, SIGBUS },          /* data bus error */
        { 9, SIGTRAP },         /* break */
/*      { 11, SIGILL }, */      /* CPU unusable */
        { 12, SIGFPE },         /* overflow */
        { 13, SIGTRAP },        /* trap */
        { 14, SIGSEGV },        /* virtual instruction cache coherency */
        { 15, SIGFPE },         /* floating point exception */
        { 23, SIGSEGV },        /* watch */
        { 31, SIGSEGV },        /* virtual data cache coherency */
        { 0, 0}                 /* Must be last */
};

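/*
 * Plant the compiled-in breakpoint: a 'break' instruction at the global
 * label 'breakinst', surrounded by nops. kgdb_mips_notify() compares the
 * trapping EPC against this label so it can step past it on resume.
 */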
void arch_kgdb_breakpoint(void)
{
        __asm__ __volatile__(
                ".globl breakinst\n\t"
                ".set\tnoreorder\n\t"
                "nop\n"
                "breakinst:\tbreak\n\t"
                "nop\n\t"
                ".set\treorder");
}

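/*
 * When one CPU enters the debugger, kgdb_roundup_cpus() IPIs the other
 * CPUs into kgdb_nmicallback(). Interrupts are briefly re-enabled because
 * smp_call_function() must not be called with interrupts disabled.
 */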
static void kgdb_call_nmi_hook(void *ignored)
{
        kgdb_nmicallback(raw_smp_processor_id(), NULL);
}

void kgdb_roundup_cpus(unsigned long flags)
{
        local_irq_enable();
        smp_call_function(kgdb_call_nmi_hook, NULL, 0);
        local_irq_disable();
}

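/* Translate a MIPS trap type into the signal number reported to gdb */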
static int compute_signal(int tt)
{
        struct hard_trap_info *ht;

        for (ht = hard_trap_info; ht->tt && ht->signo; ht++)
                if (ht->tt == tt)
                        return ht->signo;

        return SIGHUP;          /* default for things we don't know about */
}

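/*
 * Copy the exception frame into gdb's register layout: 32 GPRs, then
 * status, lo, hi, badvaddr, cause and epc, then (if the FPU was in use)
 * the 32 floating point registers.
 */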
void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
        int reg;

#if (KGDB_GDB_REG_SIZE == 32)
        u32 *ptr = (u32 *)gdb_regs;
#else
        u64 *ptr = (u64 *)gdb_regs;
#endif

        for (reg = 0; reg < 32; reg++)
                *(ptr++) = regs->regs[reg];

        *(ptr++) = regs->cp0_status;
        *(ptr++) = regs->lo;
        *(ptr++) = regs->hi;
        *(ptr++) = regs->cp0_badvaddr;
        *(ptr++) = regs->cp0_cause;
        *(ptr++) = regs->cp0_epc;

        /* FP REGS */
        if (!(current && (regs->cp0_status & ST0_CU1)))
                return;

        save_fp(current);
        for (reg = 0; reg < 32; reg++)
                *(ptr++) = current->thread.fpu.fpr[reg];
}

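/*
 * Write registers received from gdb back into the exception frame, and
 * into the FPU context when the FPU was in use.
 */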
void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
        int reg;

#if (KGDB_GDB_REG_SIZE == 32)
        const u32 *ptr = (u32 *)gdb_regs;
#else
        const u64 *ptr = (u64 *)gdb_regs;
#endif

        for (reg = 0; reg < 32; reg++)
                regs->regs[reg] = *(ptr++);

        regs->cp0_status = *(ptr++);
        regs->lo = *(ptr++);
        regs->hi = *(ptr++);
        regs->cp0_badvaddr = *(ptr++);
        regs->cp0_cause = *(ptr++);
        regs->cp0_epc = *(ptr++);

        /* FP REGS from current */
        if (!(current && (regs->cp0_status & ST0_CU1)))
                return;

        for (reg = 0; reg < 32; reg++)
                current->thread.fpu.fpr[reg] = *(ptr++);
        restore_fp(current);
}

/*
 * Similar to pt_regs_to_gdb_regs() except that the process is sleeping, so
 * we may not be able to get all the info.
 */
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
        int reg;
        struct thread_info *ti = task_thread_info(p);
        unsigned long ksp = (unsigned long)ti + THREAD_SIZE - 32;
        struct pt_regs *regs = (struct pt_regs *)ksp - 1;
#if (KGDB_GDB_REG_SIZE == 32)
        u32 *ptr = (u32 *)gdb_regs;
#else
        u64 *ptr = (u64 *)gdb_regs;
#endif

        for (reg = 0; reg < 16; reg++)
                *(ptr++) = regs->regs[reg];

        /* S0 - S7 */
        for (reg = 16; reg < 24; reg++)
                *(ptr++) = regs->regs[reg];

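        /* t8, t9, k0 and k1 are not preserved across a context switch; report zero */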
        for (reg = 24; reg < 28; reg++)
                *(ptr++) = 0;

        /* GP, SP, FP, RA */
        for (reg = 28; reg < 32; reg++)
                *(ptr++) = regs->regs[reg];

        *(ptr++) = regs->cp0_status;
        *(ptr++) = regs->lo;
        *(ptr++) = regs->hi;
        *(ptr++) = regs->cp0_badvaddr;
        *(ptr++) = regs->cp0_cause;
        *(ptr++) = regs->cp0_epc;
}

/*
 * Die notifier callback, invoked before the kernel dies. If KGDB is
 * enabled, try to fall into the debugger.
 */
static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
                            void *ptr)
{
        struct die_args *args = (struct die_args *)ptr;
        struct pt_regs *regs = args->regs;
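        /* ExcCode field: bits 6:2 of the CP0 Cause register */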
        int trap = (regs->cp0_cause & 0x7c) >> 2;

        /* Userspace events, ignore. */
        if (user_mode(regs))
                return NOTIFY_DONE;

        if (atomic_read(&kgdb_active) != -1)
                kgdb_nmicallback(smp_processor_id(), regs);

        if (kgdb_handle_exception(trap, compute_signal(trap), 0, regs))
                return NOTIFY_DONE;

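        /*
         * If the trap came from the compiled-in break at 'breakinst',
         * advance the EPC past it so the CPU does not re-trap on resume.
         */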
        if (atomic_read(&kgdb_setting_breakpoint))
                if ((trap == 9) && (regs->cp0_epc == (unsigned long)breakinst))
                        regs->cp0_epc += 4;

        /* In SMP mode, __flush_cache_all does IPI */
        local_irq_enable();
        __flush_cache_all();

        return NOTIFY_STOP;
}

static struct notifier_block kgdb_notifier = {
        .notifier_call = kgdb_mips_notify,
};

/*
 * Handle the 's' and 'c' commands
 */
int kgdb_arch_handle_exception(int vector, int signo, int err_code,
                               char *remcom_in_buffer, char *remcom_out_buffer,
                               struct pt_regs *regs)
{
        char *ptr;
        unsigned long address;
        int cpu = smp_processor_id();

        switch (remcom_in_buffer[0]) {
        case 's':
        case 'c':
                /* handle the optional parameter */
                ptr = &remcom_in_buffer[1];
                if (kgdb_hex2long(&ptr, &address))
                        regs->cp0_epc = address;

                atomic_set(&kgdb_cpu_doing_single_step, -1);
                if (remcom_in_buffer[0] == 's')
                        atomic_set(&kgdb_cpu_doing_single_step, cpu);

                return 0;
        }

        return -1;
}

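/*
 * gdb_bpt_instr is filled in at kgdb_arch_init() time from a constructed
 * 'break' instruction, so the structure itself starts out empty.
 */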
struct kgdb_arch arch_kgdb_ops;

/*
 * We use kgdb_early_setup so that functions we need to call now don't
 * cause trouble when called again later.
 */
int kgdb_arch_init(void)
{
        union mips_instruction insn = {
                .r_format = {
                        .opcode = spec_op,
                        .func   = break_op,
                }
        };
        memcpy(arch_kgdb_ops.gdb_bpt_instr, insn.byte, BREAK_INSTR_SIZE);

        register_die_notifier(&kgdb_notifier);

        return 0;
}

/*
 *      kgdb_arch_exit - Perform any architecture specific uninitialization.
 *
 *      This function will handle the uninitialization of any architecture
 *      specific callbacks, for dynamic registration and unregistration.
 */
void kgdb_arch_exit(void)
{
        unregister_die_notifier(&kgdb_notifier);
}