linux/arch/sh/kernel/kgdb.c
// SPDX-License-Identifier: GPL-2.0
/*
 * SuperH KGDB support
 *
 * Copyright (C) 2008 - 2012  Paul Mundt
 *
 * Single stepping taken from the old stub by Henry Bell and Jeremy Siegel.
 */
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>

#include <asm/cacheflush.h>
#include <asm/traps.h>

/* Macros for single step instruction identification */
#define OPCODE_BT(op)           (((op) & 0xff00) == 0x8900)
#define OPCODE_BF(op)           (((op) & 0xff00) == 0x8b00)
#define OPCODE_BTF_DISP(op)     (((op) & 0x80) ? (((op) | 0xffffff80) << 1) : \
                                 (((op) & 0x7f) << 1))
#define OPCODE_BFS(op)          (((op) & 0xff00) == 0x8f00)
#define OPCODE_BTS(op)          (((op) & 0xff00) == 0x8d00)
#define OPCODE_BRA(op)          (((op) & 0xf000) == 0xa000)
#define OPCODE_BRA_DISP(op)     (((op) & 0x800) ? (((op) | 0xfffff800) << 1) : \
                                 (((op) & 0x7ff) << 1))
#define OPCODE_BRAF(op)         (((op) & 0xf0ff) == 0x0023)
#define OPCODE_BRAF_REG(op)     (((op) & 0x0f00) >> 8)
#define OPCODE_BSR(op)          (((op) & 0xf000) == 0xb000)
#define OPCODE_BSR_DISP(op)     (((op) & 0x800) ? (((op) | 0xfffff800) << 1) : \
                                 (((op) & 0x7ff) << 1))
#define OPCODE_BSRF(op)         (((op) & 0xf0ff) == 0x0003)
#define OPCODE_BSRF_REG(op)     (((op) >> 8) & 0xf)
#define OPCODE_JMP(op)          (((op) & 0xf0ff) == 0x402b)
#define OPCODE_JMP_REG(op)      (((op) >> 8) & 0xf)
#define OPCODE_JSR(op)          (((op) & 0xf0ff) == 0x400b)
#define OPCODE_JSR_REG(op)      (((op) >> 8) & 0xf)
#define OPCODE_RTS(op)          ((op) == 0xb)
#define OPCODE_RTE(op)          ((op) == 0x2b)
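/*
 * The *_DISP() helpers above sign-extend the 8-bit (BT/BF) or 12-bit
 * (BRA/BSR) branch displacement and scale it by two, since SH
 * instructions are 16 bits wide; get_step_address() below adds the
 * result to pc + 4 to obtain the branch target.
 */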

#define SR_T_BIT_MASK           0x1
#define STEP_OPCODE             0xc33d  /* trapa #0x3d, the single step trap */

/* Calculate the new address for after a step */
static short *get_step_address(struct pt_regs *linux_regs)
{
        insn_size_t op = __raw_readw(linux_regs->pc);
        long addr;

        /* BT */
        if (OPCODE_BT(op)) {
                if (linux_regs->sr & SR_T_BIT_MASK)
                        addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
                else
                        addr = linux_regs->pc + 2;
        }

        /* BTS */
        else if (OPCODE_BTS(op)) {
                if (linux_regs->sr & SR_T_BIT_MASK)
                        addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
                else
                        addr = linux_regs->pc + 4;      /* Not in delay slot */
        }

        /* BF */
        else if (OPCODE_BF(op)) {
                if (!(linux_regs->sr & SR_T_BIT_MASK))
                        addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
                else
                        addr = linux_regs->pc + 2;
        }

        /* BFS */
        else if (OPCODE_BFS(op)) {
                if (!(linux_regs->sr & SR_T_BIT_MASK))
                        addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
                else
                        addr = linux_regs->pc + 4;      /* Not in delay slot */
        }

        /* BRA */
        else if (OPCODE_BRA(op))
                addr = linux_regs->pc + 4 + OPCODE_BRA_DISP(op);

        /* BRAF */
        else if (OPCODE_BRAF(op))
                addr = linux_regs->pc + 4
                    + linux_regs->regs[OPCODE_BRAF_REG(op)];

        /* BSR */
        else if (OPCODE_BSR(op))
                addr = linux_regs->pc + 4 + OPCODE_BSR_DISP(op);

        /* BSRF */
        else if (OPCODE_BSRF(op))
                addr = linux_regs->pc + 4
                    + linux_regs->regs[OPCODE_BSRF_REG(op)];

        /* JMP */
        else if (OPCODE_JMP(op))
                addr = linux_regs->regs[OPCODE_JMP_REG(op)];

        /* JSR */
        else if (OPCODE_JSR(op))
                addr = linux_regs->regs[OPCODE_JSR_REG(op)];

        /* RTS */
        else if (OPCODE_RTS(op))
                addr = linux_regs->pr;

        /* RTE */
        else if (OPCODE_RTE(op))
                addr = linux_regs->regs[15];

        /* Other */
        else
                addr = linux_regs->pc + instruction_size(op);

        flush_icache_range(addr, addr + instruction_size(op));
        return (short *)addr;
}

/*
 * Replace the instruction immediately after the current instruction
 * (i.e. next in the expected flow of control) with a trap instruction,
 * so that returning will cause only a single instruction to be executed.
 * Note that this model is slightly broken for instructions with delay
 * slots (e.g. B[TF]S, BSR, BRA etc), where both the branch and the
 * instruction in the delay slot will be executed.
 */
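/*
 * The trap planted is STEP_OPCODE (trapa #0x3d); the original opcode is
 * saved in stepped_opcode/stepped_address and put back by
 * undo_single_step() when the next exception re-enters
 * kgdb_arch_handle_exception().
 */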

static unsigned long stepped_address;
static insn_size_t stepped_opcode;

static void do_single_step(struct pt_regs *linux_regs)
{
        /* Determine where the target instruction will send us to */
        unsigned short *addr = get_step_address(linux_regs);

        stepped_address = (unsigned long)addr;

        /* Replace it */
        stepped_opcode = __raw_readw((long)addr);
        *addr = STEP_OPCODE;

        /* Flush and return */
        flush_icache_range((long)addr, (long)addr +
                           instruction_size(stepped_opcode));
}

/* Undo a single step */
static void undo_single_step(struct pt_regs *linux_regs)
{
        /* If we have stepped, put back the old instruction */
        /* Use stepped_address in case we stopped elsewhere */
        if (stepped_opcode != 0) {
                __raw_writew(stepped_opcode, stepped_address);
                flush_icache_range(stepped_address, stepped_address + 2);
        }

        stepped_opcode = 0;
}

struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
        { "r0",         GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[0]) },
        { "r1",         GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[1]) },
        { "r2",         GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[2]) },
        { "r3",         GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[3]) },
        { "r4",         GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[4]) },
        { "r5",         GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[5]) },
        { "r6",         GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[6]) },
        { "r7",         GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[7]) },
        { "r8",         GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[8]) },
        { "r9",         GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[9]) },
        { "r10",        GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[10]) },
        { "r11",        GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[11]) },
        { "r12",        GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[12]) },
        { "r13",        GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[13]) },
        { "r14",        GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[14]) },
        { "r15",        GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[15]) },
        { "pc",         GDB_SIZEOF_REG, offsetof(struct pt_regs, pc) },
        { "pr",         GDB_SIZEOF_REG, offsetof(struct pt_regs, pr) },
        { "sr",         GDB_SIZEOF_REG, offsetof(struct pt_regs, sr) },
        { "gbr",        GDB_SIZEOF_REG, offsetof(struct pt_regs, gbr) },
        { "mach",       GDB_SIZEOF_REG, offsetof(struct pt_regs, mach) },
        { "macl",       GDB_SIZEOF_REG, offsetof(struct pt_regs, macl) },
        { "vbr",        GDB_SIZEOF_REG, -1 },
};
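
/*
 * vbr has no slot in pt_regs (offset -1 above), so dbg_get_reg() reads
 * it straight from the control register instead.
 */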

int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
{
        if (regno < 0 || regno >= DBG_MAX_REG_NUM)
                return -EINVAL;

        if (dbg_reg_def[regno].offset != -1)
                memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
                       dbg_reg_def[regno].size);

        return 0;
}

char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
{
        if (regno >= DBG_MAX_REG_NUM || regno < 0)
                return NULL;

        if (dbg_reg_def[regno].offset != -1)
                memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
                       dbg_reg_def[regno].size);

        switch (regno) {
        case GDB_VBR:
                /* vbr is not part of pt_regs, fetch it from the CPU */
                __asm__ __volatile__ ("stc vbr, %0"
                                      : "=r" (*(unsigned long *)mem));
                break;
        }

        return dbg_reg_def[regno].name;
}

void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
        struct pt_regs *thread_regs = task_pt_regs(p);
        int reg;

        /* Initialize to zero */
        for (reg = 0; reg < DBG_MAX_REG_NUM; reg++)
                gdb_regs[reg] = 0;

        /*
         * Copy out GP regs 8 to 14.
         *
         * switch_to() relies on SR.RB toggling, so r0-r7 are banked
         * and need privileged instructions to get at. The r15 value is
         * fetched from the thread struct directly.
         */
        for (reg = GDB_R8; reg < GDB_R15; reg++)
                gdb_regs[reg] = thread_regs->regs[reg];

        gdb_regs[GDB_R15] = p->thread.sp;
        gdb_regs[GDB_PC] = p->thread.pc;

        /*
         * Additional registers we have context for
         */
        gdb_regs[GDB_PR] = thread_regs->pr;
        gdb_regs[GDB_GBR] = thread_regs->gbr;
}

int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
                               char *remcomInBuffer, char *remcomOutBuffer,
                               struct pt_regs *linux_regs)
{
        unsigned long addr;
        char *ptr;

        /* Undo any stepping we may have done */
        undo_single_step(linux_regs);

        switch (remcomInBuffer[0]) {
        case 'c':
        case 's':
                /* try to read optional parameter, pc unchanged if no parm */
                ptr = &remcomInBuffer[1];
                if (kgdb_hex2long(&ptr, &addr))
                        linux_regs->pc = addr;
                fallthrough;
        case 'D':
        case 'k':
                atomic_set(&kgdb_cpu_doing_single_step, -1);

                if (remcomInBuffer[0] == 's') {
                        do_single_step(linux_regs);
                        kgdb_single_step = 1;

                        atomic_set(&kgdb_cpu_doing_single_step,
                                   raw_smp_processor_id());
                }

                return 0;
        }

        /* Anything else means we do not want to exit from the handler */
        return -1;
}

unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
{
        /*
         * Trap 60 (0x3c) is the breakpoint trapa (see arch_kgdb_ops
         * below); the saved pc points past the 2-byte trap, so step
         * back over it.
         */
        if (exception == 60)
                return instruction_pointer(regs) - 2;
        return instruction_pointer(regs);
}

void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
        regs->pc = ip;
}

/*
 * The primary entry points for the kgdb debug trap table entries.
 */
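/*
 * The singlestep handler below is reached via the planted STEP_OPCODE
 * trapa (presumably wired up as debug trap #0x3d in the trap table),
 * while breakpoint traps (trapa #0x3c) arrive through the die notifier
 * further down.
 */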
BUILD_TRAP_HANDLER(singlestep)
{
        unsigned long flags;
        TRAP_HANDLER_DECL;

        local_irq_save(flags);
        regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
        kgdb_handle_exception(0, SIGTRAP, 0, regs);
        local_irq_restore(flags);
}

static int __kgdb_notify(struct die_args *args, unsigned long cmd)
{
        int ret;

        switch (cmd) {
        case DIE_BREAKPOINT:
                /*
                 * This means a user thread is single stepping
                 * a system call, which should be ignored.
                 */
                if (test_thread_flag(TIF_SINGLESTEP))
                        return NOTIFY_DONE;

                ret = kgdb_handle_exception(args->trapnr & 0xff, args->signr,
                                            args->err, args->regs);
                if (ret)
                        return NOTIFY_DONE;

                break;
        }

        return NOTIFY_STOP;
}

static int
kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
{
        unsigned long flags;
        int ret;

        local_irq_save(flags);
        ret = __kgdb_notify(ptr, cmd);
        local_irq_restore(flags);

        return ret;
}

static struct notifier_block kgdb_notifier = {
        .notifier_call  = kgdb_notify,

        /*
         * Lowest-priority notifier; we want to be notified last.
         */
        .priority       = -INT_MAX,
};

int kgdb_arch_init(void)
{
        return register_die_notifier(&kgdb_notifier);
}

void kgdb_arch_exit(void)
{
        unregister_die_notifier(&kgdb_notifier);
}

const struct kgdb_arch arch_kgdb_ops = {
        /* Breakpoint instruction: trapa #0x3c */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
        .gdb_bpt_instr          = { 0x3c, 0xc3 },
#else
        .gdb_bpt_instr          = { 0xc3, 0x3c },
#endif
};