linux/arch/riscv/kernel/kgdb.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 SiFive
 */

#include <linux/ptrace.h>
#include <linux/kdebug.h>
#include <linux/bug.h>
#include <linux/kgdb.h>
#include <linux/irqflags.h>
#include <linux/string.h>
#include <asm/cacheflush.h>
#include <asm/gdb_xml.h>
#include <asm/parse_asm.h>

enum {
        NOT_KGDB_BREAK = 0,
        KGDB_SW_BREAK,
        KGDB_COMPILED_BREAK,
        KGDB_SW_SINGLE_STEP
};

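/*
 * Address of the instruction that was temporarily replaced with a
 * breakpoint for software single-stepping, and the original opcode
 * saved there so undo_single_step() can restore it.
 */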
static unsigned long stepped_address;
static unsigned int stepped_opcode;

#if __riscv_xlen == 32
/* C.JAL is an RV32C-only instruction */
DECLARE_INSN(c_jal, MATCH_C_JAL, MASK_C_JAL)
#else
#define is_c_jal_insn(opcode) 0
#endif
DECLARE_INSN(jalr, MATCH_JALR, MASK_JALR)
DECLARE_INSN(jal, MATCH_JAL, MASK_JAL)
DECLARE_INSN(c_jr, MATCH_C_JR, MASK_C_JR)
DECLARE_INSN(c_jalr, MATCH_C_JALR, MASK_C_JALR)
DECLARE_INSN(c_j, MATCH_C_J, MASK_C_J)
DECLARE_INSN(beq, MATCH_BEQ, MASK_BEQ)
DECLARE_INSN(bne, MATCH_BNE, MASK_BNE)
DECLARE_INSN(blt, MATCH_BLT, MASK_BLT)
DECLARE_INSN(bge, MATCH_BGE, MASK_BGE)
DECLARE_INSN(bltu, MATCH_BLTU, MASK_BLTU)
DECLARE_INSN(bgeu, MATCH_BGEU, MASK_BGEU)
DECLARE_INSN(c_beqz, MATCH_C_BEQZ, MASK_C_BEQZ)
DECLARE_INSN(c_bnez, MATCH_C_BNEZ, MASK_C_BNEZ)
DECLARE_INSN(sret, MATCH_SRET, MASK_SRET)

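/* Extract a 5-bit register index from a standard (RVG) instruction field. */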
static int decode_register_index(unsigned long opcode, int offset)
{
        return (opcode >> offset) & 0x1F;
}

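/* Extract a 3-bit compressed (RVC) register field; it maps to x8-x15. */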
static int decode_register_index_short(unsigned long opcode, int offset)
{
        return ((opcode >> offset) & 0x7) + 8;
}

/*
 * Calculate the address of the instruction that will execute after a step.
 * Jumps and branches are decoded and evaluated against the saved register
 * file; everything else simply advances the PC by the instruction length
 * (2 bytes for compressed instructions, 4 bytes otherwise).
 */
static int get_step_address(struct pt_regs *regs, unsigned long *next_addr)
{
        unsigned long pc = regs->epc;
        unsigned long *regs_ptr = (unsigned long *)regs;
        unsigned int rs1_num, rs2_num;
        int op_code;

        if (get_kernel_nofault(op_code, (void *)pc))
                return -EINVAL;
        if ((op_code & __INSN_LENGTH_MASK) != __INSN_LENGTH_GE_32) {
                if (is_c_jalr_insn(op_code) || is_c_jr_insn(op_code)) {
                        rs1_num = decode_register_index(op_code, RVC_C2_RS1_OPOFF);
                        *next_addr = regs_ptr[rs1_num];
                } else if (is_c_j_insn(op_code) || is_c_jal_insn(op_code)) {
                        *next_addr = EXTRACT_RVC_J_IMM(op_code) + pc;
                } else if (is_c_beqz_insn(op_code)) {
                        rs1_num = decode_register_index_short(op_code,
                                                              RVC_C1_RS1_OPOFF);
                        if (!rs1_num || regs_ptr[rs1_num] == 0)
                                *next_addr = EXTRACT_RVC_B_IMM(op_code) + pc;
                        else
                                *next_addr = pc + 2;
                } else if (is_c_bnez_insn(op_code)) {
                        rs1_num = decode_register_index_short(op_code,
                                                              RVC_C1_RS1_OPOFF);
                        if (rs1_num && regs_ptr[rs1_num] != 0)
                                *next_addr = EXTRACT_RVC_B_IMM(op_code) + pc;
                        else
                                *next_addr = pc + 2;
                } else {
                        *next_addr = pc + 2;
                }
        } else {
                if ((op_code & __INSN_OPCODE_MASK) == __INSN_BRANCH_OPCODE) {
                        bool result = false;
                        long imm = EXTRACT_BTYPE_IMM(op_code);
                        unsigned long rs1_val = 0, rs2_val = 0;

                        rs1_num = decode_register_index(op_code, RVG_RS1_OPOFF);
                        rs2_num = decode_register_index(op_code, RVG_RS2_OPOFF);
                        if (rs1_num)
                                rs1_val = regs_ptr[rs1_num];
                        if (rs2_num)
                                rs2_val = regs_ptr[rs2_num];

                        if (is_beq_insn(op_code))
                                result = (rs1_val == rs2_val);
                        else if (is_bne_insn(op_code))
                                result = (rs1_val != rs2_val);
                        else if (is_blt_insn(op_code))
                                result = ((long)rs1_val < (long)rs2_val);
                        else if (is_bge_insn(op_code))
                                result = ((long)rs1_val >= (long)rs2_val);
                        else if (is_bltu_insn(op_code))
                                result = (rs1_val < rs2_val);
                        else if (is_bgeu_insn(op_code))
                                result = (rs1_val >= rs2_val);
                        if (result)
                                *next_addr = imm + pc;
                        else
                                *next_addr = pc + 4;
                } else if (is_jal_insn(op_code)) {
                        *next_addr = EXTRACT_JTYPE_IMM(op_code) + pc;
                } else if (is_jalr_insn(op_code)) {
                        rs1_num = decode_register_index(op_code, RVG_RS1_OPOFF);
                        if (rs1_num)
                                *next_addr = regs_ptr[rs1_num];
                        *next_addr += EXTRACT_ITYPE_IMM(op_code);
                } else if (is_sret_insn(op_code)) {
                        *next_addr = pc;
                } else {
                        *next_addr = pc + 4;
                }
        }
        return 0;
}

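/*
 * Replace the instruction at the computed next address with the KGDB
 * breakpoint so that execution traps back into the debugger after
 * exactly one instruction, then flush the icache for that range.
 */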
static int do_single_step(struct pt_regs *regs)
{
        /* Determine where the target instruction will send us to */
        unsigned long addr = 0;
        int error = get_step_address(regs, &addr);

        if (error)
                return error;

        /* Store the op code in the stepped address */
        error = get_kernel_nofault(stepped_opcode, (void *)addr);
        if (error)
                return error;

        stepped_address = addr;

        /* Replace the op code with the break instruction */
        error = copy_to_kernel_nofault((void *)stepped_address,
                                       arch_kgdb_ops.gdb_bpt_instr,
                                       BREAK_INSTR_SIZE);
        /* Flush and return */
        if (!error) {
                flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
                kgdb_single_step = 1;
                atomic_set(&kgdb_cpu_doing_single_step,
                           raw_smp_processor_id());
        } else {
                stepped_address = 0;
                stepped_opcode = 0;
        }
        return error;
}

/* Undo a single step */
static void undo_single_step(struct pt_regs *regs)
{
        if (stepped_opcode != 0) {
                copy_to_kernel_nofault((void *)stepped_address,
                                       (void *)&stepped_opcode, BREAK_INSTR_SIZE);
                flush_icache_range(stepped_address,
                                   stepped_address + BREAK_INSTR_SIZE);
        }
        stepped_address = 0;
        stepped_opcode = 0;
        kgdb_single_step = 0;
        atomic_set(&kgdb_cpu_doing_single_step, -1);
}

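/*
 * Map each GDB register number onto its offset within struct pt_regs.
 * An offset of -1 (the zero register) has no backing storage: reads
 * return 0 and writes are ignored by dbg_get_reg()/dbg_set_reg().
 */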
struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
        {DBG_REG_ZERO, GDB_SIZEOF_REG, -1},
        {DBG_REG_RA, GDB_SIZEOF_REG, offsetof(struct pt_regs, ra)},
        {DBG_REG_SP, GDB_SIZEOF_REG, offsetof(struct pt_regs, sp)},
        {DBG_REG_GP, GDB_SIZEOF_REG, offsetof(struct pt_regs, gp)},
        {DBG_REG_TP, GDB_SIZEOF_REG, offsetof(struct pt_regs, tp)},
        {DBG_REG_T0, GDB_SIZEOF_REG, offsetof(struct pt_regs, t0)},
        {DBG_REG_T1, GDB_SIZEOF_REG, offsetof(struct pt_regs, t1)},
        {DBG_REG_T2, GDB_SIZEOF_REG, offsetof(struct pt_regs, t2)},
        {DBG_REG_FP, GDB_SIZEOF_REG, offsetof(struct pt_regs, s0)},
        {DBG_REG_S1, GDB_SIZEOF_REG, offsetof(struct pt_regs, s1)},
        {DBG_REG_A0, GDB_SIZEOF_REG, offsetof(struct pt_regs, a0)},
        {DBG_REG_A1, GDB_SIZEOF_REG, offsetof(struct pt_regs, a1)},
        {DBG_REG_A2, GDB_SIZEOF_REG, offsetof(struct pt_regs, a2)},
        {DBG_REG_A3, GDB_SIZEOF_REG, offsetof(struct pt_regs, a3)},
        {DBG_REG_A4, GDB_SIZEOF_REG, offsetof(struct pt_regs, a4)},
        {DBG_REG_A5, GDB_SIZEOF_REG, offsetof(struct pt_regs, a5)},
        {DBG_REG_A6, GDB_SIZEOF_REG, offsetof(struct pt_regs, a6)},
        {DBG_REG_A7, GDB_SIZEOF_REG, offsetof(struct pt_regs, a7)},
        {DBG_REG_S2, GDB_SIZEOF_REG, offsetof(struct pt_regs, s2)},
        {DBG_REG_S3, GDB_SIZEOF_REG, offsetof(struct pt_regs, s3)},
        {DBG_REG_S4, GDB_SIZEOF_REG, offsetof(struct pt_regs, s4)},
        {DBG_REG_S5, GDB_SIZEOF_REG, offsetof(struct pt_regs, s5)},
        {DBG_REG_S6, GDB_SIZEOF_REG, offsetof(struct pt_regs, s6)},
        {DBG_REG_S7, GDB_SIZEOF_REG, offsetof(struct pt_regs, s7)},
        {DBG_REG_S8, GDB_SIZEOF_REG, offsetof(struct pt_regs, s8)},
        {DBG_REG_S9, GDB_SIZEOF_REG, offsetof(struct pt_regs, s9)},
        {DBG_REG_S10, GDB_SIZEOF_REG, offsetof(struct pt_regs, s10)},
        {DBG_REG_S11, GDB_SIZEOF_REG, offsetof(struct pt_regs, s11)},
        {DBG_REG_T3, GDB_SIZEOF_REG, offsetof(struct pt_regs, t3)},
        {DBG_REG_T4, GDB_SIZEOF_REG, offsetof(struct pt_regs, t4)},
        {DBG_REG_T5, GDB_SIZEOF_REG, offsetof(struct pt_regs, t5)},
        {DBG_REG_T6, GDB_SIZEOF_REG, offsetof(struct pt_regs, t6)},
        {DBG_REG_EPC, GDB_SIZEOF_REG, offsetof(struct pt_regs, epc)},
        {DBG_REG_STATUS, GDB_SIZEOF_REG, offsetof(struct pt_regs, status)},
        {DBG_REG_BADADDR, GDB_SIZEOF_REG, offsetof(struct pt_regs, badaddr)},
        {DBG_REG_CAUSE, GDB_SIZEOF_REG, offsetof(struct pt_regs, cause)},
};

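/* Copy one register out of pt_regs for the GDB stub; 'zero' reads as 0. */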
char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
{
        if (regno >= DBG_MAX_REG_NUM || regno < 0)
                return NULL;

        if (dbg_reg_def[regno].offset != -1)
                memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
                       dbg_reg_def[regno].size);
        else
                memset(mem, 0, dbg_reg_def[regno].size);
        return dbg_reg_def[regno].name;
}

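/* Write one register back into pt_regs; writes to 'zero' are dropped. */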
int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
{
        if (regno >= DBG_MAX_REG_NUM || regno < 0)
                return -EINVAL;

        if (dbg_reg_def[regno].offset != -1)
                memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
                       dbg_reg_def[regno].size);
        return 0;
}

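/*
 * Only the callee-saved state of a sleeping task is known: the stack
 * pointer, the s-registers and the return address (reported as the PC).
 * Every other register is presented to GDB as zero.
 */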
void
sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
{
        /* Initialize to zero */
        memset((char *)gdb_regs, 0, NUMREGBYTES);

        gdb_regs[DBG_REG_SP_OFF] = task->thread.sp;
        gdb_regs[DBG_REG_FP_OFF] = task->thread.s[0];
        gdb_regs[DBG_REG_S1_OFF] = task->thread.s[1];
        gdb_regs[DBG_REG_S2_OFF] = task->thread.s[2];
        gdb_regs[DBG_REG_S3_OFF] = task->thread.s[3];
        gdb_regs[DBG_REG_S4_OFF] = task->thread.s[4];
        gdb_regs[DBG_REG_S5_OFF] = task->thread.s[5];
        gdb_regs[DBG_REG_S6_OFF] = task->thread.s[6];
        gdb_regs[DBG_REG_S7_OFF] = task->thread.s[7];
        gdb_regs[DBG_REG_S8_OFF] = task->thread.s[8];
        gdb_regs[DBG_REG_S9_OFF] = task->thread.s[9];
        gdb_regs[DBG_REG_S10_OFF] = task->thread.s[10];
        gdb_regs[DBG_REG_S11_OFF] = task->thread.s[11];
        gdb_regs[DBG_REG_EPC_OFF] = task->thread.ra;
}

void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
{
        regs->epc = pc;
}

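/* Answer GDB qXfer reads with the target description and the CPU XML. */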
void kgdb_arch_handle_qxfer_pkt(char *remcom_in_buffer,
                                char *remcom_out_buffer)
{
        if (!strncmp(remcom_in_buffer, gdb_xfer_read_target,
                     sizeof(gdb_xfer_read_target)))
                strcpy(remcom_out_buffer, riscv_gdb_stub_target_desc);
        else if (!strncmp(remcom_in_buffer, gdb_xfer_read_cpuxml,
                          sizeof(gdb_xfer_read_cpuxml)))
                strcpy(remcom_out_buffer, riscv_gdb_stub_cpuxml);
}

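/*
 * 'c' and 's' packets may carry an optional resume address; if one is
 * present, continue execution from there instead of the trapped PC.
 */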
static inline void kgdb_arch_update_addr(struct pt_regs *regs,
                                         char *remcom_in_buffer)
{
        unsigned long addr;
        char *ptr;

        ptr = &remcom_in_buffer[1];
        if (kgdb_hex2long(&ptr, &addr))
                regs->epc = addr;
}

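/*
 * Handle the packets that need architecture help: continue ('c'),
 * detach ('D'), kill ('k') and single step ('s'). Any breakpoint left
 * over from a previous single step is removed first.
 */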
int kgdb_arch_handle_exception(int vector, int signo, int err_code,
                               char *remcom_in_buffer, char *remcom_out_buffer,
                               struct pt_regs *regs)
{
        int err = 0;

        undo_single_step(regs);

        switch (remcom_in_buffer[0]) {
        case 'c':
        case 'D':
        case 'k':
                if (remcom_in_buffer[0] == 'c')
                        kgdb_arch_update_addr(regs, remcom_in_buffer);
                break;
        case 's':
                kgdb_arch_update_addr(regs, remcom_in_buffer);
                err = do_single_step(regs);
                break;
        default:
                err = -1;
        }
        return err;
}

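/*
 * Classify a kernel breakpoint trap: a single-step breakpoint planted by
 * do_single_step(), the compiled-in kgdb_compiled_break, or a dynamic
 * breakpoint known to the KGDB core.
 */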
static int kgdb_riscv_kgdbbreak(unsigned long addr)
{
        if (stepped_address == addr)
                return KGDB_SW_SINGLE_STEP;
        if (atomic_read(&kgdb_setting_breakpoint))
                if (addr == (unsigned long)&kgdb_compiled_break)
                        return KGDB_COMPILED_BREAK;

        return kgdb_has_hit_break(addr);
}

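/*
 * Die notifier entry point: hand kernel-mode breakpoint and trap events
 * to the KGDB core with interrupts disabled. On resume from a compiled-in
 * break the PC is advanced past the 4-byte ebreak.
 */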
static int kgdb_riscv_notify(struct notifier_block *self, unsigned long cmd,
                             void *ptr)
{
        struct die_args *args = (struct die_args *)ptr;
        struct pt_regs *regs = args->regs;
        unsigned long flags;
        int type;

        if (user_mode(regs))
                return NOTIFY_DONE;

        type = kgdb_riscv_kgdbbreak(regs->epc);
        if (type == NOT_KGDB_BREAK && cmd == DIE_TRAP)
                return NOTIFY_DONE;

        local_irq_save(flags);

        if (kgdb_handle_exception(type == KGDB_SW_SINGLE_STEP ? 0 : 1,
                                  args->signr, cmd, regs))
                return NOTIFY_DONE;

        if (type == KGDB_COMPILED_BREAK)
                regs->epc += 4;

        local_irq_restore(flags);

        return NOTIFY_STOP;
}

static struct notifier_block kgdb_notifier = {
        .notifier_call = kgdb_riscv_notify,
};

int kgdb_arch_init(void)
{
        register_die_notifier(&kgdb_notifier);

        return 0;
}

void kgdb_arch_exit(void)
{
        unregister_die_notifier(&kgdb_notifier);
}

/*
 * Global data
 */
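/*
 * The breakpoint instruction is stored as little-endian bytes: c.ebreak
 * (0x9002) when compressed instructions are available, otherwise the
 * full ebreak (0x00100073).
 */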
#ifdef CONFIG_RISCV_ISA_C
const struct kgdb_arch arch_kgdb_ops = {
        .gdb_bpt_instr = {0x02, 0x90},  /* c.ebreak */
};
#else
const struct kgdb_arch arch_kgdb_ops = {
        .gdb_bpt_instr = {0x73, 0x00, 0x10, 0x00},      /* ebreak */
};
#endif