linux/arch/s390/kernel/uprobes.c
/*
 *  User-space Probes (UProbes) for s390
 *
 *    Copyright IBM Corp. 2014
 *    Author(s): Jan Willeke,
 */

#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/kdebug.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/kprobes.h>
#include <asm/dis.h>
#include "entry.h"

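/*
 * Sentinel written to regs->int_code while an instruction is single-stepped
 * out of line; any trap taken during the step overwrites it, which is how
 * arch_uprobe_xol_was_trapped() detects that the step did not complete
 * cleanly.
 */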
#define UPROBE_TRAP_NR  UINT_MAX

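/*
 * Check whether the instruction at the probed address may be probed at all:
 * opcodes that must not be single-stepped out of line are rejected.
 */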
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
                             unsigned long addr)
{
        return probe_is_prohibited_opcode(auprobe->insn);
}

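/*
 * Prepare to single-step the copied instruction in the XOL slot.  Probing is
 * only supported in 64-bit mode (or 31-bit mode for compat tasks); the PER
 * bit and int_code are saved, the PSW is pointed at the XOL slot, and
 * TIF_UPROBE_SINGLESTEP plus update_cr_regs() arm the single step.
 */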
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
        if (psw_bits(regs->psw).eaba == PSW_AMODE_24BIT)
                return -EINVAL;
        if (!is_compat_task() && psw_bits(regs->psw).eaba == PSW_AMODE_31BIT)
                return -EINVAL;
        clear_pt_regs_flag(regs, PIF_PER_TRAP);
        auprobe->saved_per = psw_bits(regs->psw).r;
        auprobe->saved_int_code = regs->int_code;
        regs->int_code = UPROBE_TRAP_NR;
        regs->psw.addr = current->utask->xol_vaddr;
        set_tsk_thread_flag(current, TIF_UPROBE_SINGLESTEP);
        update_cr_regs(current);
        return 0;
}

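/*
 * Any trap taken during the XOL step overwrites the UPROBE_TRAP_NR sentinel
 * in regs->int_code, so a changed int_code means the step was trapped.
 */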
bool arch_uprobe_xol_was_trapped(struct task_struct *tsk)
{
        struct pt_regs *regs = task_pt_regs(tsk);

        if (regs->int_code != UPROBE_TRAP_NR)
                return true;
        return false;
}

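/*
 * Decide whether the completed XOL step has to be reported to user space as
 * a PER event: plain single-stepping (no PER controls set), a pending
 * storage-alteration over-indication, or a successful branch that matches
 * the requested branch events or branch-into-range settings.
 */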
static int check_per_event(unsigned short cause, unsigned long control,
                           struct pt_regs *regs)
{
        if (!(regs->psw.mask & PSW_MASK_PER))
                return 0;
        /* user space single step */
        if (control == 0)
                return 1;
        /* over indication for storage alteration */
        if ((control & 0x20200000) && (cause & 0x2000))
                return 1;
        if (cause & 0x8000) {
                /* all branches */
                if ((control & 0x80800000) == 0x80000000)
                        return 1;
                /* branch into selected range */
                if (((control & 0x80800000) == 0x80800000) &&
                    regs->psw.addr >= current->thread.per_user.start &&
                    regs->psw.addr <= current->thread.per_user.end)
                        return 1;
        }
        return 0;
}

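/*
 * Finish the step over the XOL slot: single-stepping is disarmed, the saved
 * PER bit and int_code are restored, and the fixups translate the resulting
 * PSW address / return register from the XOL slot back to the original
 * instruction address.  If the step would have raised a PER event at the
 * original location, one is synthesized via PIF_PER_TRAP.
 */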
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
        int fixup = probe_get_fixup_type(auprobe->insn);
        struct uprobe_task *utask = current->utask;

        clear_tsk_thread_flag(current, TIF_UPROBE_SINGLESTEP);
        update_cr_regs(current);
        psw_bits(regs->psw).r = auprobe->saved_per;
        regs->int_code = auprobe->saved_int_code;

        if (fixup & FIXUP_PSW_NORMAL)
                regs->psw.addr += utask->vaddr - utask->xol_vaddr;
        if (fixup & FIXUP_RETURN_REGISTER) {
                int reg = (auprobe->insn[0] & 0xf0) >> 4;

                regs->gprs[reg] += utask->vaddr - utask->xol_vaddr;
        }
        if (fixup & FIXUP_BRANCH_NOT_TAKEN) {
                int ilen = insn_length(auprobe->insn[0] >> 8);

                if (regs->psw.addr - utask->xol_vaddr == ilen)
                        regs->psw.addr = utask->vaddr + ilen;
        }
        if (check_per_event(current->thread.per_event.cause,
                            current->thread.per_user.control, regs)) {
                /* fix per address */
                current->thread.per_event.address = utask->vaddr;
                /* trigger per event */
                set_pt_regs_flag(regs, PIF_PER_TRAP);
        }
        return 0;
}

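/*
 * die notifier: forward breakpoint and single-step traps that were taken in
 * user space to the generic uprobes code; traps during a transaction are
 * left to the normal trap handlers.
 */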
int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
                                 void *data)
{
        struct die_args *args = data;
        struct pt_regs *regs = args->regs;

        if (!user_mode(regs))
                return NOTIFY_DONE;
        if (regs->int_code & 0x200) /* Trap during transaction */
                return NOTIFY_DONE;
        switch (val) {
        case DIE_BPT:
                if (uprobe_pre_sstep_notifier(regs))
                        return NOTIFY_STOP;
                break;
        case DIE_SSTEP:
                if (uprobe_post_sstep_notifier(regs))
                        return NOTIFY_STOP;
        default:
                break;
        }
        return NOTIFY_DONE;
}

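/*
 * The XOL step is being aborted (e.g. because a signal has to be delivered
 * first): undo the effects of arch_uprobe_pre_xol() and point the PSW back
 * at the original instruction.
 */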
void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
        clear_thread_flag(TIF_UPROBE_SINGLESTEP);
        regs->int_code = auprobe->saved_int_code;
        regs->psw.addr = current->utask->vaddr;
        current->thread.per_event.address = current->utask->vaddr;
}

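/*
 * For uretprobes: the return address is kept in %r14 by the s390 calling
 * convention, so swap it with the trampoline address and hand the original
 * return address back to the generic code.
 */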
unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
                                                struct pt_regs *regs)
{
        unsigned long orig;

        orig = regs->gprs[14];
        regs->gprs[14] = trampoline;
        return orig;
}

/* Instruction Emulation */

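/*
 * Advance the PSW address by the length of the emulated instruction,
 * respecting the current addressing mode.
 */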
static void adjust_psw_addr(psw_t *psw, unsigned long len)
{
        psw->addr = __rewind_psw(*psw, -len);
}

#define EMU_ILLEGAL_OP          1
#define EMU_SPECIFICATION       2
#define EMU_ADDRESSING          3

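/*
 * Emulate a load from a pc relative user address (RIL format): the required
 * facility (bit 34) must be installed, the operand must be naturally
 * aligned, and the value is fetched from user space into the destination
 * register (or register half).
 */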
#define emu_load_ril(ptr, output)                       \
({                                                      \
        unsigned int mask = sizeof(*(ptr)) - 1;         \
        __typeof__(*(ptr)) input;                       \
        int __rc = 0;                                   \
                                                        \
        if (!test_facility(34))                         \
                __rc = EMU_ILLEGAL_OP;                  \
        else if ((u64 __force)ptr & mask)               \
                __rc = EMU_SPECIFICATION;               \
        else if (get_user(input, ptr))                  \
                __rc = EMU_ADDRESSING;                  \
        else                                            \
                *(output) = input;                      \
        __rc;                                           \
})

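/*
 * Emulate a store to a pc relative user address (RIL format), with the same
 * facility, alignment and access checks as emu_load_ril().  A successful
 * store is also passed to sim_stor_event() so that PER storage-alteration
 * tracing keeps working for emulated instructions.
 */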
#define emu_store_ril(regs, ptr, input)                 \
({                                                      \
        unsigned int mask = sizeof(*(ptr)) - 1;         \
        __typeof__(ptr) __ptr = (ptr);                  \
        int __rc = 0;                                   \
                                                        \
        if (!test_facility(34))                         \
                __rc = EMU_ILLEGAL_OP;                  \
        else if ((u64 __force)__ptr & mask)             \
                __rc = EMU_SPECIFICATION;               \
        else if (put_user(*(input), __ptr))             \
                __rc = EMU_ADDRESSING;                  \
        if (__rc == 0)                                  \
                sim_stor_event(regs,                    \
                               (void __force *)__ptr,   \
                               mask + 1);               \
        __rc;                                           \
})

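/*
 * Emulate a compare against a pc relative user operand (RIL format) and set
 * the condition code: 0 if the operands are equal, 1 if the register operand
 * is low, 2 if it is high.
 */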
#define emu_cmp_ril(regs, ptr, cmp)                     \
({                                                      \
        unsigned int mask = sizeof(*(ptr)) - 1;         \
        __typeof__(*(ptr)) input;                       \
        int __rc = 0;                                   \
                                                        \
        if (!test_facility(34))                         \
                __rc = EMU_ILLEGAL_OP;                  \
        else if ((u64 __force)ptr & mask)               \
                __rc = EMU_SPECIFICATION;               \
        else if (get_user(input, ptr))                  \
                __rc = EMU_ADDRESSING;                  \
        else if (input > *(cmp))                        \
                psw_bits((regs)->psw).cc = 1;           \
        else if (input < *(cmp))                        \
                psw_bits((regs)->psw).cc = 2;           \
        else                                            \
                psw_bits((regs)->psw).cc = 0;           \
        __rc;                                           \
})

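/*
 * RIL instruction format: 8-bit opcode, 4-bit register field, 4-bit opcode
 * extension and a signed 32-bit displacement counted in halfwords.
 */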
struct insn_ril {
        u8 opc0;
        u8 reg  : 4;
        u8 opc1 : 4;
        s32 disp;
} __packed;

union split_register {
        u64 u64;
        u32 u32[2];
        u16 u16[4];
        s64 s64;
        s32 s32[2];
        s16 s16[4];
};

/*
 * If the user PER control registers are set up to trace storage alterations
 * and an emulated store hit an address inside the traced range, generate a
 * user PER trap.
 */
static void sim_stor_event(struct pt_regs *regs, void *addr, int len)
{
        if (!(regs->psw.mask & PSW_MASK_PER))
                return;
        if (!(current->thread.per_user.control & PER_EVENT_STORE))
                return;
        if ((void *)current->thread.per_user.start > (addr + len))
                return;
        if ((void *)current->thread.per_user.end < addr)
                return;
        current->thread.per_event.address = regs->psw.addr;
        current->thread.per_event.cause = PER_EVENT_STORE >> 16;
        set_pt_regs_flag(regs, PIF_PER_TRAP);
}

/*
 * pc relative instructions are emulated rather than single-stepped out of
 * line, since their operands may not be reachable from the xol area due to
 * the limited displacement range.
 */
static void handle_insn_ril(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
        union split_register *rx;
        struct insn_ril *insn;
        unsigned int ilen;
        void *uptr;
        int rc = 0;

        insn = (struct insn_ril *) &auprobe->insn;
        rx = (union split_register *) &regs->gprs[insn->reg];
        uptr = (void *)(regs->psw.addr + (insn->disp * 2));
        ilen = insn_length(insn->opc0);

        switch (insn->opc0) {
        case 0xc0:
                switch (insn->opc1) {
                case 0x00: /* larl */
                        rx->u64 = (unsigned long)uptr;
                        break;
                }
                break;
        case 0xc4:
                switch (insn->opc1) {
                case 0x02: /* llhrl */
                        rc = emu_load_ril((u16 __user *)uptr, &rx->u32[1]);
                        break;
                case 0x04: /* lghrl */
                        rc = emu_load_ril((s16 __user *)uptr, &rx->u64);
                        break;
                case 0x05: /* lhrl */
                        rc = emu_load_ril((s16 __user *)uptr, &rx->u32[1]);
                        break;
                case 0x06: /* llghrl */
                        rc = emu_load_ril((u16 __user *)uptr, &rx->u64);
                        break;
                case 0x08: /* lgrl */
                        rc = emu_load_ril((u64 __user *)uptr, &rx->u64);
                        break;
                case 0x0c: /* lgfrl */
                        rc = emu_load_ril((s32 __user *)uptr, &rx->u64);
                        break;
                case 0x0d: /* lrl */
                        rc = emu_load_ril((u32 __user *)uptr, &rx->u32[1]);
                        break;
                case 0x0e: /* llgfrl */
                        rc = emu_load_ril((u32 __user *)uptr, &rx->u64);
                        break;
                case 0x07: /* sthrl */
                        rc = emu_store_ril(regs, (u16 __user *)uptr, &rx->u16[3]);
                        break;
                case 0x0b: /* stgrl */
                        rc = emu_store_ril(regs, (u64 __user *)uptr, &rx->u64);
                        break;
                case 0x0f: /* strl */
                        rc = emu_store_ril(regs, (u32 __user *)uptr, &rx->u32[1]);
                        break;
                }
                break;
        case 0xc6:
                switch (insn->opc1) {
                case 0x02: /* pfdrl */
                        if (!test_facility(34))
                                rc = EMU_ILLEGAL_OP;
                        break;
                case 0x04: /* cghrl */
                        rc = emu_cmp_ril(regs, (s16 __user *)uptr, &rx->s64);
                        break;
                case 0x05: /* chrl */
                        rc = emu_cmp_ril(regs, (s16 __user *)uptr, &rx->s32[1]);
                        break;
                case 0x06: /* clghrl */
                        rc = emu_cmp_ril(regs, (u16 __user *)uptr, &rx->u64);
                        break;
                case 0x07: /* clhrl */
                        rc = emu_cmp_ril(regs, (u16 __user *)uptr, &rx->u32[1]);
                        break;
                case 0x08: /* cgrl */
                        rc = emu_cmp_ril(regs, (s64 __user *)uptr, &rx->s64);
                        break;
                case 0x0a: /* clgrl */
                        rc = emu_cmp_ril(regs, (u64 __user *)uptr, &rx->u64);
                        break;
                case 0x0c: /* cgfrl */
                        rc = emu_cmp_ril(regs, (s32 __user *)uptr, &rx->s64);
                        break;
                case 0x0d: /* crl */
                        rc = emu_cmp_ril(regs, (s32 __user *)uptr, &rx->s32[1]);
                        break;
                case 0x0e: /* clgfrl */
                        rc = emu_cmp_ril(regs, (u32 __user *)uptr, &rx->u64);
                        break;
                case 0x0f: /* clrl */
                        rc = emu_cmp_ril(regs, (u32 __user *)uptr, &rx->u32[1]);
                        break;
                }
                break;
        }
        adjust_psw_addr(&regs->psw, ilen);
        switch (rc) {
        case EMU_ILLEGAL_OP:
                regs->int_code = ilen << 16 | 0x0001;
                do_report_trap(regs, SIGILL, ILL_ILLOPC, NULL);
                break;
        case EMU_SPECIFICATION:
                regs->int_code = ilen << 16 | 0x0006;
                do_report_trap(regs, SIGILL, ILL_ILLOPC, NULL);
                break;
        case EMU_ADDRESSING:
                regs->int_code = ilen << 16 | 0x0005;
                do_report_trap(regs, SIGSEGV, SEGV_MAPERR, NULL);
                break;
        }
}

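/*
 * Decide whether the probed instruction needs the XOL single step at all:
 * probes hit in an unsupported addressing mode are reported with SIGILL
 * after rewinding the PSW to the probed instruction, and pc relative
 * instructions are emulated in place.  Returning true skips the XOL step.
 */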
bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
        if ((psw_bits(regs->psw).eaba == PSW_AMODE_24BIT) ||
            ((psw_bits(regs->psw).eaba == PSW_AMODE_31BIT) &&
             !is_compat_task())) {
                regs->psw.addr = __rewind_psw(regs->psw, UPROBE_SWBP_INSN_SIZE);
                do_report_trap(regs, SIGILL, ILL_ILLADR, NULL);
                return true;
        }
        if (probe_is_insn_relative_long(auprobe->insn)) {
                handle_insn_ril(auprobe, regs);
                return true;
        }
        return false;
}