/* linux/arch/s390/kernel/uprobes.c */
/*
 *  User-space Probes (UProbes) for s390
 *
 *    Copyright IBM Corp. 2014
 *    Author(s): Jan Willeke,
 */
   7
   8#include <linux/uaccess.h>
   9#include <linux/uprobes.h>
  10#include <linux/compat.h>
  11#include <linux/kdebug.h>
  12#include <asm/switch_to.h>
  13#include <asm/facility.h>
  14#include <asm/kprobes.h>
  15#include <asm/dis.h>
  16#include "entry.h"
  17
  18#define UPROBE_TRAP_NR  UINT_MAX
  19
  20int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
  21                             unsigned long addr)
  22{
  23        return probe_is_prohibited_opcode(auprobe->insn);
  24}
  25
/*
 * Prepare the task to single-step the probed instruction out of line.
 *
 * Refuses addressing modes the XOL slot cannot serve: 24-bit always,
 * and 31-bit unless the task is a compat task.  Saves the PSW PER mask
 * bit and the interruption code so arch_uprobe_post_xol() /
 * arch_uprobe_abort_xol() can restore them, parks the UPROBE_TRAP_NR
 * sentinel in int_code, and points the PSW at the XOL slot.
 *
 * Returns 0 on success, -EINVAL for an unsupported addressing mode.
 */
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
        if (psw_bits(regs->psw).eaba == PSW_AMODE_24BIT)
                return -EINVAL;
        if (!is_compat_task() && psw_bits(regs->psw).eaba == PSW_AMODE_31BIT)
                return -EINVAL;
        /* Drop any pending PER trap before stepping the XOL copy. */
        clear_tsk_thread_flag(current, TIF_PER_TRAP);
        auprobe->saved_per = psw_bits(regs->psw).r;     /* PSW PER mask bit */
        auprobe->saved_int_code = regs->int_code;
        /* Sentinel: arch_uprobe_xol_was_trapped() detects any real trap. */
        regs->int_code = UPROBE_TRAP_NR;
        regs->psw.addr = current->utask->xol_vaddr;
        set_tsk_thread_flag(current, TIF_UPROBE_SINGLESTEP);
        /* Propagate the flag change to the control registers. */
        update_cr_regs(current);
        return 0;
}
  41
  42bool arch_uprobe_xol_was_trapped(struct task_struct *tsk)
  43{
  44        struct pt_regs *regs = task_pt_regs(tsk);
  45
  46        if (regs->int_code != UPROBE_TRAP_NR)
  47                return true;
  48        return false;
  49}
  50
/*
 * Decide whether the emulated/XOL'd step should surface a PER event for
 * the original instruction address.
 *
 * @cause:   recorded PER event cause bits (thread.per_event.cause)
 * @control: user-requested PER control bits (thread.per_user.control)
 * @regs:    user registers after the step
 *
 * Returns 1 if a PER trap must be (re)injected, 0 otherwise.
 *
 * NOTE(review): the magic masks mirror the hardware PER control/cause
 * bit layout — confirm against the z/Architecture Principles of
 * Operation before changing them.
 */
static int check_per_event(unsigned short cause, unsigned long control,
                           struct pt_regs *regs)
{
        /* PER disabled in the PSW: nothing to report. */
        if (!(regs->psw.mask & PSW_MASK_PER))
                return 0;
        /* user space single step */
        if (control == 0)
                return 1;
        /* over indication for storage alteration */
        if ((control & 0x20200000) && (cause & 0x2000))
                return 1;
        if (cause & 0x8000) {
                /* all branches */
                if ((control & 0x80800000) == 0x80000000)
                        return 1;
                /* branch into selected range */
                if (((control & 0x80800000) == 0x80800000) &&
                    regs->psw.addr >= current->thread.per_user.start &&
                    regs->psw.addr <= current->thread.per_user.end)
                        return 1;
        }
        return 0;
}
  74
/*
 * Fix up state after the probed instruction was single-stepped in the
 * XOL slot.  Restores what arch_uprobe_pre_xol() saved, relocates
 * instruction-relative results so they look as if the instruction ran
 * at its original address, and re-injects a PER event if the user's PER
 * settings would have matched it.  Always returns 0.
 */
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
        int fixup = probe_get_fixup_type(auprobe->insn);
        struct uprobe_task *utask = current->utask;

        clear_tsk_thread_flag(current, TIF_UPROBE_SINGLESTEP);
        update_cr_regs(current);
        /* Undo the state saved by arch_uprobe_pre_xol(). */
        psw_bits(regs->psw).r = auprobe->saved_per;
        regs->int_code = auprobe->saved_int_code;

        /* Relocate the PSW from the XOL slot back to the original code. */
        if (fixup & FIXUP_PSW_NORMAL)
                regs->psw.addr += utask->vaddr - utask->xol_vaddr;
        if (fixup & FIXUP_RETURN_REGISTER) {
                /* Register operand received an XOL-slot address
                 * (link register of a branch-and-save style insn,
                 * presumably) — shift it back.  TODO confirm against
                 * probe_get_fixup_type(). */
                int reg = (auprobe->insn[0] & 0xf0) >> 4;

                regs->gprs[reg] += utask->vaddr - utask->xol_vaddr;
        }
        if (fixup & FIXUP_BRANCH_NOT_TAKEN) {
                int ilen = insn_length(auprobe->insn[0] >> 8);

                /* PSW sits right behind the XOL copy: the branch fell
                 * through, so continue after the original instruction. */
                if (regs->psw.addr - utask->xol_vaddr == ilen)
                        regs->psw.addr = utask->vaddr + ilen;
        }
        if (check_per_event(current->thread.per_event.cause,
                            current->thread.per_user.control, regs)) {
                /* fix per address */
                current->thread.per_event.address = utask->vaddr;
                /* trigger per event */
                set_tsk_thread_flag(current, TIF_PER_TRAP);
        }
        return 0;
}
 107
 108int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
 109                                 void *data)
 110{
 111        struct die_args *args = data;
 112        struct pt_regs *regs = args->regs;
 113
 114        if (!user_mode(regs))
 115                return NOTIFY_DONE;
 116        if (regs->int_code & 0x200) /* Trap during transaction */
 117                return NOTIFY_DONE;
 118        switch (val) {
 119        case DIE_BPT:
 120                if (uprobe_pre_sstep_notifier(regs))
 121                        return NOTIFY_STOP;
 122                break;
 123        case DIE_SSTEP:
 124                if (uprobe_post_sstep_notifier(regs))
 125                        return NOTIFY_STOP;
 126        default:
 127                break;
 128        }
 129        return NOTIFY_DONE;
 130}
 131
 132void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
 133{
 134        clear_thread_flag(TIF_UPROBE_SINGLESTEP);
 135        regs->int_code = auprobe->saved_int_code;
 136        regs->psw.addr = current->utask->vaddr;
 137        current->thread.per_event.address = current->utask->vaddr;
 138}
 139
 140unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
 141                                                struct pt_regs *regs)
 142{
 143        unsigned long orig;
 144
 145        orig = regs->gprs[14];
 146        regs->gprs[14] = trampoline;
 147        return orig;
 148}
 149
 150bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
 151                             struct pt_regs *regs)
 152{
 153        if (ctx == RP_CHECK_CHAIN_CALL)
 154                return user_stack_pointer(regs) <= ret->stack;
 155        else
 156                return user_stack_pointer(regs) < ret->stack;
 157}
 158
 159/* Instruction Emulation */
 160
/*
 * Advance the PSW address by @len bytes.  __rewind_psw() steps
 * backwards, so passing a negative distance moves forward while still
 * honouring the wrap rules of the current addressing mode.
 */
static void adjust_psw_addr(psw_t *psw, unsigned long len)
{
        psw->addr = __rewind_psw(*psw, -len);
}
 165
 166#define EMU_ILLEGAL_OP          1
 167#define EMU_SPECIFICATION       2
 168#define EMU_ADDRESSING          3
 169
 170#define emu_load_ril(ptr, output)                       \
 171({                                                      \
 172        unsigned int mask = sizeof(*(ptr)) - 1;         \
 173        __typeof__(*(ptr)) input;                       \
 174        int __rc = 0;                                   \
 175                                                        \
 176        if (!test_facility(34))                         \
 177                __rc = EMU_ILLEGAL_OP;                  \
 178        else if ((u64 __force)ptr & mask)               \
 179                __rc = EMU_SPECIFICATION;               \
 180        else if (get_user(input, ptr))                  \
 181                __rc = EMU_ADDRESSING;                  \
 182        else                                            \
 183                *(output) = input;                      \
 184        __rc;                                           \
 185})
 186
/*
 * Store a register field to a pc-relative user address.
 *
 * @regs:  user registers, needed to simulate a PER storage event
 * @ptr:   user-space destination; its type fixes the operand size
 * @input: source lvalue (a field of union split_register)
 *
 * On success sim_stor_event() is called, because the emulated store ran
 * without hardware PER control and a storage-alteration event must be
 * synthesized by software.
 *
 * Evaluates to 0 on success or EMU_ILLEGAL_OP (facility 34 missing),
 * EMU_SPECIFICATION (misaligned operand) or EMU_ADDRESSING (fault).
 */
#define emu_store_ril(regs, ptr, input)                 \
({                                                      \
        unsigned int mask = sizeof(*(ptr)) - 1;         \
        __typeof__(ptr) __ptr = (ptr);                  \
        int __rc = 0;                                   \
                                                        \
        if (!test_facility(34))                         \
                __rc = EMU_ILLEGAL_OP;                  \
        else if ((u64 __force)__ptr & mask)             \
                __rc = EMU_SPECIFICATION;               \
        else if (put_user(*(input), __ptr))             \
                __rc = EMU_ADDRESSING;                  \
        if (__rc == 0)                                  \
                sim_stor_event(regs, __ptr, mask + 1);  \
        __rc;                                           \
})
 203
 204#define emu_cmp_ril(regs, ptr, cmp)                     \
 205({                                                      \
 206        unsigned int mask = sizeof(*(ptr)) - 1;         \
 207        __typeof__(*(ptr)) input;                       \
 208        int __rc = 0;                                   \
 209                                                        \
 210        if (!test_facility(34))                         \
 211                __rc = EMU_ILLEGAL_OP;                  \
 212        else if ((u64 __force)ptr & mask)               \
 213                __rc = EMU_SPECIFICATION;               \
 214        else if (get_user(input, ptr))                  \
 215                __rc = EMU_ADDRESSING;                  \
 216        else if (input > *(cmp))                        \
 217                psw_bits((regs)->psw).cc = 1;           \
 218        else if (input < *(cmp))                        \
 219                psw_bits((regs)->psw).cc = 2;           \
 220        else                                            \
 221                psw_bits((regs)->psw).cc = 0;           \
 222        __rc;                                           \
 223})
 224
/*
 * RIL-format instruction image: 8-bit opcode, 4-bit register field,
 * 4-bit opcode extension, then a signed 32-bit displacement counted in
 * halfwords (handle_insn_ril() multiplies it by 2).
 * NOTE(review): the bitfield order relies on big-endian (s390) bitfield
 * allocation — reg occupies the high nibble of the second byte.
 */
struct insn_ril {
        u8 opc0;
        u8 reg  : 4;
        u8 opc1 : 4;
        s32 disp;
} __packed;
 231
/*
 * Overlay of one 64-bit general register that lets the emulation code
 * address its parts directly.  With s390's big-endian layout, u32[1] /
 * s32[1] is the low word and u16[3] / s16[3] the low halfword — the
 * indices used by the load/store/compare cases in handle_insn_ril().
 */
union split_register {
        u64 u64;
        u32 u32[2];
        u16 u16[4];
        s64 s64;
        s32 s32[2];
        s16 s16[4];
};
 240
/*
 * If user per registers are setup to trace storage alterations and an
 * emulated store took place on a fitting address a user trap is generated.
 *
 * @regs: user registers (PSW checked for the PER mask, address recorded)
 * @addr: user address the emulated store wrote to
 * @len:  number of bytes written
 *
 * Needed because emulated stores bypass the hardware PER machinery, so
 * the storage-alteration event must be synthesized in software.
 */
static void sim_stor_event(struct pt_regs *regs, void *addr, int len)
{
        if (!(regs->psw.mask & PSW_MASK_PER))
                return;
        if (!(current->thread.per_user.control & PER_EVENT_STORE))
                return;
        /* Store entirely below the watched range? */
        if ((void *)current->thread.per_user.start > (addr + len))
                return;
        /* Store entirely above the watched range? */
        if ((void *)current->thread.per_user.end < addr)
                return;
        current->thread.per_event.address = regs->psw.addr;
        current->thread.per_event.cause = PER_EVENT_STORE >> 16;

        set_tsk_thread_flag(current, TIF_PER_TRAP);
}
 260
/*
 * pc relative instructions are emulated, since parameters may not be
 * accessible from the xol area due to range limitations.
 *
 * Decodes the RIL instruction saved in @auprobe, performs the
 * equivalent load/store/compare (or larl/pfdrl) against @regs and user
 * memory, advances the PSW past the instruction, and converts EMU_*
 * failures into the program checks the real instruction would have
 * raised.  Unrecognized opcodes fall through as no-ops apart from the
 * PSW advance.
 */
static void handle_insn_ril(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
        union split_register *rx;
        struct insn_ril *insn;
        unsigned int ilen;
        void *uptr;
        int rc = 0;

        insn = (struct insn_ril *) &auprobe->insn;
        /* Operand register, viewed through the split_register overlay. */
        rx = (union split_register *) &regs->gprs[insn->reg];
        /* pc-relative operand address: displacement is in halfwords. */
        uptr = (void *)(regs->psw.addr + (insn->disp * 2));
        ilen = insn_length(insn->opc0);

        switch (insn->opc0) {
        case 0xc0:
                switch (insn->opc1) {
                case 0x00: /* larl */
                        rx->u64 = (unsigned long)uptr;
                        break;
                }
                break;
        case 0xc4:
                switch (insn->opc1) {
                case 0x02: /* llhrl */
                        rc = emu_load_ril((u16 __user *)uptr, &rx->u32[1]);
                        break;
                case 0x04: /* lghrl */
                        rc = emu_load_ril((s16 __user *)uptr, &rx->u64);
                        break;
                case 0x05: /* lhrl */
                        rc = emu_load_ril((s16 __user *)uptr, &rx->u32[1]);
                        break;
                case 0x06: /* llghrl */
                        rc = emu_load_ril((u16 __user *)uptr, &rx->u64);
                        break;
                case 0x08: /* lgrl */
                        rc = emu_load_ril((u64 __user *)uptr, &rx->u64);
                        break;
                case 0x0c: /* lgfrl */
                        rc = emu_load_ril((s32 __user *)uptr, &rx->u64);
                        break;
                case 0x0d: /* lrl */
                        rc = emu_load_ril((u32 __user *)uptr, &rx->u32[1]);
                        break;
                case 0x0e: /* llgfrl */
                        rc = emu_load_ril((u32 __user *)uptr, &rx->u64);
                        break;
                case 0x07: /* sthrl */
                        rc = emu_store_ril(regs, (u16 __user *)uptr, &rx->u16[3]);
                        break;
                case 0x0b: /* stgrl */
                        rc = emu_store_ril(regs, (u64 __user *)uptr, &rx->u64);
                        break;
                case 0x0f: /* strl */
                        rc = emu_store_ril(regs, (u32 __user *)uptr, &rx->u32[1]);
                        break;
                }
                break;
        case 0xc6:
                switch (insn->opc1) {
                case 0x02: /* pfdrl */
                        /* Prefetch is a hint: nothing to do beyond the
                         * facility check. */
                        if (!test_facility(34))
                                rc = EMU_ILLEGAL_OP;
                        break;
                case 0x04: /* cghrl */
                        rc = emu_cmp_ril(regs, (s16 __user *)uptr, &rx->s64);
                        break;
                case 0x05: /* chrl */
                        rc = emu_cmp_ril(regs, (s16 __user *)uptr, &rx->s32[1]);
                        break;
                case 0x06: /* clghrl */
                        rc = emu_cmp_ril(regs, (u16 __user *)uptr, &rx->u64);
                        break;
                case 0x07: /* clhrl */
                        rc = emu_cmp_ril(regs, (u16 __user *)uptr, &rx->u32[1]);
                        break;
                case 0x08: /* cgrl */
                        rc = emu_cmp_ril(regs, (s64 __user *)uptr, &rx->s64);
                        break;
                case 0x0a: /* clgrl */
                        rc = emu_cmp_ril(regs, (u64 __user *)uptr, &rx->u64);
                        break;
                case 0x0c: /* cgfrl */
                        rc = emu_cmp_ril(regs, (s32 __user *)uptr, &rx->s64);
                        break;
                case 0x0d: /* crl */
                        rc = emu_cmp_ril(regs, (s32 __user *)uptr, &rx->s32[1]);
                        break;
                case 0x0e: /* clgfrl */
                        rc = emu_cmp_ril(regs, (u32 __user *)uptr, &rx->u64);
                        break;
                case 0x0f: /* clrl */
                        rc = emu_cmp_ril(regs, (u32 __user *)uptr, &rx->u32[1]);
                        break;
                }
                break;
        }
        /* Step the PSW past the original instruction. */
        adjust_psw_addr(&regs->psw, ilen);
        /* int_code: instruction length in the high half, program
         * interruption code in the low half. */
        switch (rc) {
        case EMU_ILLEGAL_OP:
                regs->int_code = ilen << 16 | 0x0001; /* operation exception */
                do_report_trap(regs, SIGILL, ILL_ILLOPC, NULL);
                break;
        case EMU_SPECIFICATION:
                regs->int_code = ilen << 16 | 0x0006; /* specification exception */
                do_report_trap(regs, SIGILL, ILL_ILLOPC , NULL);
                break;
        case EMU_ADDRESSING:
                regs->int_code = ilen << 16 | 0x0005; /* addressing exception */
                do_report_trap(regs, SIGSEGV, SEGV_MAPERR, NULL);
                break;
        }
}
 378
 379bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
 380{
 381        if ((psw_bits(regs->psw).eaba == PSW_AMODE_24BIT) ||
 382            ((psw_bits(regs->psw).eaba == PSW_AMODE_31BIT) &&
 383             !is_compat_task())) {
 384                regs->psw.addr = __rewind_psw(regs->psw, UPROBE_SWBP_INSN_SIZE);
 385                do_report_trap(regs, SIGILL, ILL_ILLADR, NULL);
 386                return true;
 387        }
 388        if (probe_is_insn_relative_long(auprobe->insn)) {
 389                handle_insn_ril(auprobe, regs);
 390                return true;
 391        }
 392        return false;
 393}
 394