linux/arch/s390/kernel/ptrace.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *  Ptrace user space interface.
   4 *
   5 *    Copyright IBM Corp. 1999, 2010
   6 *    Author(s): Denis Joseph Barrow
   7 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
   8 */
   9
  10#include <linux/kernel.h>
  11#include <linux/sched.h>
  12#include <linux/sched/task_stack.h>
  13#include <linux/mm.h>
  14#include <linux/smp.h>
  15#include <linux/errno.h>
  16#include <linux/ptrace.h>
  17#include <linux/user.h>
  18#include <linux/security.h>
  19#include <linux/audit.h>
  20#include <linux/signal.h>
  21#include <linux/elf.h>
  22#include <linux/regset.h>
  23#include <linux/tracehook.h>
  24#include <linux/seccomp.h>
  25#include <linux/compat.h>
  26#include <trace/syscall.h>
  27#include <asm/page.h>
  28#include <asm/pgtable.h>
  29#include <asm/pgalloc.h>
  30#include <linux/uaccess.h>
  31#include <asm/unistd.h>
  32#include <asm/switch_to.h>
  33#include <asm/runtime_instr.h>
  34#include <asm/facility.h>
  35
  36#include "entry.h"
  37
  38#ifdef CONFIG_COMPAT
  39#include "compat_ptrace.h"
  40#endif
  41
  42#define CREATE_TRACE_POINTS
  43#include <trace/events/syscalls.h>
  44
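/*
 * Descriptive note (added for clarity, based on the code below):
 * recompute the tracing related control registers of a task - the
 * transactional-execution control in CR0, the transaction diagnostic
 * scope and guarded-storage enablement in CR2, and the PER mask, start
 * and end address in CR9-CR11.  Single stepping and block stepping
 * requested via the TIF flags are merged with the user specified PER
 * set, and the PER bit in the PSW is set or cleared accordingly.
 */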
  45void update_cr_regs(struct task_struct *task)
  46{
  47        struct pt_regs *regs = task_pt_regs(task);
  48        struct thread_struct *thread = &task->thread;
  49        struct per_regs old, new;
  50        union ctlreg0 cr0_old, cr0_new;
  51        union ctlreg2 cr2_old, cr2_new;
  52        int cr0_changed, cr2_changed;
  53
  54        __ctl_store(cr0_old.val, 0, 0);
  55        __ctl_store(cr2_old.val, 2, 2);
  56        cr0_new = cr0_old;
  57        cr2_new = cr2_old;
  58        /* Take care of the enable/disable of transactional execution. */
  59        if (MACHINE_HAS_TE) {
  60                /* Set or clear transaction execution TXC bit 8. */
  61                cr0_new.tcx = 1;
  62                if (task->thread.per_flags & PER_FLAG_NO_TE)
  63                        cr0_new.tcx = 0;
  64                /* Set or clear transaction execution TDC bits 62 and 63. */
  65                cr2_new.tdc = 0;
  66                if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
  67                        if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND)
  68                                cr2_new.tdc = 1;
  69                        else
  70                                cr2_new.tdc = 2;
  71                }
  72        }
  73        /* Take care of enable/disable of guarded storage. */
  74        if (MACHINE_HAS_GS) {
  75                cr2_new.gse = 0;
  76                if (task->thread.gs_cb)
  77                        cr2_new.gse = 1;
  78        }
  79        /* Load control register 0/2 iff changed */
  80        cr0_changed = cr0_new.val != cr0_old.val;
  81        cr2_changed = cr2_new.val != cr2_old.val;
  82        if (cr0_changed)
  83                __ctl_load(cr0_new.val, 0, 0);
  84        if (cr2_changed)
  85                __ctl_load(cr2_new.val, 2, 2);
  86        /* Copy user specified PER registers */
  87        new.control = thread->per_user.control;
  88        new.start = thread->per_user.start;
  89        new.end = thread->per_user.end;
  90
  91        /* merge TIF_SINGLE_STEP into user specified PER registers. */
  92        if (test_tsk_thread_flag(task, TIF_SINGLE_STEP) ||
  93            test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) {
  94                if (test_tsk_thread_flag(task, TIF_BLOCK_STEP))
  95                        new.control |= PER_EVENT_BRANCH;
  96                else
  97                        new.control |= PER_EVENT_IFETCH;
  98                new.control |= PER_CONTROL_SUSPENSION;
  99                new.control |= PER_EVENT_TRANSACTION_END;
 100                if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP))
 101                        new.control |= PER_EVENT_IFETCH;
 102                new.start = 0;
 103                new.end = -1UL;
 104        }
 105
 106        /* Take care of the PER enablement bit in the PSW. */
 107        if (!(new.control & PER_EVENT_MASK)) {
 108                regs->psw.mask &= ~PSW_MASK_PER;
 109                return;
 110        }
 111        regs->psw.mask |= PSW_MASK_PER;
 112        __ctl_store(old, 9, 11);
 113        if (memcmp(&new, &old, sizeof(struct per_regs)) != 0)
 114                __ctl_load(new, 9, 11);
 115}
 116
 117void user_enable_single_step(struct task_struct *task)
 118{
 119        clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
 120        set_tsk_thread_flag(task, TIF_SINGLE_STEP);
 121}
 122
 123void user_disable_single_step(struct task_struct *task)
 124{
 125        clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
 126        clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
 127}
 128
 129void user_enable_block_step(struct task_struct *task)
 130{
 131        set_tsk_thread_flag(task, TIF_SINGLE_STEP);
 132        set_tsk_thread_flag(task, TIF_BLOCK_STEP);
 133}
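/*
 * Illustrative sketch (not part of the original file): these hooks are
 * reached through the generic ptrace resume requests.  PTRACE_SINGLESTEP
 * ends up in user_enable_single_step(), the s390 specific
 * PTRACE_SINGLEBLOCK request in user_enable_block_step().  Roughly, from
 * a tracer (assuming <sys/ptrace.h> and <sys/wait.h>):
 *
 *	ptrace(PTRACE_SINGLESTEP, pid, 0L, 0L);	 // stop after one instruction
 *	waitpid(pid, &status, 0);
 *	ptrace(PTRACE_SINGLEBLOCK, pid, 0L, 0L); // stop at the next taken branch
 *	waitpid(pid, &status, 0);
 */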
 134
 135/*
  136 * Called by kernel/ptrace.c when detaching.
 137 *
 138 * Clear all debugging related fields.
 139 */
 140void ptrace_disable(struct task_struct *task)
 141{
 142        memset(&task->thread.per_user, 0, sizeof(task->thread.per_user));
 143        memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
 144        clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
 145        clear_pt_regs_flag(task_pt_regs(task), PIF_PER_TRAP);
 146        task->thread.per_flags = 0;
 147}
 148
 149#define __ADDR_MASK 7
 150
 151static inline unsigned long __peek_user_per(struct task_struct *child,
 152                                            addr_t addr)
 153{
 154        struct per_struct_kernel *dummy = NULL;
 155
 156        if (addr == (addr_t) &dummy->cr9)
 157                /* Control bits of the active per set. */
 158                return test_thread_flag(TIF_SINGLE_STEP) ?
 159                        PER_EVENT_IFETCH : child->thread.per_user.control;
 160        else if (addr == (addr_t) &dummy->cr10)
 161                /* Start address of the active per set. */
 162                return test_thread_flag(TIF_SINGLE_STEP) ?
 163                        0 : child->thread.per_user.start;
 164        else if (addr == (addr_t) &dummy->cr11)
 165                /* End address of the active per set. */
 166                return test_thread_flag(TIF_SINGLE_STEP) ?
 167                        -1UL : child->thread.per_user.end;
 168        else if (addr == (addr_t) &dummy->bits)
 169                /* Single-step bit. */
 170                return test_thread_flag(TIF_SINGLE_STEP) ?
 171                        (1UL << (BITS_PER_LONG - 1)) : 0;
 172        else if (addr == (addr_t) &dummy->starting_addr)
 173                /* Start address of the user specified per set. */
 174                return child->thread.per_user.start;
 175        else if (addr == (addr_t) &dummy->ending_addr)
 176                /* End address of the user specified per set. */
 177                return child->thread.per_user.end;
 178        else if (addr == (addr_t) &dummy->perc_atmid)
 179                /* PER code, ATMID and AI of the last PER trap */
 180                return (unsigned long)
 181                        child->thread.per_event.cause << (BITS_PER_LONG - 16);
 182        else if (addr == (addr_t) &dummy->address)
 183                /* Address of the last PER trap */
 184                return child->thread.per_event.address;
 185        else if (addr == (addr_t) &dummy->access_id)
 186                /* Access id of the last PER trap */
 187                return (unsigned long)
 188                        child->thread.per_event.paid << (BITS_PER_LONG - 8);
 189        return 0;
 190}
 191
 192/*
 193 * Read the word at offset addr from the user area of a process. The
 194 * trouble here is that the information is littered over different
 195 * locations. The process registers are found on the kernel stack,
 196 * the floating point stuff and the trace settings are stored in
 197 * the task structure. In addition the different structures in
 198 * struct user contain pad bytes that should be read as zeroes.
 199 * Lovely...
 200 */
 201static unsigned long __peek_user(struct task_struct *child, addr_t addr)
 202{
 203        struct user *dummy = NULL;
 204        addr_t offset, tmp;
 205
 206        if (addr < (addr_t) &dummy->regs.acrs) {
 207                /*
 208                 * psw and gprs are stored on the stack
 209                 */
 210                tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
 211                if (addr == (addr_t) &dummy->regs.psw.mask) {
 212                        /* Return a clean psw mask. */
 213                        tmp &= PSW_MASK_USER | PSW_MASK_RI;
 214                        tmp |= PSW_USER_BITS;
 215                }
 216
 217        } else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
 218                /*
 219                 * access registers are stored in the thread structure
 220                 */
 221                offset = addr - (addr_t) &dummy->regs.acrs;
 222                /*
 223                 * Very special case: old & broken 64 bit gdb reading
 224                 * from acrs[15]. Result is a 64 bit value. Read the
 225                 * 32 bit acrs[15] value and shift it by 32. Sick...
 226                 */
 227                if (addr == (addr_t) &dummy->regs.acrs[15])
 228                        tmp = ((unsigned long) child->thread.acrs[15]) << 32;
 229                else
 230                        tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);
 231
 232        } else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
 233                /*
 234                 * orig_gpr2 is stored on the kernel stack
 235                 */
 236                tmp = (addr_t) task_pt_regs(child)->orig_gpr2;
 237
 238        } else if (addr < (addr_t) &dummy->regs.fp_regs) {
 239                /*
 240                 * prevent reads of padding hole between
 241                 * orig_gpr2 and fp_regs on s390.
 242                 */
 243                tmp = 0;
 244
 245        } else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
 246                /*
 247                 * floating point control reg. is in the thread structure
 248                 */
 249                tmp = child->thread.fpu.fpc;
 250                tmp <<= BITS_PER_LONG - 32;
 251
 252        } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
 253                /*
 254                 * floating point regs. are either in child->thread.fpu
 255                 * or the child->thread.fpu.vxrs array
 256                 */
 257                offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
 258                if (MACHINE_HAS_VX)
 259                        tmp = *(addr_t *)
 260                               ((addr_t) child->thread.fpu.vxrs + 2*offset);
 261                else
 262                        tmp = *(addr_t *)
 263                               ((addr_t) child->thread.fpu.fprs + offset);
 264
 265        } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
 266                /*
 267                 * Handle access to the per_info structure.
 268                 */
 269                addr -= (addr_t) &dummy->regs.per_info;
 270                tmp = __peek_user_per(child, addr);
 271
 272        } else
 273                tmp = 0;
 274
 275        return tmp;
 276}
 277
 278static int
 279peek_user(struct task_struct *child, addr_t addr, addr_t data)
 280{
 281        addr_t tmp, mask;
 282
 283        /*
 284         * Stupid gdb peeks/pokes the access registers in 64 bit with
 285         * an alignment of 4. Programmers from hell...
 286         */
 287        mask = __ADDR_MASK;
 288        if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
 289            addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
 290                mask = 3;
 291        if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
 292                return -EIO;
 293
 294        tmp = __peek_user(child, addr);
 295        return put_user(tmp, (addr_t __user *) data);
 296}
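/*
 * Illustrative sketch (not part of the original file): a tracer reads and
 * writes single words of the USER area with PTRACE_PEEKUSR/PTRACE_POKEUSR,
 * addressing them by their offset in struct user (field names as used in
 * this file; glibc spells the requests PTRACE_PEEKUSER/PTRACE_POKEUSER and
 * returns the peeked value directly):
 *
 *	#include <stddef.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/user.h>
 *
 *	long gpr2 = ptrace(PTRACE_PEEKUSER, pid,
 *			   offsetof(struct user, regs.gprs[2]), 0L);
 *	ptrace(PTRACE_POKEUSER, pid,
 *	       offsetof(struct user, regs.gprs[2]), gpr2 + 1);
 */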
 297
 298static inline void __poke_user_per(struct task_struct *child,
 299                                   addr_t addr, addr_t data)
 300{
 301        struct per_struct_kernel *dummy = NULL;
 302
 303        /*
 304         * There are only three fields in the per_info struct that the
 305         * debugger user can write to.
 306         * 1) cr9: the debugger wants to set a new PER event mask
 307         * 2) starting_addr: the debugger wants to set a new starting
 308         *    address to use with the PER event mask.
 309         * 3) ending_addr: the debugger wants to set a new ending
 310         *    address to use with the PER event mask.
 311         * The user specified PER event mask and the start and end
 312         * addresses are used only if single stepping is not in effect.
 313         * Writes to any other field in per_info are ignored.
 314         */
 315        if (addr == (addr_t) &dummy->cr9)
 316                /* PER event mask of the user specified per set. */
 317                child->thread.per_user.control =
 318                        data & (PER_EVENT_MASK | PER_CONTROL_MASK);
 319        else if (addr == (addr_t) &dummy->starting_addr)
 320                /* Starting address of the user specified per set. */
 321                child->thread.per_user.start = data;
 322        else if (addr == (addr_t) &dummy->ending_addr)
 323                /* Ending address of the user specified per set. */
 324                child->thread.per_user.end = data;
 325}
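/*
 * Note (added for clarity): together with PTRACE_POKEUSR(_AREA) on the
 * per_info part of the USER area this is how a debugger arms a
 * user-specified PER set - a PER event mask for cr9 plus a starting and
 * ending address - to get hardware breakpoints or watchpoints on that
 * range.  update_cr_regs() later merges these values into control
 * registers 9-11 when the task runs.
 */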
 326
 327/*
 328 * Write a word to the user area of a process at location addr. This
  329 * operation has an additional problem compared to peek_user:
  330 * stores to the program status word and to the floating point
  331 * control register need to be checked for validity.
 332 */
 333static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
 334{
 335        struct user *dummy = NULL;
 336        addr_t offset;
 337
 338        if (addr < (addr_t) &dummy->regs.acrs) {
 339                /*
 340                 * psw and gprs are stored on the stack
 341                 */
 342                if (addr == (addr_t) &dummy->regs.psw.mask) {
 343                        unsigned long mask = PSW_MASK_USER;
 344
 345                        mask |= is_ri_task(child) ? PSW_MASK_RI : 0;
 346                        if ((data ^ PSW_USER_BITS) & ~mask)
 347                                /* Invalid psw mask. */
 348                                return -EINVAL;
 349                        if ((data & PSW_MASK_ASC) == PSW_ASC_HOME)
 350                                /* Invalid address-space-control bits */
 351                                return -EINVAL;
 352                        if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))
 353                                /* Invalid addressing mode bits */
 354                                return -EINVAL;
 355                }
 356                *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
 357
 358        } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
 359                /*
 360                 * access registers are stored in the thread structure
 361                 */
 362                offset = addr - (addr_t) &dummy->regs.acrs;
 363                /*
 364                 * Very special case: old & broken 64 bit gdb writing
 365                 * to acrs[15] with a 64 bit value. Ignore the lower
 366                 * half of the value and write the upper 32 bit to
 367                 * acrs[15]. Sick...
 368                 */
 369                if (addr == (addr_t) &dummy->regs.acrs[15])
 370                        child->thread.acrs[15] = (unsigned int) (data >> 32);
 371                else
 372                        *(addr_t *)((addr_t) &child->thread.acrs + offset) = data;
 373
 374        } else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
 375                /*
 376                 * orig_gpr2 is stored on the kernel stack
 377                 */
 378                task_pt_regs(child)->orig_gpr2 = data;
 379
 380        } else if (addr < (addr_t) &dummy->regs.fp_regs) {
 381                /*
 382                 * prevent writes of padding hole between
 383                 * orig_gpr2 and fp_regs on s390.
 384                 */
 385                return 0;
 386
 387        } else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
 388                /*
 389                 * floating point control reg. is in the thread structure
 390                 */
 391                if ((unsigned int) data != 0 ||
 392                    test_fp_ctl(data >> (BITS_PER_LONG - 32)))
 393                        return -EINVAL;
 394                child->thread.fpu.fpc = data >> (BITS_PER_LONG - 32);
 395
 396        } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
 397                /*
 398                 * floating point regs. are either in child->thread.fpu
 399                 * or the child->thread.fpu.vxrs array
 400                 */
 401                offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
 402                if (MACHINE_HAS_VX)
 403                        *(addr_t *)((addr_t)
 404                                child->thread.fpu.vxrs + 2*offset) = data;
 405                else
 406                        *(addr_t *)((addr_t)
 407                                child->thread.fpu.fprs + offset) = data;
 408
 409        } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
 410                /*
 411                 * Handle access to the per_info structure.
 412                 */
 413                addr -= (addr_t) &dummy->regs.per_info;
 414                __poke_user_per(child, addr, data);
 415
 416        }
 417
 418        return 0;
 419}
 420
 421static int poke_user(struct task_struct *child, addr_t addr, addr_t data)
 422{
 423        addr_t mask;
 424
 425        /*
 426         * Stupid gdb peeks/pokes the access registers in 64 bit with
 427         * an alignment of 4. Programmers from hell indeed...
 428         */
 429        mask = __ADDR_MASK;
 430        if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
 431            addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
 432                mask = 3;
 433        if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
 434                return -EIO;
 435
 436        return __poke_user(child, addr, data);
 437}
 438
 439long arch_ptrace(struct task_struct *child, long request,
 440                 unsigned long addr, unsigned long data)
 441{
  442        ptrace_area parea;
 443        int copied, ret;
 444
 445        switch (request) {
 446        case PTRACE_PEEKUSR:
 447                /* read the word at location addr in the USER area. */
 448                return peek_user(child, addr, data);
 449
 450        case PTRACE_POKEUSR:
 451                /* write the word at location addr in the USER area */
 452                return poke_user(child, addr, data);
 453
 454        case PTRACE_PEEKUSR_AREA:
 455        case PTRACE_POKEUSR_AREA:
 456                if (copy_from_user(&parea, (void __force __user *) addr,
 457                                                        sizeof(parea)))
 458                        return -EFAULT;
 459                addr = parea.kernel_addr;
 460                data = parea.process_addr;
 461                copied = 0;
 462                while (copied < parea.len) {
 463                        if (request == PTRACE_PEEKUSR_AREA)
 464                                ret = peek_user(child, addr, data);
 465                        else {
 466                                addr_t utmp;
 467                                if (get_user(utmp,
 468                                             (addr_t __force __user *) data))
 469                                        return -EFAULT;
 470                                ret = poke_user(child, addr, utmp);
 471                        }
 472                        if (ret)
 473                                return ret;
 474                        addr += sizeof(unsigned long);
 475                        data += sizeof(unsigned long);
 476                        copied += sizeof(unsigned long);
 477                }
 478                return 0;
 479        case PTRACE_GET_LAST_BREAK:
 480                put_user(child->thread.last_break,
 481                         (unsigned long __user *) data);
 482                return 0;
 483        case PTRACE_ENABLE_TE:
 484                if (!MACHINE_HAS_TE)
 485                        return -EIO;
 486                child->thread.per_flags &= ~PER_FLAG_NO_TE;
 487                return 0;
 488        case PTRACE_DISABLE_TE:
 489                if (!MACHINE_HAS_TE)
 490                        return -EIO;
 491                child->thread.per_flags |= PER_FLAG_NO_TE;
 492                child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
 493                return 0;
 494        case PTRACE_TE_ABORT_RAND:
 495                if (!MACHINE_HAS_TE || (child->thread.per_flags & PER_FLAG_NO_TE))
 496                        return -EIO;
 497                switch (data) {
 498                case 0UL:
 499                        child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
 500                        break;
 501                case 1UL:
 502                        child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
 503                        child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND_TEND;
 504                        break;
 505                case 2UL:
 506                        child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
 507                        child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND_TEND;
 508                        break;
 509                default:
 510                        return -EINVAL;
 511                }
 512                return 0;
 513        default:
 514                return ptrace_request(child, request, addr, data);
 515        }
 516}
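/*
 * Illustrative sketch (not part of the original file): the *_USR_AREA
 * requests transfer a whole block of the USER area in one call.  The
 * third ptrace argument points to a ptrace_area descriptor whose field
 * names match the ones used above (exact uapi field types are assumed):
 *
 *	unsigned long gprs[16];
 *	ptrace_area parea = {
 *		.len          = sizeof(gprs),
 *		.kernel_addr  = offsetof(struct user, regs.gprs),
 *		.process_addr = (unsigned long) gprs,
 *	};
 *	ptrace(PTRACE_PEEKUSR_AREA, pid, &parea, 0L);
 */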
 517
 518#ifdef CONFIG_COMPAT
 519/*
 520 * Now the fun part starts... a 31 bit program running in the
 521 * 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
 522 * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy
 523 * to handle, the difference to the 64 bit versions of the requests
  524 * is that the access is done in multiples of 4 bytes instead of
  525 * 8 bytes (sizeof(unsigned long) on 31 bit resp. 64 bit).
  526 * The ugly parts are PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA,
 527 * PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program
 528 * is a 31 bit program too, the content of struct user can be
 529 * emulated. A 31 bit program peeking into the struct user of
 530 * a 64 bit program is a no-no.
 531 */
 532
 533/*
 534 * Same as peek_user_per but for a 31 bit program.
 535 */
 536static inline __u32 __peek_user_per_compat(struct task_struct *child,
 537                                           addr_t addr)
 538{
 539        struct compat_per_struct_kernel *dummy32 = NULL;
 540
 541        if (addr == (addr_t) &dummy32->cr9)
 542                /* Control bits of the active per set. */
 543                return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
 544                        PER_EVENT_IFETCH : child->thread.per_user.control;
 545        else if (addr == (addr_t) &dummy32->cr10)
 546                /* Start address of the active per set. */
 547                return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
 548                        0 : child->thread.per_user.start;
 549        else if (addr == (addr_t) &dummy32->cr11)
 550                /* End address of the active per set. */
 551                return test_thread_flag(TIF_SINGLE_STEP) ?
 552                        PSW32_ADDR_INSN : child->thread.per_user.end;
 553        else if (addr == (addr_t) &dummy32->bits)
 554                /* Single-step bit. */
 555                return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
 556                        0x80000000 : 0;
 557        else if (addr == (addr_t) &dummy32->starting_addr)
 558                /* Start address of the user specified per set. */
 559                return (__u32) child->thread.per_user.start;
 560        else if (addr == (addr_t) &dummy32->ending_addr)
 561                /* End address of the user specified per set. */
 562                return (__u32) child->thread.per_user.end;
 563        else if (addr == (addr_t) &dummy32->perc_atmid)
 564                /* PER code, ATMID and AI of the last PER trap */
 565                return (__u32) child->thread.per_event.cause << 16;
 566        else if (addr == (addr_t) &dummy32->address)
 567                /* Address of the last PER trap */
 568                return (__u32) child->thread.per_event.address;
 569        else if (addr == (addr_t) &dummy32->access_id)
 570                /* Access id of the last PER trap */
 571                return (__u32) child->thread.per_event.paid << 24;
 572        return 0;
 573}
 574
 575/*
 576 * Same as peek_user but for a 31 bit program.
 577 */
 578static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
 579{
 580        struct compat_user *dummy32 = NULL;
 581        addr_t offset;
 582        __u32 tmp;
 583
 584        if (addr < (addr_t) &dummy32->regs.acrs) {
 585                struct pt_regs *regs = task_pt_regs(child);
 586                /*
 587                 * psw and gprs are stored on the stack
 588                 */
 589                if (addr == (addr_t) &dummy32->regs.psw.mask) {
 590                        /* Fake a 31 bit psw mask. */
 591                        tmp = (__u32)(regs->psw.mask >> 32);
 592                        tmp &= PSW32_MASK_USER | PSW32_MASK_RI;
 593                        tmp |= PSW32_USER_BITS;
 594                } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
 595                        /* Fake a 31 bit psw address. */
 596                        tmp = (__u32) regs->psw.addr |
 597                                (__u32)(regs->psw.mask & PSW_MASK_BA);
 598                } else {
 599                        /* gpr 0-15 */
 600                        tmp = *(__u32 *)((addr_t) &regs->psw + addr*2 + 4);
 601                }
 602        } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
 603                /*
 604                 * access registers are stored in the thread structure
 605                 */
 606                offset = addr - (addr_t) &dummy32->regs.acrs;
 607                tmp = *(__u32*)((addr_t) &child->thread.acrs + offset);
 608
 609        } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
 610                /*
 611                 * orig_gpr2 is stored on the kernel stack
 612                 */
 613                tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);
 614
 615        } else if (addr < (addr_t) &dummy32->regs.fp_regs) {
 616                /*
 617                 * prevent reads of padding hole between
 618                 * orig_gpr2 and fp_regs on s390.
 619                 */
 620                tmp = 0;
 621
 622        } else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
 623                /*
 624                 * floating point control reg. is in the thread structure
 625                 */
 626                tmp = child->thread.fpu.fpc;
 627
 628        } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
 629                /*
 630                 * floating point regs. are either in child->thread.fpu
 631                 * or the child->thread.fpu.vxrs array
 632                 */
 633                offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
 634                if (MACHINE_HAS_VX)
 635                        tmp = *(__u32 *)
 636                               ((addr_t) child->thread.fpu.vxrs + 2*offset);
 637                else
 638                        tmp = *(__u32 *)
 639                               ((addr_t) child->thread.fpu.fprs + offset);
 640
 641        } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
 642                /*
 643                 * Handle access to the per_info structure.
 644                 */
 645                addr -= (addr_t) &dummy32->regs.per_info;
 646                tmp = __peek_user_per_compat(child, addr);
 647
 648        } else
 649                tmp = 0;
 650
 651        return tmp;
 652}
 653
 654static int peek_user_compat(struct task_struct *child,
 655                            addr_t addr, addr_t data)
 656{
 657        __u32 tmp;
 658
 659        if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user) - 3)
 660                return -EIO;
 661
 662        tmp = __peek_user_compat(child, addr);
 663        return put_user(tmp, (__u32 __user *) data);
 664}
 665
 666/*
 667 * Same as poke_user_per but for a 31 bit program.
 668 */
 669static inline void __poke_user_per_compat(struct task_struct *child,
 670                                          addr_t addr, __u32 data)
 671{
 672        struct compat_per_struct_kernel *dummy32 = NULL;
 673
 674        if (addr == (addr_t) &dummy32->cr9)
 675                /* PER event mask of the user specified per set. */
 676                child->thread.per_user.control =
 677                        data & (PER_EVENT_MASK | PER_CONTROL_MASK);
 678        else if (addr == (addr_t) &dummy32->starting_addr)
 679                /* Starting address of the user specified per set. */
 680                child->thread.per_user.start = data;
 681        else if (addr == (addr_t) &dummy32->ending_addr)
 682                /* Ending address of the user specified per set. */
 683                child->thread.per_user.end = data;
 684}
 685
 686/*
 687 * Same as poke_user but for a 31 bit program.
 688 */
 689static int __poke_user_compat(struct task_struct *child,
 690                              addr_t addr, addr_t data)
 691{
 692        struct compat_user *dummy32 = NULL;
 693        __u32 tmp = (__u32) data;
 694        addr_t offset;
 695
 696        if (addr < (addr_t) &dummy32->regs.acrs) {
 697                struct pt_regs *regs = task_pt_regs(child);
 698                /*
 699                 * psw, gprs, acrs and orig_gpr2 are stored on the stack
 700                 */
 701                if (addr == (addr_t) &dummy32->regs.psw.mask) {
 702                        __u32 mask = PSW32_MASK_USER;
 703
 704                        mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
 705                        /* Build a 64 bit psw mask from 31 bit mask. */
 706                        if ((tmp ^ PSW32_USER_BITS) & ~mask)
 707                                /* Invalid psw mask. */
 708                                return -EINVAL;
 709                        if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME)
 710                                /* Invalid address-space-control bits */
 711                                return -EINVAL;
 712                        regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
 713                                (regs->psw.mask & PSW_MASK_BA) |
 714                                (__u64)(tmp & mask) << 32;
 715                } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
 716                        /* Build a 64 bit psw address from 31 bit address. */
 717                        regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN;
 718                        /* Transfer 31 bit amode bit to psw mask. */
 719                        regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
 720                                (__u64)(tmp & PSW32_ADDR_AMODE);
 721                } else {
 722                        /* gpr 0-15 */
 723                        *(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp;
 724                }
 725        } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
 726                /*
 727                 * access registers are stored in the thread structure
 728                 */
 729                offset = addr - (addr_t) &dummy32->regs.acrs;
 730                *(__u32*)((addr_t) &child->thread.acrs + offset) = tmp;
 731
 732        } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
 733                /*
 734                 * orig_gpr2 is stored on the kernel stack
 735                 */
 736                *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;
 737
 738        } else if (addr < (addr_t) &dummy32->regs.fp_regs) {
 739                /*
  740                 * prevent writes of padding hole between
 741                 * orig_gpr2 and fp_regs on s390.
 742                 */
 743                return 0;
 744
 745        } else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
 746                /*
 747                 * floating point control reg. is in the thread structure
 748                 */
 749                if (test_fp_ctl(tmp))
 750                        return -EINVAL;
 751                child->thread.fpu.fpc = data;
 752
 753        } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
 754                /*
 755                 * floating point regs. are either in child->thread.fpu
 756                 * or the child->thread.fpu.vxrs array
 757                 */
 758                offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
 759                if (MACHINE_HAS_VX)
 760                        *(__u32 *)((addr_t)
 761                                child->thread.fpu.vxrs + 2*offset) = tmp;
 762                else
 763                        *(__u32 *)((addr_t)
 764                                child->thread.fpu.fprs + offset) = tmp;
 765
 766        } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
 767                /*
 768                 * Handle access to the per_info structure.
 769                 */
 770                addr -= (addr_t) &dummy32->regs.per_info;
 771                __poke_user_per_compat(child, addr, data);
 772        }
 773
 774        return 0;
 775}
 776
 777static int poke_user_compat(struct task_struct *child,
 778                            addr_t addr, addr_t data)
 779{
 780        if (!is_compat_task() || (addr & 3) ||
 781            addr > sizeof(struct compat_user) - 3)
 782                return -EIO;
 783
 784        return __poke_user_compat(child, addr, data);
 785}
 786
 787long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 788                        compat_ulong_t caddr, compat_ulong_t cdata)
 789{
 790        unsigned long addr = caddr;
 791        unsigned long data = cdata;
 792        compat_ptrace_area parea;
 793        int copied, ret;
 794
 795        switch (request) {
 796        case PTRACE_PEEKUSR:
 797                /* read the word at location addr in the USER area. */
 798                return peek_user_compat(child, addr, data);
 799
 800        case PTRACE_POKEUSR:
 801                /* write the word at location addr in the USER area */
 802                return poke_user_compat(child, addr, data);
 803
 804        case PTRACE_PEEKUSR_AREA:
 805        case PTRACE_POKEUSR_AREA:
 806                if (copy_from_user(&parea, (void __force __user *) addr,
 807                                                        sizeof(parea)))
 808                        return -EFAULT;
 809                addr = parea.kernel_addr;
 810                data = parea.process_addr;
 811                copied = 0;
 812                while (copied < parea.len) {
 813                        if (request == PTRACE_PEEKUSR_AREA)
 814                                ret = peek_user_compat(child, addr, data);
 815                        else {
 816                                __u32 utmp;
 817                                if (get_user(utmp,
 818                                             (__u32 __force __user *) data))
 819                                        return -EFAULT;
 820                                ret = poke_user_compat(child, addr, utmp);
 821                        }
 822                        if (ret)
 823                                return ret;
 824                        addr += sizeof(unsigned int);
 825                        data += sizeof(unsigned int);
 826                        copied += sizeof(unsigned int);
 827                }
 828                return 0;
 829        case PTRACE_GET_LAST_BREAK:
 830                put_user(child->thread.last_break,
 831                         (unsigned int __user *) data);
 832                return 0;
 833        }
 834        return compat_ptrace_request(child, request, addr, data);
 835}
 836#endif
 837
 838asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
 839{
 840        unsigned long mask = -1UL;
 841
 842        /*
 843         * The sysc_tracesys code in entry.S stored the system
  844         * call number in gprs[2].
 845         */
 846        if (test_thread_flag(TIF_SYSCALL_TRACE) &&
 847            (tracehook_report_syscall_entry(regs) ||
 848             regs->gprs[2] >= NR_syscalls)) {
 849                /*
 850                 * Tracing decided this syscall should not happen or the
 851                 * debugger stored an invalid system call number. Skip
 852                 * the system call and the system call restart handling.
 853                 */
 854                clear_pt_regs_flag(regs, PIF_SYSCALL);
 855                return -1;
 856        }
 857
 858        /* Do the secure computing check after ptrace. */
 859        if (secure_computing(NULL)) {
 860                /* seccomp failures shouldn't expose any additional code. */
 861                return -1;
 862        }
 863
 864        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
 865                trace_sys_enter(regs, regs->gprs[2]);
 866
 867        if (is_compat_task())
 868                mask = 0xffffffff;
 869
 870        audit_syscall_entry(regs->gprs[2], regs->orig_gpr2 & mask,
  871                            regs->gprs[3] & mask, regs->gprs[4] & mask,
  872                            regs->gprs[5] & mask);
 873
 874        return regs->gprs[2];
 875}
 876
 877asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
 878{
 879        audit_syscall_exit(regs);
 880
 881        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
 882                trace_sys_exit(regs, regs->gprs[2]);
 883
 884        if (test_thread_flag(TIF_SYSCALL_TRACE))
 885                tracehook_report_syscall_exit(regs, 0);
 886}
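/*
 * Illustrative sketch (not part of the original file): a tracer drives
 * these two hooks with PTRACE_SYSCALL.  On s390 gpr 2 holds the system
 * call number at the entry stop and the return value at the exit stop,
 * so a minimal strace-like loop looks roughly like:
 *
 *	for (;;) {
 *		ptrace(PTRACE_SYSCALL, pid, 0L, 0L);	// run to syscall entry
 *		waitpid(pid, &status, 0);
 *		long nr = ptrace(PTRACE_PEEKUSER, pid,
 *				 offsetof(struct user, regs.gprs[2]), 0L);
 *		ptrace(PTRACE_SYSCALL, pid, 0L, 0L);	// run to syscall exit
 *		waitpid(pid, &status, 0);
 *	}
 */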
 887
 888/*
 889 * user_regset definitions.
 890 */
 891
 892static int s390_regs_get(struct task_struct *target,
 893                         const struct user_regset *regset,
 894                         unsigned int pos, unsigned int count,
 895                         void *kbuf, void __user *ubuf)
 896{
 897        if (target == current)
 898                save_access_regs(target->thread.acrs);
 899
 900        if (kbuf) {
 901                unsigned long *k = kbuf;
 902                while (count > 0) {
 903                        *k++ = __peek_user(target, pos);
 904                        count -= sizeof(*k);
 905                        pos += sizeof(*k);
 906                }
 907        } else {
 908                unsigned long __user *u = ubuf;
 909                while (count > 0) {
 910                        if (__put_user(__peek_user(target, pos), u++))
 911                                return -EFAULT;
 912                        count -= sizeof(*u);
 913                        pos += sizeof(*u);
 914                }
 915        }
 916        return 0;
 917}
 918
 919static int s390_regs_set(struct task_struct *target,
 920                         const struct user_regset *regset,
 921                         unsigned int pos, unsigned int count,
 922                         const void *kbuf, const void __user *ubuf)
 923{
 924        int rc = 0;
 925
 926        if (target == current)
 927                save_access_regs(target->thread.acrs);
 928
 929        if (kbuf) {
 930                const unsigned long *k = kbuf;
 931                while (count > 0 && !rc) {
 932                        rc = __poke_user(target, pos, *k++);
 933                        count -= sizeof(*k);
 934                        pos += sizeof(*k);
 935                }
 936        } else {
 937                const unsigned long  __user *u = ubuf;
 938                while (count > 0 && !rc) {
 939                        unsigned long word;
 940                        rc = __get_user(word, u++);
 941                        if (rc)
 942                                break;
 943                        rc = __poke_user(target, pos, word);
 944                        count -= sizeof(*u);
 945                        pos += sizeof(*u);
 946                }
 947        }
 948
 949        if (rc == 0 && target == current)
 950                restore_access_regs(target->thread.acrs);
 951
 952        return rc;
 953}
 954
 955static int s390_fpregs_get(struct task_struct *target,
 956                           const struct user_regset *regset, unsigned int pos,
 957                           unsigned int count, void *kbuf, void __user *ubuf)
 958{
 959        _s390_fp_regs fp_regs;
 960
 961        if (target == current)
 962                save_fpu_regs();
 963
 964        fp_regs.fpc = target->thread.fpu.fpc;
 965        fpregs_store(&fp_regs, &target->thread.fpu);
 966
 967        return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 968                                   &fp_regs, 0, -1);
 969}
 970
 971static int s390_fpregs_set(struct task_struct *target,
 972                           const struct user_regset *regset, unsigned int pos,
 973                           unsigned int count, const void *kbuf,
 974                           const void __user *ubuf)
 975{
 976        int rc = 0;
 977        freg_t fprs[__NUM_FPRS];
 978
 979        if (target == current)
 980                save_fpu_regs();
 981
 982        if (MACHINE_HAS_VX)
 983                convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
 984        else
 985                memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));
 986
 987        /* If setting FPC, must validate it first. */
 988        if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
 989                u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
 990                rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ufpc,
 991                                        0, offsetof(s390_fp_regs, fprs));
 992                if (rc)
 993                        return rc;
 994                if (ufpc[1] != 0 || test_fp_ctl(ufpc[0]))
 995                        return -EINVAL;
 996                target->thread.fpu.fpc = ufpc[0];
 997        }
 998
 999        if (rc == 0 && count > 0)
1000                rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1001                                        fprs, offsetof(s390_fp_regs, fprs), -1);
1002        if (rc)
1003                return rc;
1004
1005        if (MACHINE_HAS_VX)
1006                convert_fp_to_vx(target->thread.fpu.vxrs, fprs);
1007        else
1008                memcpy(target->thread.fpu.fprs, &fprs, sizeof(fprs));
1009
1010        return rc;
1011}
1012
1013static int s390_last_break_get(struct task_struct *target,
1014                               const struct user_regset *regset,
1015                               unsigned int pos, unsigned int count,
1016                               void *kbuf, void __user *ubuf)
1017{
1018        if (count > 0) {
1019                if (kbuf) {
1020                        unsigned long *k = kbuf;
1021                        *k = target->thread.last_break;
1022                } else {
1023                        unsigned long  __user *u = ubuf;
1024                        if (__put_user(target->thread.last_break, u))
1025                                return -EFAULT;
1026                }
1027        }
1028        return 0;
1029}
1030
1031static int s390_last_break_set(struct task_struct *target,
1032                               const struct user_regset *regset,
1033                               unsigned int pos, unsigned int count,
1034                               const void *kbuf, const void __user *ubuf)
1035{
1036        return 0;
1037}
1038
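/*
 * The 256 byte transaction diagnostic block is only valid if the last
 * program interruption was caused by a transaction abort; in that case
 * bit 0x200 is set in the program interruption code saved in int_code.
 */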
1039static int s390_tdb_get(struct task_struct *target,
1040                        const struct user_regset *regset,
1041                        unsigned int pos, unsigned int count,
1042                        void *kbuf, void __user *ubuf)
1043{
1044        struct pt_regs *regs = task_pt_regs(target);
1045        unsigned char *data;
1046
1047        if (!(regs->int_code & 0x200))
1048                return -ENODATA;
1049        data = target->thread.trap_tdb;
1050        return user_regset_copyout(&pos, &count, &kbuf, &ubuf, data, 0, 256);
1051}
1052
1053static int s390_tdb_set(struct task_struct *target,
1054                        const struct user_regset *regset,
1055                        unsigned int pos, unsigned int count,
1056                        const void *kbuf, const void __user *ubuf)
1057{
1058        return 0;
1059}
1060
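/*
 * NT_S390_VXRS_LOW transfers only the right (low) halves of vector
 * registers 0-15; their left halves overlay the floating point
 * registers and are already available through NT_PRFPREG.
 */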
1061static int s390_vxrs_low_get(struct task_struct *target,
1062                             const struct user_regset *regset,
1063                             unsigned int pos, unsigned int count,
1064                             void *kbuf, void __user *ubuf)
1065{
1066        __u64 vxrs[__NUM_VXRS_LOW];
1067        int i;
1068
1069        if (!MACHINE_HAS_VX)
1070                return -ENODEV;
1071        if (target == current)
1072                save_fpu_regs();
1073        for (i = 0; i < __NUM_VXRS_LOW; i++)
1074                vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
1075        return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
1076}
1077
1078static int s390_vxrs_low_set(struct task_struct *target,
1079                             const struct user_regset *regset,
1080                             unsigned int pos, unsigned int count,
1081                             const void *kbuf, const void __user *ubuf)
1082{
1083        __u64 vxrs[__NUM_VXRS_LOW];
1084        int i, rc;
1085
1086        if (!MACHINE_HAS_VX)
1087                return -ENODEV;
1088        if (target == current)
1089                save_fpu_regs();
1090
1091        for (i = 0; i < __NUM_VXRS_LOW; i++)
1092                vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
1093
1094        rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
1095        if (rc == 0)
1096                for (i = 0; i < __NUM_VXRS_LOW; i++)
1097                        *((__u64 *)(target->thread.fpu.vxrs + i) + 1) = vxrs[i];
1098
1099        return rc;
1100}
1101
1102static int s390_vxrs_high_get(struct task_struct *target,
1103                              const struct user_regset *regset,
1104                              unsigned int pos, unsigned int count,
1105                              void *kbuf, void __user *ubuf)
1106{
1107        __vector128 vxrs[__NUM_VXRS_HIGH];
1108
1109        if (!MACHINE_HAS_VX)
1110                return -ENODEV;
1111        if (target == current)
1112                save_fpu_regs();
1113        memcpy(vxrs, target->thread.fpu.vxrs + __NUM_VXRS_LOW, sizeof(vxrs));
1114
1115        return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
1116}
1117
1118static int s390_vxrs_high_set(struct task_struct *target,
1119                              const struct user_regset *regset,
1120                              unsigned int pos, unsigned int count,
1121                              const void *kbuf, const void __user *ubuf)
1122{
1123        int rc;
1124
1125        if (!MACHINE_HAS_VX)
1126                return -ENODEV;
1127        if (target == current)
1128                save_fpu_regs();
1129
1130        rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1131                                target->thread.fpu.vxrs + __NUM_VXRS_LOW, 0, -1);
1132        return rc;
1133}
1134
1135static int s390_system_call_get(struct task_struct *target,
1136                                const struct user_regset *regset,
1137                                unsigned int pos, unsigned int count,
1138                                void *kbuf, void __user *ubuf)
1139{
1140        unsigned int *data = &target->thread.system_call;
1141        return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1142                                   data, 0, sizeof(unsigned int));
1143}
1144
1145static int s390_system_call_set(struct task_struct *target,
1146                                const struct user_regset *regset,
1147                                unsigned int pos, unsigned int count,
1148                                const void *kbuf, const void __user *ubuf)
1149{
1150        unsigned int *data = &target->thread.system_call;
1151        return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1152                                  data, 0, sizeof(unsigned int));
1153}
1154
1155static int s390_gs_cb_get(struct task_struct *target,
1156                          const struct user_regset *regset,
1157                          unsigned int pos, unsigned int count,
1158                          void *kbuf, void __user *ubuf)
1159{
1160        struct gs_cb *data = target->thread.gs_cb;
1161
1162        if (!MACHINE_HAS_GS)
1163                return -ENODEV;
1164        if (!data)
1165                return -ENODATA;
1166        if (target == current)
1167                save_gs_cb(data);
1168        return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1169                                   data, 0, sizeof(struct gs_cb));
1170}
1171
1172static int s390_gs_cb_set(struct task_struct *target,
1173                          const struct user_regset *regset,
1174                          unsigned int pos, unsigned int count,
1175                          const void *kbuf, const void __user *ubuf)
1176{
1177        struct gs_cb gs_cb = { }, *data = NULL;
1178        int rc;
1179
1180        if (!MACHINE_HAS_GS)
1181                return -ENODEV;
1182        if (!target->thread.gs_cb) {
1183                data = kzalloc(sizeof(*data), GFP_KERNEL);
1184                if (!data)
1185                        return -ENOMEM;
1186        }
1187        if (!target->thread.gs_cb)
1188                gs_cb.gsd = 25;
1189        else if (target == current)
1190                save_gs_cb(&gs_cb);
1191        else
1192                gs_cb = *target->thread.gs_cb;
1193        rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1194                                &gs_cb, 0, sizeof(gs_cb));
1195        if (rc) {
1196                kfree(data);
1197                return -EFAULT;
1198        }
1199        preempt_disable();
1200        if (!target->thread.gs_cb)
1201                target->thread.gs_cb = data;
1202        *target->thread.gs_cb = gs_cb;
1203        if (target == current) {
1204                __ctl_set_bit(2, 4);
1205                restore_gs_cb(target->thread.gs_cb);
1206        }
1207        preempt_enable();
1208        return rc;
1209}
1210
1211static int s390_gs_bc_get(struct task_struct *target,
1212                          const struct user_regset *regset,
1213                          unsigned int pos, unsigned int count,
1214                          void *kbuf, void __user *ubuf)
1215{
1216        struct gs_cb *data = target->thread.gs_bc_cb;
1217
1218        if (!MACHINE_HAS_GS)
1219                return -ENODEV;
1220        if (!data)
1221                return -ENODATA;
1222        return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1223                                   data, 0, sizeof(struct gs_cb));
1224}
1225
1226static int s390_gs_bc_set(struct task_struct *target,
1227                          const struct user_regset *regset,
1228                          unsigned int pos, unsigned int count,
1229                          const void *kbuf, const void __user *ubuf)
1230{
1231        struct gs_cb *data = target->thread.gs_bc_cb;
1232
1233        if (!MACHINE_HAS_GS)
1234                return -ENODEV;
1235        if (!data) {
1236                data = kzalloc(sizeof(*data), GFP_KERNEL);
1237                if (!data)
1238                        return -ENOMEM;
1239                target->thread.gs_bc_cb = data;
1240        }
1241        return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1242                                  data, 0, sizeof(struct gs_cb));
1243}
1244
1245static bool is_ri_cb_valid(struct runtime_instr_cb *cb)
1246{
1247        return (cb->rca & 0x1f) == 0 &&
1248                (cb->roa & 0xfff) == 0 &&
1249                (cb->rla & 0xfff) == 0xfff &&
1250                cb->s == 1 &&
1251                cb->k == 1 &&
1252                cb->h == 0 &&
1253                cb->reserved1 == 0 &&
1254                cb->ps == 1 &&
1255                cb->qs == 0 &&
1256                cb->pc == 1 &&
1257                cb->qc == 0 &&
1258                cb->reserved2 == 0 &&
1259                cb->key == PAGE_DEFAULT_KEY &&
1260                cb->reserved3 == 0 &&
1261                cb->reserved4 == 0 &&
1262                cb->reserved5 == 0 &&
1263                cb->reserved6 == 0 &&
1264                cb->reserved7 == 0 &&
1265                cb->reserved8 == 0 &&
1266                cb->rla >= cb->roa &&
1267                cb->rca >= cb->roa &&
1268                cb->rca <= cb->rla+1 &&
1269                cb->m < 3;
1270}
1271
1272static int s390_runtime_instr_get(struct task_struct *target,
1273                                const struct user_regset *regset,
1274                                unsigned int pos, unsigned int count,
1275                                void *kbuf, void __user *ubuf)
1276{
1277        struct runtime_instr_cb *data = target->thread.ri_cb;
1278
1279        if (!test_facility(64))
1280                return -ENODEV;
1281        if (!data)
1282                return -ENODATA;
1283
1284        return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1285                                   data, 0, sizeof(struct runtime_instr_cb));
1286}
1287
1288static int s390_runtime_instr_set(struct task_struct *target,
1289                                  const struct user_regset *regset,
1290                                  unsigned int pos, unsigned int count,
1291                                  const void *kbuf, const void __user *ubuf)
1292{
1293        struct runtime_instr_cb ri_cb = { }, *data = NULL;
1294        int rc;
1295
1296        if (!test_facility(64))
1297                return -ENODEV;
1298
1299        if (!target->thread.ri_cb) {
1300                data = kzalloc(sizeof(*data), GFP_KERNEL);
1301                if (!data)
1302                        return -ENOMEM;
1303        }
1304
1305        if (target->thread.ri_cb) {
1306                if (target == current)
1307                        store_runtime_instr_cb(&ri_cb);
1308                else
1309                        ri_cb = *target->thread.ri_cb;
1310        }
1311
1312        rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1313                                &ri_cb, 0, sizeof(struct runtime_instr_cb));
1314        if (rc) {
1315                kfree(data);
1316                return -EFAULT;
1317        }
1318
1319        if (!is_ri_cb_valid(&ri_cb)) {
1320                kfree(data);
1321                return -EINVAL;
1322        }
1323
1324        preempt_disable();
1325        if (!target->thread.ri_cb)
1326                target->thread.ri_cb = data;
1327        *target->thread.ri_cb = ri_cb;
1328        if (target == current)
1329                load_runtime_instr_cb(target->thread.ri_cb);
1330        preempt_enable();
1331
1332        return 0;
1333}
1334
1335static const struct user_regset s390_regsets[] = {
1336        {
1337                .core_note_type = NT_PRSTATUS,
1338                .n = sizeof(s390_regs) / sizeof(long),
1339                .size = sizeof(long),
1340                .align = sizeof(long),
1341                .get = s390_regs_get,
1342                .set = s390_regs_set,
1343        },
1344        {
1345                .core_note_type = NT_PRFPREG,
1346                .n = sizeof(s390_fp_regs) / sizeof(long),
1347                .size = sizeof(long),
1348                .align = sizeof(long),
1349                .get = s390_fpregs_get,
1350                .set = s390_fpregs_set,
1351        },
1352        {
1353                .core_note_type = NT_S390_SYSTEM_CALL,
1354                .n = 1,
1355                .size = sizeof(unsigned int),
1356                .align = sizeof(unsigned int),
1357                .get = s390_system_call_get,
1358                .set = s390_system_call_set,
1359        },
1360        {
1361                .core_note_type = NT_S390_LAST_BREAK,
1362                .n = 1,
1363                .size = sizeof(long),
1364                .align = sizeof(long),
1365                .get = s390_last_break_get,
1366                .set = s390_last_break_set,
1367        },
1368        {
1369                .core_note_type = NT_S390_TDB,
1370                .n = 1,
1371                .size = 256,
1372                .align = 1,
1373                .get = s390_tdb_get,
1374                .set = s390_tdb_set,
1375        },
1376        {
1377                .core_note_type = NT_S390_VXRS_LOW,
1378                .n = __NUM_VXRS_LOW,
1379                .size = sizeof(__u64),
1380                .align = sizeof(__u64),
1381                .get = s390_vxrs_low_get,
1382                .set = s390_vxrs_low_set,
1383        },
1384        {
1385                .core_note_type = NT_S390_VXRS_HIGH,
1386                .n = __NUM_VXRS_HIGH,
1387                .size = sizeof(__vector128),
1388                .align = sizeof(__vector128),
1389                .get = s390_vxrs_high_get,
1390                .set = s390_vxrs_high_set,
1391        },
1392        {
1393                .core_note_type = NT_S390_GS_CB,
1394                .n = sizeof(struct gs_cb) / sizeof(__u64),
1395                .size = sizeof(__u64),
1396                .align = sizeof(__u64),
1397                .get = s390_gs_cb_get,
1398                .set = s390_gs_cb_set,
1399        },
1400        {
1401                .core_note_type = NT_S390_GS_BC,
1402                .n = sizeof(struct gs_cb) / sizeof(__u64),
1403                .size = sizeof(__u64),
1404                .align = sizeof(__u64),
1405                .get = s390_gs_bc_get,
1406                .set = s390_gs_bc_set,
1407        },
1408        {
1409                .core_note_type = NT_S390_RI_CB,
1410                .n = sizeof(struct runtime_instr_cb) / sizeof(__u64),
1411                .size = sizeof(__u64),
1412                .align = sizeof(__u64),
1413                .get = s390_runtime_instr_get,
1414                .set = s390_runtime_instr_set,
1415        },
1416};
1417
1418static const struct user_regset_view user_s390_view = {
1419        .name = UTS_MACHINE,
1420        .e_machine = EM_S390,
1421        .regsets = s390_regsets,
1422        .n = ARRAY_SIZE(s390_regsets)
1423};
1424
1425#ifdef CONFIG_COMPAT
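    /*
     * Regset handlers for 31-bit (compat) tasks. General registers go
     * word by word through the __peek_user_compat()/__poke_user_compat()
     * helpers defined earlier in this file.
     */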
1426static int s390_compat_regs_get(struct task_struct *target,
1427                                const struct user_regset *regset,
1428                                unsigned int pos, unsigned int count,
1429                                void *kbuf, void __user *ubuf)
1430{
1431        if (target == current)
1432                save_access_regs(target->thread.acrs);
1433
1434        if (kbuf) {
1435                compat_ulong_t *k = kbuf;
1436                while (count > 0) {
1437                        *k++ = __peek_user_compat(target, pos);
1438                        count -= sizeof(*k);
1439                        pos += sizeof(*k);
1440                }
1441        } else {
1442                compat_ulong_t __user *u = ubuf;
1443                while (count > 0) {
1444                        if (__put_user(__peek_user_compat(target, pos), u++))
1445                                return -EFAULT;
1446                        count -= sizeof(*u);
1447                        pos += sizeof(*u);
1448                }
1449        }
1450        return 0;
1451}
1452
1453static int s390_compat_regs_set(struct task_struct *target,
1454                                const struct user_regset *regset,
1455                                unsigned int pos, unsigned int count,
1456                                const void *kbuf, const void __user *ubuf)
1457{
1458        int rc = 0;
1459
1460        if (target == current)
1461                save_access_regs(target->thread.acrs);
1462
1463        if (kbuf) {
1464                const compat_ulong_t *k = kbuf;
1465                while (count > 0 && !rc) {
1466                        rc = __poke_user_compat(target, pos, *k++);
1467                        count -= sizeof(*k);
1468                        pos += sizeof(*k);
1469                }
1470        } else {
1471                const compat_ulong_t  __user *u = ubuf;
1472                while (count > 0 && !rc) {
1473                        compat_ulong_t word;
1474                        rc = __get_user(word, u++);
1475                        if (rc)
1476                                break;
1477                        rc = __poke_user_compat(target, pos, word);
1478                        count -= sizeof(*u);
1479                        pos += sizeof(*u);
1480                }
1481        }
1482
1483        if (rc == 0 && target == current)
1484                restore_access_regs(target->thread.acrs);
1485
1486        return rc;
1487}
1488
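    /*
     * NT_S390_HIGH_GPRS: upper halves of the 64-bit GPRs as seen by a
     * 31-bit tracer. Each 64-bit register spans two consecutive compat
     * words with the high word first, hence the stride of two below.
     */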
1489static int s390_compat_regs_high_get(struct task_struct *target,
1490                                     const struct user_regset *regset,
1491                                     unsigned int pos, unsigned int count,
1492                                     void *kbuf, void __user *ubuf)
1493{
1494        compat_ulong_t *gprs_high;
1495
1496        gprs_high = (compat_ulong_t *)
1497                &task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
1498        if (kbuf) {
1499                compat_ulong_t *k = kbuf;
1500                while (count > 0) {
1501                        *k++ = *gprs_high;
1502                        gprs_high += 2;
1503                        count -= sizeof(*k);
1504                }
1505        } else {
1506                compat_ulong_t __user *u = ubuf;
1507                while (count > 0) {
1508                        if (__put_user(*gprs_high, u++))
1509                                return -EFAULT;
1510                        gprs_high += 2;
1511                        count -= sizeof(*u);
1512                }
1513        }
1514        return 0;
1515}
1516
1517static int s390_compat_regs_high_set(struct task_struct *target,
1518                                     const struct user_regset *regset,
1519                                     unsigned int pos, unsigned int count,
1520                                     const void *kbuf, const void __user *ubuf)
1521{
1522        compat_ulong_t *gprs_high;
1523        int rc = 0;
1524
1525        gprs_high = (compat_ulong_t *)
1526                &task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
1527        if (kbuf) {
1528                const compat_ulong_t *k = kbuf;
1529                while (count > 0) {
1530                        *gprs_high = *k++;
1531                        gprs_high += 2;
1532                        count -= sizeof(*k);
1533                }
1534        } else {
1535                const compat_ulong_t  __user *u = ubuf;
1536                while (count > 0 && !rc) {
1537                        unsigned long word;
1538                        rc = __get_user(word, u++);
1539                        if (rc)
1540                                break;
1541                        *gprs_high = word;
1542                        gprs_high += 2;
1543                        count -= sizeof(*u);
1544                }
1545        }
1546
1547        return rc;
1548}
1549
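    /*
     * The last-break address is read-only for the tracer: the get handler
     * reports thread.last_break, the set handler ignores writes.
     */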
1550static int s390_compat_last_break_get(struct task_struct *target,
1551                                      const struct user_regset *regset,
1552                                      unsigned int pos, unsigned int count,
1553                                      void *kbuf, void __user *ubuf)
1554{
1555        compat_ulong_t last_break;
1556
1557        if (count > 0) {
1558                last_break = target->thread.last_break;
1559                if (kbuf) {
1560                        unsigned long *k = kbuf;
1561                        *k = last_break;
1562                } else {
1563                        unsigned long  __user *u = ubuf;
1564                        if (__put_user(last_break, u))
1565                                return -EFAULT;
1566                }
1567        }
1568        return 0;
1569}
1570
1571static int s390_compat_last_break_set(struct task_struct *target,
1572                                      const struct user_regset *regset,
1573                                      unsigned int pos, unsigned int count,
1574                                      const void *kbuf, const void __user *ubuf)
1575{
1576        return 0;
1577}
1578
1579static const struct user_regset s390_compat_regsets[] = {
1580        {
1581                .core_note_type = NT_PRSTATUS,
1582                .n = sizeof(s390_compat_regs) / sizeof(compat_long_t),
1583                .size = sizeof(compat_long_t),
1584                .align = sizeof(compat_long_t),
1585                .get = s390_compat_regs_get,
1586                .set = s390_compat_regs_set,
1587        },
1588        {
1589                .core_note_type = NT_PRFPREG,
1590                .n = sizeof(s390_fp_regs) / sizeof(compat_long_t),
1591                .size = sizeof(compat_long_t),
1592                .align = sizeof(compat_long_t),
1593                .get = s390_fpregs_get,
1594                .set = s390_fpregs_set,
1595        },
1596        {
1597                .core_note_type = NT_S390_SYSTEM_CALL,
1598                .n = 1,
1599                .size = sizeof(compat_uint_t),
1600                .align = sizeof(compat_uint_t),
1601                .get = s390_system_call_get,
1602                .set = s390_system_call_set,
1603        },
1604        {
1605                .core_note_type = NT_S390_LAST_BREAK,
1606                .n = 1,
1607                .size = sizeof(long),
1608                .align = sizeof(long),
1609                .get = s390_compat_last_break_get,
1610                .set = s390_compat_last_break_set,
1611        },
1612        {
1613                .core_note_type = NT_S390_TDB,
1614                .n = 1,
1615                .size = 256,
1616                .align = 1,
1617                .get = s390_tdb_get,
1618                .set = s390_tdb_set,
1619        },
1620        {
1621                .core_note_type = NT_S390_VXRS_LOW,
1622                .n = __NUM_VXRS_LOW,
1623                .size = sizeof(__u64),
1624                .align = sizeof(__u64),
1625                .get = s390_vxrs_low_get,
1626                .set = s390_vxrs_low_set,
1627        },
1628        {
1629                .core_note_type = NT_S390_VXRS_HIGH,
1630                .n = __NUM_VXRS_HIGH,
1631                .size = sizeof(__vector128),
1632                .align = sizeof(__vector128),
1633                .get = s390_vxrs_high_get,
1634                .set = s390_vxrs_high_set,
1635        },
1636        {
1637                .core_note_type = NT_S390_HIGH_GPRS,
1638                .n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t),
1639                .size = sizeof(compat_long_t),
1640                .align = sizeof(compat_long_t),
1641                .get = s390_compat_regs_high_get,
1642                .set = s390_compat_regs_high_set,
1643        },
1644        {
1645                .core_note_type = NT_S390_GS_CB,
1646                .n = sizeof(struct gs_cb) / sizeof(__u64),
1647                .size = sizeof(__u64),
1648                .align = sizeof(__u64),
1649                .get = s390_gs_cb_get,
1650                .set = s390_gs_cb_set,
1651        },
1652        {
1653                .core_note_type = NT_S390_GS_BC,
1654                .n = sizeof(struct gs_cb) / sizeof(__u64),
1655                .size = sizeof(__u64),
1656                .align = sizeof(__u64),
1657                .get = s390_gs_bc_get,
1658                .set = s390_gs_bc_set,
1659        },
1660        {
1661                .core_note_type = NT_S390_RI_CB,
1662                .n = sizeof(struct runtime_instr_cb) / sizeof(__u64),
1663                .size = sizeof(__u64),
1664                .align = sizeof(__u64),
1665                .get = s390_runtime_instr_get,
1666                .set = s390_runtime_instr_set,
1667        },
1668};
1669
1670static const struct user_regset_view user_s390_compat_view = {
1671        .name = "s390",
1672        .e_machine = EM_S390,
1673        .regsets = s390_compat_regsets,
1674        .n = ARRAY_SIZE(s390_compat_regsets)
1675};
1676#endif
1677
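    /*
     * Pick the regset view matching the tracee: 31-bit (compat) tasks get
     * the s390 compat view, all others the 64-bit view.
     *
     * Illustrative user-space sketch (not taken from this file): a tracer
     * would reach the .get handlers above with something like
     *
     *        s390_regs regs;
     *        struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
     *        ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
     */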
1678const struct user_regset_view *task_user_regset_view(struct task_struct *task)
1679{
1680#ifdef CONFIG_COMPAT
1681        if (test_tsk_thread_flag(task, TIF_31BIT))
1682                return &user_s390_compat_view;
1683#endif
1684        return &user_s390_view;
1685}
1686
1687static const char *gpr_names[NUM_GPRS] = {
1688        "r0", "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
1689        "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
1690};
1691
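    /*
     * Register/stack access helpers (HAVE_REGS_AND_STACK_ACCESS_API),
     * used for example by kprobes trace events to fetch values by
     * register name such as "r2".
     */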
1692unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset)
1693{
1694        if (offset >= NUM_GPRS)
1695                return 0;
1696        return regs->gprs[offset];
1697}
1698
1699int regs_query_register_offset(const char *name)
1700{
1701        unsigned long offset;
1702
1703        if (!name || *name != 'r')
1704                return -EINVAL;
1705        if (kstrtoul(name + 1, 10, &offset))
1706                return -EINVAL;
1707        if (offset >= NUM_GPRS)
1708                return -EINVAL;
1709        return offset;
1710}
1711
1712const char *regs_query_register_name(unsigned int offset)
1713{
1714        if (offset >= NUM_GPRS)
1715                return NULL;
1716        return gpr_names[offset];
1717}
1718
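    /*
     * An address counts as being on the kernel stack if it lies in the
     * same THREAD_SIZE aligned block as the kernel stack pointer taken
     * from the pt_regs.
     */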
1719static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
1720{
1721        unsigned long ksp = kernel_stack_pointer(regs);
1722
1723        return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1));
1724}
1725
1726/**
1727 * regs_get_kernel_stack_nth() - get Nth entry of the stack
1728 * @regs: pt_regs which contains the kernel stack pointer.
1729 * @n: stack entry number.
1730 *
1731 * regs_get_kernel_stack_nth() returns the @n th entry of the kernel stack
1732 * specified by @regs. If the @n th entry is NOT on the kernel stack,
1733 * this returns 0.
1734 */
1735unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
1736{
1737        unsigned long addr;
1738
1739        addr = kernel_stack_pointer(regs) + n * sizeof(long);
1740        if (!regs_within_kernel_stack(regs, addr))
1741                return 0;
1742        return *(unsigned long *)addr;
1743}
1744