linux/arch/powerpc/kernel/ptrace.c
<<
>>
Prefs
   1/*
   2 *  PowerPC version
   3 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
   4 *
   5 *  Derived from "arch/m68k/kernel/ptrace.c"
   6 *  Copyright (C) 1994 by Hamish Macdonald
   7 *  Taken from linux/kernel/ptrace.c and modified for M680x0.
   8 *  linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
   9 *
  10 * Modified by Cort Dougan (cort@hq.fsmlabs.com)
  11 * and Paul Mackerras (paulus@samba.org).
  12 *
  13 * This file is subject to the terms and conditions of the GNU General
  14 * Public License.  See the file README.legal in the main directory of
  15 * this archive for more details.
  16 */
  17
  18#include <linux/kernel.h>
  19#include <linux/sched.h>
  20#include <linux/mm.h>
  21#include <linux/smp.h>
  22#include <linux/errno.h>
  23#include <linux/ptrace.h>
  24#include <linux/regset.h>
  25#include <linux/tracehook.h>
  26#include <linux/elf.h>
  27#include <linux/user.h>
  28#include <linux/security.h>
  29#include <linux/signal.h>
  30#include <linux/seccomp.h>
  31#include <linux/audit.h>
  32#include <trace/syscall.h>
  33#include <linux/hw_breakpoint.h>
  34#include <linux/perf_event.h>
  35#include <linux/context_tracking.h>
  36
  37#include <linux/uaccess.h>
  38#include <asm/page.h>
  39#include <asm/pgtable.h>
  40#include <asm/switch_to.h>
  41#include <asm/tm.h>
  42#include <asm/asm-prototypes.h>
  43
  44#define CREATE_TRACE_POINTS
  45#include <trace/events/syscalls.h>
  46
  47/*
  48 * The parameter save area on the stack is used to store arguments being passed
  49 * to callee function and is located at fixed offset from stack pointer.
  50 */
  51#ifdef CONFIG_PPC32
  52#define PARAMETER_SAVE_AREA_OFFSET      24  /* bytes */
  53#else /* CONFIG_PPC32 */
  54#define PARAMETER_SAVE_AREA_OFFSET      48  /* bytes */
  55#endif
  56
  57struct pt_regs_offset {
  58        const char *name;
  59        int offset;
  60};
  61
  62#define STR(s)  #s                      /* convert to string */
  63#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
  64#define GPR_OFFSET_NAME(num)    \
  65        {.name = STR(r##num), .offset = offsetof(struct pt_regs, gpr[num])}, \
  66        {.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
  67#define REG_OFFSET_END {.name = NULL, .offset = 0}
  68
  69#define TVSO(f) (offsetof(struct thread_vr_state, f))
  70#define TFSO(f) (offsetof(struct thread_fp_state, f))
  71#define TSO(f)  (offsetof(struct thread_struct, f))
  72
  73static const struct pt_regs_offset regoffset_table[] = {
  74        GPR_OFFSET_NAME(0),
  75        GPR_OFFSET_NAME(1),
  76        GPR_OFFSET_NAME(2),
  77        GPR_OFFSET_NAME(3),
  78        GPR_OFFSET_NAME(4),
  79        GPR_OFFSET_NAME(5),
  80        GPR_OFFSET_NAME(6),
  81        GPR_OFFSET_NAME(7),
  82        GPR_OFFSET_NAME(8),
  83        GPR_OFFSET_NAME(9),
  84        GPR_OFFSET_NAME(10),
  85        GPR_OFFSET_NAME(11),
  86        GPR_OFFSET_NAME(12),
  87        GPR_OFFSET_NAME(13),
  88        GPR_OFFSET_NAME(14),
  89        GPR_OFFSET_NAME(15),
  90        GPR_OFFSET_NAME(16),
  91        GPR_OFFSET_NAME(17),
  92        GPR_OFFSET_NAME(18),
  93        GPR_OFFSET_NAME(19),
  94        GPR_OFFSET_NAME(20),
  95        GPR_OFFSET_NAME(21),
  96        GPR_OFFSET_NAME(22),
  97        GPR_OFFSET_NAME(23),
  98        GPR_OFFSET_NAME(24),
  99        GPR_OFFSET_NAME(25),
 100        GPR_OFFSET_NAME(26),
 101        GPR_OFFSET_NAME(27),
 102        GPR_OFFSET_NAME(28),
 103        GPR_OFFSET_NAME(29),
 104        GPR_OFFSET_NAME(30),
 105        GPR_OFFSET_NAME(31),
 106        REG_OFFSET_NAME(nip),
 107        REG_OFFSET_NAME(msr),
 108        REG_OFFSET_NAME(ctr),
 109        REG_OFFSET_NAME(link),
 110        REG_OFFSET_NAME(xer),
 111        REG_OFFSET_NAME(ccr),
 112#ifdef CONFIG_PPC64
 113        REG_OFFSET_NAME(softe),
 114#else
 115        REG_OFFSET_NAME(mq),
 116#endif
 117        REG_OFFSET_NAME(trap),
 118        REG_OFFSET_NAME(dar),
 119        REG_OFFSET_NAME(dsisr),
 120        REG_OFFSET_END,
 121};
 122
 123#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 124static void flush_tmregs_to_thread(struct task_struct *tsk)
 125{
 126        /*
 127         * If task is not current, it will have been flushed already to
 128         * it's thread_struct during __switch_to().
 129         *
 130         * A reclaim flushes ALL the state or if not in TM save TM SPRs
 131         * in the appropriate thread structures from live.
 132         */
 133
 134        if (tsk != current)
 135                return;
 136
 137        if (MSR_TM_SUSPENDED(mfmsr())) {
 138                tm_reclaim_current(TM_CAUSE_SIGNAL);
 139        } else {
 140                tm_enable();
 141                tm_save_sprs(&(tsk->thread));
 142        }
 143}
 144#else
 145static inline void flush_tmregs_to_thread(struct task_struct *tsk) { }
 146#endif
 147
 148/**
 149 * regs_query_register_offset() - query register offset from its name
 150 * @name:       the name of a register
 151 *
 152 * regs_query_register_offset() returns the offset of a register in struct
 153 * pt_regs from its name. If the name is invalid, this returns -EINVAL;
 154 */
 155int regs_query_register_offset(const char *name)
 156{
 157        const struct pt_regs_offset *roff;
 158        for (roff = regoffset_table; roff->name != NULL; roff++)
 159                if (!strcmp(roff->name, name))
 160                        return roff->offset;
 161        return -EINVAL;
 162}
 163
 164/**
 165 * regs_query_register_name() - query register name from its offset
 166 * @offset:     the offset of a register in struct pt_regs.
 167 *
 168 * regs_query_register_name() returns the name of a register from its
 169 * offset in struct pt_regs. If the @offset is invalid, this returns NULL;
 170 */
 171const char *regs_query_register_name(unsigned int offset)
 172{
 173        const struct pt_regs_offset *roff;
 174        for (roff = regoffset_table; roff->name != NULL; roff++)
 175                if (roff->offset == offset)
 176                        return roff->name;
 177        return NULL;
 178}
 179
 180/*
 181 * does not yet catch signals sent when the child dies.
 182 * in exit.c or in signal.c.
 183 */
 184
 185/*
 186 * Set of msr bits that gdb can change on behalf of a process.
 187 */
 188#ifdef CONFIG_PPC_ADV_DEBUG_REGS
 189#define MSR_DEBUGCHANGE 0
 190#else
 191#define MSR_DEBUGCHANGE (MSR_SE | MSR_BE)
 192#endif
 193
 194/*
 195 * Max register writeable via put_reg
 196 */
 197#ifdef CONFIG_PPC32
 198#define PT_MAX_PUT_REG  PT_MQ
 199#else
 200#define PT_MAX_PUT_REG  PT_CCR
 201#endif
 202
 203static unsigned long get_user_msr(struct task_struct *task)
 204{
 205        return task->thread.regs->msr | task->thread.fpexc_mode;
 206}
 207
 208static int set_user_msr(struct task_struct *task, unsigned long msr)
 209{
 210        task->thread.regs->msr &= ~MSR_DEBUGCHANGE;
 211        task->thread.regs->msr |= msr & MSR_DEBUGCHANGE;
 212        return 0;
 213}
 214
 215#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 216static unsigned long get_user_ckpt_msr(struct task_struct *task)
 217{
 218        return task->thread.ckpt_regs.msr | task->thread.fpexc_mode;
 219}
 220
 221static int set_user_ckpt_msr(struct task_struct *task, unsigned long msr)
 222{
 223        task->thread.ckpt_regs.msr &= ~MSR_DEBUGCHANGE;
 224        task->thread.ckpt_regs.msr |= msr & MSR_DEBUGCHANGE;
 225        return 0;
 226}
 227
 228static int set_user_ckpt_trap(struct task_struct *task, unsigned long trap)
 229{
 230        task->thread.ckpt_regs.trap = trap & 0xfff0;
 231        return 0;
 232}
 233#endif
 234
 235#ifdef CONFIG_PPC64
 236static int get_user_dscr(struct task_struct *task, unsigned long *data)
 237{
 238        *data = task->thread.dscr;
 239        return 0;
 240}
 241
 242static int set_user_dscr(struct task_struct *task, unsigned long dscr)
 243{
 244        task->thread.dscr = dscr;
 245        task->thread.dscr_inherit = 1;
 246        return 0;
 247}
 248#else
 249static int get_user_dscr(struct task_struct *task, unsigned long *data)
 250{
 251        return -EIO;
 252}
 253
 254static int set_user_dscr(struct task_struct *task, unsigned long dscr)
 255{
 256        return -EIO;
 257}
 258#endif
 259
 260/*
 261 * We prevent mucking around with the reserved area of trap
 262 * which are used internally by the kernel.
 263 */
 264static int set_user_trap(struct task_struct *task, unsigned long trap)
 265{
 266        task->thread.regs->trap = trap & 0xfff0;
 267        return 0;
 268}
 269
 270/*
 271 * Get contents of register REGNO in task TASK.
 272 */
 273int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data)
 274{
 275        if ((task->thread.regs == NULL) || !data)
 276                return -EIO;
 277
 278        if (regno == PT_MSR) {
 279                *data = get_user_msr(task);
 280                return 0;
 281        }
 282
 283        if (regno == PT_DSCR)
 284                return get_user_dscr(task, data);
 285
 286        if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long))) {
 287                *data = ((unsigned long *)task->thread.regs)[regno];
 288                return 0;
 289        }
 290
 291        return -EIO;
 292}
 293
 294/*
 295 * Write contents of register REGNO in task TASK.
 296 */
 297int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
 298{
 299        if (task->thread.regs == NULL)
 300                return -EIO;
 301
 302        if (regno == PT_MSR)
 303                return set_user_msr(task, data);
 304        if (regno == PT_TRAP)
 305                return set_user_trap(task, data);
 306        if (regno == PT_DSCR)
 307                return set_user_dscr(task, data);
 308
 309        if (regno <= PT_MAX_PUT_REG) {
 310                ((unsigned long *)task->thread.regs)[regno] = data;
 311                return 0;
 312        }
 313        return -EIO;
 314}
 315
 316static int gpr_get(struct task_struct *target, const struct user_regset *regset,
 317                   unsigned int pos, unsigned int count,
 318                   void *kbuf, void __user *ubuf)
 319{
 320        int i, ret;
 321
 322        if (target->thread.regs == NULL)
 323                return -EIO;
 324
 325        if (!FULL_REGS(target->thread.regs)) {
 326                /* We have a partial register set.  Fill 14-31 with bogus values */
 327                for (i = 14; i < 32; i++)
 328                        target->thread.regs->gpr[i] = NV_REG_POISON;
 329        }
 330
 331        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 332                                  target->thread.regs,
 333                                  0, offsetof(struct pt_regs, msr));
 334        if (!ret) {
 335                unsigned long msr = get_user_msr(target);
 336                ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
 337                                          offsetof(struct pt_regs, msr),
 338                                          offsetof(struct pt_regs, msr) +
 339                                          sizeof(msr));
 340        }
 341
 342        BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
 343                     offsetof(struct pt_regs, msr) + sizeof(long));
 344
 345        if (!ret)
 346                ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 347                                          &target->thread.regs->orig_gpr3,
 348                                          offsetof(struct pt_regs, orig_gpr3),
 349                                          sizeof(struct pt_regs));
 350        if (!ret)
 351                ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
 352                                               sizeof(struct pt_regs), -1);
 353
 354        return ret;
 355}
 356
 357static int gpr_set(struct task_struct *target, const struct user_regset *regset,
 358                   unsigned int pos, unsigned int count,
 359                   const void *kbuf, const void __user *ubuf)
 360{
 361        unsigned long reg;
 362        int ret;
 363
 364        if (target->thread.regs == NULL)
 365                return -EIO;
 366
 367        CHECK_FULL_REGS(target->thread.regs);
 368
 369        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 370                                 target->thread.regs,
 371                                 0, PT_MSR * sizeof(reg));
 372
 373        if (!ret && count > 0) {
 374                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
 375                                         PT_MSR * sizeof(reg),
 376                                         (PT_MSR + 1) * sizeof(reg));
 377                if (!ret)
 378                        ret = set_user_msr(target, reg);
 379        }
 380
 381        BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
 382                     offsetof(struct pt_regs, msr) + sizeof(long));
 383
 384        if (!ret)
 385                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 386                                         &target->thread.regs->orig_gpr3,
 387                                         PT_ORIG_R3 * sizeof(reg),
 388                                         (PT_MAX_PUT_REG + 1) * sizeof(reg));
 389
 390        if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
 391                ret = user_regset_copyin_ignore(
 392                        &pos, &count, &kbuf, &ubuf,
 393                        (PT_MAX_PUT_REG + 1) * sizeof(reg),
 394                        PT_TRAP * sizeof(reg));
 395
 396        if (!ret && count > 0) {
 397                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
 398                                         PT_TRAP * sizeof(reg),
 399                                         (PT_TRAP + 1) * sizeof(reg));
 400                if (!ret)
 401                        ret = set_user_trap(target, reg);
 402        }
 403
 404        if (!ret)
 405                ret = user_regset_copyin_ignore(
 406                        &pos, &count, &kbuf, &ubuf,
 407                        (PT_TRAP + 1) * sizeof(reg), -1);
 408
 409        return ret;
 410}
 411
 412/*
 413 * Regardless of transactions, 'fp_state' holds the current running
 414 * value of all FPR registers and 'ckfp_state' holds the last checkpointed
 415 * value of all FPR registers for the current transaction.
 416 *
 417 * Userspace interface buffer layout:
 418 *
 419 * struct data {
 420 *      u64     fpr[32];
 421 *      u64     fpscr;
 422 * };
 423 */
 424static int fpr_get(struct task_struct *target, const struct user_regset *regset,
 425                   unsigned int pos, unsigned int count,
 426                   void *kbuf, void __user *ubuf)
 427{
 428#ifdef CONFIG_VSX
 429        u64 buf[33];
 430        int i;
 431
 432        flush_fp_to_thread(target);
 433
 434        /* copy to local buffer then write that out */
 435        for (i = 0; i < 32 ; i++)
 436                buf[i] = target->thread.TS_FPR(i);
 437        buf[32] = target->thread.fp_state.fpscr;
 438        return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
 439#else
 440        BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
 441                     offsetof(struct thread_fp_state, fpr[32]));
 442
 443        flush_fp_to_thread(target);
 444
 445        return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 446                                   &target->thread.fp_state, 0, -1);
 447#endif
 448}
 449
 450/*
 451 * Regardless of transactions, 'fp_state' holds the current running
 452 * value of all FPR registers and 'ckfp_state' holds the last checkpointed
 453 * value of all FPR registers for the current transaction.
 454 *
 455 * Userspace interface buffer layout:
 456 *
 457 * struct data {
 458 *      u64     fpr[32];
 459 *      u64     fpscr;
 460 * };
 461 *
 462 */
 463static int fpr_set(struct task_struct *target, const struct user_regset *regset,
 464                   unsigned int pos, unsigned int count,
 465                   const void *kbuf, const void __user *ubuf)
 466{
 467#ifdef CONFIG_VSX
 468        u64 buf[33];
 469        int i;
 470
 471        flush_fp_to_thread(target);
 472
 473        for (i = 0; i < 32 ; i++)
 474                buf[i] = target->thread.TS_FPR(i);
 475        buf[32] = target->thread.fp_state.fpscr;
 476
 477        /* copy to local buffer then write that out */
 478        i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
 479        if (i)
 480                return i;
 481
 482        for (i = 0; i < 32 ; i++)
 483                target->thread.TS_FPR(i) = buf[i];
 484        target->thread.fp_state.fpscr = buf[32];
 485        return 0;
 486#else
 487        BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
 488                     offsetof(struct thread_fp_state, fpr[32]));
 489
 490        flush_fp_to_thread(target);
 491
 492        return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 493                                  &target->thread.fp_state, 0, -1);
 494#endif
 495}
 496
 497#ifdef CONFIG_ALTIVEC
 498/*
 499 * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
 500 * The transfer totals 34 quadword.  Quadwords 0-31 contain the
 501 * corresponding vector registers.  Quadword 32 contains the vscr as the
 502 * last word (offset 12) within that quadword.  Quadword 33 contains the
 503 * vrsave as the first word (offset 0) within the quadword.
 504 *
 505 * This definition of the VMX state is compatible with the current PPC32
 506 * ptrace interface.  This allows signal handling and ptrace to use the
 507 * same structures.  This also simplifies the implementation of a bi-arch
 508 * (combined (32- and 64-bit) gdb.
 509 */
 510
 511static int vr_active(struct task_struct *target,
 512                     const struct user_regset *regset)
 513{
 514        flush_altivec_to_thread(target);
 515        return target->thread.used_vr ? regset->n : 0;
 516}
 517
 518/*
 519 * Regardless of transactions, 'vr_state' holds the current running
 520 * value of all the VMX registers and 'ckvr_state' holds the last
 521 * checkpointed value of all the VMX registers for the current
 522 * transaction to fall back on in case it aborts.
 523 *
 524 * Userspace interface buffer layout:
 525 *
 526 * struct data {
 527 *      vector128       vr[32];
 528 *      vector128       vscr;
 529 *      vector128       vrsave;
 530 * };
 531 */
 532static int vr_get(struct task_struct *target, const struct user_regset *regset,
 533                  unsigned int pos, unsigned int count,
 534                  void *kbuf, void __user *ubuf)
 535{
 536        int ret;
 537
 538        flush_altivec_to_thread(target);
 539
 540        BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
 541                     offsetof(struct thread_vr_state, vr[32]));
 542
 543        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 544                                  &target->thread.vr_state, 0,
 545                                  33 * sizeof(vector128));
 546        if (!ret) {
 547                /*
 548                 * Copy out only the low-order word of vrsave.
 549                 */
 550                union {
 551                        elf_vrreg_t reg;
 552                        u32 word;
 553                } vrsave;
 554                memset(&vrsave, 0, sizeof(vrsave));
 555
 556                vrsave.word = target->thread.vrsave;
 557
 558                ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
 559                                          33 * sizeof(vector128), -1);
 560        }
 561
 562        return ret;
 563}
 564
 565/*
 566 * Regardless of transactions, 'vr_state' holds the current running
 567 * value of all the VMX registers and 'ckvr_state' holds the last
 568 * checkpointed value of all the VMX registers for the current
 569 * transaction to fall back on in case it aborts.
 570 *
 571 * Userspace interface buffer layout:
 572 *
 573 * struct data {
 574 *      vector128       vr[32];
 575 *      vector128       vscr;
 576 *      vector128       vrsave;
 577 * };
 578 */
 579static int vr_set(struct task_struct *target, const struct user_regset *regset,
 580                  unsigned int pos, unsigned int count,
 581                  const void *kbuf, const void __user *ubuf)
 582{
 583        int ret;
 584
 585        flush_altivec_to_thread(target);
 586
 587        BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
 588                     offsetof(struct thread_vr_state, vr[32]));
 589
 590        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 591                                 &target->thread.vr_state, 0,
 592                                 33 * sizeof(vector128));
 593        if (!ret && count > 0) {
 594                /*
 595                 * We use only the first word of vrsave.
 596                 */
 597                union {
 598                        elf_vrreg_t reg;
 599                        u32 word;
 600                } vrsave;
 601                memset(&vrsave, 0, sizeof(vrsave));
 602
 603                vrsave.word = target->thread.vrsave;
 604
 605                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
 606                                         33 * sizeof(vector128), -1);
 607                if (!ret)
 608                        target->thread.vrsave = vrsave.word;
 609        }
 610
 611        return ret;
 612}
 613#endif /* CONFIG_ALTIVEC */
 614
 615#ifdef CONFIG_VSX
 616/*
 617 * Currently to set and and get all the vsx state, you need to call
 618 * the fp and VMX calls as well.  This only get/sets the lower 32
 619 * 128bit VSX registers.
 620 */
 621
 622static int vsr_active(struct task_struct *target,
 623                      const struct user_regset *regset)
 624{
 625        flush_vsx_to_thread(target);
 626        return target->thread.used_vsr ? regset->n : 0;
 627}
 628
 629/*
 630 * Regardless of transactions, 'fp_state' holds the current running
 631 * value of all FPR registers and 'ckfp_state' holds the last
 632 * checkpointed value of all FPR registers for the current
 633 * transaction.
 634 *
 635 * Userspace interface buffer layout:
 636 *
 637 * struct data {
 638 *      u64     vsx[32];
 639 * };
 640 */
 641static int vsr_get(struct task_struct *target, const struct user_regset *regset,
 642                   unsigned int pos, unsigned int count,
 643                   void *kbuf, void __user *ubuf)
 644{
 645        u64 buf[32];
 646        int ret, i;
 647
 648        flush_tmregs_to_thread(target);
 649        flush_fp_to_thread(target);
 650        flush_altivec_to_thread(target);
 651        flush_vsx_to_thread(target);
 652
 653        for (i = 0; i < 32 ; i++)
 654                buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
 655
 656        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 657                                  buf, 0, 32 * sizeof(double));
 658
 659        return ret;
 660}
 661
 662/*
 663 * Regardless of transactions, 'fp_state' holds the current running
 664 * value of all FPR registers and 'ckfp_state' holds the last
 665 * checkpointed value of all FPR registers for the current
 666 * transaction.
 667 *
 668 * Userspace interface buffer layout:
 669 *
 670 * struct data {
 671 *      u64     vsx[32];
 672 * };
 673 */
 674static int vsr_set(struct task_struct *target, const struct user_regset *regset,
 675                   unsigned int pos, unsigned int count,
 676                   const void *kbuf, const void __user *ubuf)
 677{
 678        u64 buf[32];
 679        int ret,i;
 680
 681        flush_tmregs_to_thread(target);
 682        flush_fp_to_thread(target);
 683        flush_altivec_to_thread(target);
 684        flush_vsx_to_thread(target);
 685
 686        for (i = 0; i < 32 ; i++)
 687                buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
 688
 689        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 690                                 buf, 0, 32 * sizeof(double));
 691        if (!ret)
 692                for (i = 0; i < 32 ; i++)
 693                        target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
 694
 695        return ret;
 696}
 697#endif /* CONFIG_VSX */
 698
 699#ifdef CONFIG_SPE
 700
 701/*
 702 * For get_evrregs/set_evrregs functions 'data' has the following layout:
 703 *
 704 * struct {
 705 *   u32 evr[32];
 706 *   u64 acc;
 707 *   u32 spefscr;
 708 * }
 709 */
 710
 711static int evr_active(struct task_struct *target,
 712                      const struct user_regset *regset)
 713{
 714        flush_spe_to_thread(target);
 715        return target->thread.used_spe ? regset->n : 0;
 716}
 717
 718static int evr_get(struct task_struct *target, const struct user_regset *regset,
 719                   unsigned int pos, unsigned int count,
 720                   void *kbuf, void __user *ubuf)
 721{
 722        int ret;
 723
 724        flush_spe_to_thread(target);
 725
 726        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 727                                  &target->thread.evr,
 728                                  0, sizeof(target->thread.evr));
 729
 730        BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
 731                     offsetof(struct thread_struct, spefscr));
 732
 733        if (!ret)
 734                ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 735                                          &target->thread.acc,
 736                                          sizeof(target->thread.evr), -1);
 737
 738        return ret;
 739}
 740
 741static int evr_set(struct task_struct *target, const struct user_regset *regset,
 742                   unsigned int pos, unsigned int count,
 743                   const void *kbuf, const void __user *ubuf)
 744{
 745        int ret;
 746
 747        flush_spe_to_thread(target);
 748
 749        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 750                                 &target->thread.evr,
 751                                 0, sizeof(target->thread.evr));
 752
 753        BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
 754                     offsetof(struct thread_struct, spefscr));
 755
 756        if (!ret)
 757                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 758                                         &target->thread.acc,
 759                                         sizeof(target->thread.evr), -1);
 760
 761        return ret;
 762}
 763#endif /* CONFIG_SPE */
 764
 765#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 766/**
 767 * tm_cgpr_active - get active number of registers in CGPR
 768 * @target:     The target task.
 769 * @regset:     The user regset structure.
 770 *
 771 * This function checks for the active number of available
 772 * regisers in transaction checkpointed GPR category.
 773 */
 774static int tm_cgpr_active(struct task_struct *target,
 775                          const struct user_regset *regset)
 776{
 777        if (!cpu_has_feature(CPU_FTR_TM))
 778                return -ENODEV;
 779
 780        if (!MSR_TM_ACTIVE(target->thread.regs->msr))
 781                return 0;
 782
 783        return regset->n;
 784}
 785
 786/**
 787 * tm_cgpr_get - get CGPR registers
 788 * @target:     The target task.
 789 * @regset:     The user regset structure.
 790 * @pos:        The buffer position.
 791 * @count:      Number of bytes to copy.
 792 * @kbuf:       Kernel buffer to copy from.
 793 * @ubuf:       User buffer to copy into.
 794 *
 795 * This function gets transaction checkpointed GPR registers.
 796 *
 797 * When the transaction is active, 'ckpt_regs' holds all the checkpointed
 798 * GPR register values for the current transaction to fall back on if it
 799 * aborts in between. This function gets those checkpointed GPR registers.
 800 * The userspace interface buffer layout is as follows.
 801 *
 802 * struct data {
 803 *      struct pt_regs ckpt_regs;
 804 * };
 805 */
 806static int tm_cgpr_get(struct task_struct *target,
 807                        const struct user_regset *regset,
 808                        unsigned int pos, unsigned int count,
 809                        void *kbuf, void __user *ubuf)
 810{
 811        int ret;
 812
 813        if (!cpu_has_feature(CPU_FTR_TM))
 814                return -ENODEV;
 815
 816        if (!MSR_TM_ACTIVE(target->thread.regs->msr))
 817                return -ENODATA;
 818
 819        flush_tmregs_to_thread(target);
 820        flush_fp_to_thread(target);
 821        flush_altivec_to_thread(target);
 822
 823        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 824                                  &target->thread.ckpt_regs,
 825                                  0, offsetof(struct pt_regs, msr));
 826        if (!ret) {
 827                unsigned long msr = get_user_ckpt_msr(target);
 828
 829                ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
 830                                          offsetof(struct pt_regs, msr),
 831                                          offsetof(struct pt_regs, msr) +
 832                                          sizeof(msr));
 833        }
 834
 835        BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
 836                     offsetof(struct pt_regs, msr) + sizeof(long));
 837
 838        if (!ret)
 839                ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 840                                          &target->thread.ckpt_regs.orig_gpr3,
 841                                          offsetof(struct pt_regs, orig_gpr3),
 842                                          sizeof(struct pt_regs));
 843        if (!ret)
 844                ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
 845                                               sizeof(struct pt_regs), -1);
 846
 847        return ret;
 848}
 849
 850/*
 851 * tm_cgpr_set - set the CGPR registers
 852 * @target:     The target task.
 853 * @regset:     The user regset structure.
 854 * @pos:        The buffer position.
 855 * @count:      Number of bytes to copy.
 856 * @kbuf:       Kernel buffer to copy into.
 857 * @ubuf:       User buffer to copy from.
 858 *
 859 * This function sets in transaction checkpointed GPR registers.
 860 *
 861 * When the transaction is active, 'ckpt_regs' holds the checkpointed
 862 * GPR register values for the current transaction to fall back on if it
 863 * aborts in between. This function sets those checkpointed GPR registers.
 864 * The userspace interface buffer layout is as follows.
 865 *
 866 * struct data {
 867 *      struct pt_regs ckpt_regs;
 868 * };
 869 */
 870static int tm_cgpr_set(struct task_struct *target,
 871                        const struct user_regset *regset,
 872                        unsigned int pos, unsigned int count,
 873                        const void *kbuf, const void __user *ubuf)
 874{
 875        unsigned long reg;
 876        int ret;
 877
 878        if (!cpu_has_feature(CPU_FTR_TM))
 879                return -ENODEV;
 880
 881        if (!MSR_TM_ACTIVE(target->thread.regs->msr))
 882                return -ENODATA;
 883
 884        flush_tmregs_to_thread(target);
 885        flush_fp_to_thread(target);
 886        flush_altivec_to_thread(target);
 887
 888        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 889                                 &target->thread.ckpt_regs,
 890                                 0, PT_MSR * sizeof(reg));
 891
 892        if (!ret && count > 0) {
 893                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
 894                                         PT_MSR * sizeof(reg),
 895                                         (PT_MSR + 1) * sizeof(reg));
 896                if (!ret)
 897                        ret = set_user_ckpt_msr(target, reg);
 898        }
 899
 900        BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
 901                     offsetof(struct pt_regs, msr) + sizeof(long));
 902
 903        if (!ret)
 904                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 905                                         &target->thread.ckpt_regs.orig_gpr3,
 906                                         PT_ORIG_R3 * sizeof(reg),
 907                                         (PT_MAX_PUT_REG + 1) * sizeof(reg));
 908
 909        if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
 910                ret = user_regset_copyin_ignore(
 911                        &pos, &count, &kbuf, &ubuf,
 912                        (PT_MAX_PUT_REG + 1) * sizeof(reg),
 913                        PT_TRAP * sizeof(reg));
 914
 915        if (!ret && count > 0) {
 916                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
 917                                         PT_TRAP * sizeof(reg),
 918                                         (PT_TRAP + 1) * sizeof(reg));
 919                if (!ret)
 920                        ret = set_user_ckpt_trap(target, reg);
 921        }
 922
 923        if (!ret)
 924                ret = user_regset_copyin_ignore(
 925                        &pos, &count, &kbuf, &ubuf,
 926                        (PT_TRAP + 1) * sizeof(reg), -1);
 927
 928        return ret;
 929}
 930
 931/**
 932 * tm_cfpr_active - get active number of registers in CFPR
 933 * @target:     The target task.
 934 * @regset:     The user regset structure.
 935 *
 936 * This function checks for the active number of available
 937 * regisers in transaction checkpointed FPR category.
 938 */
 939static int tm_cfpr_active(struct task_struct *target,
 940                                const struct user_regset *regset)
 941{
 942        if (!cpu_has_feature(CPU_FTR_TM))
 943                return -ENODEV;
 944
 945        if (!MSR_TM_ACTIVE(target->thread.regs->msr))
 946                return 0;
 947
 948        return regset->n;
 949}
 950
 951/**
 952 * tm_cfpr_get - get CFPR registers
 953 * @target:     The target task.
 954 * @regset:     The user regset structure.
 955 * @pos:        The buffer position.
 956 * @count:      Number of bytes to copy.
 957 * @kbuf:       Kernel buffer to copy from.
 958 * @ubuf:       User buffer to copy into.
 959 *
 960 * This function gets in transaction checkpointed FPR registers.
 961 *
 962 * When the transaction is active 'ckfp_state' holds the checkpointed
 963 * values for the current transaction to fall back on if it aborts
 964 * in between. This function gets those checkpointed FPR registers.
 965 * The userspace interface buffer layout is as follows.
 966 *
 967 * struct data {
 968 *      u64     fpr[32];
 969 *      u64     fpscr;
 970 *};
 971 */
 972static int tm_cfpr_get(struct task_struct *target,
 973                        const struct user_regset *regset,
 974                        unsigned int pos, unsigned int count,
 975                        void *kbuf, void __user *ubuf)
 976{
 977        u64 buf[33];
 978        int i;
 979
 980        if (!cpu_has_feature(CPU_FTR_TM))
 981                return -ENODEV;
 982
 983        if (!MSR_TM_ACTIVE(target->thread.regs->msr))
 984                return -ENODATA;
 985
 986        flush_tmregs_to_thread(target);
 987        flush_fp_to_thread(target);
 988        flush_altivec_to_thread(target);
 989
 990        /* copy to local buffer then write that out */
 991        for (i = 0; i < 32 ; i++)
 992                buf[i] = target->thread.TS_CKFPR(i);
 993        buf[32] = target->thread.ckfp_state.fpscr;
 994        return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
 995}
 996
 997/**
 998 * tm_cfpr_set - set CFPR registers
 999 * @target:     The target task.
1000 * @regset:     The user regset structure.
1001 * @pos:        The buffer position.
1002 * @count:      Number of bytes to copy.
1003 * @kbuf:       Kernel buffer to copy into.
1004 * @ubuf:       User buffer to copy from.
1005 *
1006 * This function sets in transaction checkpointed FPR registers.
1007 *
1008 * When the transaction is active 'ckfp_state' holds the checkpointed
1009 * FPR register values for the current transaction to fall back on
1010 * if it aborts in between. This function sets these checkpointed
1011 * FPR registers. The userspace interface buffer layout is as follows.
1012 *
1013 * struct data {
1014 *      u64     fpr[32];
1015 *      u64     fpscr;
1016 *};
1017 */
1018static int tm_cfpr_set(struct task_struct *target,
1019                        const struct user_regset *regset,
1020                        unsigned int pos, unsigned int count,
1021                        const void *kbuf, const void __user *ubuf)
1022{
1023        u64 buf[33];
1024        int i;
1025
1026        if (!cpu_has_feature(CPU_FTR_TM))
1027                return -ENODEV;
1028
1029        if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1030                return -ENODATA;
1031
1032        flush_tmregs_to_thread(target);
1033        flush_fp_to_thread(target);
1034        flush_altivec_to_thread(target);
1035
1036        for (i = 0; i < 32; i++)
1037                buf[i] = target->thread.TS_CKFPR(i);
1038        buf[32] = target->thread.ckfp_state.fpscr;
1039
1040        /* copy to local buffer then write that out */
1041        i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
1042        if (i)
1043                return i;
1044        for (i = 0; i < 32 ; i++)
1045                target->thread.TS_CKFPR(i) = buf[i];
1046        target->thread.ckfp_state.fpscr = buf[32];
1047        return 0;
1048}
1049
1050/**
1051 * tm_cvmx_active - get active number of registers in CVMX
1052 * @target:     The target task.
1053 * @regset:     The user regset structure.
1054 *
1055 * This function checks for the active number of available
1056 * regisers in checkpointed VMX category.
1057 */
1058static int tm_cvmx_active(struct task_struct *target,
1059                                const struct user_regset *regset)
1060{
1061        if (!cpu_has_feature(CPU_FTR_TM))
1062                return -ENODEV;
1063
1064        if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1065                return 0;
1066
1067        return regset->n;
1068}
1069
1070/**
1071 * tm_cvmx_get - get CMVX registers
1072 * @target:     The target task.
1073 * @regset:     The user regset structure.
1074 * @pos:        The buffer position.
1075 * @count:      Number of bytes to copy.
1076 * @kbuf:       Kernel buffer to copy from.
1077 * @ubuf:       User buffer to copy into.
1078 *
1079 * This function gets in transaction checkpointed VMX registers.
1080 *
1081 * When the transaction is active 'ckvr_state' and 'ckvrsave' hold
1082 * the checkpointed values for the current transaction to fall
1083 * back on if it aborts in between. The userspace interface buffer
1084 * layout is as follows.
1085 *
1086 * struct data {
1087 *      vector128       vr[32];
1088 *      vector128       vscr;
1089 *      vector128       vrsave;
1090 *};
1091 */
1092static int tm_cvmx_get(struct task_struct *target,
1093                        const struct user_regset *regset,
1094                        unsigned int pos, unsigned int count,
1095                        void *kbuf, void __user *ubuf)
1096{
1097        int ret;
1098
1099        BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));
1100
1101        if (!cpu_has_feature(CPU_FTR_TM))
1102                return -ENODEV;
1103
1104        if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1105                return -ENODATA;
1106
1107        /* Flush the state */
1108        flush_tmregs_to_thread(target);
1109        flush_fp_to_thread(target);
1110        flush_altivec_to_thread(target);
1111
1112        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1113                                        &target->thread.ckvr_state, 0,
1114                                        33 * sizeof(vector128));
1115        if (!ret) {
1116                /*
1117                 * Copy out only the low-order word of vrsave.
1118                 */
1119                union {
1120                        elf_vrreg_t reg;
1121                        u32 word;
1122                } vrsave;
1123                memset(&vrsave, 0, sizeof(vrsave));
1124                vrsave.word = target->thread.ckvrsave;
1125                ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
1126                                                33 * sizeof(vector128), -1);
1127        }
1128
1129        return ret;
1130}
1131
1132/**
1133 * tm_cvmx_set - set CMVX registers
1134 * @target:     The target task.
1135 * @regset:     The user regset structure.
1136 * @pos:        The buffer position.
1137 * @count:      Number of bytes to copy.
1138 * @kbuf:       Kernel buffer to copy into.
1139 * @ubuf:       User buffer to copy from.
1140 *
1141 * This function sets in transaction checkpointed VMX registers.
1142 *
1143 * When the transaction is active 'ckvr_state' and 'ckvrsave' hold
1144 * the checkpointed values for the current transaction to fall
1145 * back on if it aborts in between. The userspace interface buffer
1146 * layout is as follows.
1147 *
1148 * struct data {
1149 *      vector128       vr[32];
1150 *      vector128       vscr;
1151 *      vector128       vrsave;
1152 *};
1153 */
1154static int tm_cvmx_set(struct task_struct *target,
1155                        const struct user_regset *regset,
1156                        unsigned int pos, unsigned int count,
1157                        const void *kbuf, const void __user *ubuf)
1158{
1159        int ret;
1160
1161        BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));
1162
1163        if (!cpu_has_feature(CPU_FTR_TM))
1164                return -ENODEV;
1165
1166        if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1167                return -ENODATA;
1168
1169        flush_tmregs_to_thread(target);
1170        flush_fp_to_thread(target);
1171        flush_altivec_to_thread(target);
1172
1173        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1174                                        &target->thread.ckvr_state, 0,
1175                                        33 * sizeof(vector128));
1176        if (!ret && count > 0) {
1177                /*
1178                 * We use only the low-order word of vrsave.
1179                 */
1180                union {
1181                        elf_vrreg_t reg;
1182                        u32 word;
1183                } vrsave;
1184                memset(&vrsave, 0, sizeof(vrsave));
1185                vrsave.word = target->thread.ckvrsave;
1186                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
1187                                                33 * sizeof(vector128), -1);
1188                if (!ret)
1189                        target->thread.ckvrsave = vrsave.word;
1190        }
1191
1192        return ret;
1193}
1194
1195/**
1196 * tm_cvsx_active - get active number of registers in CVSX
1197 * @target:     The target task.
1198 * @regset:     The user regset structure.
1199 *
1200 * This function checks for the active number of available
1201 * regisers in transaction checkpointed VSX category.
1202 */
1203static int tm_cvsx_active(struct task_struct *target,
1204                                const struct user_regset *regset)
1205{
1206        if (!cpu_has_feature(CPU_FTR_TM))
1207                return -ENODEV;
1208
1209        if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1210                return 0;
1211
1212        flush_vsx_to_thread(target);
1213        return target->thread.used_vsr ? regset->n : 0;
1214}
1215
1216/**
1217 * tm_cvsx_get - get CVSX registers
1218 * @target:     The target task.
1219 * @regset:     The user regset structure.
1220 * @pos:        The buffer position.
1221 * @count:      Number of bytes to copy.
1222 * @kbuf:       Kernel buffer to copy from.
1223 * @ubuf:       User buffer to copy into.
1224 *
1225 * This function gets in transaction checkpointed VSX registers.
1226 *
1227 * When the transaction is active 'ckfp_state' holds the checkpointed
1228 * values for the current transaction to fall back on if it aborts
1229 * in between. This function gets those checkpointed VSX registers.
1230 * The userspace interface buffer layout is as follows.
1231 *
1232 * struct data {
1233 *      u64     vsx[32];
1234 *};
1235 */
1236static int tm_cvsx_get(struct task_struct *target,
1237                        const struct user_regset *regset,
1238                        unsigned int pos, unsigned int count,
1239                        void *kbuf, void __user *ubuf)
1240{
1241        u64 buf[32];
1242        int ret, i;
1243
1244        if (!cpu_has_feature(CPU_FTR_TM))
1245                return -ENODEV;
1246
1247        if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1248                return -ENODATA;
1249
1250        /* Flush the state */
1251        flush_tmregs_to_thread(target);
1252        flush_fp_to_thread(target);
1253        flush_altivec_to_thread(target);
1254        flush_vsx_to_thread(target);
1255
1256        for (i = 0; i < 32 ; i++)
1257                buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
1258        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1259                                  buf, 0, 32 * sizeof(double));
1260
1261        return ret;
1262}
1263
1264/**
1265 * tm_cvsx_set - set CFPR registers
1266 * @target:     The target task.
1267 * @regset:     The user regset structure.
1268 * @pos:        The buffer position.
1269 * @count:      Number of bytes to copy.
1270 * @kbuf:       Kernel buffer to copy into.
1271 * @ubuf:       User buffer to copy from.
1272 *
1273 * This function sets in transaction checkpointed VSX registers.
1274 *
1275 * When the transaction is active 'ckfp_state' holds the checkpointed
1276 * VSX register values for the current transaction to fall back on
1277 * if it aborts in between. This function sets these checkpointed
1278 * FPR registers. The userspace interface buffer layout is as follows.
1279 *
1280 * struct data {
1281 *      u64     vsx[32];
1282 *};
1283 */
1284static int tm_cvsx_set(struct task_struct *target,
1285                        const struct user_regset *regset,
1286                        unsigned int pos, unsigned int count,
1287                        const void *kbuf, const void __user *ubuf)
1288{
1289        u64 buf[32];
1290        int ret, i;
1291
1292        if (!cpu_has_feature(CPU_FTR_TM))
1293                return -ENODEV;
1294
1295        if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1296                return -ENODATA;
1297
1298        /* Flush the state */
1299        flush_tmregs_to_thread(target);
1300        flush_fp_to_thread(target);
1301        flush_altivec_to_thread(target);
1302        flush_vsx_to_thread(target);
1303
1304        for (i = 0; i < 32 ; i++)
1305                buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
1306
1307        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1308                                 buf, 0, 32 * sizeof(double));
1309        if (!ret)
1310                for (i = 0; i < 32 ; i++)
1311                        target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
1312
1313        return ret;
1314}
1315
1316/**
1317 * tm_spr_active - get active number of registers in TM SPR
1318 * @target:     The target task.
1319 * @regset:     The user regset structure.
1320 *
1321 * This function checks the active number of available
1322 * regisers in the transactional memory SPR category.
1323 */
1324static int tm_spr_active(struct task_struct *target,
1325                         const struct user_regset *regset)
1326{
1327        if (!cpu_has_feature(CPU_FTR_TM))
1328                return -ENODEV;
1329
1330        return regset->n;
1331}
1332
1333/**
1334 * tm_spr_get - get the TM related SPR registers
1335 * @target:     The target task.
1336 * @regset:     The user regset structure.
1337 * @pos:        The buffer position.
1338 * @count:      Number of bytes to copy.
1339 * @kbuf:       Kernel buffer to copy from.
1340 * @ubuf:       User buffer to copy into.
1341 *
1342 * This function gets transactional memory related SPR registers.
1343 * The userspace interface buffer layout is as follows.
1344 *
1345 * struct {
1346 *      u64             tm_tfhar;
1347 *      u64             tm_texasr;
1348 *      u64             tm_tfiar;
1349 * };
1350 */
1351static int tm_spr_get(struct task_struct *target,
1352                      const struct user_regset *regset,
1353                      unsigned int pos, unsigned int count,
1354                      void *kbuf, void __user *ubuf)
1355{
1356        int ret;
1357
1358        /* Build tests */
1359        BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
1360        BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
1361        BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));
1362
1363        if (!cpu_has_feature(CPU_FTR_TM))
1364                return -ENODEV;
1365
1366        /* Flush the states */
1367        flush_tmregs_to_thread(target);
1368        flush_fp_to_thread(target);
1369        flush_altivec_to_thread(target);
1370
1371        /* TFHAR register */
1372        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1373                                &target->thread.tm_tfhar, 0, sizeof(u64));
1374
1375        /* TEXASR register */
1376        if (!ret)
1377                ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1378                                &target->thread.tm_texasr, sizeof(u64),
1379                                2 * sizeof(u64));
1380
1381        /* TFIAR register */
1382        if (!ret)
1383                ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1384                                &target->thread.tm_tfiar,
1385                                2 * sizeof(u64), 3 * sizeof(u64));
1386        return ret;
1387}
1388
1389/**
1390 * tm_spr_set - set the TM related SPR registers
1391 * @target:     The target task.
1392 * @regset:     The user regset structure.
1393 * @pos:        The buffer position.
1394 * @count:      Number of bytes to copy.
1395 * @kbuf:       Kernel buffer to copy into.
1396 * @ubuf:       User buffer to copy from.
1397 *
1398 * This function sets transactional memory related SPR registers.
1399 * The userspace interface buffer layout is as follows.
1400 *
1401 * struct {
1402 *      u64             tm_tfhar;
1403 *      u64             tm_texasr;
1404 *      u64             tm_tfiar;
1405 * };
1406 */
1407static int tm_spr_set(struct task_struct *target,
1408                      const struct user_regset *regset,
1409                      unsigned int pos, unsigned int count,
1410                      const void *kbuf, const void __user *ubuf)
1411{
1412        int ret;
1413
1414        /* Build tests */
1415        BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
1416        BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
1417        BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));
1418
1419        if (!cpu_has_feature(CPU_FTR_TM))
1420                return -ENODEV;
1421
1422        /* Flush the states */
1423        flush_tmregs_to_thread(target);
1424        flush_fp_to_thread(target);
1425        flush_altivec_to_thread(target);
1426
1427        /* TFHAR register */
1428        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1429                                &target->thread.tm_tfhar, 0, sizeof(u64));
1430
1431        /* TEXASR register */
1432        if (!ret)
1433                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1434                                &target->thread.tm_texasr, sizeof(u64),
1435                                2 * sizeof(u64));
1436
1437        /* TFIAR register */
1438        if (!ret)
1439                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1440                                &target->thread.tm_tfiar,
1441                                 2 * sizeof(u64), 3 * sizeof(u64));
1442        return ret;
1443}
1444
1445static int tm_tar_active(struct task_struct *target,
1446                         const struct user_regset *regset)
1447{
1448        if (!cpu_has_feature(CPU_FTR_TM))
1449                return -ENODEV;
1450
1451        if (MSR_TM_ACTIVE(target->thread.regs->msr))
1452                return regset->n;
1453
1454        return 0;
1455}
1456
1457static int tm_tar_get(struct task_struct *target,
1458                      const struct user_regset *regset,
1459                      unsigned int pos, unsigned int count,
1460                      void *kbuf, void __user *ubuf)
1461{
1462        int ret;
1463
1464        if (!cpu_has_feature(CPU_FTR_TM))
1465                return -ENODEV;
1466
1467        if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1468                return -ENODATA;
1469
1470        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1471                                &target->thread.tm_tar, 0, sizeof(u64));
1472        return ret;
1473}
1474
1475static int tm_tar_set(struct task_struct *target,
1476                      const struct user_regset *regset,
1477                      unsigned int pos, unsigned int count,
1478                      const void *kbuf, const void __user *ubuf)
1479{
1480        int ret;
1481
1482        if (!cpu_has_feature(CPU_FTR_TM))
1483                return -ENODEV;
1484
1485        if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1486                return -ENODATA;
1487
1488        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1489                                &target->thread.tm_tar, 0, sizeof(u64));
1490        return ret;
1491}
1492
1493static int tm_ppr_active(struct task_struct *target,
1494                         const struct user_regset *regset)
1495{
1496        if (!cpu_has_feature(CPU_FTR_TM))
1497                return -ENODEV;
1498
1499        if (MSR_TM_ACTIVE(target->thread.regs->msr))
1500                return regset->n;
1501
1502        return 0;
1503}
1504
1505
1506static int tm_ppr_get(struct task_struct *target,
1507                      const struct user_regset *regset,
1508                      unsigned int pos, unsigned int count,
1509                      void *kbuf, void __user *ubuf)
1510{
1511        int ret;
1512
1513        if (!cpu_has_feature(CPU_FTR_TM))
1514                return -ENODEV;
1515
1516        if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1517                return -ENODATA;
1518
1519        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1520                                &target->thread.tm_ppr, 0, sizeof(u64));
1521        return ret;
1522}
1523
1524static int tm_ppr_set(struct task_struct *target,
1525                      const struct user_regset *regset,
1526                      unsigned int pos, unsigned int count,
1527                      const void *kbuf, const void __user *ubuf)
1528{
1529        int ret;
1530
1531        if (!cpu_has_feature(CPU_FTR_TM))
1532                return -ENODEV;
1533
1534        if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1535                return -ENODATA;
1536
1537        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1538                                &target->thread.tm_ppr, 0, sizeof(u64));
1539        return ret;
1540}
1541
1542static int tm_dscr_active(struct task_struct *target,
1543                         const struct user_regset *regset)
1544{
1545        if (!cpu_has_feature(CPU_FTR_TM))
1546                return -ENODEV;
1547
1548        if (MSR_TM_ACTIVE(target->thread.regs->msr))
1549                return regset->n;
1550
1551        return 0;
1552}
1553
1554static int tm_dscr_get(struct task_struct *target,
1555                      const struct user_regset *regset,
1556                      unsigned int pos, unsigned int count,
1557                      void *kbuf, void __user *ubuf)
1558{
1559        int ret;
1560
1561        if (!cpu_has_feature(CPU_FTR_TM))
1562                return -ENODEV;
1563
1564        if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1565                return -ENODATA;
1566
1567        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1568                                &target->thread.tm_dscr, 0, sizeof(u64));
1569        return ret;
1570}
1571
1572static int tm_dscr_set(struct task_struct *target,
1573                      const struct user_regset *regset,
1574                      unsigned int pos, unsigned int count,
1575                      const void *kbuf, const void __user *ubuf)
1576{
1577        int ret;
1578
1579        if (!cpu_has_feature(CPU_FTR_TM))
1580                return -ENODEV;
1581
1582        if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1583                return -ENODATA;
1584
1585        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1586                                &target->thread.tm_dscr, 0, sizeof(u64));
1587        return ret;
1588}
1589#endif  /* CONFIG_PPC_TRANSACTIONAL_MEM */
1590
1591#ifdef CONFIG_PPC64
1592static int ppr_get(struct task_struct *target,
1593                      const struct user_regset *regset,
1594                      unsigned int pos, unsigned int count,
1595                      void *kbuf, void __user *ubuf)
1596{
1597        int ret;
1598
1599        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1600                                &target->thread.ppr, 0, sizeof(u64));
1601        return ret;
1602}
1603
1604static int ppr_set(struct task_struct *target,
1605                      const struct user_regset *regset,
1606                      unsigned int pos, unsigned int count,
1607                      const void *kbuf, const void __user *ubuf)
1608{
1609        int ret;
1610
1611        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1612                                &target->thread.ppr, 0, sizeof(u64));
1613        return ret;
1614}
1615
1616static int dscr_get(struct task_struct *target,
1617                      const struct user_regset *regset,
1618                      unsigned int pos, unsigned int count,
1619                      void *kbuf, void __user *ubuf)
1620{
1621        int ret;
1622
1623        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1624                                &target->thread.dscr, 0, sizeof(u64));
1625        return ret;
1626}
1627static int dscr_set(struct task_struct *target,
1628                      const struct user_regset *regset,
1629                      unsigned int pos, unsigned int count,
1630                      const void *kbuf, const void __user *ubuf)
1631{
1632        int ret;
1633
1634        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1635                                &target->thread.dscr, 0, sizeof(u64));
1636        return ret;
1637}
1638#endif
1639#ifdef CONFIG_PPC_BOOK3S_64
1640static int tar_get(struct task_struct *target,
1641                      const struct user_regset *regset,
1642                      unsigned int pos, unsigned int count,
1643                      void *kbuf, void __user *ubuf)
1644{
1645        int ret;
1646
1647        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1648                                &target->thread.tar, 0, sizeof(u64));
1649        return ret;
1650}
1651static int tar_set(struct task_struct *target,
1652                      const struct user_regset *regset,
1653                      unsigned int pos, unsigned int count,
1654                      const void *kbuf, const void __user *ubuf)
1655{
1656        int ret;
1657
1658        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1659                                &target->thread.tar, 0, sizeof(u64));
1660        return ret;
1661}
1662
1663static int ebb_active(struct task_struct *target,
1664                         const struct user_regset *regset)
1665{
1666        if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1667                return -ENODEV;
1668
1669        if (target->thread.used_ebb)
1670                return regset->n;
1671
1672        return 0;
1673}
1674
1675static int ebb_get(struct task_struct *target,
1676                      const struct user_regset *regset,
1677                      unsigned int pos, unsigned int count,
1678                      void *kbuf, void __user *ubuf)
1679{
1680        /* Build tests */
1681        BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
1682        BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
1683
1684        if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1685                return -ENODEV;
1686
1687        if (!target->thread.used_ebb)
1688                return -ENODATA;
1689
1690        return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1691                        &target->thread.ebbrr, 0, 3 * sizeof(unsigned long));
1692}
1693
1694static int ebb_set(struct task_struct *target,
1695                      const struct user_regset *regset,
1696                      unsigned int pos, unsigned int count,
1697                      const void *kbuf, const void __user *ubuf)
1698{
1699        int ret = 0;
1700
1701        /* Build tests */
1702        BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
1703        BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
1704
1705        if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1706                return -ENODEV;
1707
1708        if (target->thread.used_ebb)
1709                return -ENODATA;
1710
1711        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1712                        &target->thread.ebbrr, 0, sizeof(unsigned long));
1713
1714        if (!ret)
1715                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1716                        &target->thread.ebbhr, sizeof(unsigned long),
1717                        2 * sizeof(unsigned long));
1718
1719        if (!ret)
1720                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1721                        &target->thread.bescr,
1722                        2 * sizeof(unsigned long), 3 * sizeof(unsigned long));
1723
1724        return ret;
1725}
1726static int pmu_active(struct task_struct *target,
1727                         const struct user_regset *regset)
1728{
1729        if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1730                return -ENODEV;
1731
1732        return regset->n;
1733}
1734
1735static int pmu_get(struct task_struct *target,
1736                      const struct user_regset *regset,
1737                      unsigned int pos, unsigned int count,
1738                      void *kbuf, void __user *ubuf)
1739{
1740        /* Build tests */
1741        BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
1742        BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
1743        BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
1744        BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
1745
1746        if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1747                return -ENODEV;
1748
1749        return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1750                        &target->thread.siar, 0,
1751                        5 * sizeof(unsigned long));
1752}
1753
1754static int pmu_set(struct task_struct *target,
1755                      const struct user_regset *regset,
1756                      unsigned int pos, unsigned int count,
1757                      const void *kbuf, const void __user *ubuf)
1758{
1759        int ret = 0;
1760
1761        /* Build tests */
1762        BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
1763        BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
1764        BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
1765        BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
1766
1767        if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1768                return -ENODEV;
1769
1770        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1771                        &target->thread.siar, 0,
1772                        sizeof(unsigned long));
1773
1774        if (!ret)
1775                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1776                        &target->thread.sdar, sizeof(unsigned long),
1777                        2 * sizeof(unsigned long));
1778
1779        if (!ret)
1780                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1781                        &target->thread.sier, 2 * sizeof(unsigned long),
1782                        3 * sizeof(unsigned long));
1783
1784        if (!ret)
1785                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1786                        &target->thread.mmcr2, 3 * sizeof(unsigned long),
1787                        4 * sizeof(unsigned long));
1788
1789        if (!ret)
1790                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1791                        &target->thread.mmcr0, 4 * sizeof(unsigned long),
1792                        5 * sizeof(unsigned long));
1793        return ret;
1794}
1795#endif
1796/*
1797 * These are our native regset flavors.
1798 */
1799enum powerpc_regset {
1800        REGSET_GPR,
1801        REGSET_FPR,
1802#ifdef CONFIG_ALTIVEC
1803        REGSET_VMX,
1804#endif
1805#ifdef CONFIG_VSX
1806        REGSET_VSX,
1807#endif
1808#ifdef CONFIG_SPE
1809        REGSET_SPE,
1810#endif
1811#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1812        REGSET_TM_CGPR,         /* TM checkpointed GPR registers */
1813        REGSET_TM_CFPR,         /* TM checkpointed FPR registers */
1814        REGSET_TM_CVMX,         /* TM checkpointed VMX registers */
1815        REGSET_TM_CVSX,         /* TM checkpointed VSX registers */
1816        REGSET_TM_SPR,          /* TM specific SPR registers */
1817        REGSET_TM_CTAR,         /* TM checkpointed TAR register */
1818        REGSET_TM_CPPR,         /* TM checkpointed PPR register */
1819        REGSET_TM_CDSCR,        /* TM checkpointed DSCR register */
1820#endif
1821#ifdef CONFIG_PPC64
1822        REGSET_PPR,             /* PPR register */
1823        REGSET_DSCR,            /* DSCR register */
1824#endif
1825#ifdef CONFIG_PPC_BOOK3S_64
1826        REGSET_TAR,             /* TAR register */
1827        REGSET_EBB,             /* EBB registers */
1828        REGSET_PMR,             /* Performance Monitor Registers */
1829#endif
1830};
1831
1832static const struct user_regset native_regsets[] = {
1833        [REGSET_GPR] = {
1834                .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
1835                .size = sizeof(long), .align = sizeof(long),
1836                .get = gpr_get, .set = gpr_set
1837        },
1838        [REGSET_FPR] = {
1839                .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
1840                .size = sizeof(double), .align = sizeof(double),
1841                .get = fpr_get, .set = fpr_set
1842        },
1843#ifdef CONFIG_ALTIVEC
1844        [REGSET_VMX] = {
1845                .core_note_type = NT_PPC_VMX, .n = 34,
1846                .size = sizeof(vector128), .align = sizeof(vector128),
1847                .active = vr_active, .get = vr_get, .set = vr_set
1848        },
1849#endif
1850#ifdef CONFIG_VSX
1851        [REGSET_VSX] = {
1852                .core_note_type = NT_PPC_VSX, .n = 32,
1853                .size = sizeof(double), .align = sizeof(double),
1854                .active = vsr_active, .get = vsr_get, .set = vsr_set
1855        },
1856#endif
1857#ifdef CONFIG_SPE
1858        [REGSET_SPE] = {
1859                .core_note_type = NT_PPC_SPE, .n = 35,
1860                .size = sizeof(u32), .align = sizeof(u32),
1861                .active = evr_active, .get = evr_get, .set = evr_set
1862        },
1863#endif
1864#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1865        [REGSET_TM_CGPR] = {
1866                .core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
1867                .size = sizeof(long), .align = sizeof(long),
1868                .active = tm_cgpr_active, .get = tm_cgpr_get, .set = tm_cgpr_set
1869        },
1870        [REGSET_TM_CFPR] = {
1871                .core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
1872                .size = sizeof(double), .align = sizeof(double),
1873                .active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
1874        },
1875        [REGSET_TM_CVMX] = {
1876                .core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
1877                .size = sizeof(vector128), .align = sizeof(vector128),
1878                .active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
1879        },
1880        [REGSET_TM_CVSX] = {
1881                .core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
1882                .size = sizeof(double), .align = sizeof(double),
1883                .active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
1884        },
1885        [REGSET_TM_SPR] = {
1886                .core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
1887                .size = sizeof(u64), .align = sizeof(u64),
1888                .active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
1889        },
1890        [REGSET_TM_CTAR] = {
1891                .core_note_type = NT_PPC_TM_CTAR, .n = 1,
1892                .size = sizeof(u64), .align = sizeof(u64),
1893                .active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
1894        },
1895        [REGSET_TM_CPPR] = {
1896                .core_note_type = NT_PPC_TM_CPPR, .n = 1,
1897                .size = sizeof(u64), .align = sizeof(u64),
1898                .active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
1899        },
1900        [REGSET_TM_CDSCR] = {
1901                .core_note_type = NT_PPC_TM_CDSCR, .n = 1,
1902                .size = sizeof(u64), .align = sizeof(u64),
1903                .active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
1904        },
1905#endif
1906#ifdef CONFIG_PPC64
1907        [REGSET_PPR] = {
1908                .core_note_type = NT_PPC_PPR, .n = 1,
1909                .size = sizeof(u64), .align = sizeof(u64),
1910                .get = ppr_get, .set = ppr_set
1911        },
1912        [REGSET_DSCR] = {
1913                .core_note_type = NT_PPC_DSCR, .n = 1,
1914                .size = sizeof(u64), .align = sizeof(u64),
1915                .get = dscr_get, .set = dscr_set
1916        },
1917#endif
1918#ifdef CONFIG_PPC_BOOK3S_64
1919        [REGSET_TAR] = {
1920                .core_note_type = NT_PPC_TAR, .n = 1,
1921                .size = sizeof(u64), .align = sizeof(u64),
1922                .get = tar_get, .set = tar_set
1923        },
1924        [REGSET_EBB] = {
1925                .core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
1926                .size = sizeof(u64), .align = sizeof(u64),
1927                .active = ebb_active, .get = ebb_get, .set = ebb_set
1928        },
1929        [REGSET_PMR] = {
1930                .core_note_type = NT_PPC_PMU, .n = ELF_NPMU,
1931                .size = sizeof(u64), .align = sizeof(u64),
1932                .active = pmu_active, .get = pmu_get, .set = pmu_set
1933        },
1934#endif
1935};
1936
1937static const struct user_regset_view user_ppc_native_view = {
1938        .name = UTS_MACHINE, .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
1939        .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
1940};
1941
1942#ifdef CONFIG_PPC64
1943#include <linux/compat.h>
1944
1945static int gpr32_get_common(struct task_struct *target,
1946                     const struct user_regset *regset,
1947                     unsigned int pos, unsigned int count,
1948                            void *kbuf, void __user *ubuf,
1949                            unsigned long *regs)
1950{
1951        compat_ulong_t *k = kbuf;
1952        compat_ulong_t __user *u = ubuf;
1953        compat_ulong_t reg;
1954
1955        pos /= sizeof(reg);
1956        count /= sizeof(reg);
1957
1958        if (kbuf)
1959                for (; count > 0 && pos < PT_MSR; --count)
1960                        *k++ = regs[pos++];
1961        else
1962                for (; count > 0 && pos < PT_MSR; --count)
1963                        if (__put_user((compat_ulong_t) regs[pos++], u++))
1964                                return -EFAULT;
1965
1966        if (count > 0 && pos == PT_MSR) {
1967                reg = get_user_msr(target);
1968                if (kbuf)
1969                        *k++ = reg;
1970                else if (__put_user(reg, u++))
1971                        return -EFAULT;
1972                ++pos;
1973                --count;
1974        }
1975
1976        if (kbuf)
1977                for (; count > 0 && pos < PT_REGS_COUNT; --count)
1978                        *k++ = regs[pos++];
1979        else
1980                for (; count > 0 && pos < PT_REGS_COUNT; --count)
1981                        if (__put_user((compat_ulong_t) regs[pos++], u++))
1982                                return -EFAULT;
1983
1984        kbuf = k;
1985        ubuf = u;
1986        pos *= sizeof(reg);
1987        count *= sizeof(reg);
1988        return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
1989                                        PT_REGS_COUNT * sizeof(reg), -1);
1990}
1991
1992static int gpr32_set_common(struct task_struct *target,
1993                     const struct user_regset *regset,
1994                     unsigned int pos, unsigned int count,
1995                     const void *kbuf, const void __user *ubuf,
1996                     unsigned long *regs)
1997{
1998        const compat_ulong_t *k = kbuf;
1999        const compat_ulong_t __user *u = ubuf;
2000        compat_ulong_t reg;
2001
2002        pos /= sizeof(reg);
2003        count /= sizeof(reg);
2004
2005        if (kbuf)
2006                for (; count > 0 && pos < PT_MSR; --count)
2007                        regs[pos++] = *k++;
2008        else
2009                for (; count > 0 && pos < PT_MSR; --count) {
2010                        if (__get_user(reg, u++))
2011                                return -EFAULT;
2012                        regs[pos++] = reg;
2013                }
2014
2015
2016        if (count > 0 && pos == PT_MSR) {
2017                if (kbuf)
2018                        reg = *k++;
2019                else if (__get_user(reg, u++))
2020                        return -EFAULT;
2021                set_user_msr(target, reg);
2022                ++pos;
2023                --count;
2024        }
2025
2026        if (kbuf) {
2027                for (; count > 0 && pos <= PT_MAX_PUT_REG; --count)
2028                        regs[pos++] = *k++;
2029                for (; count > 0 && pos < PT_TRAP; --count, ++pos)
2030                        ++k;
2031        } else {
2032                for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) {
2033                        if (__get_user(reg, u++))
2034                                return -EFAULT;
2035                        regs[pos++] = reg;
2036                }
2037                for (; count > 0 && pos < PT_TRAP; --count, ++pos)
2038                        if (__get_user(reg, u++))
2039                                return -EFAULT;
2040        }
2041
2042        if (count > 0 && pos == PT_TRAP) {
2043                if (kbuf)
2044                        reg = *k++;
2045                else if (__get_user(reg, u++))
2046                        return -EFAULT;
2047                set_user_trap(target, reg);
2048                ++pos;
2049                --count;
2050        }
2051
2052        kbuf = k;
2053        ubuf = u;
2054        pos *= sizeof(reg);
2055        count *= sizeof(reg);
2056        return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
2057                                         (PT_TRAP + 1) * sizeof(reg), -1);
2058}
2059
2060#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2061static int tm_cgpr32_get(struct task_struct *target,
2062                     const struct user_regset *regset,
2063                     unsigned int pos, unsigned int count,
2064                     void *kbuf, void __user *ubuf)
2065{
2066        return gpr32_get_common(target, regset, pos, count, kbuf, ubuf,
2067                        &target->thread.ckpt_regs.gpr[0]);
2068}
2069
2070static int tm_cgpr32_set(struct task_struct *target,
2071                     const struct user_regset *regset,
2072                     unsigned int pos, unsigned int count,
2073                     const void *kbuf, const void __user *ubuf)
2074{
2075        return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
2076                        &target->thread.ckpt_regs.gpr[0]);
2077}
2078#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
2079
2080static int gpr32_get(struct task_struct *target,
2081                     const struct user_regset *regset,
2082                     unsigned int pos, unsigned int count,
2083                     void *kbuf, void __user *ubuf)
2084{
2085        int i;
2086
2087        if (target->thread.regs == NULL)
2088                return -EIO;
2089
2090        if (!FULL_REGS(target->thread.regs)) {
2091                /*
2092                 * We have a partial register set.
2093                 * Fill 14-31 with bogus values.
2094                 */
2095                for (i = 14; i < 32; i++)
2096                        target->thread.regs->gpr[i] = NV_REG_POISON;
2097        }
2098        return gpr32_get_common(target, regset, pos, count, kbuf, ubuf,
2099                        &target->thread.regs->gpr[0]);
2100}
2101
2102static int gpr32_set(struct task_struct *target,
2103                     const struct user_regset *regset,
2104                     unsigned int pos, unsigned int count,
2105                     const void *kbuf, const void __user *ubuf)
2106{
2107        if (target->thread.regs == NULL)
2108                return -EIO;
2109
2110        CHECK_FULL_REGS(target->thread.regs);
2111        return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
2112                        &target->thread.regs->gpr[0]);
2113}
2114
2115/*
2116 * These are the regset flavors matching the CONFIG_PPC32 native set.
2117 */
2118static const struct user_regset compat_regsets[] = {
2119        [REGSET_GPR] = {
2120                .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
2121                .size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
2122                .get = gpr32_get, .set = gpr32_set
2123        },
2124        [REGSET_FPR] = {
2125                .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
2126                .size = sizeof(double), .align = sizeof(double),
2127                .get = fpr_get, .set = fpr_set
2128        },
2129#ifdef CONFIG_ALTIVEC
2130        [REGSET_VMX] = {
2131                .core_note_type = NT_PPC_VMX, .n = 34,
2132                .size = sizeof(vector128), .align = sizeof(vector128),
2133                .active = vr_active, .get = vr_get, .set = vr_set
2134        },
2135#endif
2136#ifdef CONFIG_SPE
2137        [REGSET_SPE] = {
2138                .core_note_type = NT_PPC_SPE, .n = 35,
2139                .size = sizeof(u32), .align = sizeof(u32),
2140                .active = evr_active, .get = evr_get, .set = evr_set
2141        },
2142#endif
2143#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2144        [REGSET_TM_CGPR] = {
2145                .core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
2146                .size = sizeof(long), .align = sizeof(long),
2147                .active = tm_cgpr_active,
2148                .get = tm_cgpr32_get, .set = tm_cgpr32_set
2149        },
2150        [REGSET_TM_CFPR] = {
2151                .core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
2152                .size = sizeof(double), .align = sizeof(double),
2153                .active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
2154        },
2155        [REGSET_TM_CVMX] = {
2156                .core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
2157                .size = sizeof(vector128), .align = sizeof(vector128),
2158                .active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
2159        },
2160        [REGSET_TM_CVSX] = {
2161                .core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
2162                .size = sizeof(double), .align = sizeof(double),
2163                .active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
2164        },
2165        [REGSET_TM_SPR] = {
2166                .core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
2167                .size = sizeof(u64), .align = sizeof(u64),
2168                .active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
2169        },
2170        [REGSET_TM_CTAR] = {
2171                .core_note_type = NT_PPC_TM_CTAR, .n = 1,
2172                .size = sizeof(u64), .align = sizeof(u64),
2173                .active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
2174        },
2175        [REGSET_TM_CPPR] = {
2176                .core_note_type = NT_PPC_TM_CPPR, .n = 1,
2177                .size = sizeof(u64), .align = sizeof(u64),
2178                .active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
2179        },
2180        [REGSET_TM_CDSCR] = {
2181                .core_note_type = NT_PPC_TM_CDSCR, .n = 1,
2182                .size = sizeof(u64), .align = sizeof(u64),
2183                .active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
2184        },
2185#endif
2186#ifdef CONFIG_PPC64
2187        [REGSET_PPR] = {
2188                .core_note_type = NT_PPC_PPR, .n = 1,
2189                .size = sizeof(u64), .align = sizeof(u64),
2190                .get = ppr_get, .set = ppr_set
2191        },
2192        [REGSET_DSCR] = {
2193                .core_note_type = NT_PPC_DSCR, .n = 1,
2194                .size = sizeof(u64), .align = sizeof(u64),
2195                .get = dscr_get, .set = dscr_set
2196        },
2197#endif
2198#ifdef CONFIG_PPC_BOOK3S_64
2199        [REGSET_TAR] = {
2200                .core_note_type = NT_PPC_TAR, .n = 1,
2201                .size = sizeof(u64), .align = sizeof(u64),
2202                .get = tar_get, .set = tar_set
2203        },
2204        [REGSET_EBB] = {
2205                .core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
2206                .size = sizeof(u64), .align = sizeof(u64),
2207                .active = ebb_active, .get = ebb_get, .set = ebb_set
2208        },
2209#endif
2210};
2211
2212static const struct user_regset_view user_ppc_compat_view = {
2213        .name = "ppc", .e_machine = EM_PPC, .ei_osabi = ELF_OSABI,
2214        .regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
2215};
2216#endif  /* CONFIG_PPC64 */
2217
2218const struct user_regset_view *task_user_regset_view(struct task_struct *task)
2219{
2220#ifdef CONFIG_PPC64
2221        if (test_tsk_thread_flag(task, TIF_32BIT))
2222                return &user_ppc_compat_view;
2223#endif
2224        return &user_ppc_native_view;
2225}
2226
2227
2228void user_enable_single_step(struct task_struct *task)
2229{
2230        struct pt_regs *regs = task->thread.regs;
2231
2232        if (regs != NULL) {
2233#ifdef CONFIG_PPC_ADV_DEBUG_REGS
2234                task->thread.debug.dbcr0 &= ~DBCR0_BT;
2235                task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
2236                regs->msr |= MSR_DE;
2237#else
2238                regs->msr &= ~MSR_BE;
2239                regs->msr |= MSR_SE;
2240#endif
2241        }
2242        set_tsk_thread_flag(task, TIF_SINGLESTEP);
2243}
2244
2245void user_enable_block_step(struct task_struct *task)
2246{
2247        struct pt_regs *regs = task->thread.regs;
2248
2249        if (regs != NULL) {
2250#ifdef CONFIG_PPC_ADV_DEBUG_REGS
2251                task->thread.debug.dbcr0 &= ~DBCR0_IC;
2252                task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT;
2253                regs->msr |= MSR_DE;
2254#else
2255                regs->msr &= ~MSR_SE;
2256                regs->msr |= MSR_BE;
2257#endif
2258        }
2259        set_tsk_thread_flag(task, TIF_SINGLESTEP);
2260}
2261
2262void user_disable_single_step(struct task_struct *task)
2263{
2264        struct pt_regs *regs = task->thread.regs;
2265
2266        if (regs != NULL) {
2267#ifdef CONFIG_PPC_ADV_DEBUG_REGS
2268                /*
2269                 * The logic to disable single stepping should be as
2270                 * simple as turning off the Instruction Complete flag.
2271                 * And, after doing so, if all debug flags are off, turn
2272                 * off DBCR0(IDM) and MSR(DE) .... Torez
2273                 */
2274                task->thread.debug.dbcr0 &= ~(DBCR0_IC|DBCR0_BT);
2275                /*
2276                 * Test to see if any of the DBCR_ACTIVE_EVENTS bits are set.
2277                 */
2278                if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
2279                                        task->thread.debug.dbcr1)) {
2280                        /*
2281                         * All debug events were off.....
2282                         */
2283                        task->thread.debug.dbcr0 &= ~DBCR0_IDM;
2284                        regs->msr &= ~MSR_DE;
2285                }
2286#else
2287                regs->msr &= ~(MSR_SE | MSR_BE);
2288#endif
2289        }
2290        clear_tsk_thread_flag(task, TIF_SINGLESTEP);
2291}
2292
2293#ifdef CONFIG_HAVE_HW_BREAKPOINT
2294void ptrace_triggered(struct perf_event *bp,
2295                      struct perf_sample_data *data, struct pt_regs *regs)
2296{
2297        struct perf_event_attr attr;
2298
2299        /*
2300         * Disable the breakpoint request here since ptrace has defined a
2301         * one-shot behaviour for breakpoint exceptions in PPC64.
2302         * The SIGTRAP signal is generated automatically for us in do_dabr().
2303         * We don't have to do anything about that here
2304         */
2305        attr = bp->attr;
2306        attr.disabled = true;
2307        modify_user_hw_breakpoint(bp, &attr);
2308}
2309#endif /* CONFIG_HAVE_HW_BREAKPOINT */
2310
2311static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
2312                               unsigned long data)
2313{
2314#ifdef CONFIG_HAVE_HW_BREAKPOINT
2315        int ret;
2316        struct thread_struct *thread = &(task->thread);
2317        struct perf_event *bp;
2318        struct perf_event_attr attr;
2319#endif /* CONFIG_HAVE_HW_BREAKPOINT */
2320#ifndef CONFIG_PPC_ADV_DEBUG_REGS
2321        struct arch_hw_breakpoint hw_brk;
2322#endif
2323
2324        /* For ppc64 we support one DABR and no IABR's at the moment (ppc64).
2325         *  For embedded processors we support one DAC and no IAC's at the
2326         *  moment.
2327         */
2328        if (addr > 0)
2329                return -EINVAL;
2330
2331        /* The bottom 3 bits in dabr are flags */
2332        if ((data & ~0x7UL) >= TASK_SIZE)
2333                return -EIO;
2334
2335#ifndef CONFIG_PPC_ADV_DEBUG_REGS
2336        /* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
2337         *  It was assumed, on previous implementations, that 3 bits were
2338         *  passed together with the data address, fitting the design of the
2339         *  DABR register, as follows:
2340         *
2341         *  bit 0: Read flag
2342         *  bit 1: Write flag
2343         *  bit 2: Breakpoint translation
2344         *
2345         *  Thus, we use them here as so.
2346         */
2347
2348        /* Ensure breakpoint translation bit is set */
2349        if (data && !(data & HW_BRK_TYPE_TRANSLATE))
2350                return -EIO;
2351        hw_brk.address = data & (~HW_BRK_TYPE_DABR);
2352        hw_brk.type = (data & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL;
2353        hw_brk.len = 8;
2354#ifdef CONFIG_HAVE_HW_BREAKPOINT
2355        bp = thread->ptrace_bps[0];
2356        if ((!data) || !(hw_brk.type & HW_BRK_TYPE_RDWR)) {
2357                if (bp) {
2358                        unregister_hw_breakpoint(bp);
2359                        thread->ptrace_bps[0] = NULL;
2360                }
2361                return 0;
2362        }
2363        if (bp) {
2364                attr = bp->attr;
2365                attr.bp_addr = hw_brk.address;
2366                arch_bp_generic_fields(hw_brk.type, &attr.bp_type);
2367
2368                /* Enable breakpoint */
2369                attr.disabled = false;
2370
2371                ret =  modify_user_hw_breakpoint(bp, &attr);
2372                if (ret) {
2373                        return ret;
2374                }
2375                thread->ptrace_bps[0] = bp;
2376                thread->hw_brk = hw_brk;
2377                return 0;
2378        }
2379
2380        /* Create a new breakpoint request if one doesn't exist already */
2381        hw_breakpoint_init(&attr);
2382        attr.bp_addr = hw_brk.address;
2383        arch_bp_generic_fields(hw_brk.type,
2384                               &attr.bp_type);
2385
2386        thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
2387                                               ptrace_triggered, NULL, task);
2388        if (IS_ERR(bp)) {
2389                thread->ptrace_bps[0] = NULL;
2390                return PTR_ERR(bp);
2391        }
2392
2393#endif /* CONFIG_HAVE_HW_BREAKPOINT */
2394        task->thread.hw_brk = hw_brk;
2395#else /* CONFIG_PPC_ADV_DEBUG_REGS */
2396        /* As described above, it was assumed 3 bits were passed with the data
2397         *  address, but we will assume only the mode bits will be passed
2398         *  as to not cause alignment restrictions for DAC-based processors.
2399         */
2400
2401        /* DAC's hold the whole address without any mode flags */
2402        task->thread.debug.dac1 = data & ~0x3UL;
2403
2404        if (task->thread.debug.dac1 == 0) {
2405                dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
2406                if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
2407                                        task->thread.debug.dbcr1)) {
2408                        task->thread.regs->msr &= ~MSR_DE;
2409                        task->thread.debug.dbcr0 &= ~DBCR0_IDM;
2410                }
2411                return 0;
2412        }
2413
2414        /* Read or Write bits must be set */
2415
2416        if (!(data & 0x3UL))
2417                return -EINVAL;
2418
2419        /* Set the Internal Debugging flag (IDM bit 1) for the DBCR0
2420           register */
2421        task->thread.debug.dbcr0 |= DBCR0_IDM;
2422
2423        /* Check for write and read flags and set DBCR0
2424           accordingly */
2425        dbcr_dac(task) &= ~(DBCR_DAC1R|DBCR_DAC1W);
2426        if (data & 0x1UL)
2427                dbcr_dac(task) |= DBCR_DAC1R;
2428        if (data & 0x2UL)
2429                dbcr_dac(task) |= DBCR_DAC1W;
2430        task->thread.regs->msr |= MSR_DE;
2431#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
2432        return 0;
2433}
2434
2435/*
2436 * Called by kernel/ptrace.c when detaching..
2437 *
2438 * Make sure single step bits etc are not set.
2439 */
2440void ptrace_disable(struct task_struct *child)
2441{
2442        /* make sure the single step bit is not set. */
2443        user_disable_single_step(child);
2444}
2445
2446#ifdef CONFIG_PPC_ADV_DEBUG_REGS
2447static long set_instruction_bp(struct task_struct *child,
2448                              struct ppc_hw_breakpoint *bp_info)
2449{
2450        int slot;
2451        int slot1_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC1) != 0);
2452        int slot2_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC2) != 0);
2453        int slot3_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC3) != 0);
2454        int slot4_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC4) != 0);
2455
2456        if (dbcr_iac_range(child) & DBCR_IAC12MODE)
2457                slot2_in_use = 1;
2458        if (dbcr_iac_range(child) & DBCR_IAC34MODE)
2459                slot4_in_use = 1;
2460
2461        if (bp_info->addr >= TASK_SIZE)
2462                return -EIO;
2463
2464        if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {
2465
2466                /* Make sure range is valid. */
2467                if (bp_info->addr2 >= TASK_SIZE)
2468                        return -EIO;
2469
2470                /* We need a pair of IAC regsisters */
2471                if ((!slot1_in_use) && (!slot2_in_use)) {
2472                        slot = 1;
2473                        child->thread.debug.iac1 = bp_info->addr;
2474                        child->thread.debug.iac2 = bp_info->addr2;
2475                        child->thread.debug.dbcr0 |= DBCR0_IAC1;
2476                        if (bp_info->addr_mode ==
2477                                        PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
2478                                dbcr_iac_range(child) |= DBCR_IAC12X;
2479                        else
2480                                dbcr_iac_range(child) |= DBCR_IAC12I;
2481#if CONFIG_PPC_ADV_DEBUG_IACS > 2
2482                } else if ((!slot3_in_use) && (!slot4_in_use)) {
2483                        slot = 3;
2484                        child->thread.debug.iac3 = bp_info->addr;
2485                        child->thread.debug.iac4 = bp_info->addr2;
2486                        child->thread.debug.dbcr0 |= DBCR0_IAC3;
2487                        if (bp_info->addr_mode ==
2488                                        PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
2489                                dbcr_iac_range(child) |= DBCR_IAC34X;
2490                        else
2491                                dbcr_iac_range(child) |= DBCR_IAC34I;
2492#endif
2493                } else
2494                        return -ENOSPC;
2495        } else {
2496                /* We only need one.  If possible leave a pair free in
2497                 * case a range is needed later
2498                 */
2499                if (!slot1_in_use) {
2500                        /*
2501                         * Don't use iac1 if iac1-iac2 are free and either
2502                         * iac3 or iac4 (but not both) are free
2503                         */
2504                        if (slot2_in_use || (slot3_in_use == slot4_in_use)) {
2505                                slot = 1;
2506                                child->thread.debug.iac1 = bp_info->addr;
2507                                child->thread.debug.dbcr0 |= DBCR0_IAC1;
2508                                goto out;
2509                        }
2510                }
2511                if (!slot2_in_use) {
2512                        slot = 2;
2513                        child->thread.debug.iac2 = bp_info->addr;
2514                        child->thread.debug.dbcr0 |= DBCR0_IAC2;
2515#if CONFIG_PPC_ADV_DEBUG_IACS > 2
2516                } else if (!slot3_in_use) {
2517                        slot = 3;
2518                        child->thread.debug.iac3 = bp_info->addr;
2519                        child->thread.debug.dbcr0 |= DBCR0_IAC3;
2520                } else if (!slot4_in_use) {
2521                        slot = 4;
2522                        child->thread.debug.iac4 = bp_info->addr;
2523                        child->thread.debug.dbcr0 |= DBCR0_IAC4;
2524#endif
2525                } else
2526                        return -ENOSPC;
2527        }
2528out:
2529        child->thread.debug.dbcr0 |= DBCR0_IDM;
2530        child->thread.regs->msr |= MSR_DE;
2531
2532        return slot;
2533}
2534
2535static int del_instruction_bp(struct task_struct *child, int slot)
2536{
2537        switch (slot) {
2538        case 1:
2539                if ((child->thread.debug.dbcr0 & DBCR0_IAC1) == 0)
2540                        return -ENOENT;
2541
2542                if (dbcr_iac_range(child) & DBCR_IAC12MODE) {
2543                        /* address range - clear slots 1 & 2 */
2544                        child->thread.debug.iac2 = 0;
2545                        dbcr_iac_range(child) &= ~DBCR_IAC12MODE;
2546                }
2547                child->thread.debug.iac1 = 0;
2548                child->thread.debug.dbcr0 &= ~DBCR0_IAC1;
2549                break;
2550        case 2:
2551                if ((child->thread.debug.dbcr0 & DBCR0_IAC2) == 0)
2552                        return -ENOENT;
2553
2554                if (dbcr_iac_range(child) & DBCR_IAC12MODE)
2555                        /* used in a range */
2556                        return -EINVAL;
2557                child->thread.debug.iac2 = 0;
2558                child->thread.debug.dbcr0 &= ~DBCR0_IAC2;
2559                break;
2560#if CONFIG_PPC_ADV_DEBUG_IACS > 2
2561        case 3:
2562                if ((child->thread.debug.dbcr0 & DBCR0_IAC3) == 0)
2563                        return -ENOENT;
2564
2565                if (dbcr_iac_range(child) & DBCR_IAC34MODE) {
2566                        /* address range - clear slots 3 & 4 */
2567                        child->thread.debug.iac4 = 0;
2568                        dbcr_iac_range(child) &= ~DBCR_IAC34MODE;
2569                }
2570                child->thread.debug.iac3 = 0;
2571                child->thread.debug.dbcr0 &= ~DBCR0_IAC3;
2572                break;
2573        case 4:
2574                if ((child->thread.debug.dbcr0 & DBCR0_IAC4) == 0)
2575                        return -ENOENT;
2576
2577                if (dbcr_iac_range(child) & DBCR_IAC34MODE)
2578                        /* Used in a range */
2579                        return -EINVAL;
2580                child->thread.debug.iac4 = 0;
2581                child->thread.debug.dbcr0 &= ~DBCR0_IAC4;
2582                break;
2583#endif
2584        default:
2585                return -EINVAL;
2586        }
2587        return 0;
2588}
2589
2590static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
2591{
2592        int byte_enable =
2593                (bp_info->condition_mode >> PPC_BREAKPOINT_CONDITION_BE_SHIFT)
2594                & 0xf;
2595        int condition_mode =
2596                bp_info->condition_mode & PPC_BREAKPOINT_CONDITION_MODE;
2597        int slot;
2598
2599        if (byte_enable && (condition_mode == 0))
2600                return -EINVAL;
2601
2602        if (bp_info->addr >= TASK_SIZE)
2603                return -EIO;
2604
2605        if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) {
2606                slot = 1;
2607                if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
2608                        dbcr_dac(child) |= DBCR_DAC1R;
2609                if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
2610                        dbcr_dac(child) |= DBCR_DAC1W;
2611                child->thread.debug.dac1 = (unsigned long)bp_info->addr;
2612#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2613                if (byte_enable) {
2614                        child->thread.debug.dvc1 =
2615                                (unsigned long)bp_info->condition_value;
2616                        child->thread.debug.dbcr2 |=
2617                                ((byte_enable << DBCR2_DVC1BE_SHIFT) |
2618                                 (condition_mode << DBCR2_DVC1M_SHIFT));
2619                }
2620#endif
2621#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2622        } else if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
2623                /* Both dac1 and dac2 are part of a range */
2624                return -ENOSPC;
2625#endif
2626        } else if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) {
2627                slot = 2;
2628                if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
2629                        dbcr_dac(child) |= DBCR_DAC2R;
2630                if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
2631                        dbcr_dac(child) |= DBCR_DAC2W;
2632                child->thread.debug.dac2 = (unsigned long)bp_info->addr;
2633#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2634                if (byte_enable) {
2635                        child->thread.debug.dvc2 =
2636                                (unsigned long)bp_info->condition_value;
2637                        child->thread.debug.dbcr2 |=
2638                                ((byte_enable << DBCR2_DVC2BE_SHIFT) |
2639                                 (condition_mode << DBCR2_DVC2M_SHIFT));
2640                }
2641#endif
2642        } else
2643                return -ENOSPC;
2644        child->thread.debug.dbcr0 |= DBCR0_IDM;
2645        child->thread.regs->msr |= MSR_DE;
2646
2647        return slot + 4;
2648}
2649
2650static int del_dac(struct task_struct *child, int slot)
2651{
2652        if (slot == 1) {
2653                if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0)
2654                        return -ENOENT;
2655
2656                child->thread.debug.dac1 = 0;
2657                dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W);
2658#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2659                if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
2660                        child->thread.debug.dac2 = 0;
2661                        child->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
2662                }
2663                child->thread.debug.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE);
2664#endif
2665#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2666                child->thread.debug.dvc1 = 0;
2667#endif
2668        } else if (slot == 2) {
2669                if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0)
2670                        return -ENOENT;
2671
2672#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2673                if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE)
2674                        /* Part of a range */
2675                        return -EINVAL;
2676                child->thread.debug.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE);
2677#endif
2678#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2679                child->thread.debug.dvc2 = 0;
2680#endif
2681                child->thread.debug.dac2 = 0;
2682                dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W);
2683        } else
2684                return -EINVAL;
2685
2686        return 0;
2687}
2688#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
2689
2690#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2691static int set_dac_range(struct task_struct *child,
2692                         struct ppc_hw_breakpoint *bp_info)
2693{
2694        int mode = bp_info->addr_mode & PPC_BREAKPOINT_MODE_MASK;
2695
2696        /* We don't allow range watchpoints to be used with DVC */
2697        if (bp_info->condition_mode)
2698                return -EINVAL;
2699
2700        /*
2701         * Best effort to verify the address range.  The user/supervisor bits
2702         * prevent trapping in kernel space, but let's fail on an obvious bad
2703         * range.  The simple test on the mask is not fool-proof, and any
2704         * exclusive range will spill over into kernel space.
2705         */
2706        if (bp_info->addr >= TASK_SIZE)
2707                return -EIO;
2708        if (mode == PPC_BREAKPOINT_MODE_MASK) {
2709                /*
2710                 * dac2 is a bitmask.  Don't allow a mask that makes a
2711                 * kernel space address from a valid dac1 value
2712                 */
2713                if (~((unsigned long)bp_info->addr2) >= TASK_SIZE)
2714                        return -EIO;
2715        } else {
2716                /*
2717                 * For range breakpoints, addr2 must also be a valid address
2718                 */
2719                if (bp_info->addr2 >= TASK_SIZE)
2720                        return -EIO;
2721        }
2722
2723        if (child->thread.debug.dbcr0 &
2724            (DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W))
2725                return -ENOSPC;
2726
2727        if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
2728                child->thread.debug.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
2729        if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
2730                child->thread.debug.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
2731        child->thread.debug.dac1 = bp_info->addr;
2732        child->thread.debug.dac2 = bp_info->addr2;
2733        if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
2734                child->thread.debug.dbcr2  |= DBCR2_DAC12M;
2735        else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
2736                child->thread.debug.dbcr2  |= DBCR2_DAC12MX;
2737        else    /* PPC_BREAKPOINT_MODE_MASK */
2738                child->thread.debug.dbcr2  |= DBCR2_DAC12MM;
2739        child->thread.regs->msr |= MSR_DE;
2740
2741        return 5;
2742}
2743#endif /* CONFIG_PPC_ADV_DEBUG_DAC_RANGE */
2744
2745static long ppc_set_hwdebug(struct task_struct *child,
2746                     struct ppc_hw_breakpoint *bp_info)
2747{
2748#ifdef CONFIG_HAVE_HW_BREAKPOINT
2749        int len = 0;
2750        struct thread_struct *thread = &(child->thread);
2751        struct perf_event *bp;
2752        struct perf_event_attr attr;
2753#endif /* CONFIG_HAVE_HW_BREAKPOINT */
2754#ifndef CONFIG_PPC_ADV_DEBUG_REGS
2755        struct arch_hw_breakpoint brk;
2756#endif
2757
2758        if (bp_info->version != 1)
2759                return -ENOTSUPP;
2760#ifdef CONFIG_PPC_ADV_DEBUG_REGS
2761        /*
2762         * Check for invalid flags and combinations
2763         */
2764        if ((bp_info->trigger_type == 0) ||
2765            (bp_info->trigger_type & ~(PPC_BREAKPOINT_TRIGGER_EXECUTE |
2766                                       PPC_BREAKPOINT_TRIGGER_RW)) ||
2767            (bp_info->addr_mode & ~PPC_BREAKPOINT_MODE_MASK) ||
2768            (bp_info->condition_mode &
2769             ~(PPC_BREAKPOINT_CONDITION_MODE |
2770               PPC_BREAKPOINT_CONDITION_BE_ALL)))
2771                return -EINVAL;
2772#if CONFIG_PPC_ADV_DEBUG_DVCS == 0
2773        if (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
2774                return -EINVAL;
2775#endif
2776
2777        if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_EXECUTE) {
2778                if ((bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_EXECUTE) ||
2779                    (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE))
2780                        return -EINVAL;
2781                return set_instruction_bp(child, bp_info);
2782        }
2783        if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
2784                return set_dac(child, bp_info);
2785
2786#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2787        return set_dac_range(child, bp_info);
2788#else
2789        return -EINVAL;
2790#endif
2791#else /* !CONFIG_PPC_ADV_DEBUG_DVCS */
2792        /*
2793         * We only support one data breakpoint
2794         */
2795        if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 ||
2796            (bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 ||
2797            bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
2798                return -EINVAL;
2799
2800        if ((unsigned long)bp_info->addr >= TASK_SIZE)
2801                return -EIO;
2802
2803        brk.address = bp_info->addr & ~7UL;
2804        brk.type = HW_BRK_TYPE_TRANSLATE;
2805        brk.len = 8;
2806        if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
2807                brk.type |= HW_BRK_TYPE_READ;
2808        if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
2809                brk.type |= HW_BRK_TYPE_WRITE;
2810#ifdef CONFIG_HAVE_HW_BREAKPOINT
2811        /*
2812         * Check if the request is for 'range' breakpoints. We can
2813         * support it if range < 8 bytes.
2814         */
2815        if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
2816                len = bp_info->addr2 - bp_info->addr;
2817        else if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
2818                len = 1;
2819        else
2820                return -EINVAL;
2821        bp = thread->ptrace_bps[0];
2822        if (bp)
2823                return -ENOSPC;
2824
2825        /* Create a new breakpoint request if one doesn't exist already */
2826        hw_breakpoint_init(&attr);
2827        attr.bp_addr = (unsigned long)bp_info->addr & ~HW_BREAKPOINT_ALIGN;
2828        attr.bp_len = len;
2829        arch_bp_generic_fields(brk.type, &attr.bp_type);
2830
2831        thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
2832                                               ptrace_triggered, NULL, child);
2833        if (IS_ERR(bp)) {
2834                thread->ptrace_bps[0] = NULL;
2835                return PTR_ERR(bp);
2836        }
2837
2838        return 1;
2839#endif /* CONFIG_HAVE_HW_BREAKPOINT */
2840
2841        if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT)
2842                return -EINVAL;
2843
2844        if (child->thread.hw_brk.address)
2845                return -ENOSPC;
2846
2847        child->thread.hw_brk = brk;
2848
2849        return 1;
2850#endif /* !CONFIG_PPC_ADV_DEBUG_DVCS */
2851}
2852
2853static long ppc_del_hwdebug(struct task_struct *child, long data)
2854{
2855#ifdef CONFIG_HAVE_HW_BREAKPOINT
2856        int ret = 0;
2857        struct thread_struct *thread = &(child->thread);
2858        struct perf_event *bp;
2859#endif /* CONFIG_HAVE_HW_BREAKPOINT */
2860#ifdef CONFIG_PPC_ADV_DEBUG_REGS
2861        int rc;
2862
2863        if (data <= 4)
2864                rc = del_instruction_bp(child, (int)data);
2865        else
2866                rc = del_dac(child, (int)data - 4);
2867
2868        if (!rc) {
2869                if (!DBCR_ACTIVE_EVENTS(child->thread.debug.dbcr0,
2870                                        child->thread.debug.dbcr1)) {
2871                        child->thread.debug.dbcr0 &= ~DBCR0_IDM;
2872                        child->thread.regs->msr &= ~MSR_DE;
2873                }
2874        }
2875        return rc;
2876#else
2877        if (data != 1)
2878                return -EINVAL;
2879
2880#ifdef CONFIG_HAVE_HW_BREAKPOINT
2881        bp = thread->ptrace_bps[0];
2882        if (bp) {
2883                unregister_hw_breakpoint(bp);
2884                thread->ptrace_bps[0] = NULL;
2885        } else
2886                ret = -ENOENT;
2887        return ret;
2888#else /* CONFIG_HAVE_HW_BREAKPOINT */
2889        if (child->thread.hw_brk.address == 0)
2890                return -ENOENT;
2891
2892        child->thread.hw_brk.address = 0;
2893        child->thread.hw_brk.type = 0;
2894#endif /* CONFIG_HAVE_HW_BREAKPOINT */
2895
2896        return 0;
2897#endif
2898}
2899
2900long arch_ptrace(struct task_struct *child, long request,
2901                 unsigned long addr, unsigned long data)
2902{
2903        int ret = -EPERM;
2904        void __user *datavp = (void __user *) data;
2905        unsigned long __user *datalp = datavp;
2906
2907        switch (request) {
2908        /* read the word at location addr in the USER area. */
2909        case PTRACE_PEEKUSR: {
2910                unsigned long index, tmp;
2911
2912                ret = -EIO;
2913                /* convert to index and check */
2914#ifdef CONFIG_PPC32
2915                index = addr >> 2;
2916                if ((addr & 3) || (index > PT_FPSCR)
2917                    || (child->thread.regs == NULL))
2918#else
2919                index = addr >> 3;
2920                if ((addr & 7) || (index > PT_FPSCR))
2921#endif
2922                        break;
2923
2924                CHECK_FULL_REGS(child->thread.regs);
2925                if (index < PT_FPR0) {
2926                        ret = ptrace_get_reg(child, (int) index, &tmp);
2927                        if (ret)
2928                                break;
2929                } else {
2930                        unsigned int fpidx = index - PT_FPR0;
2931
2932                        flush_fp_to_thread(child);
2933                        if (fpidx < (PT_FPSCR - PT_FPR0))
2934                                memcpy(&tmp, &child->thread.TS_FPR(fpidx),
2935                                       sizeof(long));
2936                        else
2937                                tmp = child->thread.fp_state.fpscr;
2938                }
2939                ret = put_user(tmp, datalp);
2940                break;
2941        }
2942
2943        /* write the word at location addr in the USER area */
2944        case PTRACE_POKEUSR: {
2945                unsigned long index;
2946
2947                ret = -EIO;
2948                /* convert to index and check */
2949#ifdef CONFIG_PPC32
2950                index = addr >> 2;
2951                if ((addr & 3) || (index > PT_FPSCR)
2952                    || (child->thread.regs == NULL))
2953#else
2954                index = addr >> 3;
2955                if ((addr & 7) || (index > PT_FPSCR))
2956#endif
2957                        break;
2958
2959                CHECK_FULL_REGS(child->thread.regs);
2960                if (index < PT_FPR0) {
2961                        ret = ptrace_put_reg(child, index, data);
2962                } else {
2963                        unsigned int fpidx = index - PT_FPR0;
2964
2965                        flush_fp_to_thread(child);
2966                        if (fpidx < (PT_FPSCR - PT_FPR0))
2967                                memcpy(&child->thread.TS_FPR(fpidx), &data,
2968                                       sizeof(long));
2969                        else
2970                                child->thread.fp_state.fpscr = data;
2971                        ret = 0;
2972                }
2973                break;
2974        }
2975
2976        case PPC_PTRACE_GETHWDBGINFO: {
2977                struct ppc_debug_info dbginfo;
2978
2979                dbginfo.version = 1;
2980#ifdef CONFIG_PPC_ADV_DEBUG_REGS
2981                dbginfo.num_instruction_bps = CONFIG_PPC_ADV_DEBUG_IACS;
2982                dbginfo.num_data_bps = CONFIG_PPC_ADV_DEBUG_DACS;
2983                dbginfo.num_condition_regs = CONFIG_PPC_ADV_DEBUG_DVCS;
2984                dbginfo.data_bp_alignment = 4;
2985                dbginfo.sizeof_condition = 4;
2986                dbginfo.features = PPC_DEBUG_FEATURE_INSN_BP_RANGE |
2987                                   PPC_DEBUG_FEATURE_INSN_BP_MASK;
2988#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2989                dbginfo.features |=
2990                                   PPC_DEBUG_FEATURE_DATA_BP_RANGE |
2991                                   PPC_DEBUG_FEATURE_DATA_BP_MASK;
2992#endif
2993#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
2994                dbginfo.num_instruction_bps = 0;
2995                dbginfo.num_data_bps = 1;
2996                dbginfo.num_condition_regs = 0;
2997#ifdef CONFIG_PPC64
2998                dbginfo.data_bp_alignment = 8;
2999#else
3000                dbginfo.data_bp_alignment = 4;
3001#endif
3002                dbginfo.sizeof_condition = 0;
3003#ifdef CONFIG_HAVE_HW_BREAKPOINT
3004                dbginfo.features = PPC_DEBUG_FEATURE_DATA_BP_RANGE;
3005                if (cpu_has_feature(CPU_FTR_DAWR))
3006                        dbginfo.features |= PPC_DEBUG_FEATURE_DATA_BP_DAWR;
3007#else
3008                dbginfo.features = 0;
3009#endif /* CONFIG_HAVE_HW_BREAKPOINT */
3010#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
3011
3012                if (!access_ok(VERIFY_WRITE, datavp,
3013                               sizeof(struct ppc_debug_info)))
3014                        return -EFAULT;
3015                ret = __copy_to_user(datavp, &dbginfo,
3016                                     sizeof(struct ppc_debug_info)) ?
3017                      -EFAULT : 0;
3018                break;
3019        }
3020
3021        case PPC_PTRACE_SETHWDEBUG: {
3022                struct ppc_hw_breakpoint bp_info;
3023
3024                if (!access_ok(VERIFY_READ, datavp,
3025                               sizeof(struct ppc_hw_breakpoint)))
3026                        return -EFAULT;
3027                ret = __copy_from_user(&bp_info, datavp,
3028                                       sizeof(struct ppc_hw_breakpoint)) ?
3029                      -EFAULT : 0;
3030                if (!ret)
3031                        ret = ppc_set_hwdebug(child, &bp_info);
3032                break;
3033        }
3034
3035        case PPC_PTRACE_DELHWDEBUG: {
3036                ret = ppc_del_hwdebug(child, data);
3037                break;
3038        }
3039
3040        case PTRACE_GET_DEBUGREG: {
3041#ifndef CONFIG_PPC_ADV_DEBUG_REGS
3042                unsigned long dabr_fake;
3043#endif
3044                ret = -EINVAL;
3045                /* We only support one DABR and no IABRS at the moment */
3046                if (addr > 0)
3047                        break;
3048#ifdef CONFIG_PPC_ADV_DEBUG_REGS
3049                ret = put_user(child->thread.debug.dac1, datalp);
3050#else
3051                dabr_fake = ((child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) |
3052                             (child->thread.hw_brk.type & HW_BRK_TYPE_DABR));
3053                ret = put_user(dabr_fake, datalp);
3054#endif
3055                break;
3056        }
3057
3058        case PTRACE_SET_DEBUGREG:
3059                ret = ptrace_set_debugreg(child, addr, data);
3060                break;
3061
3062#ifdef CONFIG_PPC64
3063        case PTRACE_GETREGS64:
3064#endif
3065        case PTRACE_GETREGS:    /* Get all pt_regs from the child. */
3066                return copy_regset_to_user(child, &user_ppc_native_view,
3067                                           REGSET_GPR,
3068                                           0, sizeof(struct pt_regs),
3069                                           datavp);
3070
3071#ifdef CONFIG_PPC64
3072        case PTRACE_SETREGS64:
3073#endif
3074        case PTRACE_SETREGS:    /* Set all gp regs in the child. */
3075                return copy_regset_from_user(child, &user_ppc_native_view,
3076                                             REGSET_GPR,
3077                                             0, sizeof(struct pt_regs),
3078                                             datavp);
3079
3080        case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
3081                return copy_regset_to_user(child, &user_ppc_native_view,
3082                                           REGSET_FPR,
3083                                           0, sizeof(elf_fpregset_t),
3084                                           datavp);
3085
3086        case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
3087                return copy_regset_from_user(child, &user_ppc_native_view,
3088                                             REGSET_FPR,
3089                                             0, sizeof(elf_fpregset_t),
3090                                             datavp);
3091
3092#ifdef CONFIG_ALTIVEC
3093        case PTRACE_GETVRREGS:
3094                return copy_regset_to_user(child, &user_ppc_native_view,
3095                                           REGSET_VMX,
3096                                           0, (33 * sizeof(vector128) +
3097                                               sizeof(u32)),
3098                                           datavp);
3099
3100        case PTRACE_SETVRREGS:
3101                return copy_regset_from_user(child, &user_ppc_native_view,
3102                                             REGSET_VMX,
3103                                             0, (33 * sizeof(vector128) +
3104                                                 sizeof(u32)),
3105                                             datavp);
3106#endif
3107#ifdef CONFIG_VSX
3108        case PTRACE_GETVSRREGS:
3109                return copy_regset_to_user(child, &user_ppc_native_view,
3110                                           REGSET_VSX,
3111                                           0, 32 * sizeof(double),
3112                                           datavp);
3113
3114        case PTRACE_SETVSRREGS:
3115                return copy_regset_from_user(child, &user_ppc_native_view,
3116                                             REGSET_VSX,
3117                                             0, 32 * sizeof(double),
3118                                             datavp);
3119#endif
3120#ifdef CONFIG_SPE
3121        case PTRACE_GETEVRREGS:
3122                /* Get the child spe register state. */
3123                return copy_regset_to_user(child, &user_ppc_native_view,
3124                                           REGSET_SPE, 0, 35 * sizeof(u32),
3125                                           datavp);
3126
3127        case PTRACE_SETEVRREGS:
3128                /* Set the child spe register state. */
3129                return copy_regset_from_user(child, &user_ppc_native_view,
3130                                             REGSET_SPE, 0, 35 * sizeof(u32),
3131                                             datavp);
3132#endif
3133
3134        default:
3135                ret = ptrace_request(child, request, addr, data);
3136                break;
3137        }
3138        return ret;
3139}
3140
3141#ifdef CONFIG_SECCOMP
3142static int do_seccomp(struct pt_regs *regs)
3143{
3144        if (!test_thread_flag(TIF_SECCOMP))
3145                return 0;
3146
3147        /*
3148         * The ABI we present to seccomp tracers is that r3 contains
3149         * the syscall return value and orig_gpr3 contains the first
3150         * syscall parameter. This is different to the ptrace ABI where
3151         * both r3 and orig_gpr3 contain the first syscall parameter.
3152         */
3153        regs->gpr[3] = -ENOSYS;
3154
3155        /*
3156         * We use the __ version here because we have already checked
3157         * TIF_SECCOMP. If this fails, there is nothing left to do, we
3158         * have already loaded -ENOSYS into r3, or seccomp has put
3159         * something else in r3 (via SECCOMP_RET_ERRNO/TRACE).
3160         */
3161        if (__secure_computing(NULL))
3162                return -1;
3163
3164        /*
3165         * The syscall was allowed by seccomp, restore the register
3166         * state to what audit expects.
3167         * Note that we use orig_gpr3, which means a seccomp tracer can
3168         * modify the first syscall parameter (in orig_gpr3) and also
3169         * allow the syscall to proceed.
3170         */
3171        regs->gpr[3] = regs->orig_gpr3;
3172
3173        return 0;
3174}
3175#else
3176static inline int do_seccomp(struct pt_regs *regs) { return 0; }
3177#endif /* CONFIG_SECCOMP */
3178
3179/**
3180 * do_syscall_trace_enter() - Do syscall tracing on kernel entry.
3181 * @regs: the pt_regs of the task to trace (current)
3182 *
3183 * Performs various types of tracing on syscall entry. This includes seccomp,
3184 * ptrace, syscall tracepoints and audit.
3185 *
3186 * The pt_regs are potentially visible to userspace via ptrace, so their
3187 * contents is ABI.
3188 *
3189 * One or more of the tracers may modify the contents of pt_regs, in particular
3190 * to modify arguments or even the syscall number itself.
3191 *
3192 * It's also possible that a tracer can choose to reject the system call. In
3193 * that case this function will return an illegal syscall number, and will put
3194 * an appropriate return value in regs->r3.
3195 *
3196 * Return: the (possibly changed) syscall number.
3197 */
3198long do_syscall_trace_enter(struct pt_regs *regs)
3199{
3200        user_exit();
3201
3202        /*
3203         * The tracer may decide to abort the syscall, if so tracehook
3204         * will return !0. Note that the tracer may also just change
3205         * regs->gpr[0] to an invalid syscall number, that is handled
3206         * below on the exit path.
3207         */
3208        if (test_thread_flag(TIF_SYSCALL_TRACE) &&
3209            tracehook_report_syscall_entry(regs))
3210                goto skip;
3211
3212        /* Run seccomp after ptrace; allow it to set gpr[3]. */
3213        if (do_seccomp(regs))
3214                return -1;
3215
3216        /* Avoid trace and audit when syscall is invalid. */
3217        if (regs->gpr[0] >= NR_syscalls)
3218                goto skip;
3219
3220        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
3221                trace_sys_enter(regs, regs->gpr[0]);
3222
3223#ifdef CONFIG_PPC64
3224        if (!is_32bit_task())
3225                audit_syscall_entry(regs->gpr[0], regs->gpr[3], regs->gpr[4],
3226                                    regs->gpr[5], regs->gpr[6]);
3227        else
3228#endif
3229                audit_syscall_entry(regs->gpr[0],
3230                                    regs->gpr[3] & 0xffffffff,
3231                                    regs->gpr[4] & 0xffffffff,
3232                                    regs->gpr[5] & 0xffffffff,
3233                                    regs->gpr[6] & 0xffffffff);
3234
3235        /* Return the possibly modified but valid syscall number */
3236        return regs->gpr[0];
3237
3238skip:
3239        /*
3240         * If we are aborting explicitly, or if the syscall number is
3241         * now invalid, set the return value to -ENOSYS.
3242         */
3243        regs->gpr[3] = -ENOSYS;
3244        return -1;
3245}
3246
3247void do_syscall_trace_leave(struct pt_regs *regs)
3248{
3249        int step;
3250
3251        audit_syscall_exit(regs);
3252
3253        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
3254                trace_sys_exit(regs, regs->result);
3255
3256        step = test_thread_flag(TIF_SINGLESTEP);
3257        if (step || test_thread_flag(TIF_SYSCALL_TRACE))
3258                tracehook_report_syscall_exit(regs, step);
3259
3260        user_enter();
3261}
3262