linux/arch/powerpc/kvm/book3s.c
/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include "trace.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/* #define EXIT_DEBUG */

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "exits",       VCPU_STAT(sum_exits) },
        { "mmio",        VCPU_STAT(mmio_exits) },
        { "sig",         VCPU_STAT(signal_exits) },
        { "sysc",        VCPU_STAT(syscall_exits) },
        { "inst_emu",    VCPU_STAT(emulated_inst_exits) },
        { "dec",         VCPU_STAT(dec_exits) },
        { "ext_intr",    VCPU_STAT(ext_intr_exits) },
        { "queue_intr",  VCPU_STAT(queue_intr) },
        { "halt_wakeup", VCPU_STAT(halt_wakeup) },
        { "pf_storage",  VCPU_STAT(pf_storage) },
        { "sp_storage",  VCPU_STAT(sp_storage) },
        { "pf_instruc",  VCPU_STAT(pf_instruc) },
        { "sp_instruc",  VCPU_STAT(sp_instruc) },
        { "ld",          VCPU_STAT(ld) },
        { "ld_slow",     VCPU_STAT(ld_slow) },
        { "st",          VCPU_STAT(st) },
        { "st_slow",     VCPU_STAT(st_slow) },
        { NULL }
};

void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu)
{
}

void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
{
}

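/*
 * Deliver an interrupt to the guest right away: stash the current PC and
 * MSR (plus @flags) into SRR0/SRR1, jump to the vector's offset in the
 * guest interrupt area and let the MMU model recompute the guest MSR.
 */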
void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
        vcpu->arch.shared->srr0 = kvmppc_get_pc(vcpu);
        vcpu->arch.shared->srr1 = vcpu->arch.shared->msr | flags;
        kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
        vcpu->arch.mmu.reset_msr(vcpu);
}

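/* Map an interrupt vector offset to its internal delivery priority. */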
static int kvmppc_book3s_vec2irqprio(unsigned int vec)
{
        unsigned int prio;

        switch (vec) {
        case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET;         break;
        case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK;        break;
        case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE;         break;
        case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT;         break;
        case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE;         break;
        case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT;         break;
        case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL;             break;
        case 0x501: prio = BOOK3S_IRQPRIO_EXTERNAL_LEVEL;       break;
        case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT;            break;
        case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM;              break;
        case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL;           break;
        case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER;          break;
        case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL;              break;
        case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG;                break;
        case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC;              break;
        case 0xf40: prio = BOOK3S_IRQPRIO_VSX;                  break;
        default:    prio = BOOK3S_IRQPRIO_MAX;                  break;
        }

        return prio;
}

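/*
 * Drop a pending interrupt priority and propagate the new pending state
 * to the guest's shared page.
 */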
void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
                                          unsigned int vec)
{
        unsigned long old_pending = vcpu->arch.pending_exceptions;

        clear_bit(kvmppc_book3s_vec2irqprio(vec),
                  &vcpu->arch.pending_exceptions);

        kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions,
                                  old_pending);
}

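/*
 * Mark an interrupt vector as pending; it is actually injected on the
 * next call to kvmppc_core_prepare_to_enter().
 */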
void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
{
        vcpu->stat.queue_intr++;

        set_bit(kvmppc_book3s_vec2irqprio(vec),
                &vcpu->arch.pending_exceptions);
#ifdef EXIT_DEBUG
        printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}


void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
        /* might as well deliver this straight away */
        kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
        kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
        return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
        kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                struct kvm_interrupt *irq)
{
        unsigned int vec = BOOK3S_INTERRUPT_EXTERNAL;

        if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
                vec = BOOK3S_INTERRUPT_EXTERNAL_LEVEL;

        kvmppc_book3s_queue_irqprio(vcpu, vec);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
        kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
        kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
}

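/*
 * Try to inject the interrupt corresponding to @priority. Decrementer
 * and external interrupts are only delivered when MSR_EE is set and the
 * guest is not in a critical section; all other priorities deliver
 * unconditionally. Returns nonzero if the interrupt was injected.
 */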
int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
{
        int deliver = 1;
        int vec = 0;
        bool crit = kvmppc_critical_section(vcpu);

        switch (priority) {
        case BOOK3S_IRQPRIO_DECREMENTER:
                deliver = (vcpu->arch.shared->msr & MSR_EE) && !crit;
                vec = BOOK3S_INTERRUPT_DECREMENTER;
                break;
        case BOOK3S_IRQPRIO_EXTERNAL:
        case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
                deliver = (vcpu->arch.shared->msr & MSR_EE) && !crit;
                vec = BOOK3S_INTERRUPT_EXTERNAL;
                break;
        case BOOK3S_IRQPRIO_SYSTEM_RESET:
                vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
                break;
        case BOOK3S_IRQPRIO_MACHINE_CHECK:
                vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
                break;
        case BOOK3S_IRQPRIO_DATA_STORAGE:
                vec = BOOK3S_INTERRUPT_DATA_STORAGE;
                break;
        case BOOK3S_IRQPRIO_INST_STORAGE:
                vec = BOOK3S_INTERRUPT_INST_STORAGE;
                break;
        case BOOK3S_IRQPRIO_DATA_SEGMENT:
                vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
                break;
        case BOOK3S_IRQPRIO_INST_SEGMENT:
                vec = BOOK3S_INTERRUPT_INST_SEGMENT;
                break;
        case BOOK3S_IRQPRIO_ALIGNMENT:
                vec = BOOK3S_INTERRUPT_ALIGNMENT;
                break;
        case BOOK3S_IRQPRIO_PROGRAM:
                vec = BOOK3S_INTERRUPT_PROGRAM;
                break;
        case BOOK3S_IRQPRIO_VSX:
                vec = BOOK3S_INTERRUPT_VSX;
                break;
        case BOOK3S_IRQPRIO_ALTIVEC:
                vec = BOOK3S_INTERRUPT_ALTIVEC;
                break;
        case BOOK3S_IRQPRIO_FP_UNAVAIL:
                vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
                break;
        case BOOK3S_IRQPRIO_SYSCALL:
                vec = BOOK3S_INTERRUPT_SYSCALL;
                break;
        case BOOK3S_IRQPRIO_DEBUG:
                vec = BOOK3S_INTERRUPT_TRACE;
                break;
        case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
                vec = BOOK3S_INTERRUPT_PERFMON;
                break;
        default:
                deliver = 0;
                printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
                break;
        }

#if 0
        printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
#endif

        if (deliver)
                kvmppc_inject_interrupt(vcpu, vec, 0);

        return deliver;
}

/*
 * This function determines if an irqprio should be cleared once issued.
 */
static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
{
        switch (priority) {
                case BOOK3S_IRQPRIO_DECREMENTER:
                        /* DEC interrupts get cleared by mtdec */
                        return false;
                case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
                        /* External interrupts get cleared by userspace */
                        return false;
        }

        return true;
}

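/*
 * Scan the pending exception bitmap in priority order and inject the
 * interrupts that are currently deliverable; a delivered interrupt's
 * pending bit is cleared unless its source is level-driven (DEC,
 * level-triggered external). Finally the shared page is updated so the
 * guest can see whether anything is still pending.
 */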
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
        unsigned long *pending = &vcpu->arch.pending_exceptions;
        unsigned long old_pending = vcpu->arch.pending_exceptions;
        unsigned int priority;

#ifdef EXIT_DEBUG
        if (vcpu->arch.pending_exceptions)
                printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
#endif
        priority = __ffs(*pending);
        while (priority < BOOK3S_IRQPRIO_MAX) {
                if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
                    clear_irqprio(vcpu, priority)) {
                        clear_bit(priority, &vcpu->arch.pending_exceptions);
                        break;
                }

                priority = find_next_bit(pending,
                                         BITS_PER_BYTE * sizeof(*pending),
                                         priority + 1);
        }

        /* Tell the guest about our interrupt status */
        kvmppc_update_int_pending(vcpu, *pending, old_pending);

        return 0;
}

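/*
 * Translate a guest frame number to a host pfn. An access that hits the
 * magic (paravirt) page is redirected to the host page backing the
 * vcpu's shared area; everything else goes through the memslots.
 */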
pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
        ulong mp_pa = vcpu->arch.magic_page_pa;

        if (!(vcpu->arch.shared->msr & MSR_SF))
                mp_pa = (uint32_t)mp_pa;

        /* Magic page override */
        if (unlikely(mp_pa) &&
            unlikely(((gfn << PAGE_SHIFT) & KVM_PAM) ==
                     ((mp_pa & PAGE_MASK) & KVM_PAM))) {
                ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
                pfn_t pfn;

                pfn = (pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT;
                get_page(pfn_to_page(pfn));
                return pfn;
        }

        return gfn_to_pfn(vcpu->kvm, gfn);
}

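/*
 * Translate an effective address into a guest real address. With
 * relocation off (MSR_IR/MSR_DR clear) this is a 1:1 real-mode mapping
 * clamped to KVM_PAM; otherwise the vcpu's MMU model does the walk.
 */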
static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
                         struct kvmppc_pte *pte)
{
        int relocated = (vcpu->arch.shared->msr & (data ? MSR_DR : MSR_IR));
        int r;

        if (relocated) {
                r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data);
        } else {
                pte->eaddr = eaddr;
                pte->raddr = eaddr & KVM_PAM;
                pte->vpage = VSID_REAL | eaddr >> 12;
                pte->may_read = true;
                pte->may_write = true;
                pte->may_execute = true;
                r = 0;
        }

        return r;
}

static hva_t kvmppc_bad_hva(void)
{
        return PAGE_OFFSET;
}

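/*
 * Turn a translated PTE into a host virtual address, after checking the
 * requested access permission. Returns a bad hva on failure.
 */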
static hva_t kvmppc_pte_to_hva(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
                               bool read)
{
        hva_t hpage;

        if (read && !pte->may_read)
                goto err;

        if (!read && !pte->may_write)
                goto err;

        hpage = gfn_to_hva(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
        if (kvm_is_error_hva(hpage))
                goto err;

        return hpage | (pte->raddr & ~PAGE_MASK);
err:
        return kvmppc_bad_hva();
}

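/*
 * Store @size bytes from @ptr at guest effective address *@eaddr.
 * Returns EMULATE_DONE on success, -ENOENT/-EPERM on translation or
 * permission failure, or EMULATE_DO_MMIO when the access has to be
 * handled as MMIO.
 */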
int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
              bool data)
{
        struct kvmppc_pte pte;

        vcpu->stat.st++;

        if (kvmppc_xlate(vcpu, *eaddr, data, &pte))
                return -ENOENT;

        *eaddr = pte.raddr;

        if (!pte.may_write)
                return -EPERM;

        if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
                return EMULATE_DO_MMIO;

        return EMULATE_DONE;
}

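/*
 * Load @size bytes into @ptr from guest effective address *@eaddr.
 * Returns EMULATE_DONE on success, -ENOENT when no translation exists,
 * or EMULATE_DO_MMIO when the access has to be handled as MMIO.
 */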
int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
                      bool data)
{
        struct kvmppc_pte pte;
        hva_t hva = *eaddr;

        vcpu->stat.ld++;

        if (kvmppc_xlate(vcpu, *eaddr, data, &pte))
                goto nopte;

        *eaddr = pte.raddr;

        hva = kvmppc_pte_to_hva(vcpu, &pte, true);
        if (kvm_is_error_hva(hva))
                goto mmio;

        if (copy_from_user(ptr, (void __user *)hva, size)) {
                printk(KERN_INFO "kvmppc_ld at 0x%lx failed\n", hva);
                goto mmio;
        }

        return EMULATE_DONE;

nopte:
        return -ENOENT;
mmio:
        return EMULATE_DO_MMIO;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        return 0;
}

int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
        return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;

        regs->pc = kvmppc_get_pc(vcpu);
        regs->cr = kvmppc_get_cr(vcpu);
        regs->ctr = kvmppc_get_ctr(vcpu);
        regs->lr = kvmppc_get_lr(vcpu);
        regs->xer = kvmppc_get_xer(vcpu);
        regs->msr = vcpu->arch.shared->msr;
        regs->srr0 = vcpu->arch.shared->srr0;
        regs->srr1 = vcpu->arch.shared->srr1;
        regs->pid = vcpu->arch.pid;
        regs->sprg0 = vcpu->arch.shared->sprg0;
        regs->sprg1 = vcpu->arch.shared->sprg1;
        regs->sprg2 = vcpu->arch.shared->sprg2;
        regs->sprg3 = vcpu->arch.shared->sprg3;
        regs->sprg4 = vcpu->arch.shared->sprg4;
        regs->sprg5 = vcpu->arch.shared->sprg5;
        regs->sprg6 = vcpu->arch.shared->sprg6;
        regs->sprg7 = vcpu->arch.shared->sprg7;

        for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;

        kvmppc_set_pc(vcpu, regs->pc);
        kvmppc_set_cr(vcpu, regs->cr);
        kvmppc_set_ctr(vcpu, regs->ctr);
        kvmppc_set_lr(vcpu, regs->lr);
        kvmppc_set_xer(vcpu, regs->xer);
        kvmppc_set_msr(vcpu, regs->msr);
        vcpu->arch.shared->srr0 = regs->srr0;
        vcpu->arch.shared->srr1 = regs->srr1;
        vcpu->arch.shared->sprg0 = regs->sprg0;
        vcpu->arch.shared->sprg1 = regs->sprg1;
        vcpu->arch.shared->sprg2 = regs->sprg2;
        vcpu->arch.shared->sprg3 = regs->sprg3;
        vcpu->arch.shared->sprg4 = regs->sprg4;
        vcpu->arch.shared->sprg5 = regs->sprg5;
        vcpu->arch.shared->sprg6 = regs->sprg6;
        vcpu->arch.shared->sprg7 = regs->sprg7;

        for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -ENOTSUPP;
}

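/*
 * KVM_GET_ONE_REG: the subarch handler gets first shot at the register;
 * anything it does not know about (DAR, DSISR, FP/AltiVec state, the
 * debug instruction, ICP state) is handled here before the value is
 * copied out to userspace.
 */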
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
        int r;
        union kvmppc_one_reg val;
        int size;
        long int i;

        size = one_reg_size(reg->id);
        if (size > sizeof(val))
                return -EINVAL;

        r = kvmppc_get_one_reg(vcpu, reg->id, &val);

        if (r == -EINVAL) {
                r = 0;
                switch (reg->id) {
                case KVM_REG_PPC_DAR:
                        val = get_reg_val(reg->id, vcpu->arch.shared->dar);
                        break;
                case KVM_REG_PPC_DSISR:
                        val = get_reg_val(reg->id, vcpu->arch.shared->dsisr);
                        break;
                case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
                        i = reg->id - KVM_REG_PPC_FPR0;
                        val = get_reg_val(reg->id, vcpu->arch.fpr[i]);
                        break;
                case KVM_REG_PPC_FPSCR:
                        val = get_reg_val(reg->id, vcpu->arch.fpscr);
                        break;
#ifdef CONFIG_ALTIVEC
                case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
                        if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
                                r = -ENXIO;
                                break;
                        }
                        val.vval = vcpu->arch.vr[reg->id - KVM_REG_PPC_VR0];
                        break;
                case KVM_REG_PPC_VSCR:
                        if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
                                r = -ENXIO;
                                break;
                        }
                        val = get_reg_val(reg->id, vcpu->arch.vscr.u[3]);
                        break;
#endif /* CONFIG_ALTIVEC */
                case KVM_REG_PPC_DEBUG_INST: {
                        u32 opcode = INS_TW;
                        r = copy_to_user((u32 __user *)(long)reg->addr,
                                         &opcode, sizeof(u32));
                        break;
                }
#ifdef CONFIG_KVM_XICS
                case KVM_REG_PPC_ICP_STATE:
                        if (!vcpu->arch.icp) {
                                r = -ENXIO;
                                break;
                        }
                        val = get_reg_val(reg->id, kvmppc_xics_get_icp(vcpu));
                        break;
#endif /* CONFIG_KVM_XICS */
                default:
                        r = -EINVAL;
                        break;
                }
        }
        if (r)
                return r;

        if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
                r = -EFAULT;

        return r;
}

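/*
 * KVM_SET_ONE_REG: counterpart of the above; the new value is copied in
 * from userspace, offered to the subarch handler first and otherwise
 * applied to the common Book3S registers handled here.
 */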
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
        int r;
        union kvmppc_one_reg val;
        int size;
        long int i;

        size = one_reg_size(reg->id);
        if (size > sizeof(val))
                return -EINVAL;

        if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
                return -EFAULT;

        r = kvmppc_set_one_reg(vcpu, reg->id, &val);

        if (r == -EINVAL) {
                r = 0;
                switch (reg->id) {
                case KVM_REG_PPC_DAR:
                        vcpu->arch.shared->dar = set_reg_val(reg->id, val);
                        break;
                case KVM_REG_PPC_DSISR:
                        vcpu->arch.shared->dsisr = set_reg_val(reg->id, val);
                        break;
                case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
                        i = reg->id - KVM_REG_PPC_FPR0;
                        vcpu->arch.fpr[i] = set_reg_val(reg->id, val);
                        break;
                case KVM_REG_PPC_FPSCR:
                        vcpu->arch.fpscr = set_reg_val(reg->id, val);
                        break;
#ifdef CONFIG_ALTIVEC
                case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
                        if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
                                r = -ENXIO;
                                break;
                        }
                        vcpu->arch.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
                        break;
                case KVM_REG_PPC_VSCR:
                        if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
                                r = -ENXIO;
                                break;
                        }
                        vcpu->arch.vscr.u[3] = set_reg_val(reg->id, val);
                        break;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_KVM_XICS
                case KVM_REG_PPC_ICP_STATE:
                        if (!vcpu->arch.icp) {
                                r = -ENXIO;
                                break;
                        }
                        r = kvmppc_xics_set_icp(vcpu,
                                                set_reg_val(reg->id, val));
                        break;
#endif /* CONFIG_KVM_XICS */
                default:
                        r = -EINVAL;
                        break;
                }
        }

        return r;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return 0;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL;
}

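/*
 * Timer callback for the emulated decrementer: queue a DEC interrupt
 * and kick the vcpu so it notices.
 */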
void kvmppc_decrementer_func(unsigned long data)
{
        struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

        kvmppc_core_queue_dec(vcpu);
        kvm_vcpu_kick(vcpu);
}