linux/arch/powerpc/kvm/powerpc.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *
   4 * Copyright IBM Corp. 2007
   5 *
   6 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
   7 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
   8 */
   9
  10#include <linux/errno.h>
  11#include <linux/err.h>
  12#include <linux/kvm_host.h>
  13#include <linux/vmalloc.h>
  14#include <linux/hrtimer.h>
  15#include <linux/sched/signal.h>
  16#include <linux/fs.h>
  17#include <linux/slab.h>
  18#include <linux/file.h>
  19#include <linux/module.h>
  20#include <linux/irqbypass.h>
  21#include <linux/kvm_irqfd.h>
  22#include <asm/cputable.h>
  23#include <linux/uaccess.h>
  24#include <asm/kvm_ppc.h>
  25#include <asm/cputhreads.h>
  26#include <asm/irqflags.h>
  27#include <asm/iommu.h>
  28#include <asm/switch_to.h>
  29#include <asm/xive.h>
  30#ifdef CONFIG_PPC_PSERIES
  31#include <asm/hvcall.h>
  32#include <asm/plpar_wrappers.h>
  33#endif
  34#include <asm/ultravisor.h>
  35
  36#include "timing.h"
  37#include "irq.h"
  38#include "../mm/mmu_decl.h"
  39
  40#define CREATE_TRACE_POINTS
  41#include "trace.h"
  42
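/*
 * Note (editorial): the HV and PR implementations register their kvmppc_ops
 * in the pointers below when they initialise (built in or as modules);
 * kvm_arch_init_vm() further down picks one of them for each VM.
 */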
  43struct kvmppc_ops *kvmppc_hv_ops;
  44EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
  45struct kvmppc_ops *kvmppc_pr_ops;
  46EXPORT_SYMBOL_GPL(kvmppc_pr_ops);
  47
  48
  49int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
  50{
  51        return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
  52}
  53
  54bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
  55{
  56        return kvm_arch_vcpu_runnable(vcpu);
  57}
  58
  59bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
  60{
  61        return false;
  62}
  63
  64int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
  65{
  66        return 1;
  67}
  68
  69/*
   70 * Common checks before entering the guest world.  Call with interrupts
   71 * enabled; they are hard-disabled here and re-enabled only if we return <= 0.
  72 *
  73 * returns:
  74 *
  75 * == 1 if we're ready to go into guest state
  76 * <= 0 if we need to go back to the host with return value
  77 */
  78int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
  79{
  80        int r;
  81
  82        WARN_ON(irqs_disabled());
  83        hard_irq_disable();
  84
  85        while (true) {
  86                if (need_resched()) {
  87                        local_irq_enable();
  88                        cond_resched();
  89                        hard_irq_disable();
  90                        continue;
  91                }
  92
  93                if (signal_pending(current)) {
  94                        kvmppc_account_exit(vcpu, SIGNAL_EXITS);
  95                        vcpu->run->exit_reason = KVM_EXIT_INTR;
  96                        r = -EINTR;
  97                        break;
  98                }
  99
 100                vcpu->mode = IN_GUEST_MODE;
 101
 102                /*
 103                 * Reading vcpu->requests must happen after setting vcpu->mode,
 104                 * so we don't miss a request because the requester sees
 105                 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
 106                 * before next entering the guest (and thus doesn't IPI).
 107                 * This also orders the write to mode from any reads
 108                 * to the page tables done while the VCPU is running.
 109                 * Please see the comment in kvm_flush_remote_tlbs.
 110                 */
 111                smp_mb();
 112
 113                if (kvm_request_pending(vcpu)) {
  114                        /* Make sure we process requests with preemption enabled */
 115                        local_irq_enable();
 116                        trace_kvm_check_requests(vcpu);
 117                        r = kvmppc_core_check_requests(vcpu);
 118                        hard_irq_disable();
 119                        if (r > 0)
 120                                continue;
 121                        break;
 122                }
 123
 124                if (kvmppc_core_prepare_to_enter(vcpu)) {
 125                        /* interrupts got enabled in between, so we
 126                           are back at square 1 */
 127                        continue;
 128                }
 129
 130                guest_enter_irqoff();
 131                return 1;
 132        }
 133
 134        /* return to host */
 135        local_irq_enable();
 136        return r;
 137}
 138EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
 139
 140#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
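/*
 * Note (editorial): the shared (magic) page is kept in the guest's
 * endianness; when the guest switches endianness every field is byte-swapped
 * so later host accesses through vcpu->arch.shared stay consistent.
 */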
 141static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
 142{
 143        struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
 144        int i;
 145
 146        shared->sprg0 = swab64(shared->sprg0);
 147        shared->sprg1 = swab64(shared->sprg1);
 148        shared->sprg2 = swab64(shared->sprg2);
 149        shared->sprg3 = swab64(shared->sprg3);
 150        shared->srr0 = swab64(shared->srr0);
 151        shared->srr1 = swab64(shared->srr1);
 152        shared->dar = swab64(shared->dar);
 153        shared->msr = swab64(shared->msr);
 154        shared->dsisr = swab32(shared->dsisr);
 155        shared->int_pending = swab32(shared->int_pending);
 156        for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
 157                shared->sr[i] = swab32(shared->sr[i]);
 158}
 159#endif
 160
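/*
 * KVM/ePAPR paravirtual hypercall dispatch: the hypercall token arrives in
 * GPR 11 and up to four arguments in GPRs 3-6 (truncated to 32 bits for a
 * 32-bit guest).  The status code is handed back to the caller and a second
 * return value is written to GPR 4.
 */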
 161int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
 162{
 163        int nr = kvmppc_get_gpr(vcpu, 11);
 164        int r;
 165        unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
 166        unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
 167        unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
 168        unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
 169        unsigned long r2 = 0;
 170
 171        if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
 172                /* 32 bit mode */
 173                param1 &= 0xffffffff;
 174                param2 &= 0xffffffff;
 175                param3 &= 0xffffffff;
 176                param4 &= 0xffffffff;
 177        }
 178
 179        switch (nr) {
 180        case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
 181        {
 182#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
 183                /* Book3S can be little endian, find it out here */
 184                int shared_big_endian = true;
 185                if (vcpu->arch.intr_msr & MSR_LE)
 186                        shared_big_endian = false;
 187                if (shared_big_endian != vcpu->arch.shared_big_endian)
 188                        kvmppc_swab_shared(vcpu);
 189                vcpu->arch.shared_big_endian = shared_big_endian;
 190#endif
 191
 192                if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
 193                        /*
 194                         * Older versions of the Linux magic page code had
 195                         * a bug where they would map their trampoline code
 196                         * NX. If that's the case, remove !PR NX capability.
 197                         */
 198                        vcpu->arch.disable_kernel_nx = true;
 199                        kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 200                }
 201
 202                vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
 203                vcpu->arch.magic_page_ea = param2 & ~0xfffULL;
 204
 205#ifdef CONFIG_PPC_64K_PAGES
 206                /*
 207                 * Make sure our 4k magic page is in the same window of a 64k
 208                 * page within the guest and within the host's page.
 209                 */
 210                if ((vcpu->arch.magic_page_pa & 0xf000) !=
 211                    ((ulong)vcpu->arch.shared & 0xf000)) {
 212                        void *old_shared = vcpu->arch.shared;
 213                        ulong shared = (ulong)vcpu->arch.shared;
 214                        void *new_shared;
 215
 216                        shared &= PAGE_MASK;
 217                        shared |= vcpu->arch.magic_page_pa & 0xf000;
 218                        new_shared = (void*)shared;
 219                        memcpy(new_shared, old_shared, 0x1000);
 220                        vcpu->arch.shared = new_shared;
 221                }
 222#endif
 223
 224                r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;
 225
 226                r = EV_SUCCESS;
 227                break;
 228        }
 229        case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
 230                r = EV_SUCCESS;
 231#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
 232                r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
 233#endif
 234
 235                /* Second return value is in r4 */
 236                break;
 237        case EV_HCALL_TOKEN(EV_IDLE):
 238                r = EV_SUCCESS;
 239                kvm_vcpu_block(vcpu);
 240                kvm_clear_request(KVM_REQ_UNHALT, vcpu);
 241                break;
 242        default:
 243                r = EV_UNIMPLEMENTED;
 244                break;
 245        }
 246
 247        kvmppc_set_gpr(vcpu, 4, r2);
 248
 249        return r;
 250}
 251EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);
 252
 253int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
 254{
 255        int r = false;
 256
 257        /* We have to know what CPU to virtualize */
 258        if (!vcpu->arch.pvr)
 259                goto out;
 260
 261        /* PAPR only works with book3s_64 */
 262        if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
 263                goto out;
 264
 265        /* HV KVM can only do PAPR mode for now */
 266        if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
 267                goto out;
 268
 269#ifdef CONFIG_KVM_BOOKE_HV
 270        if (!cpu_has_feature(CPU_FTR_EMB_HV))
 271                goto out;
 272#endif
 273
 274        r = true;
 275
 276out:
 277        vcpu->arch.sane = r;
 278        return r ? 0 : -EINVAL;
 279}
 280EXPORT_SYMBOL_GPL(kvmppc_sanity_check);
 281
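/*
 * Top-level MMIO emulation: kvmppc_emulate_loadstore() decodes and emulates
 * the faulting load/store.  EMULATE_DO_MMIO means userspace has to complete
 * the access; EMULATE_FAIL currently just logs the instruction and resumes
 * in the host.
 */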
 282int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu)
 283{
 284        enum emulation_result er;
 285        int r;
 286
 287        er = kvmppc_emulate_loadstore(vcpu);
 288        switch (er) {
 289        case EMULATE_DONE:
 290                /* Future optimization: only reload non-volatiles if they were
 291                 * actually modified. */
 292                r = RESUME_GUEST_NV;
 293                break;
 294        case EMULATE_AGAIN:
 295                r = RESUME_GUEST;
 296                break;
 297        case EMULATE_DO_MMIO:
 298                vcpu->run->exit_reason = KVM_EXIT_MMIO;
 299                /* We must reload nonvolatiles because "update" load/store
 300                 * instructions modify register state. */
 301                /* Future optimization: only reload non-volatiles if they were
 302                 * actually modified. */
 303                r = RESUME_HOST_NV;
 304                break;
 305        case EMULATE_FAIL:
 306        {
 307                u32 last_inst;
 308
 309                kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
 310                /* XXX Deliver Program interrupt to guest. */
 311                pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
 312                r = RESUME_HOST;
 313                break;
 314        }
 315        default:
 316                WARN_ON(1);
 317                r = RESUME_GUEST;
 318        }
 319
 320        return r;
 321}
 322EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);
 323
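/*
 * kvmppc_st()/kvmppc_ld() copy 'size' bytes between 'ptr' and the guest
 * effective address *eaddr: the address is translated with kvmppc_xlate()
 * (or a kvm_ops fast path when one is provided), the magic page is handled
 * in place, and accesses that miss guest memory fall back to MMIO
 * (EMULATE_DO_MMIO).
 */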
 324int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
 325              bool data)
 326{
 327        ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
 328        struct kvmppc_pte pte;
 329        int r = -EINVAL;
 330
 331        vcpu->stat.st++;
 332
 333        if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr)
 334                r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr,
 335                                                            size);
 336
 337        if ((!r) || (r == -EAGAIN))
 338                return r;
 339
 340        r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
 341                         XLATE_WRITE, &pte);
 342        if (r < 0)
 343                return r;
 344
 345        *eaddr = pte.raddr;
 346
 347        if (!pte.may_write)
 348                return -EPERM;
 349
 350        /* Magic page override */
 351        if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
 352            ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
 353            !(kvmppc_get_msr(vcpu) & MSR_PR)) {
 354                void *magic = vcpu->arch.shared;
 355                magic += pte.eaddr & 0xfff;
 356                memcpy(magic, ptr, size);
 357                return EMULATE_DONE;
 358        }
 359
 360        if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
 361                return EMULATE_DO_MMIO;
 362
 363        return EMULATE_DONE;
 364}
 365EXPORT_SYMBOL_GPL(kvmppc_st);
 366
 367int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
 368                      bool data)
 369{
 370        ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
 371        struct kvmppc_pte pte;
 372        int rc = -EINVAL;
 373
 374        vcpu->stat.ld++;
 375
 376        if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr)
 377                rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr,
 378                                                              size);
 379
 380        if ((!rc) || (rc == -EAGAIN))
 381                return rc;
 382
 383        rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
 384                          XLATE_READ, &pte);
 385        if (rc)
 386                return rc;
 387
 388        *eaddr = pte.raddr;
 389
 390        if (!pte.may_read)
 391                return -EPERM;
 392
 393        if (!data && !pte.may_execute)
 394                return -ENOEXEC;
 395
 396        /* Magic page override */
 397        if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
 398            ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
 399            !(kvmppc_get_msr(vcpu) & MSR_PR)) {
 400                void *magic = vcpu->arch.shared;
 401                magic += pte.eaddr & 0xfff;
 402                memcpy(ptr, magic, size);
 403                return EMULATE_DONE;
 404        }
 405
 406        vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 407        rc = kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size);
 408        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 409        if (rc)
 410                return EMULATE_DO_MMIO;
 411
 412        return EMULATE_DONE;
 413}
 414EXPORT_SYMBOL_GPL(kvmppc_ld);
 415
 416int kvm_arch_hardware_enable(void)
 417{
 418        return 0;
 419}
 420
 421int kvm_arch_hardware_setup(void *opaque)
 422{
 423        return 0;
 424}
 425
 426int kvm_arch_check_processor_compat(void *opaque)
 427{
 428        return kvmppc_core_check_processor_compat();
 429}
 430
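/*
 * VM creation: 'type' selects the backend.  Zero picks HV when it is
 * available and PR otherwise, while KVM_VM_PPC_HV and KVM_VM_PPC_PR request
 * a specific backend and fail with -EINVAL when it is not loaded.
 */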
 431int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 432{
 433        struct kvmppc_ops *kvm_ops = NULL;
 434        /*
 435         * if we have both HV and PR enabled, default is HV
 436         */
 437        if (type == 0) {
 438                if (kvmppc_hv_ops)
 439                        kvm_ops = kvmppc_hv_ops;
 440                else
 441                        kvm_ops = kvmppc_pr_ops;
 442                if (!kvm_ops)
 443                        goto err_out;
 444        } else  if (type == KVM_VM_PPC_HV) {
 445                if (!kvmppc_hv_ops)
 446                        goto err_out;
 447                kvm_ops = kvmppc_hv_ops;
 448        } else if (type == KVM_VM_PPC_PR) {
 449                if (!kvmppc_pr_ops)
 450                        goto err_out;
 451                kvm_ops = kvmppc_pr_ops;
 452        } else
 453                goto err_out;
 454
 455        if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
 456                return -ENOENT;
 457
 458        kvm->arch.kvm_ops = kvm_ops;
 459        return kvmppc_core_init_vm(kvm);
 460err_out:
 461        return -EINVAL;
 462}
 463
 464void kvm_arch_destroy_vm(struct kvm *kvm)
 465{
 466        unsigned int i;
 467        struct kvm_vcpu *vcpu;
 468
 469#ifdef CONFIG_KVM_XICS
 470        /*
 471         * We call kick_all_cpus_sync() to ensure that all
 472         * CPUs have executed any pending IPIs before we
 473         * continue and free VCPUs structures below.
 474         */
 475        if (is_kvmppc_hv_enabled(kvm))
 476                kick_all_cpus_sync();
 477#endif
 478
 479        kvm_for_each_vcpu(i, vcpu, kvm)
 480                kvm_vcpu_destroy(vcpu);
 481
 482        mutex_lock(&kvm->lock);
 483        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
 484                kvm->vcpus[i] = NULL;
 485
 486        atomic_set(&kvm->online_vcpus, 0);
 487
 488        kvmppc_core_destroy_vm(kvm);
 489
 490        mutex_unlock(&kvm->lock);
 491
 492        /* drop the module reference */
 493        module_put(kvm->arch.kvm_ops->owner);
 494}
 495
 496int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 497{
 498        int r;
 499        /* Assume we're using HV mode when the HV module is loaded */
 500        int hv_enabled = kvmppc_hv_ops ? 1 : 0;
 501
 502        if (kvm) {
 503                /*
 504                 * Hooray - we know which VM type we're running on. Depend on
 505                 * that rather than the guess above.
 506                 */
 507                hv_enabled = is_kvmppc_hv_enabled(kvm);
 508        }
 509
 510        switch (ext) {
 511#ifdef CONFIG_BOOKE
 512        case KVM_CAP_PPC_BOOKE_SREGS:
 513        case KVM_CAP_PPC_BOOKE_WATCHDOG:
 514        case KVM_CAP_PPC_EPR:
 515#else
 516        case KVM_CAP_PPC_SEGSTATE:
 517        case KVM_CAP_PPC_HIOR:
 518        case KVM_CAP_PPC_PAPR:
 519#endif
 520        case KVM_CAP_PPC_UNSET_IRQ:
 521        case KVM_CAP_PPC_IRQ_LEVEL:
 522        case KVM_CAP_ENABLE_CAP:
 523        case KVM_CAP_ONE_REG:
 524        case KVM_CAP_IOEVENTFD:
 525        case KVM_CAP_DEVICE_CTRL:
 526        case KVM_CAP_IMMEDIATE_EXIT:
 527        case KVM_CAP_SET_GUEST_DEBUG:
 528                r = 1;
 529                break;
 530        case KVM_CAP_PPC_GUEST_DEBUG_SSTEP:
 531        case KVM_CAP_PPC_PAIRED_SINGLES:
 532        case KVM_CAP_PPC_OSI:
 533        case KVM_CAP_PPC_GET_PVINFO:
 534#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
 535        case KVM_CAP_SW_TLB:
 536#endif
 537                /* We support this only for PR */
 538                r = !hv_enabled;
 539                break;
 540#ifdef CONFIG_KVM_MPIC
 541        case KVM_CAP_IRQ_MPIC:
 542                r = 1;
 543                break;
 544#endif
 545
 546#ifdef CONFIG_PPC_BOOK3S_64
 547        case KVM_CAP_SPAPR_TCE:
 548        case KVM_CAP_SPAPR_TCE_64:
 549                r = 1;
 550                break;
 551        case KVM_CAP_SPAPR_TCE_VFIO:
 552                r = !!cpu_has_feature(CPU_FTR_HVMODE);
 553                break;
 554        case KVM_CAP_PPC_RTAS:
 555        case KVM_CAP_PPC_FIXUP_HCALL:
 556        case KVM_CAP_PPC_ENABLE_HCALL:
 557#ifdef CONFIG_KVM_XICS
 558        case KVM_CAP_IRQ_XICS:
 559#endif
 560        case KVM_CAP_PPC_GET_CPU_CHAR:
 561                r = 1;
 562                break;
 563#ifdef CONFIG_KVM_XIVE
 564        case KVM_CAP_PPC_IRQ_XIVE:
 565                /*
 566                 * We need XIVE to be enabled on the platform (implies
 567                 * a POWER9 processor) and the PowerNV platform, as
 568                 * nested is not yet supported.
 569                 */
 570                r = xive_enabled() && !!cpu_has_feature(CPU_FTR_HVMODE) &&
 571                        kvmppc_xive_native_supported();
 572                break;
 573#endif
 574
 575        case KVM_CAP_PPC_ALLOC_HTAB:
 576                r = hv_enabled;
 577                break;
 578#endif /* CONFIG_PPC_BOOK3S_64 */
 579#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 580        case KVM_CAP_PPC_SMT:
 581                r = 0;
 582                if (kvm) {
 583                        if (kvm->arch.emul_smt_mode > 1)
 584                                r = kvm->arch.emul_smt_mode;
 585                        else
 586                                r = kvm->arch.smt_mode;
 587                } else if (hv_enabled) {
 588                        if (cpu_has_feature(CPU_FTR_ARCH_300))
 589                                r = 1;
 590                        else
 591                                r = threads_per_subcore;
 592                }
 593                break;
 594        case KVM_CAP_PPC_SMT_POSSIBLE:
 595                r = 1;
 596                if (hv_enabled) {
 597                        if (!cpu_has_feature(CPU_FTR_ARCH_300))
 598                                r = ((threads_per_subcore << 1) - 1);
 599                        else
 600                                /* P9 can emulate dbells, so allow any mode */
 601                                r = 8 | 4 | 2 | 1;
 602                }
 603                break;
 604        case KVM_CAP_PPC_RMA:
 605                r = 0;
 606                break;
 607        case KVM_CAP_PPC_HWRNG:
 608                r = kvmppc_hwrng_present();
 609                break;
 610        case KVM_CAP_PPC_MMU_RADIX:
 611                r = !!(hv_enabled && radix_enabled());
 612                break;
 613        case KVM_CAP_PPC_MMU_HASH_V3:
 614                r = !!(hv_enabled && kvmppc_hv_ops->hash_v3_possible &&
 615                       kvmppc_hv_ops->hash_v3_possible());
 616                break;
 617        case KVM_CAP_PPC_NESTED_HV:
 618                r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
 619                       !kvmppc_hv_ops->enable_nested(NULL));
 620                break;
 621#endif
 622        case KVM_CAP_SYNC_MMU:
 623#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 624                r = hv_enabled;
 625#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
 626                r = 1;
 627#else
 628                r = 0;
 629#endif
 630                break;
 631#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 632        case KVM_CAP_PPC_HTAB_FD:
 633                r = hv_enabled;
 634                break;
 635#endif
 636        case KVM_CAP_NR_VCPUS:
 637                /*
 638                 * Recommending a number of CPUs is somewhat arbitrary; we
 639                 * return the number of present CPUs for -HV (since a host
 640                 * will have secondary threads "offline"), and for other KVM
 641                 * implementations just count online CPUs.
 642                 */
 643                if (hv_enabled)
 644                        r = num_present_cpus();
 645                else
 646                        r = num_online_cpus();
 647                break;
 648        case KVM_CAP_MAX_VCPUS:
 649                r = KVM_MAX_VCPUS;
 650                break;
 651        case KVM_CAP_MAX_VCPU_ID:
 652                r = KVM_MAX_VCPU_ID;
 653                break;
 654#ifdef CONFIG_PPC_BOOK3S_64
 655        case KVM_CAP_PPC_GET_SMMU_INFO:
 656                r = 1;
 657                break;
 658        case KVM_CAP_SPAPR_MULTITCE:
 659                r = 1;
 660                break;
 661        case KVM_CAP_SPAPR_RESIZE_HPT:
 662                r = !!hv_enabled;
 663                break;
 664#endif
 665#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 666        case KVM_CAP_PPC_FWNMI:
 667                r = hv_enabled;
 668                break;
 669#endif
 670#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 671        case KVM_CAP_PPC_HTM:
 672                r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
 673                     (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
 674                break;
 675#endif
 676#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
 677        case KVM_CAP_PPC_SECURE_GUEST:
 678                r = hv_enabled && kvmppc_hv_ops->enable_svm &&
 679                        !kvmppc_hv_ops->enable_svm(NULL);
 680                break;
 681        case KVM_CAP_PPC_DAWR1:
 682                r = !!(hv_enabled && kvmppc_hv_ops->enable_dawr1 &&
 683                       !kvmppc_hv_ops->enable_dawr1(NULL));
 684                break;
 685        case KVM_CAP_PPC_RPT_INVALIDATE:
 686                r = 1;
 687                break;
 688#endif
 689        default:
 690                r = 0;
 691                break;
 692        }
 693        return r;
 694
 695}
 696
 697long kvm_arch_dev_ioctl(struct file *filp,
 698                        unsigned int ioctl, unsigned long arg)
 699{
 700        return -EINVAL;
 701}
 702
 703void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
 704{
 705        kvmppc_core_free_memslot(kvm, slot);
 706}
 707
 708int kvm_arch_prepare_memory_region(struct kvm *kvm,
 709                                   struct kvm_memory_slot *memslot,
 710                                   const struct kvm_userspace_memory_region *mem,
 711                                   enum kvm_mr_change change)
 712{
 713        return kvmppc_core_prepare_memory_region(kvm, memslot, mem, change);
 714}
 715
 716void kvm_arch_commit_memory_region(struct kvm *kvm,
 717                                   const struct kvm_userspace_memory_region *mem,
 718                                   struct kvm_memory_slot *old,
 719                                   const struct kvm_memory_slot *new,
 720                                   enum kvm_mr_change change)
 721{
 722        kvmppc_core_commit_memory_region(kvm, mem, old, new, change);
 723}
 724
 725void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 726                                   struct kvm_memory_slot *slot)
 727{
 728        kvmppc_core_flush_memslot(kvm, slot);
 729}
 730
 731int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
 732{
 733        return 0;
 734}
 735
 736static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
 737{
 738        struct kvm_vcpu *vcpu;
 739
 740        vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
 741        kvmppc_decrementer_func(vcpu);
 742
 743        return HRTIMER_NORESTART;
 744}
 745
 746int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 747{
 748        int err;
 749
 750        hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
 751        vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
 752        vcpu->arch.dec_expires = get_tb();
 753
 754#ifdef CONFIG_KVM_EXIT_TIMING
 755        mutex_init(&vcpu->arch.exit_timing_lock);
 756#endif
 757        err = kvmppc_subarch_vcpu_init(vcpu);
 758        if (err)
 759                return err;
 760
 761        err = kvmppc_core_vcpu_create(vcpu);
 762        if (err)
 763                goto out_vcpu_uninit;
 764
 765        vcpu->arch.waitp = &vcpu->wait;
 766        kvmppc_create_vcpu_debugfs(vcpu, vcpu->vcpu_id);
 767        return 0;
 768
 769out_vcpu_uninit:
 770        kvmppc_subarch_vcpu_uninit(vcpu);
 771        return err;
 772}
 773
 774void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 775{
 776}
 777
 778void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 779{
 780        /* Make sure we're not using the vcpu anymore */
 781        hrtimer_cancel(&vcpu->arch.dec_timer);
 782
 783        kvmppc_remove_vcpu_debugfs(vcpu);
 784
 785        switch (vcpu->arch.irq_type) {
 786        case KVMPPC_IRQ_MPIC:
 787                kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
 788                break;
 789        case KVMPPC_IRQ_XICS:
 790                if (xics_on_xive())
 791                        kvmppc_xive_cleanup_vcpu(vcpu);
 792                else
 793                        kvmppc_xics_free_icp(vcpu);
 794                break;
 795        case KVMPPC_IRQ_XIVE:
 796                kvmppc_xive_native_cleanup_vcpu(vcpu);
 797                break;
 798        }
 799
 800        kvmppc_core_vcpu_free(vcpu);
 801
 802        kvmppc_subarch_vcpu_uninit(vcpu);
 803}
 804
 805int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 806{
 807        return kvmppc_core_pending_dec(vcpu);
 808}
 809
 810void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 811{
 812#ifdef CONFIG_BOOKE
 813        /*
 814         * vrsave (formerly usprg0) isn't used by Linux, but may
 815         * be used by the guest.
 816         *
 817         * On non-booke this is associated with Altivec and
 818         * is handled by code in book3s.c.
 819         */
 820        mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
 821#endif
 822        kvmppc_core_vcpu_load(vcpu, cpu);
 823}
 824
 825void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 826{
 827        kvmppc_core_vcpu_put(vcpu);
 828#ifdef CONFIG_BOOKE
 829        vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
 830#endif
 831}
 832
 833/*
 834 * irq_bypass_add_producer and irq_bypass_del_producer are only
 835 * useful if the architecture supports PCI passthrough.
 836 * irq_bypass_stop and irq_bypass_start are not needed and so
 837 * kvm_ops are not defined for them.
 838 */
 839bool kvm_arch_has_irq_bypass(void)
 840{
 841        return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
 842                (kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
 843}
 844
 845int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
 846                                     struct irq_bypass_producer *prod)
 847{
 848        struct kvm_kernel_irqfd *irqfd =
 849                container_of(cons, struct kvm_kernel_irqfd, consumer);
 850        struct kvm *kvm = irqfd->kvm;
 851
 852        if (kvm->arch.kvm_ops->irq_bypass_add_producer)
 853                return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);
 854
 855        return 0;
 856}
 857
 858void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
 859                                      struct irq_bypass_producer *prod)
 860{
 861        struct kvm_kernel_irqfd *irqfd =
 862                container_of(cons, struct kvm_kernel_irqfd, consumer);
 863        struct kvm *kvm = irqfd->kvm;
 864
 865        if (kvm->arch.kvm_ops->irq_bypass_del_producer)
 866                kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
 867}
 868
 869#ifdef CONFIG_VSX
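/*
 * VSX MMIO helpers: VSX registers 0-31 alias the FPRs and 32-63 alias the
 * Altivec VRs, hence the split between VCPU_VSX_FPR() and VCPU_VSX_VR()
 * below.  The *_offset() helpers map an MMIO element index to its position
 * inside the register, which depends on host endianness.
 */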
 870static inline int kvmppc_get_vsr_dword_offset(int index)
 871{
 872        int offset;
 873
 874        if ((index != 0) && (index != 1))
 875                return -1;
 876
 877#ifdef __BIG_ENDIAN
 878        offset =  index;
 879#else
 880        offset = 1 - index;
 881#endif
 882
 883        return offset;
 884}
 885
 886static inline int kvmppc_get_vsr_word_offset(int index)
 887{
 888        int offset;
 889
 890        if ((index > 3) || (index < 0))
 891                return -1;
 892
 893#ifdef __BIG_ENDIAN
 894        offset = index;
 895#else
 896        offset = 3 - index;
 897#endif
 898        return offset;
 899}
 900
 901static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
 902        u64 gpr)
 903{
 904        union kvmppc_one_reg val;
 905        int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
 906        int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
 907
 908        if (offset == -1)
 909                return;
 910
 911        if (index >= 32) {
 912                val.vval = VCPU_VSX_VR(vcpu, index - 32);
 913                val.vsxval[offset] = gpr;
 914                VCPU_VSX_VR(vcpu, index - 32) = val.vval;
 915        } else {
 916                VCPU_VSX_FPR(vcpu, index, offset) = gpr;
 917        }
 918}
 919
 920static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
 921        u64 gpr)
 922{
 923        union kvmppc_one_reg val;
 924        int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
 925
 926        if (index >= 32) {
 927                val.vval = VCPU_VSX_VR(vcpu, index - 32);
 928                val.vsxval[0] = gpr;
 929                val.vsxval[1] = gpr;
 930                VCPU_VSX_VR(vcpu, index - 32) = val.vval;
 931        } else {
 932                VCPU_VSX_FPR(vcpu, index, 0) = gpr;
 933                VCPU_VSX_FPR(vcpu, index, 1) = gpr;
 934        }
 935}
 936
 937static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
 938        u32 gpr)
 939{
 940        union kvmppc_one_reg val;
 941        int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
 942
 943        if (index >= 32) {
 944                val.vsx32val[0] = gpr;
 945                val.vsx32val[1] = gpr;
 946                val.vsx32val[2] = gpr;
 947                val.vsx32val[3] = gpr;
 948                VCPU_VSX_VR(vcpu, index - 32) = val.vval;
 949        } else {
 950                val.vsx32val[0] = gpr;
 951                val.vsx32val[1] = gpr;
 952                VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
 953                VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
 954        }
 955}
 956
 957static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
 958        u32 gpr32)
 959{
 960        union kvmppc_one_reg val;
 961        int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
 962        int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
 963        int dword_offset, word_offset;
 964
 965        if (offset == -1)
 966                return;
 967
 968        if (index >= 32) {
 969                val.vval = VCPU_VSX_VR(vcpu, index - 32);
 970                val.vsx32val[offset] = gpr32;
 971                VCPU_VSX_VR(vcpu, index - 32) = val.vval;
 972        } else {
 973                dword_offset = offset / 2;
 974                word_offset = offset % 2;
 975                val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
 976                val.vsx32val[word_offset] = gpr32;
 977                VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
 978        }
 979}
 980#endif /* CONFIG_VSX */
 981
 982#ifdef CONFIG_ALTIVEC
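/*
 * Altivec (VMX) MMIO helpers: the element offset within the vector register
 * is derived from the MMIO element index and mirrored when the guest and
 * host differ in endianness (kvmppc_need_byteswap()).
 */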
 983static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu,
 984                int index, int element_size)
 985{
 986        int offset;
 987        int elts = sizeof(vector128)/element_size;
 988
 989        if ((index < 0) || (index >= elts))
 990                return -1;
 991
 992        if (kvmppc_need_byteswap(vcpu))
 993                offset = elts - index - 1;
 994        else
 995                offset = index;
 996
 997        return offset;
 998}
 999
1000static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu,
1001                int index)
1002{
1003        return kvmppc_get_vmx_offset_generic(vcpu, index, 8);
1004}
1005
1006static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu,
1007                int index)
1008{
1009        return kvmppc_get_vmx_offset_generic(vcpu, index, 4);
1010}
1011
1012static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu,
1013                int index)
1014{
1015        return kvmppc_get_vmx_offset_generic(vcpu, index, 2);
1016}
1017
1018static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu,
1019                int index)
1020{
1021        return kvmppc_get_vmx_offset_generic(vcpu, index, 1);
1022}
1023
1024
1025static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
1026        u64 gpr)
1027{
1028        union kvmppc_one_reg val;
1029        int offset = kvmppc_get_vmx_dword_offset(vcpu,
1030                        vcpu->arch.mmio_vmx_offset);
1031        int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1032
1033        if (offset == -1)
1034                return;
1035
1036        val.vval = VCPU_VSX_VR(vcpu, index);
1037        val.vsxval[offset] = gpr;
1038        VCPU_VSX_VR(vcpu, index) = val.vval;
1039}
1040
1041static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
1042        u32 gpr32)
1043{
1044        union kvmppc_one_reg val;
1045        int offset = kvmppc_get_vmx_word_offset(vcpu,
1046                        vcpu->arch.mmio_vmx_offset);
1047        int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1048
1049        if (offset == -1)
1050                return;
1051
1052        val.vval = VCPU_VSX_VR(vcpu, index);
1053        val.vsx32val[offset] = gpr32;
1054        VCPU_VSX_VR(vcpu, index) = val.vval;
1055}
1056
1057static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
1058        u16 gpr16)
1059{
1060        union kvmppc_one_reg val;
1061        int offset = kvmppc_get_vmx_hword_offset(vcpu,
1062                        vcpu->arch.mmio_vmx_offset);
1063        int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1064
1065        if (offset == -1)
1066                return;
1067
1068        val.vval = VCPU_VSX_VR(vcpu, index);
1069        val.vsx16val[offset] = gpr16;
1070        VCPU_VSX_VR(vcpu, index) = val.vval;
1071}
1072
1073static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
1074        u8 gpr8)
1075{
1076        union kvmppc_one_reg val;
1077        int offset = kvmppc_get_vmx_byte_offset(vcpu,
1078                        vcpu->arch.mmio_vmx_offset);
1079        int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1080
1081        if (offset == -1)
1082                return;
1083
1084        val.vval = VCPU_VSX_VR(vcpu, index);
1085        val.vsx8val[offset] = gpr8;
1086        VCPU_VSX_VR(vcpu, index) = val.vval;
1087}
1088#endif /* CONFIG_ALTIVEC */
1089
1090#ifdef CONFIG_PPC_FPU
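/*
 * Single/double precision conversion for FP MMIO: PowerPC FPRs always hold
 * double-precision values, so the FPU itself does the conversion by bouncing
 * the value through fr0 with lfs/stfd (and lfd/stfs the other way round).
 */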
1091static inline u64 sp_to_dp(u32 fprs)
1092{
1093        u64 fprd;
1094
1095        preempt_disable();
1096        enable_kernel_fp();
1097        asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m"UPD_CONSTR (fprd) : "m"UPD_CONSTR (fprs)
1098             : "fr0");
1099        preempt_enable();
1100        return fprd;
1101}
1102
1103static inline u32 dp_to_sp(u64 fprd)
1104{
1105        u32 fprs;
1106
1107        preempt_disable();
1108        enable_kernel_fp();
1109        asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m"UPD_CONSTR (fprs) : "m"UPD_CONSTR (fprd)
1110             : "fr0");
1111        preempt_enable();
1112        return fprs;
1113}
1114
1115#else
1116#define sp_to_dp(x)     (x)
1117#define dp_to_sp(x)     (x)
1118#endif /* CONFIG_PPC_FPU */
1119
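/*
 * Called once an MMIO read has been completed (in kernel or by userspace):
 * pick the value up from run->mmio.data, undo any host/guest endianness
 * mismatch, apply single->double or sign extension as requested, and write
 * the result to the register recorded in vcpu->arch.io_gpr.
 */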
1120static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu)
1121{
1122        struct kvm_run *run = vcpu->run;
1123        u64 gpr;
1124
1125        if (run->mmio.len > sizeof(gpr)) {
1126                printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
1127                return;
1128        }
1129
1130        if (!vcpu->arch.mmio_host_swabbed) {
1131                switch (run->mmio.len) {
1132                case 8: gpr = *(u64 *)run->mmio.data; break;
1133                case 4: gpr = *(u32 *)run->mmio.data; break;
1134                case 2: gpr = *(u16 *)run->mmio.data; break;
1135                case 1: gpr = *(u8 *)run->mmio.data; break;
1136                }
1137        } else {
1138                switch (run->mmio.len) {
1139                case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
1140                case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
1141                case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
1142                case 1: gpr = *(u8 *)run->mmio.data; break;
1143                }
1144        }
1145
1146        /* conversion between single and double precision */
1147        if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
1148                gpr = sp_to_dp(gpr);
1149
1150        if (vcpu->arch.mmio_sign_extend) {
1151                switch (run->mmio.len) {
1152#ifdef CONFIG_PPC64
1153                case 4:
1154                        gpr = (s64)(s32)gpr;
1155                        break;
1156#endif
1157                case 2:
1158                        gpr = (s64)(s16)gpr;
1159                        break;
1160                case 1:
1161                        gpr = (s64)(s8)gpr;
1162                        break;
1163                }
1164        }
1165
1166        switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
1167        case KVM_MMIO_REG_GPR:
1168                kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
1169                break;
1170        case KVM_MMIO_REG_FPR:
1171                if (vcpu->kvm->arch.kvm_ops->giveup_ext)
1172                        vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);
1173
1174                VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
1175                break;
1176#ifdef CONFIG_PPC_BOOK3S
1177        case KVM_MMIO_REG_QPR:
1178                vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
1179                break;
1180        case KVM_MMIO_REG_FQPR:
1181                VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
1182                vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
1183                break;
1184#endif
1185#ifdef CONFIG_VSX
1186        case KVM_MMIO_REG_VSX:
1187                if (vcpu->kvm->arch.kvm_ops->giveup_ext)
1188                        vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX);
1189
1190                if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD)
1191                        kvmppc_set_vsr_dword(vcpu, gpr);
1192                else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD)
1193                        kvmppc_set_vsr_word(vcpu, gpr);
1194                else if (vcpu->arch.mmio_copy_type ==
1195                                KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
1196                        kvmppc_set_vsr_dword_dump(vcpu, gpr);
1197                else if (vcpu->arch.mmio_copy_type ==
1198                                KVMPPC_VSX_COPY_WORD_LOAD_DUMP)
1199                        kvmppc_set_vsr_word_dump(vcpu, gpr);
1200                break;
1201#endif
1202#ifdef CONFIG_ALTIVEC
1203        case KVM_MMIO_REG_VMX:
1204                if (vcpu->kvm->arch.kvm_ops->giveup_ext)
1205                        vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC);
1206
1207                if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD)
1208                        kvmppc_set_vmx_dword(vcpu, gpr);
1209                else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD)
1210                        kvmppc_set_vmx_word(vcpu, gpr);
1211                else if (vcpu->arch.mmio_copy_type ==
1212                                KVMPPC_VMX_COPY_HWORD)
1213                        kvmppc_set_vmx_hword(vcpu, gpr);
1214                else if (vcpu->arch.mmio_copy_type ==
1215                                KVMPPC_VMX_COPY_BYTE)
1216                        kvmppc_set_vmx_byte(vcpu, gpr);
1217                break;
1218#endif
1219#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
1220        case KVM_MMIO_REG_NESTED_GPR:
1221                if (kvmppc_need_byteswap(vcpu))
1222                        gpr = swab64(gpr);
1223                kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr,
1224                                     sizeof(gpr));
1225                break;
1226#endif
1227        default:
1228                BUG();
1229        }
1230}
1231
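/*
 * Common MMIO load path: fill in vcpu->run->mmio plus the bookkeeping used
 * by kvmppc_complete_mmio_load(), try the in-kernel MMIO bus first, and only
 * exit to userspace (EMULATE_DO_MMIO) if no in-kernel device claims the
 * access.
 */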
1232static int __kvmppc_handle_load(struct kvm_vcpu *vcpu,
1233                                unsigned int rt, unsigned int bytes,
1234                                int is_default_endian, int sign_extend)
1235{
1236        struct kvm_run *run = vcpu->run;
1237        int idx, ret;
1238        bool host_swabbed;
1239
1240        /* Pity C doesn't have a logical XOR operator */
1241        if (kvmppc_need_byteswap(vcpu)) {
1242                host_swabbed = is_default_endian;
1243        } else {
1244                host_swabbed = !is_default_endian;
1245        }
1246
1247        if (bytes > sizeof(run->mmio.data)) {
 1248                printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
 1249                       bytes);
1250        }
1251
1252        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
1253        run->mmio.len = bytes;
1254        run->mmio.is_write = 0;
1255
1256        vcpu->arch.io_gpr = rt;
1257        vcpu->arch.mmio_host_swabbed = host_swabbed;
1258        vcpu->mmio_needed = 1;
1259        vcpu->mmio_is_write = 0;
1260        vcpu->arch.mmio_sign_extend = sign_extend;
1261
1262        idx = srcu_read_lock(&vcpu->kvm->srcu);
1263
1264        ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
1265                              bytes, &run->mmio.data);
1266
1267        srcu_read_unlock(&vcpu->kvm->srcu, idx);
1268
1269        if (!ret) {
1270                kvmppc_complete_mmio_load(vcpu);
1271                vcpu->mmio_needed = 0;
1272                return EMULATE_DONE;
1273        }
1274
1275        return EMULATE_DO_MMIO;
1276}
1277
1278int kvmppc_handle_load(struct kvm_vcpu *vcpu,
1279                       unsigned int rt, unsigned int bytes,
1280                       int is_default_endian)
1281{
1282        return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 0);
1283}
1284EXPORT_SYMBOL_GPL(kvmppc_handle_load);
1285
1286/* Same as above, but sign extends */
1287int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
1288                        unsigned int rt, unsigned int bytes,
1289                        int is_default_endian)
1290{
1291        return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 1);
1292}
1293
1294#ifdef CONFIG_VSX
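/*
 * VSX loads and stores may touch several elements (e.g. a two-doubleword
 * lxvd2x); emulation sets mmio_vsx_copy_nums and the scalar MMIO path is
 * repeated once per element, advancing paddr_accessed and mmio_vsx_offset
 * each time.
 */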
1295int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
1296                        unsigned int rt, unsigned int bytes,
1297                        int is_default_endian, int mmio_sign_extend)
1298{
1299        enum emulation_result emulated = EMULATE_DONE;
1300
 1301        /* Currently, mmio_vsx_copy_nums is limited to 4 or fewer */
1302        if (vcpu->arch.mmio_vsx_copy_nums > 4)
1303                return EMULATE_FAIL;
1304
1305        while (vcpu->arch.mmio_vsx_copy_nums) {
1306                emulated = __kvmppc_handle_load(vcpu, rt, bytes,
1307                        is_default_endian, mmio_sign_extend);
1308
1309                if (emulated != EMULATE_DONE)
1310                        break;
1311
1312                vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
1313
1314                vcpu->arch.mmio_vsx_copy_nums--;
1315                vcpu->arch.mmio_vsx_offset++;
1316        }
1317        return emulated;
1318}
1319#endif /* CONFIG_VSX */
1320
1321int kvmppc_handle_store(struct kvm_vcpu *vcpu,
1322                        u64 val, unsigned int bytes, int is_default_endian)
1323{
1324        struct kvm_run *run = vcpu->run;
1325        void *data = run->mmio.data;
1326        int idx, ret;
1327        bool host_swabbed;
1328
1329        /* Pity C doesn't have a logical XOR operator */
1330        if (kvmppc_need_byteswap(vcpu)) {
1331                host_swabbed = is_default_endian;
1332        } else {
1333                host_swabbed = !is_default_endian;
1334        }
1335
1336        if (bytes > sizeof(run->mmio.data)) {
 1337                printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
 1338                       bytes);
1339        }
1340
1341        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
1342        run->mmio.len = bytes;
1343        run->mmio.is_write = 1;
1344        vcpu->mmio_needed = 1;
1345        vcpu->mmio_is_write = 1;
1346
1347        if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
1348                val = dp_to_sp(val);
1349
 1350        /* Store the value in the lowest bytes of 'data'. */
1351        if (!host_swabbed) {
1352                switch (bytes) {
1353                case 8: *(u64 *)data = val; break;
1354                case 4: *(u32 *)data = val; break;
1355                case 2: *(u16 *)data = val; break;
1356                case 1: *(u8  *)data = val; break;
1357                }
1358        } else {
1359                switch (bytes) {
1360                case 8: *(u64 *)data = swab64(val); break;
1361                case 4: *(u32 *)data = swab32(val); break;
1362                case 2: *(u16 *)data = swab16(val); break;
1363                case 1: *(u8  *)data = val; break;
1364                }
1365        }
1366
1367        idx = srcu_read_lock(&vcpu->kvm->srcu);
1368
1369        ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
1370                               bytes, &run->mmio.data);
1371
1372        srcu_read_unlock(&vcpu->kvm->srcu, idx);
1373
1374        if (!ret) {
1375                vcpu->mmio_needed = 0;
1376                return EMULATE_DONE;
1377        }
1378
1379        return EMULATE_DO_MMIO;
1380}
1381EXPORT_SYMBOL_GPL(kvmppc_handle_store);
1382
1383#ifdef CONFIG_VSX
1384static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
1385{
1386        u32 dword_offset, word_offset;
1387        union kvmppc_one_reg reg;
1388        int vsx_offset = 0;
1389        int copy_type = vcpu->arch.mmio_copy_type;
1390        int result = 0;
1391
1392        switch (copy_type) {
1393        case KVMPPC_VSX_COPY_DWORD:
1394                vsx_offset =
1395                        kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
1396
1397                if (vsx_offset == -1) {
1398                        result = -1;
1399                        break;
1400                }
1401
1402                if (rs < 32) {
1403                        *val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
1404                } else {
1405                        reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
1406                        *val = reg.vsxval[vsx_offset];
1407                }
1408                break;
1409
1410        case KVMPPC_VSX_COPY_WORD:
1411                vsx_offset =
1412                        kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
1413
1414                if (vsx_offset == -1) {
1415                        result = -1;
1416                        break;
1417                }
1418
1419                if (rs < 32) {
1420                        dword_offset = vsx_offset / 2;
1421                        word_offset = vsx_offset % 2;
1422                        reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
1423                        *val = reg.vsx32val[word_offset];
1424                } else {
1425                        reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
1426                        *val = reg.vsx32val[vsx_offset];
1427                }
1428                break;
1429
1430        default:
1431                result = -1;
1432                break;
1433        }
1434
1435        return result;
1436}
1437
1438int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
1439                        int rs, unsigned int bytes, int is_default_endian)
1440{
1441        u64 val;
1442        enum emulation_result emulated = EMULATE_DONE;
1443
1444        vcpu->arch.io_gpr = rs;
1445
 1446        /* Currently, mmio_vsx_copy_nums is limited to 4 or fewer */
1447        if (vcpu->arch.mmio_vsx_copy_nums > 4)
1448                return EMULATE_FAIL;
1449
1450        while (vcpu->arch.mmio_vsx_copy_nums) {
1451                if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
1452                        return EMULATE_FAIL;
1453
1454                emulated = kvmppc_handle_store(vcpu,
1455                         val, bytes, is_default_endian);
1456
1457                if (emulated != EMULATE_DONE)
1458                        break;
1459
1460                vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
1461
1462                vcpu->arch.mmio_vsx_copy_nums--;
1463                vcpu->arch.mmio_vsx_offset++;
1464        }
1465
1466        return emulated;
1467}
1468
1469static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu)
1470{
1471        struct kvm_run *run = vcpu->run;
1472        enum emulation_result emulated = EMULATE_FAIL;
1473        int r;
1474
1475        vcpu->arch.paddr_accessed += run->mmio.len;
1476
1477        if (!vcpu->mmio_is_write) {
1478                emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr,
1479                         run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
1480        } else {
1481                emulated = kvmppc_handle_vsx_store(vcpu,
1482                         vcpu->arch.io_gpr, run->mmio.len, 1);
1483        }
1484
1485        switch (emulated) {
1486        case EMULATE_DO_MMIO:
1487                run->exit_reason = KVM_EXIT_MMIO;
1488                r = RESUME_HOST;
1489                break;
1490        case EMULATE_FAIL:
1491                pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
1492                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1493                run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
1494                r = RESUME_HOST;
1495                break;
1496        default:
1497                r = RESUME_GUEST;
1498                break;
1499        }
1500        return r;
1501}
1502#endif /* CONFIG_VSX */
1503
1504#ifdef CONFIG_ALTIVEC
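/*
 * Altivec (VMX) MMIO follows the same pattern as VSX above: the scalar
 * load/store is repeated once per element, with mmio_vmx_copy_nums and
 * mmio_vmx_offset tracking progress across exits to userspace.
 */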
1505int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
1506                unsigned int rt, unsigned int bytes, int is_default_endian)
1507{
1508        enum emulation_result emulated = EMULATE_DONE;
1509
 1510        if (vcpu->arch.mmio_vmx_copy_nums > 2)
1511                return EMULATE_FAIL;
1512
1513        while (vcpu->arch.mmio_vmx_copy_nums) {
1514                emulated = __kvmppc_handle_load(vcpu, rt, bytes,
1515                                is_default_endian, 0);
1516
1517                if (emulated != EMULATE_DONE)
1518                        break;
1519
1520                vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
1521                vcpu->arch.mmio_vmx_copy_nums--;
1522                vcpu->arch.mmio_vmx_offset++;
1523        }
1524
1525        return emulated;
1526}
1527
1528static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
1529{
1530        union kvmppc_one_reg reg;
1531        int vmx_offset = 0;
1532        int result = 0;
1533
1534        vmx_offset =
1535                kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1536
1537        if (vmx_offset == -1)
1538                return -1;
1539
1540        reg.vval = VCPU_VSX_VR(vcpu, index);
1541        *val = reg.vsxval[vmx_offset];
1542
1543        return result;
1544}
1545
1546static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
1547{
1548        union kvmppc_one_reg reg;
1549        int vmx_offset = 0;
1550        int result = 0;
1551
1552        vmx_offset =
1553                kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1554
1555        if (vmx_offset == -1)
1556                return -1;
1557
1558        reg.vval = VCPU_VSX_VR(vcpu, index);
1559        *val = reg.vsx32val[vmx_offset];
1560
1561        return result;
1562}
1563
1564static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
1565{
1566        union kvmppc_one_reg reg;
1567        int vmx_offset = 0;
1568        int result = 0;
1569
1570        vmx_offset =
1571                kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1572
1573        if (vmx_offset == -1)
1574                return -1;
1575
1576        reg.vval = VCPU_VSX_VR(vcpu, index);
1577        *val = reg.vsx16val[vmx_offset];
1578
1579        return result;
1580}
1581
1582static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
1583{
1584        union kvmppc_one_reg reg;
1585        int vmx_offset = 0;
1586        int result = 0;
1587
1588        vmx_offset =
1589                kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1590
1591        if (vmx_offset == -1)
1592                return -1;
1593
1594        reg.vval = VCPU_VSX_VR(vcpu, index);
1595        *val = reg.vsx8val[vmx_offset];
1596
1597        return result;
1598}
1599
1600int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
1601                unsigned int rs, unsigned int bytes, int is_default_endian)
1602{
1603        u64 val = 0;
1604        unsigned int index = rs & KVM_MMIO_REG_MASK;
1605        enum emulation_result emulated = EMULATE_DONE;
1606
 1607        if (vcpu->arch.mmio_vmx_copy_nums > 2)
1608                return EMULATE_FAIL;
1609
1610        vcpu->arch.io_gpr = rs;
1611
1612        while (vcpu->arch.mmio_vmx_copy_nums) {
1613                switch (vcpu->arch.mmio_copy_type) {
1614                case KVMPPC_VMX_COPY_DWORD:
1615                        if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1)
1616                                return EMULATE_FAIL;
1617
1618                        break;
1619                case KVMPPC_VMX_COPY_WORD:
1620                        if (kvmppc_get_vmx_word(vcpu, index, &val) == -1)
1621                                return EMULATE_FAIL;
1622                        break;
1623                case KVMPPC_VMX_COPY_HWORD:
1624                        if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1)
1625                                return EMULATE_FAIL;
1626                        break;
1627                case KVMPPC_VMX_COPY_BYTE:
1628                        if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1)
1629                                return EMULATE_FAIL;
1630                        break;
1631                default:
1632                        return EMULATE_FAIL;
1633                }
1634
1635                emulated = kvmppc_handle_store(vcpu, val, bytes,
1636                                is_default_endian);
1637                if (emulated != EMULATE_DONE)
1638                        break;
1639
1640                vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
1641                vcpu->arch.mmio_vmx_copy_nums--;
1642                vcpu->arch.mmio_vmx_offset++;
1643        }
1644
1645        return emulated;
1646}
1647
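/*
 * Continue a multi-chunk VMX MMIO access once the previous chunk has been
 * completed by userspace: advance the accessed guest physical address past
 * that chunk, re-issue the load or store for the remainder, and map the
 * emulation result onto a RESUME_* code for the vcpu run loop.
 */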
1648static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu)
1649{
1650        struct kvm_run *run = vcpu->run;
1651        enum emulation_result emulated = EMULATE_FAIL;
1652        int r;
1653
1654        vcpu->arch.paddr_accessed += run->mmio.len;
1655
1656        if (!vcpu->mmio_is_write) {
1657                emulated = kvmppc_handle_vmx_load(vcpu,
1658                                vcpu->arch.io_gpr, run->mmio.len, 1);
1659        } else {
1660                emulated = kvmppc_handle_vmx_store(vcpu,
1661                                vcpu->arch.io_gpr, run->mmio.len, 1);
1662        }
1663
1664        switch (emulated) {
1665        case EMULATE_DO_MMIO:
1666                run->exit_reason = KVM_EXIT_MMIO;
1667                r = RESUME_HOST;
1668                break;
1669        case EMULATE_FAIL:
1670                pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
1671                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1672                run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
1673                r = RESUME_HOST;
1674                break;
1675        default:
1676                r = RESUME_GUEST;
1677                break;
1678        }
1679        return r;
1680}
1681#endif /* CONFIG_ALTIVEC */
1682
1683int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1684{
1685        int r = 0;
1686        union kvmppc_one_reg val;
1687        int size;
1688
1689        size = one_reg_size(reg->id);
1690        if (size > sizeof(val))
1691                return -EINVAL;
1692
1693        r = kvmppc_get_one_reg(vcpu, reg->id, &val);
1694        if (r == -EINVAL) {
1695                r = 0;
1696                switch (reg->id) {
1697#ifdef CONFIG_ALTIVEC
1698                case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
1699                        if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1700                                r = -ENXIO;
1701                                break;
1702                        }
1703                        val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
1704                        break;
1705                case KVM_REG_PPC_VSCR:
1706                        if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1707                                r = -ENXIO;
1708                                break;
1709                        }
1710                        val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
1711                        break;
1712                case KVM_REG_PPC_VRSAVE:
1713                        val = get_reg_val(reg->id, vcpu->arch.vrsave);
1714                        break;
1715#endif /* CONFIG_ALTIVEC */
1716                default:
1717                        r = -EINVAL;
1718                        break;
1719                }
1720        }
1721
1722        if (r)
1723                return r;
1724
1725        if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
1726                r = -EFAULT;
1727
1728        return r;
1729}
1730
1731int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1732{
1733        int r;
1734        union kvmppc_one_reg val;
1735        int size;
1736
1737        size = one_reg_size(reg->id);
1738        if (size > sizeof(val))
1739                return -EINVAL;
1740
1741        if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
1742                return -EFAULT;
1743
1744        r = kvmppc_set_one_reg(vcpu, reg->id, &val);
1745        if (r == -EINVAL) {
1746                r = 0;
1747                switch (reg->id) {
1748#ifdef CONFIG_ALTIVEC
1749                case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
1750                        if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1751                                r = -ENXIO;
1752                                break;
1753                        }
1754                        vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
1755                        break;
1756                case KVM_REG_PPC_VSCR:
1757                        if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1758                                r = -ENXIO;
1759                                break;
1760                        }
1761                        vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
1762                        break;
1763                case KVM_REG_PPC_VRSAVE:
1764                        if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1765                                r = -ENXIO;
1766                                break;
1767                        }
1768                        vcpu->arch.vrsave = set_reg_val(reg->id, val);
1769                        break;
1770#endif /* CONFIG_ALTIVEC */
1771                default:
1772                        r = -EINVAL;
1773                        break;
1774                }
1775        }
1776
1777        return r;
1778}
1779
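/*
 * For reference, a minimal userspace sketch of the ONE_REG interface
 * handled above (an open vcpu fd is assumed and error handling is omitted;
 * KVM_REG_PPC_VRSAVE is a 32-bit register, hence the u32 buffer):
 *
 *	uint32_t vrsave = 0;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_PPC_VRSAVE,
 *		.addr = (uintptr_t)&vrsave,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
 */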
1780int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
1781{
1782        struct kvm_run *run = vcpu->run;
1783        int r;
1784
1785        vcpu_load(vcpu);
1786
1787        if (vcpu->mmio_needed) {
1788                vcpu->mmio_needed = 0;
1789                if (!vcpu->mmio_is_write)
1790                        kvmppc_complete_mmio_load(vcpu);
1791#ifdef CONFIG_VSX
1792                if (vcpu->arch.mmio_vsx_copy_nums > 0) {
1793                        vcpu->arch.mmio_vsx_copy_nums--;
1794                        vcpu->arch.mmio_vsx_offset++;
1795                }
1796
1797                if (vcpu->arch.mmio_vsx_copy_nums > 0) {
1798                        r = kvmppc_emulate_mmio_vsx_loadstore(vcpu);
1799                        if (r == RESUME_HOST) {
1800                                vcpu->mmio_needed = 1;
1801                                goto out;
1802                        }
1803                }
1804#endif
1805#ifdef CONFIG_ALTIVEC
1806                if (vcpu->arch.mmio_vmx_copy_nums > 0) {
1807                        vcpu->arch.mmio_vmx_copy_nums--;
1808                        vcpu->arch.mmio_vmx_offset++;
1809                }
1810
1811                if (vcpu->arch.mmio_vmx_copy_nums > 0) {
1812                        r = kvmppc_emulate_mmio_vmx_loadstore(vcpu);
1813                        if (r == RESUME_HOST) {
1814                                vcpu->mmio_needed = 1;
1815                                goto out;
1816                        }
1817                }
1818#endif
1819        } else if (vcpu->arch.osi_needed) {
1820                u64 *gprs = run->osi.gprs;
1821                int i;
1822
1823                for (i = 0; i < 32; i++)
1824                        kvmppc_set_gpr(vcpu, i, gprs[i]);
1825                vcpu->arch.osi_needed = 0;
1826        } else if (vcpu->arch.hcall_needed) {
1827                int i;
1828
1829                kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
1830                for (i = 0; i < 9; ++i)
1831                        kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
1832                vcpu->arch.hcall_needed = 0;
1833#ifdef CONFIG_BOOKE
1834        } else if (vcpu->arch.epr_needed) {
1835                kvmppc_set_epr(vcpu, run->epr.epr);
1836                vcpu->arch.epr_needed = 0;
1837#endif
1838        }
1839
1840        kvm_sigset_activate(vcpu);
1841
1842        if (run->immediate_exit)
1843                r = -EINTR;
1844        else
1845                r = kvmppc_vcpu_run(vcpu);
1846
1847        kvm_sigset_deactivate(vcpu);
1848
1849#ifdef CONFIG_ALTIVEC
1850out:
1851#endif
1852        vcpu_put(vcpu);
1853        return r;
1854}
1855
1856int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
1857{
1858        if (irq->irq == KVM_INTERRUPT_UNSET) {
1859                kvmppc_core_dequeue_external(vcpu);
1860                return 0;
1861        }
1862
1863        kvmppc_core_queue_external(vcpu, irq);
1864
1865        kvm_vcpu_kick(vcpu);
1866
1867        return 0;
1868}
1869
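/*
 * A minimal userspace sketch of driving the handler above through the
 * KVM_INTERRUPT vcpu ioctl (vcpu fd assumed, error handling omitted):
 *
 *	struct kvm_interrupt irq = { .irq = KVM_INTERRUPT_SET };
 *	ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
 *
 * Passing KVM_INTERRUPT_UNSET instead dequeues a pending external
 * interrupt, as handled in the first branch above.
 */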
1870static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
1871                                     struct kvm_enable_cap *cap)
1872{
1873        int r;
1874
1875        if (cap->flags)
1876                return -EINVAL;
1877
1878        switch (cap->cap) {
1879        case KVM_CAP_PPC_OSI:
1880                r = 0;
1881                vcpu->arch.osi_enabled = true;
1882                break;
1883        case KVM_CAP_PPC_PAPR:
1884                r = 0;
1885                vcpu->arch.papr_enabled = true;
1886                break;
1887        case KVM_CAP_PPC_EPR:
1888                r = 0;
1889                if (cap->args[0])
1890                        vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
1891                else
1892                        vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
1893                break;
1894#ifdef CONFIG_BOOKE
1895        case KVM_CAP_PPC_BOOKE_WATCHDOG:
1896                r = 0;
1897                vcpu->arch.watchdog_enabled = true;
1898                break;
1899#endif
1900#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
1901        case KVM_CAP_SW_TLB: {
1902                struct kvm_config_tlb cfg;
1903                void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];
1904
1905                r = -EFAULT;
1906                if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
1907                        break;
1908
1909                r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
1910                break;
1911        }
1912#endif
1913#ifdef CONFIG_KVM_MPIC
1914        case KVM_CAP_IRQ_MPIC: {
1915                struct fd f;
1916                struct kvm_device *dev;
1917
1918                r = -EBADF;
1919                f = fdget(cap->args[0]);
1920                if (!f.file)
1921                        break;
1922
1923                r = -EPERM;
1924                dev = kvm_device_from_filp(f.file);
1925                if (dev)
1926                        r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);
1927
1928                fdput(f);
1929                break;
1930        }
1931#endif
1932#ifdef CONFIG_KVM_XICS
1933        case KVM_CAP_IRQ_XICS: {
1934                struct fd f;
1935                struct kvm_device *dev;
1936
1937                r = -EBADF;
1938                f = fdget(cap->args[0]);
1939                if (!f.file)
1940                        break;
1941
1942                r = -EPERM;
1943                dev = kvm_device_from_filp(f.file);
1944                if (dev) {
1945                        if (xics_on_xive())
1946                                r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
1947                        else
1948                                r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
1949                }
1950
1951                fdput(f);
1952                break;
1953        }
1954#endif /* CONFIG_KVM_XICS */
1955#ifdef CONFIG_KVM_XIVE
1956        case KVM_CAP_PPC_IRQ_XIVE: {
1957                struct fd f;
1958                struct kvm_device *dev;
1959
1960                r = -EBADF;
1961                f = fdget(cap->args[0]);
1962                if (!f.file)
1963                        break;
1964
1965                r = -ENXIO;
1966                if (!xive_enabled())
1967                        break;
1968
1969                r = -EPERM;
1970                dev = kvm_device_from_filp(f.file);
1971                if (dev)
1972                        r = kvmppc_xive_native_connect_vcpu(dev, vcpu,
1973                                                            cap->args[1]);
1974
1975                fdput(f);
1976                break;
1977        }
1978#endif /* CONFIG_KVM_XIVE */
1979#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
1980        case KVM_CAP_PPC_FWNMI:
1981                r = -EINVAL;
1982                if (!is_kvmppc_hv_enabled(vcpu->kvm))
1983                        break;
1984                r = 0;
1985                vcpu->kvm->arch.fwnmi_enabled = true;
1986                break;
1987#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
1988        default:
1989                r = -EINVAL;
1990                break;
1991        }
1992
1993        if (!r)
1994                r = kvmppc_sanity_check(vcpu);
1995
1996        return r;
1997}
1998
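/*
 * A minimal userspace sketch of enabling one of the per-vcpu capabilities
 * handled above, e.g. KVM_CAP_PPC_PAPR (vcpu fd assumed, errors ignored):
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_PPC_PAPR };
 *	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 */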
1999bool kvm_arch_intc_initialized(struct kvm *kvm)
2000{
2001#ifdef CONFIG_KVM_MPIC
2002        if (kvm->arch.mpic)
2003                return true;
2004#endif
2005#ifdef CONFIG_KVM_XICS
2006        if (kvm->arch.xics || kvm->arch.xive)
2007                return true;
2008#endif
2009        return false;
2010}
2011
2012int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
2013                                    struct kvm_mp_state *mp_state)
2014{
2015        return -EINVAL;
2016}
2017
2018int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
2019                                    struct kvm_mp_state *mp_state)
2020{
2021        return -EINVAL;
2022}
2023
2024long kvm_arch_vcpu_async_ioctl(struct file *filp,
2025                               unsigned int ioctl, unsigned long arg)
2026{
2027        struct kvm_vcpu *vcpu = filp->private_data;
2028        void __user *argp = (void __user *)arg;
2029
2030        if (ioctl == KVM_INTERRUPT) {
2031                struct kvm_interrupt irq;
2032                if (copy_from_user(&irq, argp, sizeof(irq)))
2033                        return -EFAULT;
2034                return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
2035        }
2036        return -ENOIOCTLCMD;
2037}
2038
2039long kvm_arch_vcpu_ioctl(struct file *filp,
2040                         unsigned int ioctl, unsigned long arg)
2041{
2042        struct kvm_vcpu *vcpu = filp->private_data;
2043        void __user *argp = (void __user *)arg;
2044        long r;
2045
2046        switch (ioctl) {
2047        case KVM_ENABLE_CAP:
2048        {
2049                struct kvm_enable_cap cap;
2050                r = -EFAULT;
2051                if (copy_from_user(&cap, argp, sizeof(cap)))
2052                        goto out;
2053                vcpu_load(vcpu);
2054                r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
2055                vcpu_put(vcpu);
2056                break;
2057        }
2058
2059        case KVM_SET_ONE_REG:
2060        case KVM_GET_ONE_REG:
2061        {
2062                struct kvm_one_reg reg;
2063                r = -EFAULT;
2064                if (copy_from_user(&reg, argp, sizeof(reg)))
2065                        goto out;
2066                if (ioctl == KVM_SET_ONE_REG)
2067                        r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
2068                else
2069                        r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
2070                break;
2071        }
2072
2073#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
2074        case KVM_DIRTY_TLB: {
2075                struct kvm_dirty_tlb dirty;
2076                r = -EFAULT;
2077                if (copy_from_user(&dirty, argp, sizeof(dirty)))
2078                        goto out;
2079                vcpu_load(vcpu);
2080                r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
2081                vcpu_put(vcpu);
2082                break;
2083        }
2084#endif
2085        default:
2086                r = -EINVAL;
2087        }
2088
2089out:
2090        return r;
2091}
2092
2093vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
2094{
2095        return VM_FAULT_SIGBUS;
2096}
2097
2098static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
2099{
2100        u32 inst_nop = 0x60000000;
2101#ifdef CONFIG_KVM_BOOKE_HV
2102        u32 inst_sc1 = 0x44000022;
2103        pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
2104        pvinfo->hcall[1] = cpu_to_be32(inst_nop);
2105        pvinfo->hcall[2] = cpu_to_be32(inst_nop);
2106        pvinfo->hcall[3] = cpu_to_be32(inst_nop);
2107#else
2108        u32 inst_lis = 0x3c000000;
2109        u32 inst_ori = 0x60000000;
2110        u32 inst_sc = 0x44000002;
2111        u32 inst_imm_mask = 0xffff;
2112
2113        /*
2114         * The hypercall to get into KVM from within guest context is as
2115         * follows:
2116         *
2117         *    lis r0, KVM_SC_MAGIC_R0@h
2118         *    ori r0, r0, KVM_SC_MAGIC_R0@l
2119         *    sc
2120         *    nop
2121         */
2122        pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
2123        pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
2124        pvinfo->hcall[2] = cpu_to_be32(inst_sc);
2125        pvinfo->hcall[3] = cpu_to_be32(inst_nop);
2126#endif
2127
2128        pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;
2129
2130        return 0;
2131}
2132
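/*
 * Userspace retrieves the stub built above with the KVM_PPC_GET_PVINFO vm
 * ioctl; a minimal sketch (vm fd assumed, error handling omitted):
 *
 *	struct kvm_ppc_pvinfo pvinfo;
 *	ioctl(vm_fd, KVM_PPC_GET_PVINFO, &pvinfo);
 *
 * On success pvinfo.hcall[] holds the four big-endian instructions of the
 * hypercall sequence.
 */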
2133int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
2134                          bool line_status)
2135{
2136        if (!irqchip_in_kernel(kvm))
2137                return -ENXIO;
2138
2139        irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
2140                                        irq_event->irq, irq_event->level,
2141                                        line_status);
2142        return 0;
2143}
2144
2145
2146int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
2147                            struct kvm_enable_cap *cap)
2148{
2149        int r;
2150
2151        if (cap->flags)
2152                return -EINVAL;
2153
2154        switch (cap->cap) {
2155#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
2156        case KVM_CAP_PPC_ENABLE_HCALL: {
2157                unsigned long hcall = cap->args[0];
2158
2159                r = -EINVAL;
2160                if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
2161                    cap->args[1] > 1)
2162                        break;
2163                if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
2164                        break;
2165                if (cap->args[1])
2166                        set_bit(hcall / 4, kvm->arch.enabled_hcalls);
2167                else
2168                        clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
2169                r = 0;
2170                break;
2171        }
2172        case KVM_CAP_PPC_SMT: {
2173                unsigned long mode = cap->args[0];
2174                unsigned long flags = cap->args[1];
2175
2176                r = -EINVAL;
2177                if (kvm->arch.kvm_ops->set_smt_mode)
2178                        r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
2179                break;
2180        }
2181
2182        case KVM_CAP_PPC_NESTED_HV:
2183                r = -EINVAL;
2184                if (!is_kvmppc_hv_enabled(kvm) ||
2185                    !kvm->arch.kvm_ops->enable_nested)
2186                        break;
2187                r = kvm->arch.kvm_ops->enable_nested(kvm);
2188                break;
2189#endif
2190#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
2191        case KVM_CAP_PPC_SECURE_GUEST:
2192                r = -EINVAL;
2193                if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_svm)
2194                        break;
2195                r = kvm->arch.kvm_ops->enable_svm(kvm);
2196                break;
2197        case KVM_CAP_PPC_DAWR1:
2198                r = -EINVAL;
2199                if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_dawr1)
2200                        break;
2201                r = kvm->arch.kvm_ops->enable_dawr1(kvm);
2202                break;
2203#endif
2204        default:
2205                r = -EINVAL;
2206                break;
2207        }
2208
2209        return r;
2210}
2211
2212#ifdef CONFIG_PPC_BOOK3S_64
2213/*
2214 * These functions check whether the underlying hardware is safe
2215 * against attacks based on observing the effects of speculatively
2216 * executed instructions, and whether it supplies instructions for
2217 * use in workarounds.  The information comes from firmware, either
2218 * via the device tree on powernv platforms or from an hcall on
2219 * pseries platforms.
2220 */
2221#ifdef CONFIG_PPC_PSERIES
2222static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
2223{
2224        struct h_cpu_char_result c;
2225        unsigned long rc;
2226
2227        if (!machine_is(pseries))
2228                return -ENOTTY;
2229
2230        rc = plpar_get_cpu_characteristics(&c);
2231        if (rc == H_SUCCESS) {
2232                cp->character = c.character;
2233                cp->behaviour = c.behaviour;
2234                cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
2235                        KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
2236                        KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
2237                        KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
2238                        KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
2239                        KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
2240                        KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
2241                        KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
2242                        KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
2243                cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
2244                        KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
2245                        KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
2246                        KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
2247        }
2248        return 0;
2249}
2250#else
2251static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
2252{
2253        return -ENOTTY;
2254}
2255#endif
2256
2257static inline bool have_fw_feat(struct device_node *fw_features,
2258                                const char *state, const char *name)
2259{
2260        struct device_node *np;
2261        bool r = false;
2262
2263        np = of_get_child_by_name(fw_features, name);
2264        if (np) {
2265                r = of_property_read_bool(np, state);
2266                of_node_put(np);
2267        }
2268        return r;
2269}
2270
2271static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
2272{
2273        struct device_node *np, *fw_features;
2274        int r;
2275
2276        memset(cp, 0, sizeof(*cp));
2277        r = pseries_get_cpu_char(cp);
2278        if (r != -ENOTTY)
2279                return r;
2280
2281        np = of_find_node_by_name(NULL, "ibm,opal");
2282        if (np) {
2283                fw_features = of_get_child_by_name(np, "fw-features");
2284                of_node_put(np);
2285                if (!fw_features)
2286                        return 0;
2287                if (have_fw_feat(fw_features, "enabled",
2288                                 "inst-spec-barrier-ori31,31,0"))
2289                        cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
2290                if (have_fw_feat(fw_features, "enabled",
2291                                 "fw-bcctrl-serialized"))
2292                        cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
2293                if (have_fw_feat(fw_features, "enabled",
2294                                 "inst-l1d-flush-ori30,30,0"))
2295                        cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
2296                if (have_fw_feat(fw_features, "enabled",
2297                                 "inst-l1d-flush-trig2"))
2298                        cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
2299                if (have_fw_feat(fw_features, "enabled",
2300                                 "fw-l1d-thread-split"))
2301                        cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
2302                if (have_fw_feat(fw_features, "enabled",
2303                                 "fw-count-cache-disabled"))
2304                        cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
2305                if (have_fw_feat(fw_features, "enabled",
2306                                 "fw-count-cache-flush-bcctr2,0,0"))
2307                        cp->character |= KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
2308                cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
2309                        KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
2310                        KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
2311                        KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
2312                        KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
2313                        KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
2314                        KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
2315
2316                if (have_fw_feat(fw_features, "enabled",
2317                                 "speculation-policy-favor-security"))
2318                        cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
2319                if (!have_fw_feat(fw_features, "disabled",
2320                                  "needs-l1d-flush-msr-pr-0-to-1"))
2321                        cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
2322                if (!have_fw_feat(fw_features, "disabled",
2323                                  "needs-spec-barrier-for-bound-checks"))
2324                        cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
2325                if (have_fw_feat(fw_features, "enabled",
2326                                 "needs-count-cache-flush-on-context-switch"))
2327                        cp->behaviour |= KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
2328                cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
2329                        KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
2330                        KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
2331                        KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
2332
2333                of_node_put(fw_features);
2334        }
2335
2336        return 0;
2337}
2338#endif
2339
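/*
 * Userspace queries the characteristics gathered above through the
 * KVM_PPC_GET_CPU_CHAR vm ioctl; a hedged sketch (vm fd assumed):
 *
 *	struct kvm_ppc_cpu_char cc;
 *	if (!ioctl(vm_fd, KVM_PPC_GET_CPU_CHAR, &cc) &&
 *	    (cc.behaviour & cc.behaviour_mask & KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR))
 *		advertise_l1d_flush_to_guest();
 *
 * advertise_l1d_flush_to_guest() is a hypothetical stand-in for whatever
 * the VMM does with that recommendation.
 */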
2340long kvm_arch_vm_ioctl(struct file *filp,
2341                       unsigned int ioctl, unsigned long arg)
2342{
2343        struct kvm *kvm __maybe_unused = filp->private_data;
2344        void __user *argp = (void __user *)arg;
2345        long r;
2346
2347        switch (ioctl) {
2348        case KVM_PPC_GET_PVINFO: {
2349                struct kvm_ppc_pvinfo pvinfo;
2350                memset(&pvinfo, 0, sizeof(pvinfo));
2351                r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
2352                if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
2353                        r = -EFAULT;
2354                        goto out;
2355                }
2356
2357                break;
2358        }
2359#ifdef CONFIG_SPAPR_TCE_IOMMU
2360        case KVM_CREATE_SPAPR_TCE_64: {
2361                struct kvm_create_spapr_tce_64 create_tce_64;
2362
2363                r = -EFAULT;
2364                if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
2365                        goto out;
2366                if (create_tce_64.flags) {
2367                        r = -EINVAL;
2368                        goto out;
2369                }
2370                r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
2371                goto out;
2372        }
2373        case KVM_CREATE_SPAPR_TCE: {
2374                struct kvm_create_spapr_tce create_tce;
2375                struct kvm_create_spapr_tce_64 create_tce_64;
2376
2377                r = -EFAULT;
2378                if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
2379                        goto out;
2380
2381                create_tce_64.liobn = create_tce.liobn;
2382                create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
2383                create_tce_64.offset = 0;
2384                create_tce_64.size = create_tce.window_size >>
2385                                IOMMU_PAGE_SHIFT_4K;
2386                create_tce_64.flags = 0;
2387                r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
2388                goto out;
2389        }
2390#endif
2391#ifdef CONFIG_PPC_BOOK3S_64
2392        case KVM_PPC_GET_SMMU_INFO: {
2393                struct kvm_ppc_smmu_info info;
2394                struct kvm *kvm = filp->private_data;
2395
2396                memset(&info, 0, sizeof(info));
2397                r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
2398                if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
2399                        r = -EFAULT;
2400                break;
2401        }
2402        case KVM_PPC_RTAS_DEFINE_TOKEN: {
2403                struct kvm *kvm = filp->private_data;
2404
2405                r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
2406                break;
2407        }
2408        case KVM_PPC_CONFIGURE_V3_MMU: {
2409                struct kvm *kvm = filp->private_data;
2410                struct kvm_ppc_mmuv3_cfg cfg;
2411
2412                r = -EINVAL;
2413                if (!kvm->arch.kvm_ops->configure_mmu)
2414                        goto out;
2415                r = -EFAULT;
2416                if (copy_from_user(&cfg, argp, sizeof(cfg)))
2417                        goto out;
2418                r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
2419                break;
2420        }
2421        case KVM_PPC_GET_RMMU_INFO: {
2422                struct kvm *kvm = filp->private_data;
2423                struct kvm_ppc_rmmu_info info;
2424
2425                r = -EINVAL;
2426                if (!kvm->arch.kvm_ops->get_rmmu_info)
2427                        goto out;
2428                r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
2429                if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
2430                        r = -EFAULT;
2431                break;
2432        }
2433        case KVM_PPC_GET_CPU_CHAR: {
2434                struct kvm_ppc_cpu_char cpuchar;
2435
2436                r = kvmppc_get_cpu_char(&cpuchar);
2437                if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
2438                        r = -EFAULT;
2439                break;
2440        }
2441        case KVM_PPC_SVM_OFF: {
2442                struct kvm *kvm = filp->private_data;
2443
2444                r = 0;
2445                if (!kvm->arch.kvm_ops->svm_off)
2446                        goto out;
2447
2448                r = kvm->arch.kvm_ops->svm_off(kvm);
2449                break;
2450        }
2451        default: {
2452                struct kvm *kvm = filp->private_data;
2453                r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
2454        }
2455#else /* CONFIG_PPC_BOOK3S_64 */
2456        default:
2457                r = -ENOTTY;
2458#endif
2459        }
2460out:
2461        return r;
2462}
2463
2464static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
2465static unsigned long nr_lpids;
2466
2467long kvmppc_alloc_lpid(void)
2468{
2469        long lpid;
2470
2471        do {
2472                lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
2473                if (lpid >= nr_lpids) {
2474                        pr_err("%s: No LPIDs free\n", __func__);
2475                        return -ENOMEM;
2476                }
2477        } while (test_and_set_bit(lpid, lpid_inuse));
2478
2479        return lpid;
2480}
2481EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);
2482
2483void kvmppc_claim_lpid(long lpid)
2484{
2485        set_bit(lpid, lpid_inuse);
2486}
2487EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);
2488
2489void kvmppc_free_lpid(long lpid)
2490{
2491        clear_bit(lpid, lpid_inuse);
2492}
2493EXPORT_SYMBOL_GPL(kvmppc_free_lpid);
2494
2495void kvmppc_init_lpid(unsigned long nr_lpids_param)
2496{
2497        nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
2498        memset(lpid_inuse, 0, sizeof(lpid_inuse));
2499}
2500EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
2501
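/*
 * Typical use of the LPID allocator above, as a sketch (the real call
 * sites live in the backend-specific code): kvmppc_init_lpid() is called
 * once with the number of LPIDs the platform provides, then each VM takes
 * and releases one:
 *
 *	long lpid = kvmppc_alloc_lpid();
 *	if (lpid < 0)
 *		return lpid;
 *	kvm->arch.lpid = lpid;
 *	...
 *	kvmppc_free_lpid(kvm->arch.lpid);
 */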
2502int kvm_arch_init(void *opaque)
2503{
2504        return 0;
2505}
2506
2507EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);
2508