linux/arch/powerpc/kvm/powerpc.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);

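/*
 * A vcpu is considered runnable when it has a pending exception to
 * deliver or an outstanding request bit set; otherwise it may sleep.
 */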
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) ||
	       v->requests;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

/*
 * Common checks before entering the guest world.  Call with interrupts
 * enabled; this function hard-disables them itself (and warns if they
 * were already disabled).
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state (interrupts still
 *      hard-disabled)
 * <= 0 if we need to go back to the host with return value (interrupts
 *      re-enabled on this path)
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 */
		smp_mb();

		if (vcpu->requests) {
			/* Make sure we process requests with preemption enabled */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/*
			 * Interrupts got enabled in between, so we are
			 * back at square one.
			 */
			continue;
		}

		__kvm_guest_enter();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);

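/*
 * The shared (magic) page is kept in the guest's current endianness.
 * When the guest switches endianness, byte-swap every field in place so
 * that subsequent host accesses through native types still see the
 * correct values.
 */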
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif

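/*
 * Handle a KVM paravirtual hypercall from the guest.  Per the ePAPR
 * convention used here, the hypercall number arrives in GPR11 and up to
 * four arguments in GPR3..GPR6; the status code is handed back to the
 * caller (which places it in GPR3), and a second return value is
 * written to GPR4 below.
 */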
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared &= PAGE_MASK;
			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void *)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);

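/*
 * Validate that the vcpu's configuration is one this host can actually
 * virtualize.  Caches the verdict in vcpu->arch.sane and returns 0 on
 * success or -EINVAL on an impossible combination.
 */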
int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

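/*
 * Try to emulate the load/store instruction that faulted and map the
 * emulation result onto a RESUME_* action: stay in the guest, bounce
 * the access to userspace as MMIO, or give up on emulation failure.
 */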
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_loadstore(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		u32 last_inst;

		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		/* XXX Deliver Program interrupt to guest. */
		pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
		r = RESUME_HOST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);

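/*
 * Store 'size' bytes from 'ptr' at guest effective address '*eaddr',
 * translating the address first.  Accesses that hit the magic (shared)
 * page are satisfied directly from the host mapping; everything else
 * goes through kvm_write_guest() or falls back to MMIO emulation.
 */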
int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int r;

	vcpu->stat.st++;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);
	if (r < 0)
		return r;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
		return EMULATE_DONE;
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

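/*
 * Counterpart to kvmppc_st(): load 'size' bytes into 'ptr' from guest
 * effective address '*eaddr', honouring read/execute permissions and
 * the magic page override.
 */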
int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int rc;

	vcpu->stat.ld++;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);
	if (rc)
		return rc;

	*eaddr = pte.raddr;

	if (!pte.may_read)
		return -EPERM;

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
		return EMULATE_DONE;
	}

	if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);

int kvm_arch_hardware_enable(void)
{
	return 0;
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

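/*
 * Pick the HV or PR backend for this VM.  type == 0 means "use the
 * default", which prefers HV when both modules are loaded; a module
 * reference is taken on the chosen backend so it cannot unload while
 * the VM exists.
 */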
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	/*
	 * if we have both HV and PR enabled, default is HV
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	return kvmppc_core_init_vm(kvm);
err_out:
	return -EINVAL;
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}

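/*
 * Report whether a KVM capability is supported.  Many answers depend on
 * whether the VM runs in HV or PR mode; without a VM to ask, we assume
 * HV whenever the HV module is loaded.
 */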
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
		r = 1;
		break;
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MMIO
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_PPC_ALLOC_HTAB:
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
		r = 1;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		if (hv_enabled)
			r = threads_per_subcore;
		else
			r = 0;
		break;
	case KVM_CAP_PPC_RMA:
		r = 0;
		break;
	case KVM_CAP_PPC_HWRNG:
		r = kvmppc_hwrng_present();
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = num_present_cpus();
		else
			r = num_online_cpus();
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
#endif
	default:
		r = 0;
		break;
	}
	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	kvmppc_core_free_memslot(kvm, free, dont);
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return kvmppc_core_create_memslot(kvm, slot, npages);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, mem, old, new);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}

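/*
 * Create a vcpu through the per-VM backend, then wire up its wait queue
 * and a per-vcpu debugfs entry.
 */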
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);

	kvmppc_remove_vcpu_debugfs(vcpu);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		kvmppc_xics_free_icp(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

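/*
 * hrtimer callback for the emulated decrementer: hand expiry off to the
 * subarch's kvmppc_decrementer_func(), which raises the decrementer
 * exception for the vcpu.
 */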
static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	kvmppc_decrementer_func(vcpu);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int ret;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	ret = kvmppc_subarch_vcpu_init(vcpu);
	return ret;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

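/*
 * Complete an MMIO load once userspace (or the in-kernel io bus) has
 * filled run->mmio.data: byte-swap if the access was in the "wrong"
 * endianness, sign-extend if requested, and route the value to the
 * GPR/FPR/QPR the faulting instruction targeted.
 */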
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
				      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (!vcpu->arch.mmio_host_swabbed) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		switch (run->mmio.len) {
		case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
		case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
		case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
	default:
		BUG();
	}
}

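/*
 * Set up a deferred MMIO load: record what the faulting instruction
 * wanted in vcpu state, try the in-kernel MMIO bus first, and only exit
 * to userspace (EMULATE_DO_MMIO) when nothing in the kernel claims the
 * address.
 */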
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes,
		       int is_default_endian)
{
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       bytes);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_host_swabbed = host_swabbed;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = 0;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian)
{
	int r;

	vcpu->arch.mmio_sign_extend = 1;
	r = kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian);

	return r;
}

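/*
 * Store counterpart of kvmppc_handle_load(): marshal 'val' into
 * run->mmio.data in the right byte order, then try the in-kernel MMIO
 * bus before deferring to userspace.
 */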
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_default_endian)
{
	void *data = run->mmio.data;
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       bytes);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (!host_swabbed) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8  *)data = val; break;
		}
	} else {
		switch (bytes) {
		case 8: *(u64 *)data = swab64(val); break;
		case 4: *(u32 *)data = swab32(val); break;
		case 2: *(u16 *)data = swab16(val); break;
		case 1: *(u8  *)data = val; break;
		}
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);

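/*
 * ONE_REG accessors: the subarch backend gets first shot at the
 * register ID; only the generic Altivec registers are handled here as a
 * fallback.
 */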
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = 0;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	r = kvmppc_get_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
			break;
		case KVM_REG_PPC_VRSAVE:
			val = get_reg_val(reg->id, vcpu->arch.vrsave);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	if (r)
		return r;

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
		r = -EFAULT;

	return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	r = kvmppc_set_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_VRSAVE:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vrsave = set_reg_val(reg->id, val);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

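/*
 * Main vcpu entry point for KVM_RUN.  Before re-entering the guest,
 * finish whatever the previous exit left pending: an MMIO load result,
 * OSI or PAPR hypercall return values, or an EPR from userspace.
 */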
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
	} else if (vcpu->arch.epr_needed) {
		kvmppc_set_epr(vcpu, run->epr.epr);
		vcpu->arch.epr_needed = 0;
#endif
	}

	r = kvmppc_vcpu_run(run, vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}

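/*
 * Handle KVM_ENABLE_CAP on a vcpu.  A userspace caller would typically
 * do something like the following (illustrative sketch only, 'vcpu_fd'
 * being the caller's vcpu file descriptor):
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_PPC_PAPR };
 *	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 *
 * On success the new configuration is re-validated with
 * kvmppc_sanity_check().
 */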
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
	case KVM_CAP_PPC_EPR:
		r = 0;
		if (cap->args[0])
			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
		else
			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
		break;
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		r = 0;
		vcpu->arch.watchdog_enabled = true;
		break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XICS */
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		goto out;
	}

	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

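/*
 * Fill in the hypercall instruction sequence a guest should patch in to
 * reach KVM: sc1 on Book-E HV, otherwise a magic value in r0 followed
 * by a plain sc.
 */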
static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			  bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
	return 0;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
				   struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	case KVM_CAP_PPC_ENABLE_HCALL: {
		unsigned long hcall = cap->args[0];

		r = -EINVAL;
		if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
		    cap->args[1] > 1)
			break;
		if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
			break;
		if (cap->args[1])
			set_bit(hcall / 4, kvm->arch.enabled_hcalls);
		else
			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
		r = 0;
		break;
	}
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
		goto out;
	}
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;
		struct kvm *kvm = filp->private_data;

		memset(&info, 0, sizeof(info));
		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
	default: {
		struct kvm *kvm = filp->private_data;
		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
	}
#else /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
#endif
	}
out:
	return r;
}

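/*
 * Simple global LPID allocator: a bitmap of in-use logical partition
 * IDs, sized by the subarch through kvmppc_init_lpid().
 */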
static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);

int kvm_arch_init(void *opaque)
{
	return 0;
}

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);