linux/arch/powerpc/kvm/powerpc.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);


int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
        return !!(v->arch.pending_exceptions) ||
               v->requests;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
        return 1;
}

/*
 * Common checks before entering the guest world.  Call with interrupts
 * enabled; they will be hard-disabled here.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
        int r;

        WARN_ON(irqs_disabled());
        hard_irq_disable();

        while (true) {
                if (need_resched()) {
                        local_irq_enable();
                        cond_resched();
                        hard_irq_disable();
                        continue;
                }

                if (signal_pending(current)) {
                        kvmppc_account_exit(vcpu, SIGNAL_EXITS);
                        vcpu->run->exit_reason = KVM_EXIT_INTR;
                        r = -EINTR;
                        break;
                }

                vcpu->mode = IN_GUEST_MODE;

                /*
                 * Reading vcpu->requests must happen after setting vcpu->mode,
                 * so we don't miss a request because the requester sees
                 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
                 * before next entering the guest (and thus doesn't IPI).
                 */
                smp_mb();

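                /*
                 * Illustrative pairing (assumed; it lives in generic KVM
                 * code, not this file): the requester side does
                 *
                 *      kvm_make_request(req, vcpu);
                 *      kvm_vcpu_kick(vcpu);
                 *
                 * where kvm_vcpu_kick() only sends the IPI when it observes
                 * vcpu->mode == IN_GUEST_MODE.
                 */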
                if (vcpu->requests) {
                        /* Make sure we process requests with preemption enabled */
                        local_irq_enable();
                        trace_kvm_check_requests(vcpu);
                        r = kvmppc_core_check_requests(vcpu);
                        hard_irq_disable();
                        if (r > 0)
                                continue;
                        break;
                }

                if (kvmppc_core_prepare_to_enter(vcpu)) {
                        /* interrupts got enabled in between, so we
                           are back at square 1 */
                        continue;
                }

                kvm_guest_enter();
                return 1;
        }

        /* return to host */
        local_irq_enable();
        return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
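
/*
 * Illustrative caller sketch (assumed; the real callers live in subarch
 * code such as booke.c and book3s_pr.c): run loops invoke this with
 * interrupts enabled and only enter the guest on a return value of 1:
 *
 *      r = kvmppc_prepare_to_enter(vcpu);
 *      if (r <= 0)
 *              return r;
 *
 * and on r == 1 proceed into the guest with interrupts hard-disabled.
 */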

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
        int i;

        shared->sprg0 = swab64(shared->sprg0);
        shared->sprg1 = swab64(shared->sprg1);
        shared->sprg2 = swab64(shared->sprg2);
        shared->sprg3 = swab64(shared->sprg3);
        shared->srr0 = swab64(shared->srr0);
        shared->srr1 = swab64(shared->srr1);
        shared->dar = swab64(shared->dar);
        shared->msr = swab64(shared->msr);
        shared->dsisr = swab32(shared->dsisr);
        shared->int_pending = swab32(shared->int_pending);
        for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
                shared->sr[i] = swab32(shared->sr[i]);
}
#endif

int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
        int nr = kvmppc_get_gpr(vcpu, 11);
        int r;
        unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
        unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
        unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
        unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
        unsigned long r2 = 0;

        if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
                /* 32 bit mode */
                param1 &= 0xffffffff;
                param2 &= 0xffffffff;
                param3 &= 0xffffffff;
                param4 &= 0xffffffff;
        }

        switch (nr) {
        case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
        {
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
                /* Book3S can be little endian, find it out here */
                int shared_big_endian = true;
                if (vcpu->arch.intr_msr & MSR_LE)
                        shared_big_endian = false;
                if (shared_big_endian != vcpu->arch.shared_big_endian)
                        kvmppc_swab_shared(vcpu);
                vcpu->arch.shared_big_endian = shared_big_endian;
#endif

                if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
                        /*
                         * Older versions of the Linux magic page code had
                         * a bug where they would map their trampoline code
                         * NX. If that's the case, remove !PR NX capability.
                         */
                        vcpu->arch.disable_kernel_nx = true;
                        kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
                }

                vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
                vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
                /*
                 * Make sure our 4k magic page is in the same window of a 64k
                 * page within the guest and within the host's page.
                 */
                if ((vcpu->arch.magic_page_pa & 0xf000) !=
                    ((ulong)vcpu->arch.shared & 0xf000)) {
                        void *old_shared = vcpu->arch.shared;
                        ulong shared = (ulong)vcpu->arch.shared;
                        void *new_shared;

                        shared &= PAGE_MASK;
                        shared |= vcpu->arch.magic_page_pa & 0xf000;
                        new_shared = (void *)shared;
                        memcpy(new_shared, old_shared, 0x1000);
                        vcpu->arch.shared = new_shared;
                }
#endif

                r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

                r = EV_SUCCESS;
                break;
        }
        case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
                r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
                r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

                /* Second return value is in r4 */
                break;
        case EV_HCALL_TOKEN(EV_IDLE):
                r = EV_SUCCESS;
                kvm_vcpu_block(vcpu);
                clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
                break;
        default:
                r = EV_UNIMPLEMENTED;
                break;
        }

        kvmppc_set_gpr(vcpu, 4, r2);

        return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);
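
/*
 * Guest-side sketch (illustrative; the exact trap sequence is the pvinfo
 * one further below): the handler above reads the hypercall token from
 * r11 and up to four parameters from r3..r6, and the second return value
 * goes back in r4; the primary status is propagated to the guest by the
 * subarch caller, conventionally in r3. Roughly:
 *
 *      li      r11, <hcall token>
 *      li      r3, <param1>
 *      ...
 *      sc                      <- or the patched hypercall sequence
 */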

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
        int r = false;

        /* We have to know what CPU to virtualize */
        if (!vcpu->arch.pvr)
                goto out;

        /* PAPR only works with book3s_64 */
        if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
                goto out;

        /* HV KVM can only do PAPR mode for now */
        if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
                goto out;

#ifdef CONFIG_KVM_BOOKE_HV
        if (!cpu_has_feature(CPU_FTR_EMB_HV))
                goto out;
#endif

        r = true;

out:
        vcpu->arch.sane = r;
        return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        enum emulation_result er;
        int r;

        er = kvmppc_emulate_loadstore(vcpu);
        switch (er) {
        case EMULATE_DONE:
                /* Future optimization: only reload non-volatiles if they were
                 * actually modified. */
                r = RESUME_GUEST_NV;
                break;
        case EMULATE_AGAIN:
                r = RESUME_GUEST;
                break;
        case EMULATE_DO_MMIO:
                run->exit_reason = KVM_EXIT_MMIO;
                /* We must reload nonvolatiles because "update" load/store
                 * instructions modify register state. */
                /* Future optimization: only reload non-volatiles if they were
                 * actually modified. */
                r = RESUME_HOST_NV;
                break;
        case EMULATE_FAIL:
        {
                u32 last_inst;

                kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
                /* XXX Deliver Program interrupt to guest. */
                pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
                r = RESUME_HOST;
                break;
        }
        default:
                WARN_ON(1);
                r = RESUME_GUEST;
        }

        return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);

int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
              bool data)
{
        ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
        struct kvmppc_pte pte;
        int r;

        vcpu->stat.st++;

        r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
                         XLATE_WRITE, &pte);
        if (r < 0)
                return r;

        *eaddr = pte.raddr;

        if (!pte.may_write)
                return -EPERM;

        /* Magic page override */
        if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
            ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
            !(kvmppc_get_msr(vcpu) & MSR_PR)) {
                void *magic = vcpu->arch.shared;
                magic += pte.eaddr & 0xfff;
                memcpy(magic, ptr, size);
                return EMULATE_DONE;
        }

        if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
                return EMULATE_DO_MMIO;

        return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
              bool data)
{
        ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
        struct kvmppc_pte pte;
        int rc;

        vcpu->stat.ld++;

        rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
                          XLATE_READ, &pte);
        if (rc)
                return rc;

        *eaddr = pte.raddr;

        if (!pte.may_read)
                return -EPERM;

        if (!data && !pte.may_execute)
                return -ENOEXEC;

        /* Magic page override */
        if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
            ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
            !(kvmppc_get_msr(vcpu) & MSR_PR)) {
                void *magic = vcpu->arch.shared;
                magic += pte.eaddr & 0xfff;
                memcpy(ptr, magic, size);
                return EMULATE_DONE;
        }

        if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
                return EMULATE_DO_MMIO;

        return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);

int kvm_arch_hardware_enable(void)
{
        return 0;
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_check_processor_compat(void *rtn)
{
        *(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        struct kvmppc_ops *kvm_ops = NULL;
        /*
         * if we have both HV and PR enabled, default is HV
         */
        if (type == 0) {
                if (kvmppc_hv_ops)
                        kvm_ops = kvmppc_hv_ops;
                else
                        kvm_ops = kvmppc_pr_ops;
                if (!kvm_ops)
                        goto err_out;
        } else if (type == KVM_VM_PPC_HV) {
                if (!kvmppc_hv_ops)
                        goto err_out;
                kvm_ops = kvmppc_hv_ops;
        } else if (type == KVM_VM_PPC_PR) {
                if (!kvmppc_pr_ops)
                        goto err_out;
                kvm_ops = kvmppc_pr_ops;
        } else
                goto err_out;

        if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
                return -ENOENT;

        kvm->arch.kvm_ops = kvm_ops;
        return kvmppc_core_init_vm(kvm);
err_out:
        return -EINVAL;
}
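
/*
 * Userspace sketch (illustrative, standard KVM API): the VM type checked
 * above arrives as the KVM_CREATE_VM ioctl argument, e.g.
 *
 *      int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_PPC_HV);
 *
 * where a type of 0 picks the default (HV if available, else PR).
 */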

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_free(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);

        kvmppc_core_destroy_vm(kvm);

        mutex_unlock(&kvm->lock);

        /* drop the module reference */
        module_put(kvm->arch.kvm_ops->owner);
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
        int r;
        /* Assume we're using HV mode when the HV module is loaded */
        int hv_enabled = kvmppc_hv_ops ? 1 : 0;

        if (kvm) {
                /*
                 * Hooray - we know which VM type we're running on. Depend on
                 * that rather than the guess above.
                 */
                hv_enabled = is_kvmppc_hv_enabled(kvm);
        }

        switch (ext) {
#ifdef CONFIG_BOOKE
        case KVM_CAP_PPC_BOOKE_SREGS:
        case KVM_CAP_PPC_BOOKE_WATCHDOG:
        case KVM_CAP_PPC_EPR:
#else
        case KVM_CAP_PPC_SEGSTATE:
        case KVM_CAP_PPC_HIOR:
        case KVM_CAP_PPC_PAPR:
#endif
        case KVM_CAP_PPC_UNSET_IRQ:
        case KVM_CAP_PPC_IRQ_LEVEL:
        case KVM_CAP_ENABLE_CAP:
        case KVM_CAP_ENABLE_CAP_VM:
        case KVM_CAP_ONE_REG:
        case KVM_CAP_IOEVENTFD:
        case KVM_CAP_DEVICE_CTRL:
                r = 1;
                break;
        case KVM_CAP_PPC_PAIRED_SINGLES:
        case KVM_CAP_PPC_OSI:
        case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
        case KVM_CAP_SW_TLB:
#endif
                /* We support this only for PR */
                r = !hv_enabled;
                break;
#ifdef CONFIG_KVM_MMIO
        case KVM_CAP_COALESCED_MMIO:
                r = KVM_COALESCED_MMIO_PAGE_OFFSET;
                break;
#endif
#ifdef CONFIG_KVM_MPIC
        case KVM_CAP_IRQ_MPIC:
                r = 1;
                break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
        case KVM_CAP_SPAPR_TCE:
        case KVM_CAP_PPC_ALLOC_HTAB:
        case KVM_CAP_PPC_RTAS:
        case KVM_CAP_PPC_FIXUP_HCALL:
        case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
        case KVM_CAP_IRQ_XICS:
#endif
                r = 1;
                break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        case KVM_CAP_PPC_SMT:
                if (hv_enabled)
                        r = threads_per_subcore;
                else
                        r = 0;
                break;
        case KVM_CAP_PPC_RMA:
                r = 0;
                break;
#endif
        case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
                r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
                r = 1;
#else
                r = 0;
#endif
                break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        case KVM_CAP_PPC_HTAB_FD:
                r = hv_enabled;
                break;
#endif
        case KVM_CAP_NR_VCPUS:
                /*
                 * Recommending a number of CPUs is somewhat arbitrary; we
                 * return the number of present CPUs for -HV (since a host
                 * will have secondary threads "offline"), and for other KVM
                 * implementations just count online CPUs.
                 */
                if (hv_enabled)
                        r = num_present_cpus();
                else
                        r = num_online_cpus();
                break;
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
#ifdef CONFIG_PPC_BOOK3S_64
        case KVM_CAP_PPC_GET_SMMU_INFO:
                r = 1;
                break;
#endif
        default:
                r = 0;
                break;
        }
        return r;
}
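
/*
 * Userspace sketch (illustrative, standard KVM API): these capabilities
 * are probed with the KVM_CHECK_EXTENSION ioctl, e.g.
 *
 *      int max_vcpus = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS);
 *
 * and, where KVM_CAP_CHECK_EXTENSION_VM is available, on the VM fd so the
 * answer reflects the actual VM type (HV vs. PR) rather than the guess
 * above.
 */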

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont)
{
        kvmppc_core_free_memslot(kvm, free, dont);
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
                            unsigned long npages)
{
        return kvmppc_core_create_memslot(kvm, slot, npages);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_userspace_memory_region *mem,
                                   enum kvm_mr_change change)
{
        return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   const struct kvm_memory_slot *old,
                                   enum kvm_mr_change change)
{
        kvmppc_core_commit_memory_region(kvm, mem, old);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot)
{
        kvmppc_core_flush_memslot(kvm, slot);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
        struct kvm_vcpu *vcpu;
        vcpu = kvmppc_core_vcpu_create(kvm, id);
        if (!IS_ERR(vcpu)) {
                vcpu->arch.wqp = &vcpu->wq;
                kvmppc_create_vcpu_debugfs(vcpu, id);
        }
        return vcpu;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
        /* Make sure we're not using the vcpu anymore */
        hrtimer_cancel(&vcpu->arch.dec_timer);

        kvmppc_remove_vcpu_debugfs(vcpu);

        switch (vcpu->arch.irq_type) {
        case KVMPPC_IRQ_MPIC:
                kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
                break;
        case KVMPPC_IRQ_XICS:
                kvmppc_xics_free_icp(vcpu);
                break;
        }

        kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return kvmppc_core_pending_dec(vcpu);
}

enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
        struct kvm_vcpu *vcpu;

        vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
        kvmppc_decrementer_func(vcpu);

        return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        int ret;

        hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
        vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
        mutex_init(&vcpu->arch.exit_timing_lock);
#endif
        ret = kvmppc_subarch_vcpu_init(vcpu);
        return ret;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvmppc_mmu_destroy(vcpu);
        kvmppc_subarch_vcpu_uninit(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
        /*
         * vrsave (formerly usprg0) isn't used by Linux, but may
         * be used by the guest.
         *
         * On non-booke this is associated with Altivec and
         * is handled by code in book3s.c.
         */
        mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
        kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
        vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
        u64 uninitialized_var(gpr);

        if (run->mmio.len > sizeof(gpr)) {
                printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
                return;
        }

        if (vcpu->arch.mmio_is_bigendian) {
                switch (run->mmio.len) {
                case 8: gpr = *(u64 *)run->mmio.data; break;
                case 4: gpr = *(u32 *)run->mmio.data; break;
                case 2: gpr = *(u16 *)run->mmio.data; break;
                case 1: gpr = *(u8 *)run->mmio.data; break;
                }
        } else {
                /* Convert BE data from userland back to LE. */
                switch (run->mmio.len) {
                case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
                case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
                case 1: gpr = *(u8 *)run->mmio.data; break;
                }
        }

        if (vcpu->arch.mmio_sign_extend) {
                switch (run->mmio.len) {
#ifdef CONFIG_PPC64
                case 4:
                        gpr = (s64)(s32)gpr;
                        break;
#endif
                case 2:
                        gpr = (s64)(s16)gpr;
                        break;
                case 1:
                        gpr = (s64)(s8)gpr;
                        break;
                }
        }

        kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

        switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
        case KVM_MMIO_REG_GPR:
                kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
                break;
        case KVM_MMIO_REG_FPR:
                VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
                break;
#ifdef CONFIG_PPC_BOOK3S
        case KVM_MMIO_REG_QPR:
                vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
                break;
        case KVM_MMIO_REG_FQPR:
                VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
                vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
                break;
#endif
        default:
                BUG();
        }
}

int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int rt, unsigned int bytes,
                       int is_default_endian)
{
        int idx, ret;
        int is_bigendian;

        if (kvmppc_need_byteswap(vcpu)) {
                /* Default endianness is "little endian". */
                is_bigendian = !is_default_endian;
        } else {
                /* Default endianness is "big endian". */
                is_bigendian = is_default_endian;
        }

        if (bytes > sizeof(run->mmio.data)) {
                printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
                       bytes);
        }

        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
        run->mmio.len = bytes;
        run->mmio.is_write = 0;

        vcpu->arch.io_gpr = rt;
        vcpu->arch.mmio_is_bigendian = is_bigendian;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 0;
        vcpu->arch.mmio_sign_extend = 0;

        idx = srcu_read_lock(&vcpu->kvm->srcu);

        ret = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
                              bytes, &run->mmio.data);

        srcu_read_unlock(&vcpu->kvm->srcu, idx);

        if (!ret) {
                kvmppc_complete_mmio_load(vcpu, run);
                vcpu->mmio_needed = 0;
                return EMULATE_DONE;
        }

        return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        unsigned int rt, unsigned int bytes,
                        int is_default_endian)
{
        int r;

        vcpu->arch.mmio_sign_extend = 1;
        r = kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian);

        return r;
}

int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        u64 val, unsigned int bytes, int is_default_endian)
{
        void *data = run->mmio.data;
        int idx, ret;
        int is_bigendian;

        if (kvmppc_need_byteswap(vcpu)) {
                /* Default endianness is "little endian". */
                is_bigendian = !is_default_endian;
        } else {
                /* Default endianness is "big endian". */
                is_bigendian = is_default_endian;
        }

        if (bytes > sizeof(run->mmio.data)) {
                printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
                       bytes);
        }

        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
        run->mmio.len = bytes;
        run->mmio.is_write = 1;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 1;

        /* Store the value at the lowest bytes in 'data'. */
        if (is_bigendian) {
                switch (bytes) {
                case 8: *(u64 *)data = val; break;
                case 4: *(u32 *)data = val; break;
                case 2: *(u16 *)data = val; break;
                case 1: *(u8  *)data = val; break;
                }
        } else {
                /* Store LE value into 'data'. */
                switch (bytes) {
                case 4: st_le32(data, val); break;
                case 2: st_le16(data, val); break;
                case 1: *(u8 *)data = val; break;
                }
        }

        idx = srcu_read_lock(&vcpu->kvm->srcu);

        ret = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
                               bytes, &run->mmio.data);

        srcu_read_unlock(&vcpu->kvm->srcu, idx);

        if (!ret) {
                vcpu->mmio_needed = 0;
                return EMULATE_DONE;
        }

        return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);
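
/*
 * Userspace sketch (illustrative): when these helpers return
 * EMULATE_DO_MMIO, KVM_RUN exits with KVM_EXIT_MMIO and userspace
 * completes the access through kvm_run->mmio before the next KVM_RUN:
 *
 *      if (run->exit_reason == KVM_EXIT_MMIO)
 *              emulate_device(run->mmio.phys_addr, run->mmio.data,
 *                             run->mmio.len, run->mmio.is_write);
 *
 * (emulate_device() is a hypothetical VMM routine.)
 */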

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
        int r = 0;
        union kvmppc_one_reg val;
        int size;

        size = one_reg_size(reg->id);
        if (size > sizeof(val))
                return -EINVAL;

        r = kvmppc_get_one_reg(vcpu, reg->id, &val);
        if (r == -EINVAL) {
                r = 0;
                switch (reg->id) {
#ifdef CONFIG_ALTIVEC
                case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
                        if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
                                r = -ENXIO;
                                break;
                        }
                        val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
                        break;
                case KVM_REG_PPC_VSCR:
                        if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
                                r = -ENXIO;
                                break;
                        }
                        val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
                        break;
                case KVM_REG_PPC_VRSAVE:
                        val = get_reg_val(reg->id, vcpu->arch.vrsave);
                        break;
#endif /* CONFIG_ALTIVEC */
                default:
                        r = -EINVAL;
                        break;
                }
        }

        if (r)
                return r;

        if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
                r = -EFAULT;

        return r;
}
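
/*
 * Userspace sketch (illustrative, standard KVM ONE_REG API): a register is
 * read or written by id through KVM_GET_ONE_REG / KVM_SET_ONE_REG:
 *
 *      __u64 vrsave;
 *      struct kvm_one_reg reg = {
 *              .id   = KVM_REG_PPC_VRSAVE,
 *              .addr = (__u64)(uintptr_t)&vrsave,
 *      };
 *      ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */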

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
        int r;
        union kvmppc_one_reg val;
        int size;

        size = one_reg_size(reg->id);
        if (size > sizeof(val))
                return -EINVAL;

        if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
                return -EFAULT;

        r = kvmppc_set_one_reg(vcpu, reg->id, &val);
        if (r == -EINVAL) {
                r = 0;
                switch (reg->id) {
#ifdef CONFIG_ALTIVEC
                case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
                        if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
                                r = -ENXIO;
                                break;
                        }
                        vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
                        break;
                case KVM_REG_PPC_VSCR:
                        if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
                                r = -ENXIO;
                                break;
                        }
                        vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
                        break;
                case KVM_REG_PPC_VRSAVE:
                        if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
                                r = -ENXIO;
                                break;
                        }
                        vcpu->arch.vrsave = set_reg_val(reg->id, val);
                        break;
#endif /* CONFIG_ALTIVEC */
                default:
                        r = -EINVAL;
                        break;
                }
        }

        return r;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        int r;
        sigset_t sigsaved;

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        if (vcpu->mmio_needed) {
                if (!vcpu->mmio_is_write)
                        kvmppc_complete_mmio_load(vcpu, run);
                vcpu->mmio_needed = 0;
        } else if (vcpu->arch.osi_needed) {
                u64 *gprs = run->osi.gprs;
                int i;

                for (i = 0; i < 32; i++)
                        kvmppc_set_gpr(vcpu, i, gprs[i]);
                vcpu->arch.osi_needed = 0;
        } else if (vcpu->arch.hcall_needed) {
                int i;

                kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
                for (i = 0; i < 9; ++i)
                        kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
                vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
        } else if (vcpu->arch.epr_needed) {
                kvmppc_set_epr(vcpu, run->epr.epr);
                vcpu->arch.epr_needed = 0;
#endif
        }

        r = kvmppc_vcpu_run(run, vcpu);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
        if (irq->irq == KVM_INTERRUPT_UNSET) {
                kvmppc_core_dequeue_external(vcpu);
                return 0;
        }

        kvmppc_core_queue_external(vcpu, irq);

        kvm_vcpu_kick(vcpu);

        return 0;
}
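
/*
 * Userspace sketch (illustrative, standard KVM API): external interrupts
 * are queued and dequeued from the VMM via the KVM_INTERRUPT vcpu ioctl:
 *
 *      struct kvm_interrupt irq = { .irq = vector };
 *      ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
 *      irq.irq = KVM_INTERRUPT_UNSET;
 *      ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
 */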

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                     struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        case KVM_CAP_PPC_OSI:
                r = 0;
                vcpu->arch.osi_enabled = true;
                break;
        case KVM_CAP_PPC_PAPR:
                r = 0;
                vcpu->arch.papr_enabled = true;
                break;
        case KVM_CAP_PPC_EPR:
                r = 0;
                if (cap->args[0])
                        vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
                else
                        vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
                break;
#ifdef CONFIG_BOOKE
        case KVM_CAP_PPC_BOOKE_WATCHDOG:
                r = 0;
                vcpu->arch.watchdog_enabled = true;
                break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
        case KVM_CAP_SW_TLB: {
                struct kvm_config_tlb cfg;
                void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

                r = -EFAULT;
                if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
                        break;

                r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
                break;
        }
#endif
#ifdef CONFIG_KVM_MPIC
        case KVM_CAP_IRQ_MPIC: {
                struct fd f;
                struct kvm_device *dev;

                r = -EBADF;
                f = fdget(cap->args[0]);
                if (!f.file)
                        break;

                r = -EPERM;
                dev = kvm_device_from_filp(f.file);
                if (dev)
                        r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

                fdput(f);
                break;
        }
#endif
#ifdef CONFIG_KVM_XICS
        case KVM_CAP_IRQ_XICS: {
                struct fd f;
                struct kvm_device *dev;

                r = -EBADF;
                f = fdget(cap->args[0]);
                if (!f.file)
                        break;

                r = -EPERM;
                dev = kvm_device_from_filp(f.file);
                if (dev)
                        r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);

                fdput(f);
                break;
        }
#endif /* CONFIG_KVM_XICS */
        default:
                r = -EINVAL;
                break;
        }

        if (!r)
                r = kvmppc_sanity_check(vcpu);

        return r;
}
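
/*
 * Userspace sketch (illustrative, standard KVM API): per-vcpu capabilities
 * are switched on with the KVM_ENABLE_CAP vcpu ioctl, e.g. to put a vcpu
 * into PAPR mode:
 *
 *      struct kvm_enable_cap cap = { .cap = KVM_CAP_PPC_PAPR };
 *      ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 */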

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_INTERRUPT: {
                struct kvm_interrupt irq;
                r = -EFAULT;
                if (copy_from_user(&irq, argp, sizeof(irq)))
                        goto out;
                r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
                goto out;
        }

        case KVM_ENABLE_CAP:
        {
                struct kvm_enable_cap cap;
                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        goto out;
                r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
                break;
        }

        case KVM_SET_ONE_REG:
        case KVM_GET_ONE_REG:
        {
                struct kvm_one_reg reg;
                r = -EFAULT;
                if (copy_from_user(&reg, argp, sizeof(reg)))
                        goto out;
                if (ioctl == KVM_SET_ONE_REG)
                        r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
                else
                        r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
                break;
        }

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
        case KVM_DIRTY_TLB: {
                struct kvm_dirty_tlb dirty;
                r = -EFAULT;
                if (copy_from_user(&dirty, argp, sizeof(dirty)))
                        goto out;
                r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
                break;
        }
#endif
        default:
                r = -EINVAL;
        }

out:
        return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;
}

static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
        u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
        u32 inst_sc1 = 0x44000022;
        pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
        pvinfo->hcall[1] = cpu_to_be32(inst_nop);
        pvinfo->hcall[2] = cpu_to_be32(inst_nop);
        pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
        u32 inst_lis = 0x3c000000;
        u32 inst_ori = 0x60000000;
        u32 inst_sc = 0x44000002;
        u32 inst_imm_mask = 0xffff;

        /*
         * The hypercall to get into KVM from within guest context is as
         * follows:
         *
         *    lis r0, KVM_SC_MAGIC_R0@h
         *    ori r0, r0, KVM_SC_MAGIC_R0@l
         *    sc
         *    nop
         */
        pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
        pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
        pvinfo->hcall[2] = cpu_to_be32(inst_sc);
        pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif

        pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

        return 0;
}
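
/*
 * Userspace sketch (illustrative, standard KVM API): a VMM fetches this
 * sequence with the KVM_PPC_GET_PVINFO vm ioctl and patches the four
 * instructions into the guest's hypercall stub:
 *
 *      struct kvm_ppc_pvinfo pvinfo;
 *      ioctl(vm_fd, KVM_PPC_GET_PVINFO, &pvinfo);
 */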

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
                          bool line_status)
{
        if (!irqchip_in_kernel(kvm))
                return -ENXIO;

        irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
                                        irq_event->irq, irq_event->level,
                                        line_status);
        return 0;
}


static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
                                   struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
        case KVM_CAP_PPC_ENABLE_HCALL: {
                unsigned long hcall = cap->args[0];

                r = -EINVAL;
                if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
                    cap->args[1] > 1)
                        break;
                if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
                        break;
                if (cap->args[1])
                        set_bit(hcall / 4, kvm->arch.enabled_hcalls);
                else
                        clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
                r = 0;
                break;
        }
#endif
        default:
                r = -EINVAL;
                break;
        }

        return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm __maybe_unused = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_PPC_GET_PVINFO: {
                struct kvm_ppc_pvinfo pvinfo;
                memset(&pvinfo, 0, sizeof(pvinfo));
                r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
                if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
                        r = -EFAULT;
                        goto out;
                }

                break;
        }
        case KVM_ENABLE_CAP:
        {
                struct kvm_enable_cap cap;
                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        goto out;
                r = kvm_vm_ioctl_enable_cap(kvm, &cap);
                break;
        }
#ifdef CONFIG_PPC_BOOK3S_64
        case KVM_CREATE_SPAPR_TCE: {
                struct kvm_create_spapr_tce create_tce;

                r = -EFAULT;
                if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
                        goto out;
                r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
                goto out;
        }
        case KVM_PPC_GET_SMMU_INFO: {
                struct kvm_ppc_smmu_info info;
                struct kvm *kvm = filp->private_data;

                memset(&info, 0, sizeof(info));
                r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
                if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
                        r = -EFAULT;
                break;
        }
        case KVM_PPC_RTAS_DEFINE_TOKEN: {
                struct kvm *kvm = filp->private_data;

                r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
                break;
        }
        default: {
                struct kvm *kvm = filp->private_data;
                r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
        }
#else /* CONFIG_PPC_BOOK3S_64 */
        default:
                r = -ENOTTY;
#endif
        }
out:
        return r;
}

static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
        long lpid;

        do {
                lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
                if (lpid >= nr_lpids) {
                        pr_err("%s: No LPIDs free\n", __func__);
                        return -ENOMEM;
                }
        } while (test_and_set_bit(lpid, lpid_inuse));

        return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);
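
/*
 * Illustrative usage (assumed; the actual callers are in subarch code):
 * HV-style implementations take one LPID per VM and release it at VM
 * teardown:
 *
 *      long lpid = kvmppc_alloc_lpid();
 *      if (lpid < 0)
 *              return lpid;
 *      ...
 *      kvmppc_free_lpid(lpid);
 */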

void kvmppc_claim_lpid(long lpid)
{
        set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);

void kvmppc_free_lpid(long lpid)
{
        clear_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
        nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
        memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);

int kvm_arch_init(void *opaque)
{
        return 0;
}

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);