linux/arch/powerpc/kvm/powerpc.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include "timing.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

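/*
 * A vcpu is runnable unless it is waiting for an interrupt (MSR[WE]
 * set) with no exceptions pending to wake it up.
 */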
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
        return !(v->arch.shared->msr & MSR_WE) ||
               !!(v->arch.pending_exceptions);
}

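/*
 * KVM paravirtual hypercall handler. The guest passes the hypercall
 * number in r11 and up to four arguments in r3-r6; the secondary
 * return value is written to r4 here, while the primary status (this
 * function's return value) is placed in the guest's r3 by the caller.
 */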
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
        int nr = kvmppc_get_gpr(vcpu, 11);
        int r;
        unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
        unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
        unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
        unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
        unsigned long r2 = 0;

        if (!(vcpu->arch.shared->msr & MSR_SF)) {
                /* 32 bit mode */
                param1 &= 0xffffffff;
                param2 &= 0xffffffff;
                param3 &= 0xffffffff;
                param4 &= 0xffffffff;
        }

        switch (nr) {
        case HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE:
        {
                vcpu->arch.magic_page_pa = param1;
                vcpu->arch.magic_page_ea = param2;

                r2 = KVM_MAGIC_FEAT_SR;

                r = HC_EV_SUCCESS;
                break;
        }
        case HC_VENDOR_KVM | KVM_HC_FEATURES:
                r = HC_EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) /* XXX Missing magic page on BookE */
                r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

                /* Second return value is in r4 */
                break;
        default:
                r = HC_EV_UNIMPLEMENTED;
                break;
        }

        kvmppc_set_gpr(vcpu, 4, r2);

        return r;
}

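/*
 * Emulate a trapped instruction. EMULATE_DONE lets the guest resume
 * directly; EMULATE_DO_MMIO bounces the access out to userspace via an
 * MMIO exit; EMULATE_FAIL currently just logs the failing instruction
 * and resumes in the host.
 */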
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        enum emulation_result er;
        int r;

        er = kvmppc_emulate_instruction(run, vcpu);
        switch (er) {
        case EMULATE_DONE:
                /* Future optimization: only reload non-volatiles if they were
                 * actually modified. */
                r = RESUME_GUEST_NV;
                break;
        case EMULATE_DO_MMIO:
                run->exit_reason = KVM_EXIT_MMIO;
                /* We must reload nonvolatiles because "update" load/store
                 * instructions modify register state. */
                /* Future optimization: only reload non-volatiles if they were
                 * actually modified. */
                r = RESUME_HOST_NV;
                break;
        case EMULATE_FAIL:
                /* XXX Deliver Program interrupt to guest. */
                printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
                       kvmppc_get_last_inst(vcpu));
                r = RESUME_HOST;
                break;
        default:
                BUG();
        }

        return r;
}

int kvm_arch_hardware_enable(void *garbage)
{
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
        *(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm)
{
        return 0;
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_free(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);
        mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

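/*
 * Report which optional KVM capabilities this architecture supports.
 * For KVM_CAP_COALESCED_MMIO the returned value is the page offset of
 * the coalesced MMIO ring rather than a simple boolean.
 */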
int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_PPC_SEGSTATE:
        case KVM_CAP_PPC_PAIRED_SINGLES:
        case KVM_CAP_PPC_UNSET_IRQ:
        case KVM_CAP_PPC_IRQ_LEVEL:
        case KVM_CAP_ENABLE_CAP:
        case KVM_CAP_PPC_OSI:
        case KVM_CAP_PPC_GET_PVINFO:
                r = 1;
                break;
        case KVM_CAP_COALESCED_MMIO:
                r = KVM_COALESCED_MMIO_PAGE_OFFSET;
                break;
        default:
                r = 0;
                break;
        }
        return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        return -EINVAL;
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_memory_slot old,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
        return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   struct kvm_memory_slot old,
                                   int user_alloc)
{
        return;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
        struct kvm_vcpu *vcpu;
        vcpu = kvmppc_core_vcpu_create(kvm, id);
        if (!IS_ERR(vcpu))
                kvmppc_create_vcpu_debugfs(vcpu, id);
        return vcpu;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
        /* Make sure we're not using the vcpu anymore */
        hrtimer_cancel(&vcpu->arch.dec_timer);
        tasklet_kill(&vcpu->arch.tasklet);

        kvmppc_remove_vcpu_debugfs(vcpu);
        kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return kvmppc_core_pending_dec(vcpu);
}

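/*
 * Tasklet body for the decrementer: queue a decrementer exception for
 * the vcpu and wake it up if it is sleeping on its wait queue.
 */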
static void kvmppc_decrementer_func(unsigned long data)
{
        struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

        kvmppc_core_queue_dec(vcpu);

        if (waitqueue_active(&vcpu->wq)) {
                wake_up_interruptible(&vcpu->wq);
                vcpu->stat.halt_wakeup++;
        }
}

/*
 * Low-level hrtimer wake routine. Because this runs in hardirq
 * context, we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
        struct kvm_vcpu *vcpu;

        vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
        tasklet_schedule(&vcpu->arch.tasklet);

        return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
        vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;

        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvmppc_mmu_destroy(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        kvmppc_core_vcpu_put(vcpu);
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL;
}

static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run)
{
        kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}

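/*
 * Complete an MMIO load after userspace has filled in run->mmio.data:
 * pick up the value at the right width, byte-swap it if the access was
 * little-endian, optionally sign-extend it, and write it back to the
 * register the faulting instruction targeted.
 */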
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
        u64 uninitialized_var(gpr);

        if (run->mmio.len > sizeof(gpr)) {
                printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
                return;
        }

        if (vcpu->arch.mmio_is_bigendian) {
                switch (run->mmio.len) {
                case 8: gpr = *(u64 *)run->mmio.data; break;
                case 4: gpr = *(u32 *)run->mmio.data; break;
                case 2: gpr = *(u16 *)run->mmio.data; break;
                case 1: gpr = *(u8 *)run->mmio.data; break;
                }
        } else {
                /* Convert BE data from userland back to LE. */
                switch (run->mmio.len) {
                case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
                case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
                case 1: gpr = *(u8 *)run->mmio.data; break;
                }
        }

        if (vcpu->arch.mmio_sign_extend) {
                switch (run->mmio.len) {
#ifdef CONFIG_PPC64
                case 4:
                        gpr = (s64)(s32)gpr;
                        break;
#endif
                case 2:
                        gpr = (s64)(s16)gpr;
                        break;
                case 1:
                        gpr = (s64)(s8)gpr;
                        break;
                }
        }

        kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

        switch (vcpu->arch.io_gpr & KVM_REG_EXT_MASK) {
        case KVM_REG_GPR:
                kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
                break;
        case KVM_REG_FPR:
                vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
                break;
#ifdef CONFIG_PPC_BOOK3S
        case KVM_REG_QPR:
                vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
                break;
        case KVM_REG_FQPR:
                vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
                vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
                break;
#endif
        default:
                BUG();
        }
}

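/*
 * Set up an MMIO load exit to userspace. The access parameters are
 * recorded in the run structure and in the vcpu so that
 * kvmppc_complete_mmio_load() can finish the load on the next
 * KVM_RUN; the sign-extend flag is cleared here and set only by
 * kvmppc_handle_loads() below.
 */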
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int rt, unsigned int bytes, int is_bigendian)
{
        if (bytes > sizeof(run->mmio.data)) {
                /* Report the requested size; run->mmio.len is not set yet. */
                printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__, bytes);
        }

        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
        run->mmio.len = bytes;
        run->mmio.is_write = 0;

        vcpu->arch.io_gpr = rt;
        vcpu->arch.mmio_is_bigendian = is_bigendian;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 0;
        vcpu->arch.mmio_sign_extend = 0;

        return EMULATE_DO_MMIO;
}

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        unsigned int rt, unsigned int bytes, int is_bigendian)
{
        int r;

        r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);
        vcpu->arch.mmio_sign_extend = 1;

        return r;
}

int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        u64 val, unsigned int bytes, int is_bigendian)
{
        void *data = run->mmio.data;

        if (bytes > sizeof(run->mmio.data)) {
                /* Report the requested size; run->mmio.len is not set yet. */
                printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__, bytes);
        }

        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
        run->mmio.len = bytes;
        run->mmio.is_write = 1;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 1;

        /* Store the value at the lowest bytes in 'data'. */
        if (is_bigendian) {
                switch (bytes) {
                case 8: *(u64 *)data = val; break;
                case 4: *(u32 *)data = val; break;
                case 2: *(u16 *)data = val; break;
                case 1: *(u8  *)data = val; break;
                }
        } else {
                /* Store LE value into 'data'. */
                switch (bytes) {
                case 4: st_le32(data, val); break;
                case 2: st_le16(data, val); break;
                case 1: *(u8 *)data = val; break;
                }
        }

        return EMULATE_DO_MMIO;
}

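/*
 * Main vcpu run entry point. Before entering the guest, complete any
 * MMIO, DCR or OSI operation that caused the previous exit, deliver
 * pending interrupts, then call the low-level guest entry path with
 * interrupts disabled.
 */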
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        int r;
        sigset_t sigsaved;

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        if (vcpu->mmio_needed) {
                if (!vcpu->mmio_is_write)
                        kvmppc_complete_mmio_load(vcpu, run);
                vcpu->mmio_needed = 0;
        } else if (vcpu->arch.dcr_needed) {
                if (!vcpu->arch.dcr_is_write)
                        kvmppc_complete_dcr_load(vcpu, run);
                vcpu->arch.dcr_needed = 0;
        } else if (vcpu->arch.osi_needed) {
                u64 *gprs = run->osi.gprs;
                int i;

                for (i = 0; i < 32; i++)
                        kvmppc_set_gpr(vcpu, i, gprs[i]);
                vcpu->arch.osi_needed = 0;
        }

        kvmppc_core_deliver_interrupts(vcpu);

        local_irq_disable();
        kvm_guest_enter();
        r = __kvmppc_vcpu_run(run, vcpu);
        kvm_guest_exit();
        local_irq_enable();

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
        if (irq->irq == KVM_INTERRUPT_UNSET)
                kvmppc_core_dequeue_external(vcpu, irq);
        else
                kvmppc_core_queue_external(vcpu, irq);

        if (waitqueue_active(&vcpu->wq)) {
                wake_up_interruptible(&vcpu->wq);
                vcpu->stat.halt_wakeup++;
        }

        return 0;
}

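/*
 * Enable an optional, userspace-requested capability on a vcpu.
 * Only KVM_CAP_PPC_OSI is currently recognized here.
 */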
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                     struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        case KVM_CAP_PPC_OSI:
                r = 0;
                vcpu->arch.osi_enabled = true;
                break;
        default:
                r = -EINVAL;
                break;
        }

        return r;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_INTERRUPT: {
                struct kvm_interrupt irq;
                r = -EFAULT;
                if (copy_from_user(&irq, argp, sizeof(irq)))
                        goto out;
                r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
                goto out;
        }
        case KVM_ENABLE_CAP:
        {
                struct kvm_enable_cap cap;
                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        goto out;
                r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
                break;
        }
        default:
                r = -EINVAL;
        }

out:
        return r;
}

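/*
 * Fill in the instruction sequence that a guest should use to issue
 * KVM hypercalls; userspace retrieves it via KVM_PPC_GET_PVINFO so it
 * can be patched into the guest.
 */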
static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
        u32 inst_lis = 0x3c000000;
        u32 inst_ori = 0x60000000;
        u32 inst_nop = 0x60000000;
        u32 inst_sc = 0x44000002;
        u32 inst_imm_mask = 0xffff;

        /*
         * The hypercall to get into KVM from within guest context is as
         * follows:
         *
         *    lis r0, KVM_SC_MAGIC_R0@h
         *    ori r0, r0, KVM_SC_MAGIC_R0@l
         *    sc
         *    nop
         */
        pvinfo->hcall[0] = inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask);
        pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask);
        pvinfo->hcall[2] = inst_sc;
        pvinfo->hcall[3] = inst_nop;

        return 0;
}
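/*
 * A minimal userspace sketch (hypothetical; assumes a VM fd "vm_fd"
 * obtained via KVM_CREATE_VM and the definitions from <linux/kvm.h>):
 *
 *      struct kvm_ppc_pvinfo pvinfo;
 *
 *      if (ioctl(vm_fd, KVM_PPC_GET_PVINFO, &pvinfo) == 0)
 *              memcpy(hcall_stub, pvinfo.hcall, sizeof(pvinfo.hcall));
 *
 * where "hcall_stub" stands for wherever the guest expects its
 * hypercall trampoline to live.
 */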

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_PPC_GET_PVINFO: {
                struct kvm_ppc_pvinfo pvinfo;
                memset(&pvinfo, 0, sizeof(pvinfo));
                r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
                if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
                        r = -EFAULT;
                        goto out;
                }

                break;
        }
        default:
                r = -ENOTTY;
        }

out:
        return r;
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}