linux/arch/powerpc/kvm/book3s_pr.c
/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Description:
 * Functions relating to running KVM on Book 3S processors where
 * we don't have access to hypervisor mode, and we run the guest
 * in problem state (user mode).
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <asm/firmware.h>
#include <asm/setup.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/miscdevice.h>

#include "book3s.h"

#define CREATE_TRACE_POINTS
#include "trace_pr.h"

/* #define EXIT_DEBUG */
/* #define DEBUG_EXT */

static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
                             ulong msr);
static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);

/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#endif

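/*
 * "Split real mode" is the state where the guest runs with instruction
 * relocation off but data relocation on (MSR_IR = 0, MSR_DR = 1), as
 * firmware and early boot code commonly do. The fixup below moves the
 * guest PC up into a dedicated window (SPLIT_HACK_OFFS) so that shadow
 * MMU mappings for these real-mode instruction fetches don't collide
 * with the guest's translated data addresses; the fixup is undone again
 * whenever the guest leaves this mode.
 */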
static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu)
{
        ulong msr = kvmppc_get_msr(vcpu);
        return (msr & (MSR_IR|MSR_DR)) == MSR_DR;
}

static void kvmppc_fixup_split_real(struct kvm_vcpu *vcpu)
{
        ulong msr = kvmppc_get_msr(vcpu);
        ulong pc = kvmppc_get_pc(vcpu);

        /* We are in DR only split real mode */
        if ((msr & (MSR_IR|MSR_DR)) != MSR_DR)
                return;

        /* Only fix up the guest once */
        if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK)
                return;

        /* The code is in fixupable address space */
        if (pc & SPLIT_HACK_MASK)
                return;

        vcpu->arch.hflags |= BOOK3S_HFLAG_SPLIT_HACK;
        kvmppc_set_pc(vcpu, pc | SPLIT_HACK_OFFS);
}

void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu);

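/*
 * Per-CPU setup that must happen every time a vcpu is scheduled onto a
 * core: refresh the shadow SLB from the vcpu, and (on hosts that have
 * it) turn off AIL, the Alternate Interrupt Location, so that interrupts
 * taken while the guest runs arrive at the real-mode vectors the PR-KVM
 * entry/exit code hooks into.
 */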
static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
        svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
        svcpu->in_use = 0;
        svcpu_put(svcpu);
#endif

        /* Disable AIL if supported */
        if (cpu_has_feature(CPU_FTR_HVMODE) &&
            cpu_has_feature(CPU_FTR_ARCH_207S))
                mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_AIL);

        vcpu->cpu = smp_processor_id();
#ifdef CONFIG_PPC_BOOK3S_32
        current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
#endif

        if (kvmppc_is_split_real(vcpu))
                kvmppc_fixup_split_real(vcpu);
}

static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        if (svcpu->in_use) {
                kvmppc_copy_from_svcpu(vcpu, svcpu);
        }
        memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
        to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
        svcpu_put(svcpu);
#endif

        if (kvmppc_is_split_real(vcpu))
                kvmppc_unfixup_split_real(vcpu);

        kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
        kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);

        /* Enable AIL if supported */
        if (cpu_has_feature(CPU_FTR_HVMODE) &&
            cpu_has_feature(CPU_FTR_ARCH_207S))
                mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_AIL_3);

        vcpu->cpu = -1;
}

/* Copy data needed by real-mode code from vcpu to shadow vcpu */
void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
                          struct kvm_vcpu *vcpu)
{
        svcpu->gpr[0] = vcpu->arch.gpr[0];
        svcpu->gpr[1] = vcpu->arch.gpr[1];
        svcpu->gpr[2] = vcpu->arch.gpr[2];
        svcpu->gpr[3] = vcpu->arch.gpr[3];
        svcpu->gpr[4] = vcpu->arch.gpr[4];
        svcpu->gpr[5] = vcpu->arch.gpr[5];
        svcpu->gpr[6] = vcpu->arch.gpr[6];
        svcpu->gpr[7] = vcpu->arch.gpr[7];
        svcpu->gpr[8] = vcpu->arch.gpr[8];
        svcpu->gpr[9] = vcpu->arch.gpr[9];
        svcpu->gpr[10] = vcpu->arch.gpr[10];
        svcpu->gpr[11] = vcpu->arch.gpr[11];
        svcpu->gpr[12] = vcpu->arch.gpr[12];
        svcpu->gpr[13] = vcpu->arch.gpr[13];
        svcpu->cr  = vcpu->arch.cr;
        svcpu->xer = vcpu->arch.xer;
        svcpu->ctr = vcpu->arch.ctr;
        svcpu->lr  = vcpu->arch.lr;
        svcpu->pc  = vcpu->arch.pc;
#ifdef CONFIG_PPC_BOOK3S_64
        svcpu->shadow_fscr = vcpu->arch.shadow_fscr;
#endif
        /*
         * Now also save the current time base value. We use this
         * to find the guest purr and spurr values.
         */
        vcpu->arch.entry_tb = get_tb();
        vcpu->arch.entry_vtb = get_vtb();
        if (cpu_has_feature(CPU_FTR_ARCH_207S))
                vcpu->arch.entry_ic = mfspr(SPRN_IC);
        svcpu->in_use = true;
}

/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
                            struct kvmppc_book3s_shadow_vcpu *svcpu)
{
        /*
         * vcpu_put would just call us again because in_use hasn't
         * been updated yet.
         */
        preempt_disable();

        /*
         * Maybe we were already preempted and synced the svcpu from
         * our preempt notifiers. Don't bother touching this svcpu then.
         */
        if (!svcpu->in_use)
                goto out;

        vcpu->arch.gpr[0] = svcpu->gpr[0];
        vcpu->arch.gpr[1] = svcpu->gpr[1];
        vcpu->arch.gpr[2] = svcpu->gpr[2];
        vcpu->arch.gpr[3] = svcpu->gpr[3];
        vcpu->arch.gpr[4] = svcpu->gpr[4];
        vcpu->arch.gpr[5] = svcpu->gpr[5];
        vcpu->arch.gpr[6] = svcpu->gpr[6];
        vcpu->arch.gpr[7] = svcpu->gpr[7];
        vcpu->arch.gpr[8] = svcpu->gpr[8];
        vcpu->arch.gpr[9] = svcpu->gpr[9];
        vcpu->arch.gpr[10] = svcpu->gpr[10];
        vcpu->arch.gpr[11] = svcpu->gpr[11];
        vcpu->arch.gpr[12] = svcpu->gpr[12];
        vcpu->arch.gpr[13] = svcpu->gpr[13];
        vcpu->arch.cr  = svcpu->cr;
        vcpu->arch.xer = svcpu->xer;
        vcpu->arch.ctr = svcpu->ctr;
        vcpu->arch.lr  = svcpu->lr;
        vcpu->arch.pc  = svcpu->pc;
        vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
        vcpu->arch.fault_dar   = svcpu->fault_dar;
        vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
        vcpu->arch.last_inst   = svcpu->last_inst;
#ifdef CONFIG_PPC_BOOK3S_64
        vcpu->arch.shadow_fscr = svcpu->shadow_fscr;
#endif
        /*
         * Update purr and spurr using time base on exit.
         */
        vcpu->arch.purr += get_tb() - vcpu->arch.entry_tb;
        vcpu->arch.spurr += get_tb() - vcpu->arch.entry_tb;
        to_book3s(vcpu)->vtb += get_vtb() - vcpu->arch.entry_vtb;
        if (cpu_has_feature(CPU_FTR_ARCH_207S))
                vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic;
        svcpu->in_use = false;

out:
        preempt_enable();
}

static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
{
        int r = 1; /* Indicate we want to get back into the guest */

        /* We misuse TLB_FLUSH to indicate that we want to clear
           all shadow cache entries */
        if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
                kvmppc_mmu_pte_flush(vcpu, 0, 0);

        return r;
}

/************* MMU Notifiers *************/
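/*
 * Shadow-MMU flush hooks for the host's MMU notifiers: whenever the
 * host unmaps or modifies an hva range, drop every shadow PTE that
 * could translate to a guest page backed by that range.
 */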
static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start,
                             unsigned long end)
{
        long i;
        struct kvm_vcpu *vcpu;
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        slots = kvm_memslots(kvm);
        kvm_for_each_memslot(memslot, slots) {
                unsigned long hva_start, hva_end;
                gfn_t gfn, gfn_end;

                hva_start = max(start, memslot->userspace_addr);
                hva_end = min(end, memslot->userspace_addr +
                                        (memslot->npages << PAGE_SHIFT));
                if (hva_start >= hva_end)
                        continue;
                /*
                 * {gfn(page) | page intersects with [hva_start, hva_end)} =
                 * {gfn, gfn+1, ..., gfn_end-1}.
                 */
                gfn = hva_to_gfn_memslot(hva_start, memslot);
                gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
                kvm_for_each_vcpu(i, vcpu, kvm)
                        kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,
                                              gfn_end << PAGE_SHIFT);
        }
}

static int kvm_unmap_hva_pr(struct kvm *kvm, unsigned long hva)
{
        trace_kvm_unmap_hva(hva);

        do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);

        return 0;
}

static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start,
                                  unsigned long end)
{
        do_kvm_unmap_hva(kvm, start, end);

        return 0;
}

static int kvm_age_hva_pr(struct kvm *kvm, unsigned long start,
                          unsigned long end)
{
        /* XXX could be more clever ;) */
        return 0;
}

static int kvm_test_age_hva_pr(struct kvm *kvm, unsigned long hva)
{
        /* XXX could be more clever ;) */
        return 0;
}

static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte)
{
        /* The page will get remapped properly on its next fault */
        do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
}

/*****************************************/

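/*
 * Recompute the MSR the guest effectively runs with while in problem
 * state: guest-controllable bits (FE0/FE1, SF, SE, BE, LE) and any
 * externally-provided facilities it owns come from the guest MSR,
 * while ME/RI/IR/DR/PR/EE are forced on so the guest always executes
 * as a translated user-mode process from the host's point of view.
 */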
static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
        ulong guest_msr = kvmppc_get_msr(vcpu);
        ulong smsr = guest_msr;

        /* Guest MSR values */
        smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE;
        /* Process MSR values */
        smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
        /* External providers the guest reserved */
        smsr |= (guest_msr & vcpu->arch.guest_owned_ext);
        /* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
        smsr |= MSR_ISF | MSR_HV;
#endif
        vcpu->arch.shadow_msr = smsr;
}

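/*
 * Handle a guest write to its (virtualized) MSR. Setting MSR_POW with
 * no exception pending puts the vcpu to sleep (the guest's power-save
 * idle) until something wakes it. Changes to PR/IR/DR switch the
 * effective address space, so shadow segments keyed on the old mode
 * must be flushed and remapped.
 */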
static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
{
        ulong old_msr = kvmppc_get_msr(vcpu);

#ifdef EXIT_DEBUG
        printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

        msr &= to_book3s(vcpu)->msr_mask;
        kvmppc_set_msr_fast(vcpu, msr);
        kvmppc_recalc_shadow_msr(vcpu);

        if (msr & MSR_POW) {
                if (!vcpu->arch.pending_exceptions) {
                        kvm_vcpu_block(vcpu);
                        kvm_clear_request(KVM_REQ_UNHALT, vcpu);
                        vcpu->stat.halt_wakeup++;

                        /* Unset POW bit after we woke up */
                        msr &= ~MSR_POW;
                        kvmppc_set_msr_fast(vcpu, msr);
                }
        }

        if (kvmppc_is_split_real(vcpu))
                kvmppc_fixup_split_real(vcpu);
        else
                kvmppc_unfixup_split_real(vcpu);

        if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) !=
                   (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
                kvmppc_mmu_flush_segments(vcpu);
                kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));

                /* Preload magic page segment when in kernel mode */
                if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
                        struct kvm_vcpu_arch *a = &vcpu->arch;

                        if (msr & MSR_DR)
                                kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
                        else
                                kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
                }
        }

        /*
         * When switching from 32-bit to 64-bit mode, we may still have a
         * stale 32-bit magic page around that we need to flush. Typically,
         * the 32-bit magic page will be instantiated when calling into
         * RTAS. Note: we assume that such a transition only happens while
         * in kernel mode, i.e. we never transition from user 32-bit to
         * kernel 64-bit with a 32-bit magic page around.
         */
        if (vcpu->arch.magic_page_pa &&
            !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
                /* going from RTAS to normal kernel code */
                kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
                                     ~0xFFFUL);
        }

        /* Preload FPU if it's enabled */
        if (kvmppc_get_msr(vcpu) & MSR_FP)
                kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
}

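/*
 * Adopt the guest personality identified by the PVR: choose the 32-bit
 * or 64-bit MMU backend, the default HIOR and MSR mask, and the
 * assorted workaround flags that depend on both the emulated core and
 * the host we are running on.
 */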
void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
{
        u32 host_pvr;

        vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
        vcpu->arch.pvr = pvr;
#ifdef CONFIG_PPC_BOOK3S_64
        if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
                kvmppc_mmu_book3s_64_init(vcpu);
                if (!to_book3s(vcpu)->hior_explicit)
                        to_book3s(vcpu)->hior = 0xfff00000;
                to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
                vcpu->arch.cpu_type = KVM_CPU_3S_64;
        } else
#endif
        {
                kvmppc_mmu_book3s_32_init(vcpu);
                if (!to_book3s(vcpu)->hior_explicit)
                        to_book3s(vcpu)->hior = 0;
                to_book3s(vcpu)->msr_mask = 0xffffffffULL;
                vcpu->arch.cpu_type = KVM_CPU_3S_32;
        }

        kvmppc_sanity_check(vcpu);

        /* If we are at hypervisor level on a 970, we can tell the CPU to
         * treat DCBZ as a 32-byte store */
        vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
        if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
            !strcmp(cur_cpu_spec->platform, "ppc970"))
                vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;

        /* Cell performs badly if MSR_FEx are set. So let's hope nobody
           really needs them in a VM on Cell and force-disable them. */
        if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
                to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);

        /*
         * If they're asking for POWER6 or later, set the flag
         * indicating that we can do multiple large page sizes
         * and 1TB segments.
         * Also set the flag that indicates that tlbie has the large
         * page bit in the RB operand instead of the instruction.
         */
        switch (PVR_VER(pvr)) {
        case PVR_POWER6:
        case PVR_POWER7:
        case PVR_POWER7p:
        case PVR_POWER8:
        case PVR_POWER8E:
        case PVR_POWER8NVL:
                vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
                        BOOK3S_HFLAG_NEW_TLBIE;
                break;
        }

#ifdef CONFIG_PPC_BOOK3S_32
        /* 32 bit Book3S always has 32 byte dcbz */
        vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
#endif

        /* On some CPUs we can execute paired single operations natively */
        asm ( "mfpvr %0" : "=r"(host_pvr));
        switch (host_pvr) {
        case 0x00080200:        /* lonestar 2.0 */
        case 0x00088202:        /* lonestar 2.2 */
        case 0x70000100:        /* gekko 1.0 */
        case 0x00080100:        /* gekko 2.0 */
        case 0x00083203:        /* gekko 2.3a */
        case 0x00083213:        /* gekko 2.3b */
        case 0x00083204:        /* gekko 2.4 */
        case 0x00083214:        /* gekko 2.4e (8SE) - retail HW2 */
        case 0x00087200:        /* broadway */
                vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
                /* Enable HID2.PSE - in case we need it later */
                mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
        }
}

/* Book3s_32 CPUs always have a 32-byte cache line size, which Linux assumes.
 * To make Book3s_32 Linux work on Book3s_64, we have to make sure we trap
 * dcbz so we can emulate the 32-byte dcbz length.
 *
 * The Book3s_64 inventors also realized this case and implemented a special
 * bit in the HID5 register, which is a hypervisor resource. Thus we can't
 * use it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
        struct page *hpage;
        u64 hpage_offset;
        u32 *page;
        int i;

        hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
        if (is_error_page(hpage))
                return;

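        /*
         * Find the guest's 4k page inside the (possibly larger) host
         * page: take the offset into the host page, align it down to a
         * 4k boundary, and convert the byte offset into a u32 index.
         */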
        hpage_offset = pte->raddr & ~PAGE_MASK;
        hpage_offset &= ~0xFFFULL;
        hpage_offset /= 4;

        get_page(hpage);
        page = kmap_atomic(hpage);

        /* patch dcbz into reserved instruction, so we trap */
        for (i = hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
                if ((be32_to_cpu(page[i]) & 0xff0007ff) == INS_DCBZ)
                        page[i] &= cpu_to_be32(0xfffffff7);

        kunmap_atomic(page);
        put_page(hpage);
}

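/*
 * Does this guest physical address map to anything we can back: either
 * the magic (paravirtualized shared) page or a real memslot?
 */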
static bool kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
        ulong mp_pa = vcpu->arch.magic_page_pa;

        if (!(kvmppc_get_msr(vcpu) & MSR_SF))
                mp_pa = (uint32_t)mp_pa;

        gpa &= ~0xFFFULL;
        if (unlikely(mp_pa) && unlikely((mp_pa & KVM_PAM) == (gpa & KVM_PAM))) {
                return true;
        }

        return kvm_is_visible_gfn(vcpu->kvm, gpa >> PAGE_SHIFT);
}

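/*
 * Common page-fault path for instruction and data storage interrupts.
 * Translate the effective address through the guest MMU, or fabricate
 * a 1:1 mapping when relocation is off; reflect guest-visible faults
 * back into the guest; otherwise map the page on the host or hand the
 * access to the MMIO emulation path.
 */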
int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                            ulong eaddr, int vec)
{
        bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
        bool iswrite = false;
        int r = RESUME_GUEST;
        int relocated;
        int page_found = 0;
        struct kvmppc_pte pte = { 0 };
        bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false;
        bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false;
        u64 vsid;

        relocated = data ? dr : ir;
        if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE))
                iswrite = true;

        /* Resolve real address if translation turned on */
        if (relocated) {
                page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
        } else {
                pte.may_execute = true;
                pte.may_read = true;
                pte.may_write = true;
                pte.raddr = eaddr & KVM_PAM;
                pte.eaddr = eaddr;
                pte.vpage = eaddr >> 12;
                pte.page_size = MMU_PAGE_64K;
        }

        switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) {
        case 0:
                pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
                break;
        case MSR_DR:
                if (!data &&
                    (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
                    ((pte.raddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
                        pte.raddr &= ~SPLIT_HACK_MASK;
                /* fall through */
        case MSR_IR:
                vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

                if ((kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) == MSR_DR)
                        pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
                else
                        pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
                pte.vpage |= vsid;

                if (vsid == -1)
                        page_found = -EINVAL;
                break;
        }

        if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
           (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
                /*
                 * If we do the dcbz hack, we have to NX on every execution,
                 * so we can patch the executing code. This renders our guest
                 * NX-less.
                 */
                pte.may_execute = !data;
        }

        if (page_found == -ENOENT) {
                /* Page not found in guest PTE entries */
                u64 ssrr1 = vcpu->arch.shadow_srr1;
                u64 msr = kvmppc_get_msr(vcpu);
                kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
                kvmppc_set_dsisr(vcpu, vcpu->arch.fault_dsisr);
                kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL));
                kvmppc_book3s_queue_irqprio(vcpu, vec);
        } else if (page_found == -EPERM) {
                /* Storage protection */
                u32 dsisr = vcpu->arch.fault_dsisr;
                u64 ssrr1 = vcpu->arch.shadow_srr1;
                u64 msr = kvmppc_get_msr(vcpu);
                kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
                dsisr = (dsisr & ~DSISR_NOHPTE) | DSISR_PROTFAULT;
                kvmppc_set_dsisr(vcpu, dsisr);
                kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL));
                kvmppc_book3s_queue_irqprio(vcpu, vec);
        } else if (page_found == -EINVAL) {
                /* Page not found in guest SLB */
                kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
                kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
        } else if (kvmppc_visible_gpa(vcpu, pte.raddr)) {
                if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
                        /*
                         * There is already a host HPTE there, presumably
                         * a read-only one for a page the guest thinks
                         * is writable, so get rid of it first.
                         */
                        kvmppc_mmu_unmap_page(vcpu, &pte);
                }
                /* The guest's PTE is not mapped yet. Map on the host */
                if (kvmppc_mmu_map_page(vcpu, &pte, iswrite) == -EIO) {
                        /* Exit KVM if mapping failed */
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        return RESUME_HOST;
                }
                if (data)
                        vcpu->stat.sp_storage++;
                else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
                         (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
                        kvmppc_patch_dcbz(vcpu, &pte);
        } else {
                /* MMIO */
                vcpu->stat.mmio_exits++;
                vcpu->arch.paddr_accessed = pte.raddr;
                vcpu->arch.vaddr_accessed = pte.eaddr;
                r = kvmppc_emulate_mmio(run, vcpu);
                if (r == RESUME_HOST_NV)
                        r = RESUME_HOST;
        }

        return r;
}

/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
        struct thread_struct *t = &current->thread;

        /*
         * VSX instructions can access FP and vector registers, so if
         * we are giving up VSX, make sure we give up FP and VMX as well.
         */
        if (msr & MSR_VSX)
                msr |= MSR_FP | MSR_VEC;

        msr &= vcpu->arch.guest_owned_ext;
        if (!msr)
                return;

#ifdef DEBUG_EXT
        printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif

        if (msr & MSR_FP) {
                /*
                 * Note that on CPUs with VSX, giveup_fpu stores
                 * both the traditional FP registers and the added VSX
                 * registers into thread.fp_state.fpr[].
                 */
                if (t->regs->msr & MSR_FP)
                        giveup_fpu(current);
                t->fp_save_area = NULL;
        }

#ifdef CONFIG_ALTIVEC
        if (msr & MSR_VEC) {
                if (current->thread.regs->msr & MSR_VEC)
                        giveup_altivec(current);
                t->vr_save_area = NULL;
        }
#endif

        vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX);
        kvmppc_recalc_shadow_msr(vcpu);
}

/* Give up facility (TAR / EBB / DSCR) */
static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac)
{
#ifdef CONFIG_PPC_BOOK3S_64
        if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) {
                /* Facility not available to the guest, ignore giveup request */
                return;
        }

        switch (fac) {
        case FSCR_TAR_LG:
                vcpu->arch.tar = mfspr(SPRN_TAR);
                mtspr(SPRN_TAR, current->thread.tar);
                vcpu->arch.shadow_fscr &= ~FSCR_TAR;
                break;
        }
#endif
}

/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
                             ulong msr)
{
        struct thread_struct *t = &current->thread;

        /* When we have paired singles, we emulate in software */
        if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
                return RESUME_GUEST;

        if (!(kvmppc_get_msr(vcpu) & msr)) {
                kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                return RESUME_GUEST;
        }

        if (msr == MSR_VSX) {
                /* No VSX?  Give an illegal instruction interrupt */
#ifdef CONFIG_VSX
                if (!cpu_has_feature(CPU_FTR_VSX))
#endif
                {
                        kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
                        return RESUME_GUEST;
                }

                /*
                 * We have to load up all the FP and VMX registers before
                 * we can let the guest use VSX instructions.
                 */
                msr = MSR_FP | MSR_VEC | MSR_VSX;
        }

        /* See if we already own all the ext(s) needed */
        msr &= ~vcpu->arch.guest_owned_ext;
        if (!msr)
                return RESUME_GUEST;

#ifdef DEBUG_EXT
        printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

        if (msr & MSR_FP) {
                preempt_disable();
                enable_kernel_fp();
                load_fp_state(&vcpu->arch.fp);
                disable_kernel_fp();
                t->fp_save_area = &vcpu->arch.fp;
                preempt_enable();
        }

        if (msr & MSR_VEC) {
#ifdef CONFIG_ALTIVEC
                preempt_disable();
                enable_kernel_altivec();
                load_vr_state(&vcpu->arch.vr);
                disable_kernel_altivec();
                t->vr_save_area = &vcpu->arch.vr;
                preempt_enable();
#endif
        }

        t->regs->msr |= msr;
        vcpu->arch.guest_owned_ext |= msr;
        kvmppc_recalc_shadow_msr(vcpu);

        return RESUME_GUEST;
}

/*
 * Kernel code using FP or VMX could have flushed guest state to
 * the thread_struct; if so, get it back now.
 */
static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
{
        unsigned long lost_ext;

        lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr;
        if (!lost_ext)
                return;

        if (lost_ext & MSR_FP) {
                preempt_disable();
                enable_kernel_fp();
                load_fp_state(&vcpu->arch.fp);
                disable_kernel_fp();
                preempt_enable();
        }
#ifdef CONFIG_ALTIVEC
        if (lost_ext & MSR_VEC) {
                preempt_disable();
                enable_kernel_altivec();
                load_vr_state(&vcpu->arch.vr);
                disable_kernel_altivec();
                preempt_enable();
        }
#endif
        current->thread.regs->msr |= lost_ext;
}

#ifdef CONFIG_PPC_BOOK3S_64

static void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac)
{
        /* Inject the Interrupt Cause field and trigger a guest interrupt */
        vcpu->arch.fscr &= ~(0xffULL << 56);
        vcpu->arch.fscr |= (fac << 56);
        kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL);
}

static void kvmppc_emulate_fac(struct kvm_vcpu *vcpu, ulong fac)
{
        enum emulation_result er = EMULATE_FAIL;

        if (!(kvmppc_get_msr(vcpu) & MSR_PR))
                er = kvmppc_emulate_instruction(vcpu->run, vcpu);

        if ((er != EMULATE_DONE) && (er != EMULATE_AGAIN)) {
                /* Couldn't emulate, trigger interrupt in guest */
                kvmppc_trigger_fac_interrupt(vcpu, fac);
        }
}

/* Enable facilities (TAR, EBB, DSCR) for the guest */
static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac)
{
        bool guest_fac_enabled;
        BUG_ON(!cpu_has_feature(CPU_FTR_ARCH_207S));

        /*
         * Not every facility is enabled by FSCR bits; check whether the
         * guest has this facility enabled at all.
         */
        switch (fac) {
        case FSCR_TAR_LG:
        case FSCR_EBB_LG:
                guest_fac_enabled = (vcpu->arch.fscr & (1ULL << fac));
                break;
        case FSCR_TM_LG:
                guest_fac_enabled = kvmppc_get_msr(vcpu) & MSR_TM;
                break;
        default:
                guest_fac_enabled = false;
                break;
        }

        if (!guest_fac_enabled) {
                /* Facility not enabled by the guest */
                kvmppc_trigger_fac_interrupt(vcpu, fac);
                return RESUME_GUEST;
        }

        switch (fac) {
        case FSCR_TAR_LG:
                /* TAR switching isn't lazy in Linux yet */
                current->thread.tar = mfspr(SPRN_TAR);
                mtspr(SPRN_TAR, vcpu->arch.tar);
                vcpu->arch.shadow_fscr |= FSCR_TAR;
                break;
        default:
                kvmppc_emulate_fac(vcpu, fac);
                break;
        }

        return RESUME_GUEST;
}

void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr)
{
        if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) {
                /* TAR got dropped, drop it in shadow too */
                kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
        }
        vcpu->arch.fscr = fscr;
}
#endif

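/*
 * Guest single-stepping from userspace (KVM_GUESTDBG_SINGLESTEP) is
 * implemented by running the guest with MSR_SE set; the resulting
 * trace interrupts become KVM_EXIT_DEBUG exits in the handler below.
 */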
static void kvmppc_setup_debug(struct kvm_vcpu *vcpu)
{
        if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
                u64 msr = kvmppc_get_msr(vcpu);

                kvmppc_set_msr(vcpu, msr | MSR_SE);
        }
}

static void kvmppc_clear_debug(struct kvm_vcpu *vcpu)
{
        if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
                u64 msr = kvmppc_get_msr(vcpu);

                kvmppc_set_msr(vcpu, msr & ~MSR_SE);
        }
}

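/*
 * Handle program-interrupt style exits. Faulting instructions from
 * guest user mode are normally reflected straight back into the guest;
 * the exception is a dcbz we previously patched into an illegal
 * encoding for the 32-byte-cacheline hack, which must always be
 * emulated. Everything trapping from guest kernel mode goes to the
 * emulator.
 */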
static int kvmppc_exit_pr_progint(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                  unsigned int exit_nr)
{
        enum emulation_result er;
        ulong flags;
        u32 last_inst;
        int emul, r;

        /*
         * shadow_srr1 only contains valid flags if we came here via a program
         * exception. The other exceptions (emulation assist, FP unavailable,
         * etc.) do not provide flags in SRR1, so use an illegal-instruction
         * exception when injecting a program interrupt into the guest.
         */
        if (exit_nr == BOOK3S_INTERRUPT_PROGRAM)
                flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
        else
                flags = SRR1_PROGILL;

        emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
        if (emul != EMULATE_DONE)
                return RESUME_GUEST;

        if (kvmppc_get_msr(vcpu) & MSR_PR) {
#ifdef EXIT_DEBUG
                pr_info("Userspace triggered 0x700 exception at 0x%lx (0x%x)\n",
                        kvmppc_get_pc(vcpu), last_inst);
#endif
                if ((last_inst & 0xff0007ff) != (INS_DCBZ & 0xfffffff7)) {
                        kvmppc_core_queue_program(vcpu, flags);
                        return RESUME_GUEST;
                }
        }

        vcpu->stat.emulated_inst_exits++;
        er = kvmppc_emulate_instruction(run, vcpu);
        switch (er) {
        case EMULATE_DONE:
                r = RESUME_GUEST_NV;
                break;
        case EMULATE_AGAIN:
                r = RESUME_GUEST;
                break;
        case EMULATE_FAIL:
                pr_crit("%s: emulation at %lx failed (%08x)\n",
                        __func__, kvmppc_get_pc(vcpu), last_inst);
                kvmppc_core_queue_program(vcpu, flags);
                r = RESUME_GUEST;
                break;
        case EMULATE_DO_MMIO:
                run->exit_reason = KVM_EXIT_MMIO;
                r = RESUME_HOST_NV;
                break;
        case EMULATE_EXIT_USER:
                r = RESUME_HOST_NV;
                break;
        default:
                BUG();
        }

        return r;
}

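/*
 * Top-level exit handler, run once for every guest exit. Returns a
 * RESUME_* code: RESUME_GUEST re-enters the guest, RESUME_HOST
 * completes the exit to userspace with run->exit_reason filled in.
 */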
int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
                          unsigned int exit_nr)
{
        int r = RESUME_HOST;
        int s;

        vcpu->stat.sum_exits++;

        run->exit_reason = KVM_EXIT_UNKNOWN;
        run->ready_for_interrupt_injection = 1;

        /* We get here with MSR.EE=1 */

        trace_kvm_exit(exit_nr, vcpu);
        guest_exit();

        switch (exit_nr) {
        case BOOK3S_INTERRUPT_INST_STORAGE:
        {
                ulong shadow_srr1 = vcpu->arch.shadow_srr1;
                vcpu->stat.pf_instruc++;

                if (kvmppc_is_split_real(vcpu))
                        kvmppc_fixup_split_real(vcpu);

#ifdef CONFIG_PPC_BOOK3S_32
                /* We set segments as unused segments when invalidating them,
                 * so treat the corresponding fault as a segment fault. */
                {
                        struct kvmppc_book3s_shadow_vcpu *svcpu;
                        u32 sr;

                        svcpu = svcpu_get(vcpu);
                        sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT];
                        svcpu_put(svcpu);
                        if (sr == SR_INVALID) {
                                kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
                                r = RESUME_GUEST;
                                break;
                        }
                }
#endif

                /* only care about PTEG not found errors, but leave NX alone */
                if (shadow_srr1 & 0x40000000) {
                        int idx = srcu_read_lock(&vcpu->kvm->srcu);
                        r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
                        srcu_read_unlock(&vcpu->kvm->srcu, idx);
                        vcpu->stat.sp_instruc++;
                } else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
                          (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
                        /*
                         * XXX If we do the dcbz hack we use the NX bit to flush&patch the page,
                         *     so we can't use the NX bit inside the guest. Let's cross our fingers
                         *     that no guest that needs the dcbz hack does NX.
                         */
                        kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
                        r = RESUME_GUEST;
                } else {
                        u64 msr = kvmppc_get_msr(vcpu);
                        msr |= shadow_srr1 & 0x58000000;
                        kvmppc_set_msr_fast(vcpu, msr);
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                        r = RESUME_GUEST;
                }
                break;
        }
        case BOOK3S_INTERRUPT_DATA_STORAGE:
        {
                ulong dar = kvmppc_get_fault_dar(vcpu);
                u32 fault_dsisr = vcpu->arch.fault_dsisr;
                vcpu->stat.pf_storage++;

#ifdef CONFIG_PPC_BOOK3S_32
                /* We set segments as unused segments when invalidating them,
                 * so treat the corresponding fault as a segment fault. */
                {
                        struct kvmppc_book3s_shadow_vcpu *svcpu;
                        u32 sr;

                        svcpu = svcpu_get(vcpu);
                        sr = svcpu->sr[dar >> SID_SHIFT];
                        svcpu_put(svcpu);
                        if (sr == SR_INVALID) {
                                kvmppc_mmu_map_segment(vcpu, dar);
                                r = RESUME_GUEST;
                                break;
                        }
                }
#endif

                /*
                 * We need to handle missing shadow PTEs, and
                 * protection faults due to us mapping a page read-only
                 * when the guest thinks it is writable.
                 */
                if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) {
                        int idx = srcu_read_lock(&vcpu->kvm->srcu);
                        r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
                        srcu_read_unlock(&vcpu->kvm->srcu, idx);
                } else {
                        kvmppc_set_dar(vcpu, dar);
                        kvmppc_set_dsisr(vcpu, fault_dsisr);
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                        r = RESUME_GUEST;
                }
                break;
        }
        case BOOK3S_INTERRUPT_DATA_SEGMENT:
                if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
                        kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
                        kvmppc_book3s_queue_irqprio(vcpu,
                                BOOK3S_INTERRUPT_DATA_SEGMENT);
                }
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_INST_SEGMENT:
                if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
                        kvmppc_book3s_queue_irqprio(vcpu,
                                BOOK3S_INTERRUPT_INST_SEGMENT);
                }
                r = RESUME_GUEST;
                break;
        /* We're good on these - the host merely wanted to get our attention */
        case BOOK3S_INTERRUPT_DECREMENTER:
        case BOOK3S_INTERRUPT_HV_DECREMENTER:
        case BOOK3S_INTERRUPT_DOORBELL:
        case BOOK3S_INTERRUPT_H_DOORBELL:
                vcpu->stat.dec_exits++;
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_EXTERNAL:
        case BOOK3S_INTERRUPT_EXTERNAL_LEVEL:
        case BOOK3S_INTERRUPT_EXTERNAL_HV:
                vcpu->stat.ext_intr_exits++;
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_PERFMON:
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_PROGRAM:
        case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
                r = kvmppc_exit_pr_progint(run, vcpu, exit_nr);
                break;
        case BOOK3S_INTERRUPT_SYSCALL:
        {
                u32 last_sc;
                int emul;

                /* Get last sc for papr */
                if (vcpu->arch.papr_enabled) {
                        /* The sc instruction points SRR0 to the next inst */
                        emul = kvmppc_get_last_inst(vcpu, INST_SC, &last_sc);
                        if (emul != EMULATE_DONE) {
                                kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) - 4);
                                r = RESUME_GUEST;
                                break;
                        }
                }

                if (vcpu->arch.papr_enabled &&
                    (last_sc == 0x44000022) &&
                    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
                        /* SC 1 papr hypercalls */
                        ulong cmd = kvmppc_get_gpr(vcpu, 3);
                        int i;

#ifdef CONFIG_PPC_BOOK3S_64
                        if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
                                r = RESUME_GUEST;
                                break;
                        }
#endif

                        run->papr_hcall.nr = cmd;
                        for (i = 0; i < 9; ++i) {
                                ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
                                run->papr_hcall.args[i] = gpr;
                        }
                        run->exit_reason = KVM_EXIT_PAPR_HCALL;
                        vcpu->arch.hcall_needed = 1;
                        r = RESUME_HOST;
                } else if (vcpu->arch.osi_enabled &&
                    (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
                    (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
                        /* MOL hypercalls */
                        u64 *gprs = run->osi.gprs;
                        int i;

                        run->exit_reason = KVM_EXIT_OSI;
                        for (i = 0; i < 32; i++)
                                gprs[i] = kvmppc_get_gpr(vcpu, i);
                        vcpu->arch.osi_needed = 1;
                        r = RESUME_HOST_NV;
                } else if (!(kvmppc_get_msr(vcpu) & MSR_PR) &&
                    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
                        /* KVM PV hypercalls */
                        kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
                        r = RESUME_GUEST;
                } else {
                        /* Guest syscalls */
                        vcpu->stat.syscall_exits++;
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                        r = RESUME_GUEST;
                }
                break;
        }
        case BOOK3S_INTERRUPT_FP_UNAVAIL:
        case BOOK3S_INTERRUPT_ALTIVEC:
        case BOOK3S_INTERRUPT_VSX:
        {
                int ext_msr = 0;
                int emul;
                u32 last_inst;

                if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) {
                        /* Do paired single instruction emulation */
                        emul = kvmppc_get_last_inst(vcpu, INST_GENERIC,
                                                    &last_inst);
                        if (emul == EMULATE_DONE)
                                r = kvmppc_exit_pr_progint(run, vcpu, exit_nr);
                        else
                                r = RESUME_GUEST;

                        break;
                }

                /* Enable external provider */
                switch (exit_nr) {
                case BOOK3S_INTERRUPT_FP_UNAVAIL:
                        ext_msr = MSR_FP;
                        break;

                case BOOK3S_INTERRUPT_ALTIVEC:
                        ext_msr = MSR_VEC;
                        break;

                case BOOK3S_INTERRUPT_VSX:
                        ext_msr = MSR_VSX;
                        break;
                }

                r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
                break;
        }
        case BOOK3S_INTERRUPT_ALIGNMENT:
        {
                u32 last_inst;
                int emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);

                if (emul == EMULATE_DONE) {
                        u32 dsisr;
                        u64 dar;

                        dsisr = kvmppc_alignment_dsisr(vcpu, last_inst);
                        dar = kvmppc_alignment_dar(vcpu, last_inst);

                        kvmppc_set_dsisr(vcpu, dsisr);
                        kvmppc_set_dar(vcpu, dar);

                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                }
                r = RESUME_GUEST;
                break;
        }
#ifdef CONFIG_PPC_BOOK3S_64
        case BOOK3S_INTERRUPT_FAC_UNAVAIL:
                kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56);
                r = RESUME_GUEST;
                break;
#endif
        case BOOK3S_INTERRUPT_MACHINE_CHECK:
                kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_TRACE:
                if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
                        run->exit_reason = KVM_EXIT_DEBUG;
                        r = RESUME_HOST;
                } else {
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                        r = RESUME_GUEST;
                }
                break;
        default:
        {
                ulong shadow_srr1 = vcpu->arch.shadow_srr1;
                /* Ugh - bork here! What did we get? */
                printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
                        exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
                r = RESUME_HOST;
                BUG();
                break;
        }
        }

        if (!(r & RESUME_HOST)) {
                /* To avoid clobbering exit_reason, only check for signals if
                 * we aren't already exiting to userspace for some other
                 * reason. */

                /*
                 * Interrupts could be timers for the guest which we have to
                 * inject again, so let's postpone them until we're in the guest
                 * and if we really did time things so badly, then we just exit
                 * again due to a host external interrupt.
                 */
                s = kvmppc_prepare_to_enter(vcpu);
                if (s <= 0)
                        r = s;
                else {
                        /* interrupts now hard-disabled */
                        kvmppc_fix_ee_before_entry();
                }

                kvmppc_handle_lost_ext(vcpu);
        }

        trace_kvm_book3s_reenter(r, vcpu);

        return r;
}

static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu,
                                            struct kvm_sregs *sregs)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        int i;

        sregs->pvr = vcpu->arch.pvr;

        sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
        if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
                for (i = 0; i < 64; i++) {
                        sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
                        sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
                }
        } else {
                for (i = 0; i < 16; i++)
                        sregs->u.s.ppc32.sr[i] = kvmppc_get_sr(vcpu, i);

                for (i = 0; i < 8; i++) {
                        sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
                        sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
                }
        }

        return 0;
}

static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu,
                                            struct kvm_sregs *sregs)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        int i;

        kvmppc_set_pvr_pr(vcpu, sregs->pvr);

        vcpu3s->sdr1 = sregs->u.s.sdr1;
        if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
                for (i = 0; i < 64; i++) {
                        vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
                                                    sregs->u.s.ppc64.slb[i].slbe);
                }
        } else {
                for (i = 0; i < 16; i++) {
                        vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
                }
                for (i = 0; i < 8; i++) {
                        kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
                                       (u32)sregs->u.s.ppc32.ibat[i]);
                        kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
                                       (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
                        kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
                                       (u32)sregs->u.s.ppc32.dbat[i]);
                        kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
                                       (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
                }
        }

        /* Flush the MMU after messing with the segments */
        kvmppc_mmu_pte_flush(vcpu, 0, 0);

        return 0;
}

static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
                                 union kvmppc_one_reg *val)
{
        int r = 0;

        switch (id) {
        case KVM_REG_PPC_DEBUG_INST:
                *val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
                break;
        case KVM_REG_PPC_HIOR:
                *val = get_reg_val(id, to_book3s(vcpu)->hior);
                break;
        case KVM_REG_PPC_VTB:
                *val = get_reg_val(id, to_book3s(vcpu)->vtb);
                break;
        case KVM_REG_PPC_LPCR:
        case KVM_REG_PPC_LPCR_64:
                /*
                 * We are only interested in the LPCR_ILE bit
                 */
                if (vcpu->arch.intr_msr & MSR_LE)
                        *val = get_reg_val(id, LPCR_ILE);
                else
                        *val = get_reg_val(id, 0);
                break;
        default:
                r = -EINVAL;
                break;
        }

        return r;
}

static void kvmppc_set_lpcr_pr(struct kvm_vcpu *vcpu, u64 new_lpcr)
{
        if (new_lpcr & LPCR_ILE)
                vcpu->arch.intr_msr |= MSR_LE;
        else
                vcpu->arch.intr_msr &= ~MSR_LE;
}

static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
                                 union kvmppc_one_reg *val)
{
        int r = 0;

        switch (id) {
        case KVM_REG_PPC_HIOR:
                to_book3s(vcpu)->hior = set_reg_val(id, *val);
                to_book3s(vcpu)->hior_explicit = true;
                break;
        case KVM_REG_PPC_VTB:
                to_book3s(vcpu)->vtb = set_reg_val(id, *val);
                break;
        case KVM_REG_PPC_LPCR:
        case KVM_REG_PPC_LPCR_64:
                kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val));
                break;
        default:
                r = -EINVAL;
                break;
        }

        return r;
}

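/*
 * Allocate and wire up a vcpu: the book3s-specific state, the 32-bit
 * shadow vcpu where the build needs one, and a zeroed page for the
 * KVM/guest shared struct that backs the magic page.
 */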
1422static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
1423                                                   unsigned int id)
1424{
1425        struct kvmppc_vcpu_book3s *vcpu_book3s;
1426        struct kvm_vcpu *vcpu;
1427        int err = -ENOMEM;
1428        unsigned long p;
1429
1430        vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
1431        if (!vcpu)
1432                goto out;
1433
1434        vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
1435        if (!vcpu_book3s)
1436                goto free_vcpu;
1437        vcpu->arch.book3s = vcpu_book3s;
1438
1439#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
1440        vcpu->arch.shadow_vcpu =
1441                kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
1442        if (!vcpu->arch.shadow_vcpu)
1443                goto free_vcpu3s;
1444#endif
1445
1446        err = kvm_vcpu_init(vcpu, kvm, id);
1447        if (err)
1448                goto free_shadow_vcpu;
1449
1450        err = -ENOMEM;
1451        p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
1452        if (!p)
1453                goto uninit_vcpu;
1454        vcpu->arch.shared = (void *)p;
1455#ifdef CONFIG_PPC_BOOK3S_64
1456        /* Always start the shared struct in native endian mode */
1457#ifdef __BIG_ENDIAN__
1458        vcpu->arch.shared_big_endian = true;
1459#else
1460        vcpu->arch.shared_big_endian = false;
1461#endif
1462
1463        /*
1464         * Default to the same PVR as the host if we're on a machine
1465         * recent enough to have 1TB segments; otherwise default to
1466         * PPC970FX.
1467         */
1468        vcpu->arch.pvr = 0x3C0301;
1469        if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
1470                vcpu->arch.pvr = mfspr(SPRN_PVR);
1471        vcpu->arch.intr_msr = MSR_SF;
1472#else
1473        /* default to book3s_32 (750) */
1474        vcpu->arch.pvr = 0x84202;
1475#endif
1476        kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr);
1477        vcpu->arch.slb_nr = 64;
1478
1479        vcpu->arch.shadow_msr = MSR_USER64 & ~MSR_LE;
1480
1481        err = kvmppc_mmu_init(vcpu);
1482        if (err < 0)
1483                goto uninit_vcpu;
1484
1485        return vcpu;
1486
1487uninit_vcpu:
1488        kvm_vcpu_uninit(vcpu);
1489free_shadow_vcpu:
1490#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
1491        kfree(vcpu->arch.shadow_vcpu);
1492free_vcpu3s:
1493#endif
1494        vfree(vcpu_book3s);
1495free_vcpu:
1496        kmem_cache_free(kvm_vcpu_cache, vcpu);
1497out:
1498        return ERR_PTR(err);
1499}
1500
1501static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
1502{
1503        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
1504
1505        free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
1506        kvm_vcpu_uninit(vcpu);
1507#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
1508        kfree(vcpu->arch.shadow_vcpu);
1509#endif
1510        vfree(vcpu_book3s);
1511        kmem_cache_free(kvm_vcpu_cache, vcpu);
1512}
1513
1514static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
1515{
1516        int ret;
1520
1521        /* Check if we can run the vcpu at all */
1522        if (!vcpu->arch.sane) {
1523                kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1524                ret = -EINVAL;
1525                goto out;
1526        }
1527
1528        kvmppc_setup_debug(vcpu);
1529
1530        /*
1531         * Interrupts could be timers for the guest which we have to inject
1532         * again, so let's postpone them until we're in the guest; if we
1533         * really did time things that badly, we simply exit again due to
1534         * a host external interrupt.
1535         */
1536        ret = kvmppc_prepare_to_enter(vcpu);
1537        if (ret <= 0)
1538                goto out;
1539        /* interrupts now hard-disabled */
1540
1541        /* Save FPU, Altivec and VSX state */
1542        giveup_all(current);
1543
1544        /* Preload FPU if it's enabled */
1545        if (kvmppc_get_msr(vcpu) & MSR_FP)
1546                kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
1547
1548        kvmppc_fix_ee_before_entry();
1549
1550        ret = __kvmppc_vcpu_run(kvm_run, vcpu);
1551
1552        kvmppc_clear_debug(vcpu);
1553
1554        /* No need for guest_exit: it's done in handle_exit, and we also
1555           get here with interrupts enabled. */
1556
1557        /* Make sure we save the guest FPU/Altivec/VSX state */
1558        kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
1559
1560        /* Make sure we save the guest TAR/EBB/DSCR state */
1561        kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
1562
1563out:
1564        vcpu->mode = OUTSIDE_GUEST_MODE;
1565        return ret;
1566}
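
/*
 * Illustrative sketch (not part of the original file): the run function
 * above is reached via the KVM_RUN vcpu ioctl, which userspace drives
 * in a loop.  Assuming vcpu_fd and an mmap'd "struct kvm_run *run":
 *
 *	for (;;) {
 *		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
 *			err(1, "KVM_RUN");
 *		switch (run->exit_reason) {
 *		case KVM_EXIT_MMIO:
 *			... emulate the access, then re-enter ...
 *			break;
 *		case KVM_EXIT_INTERNAL_ERROR:
 *			errx(1, "KVM internal error");
 *		}
 *	}
 */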
1567
1568/*
1569 * Get (and clear) the dirty memory log for a memory slot.
1570 */
1571static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
1572                                         struct kvm_dirty_log *log)
1573{
1574        struct kvm_memslots *slots;
1575        struct kvm_memory_slot *memslot;
1576        struct kvm_vcpu *vcpu;
1577        ulong ga, ga_end;
1578        int is_dirty = 0;
1579        int r;
1580        unsigned long n;
1581
1582        mutex_lock(&kvm->slots_lock);
1583
1584        r = kvm_get_dirty_log(kvm, log, &is_dirty);
1585        if (r)
1586                goto out;
1587
1588        /* If nothing is dirty, don't bother messing with page tables. */
1589        if (is_dirty) {
1590                slots = kvm_memslots(kvm);
1591                memslot = id_to_memslot(slots, log->slot);
1592
1593                ga = memslot->base_gfn << PAGE_SHIFT;
1594                ga_end = ga + (memslot->npages << PAGE_SHIFT);
1595
1596                kvm_for_each_vcpu(n, vcpu, kvm)
1597                        kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);
1598
1599                n = kvm_dirty_bitmap_bytes(memslot);
1600                memset(memslot->dirty_bitmap, 0, n);
1601        }
1602
1603        r = 0;
1604out:
1605        mutex_unlock(&kvm->slots_lock);
1606        return r;
1607}
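
/*
 * Illustrative sketch (not part of the original file): userspace pulls
 * (and thereby clears) the dirty bitmap with the KVM_GET_DIRTY_LOG vm
 * ioctl, one bit per page of the memslot.  Assuming vm_fd and a
 * suitably sized bitmap buffer:
 *
 *	struct kvm_dirty_log log = {
 *		.slot = slot_id,
 *		.dirty_bitmap = bitmap,
 *	};
 *	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 *
 * For PR KVM the shadow PTEs covering the slot are flushed above, so
 * subsequent guest writes fault again and are re-logged.
 */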
1608
1609static void kvmppc_core_flush_memslot_pr(struct kvm *kvm,
1610                                         struct kvm_memory_slot *memslot)
1611{
1613}
1614
1615static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm,
1616                                        struct kvm_memory_slot *memslot,
1617                                        const struct kvm_userspace_memory_region *mem)
1618{
1619        return 0;
1620}
1621
1622static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm,
1623                                const struct kvm_userspace_memory_region *mem,
1624                                const struct kvm_memory_slot *old,
1625                                const struct kvm_memory_slot *new)
1626{
1628}
1629
1630static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *free,
1631                                        struct kvm_memory_slot *dont)
1632{
1634}
1635
1636static int kvmppc_core_create_memslot_pr(struct kvm_memory_slot *slot,
1637                                         unsigned long npages)
1638{
1639        return 0;
1640}
1641
1643#ifdef CONFIG_PPC64
1644static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
1645                                         struct kvm_ppc_smmu_info *info)
1646{
1647        long int i;
1648        struct kvm_vcpu *vcpu;
1649
1650        info->flags = 0;
1651
1652        /* SLB is always 64 entries */
1653        info->slb_size = 64;
1654
1655        /* Standard 4k base page size segment */
1656        info->sps[0].page_shift = 12;
1657        info->sps[0].slb_enc = 0;
1658        info->sps[0].enc[0].page_shift = 12;
1659        info->sps[0].enc[0].pte_enc = 0;
1660
1661        /*
1662         * 64k large page size.
1663         * We only want to advertise this if the CPUs we're emulating
1664         * support it, but unfortunately we don't have a vcpu readily
1665         * to hand here to test with.  Just pick the first vcpu, and if
1666         * that doesn't exist yet, report the minimum capability,
1667         * i.e., no 64k pages.
1668         * 1T segment support goes along with 64k pages.
1669         */
1670        i = 1;
1671        vcpu = kvm_get_vcpu(kvm, 0);
1672        if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
1673                info->flags = KVM_PPC_1T_SEGMENTS;
1674                info->sps[i].page_shift = 16;
1675                info->sps[i].slb_enc = SLB_VSID_L | SLB_VSID_LP_01;
1676                info->sps[i].enc[0].page_shift = 16;
1677                info->sps[i].enc[0].pte_enc = 1;
1678                ++i;
1679        }
1680
1681        /* Standard 16M large page size segment */
1682        info->sps[i].page_shift = 24;
1683        info->sps[i].slb_enc = SLB_VSID_L;
1684        info->sps[i].enc[0].page_shift = 24;
1685        info->sps[i].enc[0].pte_enc = 0;
1686
1687        return 0;
1688}
1689#else
1690static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
1691                                         struct kvm_ppc_smmu_info *info)
1692{
1693        /* We should not get called */
1694        BUG();
1695}
1696#endif /* CONFIG_PPC64 */
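
/*
 * Illustrative sketch (not part of the original file): userspace reads
 * the segment and page-size capabilities reported above with the
 * KVM_PPC_GET_SMMU_INFO vm ioctl:
 *
 *	struct kvm_ppc_smmu_info info;
 *	if (ioctl(vm_fd, KVM_PPC_GET_SMMU_INFO, &info) == 0 &&
 *	    (info.flags & KVM_PPC_1T_SEGMENTS))
 *		... 1T segments (and with them 64k pages) are usable ...
 */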
1697
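/*
 * PR KVM relies on exceptions being delivered at their classic
 * real-mode vectors so that its handlers can intercept them.  On
 * firmware that supports relocation-on exceptions (FW_FEATURE_SET_MODE)
 * we therefore switch the feature off while at least one PR VM exists;
 * this counter tracks how many such VMs are live.
 */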
1698static unsigned int kvm_global_user_count;
1699static DEFINE_SPINLOCK(kvm_global_user_count_lock);
1700
1701static int kvmppc_core_init_vm_pr(struct kvm *kvm)
1702{
1703        mutex_init(&kvm->arch.hpt_mutex);
1704
1705#ifdef CONFIG_PPC_BOOK3S_64
1706        /* Start out with the default set of hcalls enabled */
1707        kvmppc_pr_init_default_hcalls(kvm);
1708#endif
1709
1710        if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
1711                spin_lock(&kvm_global_user_count_lock);
1712                if (++kvm_global_user_count == 1)
1713                        pseries_disable_reloc_on_exc();
1714                spin_unlock(&kvm_global_user_count_lock);
1715        }
1716        return 0;
1717}
1718
1719static void kvmppc_core_destroy_vm_pr(struct kvm *kvm)
1720{
1721#ifdef CONFIG_PPC64
1722        WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
1723#endif
1724
1725        if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
1726                spin_lock(&kvm_global_user_count_lock);
1727                BUG_ON(kvm_global_user_count == 0);
1728                if (--kvm_global_user_count == 0)
1729                        pseries_enable_reloc_on_exc();
1730                spin_unlock(&kvm_global_user_count_lock);
1731        }
1732}
1733
1734static int kvmppc_core_check_processor_compat_pr(void)
1735{
1736        /*
1737         * Disable KVM for Power9 until the required bits are merged.
1738         */
1739        if (cpu_has_feature(CPU_FTR_ARCH_300))
1740                return -EIO;
1741        return 0;
1742}
1743
1744static long kvm_arch_vm_ioctl_pr(struct file *filp,
1745                                 unsigned int ioctl, unsigned long arg)
1746{
1747        return -ENOTTY;
1748}
1749
1750static struct kvmppc_ops kvm_ops_pr = {
1751        .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_pr,
1752        .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_pr,
1753        .get_one_reg = kvmppc_get_one_reg_pr,
1754        .set_one_reg = kvmppc_set_one_reg_pr,
1755        .vcpu_load   = kvmppc_core_vcpu_load_pr,
1756        .vcpu_put    = kvmppc_core_vcpu_put_pr,
1757        .set_msr     = kvmppc_set_msr_pr,
1758        .vcpu_run    = kvmppc_vcpu_run_pr,
1759        .vcpu_create = kvmppc_core_vcpu_create_pr,
1760        .vcpu_free   = kvmppc_core_vcpu_free_pr,
1761        .check_requests = kvmppc_core_check_requests_pr,
1762        .get_dirty_log = kvm_vm_ioctl_get_dirty_log_pr,
1763        .flush_memslot = kvmppc_core_flush_memslot_pr,
1764        .prepare_memory_region = kvmppc_core_prepare_memory_region_pr,
1765        .commit_memory_region = kvmppc_core_commit_memory_region_pr,
1766        .unmap_hva = kvm_unmap_hva_pr,
1767        .unmap_hva_range = kvm_unmap_hva_range_pr,
1768        .age_hva  = kvm_age_hva_pr,
1769        .test_age_hva = kvm_test_age_hva_pr,
1770        .set_spte_hva = kvm_set_spte_hva_pr,
1771        .mmu_destroy  = kvmppc_mmu_destroy_pr,
1772        .free_memslot = kvmppc_core_free_memslot_pr,
1773        .create_memslot = kvmppc_core_create_memslot_pr,
1774        .init_vm = kvmppc_core_init_vm_pr,
1775        .destroy_vm = kvmppc_core_destroy_vm_pr,
1776        .get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr,
1777        .emulate_op = kvmppc_core_emulate_op_pr,
1778        .emulate_mtspr = kvmppc_core_emulate_mtspr_pr,
1779        .emulate_mfspr = kvmppc_core_emulate_mfspr_pr,
1780        .fast_vcpu_kick = kvm_vcpu_kick,
1781        .arch_vm_ioctl  = kvm_arch_vm_ioctl_pr,
1782#ifdef CONFIG_PPC_BOOK3S_64
1783        .hcall_implemented = kvmppc_hcall_impl_pr,
1784#endif
1785};
1786
1788int kvmppc_book3s_init_pr(void)
1789{
1790        int r;
1791
1792        r = kvmppc_core_check_processor_compat_pr();
1793        if (r < 0)
1794                return r;
1795
1796        kvm_ops_pr.owner = THIS_MODULE;
1797        kvmppc_pr_ops = &kvm_ops_pr;
1798
1799        r = kvmppc_mmu_hpte_sysinit();
1800        return r;
1801}
1802
1803void kvmppc_book3s_exit_pr(void)
1804{
1805        kvmppc_pr_ops = NULL;
1806        kvmppc_mmu_hpte_sysexit();
1807}
1808
1809/*
1810 * We only support separate modules for book3s 64-bit.
1811 */
1812#ifdef CONFIG_PPC_BOOK3S_64
1813
1814module_init(kvmppc_book3s_init_pr);
1815module_exit(kvmppc_book3s_exit_pr);
1816
1817MODULE_LICENSE("GPL");
1818MODULE_ALIAS_MISCDEV(KVM_MINOR);
1819MODULE_ALIAS("devname:kvm");
1820#endif
1821