linux/arch/powerpc/include/asm/kvm_book3s.h
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_H__
#define __ASM_KVM_BOOK3S_H__

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_book3s_asm.h>

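/*
 * Shadow copy of one guest BAT (Block Address Translation) register pair,
 * kept both as the raw register image and in decoded form: effective page
 * index and mask, real page number, WIMG storage attributes, protection
 * bits, and the supervisor/user valid bits.
 */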
struct kvmppc_bat {
        u64 raw;
        u32 bepi;
        u32 bepi_mask;
        u32 brpn;
        u8 wimg;
        u8 pp;
        bool vs         : 1;
        bool vp         : 1;
};

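/* Mapping from a guest segment (ESID/VSID) to the host VSID that backs it. */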
struct kvmppc_sid_map {
        u64 guest_vsid;
        u64 guest_esid;
        u64 host_vsid;
        bool valid      : 1;
};

#define SID_MAP_BITS    9
#define SID_MAP_NUM     (1 << SID_MAP_BITS)
#define SID_MAP_MASK    (SID_MAP_NUM - 1)

#ifdef CONFIG_PPC_BOOK3S_64
#define SID_CONTEXTS    1
#else
#define SID_CONTEXTS    128
#define VSID_POOL_SIZE  (SID_CONTEXTS * 16)
#endif

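/*
 * One shadow HPTE created on behalf of the guest.  Each entry is linked
 * into several hash lists so it can be found (and invalidated) by guest
 * effective address or virtual page number, and is freed under RCU.
 */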
struct hpte_cache {
        struct hlist_node list_pte;
        struct hlist_node list_pte_long;
        struct hlist_node list_vpte;
        struct hlist_node list_vpte_long;
        struct rcu_head rcu_head;
        u64 host_vpn;
        u64 pfn;
        ulong slot;
        struct kvmppc_pte pte;
};

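/* Book3S specific per-vcpu state; the generic struct kvm_vcpu is embedded first. */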
struct kvmppc_vcpu_book3s {
        struct kvm_vcpu vcpu;
        struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
        struct kvmppc_sid_map sid_map[SID_MAP_NUM];
        struct {
                u64 esid;
                u64 vsid;
        } slb_shadow[64];
        u8 slb_shadow_max;
        struct kvmppc_bat ibat[8];
        struct kvmppc_bat dbat[8];
        u64 hid[6];
        u64 gqr[8];
        u64 sdr1;
        u64 hior;
        u64 msr_mask;
        u64 purr_offset;
        u64 spurr_offset;
#ifdef CONFIG_PPC_BOOK3S_32
        u32 vsid_pool[VSID_POOL_SIZE];
        u32 vsid_next;
#else
        u64 proto_vsid_first;
        u64 proto_vsid_max;
        u64 proto_vsid_next;
#endif
        int context_id[SID_CONTEXTS];

        bool hior_explicit;             /* HIOR is set by ioctl, not PVR */

        struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
        struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
        struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
        struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
        int hpte_cache_count;
        spinlock_t mmu_lock;
};

#define CONTEXT_HOST            0
#define CONTEXT_GUEST           1
#define CONTEXT_GUEST_END       2

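/*
 * Magic VSID values and flag bits used to keep apart shadow translations
 * that do not come from an ordinary guest segment: real-mode and BAT
 * mappings, 1T segments, split real mode (IR/DR off) and problem state.
 */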
#define VSID_REAL       0x0fffffffffc00000ULL
#define VSID_BAT        0x0fffffffffb00000ULL
#define VSID_1T         0x1000000000000000ULL
#define VSID_REAL_DR    0x2000000000000000ULL
#define VSID_REAL_IR    0x4000000000000000ULL
#define VSID_PR         0x8000000000000000ULL

extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask);
extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
extern void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr);
extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
extern int kvmppc_book3s_hv_page_fault(struct kvm_run *run,
                        struct kvm_vcpu *vcpu, unsigned long addr,
                        unsigned long status);
extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
                        unsigned long slb_v, unsigned long valid);

extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern int kvmppc_mmu_hpte_sysinit(void);
extern void kvmppc_mmu_hpte_sysexit(void);
extern int kvmppc_mmu_hv_init(void);

extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
extern void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
                                          unsigned int vec);
extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags);
extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
                           bool upper, u32 val);
extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
                        unsigned long *rmap, long pte_index, int realmode);
extern void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
                        unsigned long pte_index);
void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep,
                        unsigned long pte_index);
extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
                        unsigned long *nb_ret);
extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr,
                        unsigned long gpa, bool dirty);
extern long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
                        long pte_index, unsigned long pteh, unsigned long ptel);
extern long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
                        long pte_index, unsigned long pteh, unsigned long ptel,
                        pgd_t *pgdir, bool realmode, unsigned long *idx_ret);
extern long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
                        unsigned long pte_index, unsigned long avpn,
                        unsigned long *hpret);
extern long kvmppc_hv_get_dirty_log(struct kvm *kvm,
                        struct kvm_memory_slot *memslot, unsigned long *map);

extern void kvmppc_entry_trampoline(void);
extern void kvmppc_hv_entry_trampoline(void);
extern void kvmppc_load_up_fpu(void);
extern void kvmppc_load_up_altivec(void);
extern void kvmppc_load_up_vsx(void);
extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);

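/* Get from the embedded struct kvm_vcpu back to the containing book3s vcpu. */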
static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
{
        return container_of(vcpu, struct kvmppc_vcpu_book3s, vcpu);
}

extern void kvm_return_point(void);

/* Also add subarch specific defines */

#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
#include <asm/kvm_book3s_32.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/kvm_book3s_64.h>
#endif

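/*
 * Register accessors.  With PR KVM the frequently used registers live in
 * the shadow vcpu while the guest runs, so they are accessed through
 * svcpu_get()/svcpu_put(); without PR KVM (i.e. HV) they are kept directly
 * in vcpu->arch.
 */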
#ifdef CONFIG_KVM_BOOK3S_PR

static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
{
        return to_book3s(vcpu)->hior;
}

static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
                        unsigned long pending_now, unsigned long old_pending)
{
        if (pending_now)
                vcpu->arch.shared->int_pending = 1;
        else if (old_pending)
                vcpu->arch.shared->int_pending = 0;
}

static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
        if ( num < 14 ) {
                struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
                svcpu->gpr[num] = val;
                svcpu_put(svcpu);
                to_book3s(vcpu)->shadow_vcpu->gpr[num] = val;
        } else
                vcpu->arch.gpr[num] = val;
}

static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
        if ( num < 14 ) {
                struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
                ulong r = svcpu->gpr[num];
                svcpu_put(svcpu);
                return r;
        } else
                return vcpu->arch.gpr[num];
}

static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        svcpu->cr = val;
        svcpu_put(svcpu);
        to_book3s(vcpu)->shadow_vcpu->cr = val;
}

static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        u32 r;
        r = svcpu->cr;
        svcpu_put(svcpu);
        return r;
}

static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
{
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        svcpu->xer = val;
        to_book3s(vcpu)->shadow_vcpu->xer = val;
        svcpu_put(svcpu);
}

static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        u32 r;
        r = svcpu->xer;
        svcpu_put(svcpu);
        return r;
}

static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        svcpu->ctr = val;
        svcpu_put(svcpu);
}

static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
{
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        ulong r;
        r = svcpu->ctr;
        svcpu_put(svcpu);
        return r;
}

static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
{
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        svcpu->lr = val;
        svcpu_put(svcpu);
}

static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
{
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        ulong r;
        r = svcpu->lr;
        svcpu_put(svcpu);
        return r;
}

static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        svcpu->pc = val;
        svcpu_put(svcpu);
}

static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        ulong r;
        r = svcpu->pc;
        svcpu_put(svcpu);
        return r;
}

static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
{
        ulong pc = kvmppc_get_pc(vcpu);
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        u32 r;

        /* Load the instruction manually if it failed to do so in the
         * exit path */
        if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
                kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);

        r = svcpu->last_inst;
        svcpu_put(svcpu);
        return r;
}

/*
 * Like kvmppc_get_last_inst(), but for fetching a sc instruction.
 * Because the sc instruction sets SRR0 to point to the following
 * instruction, we have to fetch from pc - 4.
 */
static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
{
        ulong pc = kvmppc_get_pc(vcpu) - 4;
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        u32 r;

        /* Load the instruction manually if it failed to do so in the
         * exit path */
        if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
                kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);

        r = svcpu->last_inst;
        svcpu_put(svcpu);
        return r;
}

static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        ulong r;
        r = svcpu->fault_dar;
        svcpu_put(svcpu);
        return r;
}

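/*
 * The paravirtualized guest stores r1 in shared->critical while it is in a
 * critical section; as long as that value still matches the current r1 and
 * the guest is in supervisor mode, interrupt delivery is deferred.
 */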
static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
        ulong crit_raw = vcpu->arch.shared->critical;
        ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
        bool crit;

        /* Truncate crit indicators in 32 bit mode */
        if (!(vcpu->arch.shared->msr & MSR_SF)) {
                crit_raw &= 0xffffffff;
                crit_r1 &= 0xffffffff;
        }

        /* Critical section when crit == r1 */
        crit = (crit_raw == crit_r1);
        /* ... and we're in supervisor mode */
        crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

        return crit;
}
#else /* CONFIG_KVM_BOOK3S_PR */

static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
{
        return 0;
}

static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
                        unsigned long pending_now, unsigned long old_pending)
{
}

static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
        vcpu->arch.gpr[num] = val;
}

static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
        return vcpu->arch.gpr[num];
}

static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
        vcpu->arch.cr = val;
}

static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.cr;
}

static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
{
        vcpu->arch.xer = val;
}

static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.xer;
}

static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
        vcpu->arch.ctr = val;
}

static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.ctr;
}

static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
{
        vcpu->arch.lr = val;
}

static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.lr;
}

static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
        vcpu->arch.pc = val;
}

static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.pc;
}

static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
{
        ulong pc = kvmppc_get_pc(vcpu);

        /* Load the instruction manually if it failed to do so in the
         * exit path */
        if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
                kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);

        return vcpu->arch.last_inst;
}

/*
 * Like kvmppc_get_last_inst(), but for fetching a sc instruction.
 * Because the sc instruction sets SRR0 to point to the following
 * instruction, we have to fetch from pc - 4.
 */
static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
{
        ulong pc = kvmppc_get_pc(vcpu) - 4;

        /* Load the instruction manually if it failed to do so in the
         * exit path */
        if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
                kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);

        return vcpu->arch.last_inst;
}

static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.fault_dar;
}

static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
        return false;
}
#endif

/* Magic register values loaded into r3 and r4 before the 'sc' assembly
 * instruction for the OSI hypercalls */
#define OSI_SC_MAGIC_R3                 0x113724FA
#define OSI_SC_MAGIC_R4                 0x77810F9B

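/* Instruction images (dcbz, tw) matched/patched by the emulation code */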
#define INS_DCBZ                        0x7c0007ec
/* TO = 31 for unconditional trap */
#define INS_TW                          0x7fe00008

/* LPIDs we support with this build -- runtime limit may be lower */
#define KVMPPC_NR_LPIDS                 (LPID_RSVD + 1)

#endif /* __ASM_KVM_BOOK3S_H__ */