linux/arch/powerpc/include/asm/kvm_ppc.h
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2008
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#ifndef __POWERPC_KVM_PPC_H__
#define __POWERPC_KVM_PPC_H__

/* This file exists just so we can dereference kvm_vcpu, avoiding nested header
 * dependencies. */

#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/kvm_book3s.h>
#else
#include <asm/kvm_booke.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/paca.h>
#endif
/*
 * KVMPPC_INST_SW_BREAKPOINT is the debug instruction used to implement
 * software breakpoints.
 */
#define KVMPPC_INST_SW_BREAKPOINT       0x00dddd00

enum emulation_result {
        EMULATE_DONE,         /* no further processing */
        EMULATE_DO_MMIO,      /* kvm_run filled with MMIO request */
        EMULATE_FAIL,         /* can't emulate this instruction */
        EMULATE_AGAIN,        /* something went wrong; try again */
        EMULATE_EXIT_USER,    /* emulation requires exit to user-space */
};
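
/*
 * A sketch of how an exit handler might act on these results (assumed
 * shape, not a verbatim handler; the RESUME_* policy and the reaction
 * to EMULATE_FAIL differ between handlers, e.g. booke raises a program
 * interrupt with ESR_PIL):
 *
 *        switch (kvmppc_emulate_instruction(run, vcpu)) {
 *        case EMULATE_DONE:
 *                r = RESUME_GUEST_NV;
 *                break;
 *        case EMULATE_AGAIN:
 *                r = RESUME_GUEST;
 *                break;
 *        case EMULATE_DO_MMIO:
 *                run->exit_reason = KVM_EXIT_MMIO;
 *                r = RESUME_HOST_NV;
 *                break;
 *        case EMULATE_FAIL:
 *                kvmppc_core_queue_program(vcpu, ESR_PIL);
 *                r = RESUME_GUEST;
 *                break;
 *        }
 */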

enum instruction_fetch_type {
        INST_GENERIC,
        INST_SC,                /* system call */
};

enum xlate_instdata {
        XLATE_INST,             /* translate instruction address */
        XLATE_DATA              /* translate data address */
};

enum xlate_readwrite {
        XLATE_READ,             /* check for read permissions */
        XLATE_WRITE             /* check for write permissions */
};

extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern void kvmppc_handler_highmem(void);

extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                              unsigned int rt, unsigned int bytes,
                              int is_default_endian);
extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                               unsigned int rt, unsigned int bytes,
                               int is_default_endian);
extern int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                  unsigned int rt, unsigned int bytes,
                                  int is_default_endian, int mmio_sign_extend);
extern int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                unsigned int rt, unsigned int bytes, int is_default_endian);
extern int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                unsigned int rs, unsigned int bytes, int is_default_endian);
extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                               u64 val, unsigned int bytes,
                               int is_default_endian);
extern int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                   int rs, unsigned int bytes,
                                   int is_default_endian);

extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
                                 enum instruction_fetch_type type, u32 *inst);

extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
                     bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
                     bool data);
extern int kvmppc_emulate_instruction(struct kvm_run *run,
                                      struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);

/* Core-specific hooks */

extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
                           unsigned int gtlb_idx);
extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
                              gva_t eaddr);
extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
                        enum xlate_instdata xlid, enum xlate_readwrite xlrw,
                        struct kvmppc_pte *pte);
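
/*
 * A minimal usage sketch (assumed, not a verbatim caller): translate a
 * guest effective data address for a read, then use the real address on
 * success. kvmppc_xlate() returns a negative errno on failure.
 *
 *        struct kvmppc_pte pte;
 *        int r;
 *
 *        r = kvmppc_xlate(vcpu, eaddr, XLATE_DATA, XLATE_READ, &pte);
 *        if (r < 0)
 *                return r;
 *        gpaddr = pte.raddr;
 */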

extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
                                                unsigned int id);
extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_processor_compat(void);
extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
                                      struct kvm_translation *tr);

extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);

extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                       struct kvm_interrupt *irq);
extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
                                        ulong esr_flags);
extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
                                           ulong dear_flags,
                                           ulong esr_flags);
extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
                                           ulong esr_flags);
extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);

extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);

extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);

extern int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order);
extern void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info);
extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order);
extern void kvmppc_free_hpt(struct kvm_hpt_info *info);
extern void kvmppc_rmap_reset(struct kvm *kvm);
extern long kvmppc_prepare_vrma(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem);
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
                        struct kvm_memory_slot *memslot, unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
                struct iommu_group *grp);
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
                struct iommu_group *grp);
extern int kvmppc_switch_mmu_to_hpt(struct kvm *kvm);
extern int kvmppc_switch_mmu_to_radix(struct kvm *kvm);
extern void kvmppc_setup_partition_table(struct kvm *kvm);

extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
                                struct kvm_create_spapr_tce_64 *args);
extern struct kvmppc_spapr_tce_table *kvmppc_find_table(
                struct kvm *kvm, unsigned long liobn);
#define kvmppc_ioba_validate(stt, ioba, npages)                         \
                (iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \
                                (stt)->size, (ioba), (npages)) ?        \
                                H_PARAMETER : H_SUCCESS)
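
/*
 * Typical use in an H_PUT_TCE-style hcall handler (sketch, assuming stt
 * was looked up with kvmppc_find_table()); the macro yields H_PARAMETER
 * for an out-of-range ioba and H_SUCCESS otherwise:
 *
 *        long ret = kvmppc_ioba_validate(stt, ioba, 1);
 *
 *        if (ret != H_SUCCESS)
 *                return ret;
 */
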
extern long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *tt,
                unsigned long tce);
extern long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
                unsigned long *ua, unsigned long **prmap);
extern void kvmppc_tce_put(struct kvmppc_spapr_tce_table *tt,
                unsigned long idx, unsigned long tce);
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                             unsigned long ioba, unsigned long tce);
extern long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_list, unsigned long npages);
extern long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_value, unsigned long npages);
extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                             unsigned long ioba);
extern struct page *kvm_alloc_hpt_cma(unsigned long nr_pages);
extern void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern void kvmppc_core_free_memslot(struct kvm *kvm,
                                     struct kvm_memory_slot *free,
                                     struct kvm_memory_slot *dont);
extern int kvmppc_core_create_memslot(struct kvm *kvm,
                                      struct kvm_memory_slot *slot,
                                      unsigned long npages);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
                                struct kvm_memory_slot *memslot,
                                const struct kvm_userspace_memory_region *mem);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
                                const struct kvm_userspace_memory_region *mem,
                                const struct kvm_memory_slot *old,
                                const struct kvm_memory_slot *new);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
                                      struct kvm_ppc_smmu_info *info);
extern void kvmppc_core_flush_memslot(struct kvm *kvm,
                                      struct kvm_memory_slot *memslot);

extern int kvmppc_bookehv_init(void);
extern void kvmppc_bookehv_exit(void);

extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);

extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);
extern long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
                                            struct kvm_ppc_resize_hpt *rhpt);
extern long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
                                           struct kvm_ppc_resize_hpt *rhpt);

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);

extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
extern void kvmppc_rtas_tokens_free(struct kvm *kvm);

extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
                                u32 priority);
extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
                                u32 *priority);
extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu);
void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu);

union kvmppc_one_reg {
        u32     wval;
        u64     dval;
        vector128 vval;
        u64     vsxval[2];
        u32     vsx32val[4];
        u16     vsx16val[8];
        u8      vsx8val[16];
        struct {
                u64     addr;
                u64     length;
        }       vpaval;
};

struct kvmppc_ops {
        struct module *owner;
        int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
        int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
        int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
                           union kvmppc_one_reg *val);
        int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
                           union kvmppc_one_reg *val);
        void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
        void (*vcpu_put)(struct kvm_vcpu *vcpu);
        void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
        int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
        struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
        void (*vcpu_free)(struct kvm_vcpu *vcpu);
        int (*check_requests)(struct kvm_vcpu *vcpu);
        int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
        void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
        int (*prepare_memory_region)(struct kvm *kvm,
                                     struct kvm_memory_slot *memslot,
                                     const struct kvm_userspace_memory_region *mem);
        void (*commit_memory_region)(struct kvm *kvm,
                                     const struct kvm_userspace_memory_region *mem,
                                     const struct kvm_memory_slot *old,
                                     const struct kvm_memory_slot *new);
        int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
                               unsigned long end);
        int (*age_hva)(struct kvm *kvm, unsigned long start, unsigned long end);
        int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
        void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
        void (*mmu_destroy)(struct kvm_vcpu *vcpu);
        void (*free_memslot)(struct kvm_memory_slot *free,
                             struct kvm_memory_slot *dont);
        int (*create_memslot)(struct kvm_memory_slot *slot,
                              unsigned long npages);
        int (*init_vm)(struct kvm *kvm);
        void (*destroy_vm)(struct kvm *kvm);
        int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
        int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
                          unsigned int inst, int *advance);
        int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
        int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
        void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
        long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
                              unsigned long arg);
        int (*hcall_implemented)(unsigned long hcall);
        int (*irq_bypass_add_producer)(struct irq_bypass_consumer *,
                                       struct irq_bypass_producer *);
        void (*irq_bypass_del_producer)(struct irq_bypass_consumer *,
                                        struct irq_bypass_producer *);
        int (*configure_mmu)(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg);
        int (*get_rmmu_info)(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
        int (*set_smt_mode)(struct kvm *kvm, unsigned long mode,
                            unsigned long flags);
        void (*giveup_ext)(struct kvm_vcpu *vcpu, ulong msr);
};

extern struct kvmppc_ops *kvmppc_hv_ops;
extern struct kvmppc_ops *kvmppc_pr_ops;

static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
                                enum instruction_fetch_type type, u32 *inst)
{
        int ret = EMULATE_DONE;
        u32 fetched_inst;

        /* Load the instruction manually if the exit path failed to do so */
        if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
                ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);

        /* Write fetch_failed unswapped if the fetch failed */
        if (ret == EMULATE_DONE)
                fetched_inst = kvmppc_need_byteswap(vcpu) ?
                                swab32(vcpu->arch.last_inst) :
                                vcpu->arch.last_inst;
        else
                fetched_inst = vcpu->arch.last_inst;

        *inst = fetched_inst;
        return ret;
}
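
/*
 * Sketch of the usual calling pattern (illustrative, not a verbatim
 * exit handler): re-enter the guest and retry the exit if the
 * instruction still cannot be fetched.
 *
 *        u32 last_inst;
 *
 *        if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) !=
 *                        EMULATE_DONE)
 *                return RESUME_GUEST;
 */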

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
{
        return kvm->arch.kvm_ops == kvmppc_hv_ops;
}

extern int kvmppc_hwrng_present(void);

/*
 * Extracts inst bits numbered according to the Power ISA convention,
 * where bit 0 is the leftmost (most significant) bit. Both msb and lsb
 * are included in the extracted field.
 */
static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
{
        u32 r;
        u32 mask;

        BUG_ON(msb > lsb);

        mask = (1 << (lsb - msb + 1)) - 1;
        r = (inst >> (63 - lsb)) & mask;

        return r;
}
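
/*
 * Worked example (illustrative): with a 32-bit instruction held in the
 * low word of a u64, ISA bit 0 of the instruction corresponds to u64
 * bit 32, so the RT field (instruction bits 6-10) is extracted with:
 *
 *        rt = kvmppc_get_field(inst, 6 + 32, 10 + 32);
 */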

/*
 * Replaces inst bits, using the same bit-numbering convention as
 * kvmppc_get_field().
 */
static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
{
        u32 r;
        u32 mask;

        BUG_ON(msb > lsb);

        mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
        r = (inst & ~mask) | ((value << (63 - lsb)) & mask);

        return r;
}

#define one_reg_size(id)        \
        (1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))

#define get_reg_val(id, reg)    ({              \
        union kvmppc_one_reg __u;               \
        switch (one_reg_size(id)) {             \
        case 4: __u.wval = (reg); break;        \
        case 8: __u.dval = (reg); break;        \
        default: BUG();                         \
        }                                       \
        __u;                                    \
})

#define set_reg_val(id, val)    ({              \
        u64 __v;                                \
        switch (one_reg_size(id)) {             \
        case 4: __v = (val).wval; break;        \
        case 8: __v = (val).dval; break;        \
        default: BUG();                         \
        }                                       \
        __v;                                    \
})
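
/*
 * Sketch of how a get_one_reg/set_one_reg implementation typically uses
 * these macros; the case label and field below are only an illustrative
 * example, the real handlers switch on many more register ids:
 *
 *        case KVM_REG_PPC_DABR:
 *                *val = get_reg_val(id, vcpu->arch.dabr);
 *                break;
 *
 * and on the set side:
 *
 *        case KVM_REG_PPC_DABR:
 *                vcpu->arch.dabr = set_reg_val(id, *val);
 *                break;
 */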

int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);

struct openpic;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void kvm_cma_reserve(void) __init;
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
        paca_ptrs[cpu]->kvm_hstate.xics_phys = (void __iomem *)addr;
}

static inline void kvmppc_set_xive_tima(int cpu,
                                        unsigned long phys_addr,
                                        void __iomem *virt_addr)
{
        paca_ptrs[cpu]->kvm_hstate.xive_tima_phys = (void __iomem *)phys_addr;
        paca_ptrs[cpu]->kvm_hstate.xive_tima_virt = virt_addr;
}

static inline u32 kvmppc_get_xics_latch(void)
{
        u32 xirr;

        xirr = get_paca()->kvm_hstate.saved_xirr;
        get_paca()->kvm_hstate.saved_xirr = 0;
        return xirr;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{
        paca_ptrs[cpu]->kvm_hstate.host_ipi = host_ipi;
}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
        vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
}

extern void kvm_hv_vm_activated(void);
extern void kvm_hv_vm_deactivated(void);
extern bool kvm_hv_mode_active(void);

#else
static inline void __init kvm_cma_reserve(void)
{}

static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}

static inline void kvmppc_set_xive_tima(int cpu,
                                        unsigned long phys_addr,
                                        void __iomem *virt_addr)
{}

static inline u32 kvmppc_get_xics_latch(void)
{
        return 0;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
        kvm_vcpu_kick(vcpu);
}

static inline bool kvm_hv_mode_active(void)             { return false; }

#endif

#ifdef CONFIG_KVM_XICS
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
}

static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
                                struct kvm *kvm)
{
        if (kvm && kvm_irq_bypass)
                return kvm->arch.pimap;
        return NULL;
}

extern void kvmppc_alloc_host_rm_ops(void);
extern void kvmppc_free_host_rm_ops(void);
extern void kvmppc_free_pimap(struct kvm *kvm);
extern int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall);
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
                        struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xics_ipi_action(void);
extern void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long guest_irq,
                                   unsigned long host_irq);
extern void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
                                   unsigned long host_irq);
extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, __be32 xirr,
                                        struct kvmppc_irq_map *irq_map,
                                        struct kvmppc_passthru_irqmap *pimap,
                                        bool *again);

extern int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
                               int level, bool line_status);

extern int h_ipi_redirect;
#else
static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
                                struct kvm *kvm)
        { return NULL; }
static inline void kvmppc_alloc_host_rm_ops(void) {}
static inline void kvmppc_free_host_rm_ops(void) {}
static inline void kvmppc_free_pimap(struct kvm *kvm) {}
static inline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
        { return 0; }
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
        { return 0; }
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
        { return 0; }
#endif

#ifdef CONFIG_KVM_XIVE
/*
 * Below, the first "xive" is the "eXternal Interrupt Virtualization
 * Engine", i.e. the new POWER9 interrupt controller, while the second
 * "xive" is the legacy "eXternal Interrupt Vector Entry", which is the
 * configuration of an interrupt on the "xics" interrupt controller on
 * POWER8 and earlier. These two functions consume or produce a legacy
 * "XIVE" state from the new "XIVE" interrupt controller.
 */
extern int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
                                u32 priority);
extern int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
                                u32 *priority);
extern int kvmppc_xive_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xive_int_off(struct kvm *kvm, u32 irq);
extern void kvmppc_xive_init_module(void);
extern void kvmppc_xive_exit_module(void);

extern int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
                                    struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
                                  struct irq_desc *host_desc);
extern int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
                                  struct irq_desc *host_desc);
extern u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval);

extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
                               int level, bool line_status);
#else
static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
                                       u32 priority) { return -1; }
static inline int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
                                       u32 *priority) { return -1; }
static inline int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) { return -1; }
static inline int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) { return -1; }
static inline void kvmppc_xive_init_module(void) { }
static inline void kvmppc_xive_exit_module(void) { }

static inline int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
                                           struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
static inline void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
                                         struct irq_desc *host_desc) { return -ENODEV; }
static inline int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
                                         struct irq_desc *host_desc) { return -ENODEV; }
static inline u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu) { return 0; }
static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) { return -ENOENT; }

static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
                                      int level, bool line_status) { return -ENODEV; }
#endif /* CONFIG_KVM_XIVE */

/*
 * Prototypes for functions called only from assembler code.
 * Having prototypes reduces sparse errors.
 */
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                         unsigned long ioba, unsigned long tce);
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
                                  unsigned long liobn, unsigned long ioba,
                                  unsigned long tce_list, unsigned long npages);
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
                           unsigned long liobn, unsigned long ioba,
                           unsigned long tce_value, unsigned long npages);
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
                            unsigned int yield_count);
long kvmppc_h_random(struct kvm_vcpu *vcpu);
void kvmhv_commence_exit(int trap);
long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
void kvmppc_subcore_enter_guest(void);
void kvmppc_subcore_exit_guest(void);
long kvmppc_realmode_hmi_handler(void);
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
                    long pte_index, unsigned long pteh, unsigned long ptel);
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
                     unsigned long pte_index, unsigned long avpn);
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu);
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
                      unsigned long pte_index, unsigned long avpn,
                      unsigned long va);
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
                   unsigned long pte_index);
long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
                        unsigned long pte_index);
long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
                        unsigned long pte_index);
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
                          unsigned long slb_v, unsigned int status, bool data);
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu);
unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu);
unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server);
int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
                    unsigned long mfrr);
int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);

/*
 * Host-side operations we want to set up while running in real mode in
 * the guest, operating on the XICS. Currently only VCPU wakeup is
 * supported.
 */

union kvmppc_rm_state {
        unsigned long raw;
        struct {
                u32 in_host;
                u32 rm_action;
        };
};

struct kvmppc_host_rm_core {
        union kvmppc_rm_state rm_state;
        void *rm_data;
        char pad[112];
};

struct kvmppc_host_rm_ops {
        struct kvmppc_host_rm_core      *rm_core;
        void            (*vcpu_kick)(struct kvm_vcpu *vcpu);
};

extern struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;

static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
        return mfspr(SPRN_GEPR);
#elif defined(CONFIG_BOOKE)
        return vcpu->arch.epr;
#else
        return 0;
#endif
}

static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
{
#ifdef CONFIG_KVM_BOOKE_HV
        mtspr(SPRN_GEPR, epr);
#elif defined(CONFIG_BOOKE)
        vcpu->arch.epr = epr;
#endif
}

#ifdef CONFIG_KVM_MPIC

void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu);
int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
                             u32 cpu);
void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu);

#else

static inline void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
{
}

static inline int kvmppc_mpic_connect_vcpu(struct kvm_device *dev,
                struct kvm_vcpu *vcpu, u32 cpu)
{
        return -EINVAL;
}

static inline void kvmppc_mpic_disconnect_vcpu(struct openpic *opp,
                struct kvm_vcpu *vcpu)
{
}

#endif /* CONFIG_KVM_MPIC */

int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
                              struct kvm_config_tlb *cfg);
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
                             struct kvm_dirty_tlb *cfg);

long kvmppc_alloc_lpid(void);
void kvmppc_claim_lpid(long lpid);
void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids);

static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
{
        struct page *page;
        /*
         * We can only access pages that the kernel maps
         * as memory. Bail out for unmapped ones.
         */
        if (!pfn_valid(pfn))
                return;

        /* Clear i-cache for new pages */
        page = pfn_to_page(pfn);
        if (!test_bit(PG_arch_1, &page->flags)) {
                flush_dcache_icache_page(page);
                set_bit(PG_arch_1, &page->flags);
        }
}

/*
 * Shared struct helpers. The shared struct can be little or big endian,
 * depending on the guest endianness, so expose endian-aware accessors
 * for all of its fields.
 */
static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
        /* Only Book3S_64 PR supports bi-endian for now */
        return vcpu->arch.shared_big_endian;
#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
        /* Book3s_64 HV on little endian is always little endian */
        return false;
#else
        return true;
#endif
}

#define SPRNG_WRAPPER_GET(reg, bookehv_spr)                             \
static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu)             \
{                                                                       \
        return mfspr(bookehv_spr);                                      \
}                                                                       \

#define SPRNG_WRAPPER_SET(reg, bookehv_spr)                             \
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val)   \
{                                                                       \
        mtspr(bookehv_spr, val);                                        \
}                                                                       \

#define SHARED_WRAPPER_GET(reg, size)                                   \
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)           \
{                                                                       \
        if (kvmppc_shared_big_endian(vcpu))                             \
               return be##size##_to_cpu(vcpu->arch.shared->reg);        \
        else                                                            \
               return le##size##_to_cpu(vcpu->arch.shared->reg);        \
}                                                                       \

#define SHARED_WRAPPER_SET(reg, size)                                   \
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \
{                                                                       \
        if (kvmppc_shared_big_endian(vcpu))                             \
               vcpu->arch.shared->reg = cpu_to_be##size(val);           \
        else                                                            \
               vcpu->arch.shared->reg = cpu_to_le##size(val);           \
}                                                                       \

#define SHARED_WRAPPER(reg, size)                                       \
        SHARED_WRAPPER_GET(reg, size)                                   \
        SHARED_WRAPPER_SET(reg, size)                                   \

#define SPRNG_WRAPPER(reg, bookehv_spr)                                 \
        SPRNG_WRAPPER_GET(reg, bookehv_spr)                             \
        SPRNG_WRAPPER_SET(reg, bookehv_spr)                             \

#ifdef CONFIG_KVM_BOOKE_HV

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)                    \
        SPRNG_WRAPPER(reg, bookehv_spr)                                 \

#else

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)                    \
        SHARED_WRAPPER(reg, size)                                       \

#endif

SHARED_WRAPPER(critical, 64)
SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
SHARED_WRAPPER_GET(msr, 64)
static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
        if (kvmppc_shared_big_endian(vcpu))
               vcpu->arch.shared->msr = cpu_to_be64(val);
        else
               vcpu->arch.shared->msr = cpu_to_le64(val);
}
SHARED_WRAPPER(dsisr, 32)
SHARED_WRAPPER(int_pending, 32)
SHARED_WRAPPER(sprg4, 64)
SHARED_WRAPPER(sprg5, 64)
SHARED_WRAPPER(sprg6, 64)
SHARED_WRAPPER(sprg7, 64)

static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
{
        if (kvmppc_shared_big_endian(vcpu))
               return be32_to_cpu(vcpu->arch.shared->sr[nr]);
        else
               return le32_to_cpu(vcpu->arch.shared->sr[nr]);
}

static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
{
        if (kvmppc_shared_big_endian(vcpu))
               vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
        else
               vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
}
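
/*
 * The wrappers above expand to endian-correct accessors such as
 * kvmppc_get_srr0()/kvmppc_set_srr0(). Illustrative use when reflecting
 * an interrupt to the guest (sketch only):
 *
 *        kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
 *        kvmppc_set_srr1(vcpu, kvmppc_get_msr(vcpu));
 */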

/*
 * Please call after prepare_to_enter. This function puts the lazy ee and irq
 * disabled tracking state back to normal mode, without actually enabling
 * interrupts.
 */
static inline void kvmppc_fix_ee_before_entry(void)
{
        trace_hardirqs_on();

#ifdef CONFIG_PPC64
        /*
         * To avoid races, the caller must have gone directly from having
         * interrupts fully-enabled to hard-disabled.
         */
        WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);

        /* Only need to enable IRQs by hard enabling them after this */
        local_paca->irq_happened = 0;
        irq_soft_mask_set(IRQS_ENABLED);
#endif
}
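
/*
 * Typical call order (sketch, assumed from the description above):
 * check for pending signals/requests first, then restore the lazy-EE
 * state just before entering the guest.
 *
 *        r = kvmppc_prepare_to_enter(vcpu);
 *        if (r <= 0)
 *                return r;
 *        kvmppc_fix_ee_before_entry();
 */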

static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
{
        ulong ea;
        ulong msr_64bit = 0;

        ea = kvmppc_get_gpr(vcpu, rb);
        if (ra)
                ea += kvmppc_get_gpr(vcpu, ra);

#if defined(CONFIG_PPC_BOOK3E_64)
        msr_64bit = MSR_CM;
#elif defined(CONFIG_PPC_BOOK3S_64)
        msr_64bit = MSR_SF;
#endif

        if (!(kvmppc_get_msr(vcpu) & msr_64bit))
                ea = (uint32_t)ea;

        return ea;
}

extern void xics_wake_cpu(int cpu);

#endif /* __POWERPC_KVM_PPC_H__ */