linux/arch/powerpc/include/asm/kvm_book3s.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_H__
#define __ASM_KVM_BOOK3S_H__

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_book3s_asm.h>

/* Shadow state for one guest block address translation (BAT) register pair */
struct kvmppc_bat {
        u64 raw;                /* raw upper/lower SPR values */
        u32 bepi;               /* block effective page index */
        u32 bepi_mask;          /* mask derived from the block length */
        u32 brpn;               /* block real page number */
        u8 wimg;                /* storage attribute (WIMG) bits */
        u8 pp;                  /* page protection bits */
        bool vs         : 1;    /* valid in supervisor state */
        bool vp         : 1;    /* valid in problem (user) state */
};

/* One guest segment ID -> host VSID mapping in the SID hash table */
struct kvmppc_sid_map {
        u64 guest_vsid;
        u64 guest_esid;
        u64 host_vsid;
        bool valid      : 1;
};

#define SID_MAP_BITS    9
#define SID_MAP_NUM     (1 << SID_MAP_BITS)
#define SID_MAP_MASK    (SID_MAP_NUM - 1)
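
/*
 * Illustrative lookup sketch (sid_map_hash is a hypothetical fold, not the
 * in-tree helper): the guest VSID is hashed down to SID_MAP_BITS bits and
 * masked to index sid_map[]:
 *
 *	static u16 sid_map_hash(u64 gvsid)
 *	{
 *		return (gvsid ^ (gvsid >> SID_MAP_BITS)) & SID_MAP_MASK;
 *	}
 *
 *	struct kvmppc_sid_map *map =
 *		&to_book3s(vcpu)->sid_map[sid_map_hash(gvsid)];
 *	if (map->valid && map->guest_vsid == gvsid)
 *		return map->host_vsid;
 */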

#ifdef CONFIG_PPC_BOOK3S_64
#define SID_CONTEXTS    1
#else
#define SID_CONTEXTS    128
#define VSID_POOL_SIZE  (SID_CONTEXTS * 16)
#endif

struct hpte_cache {
        struct hlist_node list_pte;
        struct hlist_node list_pte_long;
        struct hlist_node list_vpte;
        struct hlist_node list_vpte_long;
#ifdef CONFIG_PPC_BOOK3S_64
        struct hlist_node list_vpte_64k;
#endif
        struct rcu_head rcu_head;
        u64 host_vpn;
        u64 pfn;
        ulong slot;
        struct kvmppc_pte pte;
        int pagesize;
};
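
/*
 * Note: each hpte_cache entry is chained on several hash lists at once
 * (by guest effective address, by virtual page number, and coarser "long"
 * and 64k variants) so shadow PTEs can be found and flushed at different
 * granularities without scanning the whole cache.
 */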

/*
 * Struct for a virtual core.
 * Note: entry_exit_map combines a bitmap of threads that have entered
 * in the bottom 8 bits and a bitmap of threads that have exited in the
 * next 8 bits.  This is so that we can atomically set the entry bit
 * iff the exit map is 0 without taking a lock.
 */
struct kvmppc_vcore {
        int n_runnable;
        int num_threads;
        int entry_exit_map;
        int napping_threads;
        int first_vcpuid;
        u16 pcpu;
        u16 last_cpu;
        u8 vcore_state;
        u8 in_guest;
        struct kvm_vcpu *runnable_threads[MAX_SMT_THREADS];
        struct list_head preempt_list;
        spinlock_t lock;
        struct rcuwait wait;
        spinlock_t stoltb_lock; /* protects stolen_tb and preempt_tb */
        u64 stolen_tb;
        u64 preempt_tb;
        struct kvm_vcpu *runner;
        struct kvm *kvm;
        u64 tb_offset;          /* guest timebase - host timebase */
        u64 tb_offset_applied;  /* timebase offset currently in force */
        ulong lpcr;
        u32 arch_compat;
        ulong pcr;
        ulong dpdes;            /* doorbell state (POWER8) */
        ulong vtb;              /* virtual timebase */
        ulong conferring_threads;
        unsigned int halt_poll_ns;
        atomic_t online_count;
};
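
/*
 * Illustrative sketch (try_enter_vcore is hypothetical): the layout of
 * entry_exit_map described above lets a thread set its entry bit iff no
 * thread has exited yet, with a cmpxchg loop instead of taking vc->lock:
 *
 *	static bool try_enter_vcore(struct kvmppc_vcore *vc, int t)
 *	{
 *		int old, new;
 *
 *		do {
 *			old = vc->entry_exit_map;
 *			if (old & 0xff00)	// some thread already exited
 *				return false;
 *			new = old | (1 << t);	// mark thread t as entered
 *		} while (cmpxchg(&vc->entry_exit_map, old, new) != old);
 *		return true;
 *	}
 */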

struct kvmppc_vcpu_book3s {
        struct kvmppc_sid_map sid_map[SID_MAP_NUM];
        struct {
                u64 esid;
                u64 vsid;
        } slb_shadow[64];
        u8 slb_shadow_max;
        struct kvmppc_bat ibat[8];
        struct kvmppc_bat dbat[8];
        u64 hid[6];
        u64 gqr[8];
        u64 sdr1;
        u64 hior;
        u64 msr_mask;
        u64 vtb;
#ifdef CONFIG_PPC_BOOK3S_32
        u32 vsid_pool[VSID_POOL_SIZE];
        u32 vsid_next;
#else
        u64 proto_vsid_first;
        u64 proto_vsid_max;
        u64 proto_vsid_next;
#endif
        int context_id[SID_CONTEXTS];

        bool hior_explicit;             /* HIOR is set by ioctl, not PVR */

        struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
        struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
        struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
        struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
#ifdef CONFIG_PPC_BOOK3S_64
        struct hlist_head hpte_hash_vpte_64k[HPTEG_HASH_NUM_VPTE_64K];
#endif
        int hpte_cache_count;
        spinlock_t mmu_lock;
};

#define VSID_REAL       0x07ffffffffc00000ULL
#define VSID_BAT        0x07ffffffffb00000ULL
#define VSID_64K        0x0800000000000000ULL
#define VSID_1T         0x1000000000000000ULL
#define VSID_REAL_DR    0x2000000000000000ULL
#define VSID_REAL_IR    0x4000000000000000ULL
#define VSID_PR         0x8000000000000000ULL
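
/*
 * Illustrative note (a sketch of intended use, not a quote of the MMU
 * code): the VSID_* values above are ORed into a guest proto-VSID so that
 * shadow translations made in different modes never alias, e.g. for a
 * real-mode data access something like:
 *
 *	gvsid = VSID_REAL_DR | (eaddr >> SID_SHIFT);	// SID_SHIFT from the subarch headers
 */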

extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask);
extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
                               bool iswrite);
extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
extern int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
                        unsigned long addr, unsigned long status);
extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
                        unsigned long slb_v, unsigned long valid);
extern int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
                        unsigned long gpa, gva_t ea, int is_store);

extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte);
extern void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern int kvmppc_mmu_hpte_sysinit(void);
extern void kvmppc_mmu_hpte_sysexit(void);
extern int kvmppc_mmu_hv_init(void);
extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc);

extern int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
                        unsigned long ea, unsigned long dsisr);
extern unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
                                        gva_t eaddr, void *to, void *from,
                                        unsigned long n);
extern long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
                                        void *to, unsigned long n);
extern long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
                                      void *from, unsigned long n);
extern int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
                                      struct kvmppc_pte *gpte, u64 root,
                                      u64 *pte_ret_p);
extern int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
                        struct kvmppc_pte *gpte, u64 table,
                        int table_index, u64 *pte_ret_p);
extern int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
                        struct kvmppc_pte *gpte, bool data, bool iswrite);
extern void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
                                    unsigned int pshift, unsigned int lpid);
extern void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
                        unsigned int shift,
                        const struct kvm_memory_slot *memslot,
                        unsigned int lpid);
extern bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested,
                                    bool writing, unsigned long gpa,
                                    unsigned int lpid);
extern int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
                                unsigned long gpa,
                                struct kvm_memory_slot *memslot,
                                bool writing, bool kvm_ro,
                                pte_t *inserted_pte, unsigned int *levelp);
extern int kvmppc_init_vm_radix(struct kvm *kvm);
extern void kvmppc_free_radix(struct kvm *kvm);
extern void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd,
                                      unsigned int lpid);
extern int kvmppc_radix_init(void);
extern void kvmppc_radix_exit(void);
extern void kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
                            unsigned long gfn);
extern bool kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
                          unsigned long gfn);
extern bool kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
                               unsigned long gfn);
extern long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
                        struct kvm_memory_slot *memslot, unsigned long *map);
extern void kvmppc_radix_flush_memslot(struct kvm *kvm,
                        const struct kvm_memory_slot *memslot);
extern int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);

/* XXX remove this export when load_last_inst() is generic */
extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
extern void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
                                          unsigned int vec);
extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags);
extern void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac);
extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
                           bool upper, u32 val);
extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
extern int kvmppc_emulate_paired_single(struct kvm_vcpu *vcpu);
extern kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa,
                        bool writing, bool *writable);
extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
                        unsigned long *rmap, long pte_index, int realmode);
extern void kvmppc_update_dirty_map(const struct kvm_memory_slot *memslot,
                        unsigned long gfn, unsigned long psize);
extern void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
                        unsigned long pte_index);
extern void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
                        unsigned long pte_index);
extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
                        unsigned long *nb_ret);
extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr,
                        unsigned long gpa, bool dirty);
extern long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
                        long pte_index, unsigned long pteh, unsigned long ptel,
                        pgd_t *pgdir, bool realmode, unsigned long *idx_ret);
extern long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
                        unsigned long pte_index, unsigned long avpn,
                        unsigned long *hpret);
extern long kvmppc_hv_get_dirty_log_hpt(struct kvm *kvm,
                        struct kvm_memory_slot *memslot, unsigned long *map);
extern void kvmppc_harvest_vpa_dirty(struct kvmppc_vpa *vpa,
                        struct kvm_memory_slot *memslot,
                        unsigned long *map);
extern unsigned long kvmppc_filter_lpcr_hv(struct kvm *kvm,
                        unsigned long lpcr);
extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
                        unsigned long mask);
extern void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr);

extern int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu);
extern int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu);
extern void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu);

extern void kvmppc_entry_trampoline(void);
extern void kvmppc_hv_entry_trampoline(void);
extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
extern void kvmppc_pr_init_default_hcalls(struct kvm *kvm);
extern int kvmppc_hcall_impl_pr(unsigned long cmd);
extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd);
extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu);

long kvmppc_read_intr(void);
void kvmppc_bad_interrupt(struct pt_regs *regs);
void kvmhv_p9_set_lpcr(struct kvm_split_mode *sip);
void kvmhv_p9_restore_lpcr(struct kvm_split_mode *sip);
void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr);
void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu);
void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu);
void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu);
void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu);
#else
static inline void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) {}
#endif

long kvmhv_nested_init(void);
void kvmhv_nested_exit(void);
void kvmhv_vm_nested_init(struct kvm *kvm);
long kvmhv_set_partition_table(struct kvm_vcpu *vcpu);
long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu);
void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1);
void kvmhv_release_all_nested(struct kvm *kvm);
long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu);
long do_h_rpt_invalidate_pat(struct kvm_vcpu *vcpu, unsigned long lpid,
                             unsigned long type, unsigned long pg_sizes,
                             unsigned long start, unsigned long end);
int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu,
                          u64 time_limit, unsigned long lpcr);
void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr);
void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
                                   struct hv_guest_state *hr);
long kvmhv_nested_page_fault(struct kvm_vcpu *vcpu);

void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);

extern int kvm_irq_bypass;

static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.book3s;
}

/* Also add subarch specific defines */

#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
#include <asm/kvm_book3s_32.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/kvm_book3s_64.h>
#endif

static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
        vcpu->arch.regs.gpr[num] = val;
}

static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
        return vcpu->arch.regs.gpr[num];
}
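
/*
 * Illustrative usage (hypothetical handler): PAPR hypercalls pass the
 * hcall number in r3 and arguments in r4..r10, and return a status in r3:
 *
 *	unsigned long cmd = kvmppc_get_gpr(vcpu, 3);
 *	unsigned long arg = kvmppc_get_gpr(vcpu, 4);
 *	// ... handle the hcall ...
 *	kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
 */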

static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
        vcpu->arch.regs.ccr = val;
}

static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.regs.ccr;
}

static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
{
        vcpu->arch.regs.xer = val;
}

static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.regs.xer;
}

static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
        vcpu->arch.regs.ctr = val;
}

static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.regs.ctr;
}

static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
{
        vcpu->arch.regs.link = val;
}

static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.regs.link;
}

static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
        vcpu->arch.regs.nip = val;
}

static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.regs.nip;
}

static inline u64 kvmppc_get_msr(struct kvm_vcpu *vcpu);
static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
{
        return (kvmppc_get_msr(vcpu) & MSR_LE) != (MSR_KERNEL & MSR_LE);
}

static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.fault_dar;
}

static inline bool is_kvmppc_resume_guest(int r)
{
        return (r == RESUME_GUEST || r == RESUME_GUEST_NV);
}

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm);
static inline bool kvmppc_supports_magic_page(struct kvm_vcpu *vcpu)
{
        /* Only PR KVM supports the magic page */
        return !is_kvmppc_hv_enabled(vcpu->kvm);
}

extern int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu);
extern int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu);

/*
 * Magic register values loaded into r3 and r4 before the 'sc' assembly
 * instruction for the OSI hypercalls
 */
#define OSI_SC_MAGIC_R3                 0x113724FA
#define OSI_SC_MAGIC_R4                 0x77810F9B
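
/*
 * Illustrative guest-side sequence (sketch): an OSI call loads the magic
 * values into r3/r4 before trapping with 'sc':
 *
 *	lis	r3, OSI_SC_MAGIC_R3@h
 *	ori	r3, r3, OSI_SC_MAGIC_R3@l
 *	lis	r4, OSI_SC_MAGIC_R4@h
 *	ori	r4, r4, OSI_SC_MAGIC_R4@l
 *	sc
 */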

#define INS_DCBZ                        0x7c0007ec
/* TO = 31 for unconditional trap */
#define INS_TW                          0x7fe00008

#define SPLIT_HACK_MASK                 0xff000000
#define SPLIT_HACK_OFFS                 0xfb000000

/*
 * This packs a VCPU ID from the [0..KVM_MAX_VCPU_ID) space down to the
 * [0..KVM_MAX_VCPUS) space, using knowledge of the guest's core stride
 * (but not its actual threading mode, which is not available) to avoid
 * collisions.
 *
 * The implementation leaves VCPU IDs from the range [0..KVM_MAX_VCPUS) (block
 * 0) unchanged: if the guest is filling each VCORE completely then it will be
 * using consecutive IDs and it will fill the space without any packing.
 *
 * For higher VCPU IDs, the packed ID is based on the VCPU ID modulo
 * KVM_MAX_VCPUS (effectively masking off the top bits) and then an offset is
 * added to avoid collisions.
 *
 * VCPU IDs in the range [KVM_MAX_VCPUS..(KVM_MAX_VCPUS*2)) (block 1) are only
 * possible if the guest is leaving at least 1/2 of each VCORE empty, so IDs
 * can be safely packed into the second half of each VCORE by adding an offset
 * of (stride / 2).
 *
 * Similarly, if VCPU IDs in the range [(KVM_MAX_VCPUS*2)..(KVM_MAX_VCPUS*4))
 * (blocks 2 and 3) are seen, the guest must be leaving at least 3/4 of each
 * VCORE empty so packed IDs can be offset by (stride / 4) and (stride * 3 / 4).
 *
 * Finally, VCPU IDs from blocks 4..7 will only be seen if the guest is using a
 * stride of 8 and 1 thread per core, so the remaining offsets of 1, 5, 3 and 7
 * must be free to use.
 *
 * (The offsets for each block are stored in block_offsets[], indexed by the
 * block number if the stride is 8. For cases where the guest's stride is less
 * than 8, we can re-use the block_offsets array by multiplying the block
 * number by (MAX_SMT_THREADS / stride) to reach the correct entry.)
 */
static inline u32 kvmppc_pack_vcpu_id(struct kvm *kvm, u32 id)
{
        const int block_offsets[MAX_SMT_THREADS] = {0, 4, 2, 6, 1, 5, 3, 7};
        int stride = kvm->arch.emul_smt_mode;
        int block = (id / KVM_MAX_VCPUS) * (MAX_SMT_THREADS / stride);
        u32 packed_id;

        if (WARN_ONCE(block >= MAX_SMT_THREADS, "VCPU ID too large to pack"))
                return 0;
        packed_id = (id % KVM_MAX_VCPUS) + block_offsets[block];
        if (WARN_ONCE(packed_id >= KVM_MAX_VCPUS, "VCPU ID packing failed"))
                return 0;
        return packed_id;
}
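
/*
 * Worked example (assuming KVM_MAX_VCPUS == 1024 and an emul_smt_mode,
 * i.e. stride, of 8): VCPU ID 1025 falls in block 1, so
 * block = (1025 / 1024) * (8 / 8) = 1 and
 * packed_id = (1025 % 1024) + block_offsets[1] = 1 + 4 = 5,
 * packing the ID into the second half of the first VCORE.
 */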

#endif /* __ASM_KVM_BOOK3S_H__ */