/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_H__
#define __ASM_KVM_BOOK3S_H__

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_book3s_asm.h>

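/*
 * Shadow state for one guest BAT (Block Address Translation) register:
 * the raw register image plus the decoded fields used for lookups.
 */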
struct kvmppc_bat {
	u64 raw;
	u32 bepi;
	u32 bepi_mask;
	u32 brpn;
	u8 wimg;
	u8 pp;
	bool vs : 1;
	bool vp : 1;
};

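/* One guest ESID -> VSID mapping and the host VSID that shadows it */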
struct kvmppc_sid_map {
	u64 guest_vsid;
	u64 guest_esid;
	u64 host_vsid;
	bool valid : 1;
};

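/* Size of the hash table used to map guest segment IDs to host VSIDs */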
#define SID_MAP_BITS 9
#define SID_MAP_NUM (1 << SID_MAP_BITS)
#define SID_MAP_MASK (SID_MAP_NUM - 1)

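/*
 * Host MMU contexts reserved for guest segment IDs.  A single context
 * suffices on 64-bit hosts; 32-bit hosts pool 16 VSIDs per context.
 */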
#ifdef CONFIG_PPC_BOOK3S_64
#define SID_CONTEXTS 1
#else
#define SID_CONTEXTS 128
#define VSID_POOL_SIZE (SID_CONTEXTS * 16)
#endif

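/*
 * One entry in the shadow HPTE cache: the guest PTE being shadowed,
 * the host mapping backing it, and hash-list nodes for the various
 * lookup indexes (by PTE, virtual page and, on 64-bit, 64k page).
 */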
struct hpte_cache {
	struct hlist_node list_pte;
	struct hlist_node list_pte_long;
	struct hlist_node list_vpte;
	struct hlist_node list_vpte_long;
#ifdef CONFIG_PPC_BOOK3S_64
	struct hlist_node list_vpte_64k;
#endif
	struct rcu_head rcu_head;
	u64 host_vpn;
	u64 pfn;
	ulong slot;
	struct kvmppc_pte pte;
	int pagesize;
};
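
/*
 * Struct for a virtual core.
 * entry_exit_map packs a bitmap of threads that have entered the guest
 * in its low 8 bits and a bitmap of threads that have exited in the
 * next 8 bits, so entry and exit state can be updated atomically.
 */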
struct kvmppc_vcore {
	int n_runnable;
	int num_threads;
	int entry_exit_map;
	int napping_threads;
	int first_vcpuid;
	u16 pcpu;
	u16 last_cpu;
	u8 vcore_state;
	u8 in_guest;
	struct kvm_vcpu *runnable_threads[MAX_SMT_THREADS];
	struct list_head preempt_list;
	spinlock_t lock;
	struct rcuwait wait;
	spinlock_t stoltb_lock;	/* protects stolen_tb and preempt_tb */
	u64 stolen_tb;
	u64 preempt_tb;
	struct kvm_vcpu *runner;
	struct kvm *kvm;
	u64 tb_offset;		/* guest timebase - host timebase */
	u64 tb_offset_applied;	/* timebase offset currently in force */
	ulong lpcr;
	u32 arch_compat;
	ulong pcr;
	ulong dpdes;		/* doorbell state (POWER8) */
	ulong vtb;		/* virtual timebase */
	ulong conferring_threads;
	unsigned int halt_poll_ns;
	atomic_t online_count;
};

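/*
 * Book3S-specific vCPU MMU state: the shadow SID map, shadow SLB and
 * BAT images, emulated SPRs, and the hash lists heading the shadow
 * HPTE cache.
 */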
struct kvmppc_vcpu_book3s {
	struct kvmppc_sid_map sid_map[SID_MAP_NUM];
	struct {
		u64 esid;
		u64 vsid;
	} slb_shadow[64];
	u8 slb_shadow_max;
	struct kvmppc_bat ibat[8];
	struct kvmppc_bat dbat[8];
	u64 hid[6];
	u64 gqr[8];
	u64 sdr1;
	u64 hior;
	u64 msr_mask;
	u64 vtb;
#ifdef CONFIG_PPC_BOOK3S_32
	u32 vsid_pool[VSID_POOL_SIZE];
	u32 vsid_next;
#else
	u64 proto_vsid_first;
	u64 proto_vsid_max;
	u64 proto_vsid_next;
#endif
	int context_id[SID_CONTEXTS];

	bool hior_explicit;	/* HIOR is set by ioctl, not PVR */

	struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
	struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
	struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
	struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
#ifdef CONFIG_PPC_BOOK3S_64
	struct hlist_head hpte_hash_vpte_64k[HPTEG_HASH_NUM_VPTE_64K];
#endif
	int hpte_cache_count;
	spinlock_t mmu_lock;
};

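/*
 * Flag bits folded into the shadow VSID so that translations created
 * under different guest modes (real mode with IR/DR off, BAT mappings,
 * 64k pages, 1T segments, problem state) do not alias one another.
 */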
#define VSID_REAL	0x07ffffffffc00000ULL
#define VSID_BAT	0x07ffffffffb00000ULL
#define VSID_64K	0x0800000000000000ULL
#define VSID_1T		0x1000000000000000ULL
#define VSID_REAL_DR	0x2000000000000000ULL
#define VSID_REAL_IR	0x4000000000000000ULL
#define VSID_PR		0x8000000000000000ULL

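/* Shadow MMU setup, guest translation and flushing */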
extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask);
extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
			       bool iswrite);
extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
extern int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
			unsigned long addr, unsigned long status);
extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
			unsigned long slb_v, unsigned long valid);
extern int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
			unsigned long gpa, gva_t ea, int is_store);

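/* Shadow HPTE cache management */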
extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte);
extern void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern int kvmppc_mmu_hpte_sysinit(void);
extern void kvmppc_mmu_hpte_sysexit(void);
extern int kvmppc_mmu_hv_init(void);
extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc);

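/* Radix MMU (ISA v3.0) guest support */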
extern int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
			unsigned long ea, unsigned long dsisr);
extern unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
			gva_t eaddr, void *to, void *from,
			unsigned long n);
extern long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
			void *to, unsigned long n);
extern long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
			void *from, unsigned long n);
extern int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, u64 root,
			u64 *pte_ret_p);
extern int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, u64 table,
			int table_index, u64 *pte_ret_p);
extern int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, bool data, bool iswrite);
extern void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
			unsigned int pshift, unsigned int lpid);
extern void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
			unsigned int shift,
			const struct kvm_memory_slot *memslot,
			unsigned int lpid);
extern bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested,
			bool writing, unsigned long gpa,
			unsigned int lpid);
extern int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
			unsigned long gpa,
			struct kvm_memory_slot *memslot,
			bool writing, bool kvm_ro,
			pte_t *inserted_pte, unsigned int *levelp);
extern int kvmppc_init_vm_radix(struct kvm *kvm);
extern void kvmppc_free_radix(struct kvm *kvm);
extern void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd,
			unsigned int lpid);
extern int kvmppc_radix_init(void);
extern void kvmppc_radix_exit(void);
extern int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
			unsigned long gfn);
extern int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
			unsigned long gfn);
extern int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
			unsigned long gfn);
extern long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
			struct kvm_memory_slot *memslot, unsigned long *map);
extern void kvmppc_radix_flush_memslot(struct kvm *kvm,
			const struct kvm_memory_slot *memslot);
extern int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);

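/* Interrupt delivery, guest memory access and HPT manipulation helpers */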
extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
extern void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
			unsigned int vec);
extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags);
extern void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac);
extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
			bool upper, u32 val);
extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
extern int kvmppc_emulate_paired_single(struct kvm_vcpu *vcpu);
extern kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa,
			bool writing, bool *writable);
extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
			unsigned long *rmap, long pte_index, int realmode);
extern void kvmppc_update_dirty_map(const struct kvm_memory_slot *memslot,
			unsigned long gfn, unsigned long psize);
extern void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
			unsigned long pte_index);
void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
			unsigned long pte_index);
extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
			unsigned long *nb_ret);
extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr,
			unsigned long gpa, bool dirty);
extern long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
			long pte_index, unsigned long pteh, unsigned long ptel,
			pgd_t *pgdir, bool realmode, unsigned long *idx_ret);
extern long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
			unsigned long pte_index, unsigned long avpn,
			unsigned long *hpret);
extern long kvmppc_hv_get_dirty_log_hpt(struct kvm *kvm,
			struct kvm_memory_slot *memslot, unsigned long *map);
extern void kvmppc_harvest_vpa_dirty(struct kvmppc_vpa *vpa,
			struct kvm_memory_slot *memslot,
			unsigned long *map);
extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
			unsigned long mask);
extern void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr);

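/* Transactional memory instruction emulation (POWER9) */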
extern int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu);
extern int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu);
extern void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu);

extern void kvmppc_entry_trampoline(void);
extern void kvmppc_hv_entry_trampoline(void);
extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
extern void kvmppc_pr_init_default_hcalls(struct kvm *kvm);
extern int kvmppc_hcall_impl_pr(unsigned long cmd);
extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd);
extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu);

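/*
 * PR KVM: save/restore of the guest's TM SPRs and checkpointed state.
 * These compile to no-ops when CONFIG_PPC_TRANSACTIONAL_MEM is off.
 */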
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu);
void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu);
void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu);
void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu);
#else
static inline void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) {}
#endif

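/* Nested HV virtualization: running an L2 guest under an L1 hypervisor */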
long kvmhv_nested_init(void);
void kvmhv_nested_exit(void);
void kvmhv_vm_nested_init(struct kvm *kvm);
long kvmhv_set_partition_table(struct kvm_vcpu *vcpu);
long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu);
void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1);
void kvmhv_release_all_nested(struct kvm *kvm);
long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu);
int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu,
			  u64 time_limit, unsigned long lpcr);
void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr);
void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
				   struct hv_guest_state *hr);
long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu);

void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);

extern int kvm_irq_bypass;

static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.book3s;
}

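/* Also add subarch specific defines */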
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
#include <asm/kvm_book3s_32.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/kvm_book3s_64.h>
#endif

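/* Accessors for the guest register image kept in vcpu->arch.regs */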
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
	vcpu->arch.regs.gpr[num] = val;
}

static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
	return vcpu->arch.regs.gpr[num];
}

static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
	vcpu->arch.regs.ccr = val;
}

static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.regs.ccr;
}

static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.xer = val;
}

static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.regs.xer;
}

static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.ctr = val;
}

static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.regs.ctr;
}

static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.link = val;
}

static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.regs.link;
}

static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.nip = val;
}

static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.regs.nip;
}

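/*
 * The guest's endianness is given by MSR_LE; if it differs from the
 * endianness the kernel was built with, data fetched from the guest
 * for emulation must be byteswapped.
 */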
static inline u64 kvmppc_get_msr(struct kvm_vcpu *vcpu);
static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
{
	return (kvmppc_get_msr(vcpu) & MSR_LE) != (MSR_KERNEL & MSR_LE);
}

static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault_dar;
}

static inline bool is_kvmppc_resume_guest(int r)
{
	return (r == RESUME_GUEST || r == RESUME_GUEST_NV);
}

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm);
static inline bool kvmppc_supports_magic_page(struct kvm_vcpu *vcpu)
{
	/* Only PR KVM supports the magic page */
	return !is_kvmppc_hv_enabled(vcpu->kvm);
}

extern int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu);
extern int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu);

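/*
 * Magic values loaded into r3 and r4 before the 'sc' instruction
 * for an OSI (Mac-on-Linux) hypercall.
 */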
#define OSI_SC_MAGIC_R3		0x113724FA
#define OSI_SC_MAGIC_R4		0x77810F9B

#define INS_DCBZ		0x7c0007ec

/* TO = 31 for unconditional trap */
#define INS_TW			0x7fe00008

/* Mask and offset used by PR KVM's split real-mode address handling */
#define SPLIT_HACK_MASK		0xff000000
#define SPLIT_HACK_OFFS		0xfb000000
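
/*
 * Pack a VCPU ID from a potentially sparse ID space into
 * [0..KVM_MAX_VCPUS).  IDs above KVM_MAX_VCPUS are folded into SMT
 * thread slots that the emulated SMT mode leaves unused;
 * block_offsets[] spreads successive ID blocks across those slots.
 */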
static inline u32 kvmppc_pack_vcpu_id(struct kvm *kvm, u32 id)
{
	const int block_offsets[MAX_SMT_THREADS] = {0, 4, 2, 6, 1, 5, 3, 7};
	int stride = kvm->arch.emul_smt_mode;
	int block = (id / KVM_MAX_VCPUS) * (MAX_SMT_THREADS / stride);
	u32 packed_id;

	if (WARN_ONCE(block >= MAX_SMT_THREADS, "VCPU ID too large to pack"))
		return 0;
	packed_id = (id % KVM_MAX_VCPUS) + block_offsets[block];
	if (WARN_ONCE(packed_id >= KVM_MAX_VCPUS, "VCPU ID packing failed"))
		return 0;
	return packed_id;
}

#endif /* __ASM_KVM_BOOK3S_H__ */