#ifndef __ASM_KVM_BOOK3S_H__
#define __ASM_KVM_BOOK3S_H__

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_book3s_asm.h>

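/*
 * Shadow copy of one guest BAT (Block Address Translation) register pair:
 * bepi/bepi_mask describe the effective-address block, brpn the real page
 * number it maps to, wimg/pp the storage attributes and protection, and
 * vs/vp the supervisor/user valid bits.
 */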
struct kvmppc_bat {
	u64 raw;
	u32 bepi;
	u32 bepi_mask;
	u32 brpn;
	u8 wimg;
	u8 pp;
	bool vs : 1;
	bool vp : 1;
};

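/*
 * One entry of the guest-to-host segment map: the host VSID that backs a
 * given guest ESID/VSID pair while the guest is running.
 */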
struct kvmppc_sid_map {
	u64 guest_vsid;
	u64 guest_esid;
	u64 host_vsid;
	bool valid : 1;
};

#define SID_MAP_BITS		9
#define SID_MAP_NUM		(1 << SID_MAP_BITS)
#define SID_MAP_MASK		(SID_MAP_NUM - 1)

#ifdef CONFIG_PPC_BOOK3S_64
#define SID_CONTEXTS	1
#else
#define SID_CONTEXTS	128
#define VSID_POOL_SIZE	(SID_CONTEXTS * 16)
#endif

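/*
 * One guest HPTE that has been shadowed into the host hash table.  The entry
 * sits on several hash chains so it can be found and invalidated by guest
 * effective address or virtual page, and is freed via RCU.
 */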
struct hpte_cache {
	struct hlist_node list_pte;
	struct hlist_node list_pte_long;
	struct hlist_node list_vpte;
	struct hlist_node list_vpte_long;
	struct rcu_head rcu_head;
	u64 host_vpn;
	u64 pfn;
	ulong slot;
	struct kvmppc_pte pte;
};

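/*
 * Book3S specific per-vcpu state.  The generic struct kvm_vcpu is embedded
 * so that to_book3s() can recover the containing structure via container_of().
 */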
struct kvmppc_vcpu_book3s {
	struct kvm_vcpu vcpu;
	struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
	struct kvmppc_sid_map sid_map[SID_MAP_NUM];
	struct {
		u64 esid;
		u64 vsid;
	} slb_shadow[64];
	u8 slb_shadow_max;
	struct kvmppc_bat ibat[8];
	struct kvmppc_bat dbat[8];
	u64 hid[6];
	u64 gqr[8];
	u64 sdr1;
	u64 hior;
	u64 msr_mask;
	u64 purr_offset;
	u64 spurr_offset;
#ifdef CONFIG_PPC_BOOK3S_32
	u32 vsid_pool[VSID_POOL_SIZE];
	u32 vsid_next;
#else
	u64 proto_vsid_first;
	u64 proto_vsid_max;
	u64 proto_vsid_next;
#endif
	int context_id[SID_CONTEXTS];

	bool hior_explicit;		/* HIOR is set by ioctl, not PVR */

	struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
	struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
	struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
	struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
	int hpte_cache_count;
	spinlock_t mmu_lock;
};

#define CONTEXT_HOST		0
#define CONTEXT_GUEST		1
#define CONTEXT_GUEST_END	2

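/*
 * Flag values or'ed into the shadow VSIDs to keep the different address
 * space flavours (real mode, BAT mappings, IR/DR state, problem state) apart.
 */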
#define VSID_REAL	0x1fffffffffc00000ULL
#define VSID_BAT	0x1fffffffffb00000ULL
#define VSID_REAL_DR	0x2000000000000000ULL
#define VSID_REAL_IR	0x4000000000000000ULL
#define VSID_PR		0x8000000000000000ULL

extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask);
extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
extern void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr);
extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
extern int kvmppc_book3s_hv_page_fault(struct kvm_run *run,
			struct kvm_vcpu *vcpu, unsigned long addr,
			unsigned long status);
extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
			unsigned long slb_v, unsigned long valid);

extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern int kvmppc_mmu_hpte_sysinit(void);
extern void kvmppc_mmu_hpte_sysexit(void);
extern int kvmppc_mmu_hv_init(void);

extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
extern void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
					  unsigned int vec);
extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags);
extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
			   bool upper, u32 val);
extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
			unsigned long *rmap, long pte_index, int realmode);
extern void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
			unsigned long pte_index);
void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep,
			unsigned long pte_index);
extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
			unsigned long *nb_ret);
extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr,
			unsigned long gpa, bool dirty);
extern long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
			long pte_index, unsigned long pteh, unsigned long ptel);
extern long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
			long pte_index, unsigned long pteh, unsigned long ptel,
			pgd_t *pgdir, bool realmode, unsigned long *idx_ret);
extern long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
			unsigned long pte_index, unsigned long avpn,
			unsigned long *hpret);
extern long kvmppc_hv_get_dirty_log(struct kvm *kvm,
			struct kvm_memory_slot *memslot, unsigned long *map);

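/*
 * Low-level entry and register-load helpers; the trampolines and the
 * FP/Altivec/VSX load routines are implemented in assembly.
 */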
extern void kvmppc_entry_trampoline(void);
extern void kvmppc_hv_entry_trampoline(void);
extern void kvmppc_load_up_fpu(void);
extern void kvmppc_load_up_altivec(void);
extern void kvmppc_load_up_vsx(void);
extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);

static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct kvmppc_vcpu_book3s, vcpu);
}

extern void kvm_return_point(void);

/* Also add subarch specific defines */

#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
#include <asm/kvm_book3s_32.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/kvm_book3s_64.h>
#endif

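/*
 * With PR KVM the volatile guest registers are kept in the shadow vcpu while
 * the guest runs, so the accessors below go through svcpu_get()/svcpu_put();
 * without it the registers live directly in vcpu->arch.
 */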
#ifdef CONFIG_KVM_BOOK3S_PR

static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
{
	return to_book3s(vcpu)->hior;
}

static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
			unsigned long pending_now, unsigned long old_pending)
{
	if (pending_now)
		vcpu->arch.shared->int_pending = 1;
	else if (old_pending)
		vcpu->arch.shared->int_pending = 0;
}

static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
	if ( num < 14 ) {
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		svcpu->gpr[num] = val;
		svcpu_put(svcpu);
		to_book3s(vcpu)->shadow_vcpu->gpr[num] = val;
	} else
		vcpu->arch.gpr[num] = val;
}

static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
	if ( num < 14 ) {
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		ulong r = svcpu->gpr[num];
		svcpu_put(svcpu);
		return r;
	} else
		return vcpu->arch.gpr[num];
}

236
237static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
238{
239 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
240 svcpu->cr = val;
241 svcpu_put(svcpu);
242 to_book3s(vcpu)->shadow_vcpu->cr = val;
243}
244
245static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
246{
247 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
248 u32 r;
249 r = svcpu->cr;
250 svcpu_put(svcpu);
251 return r;
252}
253
254static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
255{
256 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
257 svcpu->xer = val;
258 to_book3s(vcpu)->shadow_vcpu->xer = val;
259 svcpu_put(svcpu);
260}
261
262static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
263{
264 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
265 u32 r;
266 r = svcpu->xer;
267 svcpu_put(svcpu);
268 return r;
269}
270
271static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
272{
273 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
274 svcpu->ctr = val;
275 svcpu_put(svcpu);
276}
277
278static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
279{
280 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
281 ulong r;
282 r = svcpu->ctr;
283 svcpu_put(svcpu);
284 return r;
285}
286
287static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
288{
289 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
290 svcpu->lr = val;
291 svcpu_put(svcpu);
292}
293
294static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
295{
296 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
297 ulong r;
298 r = svcpu->lr;
299 svcpu_put(svcpu);
300 return r;
301}
302
303static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
304{
305 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
306 svcpu->pc = val;
307 svcpu_put(svcpu);
308}
309
310static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
311{
312 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
313 ulong r;
314 r = svcpu->pc;
315 svcpu_put(svcpu);
316 return r;
317}
318
static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
{
	ulong pc = kvmppc_get_pc(vcpu);
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	u32 r;

	/* Load the instruction manually if it failed to do so in the
	 * first round */
	if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
		kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);

	r = svcpu->last_inst;
	svcpu_put(svcpu);
	return r;
}

static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	ulong r;
	r = svcpu->fault_dar;
	svcpu_put(svcpu);
	return r;
}

static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
	ulong crit_raw = vcpu->arch.shared->critical;
	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
	bool crit;

	/* Truncate crit indicators in 32 bit mode */
	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

	return crit;
}
#else /* CONFIG_KVM_BOOK3S_PR */

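/* Without PR KVM the guest registers are accessed directly in vcpu->arch. */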
static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
			unsigned long pending_now, unsigned long old_pending)
{
}

static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
	vcpu->arch.gpr[num] = val;
}

static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
	return vcpu->arch.gpr[num];
}

static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
	vcpu->arch.cr = val;
}

static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.cr;
}

static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
{
	vcpu->arch.xer = val;
}

static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.xer;
}

static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.ctr = val;
}

static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.ctr;
}

static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.lr = val;
}

static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.lr;
}

static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.pc = val;
}

static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.pc;
}

static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
{
	ulong pc = kvmppc_get_pc(vcpu);

	/* Load the instruction manually if it failed to do so in the
	 * first round */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
		kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);

	return vcpu->arch.last_inst;
}

static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault_dar;
}

static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
	return false;
}
#endif /* CONFIG_KVM_BOOK3S_PR */

/* Magic register values loaded into r3 and r4 before the 'sc' assembly
 * instruction for the OSI hypercalls */
#define OSI_SC_MAGIC_R3			0x113724FA
#define OSI_SC_MAGIC_R4			0x77810F9B

#define INS_DCBZ			0x7c0007ec
/* TO = 31 for unconditional trap */
#define INS_TW				0x7fe00008

/* LPIDs we support with this build -- runtime limit may be lower */
#define KVMPPC_NR_LPIDS			(LPID_RSVD + 1)

#endif /* __ASM_KVM_BOOK3S_H__ */