#ifndef __POWERPC_KVM_PPC_H__
#define __POWERPC_KVM_PPC_H__

#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/kvm_book3s.h>
#else
#include <asm/kvm_booke.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/paca.h>
#include <asm/xive.h>
#include <asm/cpu_has_feature.h>
#endif
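/* Instruction used by KVM to implement software breakpoints in the guest. */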
#define KVMPPC_INST_SW_BREAKPOINT	0x00dddd00

enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
	EMULATE_FAIL,		/* can't emulate this instruction */
	EMULATE_AGAIN,		/* something went wrong, go again */
	EMULATE_EXIT_USER,	/* emulation requires exit to user-space */
};

enum instruction_fetch_type {
	INST_GENERIC,
	INST_SC,		/* system call */
};

enum xlate_instdata {
	XLATE_INST,		/* translate instruction address */
	XLATE_DATA		/* translate data address */
};

enum xlate_readwrite {
	XLATE_READ,		/* check for read permissions */
	XLATE_WRITE		/* check for write permissions */
};

extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern void kvmppc_handler_highmem(void);
extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		unsigned int rt, unsigned int bytes,
		int is_default_endian);
extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
		unsigned int rt, unsigned int bytes,
		int is_default_endian);
extern int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		unsigned int rt, unsigned int bytes,
		int is_default_endian, int mmio_sign_extend);
extern int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		unsigned int rt, unsigned int bytes, int is_default_endian);
extern int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
		unsigned int rs, unsigned int bytes, int is_default_endian);
extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
		u64 val, unsigned int bytes,
		int is_default_endian);
extern int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
		int rs, unsigned int bytes,
		int is_default_endian);

extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
		enum instruction_fetch_type type, u32 *inst);

extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		bool data);
extern int kvmppc_emulate_instruction(struct kvm_run *run,
		struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);
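/* Core-specific hooks */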
extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
		unsigned int gtlb_idx);
extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
		gva_t eaddr);
extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
		enum xlate_instdata xlid, enum xlate_readwrite xlrw,
		struct kvmppc_pte *pte);

extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
		unsigned int id);
extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_processor_compat(void);
extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
		struct kvm_translation *tr);

extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);

extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
		struct kvm_interrupt *irq);
extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
		ulong esr_flags);
extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
		ulong dear_flags,
		ulong esr_flags);
extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
		ulong esr_flags);
extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);

extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);

extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);
extern int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order);
extern void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info);
extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order);
extern void kvmppc_free_hpt(struct kvm_hpt_info *info);
extern void kvmppc_rmap_reset(struct kvm *kvm);
extern long kvmppc_prepare_vrma(struct kvm *kvm,
		struct kvm_userspace_memory_region *mem);
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
		struct kvm_memory_slot *memslot, unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
		struct iommu_group *grp);
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
		struct iommu_group *grp);
extern int kvmppc_switch_mmu_to_hpt(struct kvm *kvm);
extern int kvmppc_switch_mmu_to_radix(struct kvm *kvm);
extern void kvmppc_setup_partition_table(struct kvm *kvm);

extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
		struct kvm_create_spapr_tce_64 *args);
extern struct kvmppc_spapr_tce_table *kvmppc_find_table(
		struct kvm *kvm, unsigned long liobn);
#define kvmppc_ioba_validate(stt, ioba, npages)				\
	(iommu_tce_check_ioba((stt)->page_shift, (stt)->offset,		\
			(stt)->size, (ioba), (npages)) ?		\
			H_PARAMETER : H_SUCCESS)
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		unsigned long ioba, unsigned long tce);
extern long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages);
extern long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages);
extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		unsigned long ioba);
extern struct page *kvm_alloc_hpt_cma(unsigned long nr_pages);
extern void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern void kvmppc_core_free_memslot(struct kvm *kvm,
		struct kvm_memory_slot *free,
		struct kvm_memory_slot *dont);
extern int kvmppc_core_create_memslot(struct kvm *kvm,
		struct kvm_memory_slot *slot,
		unsigned long npages);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
		struct kvm_memory_slot *memslot,
		const struct kvm_userspace_memory_region *mem);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
		const struct kvm_userspace_memory_region *mem,
		const struct kvm_memory_slot *old,
		const struct kvm_memory_slot *new,
		enum kvm_mr_change change);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
		struct kvm_ppc_smmu_info *info);
extern void kvmppc_core_flush_memslot(struct kvm *kvm,
		struct kvm_memory_slot *memslot);

extern int kvmppc_bookehv_init(void);
extern void kvmppc_bookehv_exit(void);

extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);

extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);
extern long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
		struct kvm_ppc_resize_hpt *rhpt);
extern long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
		struct kvm_ppc_resize_hpt *rhpt);

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);

extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
extern void kvmppc_rtas_tokens_free(struct kvm *kvm);

extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
		u32 priority);
extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
		u32 *priority);
extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu);
void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu);
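/* Register value container used by the KVM_{GET,SET}_ONE_REG interface. */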
union kvmppc_one_reg {
	u32	wval;
	u64	dval;
	vector128 vval;
	u64	vsxval[2];
	u32	vsx32val[4];
	u16	vsx16val[8];
	u8	vsx8val[16];
	struct {
		u64	addr;
		u64	length;
	}	vpaval;
	u64	xive_timaval[2];
};
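/*
 * Per-backend (HV or PR) operations table; kvm->arch.kvm_ops points at the
 * implementation selected for the VM.
 */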
struct kvmppc_ops {
	struct module *owner;
	int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			union kvmppc_one_reg *val);
	int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			union kvmppc_one_reg *val);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*check_requests)(struct kvm_vcpu *vcpu);
	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
	void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
	int (*prepare_memory_region)(struct kvm *kvm,
			struct kvm_memory_slot *memslot,
			const struct kvm_userspace_memory_region *mem);
	void (*commit_memory_region)(struct kvm *kvm,
			const struct kvm_userspace_memory_region *mem,
			const struct kvm_memory_slot *old,
			const struct kvm_memory_slot *new,
			enum kvm_mr_change change);
	int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
			unsigned long end);
	int (*age_hva)(struct kvm *kvm, unsigned long start, unsigned long end);
	int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
	void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
	void (*mmu_destroy)(struct kvm_vcpu *vcpu);
	void (*free_memslot)(struct kvm_memory_slot *free,
			struct kvm_memory_slot *dont);
	int (*create_memslot)(struct kvm_memory_slot *slot,
			unsigned long npages);
	int (*init_vm)(struct kvm *kvm);
	void (*destroy_vm)(struct kvm *kvm);
	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
	int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int inst, int *advance);
	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
	void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
	long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
			unsigned long arg);
	int (*hcall_implemented)(unsigned long hcall);
	int (*irq_bypass_add_producer)(struct irq_bypass_consumer *,
			struct irq_bypass_producer *);
	void (*irq_bypass_del_producer)(struct irq_bypass_consumer *,
			struct irq_bypass_producer *);
	int (*configure_mmu)(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg);
	int (*get_rmmu_info)(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
	int (*set_smt_mode)(struct kvm *kvm, unsigned long mode,
			unsigned long flags);
	void (*giveup_ext)(struct kvm_vcpu *vcpu, ulong msr);
	int (*enable_nested)(struct kvm *kvm);
	int (*load_from_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
			int size);
	int (*store_to_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
			int size);
};

extern struct kvmppc_ops *kvmppc_hv_ops;
extern struct kvmppc_ops *kvmppc_pr_ops;
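/*
 * Return the guest's last executed instruction, fetching it from memory if
 * the cached copy is marked as failed, and byte-swapping it when the guest
 * endianness differs from the host's.
 */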
static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
		enum instruction_fetch_type type, u32 *inst)
{
	int ret = EMULATE_DONE;
	u32 fetched_inst;

	/* Load the instruction manually if it failed to do so in the exit path */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
		ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);

	/* Write fetch_failed unswapped if the fetch failed */
	if (ret == EMULATE_DONE)
		fetched_inst = kvmppc_need_byteswap(vcpu) ?
			swab32(vcpu->arch.last_inst) :
			vcpu->arch.last_inst;
	else
		fetched_inst = vcpu->arch.last_inst;

	*inst = fetched_inst;
	return ret;
}

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
{
	return kvm->arch.kvm_ops == kvmppc_hv_ops;
}

extern int kvmppc_hwrng_present(void);
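/*
 * Cuts out inst bits with ordering according to spec.
 * That means the leftmost bit is zero. All given bits are included.
 */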
static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = (1 << (lsb - msb + 1)) - 1;
	r = (inst >> (63 - lsb)) & mask;

	return r;
}
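/*
 * Replaces inst bits with ordering according to spec.
 */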
static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
	r = (inst & ~mask) | ((value << (63 - lsb)) & mask);

	return r;
}
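/*
 * ONE_REG ids encode the register width; one_reg_size() extracts it, and
 * get_reg_val()/set_reg_val() marshal a 32- or 64-bit register to and from
 * a union kvmppc_one_reg accordingly.  For example (illustrative only,
 * "some_spr" standing in for a real vcpu field), a get_one_reg()
 * implementation might do:
 *
 *	*val = get_reg_val(id, vcpu->arch.some_spr);
 */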
#define one_reg_size(id)	\
	(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))

#define get_reg_val(id, reg)	({		\
	union kvmppc_one_reg __u;		\
	switch (one_reg_size(id)) {		\
	case 4: __u.wval = (reg); break;	\
	case 8: __u.dval = (reg); break;	\
	default: BUG();				\
	}					\
	__u;					\
})

#define set_reg_val(id, val)	({		\
	u64 __v;				\
	switch (one_reg_size(id)) {		\
	case 4: __v = (val).wval; break;	\
	case 8: __v = (val).dval; break;	\
	default: BUG();				\
	}					\
	__v;					\
})

int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);

struct openpic;
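/*
 * When HV KVM is possible, these helpers cache interrupt-controller state
 * (XICS/XIVE addresses, the latched XIRR, the host IPI flag) in the per-CPU
 * PACA; the #else branch provides trivial stubs.
 */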
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void kvm_cma_reserve(void) __init;
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
	paca_ptrs[cpu]->kvm_hstate.xics_phys = (void __iomem *)addr;
}

static inline void kvmppc_set_xive_tima(int cpu,
					unsigned long phys_addr,
					void __iomem *virt_addr)
{
	paca_ptrs[cpu]->kvm_hstate.xive_tima_phys = (void __iomem *)phys_addr;
	paca_ptrs[cpu]->kvm_hstate.xive_tima_virt = virt_addr;
}

static inline u32 kvmppc_get_xics_latch(void)
{
	u32 xirr;

	xirr = get_paca()->kvm_hstate.saved_xirr;
	get_paca()->kvm_hstate.saved_xirr = 0;
	return xirr;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{
	paca_ptrs[cpu]->kvm_hstate.host_ipi = host_ipi;
}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
}

extern void kvm_hv_vm_activated(void);
extern void kvm_hv_vm_deactivated(void);
extern bool kvm_hv_mode_active(void);

extern void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
					struct kvm_nested_guest *nested);

#else
static inline void __init kvm_cma_reserve(void)
{}

static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}

static inline void kvmppc_set_xive_tima(int cpu,
					unsigned long phys_addr,
					void __iomem *virt_addr)
{}

static inline u32 kvmppc_get_xics_latch(void)
{
	return 0;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_kick(vcpu);
}

static inline bool kvm_hv_mode_active(void)		{ return false; }

#endif
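/* In-kernel XICS (POWER8 and earlier interrupt controller) emulation hooks. */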
#ifdef CONFIG_KVM_XICS
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
}

static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
{
	if (kvm && kvm_irq_bypass)
		return kvm->arch.pimap;
	return NULL;
}

extern void kvmppc_alloc_host_rm_ops(void);
extern void kvmppc_free_host_rm_ops(void);
extern void kvmppc_free_pimap(struct kvm *kvm);
extern int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall);
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xics_ipi_action(void);
extern void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, __be32 xirr,
					struct kvmppc_irq_map *irq_map,
					struct kvmppc_passthru_irqmap *pimap,
					bool *again);

extern int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
			       int level, bool line_status);

extern int h_ipi_redirect;
#else
static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
	{ return NULL; }
static inline void kvmppc_alloc_host_rm_ops(void) {}
static inline void kvmppc_free_host_rm_ops(void) {}
static inline void kvmppc_free_pimap(struct kvm *kvm) {}
static inline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
	{ return 0; }
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
	{ return 0; }
#endif

#ifdef CONFIG_KVM_XIVE
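/*
 * Below, the first "xive" is the eXternal Interrupt Virtualization Engine,
 * i.e. the POWER9 interrupt controller, while the second "xive" in
 * set/get_xive is the legacy eXternal Interrupt Vector Entry: the
 * configuration of an interrupt on the older XICS controller.
 */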
extern int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xive_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xive_int_off(struct kvm *kvm, u32 irq);
extern void kvmppc_xive_init_module(void);
extern void kvmppc_xive_exit_module(void);

extern int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				  struct irq_desc *host_desc);
extern int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				  struct irq_desc *host_desc);
extern u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval);

extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
			       int level, bool line_status);
extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu);

static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XIVE;
}

extern int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_xive_native_init_module(void);
extern void kvmppc_xive_native_exit_module(void);
extern int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
				     union kvmppc_one_reg *val);
extern int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
				     union kvmppc_one_reg *val);

#else
static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
				       u32 priority) { return -1; }
static inline int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				       u32 *priority) { return -1; }
static inline int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) { return -1; }
static inline int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) { return -1; }
static inline void kvmppc_xive_init_module(void) { }
static inline void kvmppc_xive_exit_module(void) { }

static inline int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu)
	{ return -EBUSY; }
static inline void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
					 struct irq_desc *host_desc)
	{ return -ENODEV; }
static inline int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
					 struct irq_desc *host_desc)
	{ return -ENODEV; }
static inline u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu) { return 0; }
static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
	{ return -ENOENT; }

static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
				      int level, bool line_status)
	{ return -ENODEV; }
static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { }

static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
						  struct kvm_vcpu *vcpu, u32 cpu)
	{ return -EBUSY; }
static inline void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
static inline void kvmppc_xive_native_init_module(void) { }
static inline void kvmppc_xive_native_exit_module(void) { }
static inline int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
					    union kvmppc_one_reg *val)
	{ return 0; }
static inline int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
					    union kvmppc_one_reg *val)
	{ return -ENOENT; }

#endif

#if defined(CONFIG_PPC_POWERNV) && defined(CONFIG_KVM_BOOK3S_64_HANDLER)
static inline bool xics_on_xive(void)
{
	return xive_enabled() && cpu_has_feature(CPU_FTR_HVMODE);
}
#else
static inline bool xics_on_xive(void)
{
	return false;
}
#endif
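/*
 * Prototypes for functions called only from assembler code.
 * Having prototypes reduces sparse errors.
 */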
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			 unsigned long ioba, unsigned long tce);
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
				  unsigned long liobn, unsigned long ioba,
				  unsigned long tce_list, unsigned long npages);
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
			   unsigned long liobn, unsigned long ioba,
			   unsigned long tce_value, unsigned long npages);
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count);
long kvmppc_h_random(struct kvm_vcpu *vcpu);
void kvmhv_commence_exit(int trap);
void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
void kvmppc_subcore_enter_guest(void);
void kvmppc_subcore_exit_guest(void);
long kvmppc_realmode_hmi_handler(void);
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel);
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn);
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu);
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn,
		      unsigned long va);
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index);
long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
			   unsigned long dest, unsigned long src);
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status, bool data);
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu);
unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu);
unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server);
int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
		    unsigned long mfrr);
int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu);
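/*
 * Host-side operations we want to set up while running in real mode in the
 * guest, operating on the XICS.  Currently only VCPU wakeup is supported.
 */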
union kvmppc_rm_state {
	unsigned long raw;
	struct {
		u32 in_host;
		u32 rm_action;
	};
};

struct kvmppc_host_rm_core {
	union kvmppc_rm_state rm_state;
	void *rm_data;
	char pad[112];
};

struct kvmppc_host_rm_ops {
	struct kvmppc_host_rm_core *rm_core;
	void (*vcpu_kick)(struct kvm_vcpu *vcpu);
};

extern struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;

static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GEPR);
#elif defined(CONFIG_BOOKE)
	return vcpu->arch.epr;
#else
	return 0;
#endif
}

static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GEPR, epr);
#elif defined(CONFIG_BOOKE)
	vcpu->arch.epr = epr;
#endif
}
#ifdef CONFIG_KVM_MPIC

void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu);
int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 cpu);
void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu);

#else

static inline void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
{
}

static inline int kvmppc_mpic_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu)
{
	return -EINVAL;
}

static inline void kvmppc_mpic_disconnect_vcpu(struct openpic *opp,
					       struct kvm_vcpu *vcpu)
{
}

#endif /* CONFIG_KVM_MPIC */

int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
			      struct kvm_config_tlb *cfg);
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
			     struct kvm_dirty_tlb *cfg);

long kvmppc_alloc_lpid(void);
void kvmppc_claim_lpid(long lpid);
void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids);
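/* Ensure i-cache coherency for a page that is about to be mapped into the guest. */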
static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
{
	struct page *page;

	/*
	 * We can only access pages that the kernel maps
	 * as memory. Bail out for unmapped ones.
	 */
	if (!pfn_valid(pfn))
		return;

	/* Clear i-cache for new pages */
	page = pfn_to_page(pfn);
	if (!test_bit(PG_arch_1, &page->flags)) {
		flush_dcache_icache_page(page);
		set_bit(PG_arch_1, &page->flags);
	}
}
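/*
 * Shared struct helpers. The shared struct can be little or big endian,
 * depending on the guest endianness. So expose helpers to all of them.
 */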
static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
	/* Only Book3S_64 PR supports bi-endian for now */
	return vcpu->arch.shared_big_endian;
#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
	/* Book3S_64 HV on little endian is always little endian */
	return false;
#else
	return true;
#endif
}
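/*
 * The wrappers below generate kvmppc_get_<reg>()/kvmppc_set_<reg>() accessors.
 * On BookE HV the register lives in a guest SPR and is accessed with
 * mfspr/mtspr; everywhere else it lives in the shared page and is stored in
 * the guest's endianness.
 */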
#define SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	return mfspr(bookehv_spr);					\
}									\

#define SPRNG_WRAPPER_SET(reg, bookehv_spr)				\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val)	\
{									\
	mtspr(bookehv_spr, val);					\
}									\

#define SHARED_WRAPPER_GET(reg, size)					\
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
		return be##size##_to_cpu(vcpu->arch.shared->reg);	\
	else								\
		return le##size##_to_cpu(vcpu->arch.shared->reg);	\
}									\

#define SHARED_WRAPPER_SET(reg, size)					\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
		vcpu->arch.shared->reg = cpu_to_be##size(val);		\
	else								\
		vcpu->arch.shared->reg = cpu_to_le##size(val);		\
}									\

#define SHARED_WRAPPER(reg, size)					\
	SHARED_WRAPPER_GET(reg, size)					\
	SHARED_WRAPPER_SET(reg, size)					\

#define SPRNG_WRAPPER(reg, bookehv_spr)					\
	SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
	SPRNG_WRAPPER_SET(reg, bookehv_spr)				\

#ifdef CONFIG_KVM_BOOKE_HV

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SPRNG_WRAPPER(reg, bookehv_spr)					\

#else

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SHARED_WRAPPER(reg, size)					\

#endif

SHARED_WRAPPER(critical, 64)
SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
SHARED_WRAPPER_GET(msr, 64)
static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
	if (kvmppc_shared_big_endian(vcpu))
		vcpu->arch.shared->msr = cpu_to_be64(val);
	else
		vcpu->arch.shared->msr = cpu_to_le64(val);
}
SHARED_WRAPPER(dsisr, 32)
SHARED_WRAPPER(int_pending, 32)
SHARED_WRAPPER(sprg4, 64)
SHARED_WRAPPER(sprg5, 64)
SHARED_WRAPPER(sprg6, 64)
SHARED_WRAPPER(sprg7, 64)

static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
{
	if (kvmppc_shared_big_endian(vcpu))
		return be32_to_cpu(vcpu->arch.shared->sr[nr]);
	else
		return le32_to_cpu(vcpu->arch.shared->sr[nr]);
}

static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
{
	if (kvmppc_shared_big_endian(vcpu))
		vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
	else
		vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
}
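/*
 * Please call after prepare_to_enter. This function puts the lazy ee and irq
 * disabled tracking state back to normal mode, without actually enabling
 * interrupts.
 */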
static inline void kvmppc_fix_ee_before_entry(void)
{
	trace_hardirqs_on();

#ifdef CONFIG_PPC64
	/*
	 * To avoid races, the caller must have gone directly from having
	 * interrupts fully-enabled to hard-disabled.
	 */
	WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);

	/* Only need to enable IRQs by hard enabling them after this */
	local_paca->irq_happened = 0;
	irq_soft_mask_set(IRQS_ENABLED);
#endif
}
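/*
 * Compute the effective address of an indexed load/store: GPR[rb], plus
 * GPR[ra] when ra is non-zero, truncated to 32 bits unless the guest MSR
 * indicates 64-bit mode.
 */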
static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
{
	ulong ea;
	ulong msr_64bit = 0;

	ea = kvmppc_get_gpr(vcpu, rb);
	if (ra)
		ea += kvmppc_get_gpr(vcpu, ra);

#if defined(CONFIG_PPC_BOOK3E_64)
	msr_64bit = MSR_CM;
#elif defined(CONFIG_PPC_BOOK3S_64)
	msr_64bit = MSR_SF;
#endif

	if (!(kvmppc_get_msr(vcpu) & msr_64bit))
		ea = (uint32_t)ea;

	return ea;
}

extern void xics_wake_cpu(int cpu);

#endif /* __POWERPC_KVM_PPC_H__ */