#ifndef __POWERPC_KVM_PPC_H__
#define __POWERPC_KVM_PPC_H__

#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/kvm_book3s.h>
#else
#include <asm/kvm_booke.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/paca.h>
#include <asm/xive.h>
#include <asm/cpu_has_feature.h>
#endif
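
/* Instruction pattern used by KVM to implement a software breakpoint. */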
#define KVMPPC_INST_SW_BREAKPOINT	0x00dddd00
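
/*
 * Possible outcomes of instruction emulation: finished, needs an MMIO
 * exit to userspace, cannot be emulated, should be retried, or requires
 * an exit to userspace for some other reason.
 */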
enum emulation_result {
	EMULATE_DONE,
	EMULATE_DO_MMIO,
	EMULATE_FAIL,
	EMULATE_AGAIN,
	EMULATE_EXIT_USER,
};

enum instruction_fetch_type {
	INST_GENERIC,
	INST_SC,
};

enum xlate_instdata {
	XLATE_INST,
	XLATE_DATA
};

enum xlate_readwrite {
	XLATE_READ,
	XLATE_WRITE
};

extern int kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
extern void kvmppc_handler_highmem(void);

extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_vcpu *vcpu,
		unsigned int rt, unsigned int bytes,
		int is_default_endian);
extern int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
		unsigned int rt, unsigned int bytes,
		int is_default_endian);
extern int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
		unsigned int rt, unsigned int bytes,
		int is_default_endian, int mmio_sign_extend);
extern int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
		unsigned int rt, unsigned int bytes, int is_default_endian);
extern int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
		unsigned int rs, unsigned int bytes, int is_default_endian);
extern int kvmppc_handle_store(struct kvm_vcpu *vcpu,
		u64 val, unsigned int bytes,
		int is_default_endian);
extern int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
		int rs, unsigned int bytes,
		int is_default_endian);

extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
		enum instruction_fetch_type type, u32 *inst);

extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_emulate_instruction(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);
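
/* Core-specific hooks */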
extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
			   unsigned int gtlb_idx);
extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
			      gva_t eaddr);
extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
			enum xlate_instdata xlid, enum xlate_readwrite xlrw,
			struct kvmppc_pte *pte);

extern int kvmppc_core_vcpu_create(struct kvm_vcpu *vcpu);
extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_processor_compat(void);
extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
				      struct kvm_translation *tr);

extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);

extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_syscall(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				       struct kvm_interrupt *irq);
extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
					ulong esr_flags);
extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
					   ulong dear_flags,
					   ulong esr_flags);
extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
					   ulong esr_flags);
extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);

extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);

extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);

extern int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order);
extern void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info);
extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order);
extern void kvmppc_free_hpt(struct kvm_hpt_info *info);
extern void kvmppc_rmap_reset(struct kvm *kvm);
extern long kvmppc_prepare_vrma(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem);
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
			    struct kvm_memory_slot *memslot, unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
					     struct iommu_group *grp);
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
					      struct iommu_group *grp);
extern int kvmppc_switch_mmu_to_hpt(struct kvm *kvm);
extern int kvmppc_switch_mmu_to_radix(struct kvm *kvm);
extern void kvmppc_setup_partition_table(struct kvm *kvm);

extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
					  struct kvm_create_spapr_tce_64 *args);
extern struct kvmppc_spapr_tce_table *kvmppc_find_table(
		struct kvm *kvm, unsigned long liobn);
#define kvmppc_ioba_validate(stt, ioba, npages)                         \
	(iommu_tce_check_ioba((stt)->page_shift, (stt)->offset,         \
				(stt)->size, (ioba), (npages)) ?        \
			H_PARAMETER : H_SUCCESS)
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba, unsigned long tce);
extern long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
				      unsigned long liobn, unsigned long ioba,
				      unsigned long tce_list, unsigned long npages);
extern long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
			       unsigned long liobn, unsigned long ioba,
			       unsigned long tce_value, unsigned long npages);
extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba);
extern struct page *kvm_alloc_hpt_cma(unsigned long nr_pages);
extern void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern void kvmppc_core_free_memslot(struct kvm *kvm,
				     struct kvm_memory_slot *slot);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem,
				enum kvm_mr_change change);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
				      struct kvm_ppc_smmu_info *info);
extern void kvmppc_core_flush_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *memslot);

extern int kvmppc_bookehv_init(void);
extern void kvmppc_bookehv_exit(void);

extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);

extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);
extern long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
					    struct kvm_ppc_resize_hpt *rhpt);
extern long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
					   struct kvm_ppc_resize_hpt *rhpt);

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);

extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
extern void kvmppc_rtas_tokens_free(struct kvm *kvm);

extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu);
void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu);
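
/*
 * Container for a single register value as used by the ONE_REG
 * (KVM_GET_ONE_REG / KVM_SET_ONE_REG) interface.
 */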
union kvmppc_one_reg {
	u32	wval;
	u64	dval;
	vector128 vval;
	u64	vsxval[2];
	u32	vsx32val[4];
	u16	vsx16val[8];
	u8	vsx8val[16];
	struct {
		u64	addr;
		u64	length;
	}	vpaval;
	u64	xive_timaval[2];
};
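
/*
 * Per-VM ops vector. Each KVM implementation (Book3S HV, Book3S PR, booke)
 * provides one of these, and kvm->arch.kvm_ops points at the one in use.
 */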
struct kvmppc_ops {
	struct module *owner;
	int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
	void (*inject_interrupt)(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);
	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
	int (*vcpu_run)(struct kvm_vcpu *vcpu);
	int (*vcpu_create)(struct kvm_vcpu *vcpu);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*check_requests)(struct kvm_vcpu *vcpu);
	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
	void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
	int (*prepare_memory_region)(struct kvm *kvm,
				     struct kvm_memory_slot *memslot,
				     const struct kvm_userspace_memory_region *mem,
				     enum kvm_mr_change change);
	void (*commit_memory_region)(struct kvm *kvm,
				     const struct kvm_userspace_memory_region *mem,
				     const struct kvm_memory_slot *old,
				     const struct kvm_memory_slot *new,
				     enum kvm_mr_change change);
	bool (*unmap_gfn_range)(struct kvm *kvm, struct kvm_gfn_range *range);
	bool (*age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
	bool (*test_age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
	bool (*set_spte_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
	void (*free_memslot)(struct kvm_memory_slot *slot);
	int (*init_vm)(struct kvm *kvm);
	void (*destroy_vm)(struct kvm *kvm);
	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
	int (*emulate_op)(struct kvm_vcpu *vcpu,
			  unsigned int inst, int *advance);
	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
	void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
	long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
			      unsigned long arg);
	int (*hcall_implemented)(unsigned long hcall);
	int (*irq_bypass_add_producer)(struct irq_bypass_consumer *,
				       struct irq_bypass_producer *);
	void (*irq_bypass_del_producer)(struct irq_bypass_consumer *,
					struct irq_bypass_producer *);
	int (*configure_mmu)(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg);
	int (*get_rmmu_info)(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
	int (*set_smt_mode)(struct kvm *kvm, unsigned long mode,
			    unsigned long flags);
	void (*giveup_ext)(struct kvm_vcpu *vcpu, ulong msr);
	int (*enable_nested)(struct kvm *kvm);
	int (*load_from_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
			       int size);
	int (*store_to_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
			      int size);
	int (*enable_svm)(struct kvm *kvm);
	int (*svm_off)(struct kvm *kvm);
	int (*enable_dawr1)(struct kvm *kvm);
	bool (*hash_v3_possible)(void);
};

extern struct kvmppc_ops *kvmppc_hv_ops;
extern struct kvmppc_ops *kvmppc_pr_ops;

static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
				enum instruction_fetch_type type, u32 *inst)
{
	int ret = EMULATE_DONE;
	u32 fetched_inst;
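
	/*
	 * Load the instruction manually if it failed to do so in the
	 * exit path.
	 */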
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
		ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);
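
	/*
	 * Only byteswap if the fetch actually succeeded; otherwise hand back
	 * the raw KVM_INST_FETCH_FAILED value unswapped.
	 */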
	if (ret == EMULATE_DONE)
		fetched_inst = kvmppc_need_byteswap(vcpu) ?
				swab32(vcpu->arch.last_inst) :
				vcpu->arch.last_inst;
	else
		fetched_inst = vcpu->arch.last_inst;

	*inst = fetched_inst;
	return ret;
}

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
{
	return kvm->arch.kvm_ops == kvmppc_hv_ops;
}

extern int kvmppc_hwrng_present(void);
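
/*
 * Extract bits [msb:lsb] of an instruction, using the PowerPC convention
 * that bit 0 is the most significant bit of the 64-bit value; both
 * endpoints are included.
 */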
static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = (1 << (lsb - msb + 1)) - 1;
	r = (inst >> (63 - lsb)) & mask;

	return r;
}
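
/* Replace bits [msb:lsb] of an instruction with value, same bit numbering. */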
static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
	r = (inst & ~mask) | ((value << (63 - lsb)) & mask);

	return r;
}
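
/*
 * Helpers for the ONE_REG interface: one_reg_size() decodes the register
 * size encoded in the register id, and get_reg_val()/set_reg_val() move a
 * value into or out of a union kvmppc_one_reg of that size.
 */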
#define one_reg_size(id)	\
	(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))

#define get_reg_val(id, reg)	({		\
	union kvmppc_one_reg __u;		\
	switch (one_reg_size(id)) {		\
	case 4: __u.wval = (reg); break;	\
	case 8: __u.dval = (reg); break;	\
	default: BUG();				\
	}					\
	__u;					\
})

#define set_reg_val(id, val)	({		\
	u64 __v;				\
	switch (one_reg_size(id)) {		\
	case 4: __v = (val).wval; break;	\
	case 8: __v = (val).dval; break;	\
	default: BUG();				\
	}					\
	__v;					\
})

int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);

struct openpic;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void kvm_cma_reserve(void) __init;
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
	paca_ptrs[cpu]->kvm_hstate.xics_phys = (void __iomem *)addr;
}

static inline void kvmppc_set_xive_tima(int cpu,
					unsigned long phys_addr,
					void __iomem *virt_addr)
{
	paca_ptrs[cpu]->kvm_hstate.xive_tima_phys = (void __iomem *)phys_addr;
	paca_ptrs[cpu]->kvm_hstate.xive_tima_virt = virt_addr;
}

static inline u32 kvmppc_get_xics_latch(void)
{
	u32 xirr;

	xirr = get_paca()->kvm_hstate.saved_xirr;
	get_paca()->kvm_hstate.saved_xirr = 0;
	return xirr;
}
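
/*
 * Setting host_ipi in the target CPU's PACA tells a CPU that may be running
 * guest code that the incoming IPI is meant for the host, forcing a full
 * exit; the flag is cleared again by the IPI handler via
 * kvmppc_clear_host_ipi().
 */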
static inline void kvmppc_set_host_ipi(int cpu)
{
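	/*
	 * Order any preceding stores (such as queued IPI messages) before
	 * the store that sets host_ipi, so a CPU that observes host_ipi
	 * also sees the work that prompted it.
	 */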
	smp_mb();
	paca_ptrs[cpu]->kvm_hstate.host_ipi = 1;
}

static inline void kvmppc_clear_host_ipi(int cpu)
{
	paca_ptrs[cpu]->kvm_hstate.host_ipi = 0;
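
	/*
	 * Order the clearing of host_ipi before any subsequent processing
	 * of IPI messages, so new work posted by a sender is not missed.
	 */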
	smp_mb();
}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
}

extern void kvm_hv_vm_activated(void);
extern void kvm_hv_vm_deactivated(void);
extern bool kvm_hv_mode_active(void);

extern void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
					struct kvm_nested_guest *nested);

#else
static inline void __init kvm_cma_reserve(void)
{}

static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}

static inline void kvmppc_set_xive_tima(int cpu,
					unsigned long phys_addr,
					void __iomem *virt_addr)
{}

static inline u32 kvmppc_get_xics_latch(void)
{
	return 0;
}

static inline void kvmppc_set_host_ipi(int cpu)
{}

static inline void kvmppc_clear_host_ipi(int cpu)
{}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_kick(vcpu);
}

static inline bool kvm_hv_mode_active(void) { return false; }

#endif

#ifdef CONFIG_KVM_XICS
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
}

static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
{
	if (kvm && kvm_irq_bypass)
		return kvm->arch.pimap;
	return NULL;
}

extern void kvmppc_alloc_host_rm_ops(void);
extern void kvmppc_free_host_rm_ops(void);
extern void kvmppc_free_pimap(struct kvm *kvm);
extern int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall);
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
extern int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req);
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xics_ipi_action(void);
extern void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, __be32 xirr,
					struct kvmppc_irq_map *irq_map,
					struct kvmppc_passthru_irqmap *pimap,
					bool *again);

extern int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
			       int level, bool line_status);

extern int h_ipi_redirect;
#else
static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
	{ return NULL; }
static inline void kvmppc_alloc_host_rm_ops(void) {}
static inline void kvmppc_free_host_rm_ops(void) {}
static inline void kvmppc_free_pimap(struct kvm *kvm) {}
static inline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
	{ return 0; }
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
	{ return 0; }
static inline int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
	{ return 0; }
#endif

#ifdef CONFIG_KVM_XIVE
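/*
 * Note on naming: the kvmppc_xive_* prefix refers to the POWER9 "eXternal
 * Interrupt Virtualization Engine" backend, while the "xive" being set or
 * got in the first two functions below is the legacy XICS "eXternal
 * Interrupt Vector Entry" (server/priority) configuration of an interrupt.
 */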
extern int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xive_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xive_int_off(struct kvm *kvm, u32 irq);

extern int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				  struct irq_desc *host_desc);
extern int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				  struct irq_desc *host_desc);
extern u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval);

extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
			       int level, bool line_status);
extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu);

static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XIVE;
}

extern int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
				     union kvmppc_one_reg *val);
extern int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
				     union kvmppc_one_reg *val);
extern bool kvmppc_xive_native_supported(void);

#else
static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
				       u32 priority) { return -1; }
static inline int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				       u32 *priority) { return -1; }
static inline int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) { return -1; }
static inline int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) { return -1; }

static inline int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
static inline void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
					 struct irq_desc *host_desc) { return -ENODEV; }
static inline int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
					 struct irq_desc *host_desc) { return -ENODEV; }
static inline u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu) { return 0; }
static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) { return -ENOENT; }

static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
				      int level, bool line_status) { return -ENODEV; }
static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { }
static inline void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu) { }
static inline void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu) { }

static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
						  struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
static inline void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
					    union kvmppc_one_reg *val)
{ return 0; }
static inline int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
					    union kvmppc_one_reg *val)
{ return -ENOENT; }

#endif
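
/*
 * True when the host's interrupt controller is XIVE (POWER9 and later in
 * hypervisor mode), i.e. any XICS interface seen by guests is provided on
 * top of XIVE.
 */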
#if defined(CONFIG_PPC_POWERNV) && defined(CONFIG_KVM_BOOK3S_64_HANDLER)
static inline bool xics_on_xive(void)
{
	return xive_enabled() && cpu_has_feature(CPU_FTR_HVMODE);
}
#else
static inline bool xics_on_xive(void)
{
	return false;
}
#endif
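
/*
 * Hypercall and interrupt handlers that can run in real mode, called from
 * the Book3S HV low-level guest entry/exit code.
 */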
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			 unsigned long ioba, unsigned long tce);
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
				  unsigned long liobn, unsigned long ioba,
				  unsigned long tce_list, unsigned long npages);
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
			   unsigned long liobn, unsigned long ioba,
			   unsigned long tce_value, unsigned long npages);
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count);
long kvmppc_rm_h_random(struct kvm_vcpu *vcpu);
void kvmhv_commence_exit(int trap);
void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
void kvmppc_subcore_enter_guest(void);
void kvmppc_subcore_exit_guest(void);
long kvmppc_realmode_hmi_handler(void);
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel);
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn);
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu);
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn);
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index);
long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
			   unsigned long dest, unsigned long src);
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status, bool data);
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu);
unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu);
unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server);
int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
		    unsigned long mfrr);
int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu);
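
/*
 * Per-core state used by real-mode KVM code to hand actions (currently
 * only a vcpu kick) back to the host for completion.
 */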
union kvmppc_rm_state {
	unsigned long raw;
	struct {
		u32 in_host;
		u32 rm_action;
	};
};

struct kvmppc_host_rm_core {
	union kvmppc_rm_state rm_state;
	void *rm_data;
	char pad[112];
};

struct kvmppc_host_rm_ops {
	struct kvmppc_host_rm_core *rm_core;
	void (*vcpu_kick)(struct kvm_vcpu *vcpu);
};

extern struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;

static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GEPR);
#elif defined(CONFIG_BOOKE)
	return vcpu->arch.epr;
#else
	return 0;
#endif
}

static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GEPR, epr);
#elif defined(CONFIG_BOOKE)
	vcpu->arch.epr = epr;
#endif
}

#ifdef CONFIG_KVM_MPIC

void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu);
int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 cpu);
void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu);

#else

static inline void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
{
}

static inline int kvmppc_mpic_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu)
{
	return -EINVAL;
}

static inline void kvmppc_mpic_disconnect_vcpu(struct openpic *opp,
					       struct kvm_vcpu *vcpu)
{
}

#endif

int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
			      struct kvm_config_tlb *cfg);
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
			     struct kvm_dirty_tlb *cfg);

long kvmppc_alloc_lpid(void);
void kvmppc_claim_lpid(long lpid);
void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids);

static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
{
	struct page *page;
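
	/*
	 * We can only access pages that the kernel maps as memory;
	 * bail out for anything else.
	 */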
	if (!pfn_valid(pfn))
		return;
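
	/* Make the icache coherent for pages not yet marked clean. */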
	page = pfn_to_page(pfn);
	if (!test_bit(PG_dcache_clean, &page->flags)) {
		flush_dcache_icache_page(page);
		set_bit(PG_dcache_clean, &page->flags);
	}
}
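
/*
 * The vcpu's shared (magic) page may be mapped big or little endian,
 * depending on the guest and the KVM flavour: only Book3S_64 PR tracks the
 * guest's endianness; Book3S_64 built little endian without PR always uses
 * little endian, and everything else uses big endian.
 */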
static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
	return vcpu->arch.shared_big_endian;
#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
	return false;
#else
	return true;
#endif
}
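
/*
 * Accessor generators for fields of the shared page: on booke HV the values
 * live in guest SPRs (SPRNG_WRAPPER), otherwise they are read from and
 * written to the shared struct with the appropriate byte order
 * (SHARED_WRAPPER); SHARED_SPRNG_WRAPPER picks one or the other.
 */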
#define SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	return mfspr(bookehv_spr);					\
}									\

#define SPRNG_WRAPPER_SET(reg, bookehv_spr)				\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val)	\
{									\
	mtspr(bookehv_spr, val);					\
}									\

#define SHARED_WRAPPER_GET(reg, size)					\
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
		return be##size##_to_cpu(vcpu->arch.shared->reg);	\
	else								\
		return le##size##_to_cpu(vcpu->arch.shared->reg);	\
}									\

#define SHARED_WRAPPER_SET(reg, size)					\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
		vcpu->arch.shared->reg = cpu_to_be##size(val);		\
	else								\
		vcpu->arch.shared->reg = cpu_to_le##size(val);		\
}									\

#define SHARED_WRAPPER(reg, size)					\
	SHARED_WRAPPER_GET(reg, size)					\
	SHARED_WRAPPER_SET(reg, size)					\

#define SPRNG_WRAPPER(reg, bookehv_spr)					\
	SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
	SPRNG_WRAPPER_SET(reg, bookehv_spr)				\

#ifdef CONFIG_KVM_BOOKE_HV

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SPRNG_WRAPPER(reg, bookehv_spr)					\

#else

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SHARED_WRAPPER(reg, size)					\

#endif

SHARED_WRAPPER(critical, 64)
SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
SHARED_WRAPPER_GET(msr, 64)
static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
	if (kvmppc_shared_big_endian(vcpu))
		vcpu->arch.shared->msr = cpu_to_be64(val);
	else
		vcpu->arch.shared->msr = cpu_to_le64(val);
}
SHARED_WRAPPER(dsisr, 32)
SHARED_WRAPPER(int_pending, 32)
SHARED_WRAPPER(sprg4, 64)
SHARED_WRAPPER(sprg5, 64)
SHARED_WRAPPER(sprg6, 64)
SHARED_WRAPPER(sprg7, 64)

static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
{
	if (kvmppc_shared_big_endian(vcpu))
		return be32_to_cpu(vcpu->arch.shared->sr[nr]);
	else
		return le32_to_cpu(vcpu->arch.shared->sr[nr]);
}

static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
{
	if (kvmppc_shared_big_endian(vcpu))
		vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
	else
		vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
}
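
/*
 * Call after kvmppc_prepare_to_enter(): restores the lazy-EE interrupt
 * bookkeeping to "interrupts enabled" without actually hard-enabling them.
 */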
static inline void kvmppc_fix_ee_before_entry(void)
{
	trace_hardirqs_on();

#ifdef CONFIG_PPC64
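	/*
	 * To avoid races, the caller must have gone directly from having
	 * interrupts fully enabled to hard disabled.
	 */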
	WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);
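
	/* From here on, interrupts only need to be hard-enabled. */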
	local_paca->irq_happened = 0;
	irq_soft_mask_set(IRQS_ENABLED);
#endif
}
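
/*
 * Compute the effective address of an indexed (X-form) storage access:
 * (ra|0) + rb, truncated to 32 bits when the guest is not in 64-bit mode.
 */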
static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
{
	ulong ea;
	ulong msr_64bit = 0;

	ea = kvmppc_get_gpr(vcpu, rb);
	if (ra)
		ea += kvmppc_get_gpr(vcpu, ra);

#if defined(CONFIG_PPC_BOOK3E_64)
	msr_64bit = MSR_CM;
#elif defined(CONFIG_PPC_BOOK3S_64)
	msr_64bit = MSR_SF;
#endif

	if (!(kvmppc_get_msr(vcpu) & msr_64bit))
		ea = (uint32_t)ea;

	return ea;
}

extern void xics_wake_cpu(int cpu);

#endif /* __POWERPC_KVM_PPC_H__ */