#ifndef __POWERPC_KVM_PPC_H__
#define __POWERPC_KVM_PPC_H__

#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/kvm_book3s.h>
#else
#include <asm/kvm_booke.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/paca.h>
#include <asm/xive.h>
#include <asm/cpu_has_feature.h>
#endif

/*
 * KVMPPC_INST_SW_BREAKPOINT is the debug instruction used to implement
 * software breakpoints.
 */
#define KVMPPC_INST_SW_BREAKPOINT	0x00dddd00

enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
	EMULATE_FAIL,		/* can't emulate this instruction */
	EMULATE_AGAIN,		/* something went wrong, try again */
	EMULATE_EXIT_USER,	/* emulation requires exit to user-space */
};

enum instruction_fetch_type {
	INST_GENERIC,
	INST_SC,		/* system call */
};

enum xlate_instdata {
	XLATE_INST,		/* translate instruction address */
	XLATE_DATA		/* translate data address */
};

enum xlate_readwrite {
	XLATE_READ,		/* check for read permissions */
	XLATE_WRITE		/* check for write permissions */
};
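
/*
 * Illustrative usage sketch (not from the original header): the xlate
 * enums select what kvmppc_xlate() (declared below) should translate and
 * which access right to check. The surrounding error handling here is an
 * assumption.
 *
 *	struct kvmppc_pte pte;
 *	int r = kvmppc_xlate(vcpu, eaddr, XLATE_DATA, XLATE_READ, &pte);
 *	if (r)
 *		return r;	// no valid, readable translation
 *	// pte now describes the guest translation for eaddr
 */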

extern int kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
extern void kvmppc_handler_highmem(void);

extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_vcpu *vcpu,
			      unsigned int rt, unsigned int bytes,
			      int is_default_endian);
extern int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
			       unsigned int rt, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
				  unsigned int rt, unsigned int bytes,
				  int is_default_endian, int mmio_sign_extend);
extern int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
				  unsigned int rt, unsigned int bytes,
				  int is_default_endian);
extern int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
				   unsigned int rs, unsigned int bytes,
				   int is_default_endian);
extern int kvmppc_handle_store(struct kvm_vcpu *vcpu,
			       u64 val, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
				   int rs, unsigned int bytes,
				   int is_default_endian);

extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
				 enum instruction_fetch_type type, u32 *inst);

extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_emulate_instruction(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);

/* Core-specific hooks */

extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
			   unsigned int gtlb_idx);
extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
			      gva_t eaddr);
extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
			enum xlate_instdata xlid, enum xlate_readwrite xlrw,
			struct kvmppc_pte *pte);

extern int kvmppc_core_vcpu_create(struct kvm_vcpu *vcpu);
extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_processor_compat(void);
extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
				      struct kvm_translation *tr);

extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);

extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				       struct kvm_interrupt *irq);
extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
					ulong esr_flags);
extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
					   ulong dear_flags, ulong esr_flags);
extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
					   ulong esr_flags);
extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);

extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);

extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);

extern int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order);
extern void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info);
extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order);
extern void kvmppc_free_hpt(struct kvm_hpt_info *info);
extern void kvmppc_rmap_reset(struct kvm *kvm);
extern long kvmppc_prepare_vrma(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem);
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
			    struct kvm_memory_slot *memslot,
			    unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
					     struct iommu_group *grp);
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
					      struct iommu_group *grp);
extern int kvmppc_switch_mmu_to_hpt(struct kvm *kvm);
extern int kvmppc_switch_mmu_to_radix(struct kvm *kvm);
extern void kvmppc_setup_partition_table(struct kvm *kvm);

extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
					  struct kvm_create_spapr_tce_64 *args);
extern struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
							unsigned long liobn);
#define kvmppc_ioba_validate(stt, ioba, npages)				\
	(iommu_tce_check_ioba((stt)->page_shift, (stt)->offset,		\
			      (stt)->size, (ioba), (npages)) ?		\
			      H_PARAMETER : H_SUCCESS)
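
/*
 * Usage sketch (illustrative): H_PUT_TCE-style hypercall handlers are
 * expected to validate the IO bus address range against the TCE table
 * before acting on it, roughly:
 *
 *	long ret = kvmppc_ioba_validate(stt, ioba, npages);
 *	if (ret != H_SUCCESS)
 *		return ret;
 */
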
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba, unsigned long tce);
extern long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
				      unsigned long liobn, unsigned long ioba,
				      unsigned long tce_list,
				      unsigned long npages);
extern long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
			       unsigned long liobn, unsigned long ioba,
			       unsigned long tce_value, unsigned long npages);
extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba);
extern struct page *kvm_alloc_hpt_cma(unsigned long nr_pages);
extern void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern void kvmppc_core_free_memslot(struct kvm *kvm,
				     struct kvm_memory_slot *slot);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem,
				enum kvm_mr_change change);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
				      struct kvm_ppc_smmu_info *info);
extern void kvmppc_core_flush_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *memslot);

extern int kvmppc_bookehv_init(void);
extern void kvmppc_bookehv_exit(void);

extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);

extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);
extern long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
					    struct kvm_ppc_resize_hpt *rhpt);
extern long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
					   struct kvm_ppc_resize_hpt *rhpt);

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);

extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
extern void kvmppc_rtas_tokens_free(struct kvm *kvm);

extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu);
void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu);

union kvmppc_one_reg {
	u32	wval;
	u64	dval;
	vector128 vval;
	u64	vsxval[2];
	u32	vsx32val[4];
	u16	vsx16val[8];
	u8	vsx8val[16];
	struct {
		u64	addr;
		u64	length;
	}	vpaval;
	u64	xive_timaval[2];
};
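
/*
 * Illustrative note: the union allows one 128-bit vector value to be
 * viewed at several granularities. For example (hypothetical values):
 *
 *	union kvmppc_one_reg r;
 *	r.vsxval[0] = 0x0123456789abcdefULL;	// first doubleword
 *	r.vsxval[1] = 0xfedcba9876543210ULL;	// second doubleword
 *	// r.vsx32val[0..3] now alias the same 128 bits as 32-bit words
 */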

struct kvmppc_ops {
	struct module *owner;
	int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
	void (*inject_interrupt)(struct kvm_vcpu *vcpu, int vec,
				 u64 srr1_flags);
	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
	int (*vcpu_run)(struct kvm_vcpu *vcpu);
	int (*vcpu_create)(struct kvm_vcpu *vcpu);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*check_requests)(struct kvm_vcpu *vcpu);
	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
	void (*flush_memslot)(struct kvm *kvm,
			      struct kvm_memory_slot *memslot);
	int (*prepare_memory_region)(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem,
				enum kvm_mr_change change);
	void (*commit_memory_region)(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change);
	int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
			       unsigned long end);
	int (*age_hva)(struct kvm *kvm, unsigned long start,
		       unsigned long end);
	int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
	void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
	void (*free_memslot)(struct kvm_memory_slot *slot);
	int (*init_vm)(struct kvm *kvm);
	void (*destroy_vm)(struct kvm *kvm);
	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
	int (*emulate_op)(struct kvm_vcpu *vcpu,
			  unsigned int inst, int *advance);
	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
	void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
	long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
			      unsigned long arg);
	int (*hcall_implemented)(unsigned long hcall);
	int (*irq_bypass_add_producer)(struct irq_bypass_consumer *,
				       struct irq_bypass_producer *);
	void (*irq_bypass_del_producer)(struct irq_bypass_consumer *,
					struct irq_bypass_producer *);
	int (*configure_mmu)(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg);
	int (*get_rmmu_info)(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
	int (*set_smt_mode)(struct kvm *kvm, unsigned long mode,
			    unsigned long flags);
	void (*giveup_ext)(struct kvm_vcpu *vcpu, ulong msr);
	int (*enable_nested)(struct kvm *kvm);
	int (*load_from_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
			       int size);
	int (*store_to_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
			      int size);
	int (*enable_svm)(struct kvm *kvm);
	int (*svm_off)(struct kvm *kvm);
};

extern struct kvmppc_ops *kvmppc_hv_ops;
extern struct kvmppc_ops *kvmppc_pr_ops;

static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
				       enum instruction_fetch_type type,
				       u32 *inst)
{
	int ret = EMULATE_DONE;
	u32 fetched_inst;

	/*
	 * Load the instruction manually if it failed to do so in the
	 * fast path.
	 */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
		ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);

	/* Write fetch_failed unswapped if the fetch failed */
	if (ret == EMULATE_DONE)
		fetched_inst = kvmppc_need_byteswap(vcpu) ?
			       swab32(vcpu->arch.last_inst) :
			       vcpu->arch.last_inst;
	else
		fetched_inst = vcpu->arch.last_inst;

	*inst = fetched_inst;
	return ret;
}
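
/*
 * Usage sketch (illustrative of how emulation paths call this helper):
 * give up and let the caller retry if the instruction cannot be fetched.
 *
 *	u32 inst;
 *	int emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
 *	if (emulated != EMULATE_DONE)
 *		return emulated;
 *	// decode "inst" and emulate it
 */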

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
{
	return kvm->arch.kvm_ops == kvmppc_hv_ops;
}
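
/*
 * Illustrative dispatch sketch (assumed call site, not part of this
 * header): generic code calls through the per-VM ops table, so the same
 * site serves both the HV (kvmppc_hv_ops) and PR (kvmppc_pr_ops)
 * implementations:
 *
 *	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
 */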

extern int kvmppc_hwrng_present(void);

/*
 * Cuts out inst bits with ordering according to spec.
 * That means the leftmost bit is zero. All given bits are included.
 */
static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = (1 << (lsb - msb + 1)) - 1;
	r = (inst >> (63 - lsb)) & mask;

	return r;
}

/*
 * Replaces inst bits with ordering according to spec.
 */
static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
	r = (inst & ~mask) | ((value << (63 - lsb)) & mask);

	return r;
}
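
/*
 * Worked example (illustrative): msb/lsb use IBM bit numbering on a
 * 64-bit value, i.e. bit 0 is the most significant bit, so a 32-bit
 * instruction held in the low word has its bit n at position n + 32.
 * Extracting the primary opcode (instruction bits 0..5) is therefore:
 *
 *	u32 op = kvmppc_get_field(inst, 0 + 32, 5 + 32);
 *	// equivalent to (inst >> 26) & 0x3f
 */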

#define one_reg_size(id)	\
	(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))

#define get_reg_val(id, reg)	({		\
	union kvmppc_one_reg __u;		\
	switch (one_reg_size(id)) {		\
	case 4: __u.wval = (reg); break;	\
	case 8: __u.dval = (reg); break;	\
	default: BUG();				\
	}					\
	__u;					\
})

#define set_reg_val(id, val)	({		\
	u64 __v;				\
	switch (one_reg_size(id)) {		\
	case 4: __v = (val).wval; break;	\
	case 8: __v = (val).dval; break;	\
	default: BUG();				\
	}					\
	__v;					\
})
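
/*
 * Usage sketch (illustrative; the register choice is an assumption): a
 * ONE_REG handler wraps a value into the union according to the size
 * encoded in the register id, and unwraps it again on the set side:
 *
 *	case KVM_REG_PPC_DABR:
 *		*val = get_reg_val(id, vcpu->arch.dabr);	// get
 *		break;
 *	...
 *	vcpu->arch.dabr = set_reg_val(id, *val);		// set
 */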

int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);

struct openpic;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void kvm_cma_reserve(void) __init;
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
	paca_ptrs[cpu]->kvm_hstate.xics_phys = (void __iomem *)addr;
}

static inline void kvmppc_set_xive_tima(int cpu,
					unsigned long phys_addr,
					void __iomem *virt_addr)
{
	paca_ptrs[cpu]->kvm_hstate.xive_tima_phys = (void __iomem *)phys_addr;
	paca_ptrs[cpu]->kvm_hstate.xive_tima_virt = virt_addr;
}

static inline u32 kvmppc_get_xics_latch(void)
{
	u32 xirr;

	xirr = get_paca()->kvm_hstate.saved_xirr;
	get_paca()->kvm_hstate.saved_xirr = 0;
	return xirr;
}

/*
 * Set the host_ipi flag in another CPU's PACA so that an IPI sent to
 * that CPU while it is running a guest is treated as a request to exit
 * to the host rather than being handled within the guest.
 */
static inline void kvmppc_set_host_ipi(int cpu)
{
	/*
	 * Order previous stores (e.g. the IPI message) before the store
	 * to host_ipi; pairs with the barrier in kvmppc_clear_host_ipi().
	 */
	smp_mb();
	paca_ptrs[cpu]->kvm_hstate.host_ipi = 1;
}

static inline void kvmppc_clear_host_ipi(int cpu)
{
	paca_ptrs[cpu]->kvm_hstate.host_ipi = 0;
	/*
	 * Order the clearing of host_ipi before any subsequent processing
	 * of IPI messages; pairs with the barrier in kvmppc_set_host_ipi().
	 */
	smp_mb();
}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
}

extern void kvm_hv_vm_activated(void);
extern void kvm_hv_vm_deactivated(void);
extern bool kvm_hv_mode_active(void);

extern void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
					struct kvm_nested_guest *nested);

#else
static inline void __init kvm_cma_reserve(void)
{}

static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}

static inline void kvmppc_set_xive_tima(int cpu,
					unsigned long phys_addr,
					void __iomem *virt_addr)
{}

static inline u32 kvmppc_get_xics_latch(void)
{
	return 0;
}

static inline void kvmppc_set_host_ipi(int cpu)
{}

static inline void kvmppc_clear_host_ipi(int cpu)
{}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_kick(vcpu);
}

static inline bool kvm_hv_mode_active(void) { return false; }

#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

#ifdef CONFIG_KVM_XICS
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
}

static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
{
	if (kvm && kvm_irq_bypass)
		return kvm->arch.pimap;
	return NULL;
}

extern void kvmppc_alloc_host_rm_ops(void);
extern void kvmppc_free_host_rm_ops(void);
extern void kvmppc_free_pimap(struct kvm *kvm);
extern int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall);
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xics_ipi_action(void);
extern void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, __be32 xirr,
					struct kvmppc_irq_map *irq_map,
					struct kvmppc_passthru_irqmap *pimap,
					bool *again);

extern int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
			       int level, bool line_status);

extern int h_ipi_redirect;
#else
static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
	{ return NULL; }
static inline void kvmppc_alloc_host_rm_ops(void) {}
static inline void kvmppc_free_host_rm_ops(void) {}
static inline void kvmppc_free_pimap(struct kvm *kvm) {}
static inline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
	{ return 0; }
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
	{ return 0; }
#endif /* CONFIG_KVM_XICS */

#ifdef CONFIG_KVM_XIVE
/*
 * Naming note: the first "xive" below is the "eXternal Interrupt
 * Virtualization Engine" (the POWER9 interrupt controller), while the
 * "xive" in set_xive()/get_xive() refers to the legacy "eXternal
 * Interrupt Vector Entry" configuration of an interrupt as seen through
 * the XICS interface.
 */
extern int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xive_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xive_int_off(struct kvm *kvm, u32 irq);
extern void kvmppc_xive_init_module(void);
extern void kvmppc_xive_exit_module(void);

extern int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				  struct irq_desc *host_desc);
extern int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				  struct irq_desc *host_desc);
extern u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval);

extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
			       int level, bool line_status);
extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu);

static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XIVE;
}

extern int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_xive_native_init_module(void);
extern void kvmppc_xive_native_exit_module(void);
extern int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
				     union kvmppc_one_reg *val);
extern int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
				     union kvmppc_one_reg *val);
extern bool kvmppc_xive_native_supported(void);
#else
static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
				       u32 priority) { return -1; }
static inline int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				       u32 *priority) { return -1; }
static inline int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) { return -1; }
static inline int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) { return -1; }
static inline void kvmppc_xive_init_module(void) { }
static inline void kvmppc_xive_exit_module(void) { }

static inline int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu)
	{ return -EBUSY; }
static inline void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xive_set_mapped(struct kvm *kvm,
					 unsigned long guest_irq,
					 struct irq_desc *host_desc)
	{ return -ENODEV; }
static inline int kvmppc_xive_clr_mapped(struct kvm *kvm,
					 unsigned long guest_irq,
					 struct irq_desc *host_desc)
	{ return -ENODEV; }
static inline u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu) { return 0; }
static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
	{ return -ENOENT; }

static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id,
				      u32 irq, int level, bool line_status)
	{ return -ENODEV; }
static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { }

static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
						  struct kvm_vcpu *vcpu,
						  u32 cpu)
	{ return -EBUSY; }
static inline void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
static inline void kvmppc_xive_native_init_module(void) { }
static inline void kvmppc_xive_native_exit_module(void) { }
static inline int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
					    union kvmppc_one_reg *val)
	{ return 0; }
static inline int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
					    union kvmppc_one_reg *val)
	{ return -ENOENT; }

#endif /* CONFIG_KVM_XIVE */

#if defined(CONFIG_PPC_POWERNV) && defined(CONFIG_KVM_BOOK3S_64_HANDLER)
static inline bool xics_on_xive(void)
{
	return xive_enabled() && cpu_has_feature(CPU_FTR_HVMODE);
}
#else
static inline bool xics_on_xive(void)
{
	return false;
}
#endif

/*
 * Prototypes for functions called only from assembler code.
 * Having prototypes reduces sparse errors.
 */
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			 unsigned long ioba, unsigned long tce);
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
				  unsigned long liobn, unsigned long ioba,
				  unsigned long tce_list,
				  unsigned long npages);
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
			   unsigned long liobn, unsigned long ioba,
			   unsigned long tce_value, unsigned long npages);
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count);
long kvmppc_h_random(struct kvm_vcpu *vcpu);
void kvmhv_commence_exit(int trap);
void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
void kvmppc_subcore_enter_guest(void);
void kvmppc_subcore_exit_guest(void);
long kvmppc_realmode_hmi_handler(void);
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel);
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn);
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu);
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn,
		      unsigned long va);
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index);
long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
			   unsigned long dest, unsigned long src);
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status, bool data);
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu);
unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu);
unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server);
int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
		    unsigned long mfrr);
int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu);

/*
 * Host-side operations we want to set up while running in real
 * mode in the guest operating on the xics.
 * Currently only VCPU wakeup is supported.
 */
union kvmppc_rm_state {
	unsigned long raw;
	struct {
		u32 in_host;
		u32 rm_action;
	};
};

struct kvmppc_host_rm_core {
	union kvmppc_rm_state rm_state;
	void *rm_data;
	char pad[112];
};

struct kvmppc_host_rm_ops {
	struct kvmppc_host_rm_core *rm_core;
	void (*vcpu_kick)(struct kvm_vcpu *vcpu);
};

extern struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;

static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GEPR);
#elif defined(CONFIG_BOOKE)
	return vcpu->arch.epr;
#else
	return 0;
#endif
}

static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GEPR, epr);
#elif defined(CONFIG_BOOKE)
	vcpu->arch.epr = epr;
#endif
}

#ifdef CONFIG_KVM_MPIC

void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu);
int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 cpu);
void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu);

#else

static inline void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
{
}

static inline int kvmppc_mpic_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu)
{
	return -EINVAL;
}

static inline void kvmppc_mpic_disconnect_vcpu(struct openpic *opp,
					       struct kvm_vcpu *vcpu)
{
}

#endif /* CONFIG_KVM_MPIC */

int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
			      struct kvm_config_tlb *cfg);
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
			     struct kvm_dirty_tlb *cfg);

long kvmppc_alloc_lpid(void);
void kvmppc_claim_lpid(long lpid);
void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids);

static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
{
	struct page *page;

	/*
	 * We can only access pages that the kernel maps as memory. Bail
	 * out for unmapped ones.
	 */
	if (!pfn_valid(pfn))
		return;

	/* Clear i-cache for new pages */
	page = pfn_to_page(pfn);
	if (!test_bit(PG_arch_1, &page->flags)) {
		flush_dcache_icache_page(page);
		set_bit(PG_arch_1, &page->flags);
	}
}

/*
 * Shared struct helpers: the shared struct can be little or big endian,
 * depending on the guest endianness, so expose helpers for both cases.
 */
static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
	/* Only Book3S_64 PR supports bi-endian for now */
	return vcpu->arch.shared_big_endian;
#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
	/* Book3S_64 HV on little endian is always little endian */
	return false;
#else
	return true;
#endif
}

#define SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	return mfspr(bookehv_spr);					\
}									\

#define SPRNG_WRAPPER_SET(reg, bookehv_spr)				\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val)	\
{									\
	mtspr(bookehv_spr, val);					\
}									\

#define SHARED_WRAPPER_GET(reg, size)					\
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
		return be##size##_to_cpu(vcpu->arch.shared->reg);	\
	else								\
		return le##size##_to_cpu(vcpu->arch.shared->reg);	\
}									\

#define SHARED_WRAPPER_SET(reg, size)					\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
		vcpu->arch.shared->reg = cpu_to_be##size(val);		\
	else								\
		vcpu->arch.shared->reg = cpu_to_le##size(val);		\
}									\

#define SHARED_WRAPPER(reg, size)					\
	SHARED_WRAPPER_GET(reg, size)					\
	SHARED_WRAPPER_SET(reg, size)					\

#define SPRNG_WRAPPER(reg, bookehv_spr)					\
	SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
	SPRNG_WRAPPER_SET(reg, bookehv_spr)				\

#ifdef CONFIG_KVM_BOOKE_HV

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SPRNG_WRAPPER(reg, bookehv_spr)					\

#else

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SHARED_WRAPPER(reg, size)					\

#endif
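
/*
 * Illustrative expansion: SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
 * generates kvmppc_get_sprg0()/kvmppc_set_sprg0(). Under
 * CONFIG_KVM_BOOKE_HV they access the guest SPR directly:
 *
 *	static inline ulong kvmppc_get_sprg0(struct kvm_vcpu *vcpu)
 *	{
 *		return mfspr(SPRN_GSPRG0);
 *	}
 *
 * otherwise they access vcpu->arch.shared->sprg0 with the appropriate
 * endian conversion via kvmppc_shared_big_endian().
 */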

SHARED_WRAPPER(critical, 64)
SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
SHARED_WRAPPER_GET(msr, 64)
static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
	if (kvmppc_shared_big_endian(vcpu))
		vcpu->arch.shared->msr = cpu_to_be64(val);
	else
		vcpu->arch.shared->msr = cpu_to_le64(val);
}
SHARED_WRAPPER(dsisr, 32)
SHARED_WRAPPER(int_pending, 32)
SHARED_WRAPPER(sprg4, 64)
SHARED_WRAPPER(sprg5, 64)
SHARED_WRAPPER(sprg6, 64)
SHARED_WRAPPER(sprg7, 64)

static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
{
	if (kvmppc_shared_big_endian(vcpu))
		return be32_to_cpu(vcpu->arch.shared->sr[nr]);
	else
		return le32_to_cpu(vcpu->arch.shared->sr[nr]);
}

static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
{
	if (kvmppc_shared_big_endian(vcpu))
		vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
	else
		vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
}

/*
 * Please call after prepare_to_enter. This function puts the lazy ee state
 * back to normal mode, without actually enabling interrupts.
 */
static inline void kvmppc_fix_ee_before_entry(void)
{
	trace_hardirqs_on();

#ifdef CONFIG_PPC64
	/*
	 * To avoid races, the caller must have gone directly from having
	 * interrupts fully-enabled to hard-disabled.
	 */
	WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);

	/* Only need to enable IRQs by hard enabling them after this */
	local_paca->irq_happened = 0;
	irq_soft_mask_set(IRQS_ENABLED);
#endif
}
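
/*
 * Usage sketch (illustrative): entry paths call this once
 * kvmppc_prepare_to_enter() has returned with interrupts hard-disabled,
 * just before switching to the guest:
 *
 *	r = kvmppc_prepare_to_enter(vcpu);
 *	if (r <= 0)
 *		return r;
 *	kvmppc_fix_ee_before_entry();
 */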

static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
{
	ulong ea;
	ulong msr_64bit = 0;

	ea = kvmppc_get_gpr(vcpu, rb);
	if (ra)
		ea += kvmppc_get_gpr(vcpu, ra);

#if defined(CONFIG_PPC_BOOK3E_64)
	msr_64bit = MSR_CM;
#elif defined(CONFIG_PPC_BOOK3S_64)
	msr_64bit = MSR_SF;
#endif

	if (!(kvmppc_get_msr(vcpu) & msr_64bit))
		ea = (uint32_t)ea;

	return ea;
}
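
/*
 * Worked example (illustrative): emulating an indexed load such as
 * "lwzx rt, ra, rb" computes EA = (ra ? GPR[ra] : 0) + GPR[rb]; when the
 * guest is not in 64-bit mode (MSR SF/CM clear) the result is truncated
 * to 32 bits:
 *
 *	ulong ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
 */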

extern void xics_wake_cpu(int cpu);

#endif /* __POWERPC_KVM_PPC_H__ */