#ifndef __KVM_HOST_H
#define __KVM_HOST_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/context_tracking.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

#ifndef KVM_MMIO_SIZE
#define KVM_MMIO_SIZE 8
#endif

/*
 * Bits 16 ~ 31 of kvm_memory_region::flags are internally used by kvm;
 * the other bits are visible to userspace and are defined in
 * include/linux/kvm.h.
 */
#define KVM_MEMSLOT_INVALID     (1UL << 16)

/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS  2

/*
 * For a normal pfn, the highest 12 bits should be zero, so we can
 * mask bits 62 ~ 52 to indicate an error pfn, and mask bit 63 to
 * indicate the no-slot pfn.
 */
#define KVM_PFN_ERR_MASK        (0x7ffULL << 52)
#define KVM_PFN_ERR_NOSLOT_MASK (0xfffULL << 52)
#define KVM_PFN_NOSLOT          (0x1ULL << 63)

#define KVM_PFN_ERR_FAULT       (KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON    (KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT    (KVM_PFN_ERR_MASK + 2)

/*
 * Error pfns indicate that the gfn is in a slot but the host failed to
 * translate it to a pfn.
 */
static inline bool is_error_pfn(pfn_t pfn)
{
        return !!(pfn & KVM_PFN_ERR_MASK);
}

/*
 * error_noslot pfns indicate that the gfn can not be translated to a
 * pfn: either it is not in any slot, or the translation itself failed.
 */
static inline bool is_error_noslot_pfn(pfn_t pfn)
{
        return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
}

/* noslot pfns indicate that the gfn is not in any slot. */
static inline bool is_noslot_pfn(pfn_t pfn)
{
        return pfn == KVM_PFN_NOSLOT;
}
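
/*
 * Illustrative sketch (added; not part of the original header): how a
 * caller typically distinguishes the pfn classes above after a gfn->pfn
 * translation.  example_map_gfn() is a hypothetical helper used only
 * for illustration.
 */
#if 0   /* example only, never compiled */
static int example_map_gfn(struct kvm *kvm, gfn_t gfn)
{
        pfn_t pfn = gfn_to_pfn(kvm, gfn);

        if (is_noslot_pfn(pfn))         /* gfn not backed by any memslot */
                return -EFAULT;
        if (is_error_pfn(pfn))          /* in a slot, but translation failed */
                return pfn == KVM_PFN_ERR_HWPOISON ? -EHWPOISON : -EFAULT;
        /* is_error_noslot_pfn() would have covered both checks at once */
        return 0;
}
#endif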

/*
 * Architectures whose KVM_HVA_ERR_BAD is something other than
 * PAGE_OFFSET (e.g. s390) provide their own defines and their own
 * kvm_is_error_hva().
 */
#ifndef KVM_HVA_ERR_BAD

#define KVM_HVA_ERR_BAD         (PAGE_OFFSET)
#define KVM_HVA_ERR_RO_BAD      (PAGE_OFFSET + PAGE_SIZE)

static inline bool kvm_is_error_hva(unsigned long addr)
{
        return addr >= PAGE_OFFSET;
}

#endif

#define KVM_ERR_PTR_BAD_PAGE    (ERR_PTR(-ENOENT))

static inline bool is_error_page(struct page *page)
{
        return IS_ERR(page);
}

/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH           0
#define KVM_REQ_MIGRATE_TIMER       1
#define KVM_REQ_REPORT_TPR_ACCESS   2
#define KVM_REQ_MMU_RELOAD          3
#define KVM_REQ_TRIPLE_FAULT        4
#define KVM_REQ_PENDING_TIMER       5
#define KVM_REQ_UNHALT              6
#define KVM_REQ_MMU_SYNC            7
#define KVM_REQ_CLOCK_UPDATE        8
#define KVM_REQ_KICK                9
#define KVM_REQ_DEACTIVATE_FPU     10
#define KVM_REQ_EVENT              11
#define KVM_REQ_APF_HALT           12
#define KVM_REQ_STEAL_UPDATE       13
#define KVM_REQ_NMI                14
#define KVM_REQ_PMU                15
#define KVM_REQ_PMI                16
#define KVM_REQ_WATCHDOG           17
#define KVM_REQ_MASTERCLOCK_UPDATE 18
#define KVM_REQ_MCLOCK_INPROGRESS  19
#define KVM_REQ_EPR_EXIT           20
#define KVM_REQ_SCAN_IOAPIC        21
#define KVM_REQ_GLOBAL_CLOCK_UPDATE 22

#define KVM_USERSPACE_IRQ_SOURCE_ID             0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID        1

struct kvm;
struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;

extern raw_spinlock_t kvm_lock;
extern struct list_head vm_list;

struct kvm_io_range {
        gpa_t addr;
        int len;
        struct kvm_io_device *dev;
};

#define NR_IOBUS_DEVS 1000

struct kvm_io_bus {
        int dev_count;
        int ioeventfd_count;
        struct kvm_io_range range[];
};

enum kvm_bus {
        KVM_MMIO_BUS,
        KVM_PIO_BUS,
        KVM_VIRTIO_CCW_NOTIFY_BUS,
        KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                     int len, const void *val);
int kvm_io_bus_write_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                            int len, const void *val, long cookie);
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len,
                    void *val);
int kvm_io_bus_read_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                           int len, void *val, long cookie);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                            int len, struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                              struct kvm_io_device *dev);
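
/*
 * Illustrative sketch (added; not part of the original header): a device
 * emulation typically registers itself on one of the buses above under
 * slots_lock; bus dispatch then routes vcpu accesses by (addr, len).
 * EXAMPLE_DEV_ADDR, EXAMPLE_DEV_LEN and example_register() are
 * hypothetical names used only for illustration.
 */
#if 0   /* example only, never compiled */
static int example_register(struct kvm *kvm, struct kvm_io_device *dev)
{
        int ret;

        mutex_lock(&kvm->slots_lock);
        ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, EXAMPLE_DEV_ADDR,
                                      EXAMPLE_DEV_LEN, dev);
        mutex_unlock(&kvm->slots_lock);
        return ret;
}
#endif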

#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
        struct work_struct work;
        struct list_head link;
        struct list_head queue;
        struct kvm_vcpu *vcpu;
        struct mm_struct *mm;
        gva_t gva;
        unsigned long addr;
        struct kvm_arch_async_pf arch;
        struct page *page;
        bool done;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
                       struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

enum {
        OUTSIDE_GUEST_MODE,
        IN_GUEST_MODE,
        EXITING_GUEST_MODE,
        READING_SHADOW_PAGE_TABLES,
};

/*
 * Sometimes a large or cross-page mmio needs to be broken up into
 * separate exits for userspace servicing.
 */
struct kvm_mmio_fragment {
        gpa_t gpa;
        void *data;
        unsigned len;
};

struct kvm_vcpu {
        struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
        struct preempt_notifier preempt_notifier;
#endif
        int cpu;
        int vcpu_id;
        int srcu_idx;
        int mode;
        unsigned long requests;
        unsigned long guest_debug;

        struct mutex mutex;
        struct kvm_run *run;

        int fpu_active;
        int guest_fpu_loaded, guest_xcr0_loaded;
        wait_queue_head_t wq;
        struct pid *pid;
        int sigset_active;
        sigset_t sigset;
        struct kvm_vcpu_stat stat;

#ifdef CONFIG_HAS_IOMEM
        int mmio_needed;
        int mmio_read_completed;
        int mmio_is_write;
        int mmio_cur_fragment;
        int mmio_nr_fragments;
        struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
        struct {
                u32 queued;
                struct list_head queue;
                struct list_head done;
                spinlock_t lock;
        } async_pf;
#endif

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
        /*
         * Cpu relax intercept or pause loop exit optimization
         * in_spin_loop: set when a vcpu does a pause loop exit
         *  or cpu relax intercepted.
         * dy_eligible: indicates whether vcpu is eligible for directed yield.
         */
        struct {
                bool in_spin_loop;
                bool dy_eligible;
        } spin_loop;
#endif
        bool preempted;
        struct kvm_vcpu_arch arch;
};

static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
        return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}

/*
 * Some of the bitops functions do not support too long bitmaps.
 * This number must be determined not to exceed such limits.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_memory_slot {
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long *dirty_bitmap;
        struct kvm_arch_memory_slot arch;
        unsigned long userspace_addr;
        u32 flags;
        short id;
};

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
        return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}
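
/*
 * Illustrative sketch (added; not part of the original header): the dirty
 * bitmap allocates one bit per guest page, rounded up to a whole
 * unsigned long.  For a hypothetical 1000-page slot on a 64-bit host:
 * ALIGN(1000, 64) / 8 = 1024 / 8 = 128 bytes.
 */
#if 0   /* example only, never compiled */
static unsigned long example_bitmap_bytes(void)
{
        struct kvm_memory_slot slot = { .npages = 1000 };

        return kvm_dirty_bitmap_bytes(&slot);  /* 128 on a 64-bit host */
}
#endif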

struct kvm_kernel_irq_routing_entry {
        u32 gsi;
        u32 type;
        int (*set)(struct kvm_kernel_irq_routing_entry *e,
                   struct kvm *kvm, int irq_source_id, int level,
                   bool line_status);
        union {
                struct {
                        unsigned irqchip;
                        unsigned pin;
                } irqchip;
                struct msi_msg msi;
        };
        struct hlist_node link;
};

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING

struct kvm_irq_routing_table {
        int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
        struct kvm_kernel_irq_routing_entry *rt_entries;
        u32 nr_rt_entries;
        /*
         * Array indexed by gsi. Each entry contains list of irq chips
         * the gsi is connected to.
         */
        struct hlist_head map[0];
};

#else

struct kvm_irq_routing_table {};

#endif

#ifndef KVM_PRIVATE_MEM_SLOTS
#define KVM_PRIVATE_MEM_SLOTS 0
#endif

#ifndef KVM_MEM_SLOTS_NUM
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif

/*
 * Note:
 * memslots are not sorted by id anymore, please use id_to_memslot()
 * to get the memslot by its id.
 */
struct kvm_memslots {
        u64 generation;
        struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
        /* The mapping table from slot id to the index in memslots[]. */
        short id_to_index[KVM_MEM_SLOTS_NUM];
};

struct kvm {
        spinlock_t mmu_lock;
        struct mutex slots_lock;
        struct mm_struct *mm; /* userspace tied to this vm */
        struct kvm_memslots *memslots;
        struct srcu_struct srcu;
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
        u32 bsp_vcpu_id;
#endif
        struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
        atomic_t online_vcpus;
        int last_boosted_vcpu;
        struct list_head vm_list;
        struct mutex lock;
        struct kvm_io_bus *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
        struct {
                spinlock_t lock;
                struct list_head items;
                struct list_head resampler_list;
                struct mutex resampler_lock;
        } irqfds;
        struct list_head ioeventfds;
#endif
        struct kvm_vm_stat stat;
        struct kvm_arch arch;
        atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
        spinlock_t ring_lock;
        struct list_head coalesced_zones;
#endif

        struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
        /*
         * Update side is protected by irq_lock and,
         * if configured, irqfds.lock.
         */
        struct kvm_irq_routing_table __rcu *irq_routing;
        struct hlist_head mask_notifier_list;
        struct hlist_head irq_ack_notifier_list;
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
        struct mmu_notifier mmu_notifier;
        unsigned long mmu_notifier_seq;
        long mmu_notifier_count;
#endif
        long tlbs_dirty;
        struct list_head devices;
};

#define kvm_err(fmt, ...) \
        pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_info(fmt, ...) \
        pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
        pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
        pr_err_ratelimited("kvm [%i]: " fmt, \
                           task_tgid_nr(current), ## __VA_ARGS__)

/* The guest did something we don't support. */
#define vcpu_unimpl(vcpu, fmt, ...) \
        kvm_pr_unimpl("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
        smp_rmb();      /* order against the write side in vcpu creation */
        return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
        for (idx = 0; \
             idx < atomic_read(&kvm->online_vcpus) && \
             (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
             idx++)
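
/*
 * Illustrative sketch (added; not part of the original header): the
 * iterator above visits every created vcpu; a common pattern is to post
 * a request to each one and kick it out of guest mode.
 * example_request_all() is hypothetical.
 */
#if 0   /* example only, never compiled */
static void example_request_all(struct kvm *kvm, int req)
{
        int idx;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(idx, vcpu, kvm) {
                kvm_make_request(req, vcpu);
                kvm_vcpu_kick(vcpu);
        }
}
#endif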

#define kvm_for_each_memslot(memslot, slots) \
        for (memslot = &slots->memslots[0]; \
             memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages; \
             memslot++)

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

int __must_check vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
#else
static inline int kvm_irqfd_init(void)
{
        return 0;
}

static inline void kvm_irqfd_exit(void)
{
}
#endif
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
             struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new,
                     u64 last_generation);

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
        return rcu_dereference_check(kvm->memslots,
                        srcu_read_lock_held(&kvm->srcu)
                        || lockdep_is_held(&kvm->slots_lock));
}

static inline struct kvm_memory_slot *
id_to_memslot(struct kvm_memslots *slots, int id)
{
        int index = slots->id_to_index[id];
        struct kvm_memory_slot *slot;

        slot = &slots->memslots[index];

        WARN_ON(slot->id != id);
        return slot;
}

/*
 * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
 * - create a new memory slot
 * - delete an existing memory slot
 * - modify an existing memory slot
 *   -- move it in the guest physical memory space
 *   -- just change its flags
 *
 * Since flags can be changed by some of these operations, the following
 * differentiation is the best we can do for __kvm_set_memory_region():
 */
enum kvm_mr_change {
        KVM_MR_CREATE,
        KVM_MR_DELETE,
        KVM_MR_MOVE,
        KVM_MR_FLAGS_ONLY,
};

int kvm_set_memory_region(struct kvm *kvm,
                          struct kvm_userspace_memory_region *mem);
int __kvm_set_memory_region(struct kvm *kvm,
                            struct kvm_userspace_memory_region *mem);
void kvm_arch_free_memslot(struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont);
int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages);
void kvm_arch_memslots_updated(struct kvm *kvm);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_userspace_memory_region *mem,
                                   enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   const struct kvm_memory_slot *old,
                                   enum kvm_mr_change change);
bool kvm_largepages_enabled(void);
void kvm_disable_largepages(void);
/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);
/* flush memory translations pointing to 'slot' */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot);

int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
                            int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
                       bool write_fault, bool *writable);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
                      bool *writable);
pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);

void kvm_release_pfn_dirty(pfn_t pfn);
void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
void kvm_set_pfn_accessed(pfn_t pfn);
void kvm_get_pfn(pfn_t pfn);

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
                        int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
                          unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                          void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
                         int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
                    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                           void *data, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                              gpa_t gpa, unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
                             gfn_t gfn);

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
bool kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);
void kvm_make_mclock_inprogress_request(struct kvm *kvm);
void kvm_make_scan_ioapic_request(struct kvm *kvm);

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg);
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);

int kvm_dev_ioctl_check_extension(long ext);

int kvm_get_dirty_log(struct kvm *kvm,
                      struct kvm_dirty_log *log, int *is_dirty);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem);
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
                          bool line_status);
long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);

void kvm_free_physmem(struct kvm *kvm);

void *kvm_kvzalloc(unsigned long size);
void kvm_kvfree(const void *addr);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
        return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
        kfree(kvm);
}
#endif

static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
        return vcpu->arch.wqp;
#else
        return &vcpu->wq;
#endif
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

bool kvm_is_mmio_pfn(pfn_t pfn);

struct kvm_irq_ack_notifier {
        struct hlist_node link;
        unsigned gsi;
        void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

struct kvm_assigned_dev_kernel {
        struct kvm_irq_ack_notifier ack_notifier;
        struct list_head list;
        int assigned_dev_id;
        int host_segnr;
        int host_busnr;
        int host_devfn;
        unsigned int entries_nr;
        int host_irq;
        bool host_irq_disabled;
        bool pci_2_3;
        struct msix_entry *host_msix_entries;
        int guest_irq;
        struct msix_entry *guest_msix_entries;
        unsigned long irq_requested_type;
        int irq_source_id;
        int flags;
        struct pci_dev *dev;
        struct kvm *kvm;
        spinlock_t intx_lock;
        spinlock_t intx_mask_lock;
        char irq_name[32];
        struct pci_saved_state *pci_saved_state;
};

struct kvm_irq_mask_notifier {
        void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
        int irq;
        struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
                                    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
                                      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
                             bool mask);

int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
                bool line_status);
int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
                int irq_source_id, int level, bool line_status);
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
                                   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
                                     struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);

/* For vcpu->arch.iommu_flags */
#define KVM_IOMMU_CACHE_COHERENCY       0x1

#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
int kvm_iommu_map_guest(struct kvm *kvm);
int kvm_iommu_unmap_guest(struct kvm *kvm);
int kvm_assign_device(struct kvm *kvm,
                      struct kvm_assigned_dev_kernel *assigned_dev);
int kvm_deassign_device(struct kvm *kvm,
                        struct kvm_assigned_dev_kernel *assigned_dev);
#else
static inline int kvm_iommu_map_pages(struct kvm *kvm,
                                      struct kvm_memory_slot *slot)
{
        return 0;
}

static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
                                         struct kvm_memory_slot *slot)
{
}

static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
{
        return 0;
}
#endif

static inline void kvm_guest_enter(void)
{
        unsigned long flags;

        BUG_ON(preemptible());

        local_irq_save(flags);
        guest_enter();
        local_irq_restore(flags);

        /* KVM does not hold any references to rcu protected data when it
         * switches CPU into a guest mode. In fact switching to a guest mode
         * is very similar to exiting to userspace from rcu point of view. In
         * addition CPU may stay in a guest mode for quite a long time (up to
         * one time slice). Lets treat guest mode as quiescent state, just like
         * we do with user-mode execution.
         */
        rcu_virt_note_context_switch(smp_processor_id());
}

static inline void kvm_guest_exit(void)
{
        unsigned long flags;

        local_irq_save(flags);
        guest_exit();
        local_irq_restore(flags);
}
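
/*
 * Illustrative sketch (added; not part of the original header): the arch
 * vcpu_run loop brackets actual guest execution with the pair above,
 * with preemption disabled and vcpu->mode set for the benefit of
 * kvm_vcpu_exiting_guest_mode().  example_enter_guest() is hypothetical
 * and heavily simplified.
 */
#if 0   /* example only, never compiled */
static void example_enter_guest(struct kvm_vcpu *vcpu)
{
        preempt_disable();                      /* kvm_guest_enter() requires it */
        vcpu->mode = IN_GUEST_MODE;
        kvm_guest_enter();
        /* ... arch-specific world switch into the guest ... */
        kvm_guest_exit();
        vcpu->mode = OUTSIDE_GUEST_MODE;
        preempt_enable();
}
#endif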

/*
 * search_memslots() and __gfn_to_memslot() are here because they are
 * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
 * gfn_to_memslot() itself isn't here as an inline because that would
 * bloat other code too much.
 */
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
{
        struct kvm_memory_slot *memslot;

        kvm_for_each_memslot(memslot, slots)
                if (gfn >= memslot->base_gfn &&
                    gfn < memslot->base_gfn + memslot->npages)
                        return memslot;

        return NULL;
}

static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
        return search_memslots(slots, gfn);
}

static inline unsigned long
__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
        return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
        return gfn_to_memslot(kvm, gfn)->id;
}

static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
        /* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
        return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
                (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}

static inline gfn_t
hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
{
        gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

        return slot->base_gfn + gfn_offset;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
        return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
        return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(pfn_t pfn)
{
        return (hpa_t)pfn << PAGE_SHIFT;
}

static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
        set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
}

enum kvm_stat_kind {
        KVM_STAT_VM,
        KVM_STAT_VCPU,
};

struct kvm_stats_debugfs_item {
        const char *name;
        int offset;
        enum kvm_stat_kind kind;
        struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
{
        if (unlikely(kvm->mmu_notifier_count))
                return 1;
        /*
         * Ensure the read of mmu_notifier_count happens before the read
         * of mmu_notifier_seq.  This interacts with the smp_wmb() in
         * mmu_notifier_invalidate_range_end to make sure that the caller
         * either sees the old (non-zero) value of mmu_notifier_count or
         * the new (incremented) value of mmu_notifier_seq.
         * PowerPC Book3s HV KVM calls this under a per-page lock rather
         * than under kvm->mmu_lock, for scalability, so can't rely on
         * kvm->mmu_lock to keep things ordered.
         */
        smp_rmb();
        if (kvm->mmu_notifier_seq != mmu_seq)
                return 1;
        return 0;
}
#endif
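
/*
 * Illustrative sketch (added; not part of the original header): the
 * canonical page-fault pattern samples mmu_notifier_seq, performs the
 * gfn->pfn translation outside mmu_lock, then retries if a notifier
 * invalidation raced with the translation.  example_fault() is
 * hypothetical and simplified.
 */
#if 0   /* example only, never compiled */
static int example_fault(struct kvm_vcpu *vcpu, gfn_t gfn)
{
        unsigned long mmu_seq = vcpu->kvm->mmu_notifier_seq;
        pfn_t pfn;

        smp_rmb();              /* read seq before translating */
        pfn = gfn_to_pfn(vcpu->kvm, gfn);

        spin_lock(&vcpu->kvm->mmu_lock);
        if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) {
                spin_unlock(&vcpu->kvm->mmu_lock);
                kvm_release_pfn_clean(pfn);
                return -EAGAIN; /* caller retries the fault */
        }
        /* ... install the mapping in the shadow/TDP page tables ... */
        spin_unlock(&vcpu->kvm->mmu_lock);
        return 0;
}
#endif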

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING

#define KVM_MAX_IRQ_ROUTES 1024 /* might need extension/rework in the future */

int kvm_setup_default_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
                        const struct kvm_irq_routing_entry *entries,
                        unsigned nr,
                        unsigned flags);
int kvm_set_routing_entry(struct kvm_irq_routing_table *rt,
                          struct kvm_kernel_irq_routing_entry *e,
                          const struct kvm_irq_routing_entry *ue);
void kvm_free_irq_routing(struct kvm *kvm);

int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#ifdef CONFIG_HAVE_KVM_IRQCHIP
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
#else
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
        return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}
#endif

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
        return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm,
                                          struct kvm_irq_routing_table *irq_rt)
{
        rcu_assign_pointer(kvm->irq_routing, irq_rt);
}
#endif

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
        return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
        return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
}

bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);

#else

static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }

#endif

#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT

long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
                                  unsigned long arg);

void kvm_free_all_assigned_devices(struct kvm *kvm);

#else

static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
                                                unsigned long arg)
{
        return -ENOTTY;
}

static inline void kvm_free_all_assigned_devices(struct kvm *kvm) {}

#endif

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
        set_bit(req, &vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
        if (test_bit(req, &vcpu->requests)) {
                clear_bit(req, &vcpu->requests);
                return true;
        } else {
                return false;
        }
}
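
/*
 * Illustrative sketch (added; not part of the original header): requests
 * are posted with kvm_make_request() (usually followed by a kick) and
 * consumed at the top of the vcpu run loop with kvm_check_request(),
 * which tests and clears the bit in one step.
 * example_handle_requests() is hypothetical.
 */
#if 0   /* example only, never compiled */
static void example_handle_requests(struct kvm_vcpu *vcpu)
{
        if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
                /* arch code would flush this vcpu's guest TLB here */
        }
}
#endif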

extern bool kvm_rebooting;

struct kvm_device_ops;

struct kvm_device {
        struct kvm_device_ops *ops;
        struct kvm *kvm;
        void *private;
        struct list_head vm_node;
};

/* create, destroy, and name are mandatory */
struct kvm_device_ops {
        const char *name;
        int (*create)(struct kvm_device *dev, u32 type);

        /*
         * Destroy is responsible for freeing dev.
         *
         * Destroy may be called before or after destructors are called
         * on emulated I/O regions, depending on whether a reference is
         * held by a vcpu or other kvm component that gets destroyed
         * after the emulated I/O.
         */
        void (*destroy)(struct kvm_device *dev);

        int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
        int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
        int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
        long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
                      unsigned long arg);
};

void kvm_device_get(struct kvm_device *dev);
void kvm_device_put(struct kvm_device *dev);
struct kvm_device *kvm_device_from_filp(struct file *filp);

extern struct kvm_device_ops kvm_mpic_ops;
extern struct kvm_device_ops kvm_xics_ops;

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
        vcpu->spin_loop.in_spin_loop = val;
}
static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
        vcpu->spin_loop.dy_eligible = val;
}

#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}

static inline bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
{
        return true;
}

#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
#endif /* __KVM_HOST_H */