#ifndef __KVM_HOST_H
#define __KVM_HOST_H

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/context_tracking.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

#ifndef KVM_MMIO_SIZE
#define KVM_MMIO_SIZE 8
#endif

/*
 * Bits 16..31 of kvm_memory_slot::flags are reserved for kvm-internal
 * use; the low bits are the userspace-visible flags defined in the
 * uapi <linux/kvm.h>.  KVM_MEMSLOT_INVALID lives in the internal half.
 */
#define KVM_MEMSLOT_INVALID     (1UL << 16)

/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS  2

/*
 * For a normal pfn the highest 12 bits are zero, so bits 52..62 can
 * encode an error pfn and bit 63 a "no slot" pfn.
 */
#define KVM_PFN_ERR_MASK        (0x7ffULL << 52)
#define KVM_PFN_ERR_NOSLOT_MASK (0xfffULL << 52)
#define KVM_PFN_NOSLOT          (0x1ULL << 63)

#define KVM_PFN_ERR_FAULT       (KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON    (KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT    (KVM_PFN_ERR_MASK + 2)

/*
 * Error pfns indicate that the gfn is in a slot but translating it to a
 * host pfn failed.
 */
static inline bool is_error_pfn(pfn_t pfn)
{
        return !!(pfn & KVM_PFN_ERR_MASK);
}

/*
 * Error/noslot pfns indicate that the gfn can not be translated to a
 * pfn at all: either it is not backed by any slot, or the translation
 * itself failed.
 */
static inline bool is_error_noslot_pfn(pfn_t pfn)
{
        return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
}

/* Noslot pfns indicate that the gfn is not backed by any slot. */
static inline bool is_noslot_pfn(pfn_t pfn)
{
        return pfn == KVM_PFN_NOSLOT;
}

#define KVM_HVA_ERR_BAD         (PAGE_OFFSET)
#define KVM_HVA_ERR_RO_BAD      (PAGE_OFFSET + PAGE_SIZE)

static inline bool kvm_is_error_hva(unsigned long addr)
{
        return addr >= PAGE_OFFSET;
}

#define KVM_ERR_PTR_BAD_PAGE    (ERR_PTR(-ENOENT))

static inline bool is_error_page(struct page *page)
{
        return IS_ERR(page);
}
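
/*
 * Usage sketch (illustrative only): a caller resolving a gfn typically
 * distinguishes the error encodings above before touching the result.
 *
 *      pfn_t pfn = gfn_to_pfn(kvm, gfn);
 *
 *      if (is_error_noslot_pfn(pfn)) {
 *              if (is_noslot_pfn(pfn))
 *                      ...             // not backed by a slot, e.g. MMIO
 *              else
 *                      ...             // in-slot fault or HW poison
 *      } else {
 *              ...                     // valid pfn, safe to use
 *              kvm_release_pfn_clean(pfn);
 *      }
 */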

/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH           0
#define KVM_REQ_MIGRATE_TIMER       1
#define KVM_REQ_REPORT_TPR_ACCESS   2
#define KVM_REQ_MMU_RELOAD          3
#define KVM_REQ_TRIPLE_FAULT        4
#define KVM_REQ_PENDING_TIMER       5
#define KVM_REQ_UNHALT              6
#define KVM_REQ_MMU_SYNC            7
#define KVM_REQ_CLOCK_UPDATE        8
#define KVM_REQ_KICK                9
#define KVM_REQ_DEACTIVATE_FPU     10
#define KVM_REQ_EVENT              11
#define KVM_REQ_APF_HALT           12
#define KVM_REQ_STEAL_UPDATE       13
#define KVM_REQ_NMI                14
#define KVM_REQ_PMU                15
#define KVM_REQ_PMI                16
#define KVM_REQ_WATCHDOG           17
#define KVM_REQ_MASTERCLOCK_UPDATE 18
#define KVM_REQ_MCLOCK_INPROGRESS  19
#define KVM_REQ_EPR_EXIT           20
#define KVM_REQ_SCAN_IOAPIC        21
#define KVM_REQ_GLOBAL_CLOCK_UPDATE 22

#define KVM_USERSPACE_IRQ_SOURCE_ID             0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID        1

struct kvm;
struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;

extern raw_spinlock_t kvm_lock;
extern struct list_head vm_list;

struct kvm_io_range {
        gpa_t addr;
        int len;
        struct kvm_io_device *dev;
};

#define NR_IOBUS_DEVS 1000

struct kvm_io_bus {
        int dev_count;
        int ioeventfd_count;
        struct kvm_io_range range[];
};

enum kvm_bus {
        KVM_MMIO_BUS,
        KVM_PIO_BUS,
        KVM_VIRTIO_CCW_NOTIFY_BUS,
        KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                     int len, const void *val);
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len,
                    void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                            int len, struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                              struct kvm_io_device *dev);
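
/*
 * Usage sketch (illustrative; "my_dev" and its embedded kvm_io_device
 * are hypothetical): device models register an address range under
 * slots_lock; kvm_io_bus_write()/kvm_io_bus_read() then dispatch guest
 * accesses to the device whose range matches.
 *
 *      mutex_lock(&kvm->slots_lock);
 *      ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, addr, len,
 *                                    &my_dev->iodev);
 *      mutex_unlock(&kvm->slots_lock);
 */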

#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
        struct work_struct work;
        struct list_head link;
        struct list_head queue;
        struct kvm_vcpu *vcpu;
        struct mm_struct *mm;
        gva_t gva;
        unsigned long addr;
        struct kvm_arch_async_pf arch;
        struct page *page;
        bool done;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
                       struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

enum {
        OUTSIDE_GUEST_MODE,
        IN_GUEST_MODE,
        EXITING_GUEST_MODE,
        READING_SHADOW_PAGE_TABLES,
};

/*
 * Sometimes a large or cross-page mmio needs to be broken up into
 * separate exits for userspace servicing.
 */
struct kvm_mmio_fragment {
        gpa_t gpa;
        void *data;
        unsigned len;
};

struct kvm_vcpu {
        struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
        struct preempt_notifier preempt_notifier;
#endif
        int cpu;
        int vcpu_id;
        int srcu_idx;
        int mode;
        unsigned long requests;
        unsigned long guest_debug;

        struct mutex mutex;
        struct kvm_run *run;

        int fpu_active;
        int guest_fpu_loaded, guest_xcr0_loaded;
        wait_queue_head_t wq;
        struct pid *pid;
        int sigset_active;
        sigset_t sigset;
        struct kvm_vcpu_stat stat;

#ifdef CONFIG_HAS_IOMEM
        int mmio_needed;
        int mmio_read_completed;
        int mmio_is_write;
        int mmio_cur_fragment;
        int mmio_nr_fragments;
        struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
        struct {
                u32 queued;
                struct list_head queue;
                struct list_head done;
                spinlock_t lock;
        } async_pf;
#endif

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
        /*
         * Cpu relax intercept or pause loop exit optimization
         * in_spin_loop: set when a vcpu does a pause loop exit
         *  or cpu relax intercepted.
         * dy_eligible: indicates whether vcpu is eligible for directed yield.
         */
        struct {
                bool in_spin_loop;
                bool dy_eligible;
        } spin_loop;
#endif
        bool preempted;
        struct kvm_vcpu_arch arch;
};
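
/*
 * Atomically mark a vcpu that is IN_GUEST_MODE as wanting to exit.  The
 * returned old mode tells the caller whether the target really was in
 * guest mode, and hence e.g. whether an IPI is still required.
 */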
static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
        return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}

/*
 * Some of the bitops functions do not support too long bitmaps.
 * This number must be determined not to exceed such limits.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_memory_slot {
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long *dirty_bitmap;
        struct kvm_arch_memory_slot arch;
        unsigned long userspace_addr;
        u32 flags;
        short id;
};

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
        return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}
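
/*
 * Worked example: a 4GiB slot with 4KiB pages has npages = 1048576;
 * rounded up to a BITS_PER_LONG multiple and divided by 8, that is a
 * 131072-byte (128KiB) bitmap, one bit per guest page.
 */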

struct kvm_kernel_irq_routing_entry {
        u32 gsi;
        u32 type;
        int (*set)(struct kvm_kernel_irq_routing_entry *e,
                   struct kvm *kvm, int irq_source_id, int level,
                   bool line_status);
        union {
                struct {
                        unsigned irqchip;
                        unsigned pin;
                } irqchip;
                struct msi_msg msi;
        };
        struct hlist_node link;
};

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING

struct kvm_irq_routing_table {
        int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
        struct kvm_kernel_irq_routing_entry *rt_entries;
        u32 nr_rt_entries;
        /*
         * Array indexed by gsi.  Each entry contains the list of irq
         * chips the gsi is connected to.
         */
        struct hlist_head map[0];
};

#else

struct kvm_irq_routing_table {};

#endif

#ifndef KVM_PRIVATE_MEM_SLOTS
#define KVM_PRIVATE_MEM_SLOTS 0
#endif

#ifndef KVM_MEM_SLOTS_NUM
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif

/*
 * Note: the memslots array is not sorted by slot id anymore; use
 * id_to_memslot() to look a slot up by its id.
 */
struct kvm_memslots {
        u64 generation;
        struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
        /* The mapping table from slot id to the index in memslots[]. */
        short id_to_index[KVM_MEM_SLOTS_NUM];
};

struct kvm {
        spinlock_t mmu_lock;
        struct mutex slots_lock;
        struct mm_struct *mm; /* userspace tied to this vm */
        struct kvm_memslots *memslots;
        struct srcu_struct srcu;
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
        u32 bsp_vcpu_id;
#endif
        struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
        atomic_t online_vcpus;
        int last_boosted_vcpu;
        struct list_head vm_list;
        struct mutex lock;
        struct kvm_io_bus *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
        struct {
                spinlock_t lock;
                struct list_head items;
                struct list_head resampler_list;
                struct mutex resampler_lock;
        } irqfds;
        struct list_head ioeventfds;
#endif
        struct kvm_vm_stat stat;
        struct kvm_arch arch;
        atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
        spinlock_t ring_lock;
        struct list_head coalesced_zones;
#endif

        struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
        /*
         * Update side is protected by irq_lock.
         */
        struct kvm_irq_routing_table __rcu *irq_routing;
        struct hlist_head mask_notifier_list;
        struct hlist_head irq_ack_notifier_list;
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
        struct mmu_notifier mmu_notifier;
        unsigned long mmu_notifier_seq;
        long mmu_notifier_count;
#endif
        long tlbs_dirty;
        struct list_head devices;
};

#define kvm_err(fmt, ...) \
        pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_info(fmt, ...) \
        pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
        pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
        pr_err_ratelimited("kvm [%i]: " fmt, \
                           task_tgid_nr(current), ## __VA_ARGS__)

/* The guest did something we don't support. */
#define vcpu_unimpl(vcpu, fmt, ...) \
        kvm_pr_unimpl("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
        /*
         * Pairs with the write barrier in kvm_vm_ioctl_create_vcpu():
         * once online_vcpus is seen incremented, vcpus[i] is visible.
         */
        smp_rmb();
        return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
        for (idx = 0; \
             idx < atomic_read(&kvm->online_vcpus) && \
             (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
             idx++)
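
/*
 * Usage sketch (illustrative): posting a request to every online vcpu.
 *
 *      struct kvm_vcpu *vcpu;
 *      int i;
 *
 *      kvm_for_each_vcpu(i, vcpu, kvm)
 *              kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 */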

#define kvm_for_each_memslot(memslot, slots) \
        for (memslot = &slots->memslots[0]; \
             memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
             memslot++)

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

int __must_check vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
#else
static inline int kvm_irqfd_init(void)
{
        return 0;
}

static inline void kvm_irqfd_exit(void)
{
}
#endif
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
             struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new,
                     u64 last_generation);

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
        return rcu_dereference_check(kvm->memslots,
                        srcu_read_lock_held(&kvm->srcu)
                        || lockdep_is_held(&kvm->slots_lock));
}
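
/*
 * Usage sketch (illustrative): readers dereference the memslots under
 * the kvm->srcu read lock, which is what the check above allows for.
 *
 *      int idx = srcu_read_lock(&kvm->srcu);
 *      struct kvm_memslots *slots = kvm_memslots(kvm);
 *
 *      ... walk slots->memslots ...
 *      srcu_read_unlock(&kvm->srcu, idx);
 */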

static inline struct kvm_memory_slot *
id_to_memslot(struct kvm_memslots *slots, int id)
{
        int index = slots->id_to_index[id];
        struct kvm_memory_slot *slot;

        slot = &slots->memslots[index];

        WARN_ON(slot->id != id);
        return slot;
}

/*
 * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
 * - create a new memory slot
 * - delete an existing memory slot
 * - modify an existing memory slot
 *   -- move it in the guest physical memory space
 *   -- just change its flags
 *
 * Since flags can be changed by some of these operations, the following
 * differentiation is the best we can do for __kvm_set_memory_region():
 */
enum kvm_mr_change {
        KVM_MR_CREATE,
        KVM_MR_DELETE,
        KVM_MR_MOVE,
        KVM_MR_FLAGS_ONLY,
};

int kvm_set_memory_region(struct kvm *kvm,
                          struct kvm_userspace_memory_region *mem);
int __kvm_set_memory_region(struct kvm *kvm,
                            struct kvm_userspace_memory_region *mem);
void kvm_arch_free_memslot(struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont);
int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_userspace_memory_region *mem,
                                   enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   const struct kvm_memory_slot *old,
                                   enum kvm_mr_change change);
bool kvm_largepages_enabled(void);
void kvm_disable_largepages(void);

/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);

/* flush memory translations pointing to 'slot' */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot);

int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
                            int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
                       bool write_fault, bool *writable);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
                      bool *writable);
pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);

void kvm_release_pfn_dirty(pfn_t pfn);
void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
void kvm_set_pfn_accessed(pfn_t pfn);
void kvm_get_pfn(pfn_t pfn);

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
                        int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
                          unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                          void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
                         int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
                    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                           void *data, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                              gpa_t gpa, unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
                             gfn_t gfn);
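
/*
 * Usage sketch (illustrative; "val" is hypothetical): the _cached
 * variants avoid a memslot lookup per access once the gfn_to_hva_cache
 * has been initialized for a given gpa range.
 *
 *      struct gfn_to_hva_cache ghc;
 *
 *      if (!kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa, sizeof(val)))
 *              kvm_write_guest_cached(kvm, &ghc, &val, sizeof(val));
 */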

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
bool kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);
void kvm_make_mclock_inprogress_request(struct kvm *kvm);
void kvm_make_scan_ioapic_request(struct kvm *kvm);

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg);
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);

int kvm_dev_ioctl_check_extension(long ext);

int kvm_get_dirty_log(struct kvm *kvm,
                      struct kvm_dirty_log *log, int *is_dirty);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem);
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
                          bool line_status);
long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);

void kvm_free_physmem(struct kvm *kvm);

void *kvm_kvzalloc(unsigned long size);
void kvm_kvfree(const void *addr);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
        return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
        kfree(kvm);
}
#endif

static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
        return vcpu->arch.wqp;
#else
        return &vcpu->wq;
#endif
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

bool kvm_is_mmio_pfn(pfn_t pfn);

struct kvm_irq_ack_notifier {
        struct hlist_node link;
        unsigned gsi;
        void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

struct kvm_assigned_dev_kernel {
        struct kvm_irq_ack_notifier ack_notifier;
        struct list_head list;
        int assigned_dev_id;
        int host_segnr;
        int host_busnr;
        int host_devfn;
        unsigned int entries_nr;
        int host_irq;
        bool host_irq_disabled;
        bool pci_2_3;
        struct msix_entry *host_msix_entries;
        int guest_irq;
        struct msix_entry *guest_msix_entries;
        unsigned long irq_requested_type;
        int irq_source_id;
        int flags;
        struct pci_dev *dev;
        struct kvm *kvm;
        spinlock_t intx_lock;
        spinlock_t intx_mask_lock;
        char irq_name[32];
        struct pci_saved_state *pci_saved_state;
};

struct kvm_irq_mask_notifier {
        void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
        int irq;
        struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
                                    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
                                      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
                             bool mask);

int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
                bool line_status);
int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
                int irq_source_id, int level, bool line_status);
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
                                   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
                                     struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);

/* For vcpu->arch.iommu_flags */
#define KVM_IOMMU_CACHE_COHERENCY       0x1

#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
int kvm_iommu_map_guest(struct kvm *kvm);
int kvm_iommu_unmap_guest(struct kvm *kvm);
int kvm_assign_device(struct kvm *kvm,
                      struct kvm_assigned_dev_kernel *assigned_dev);
int kvm_deassign_device(struct kvm *kvm,
                        struct kvm_assigned_dev_kernel *assigned_dev);
#else
static inline int kvm_iommu_map_pages(struct kvm *kvm,
                                      struct kvm_memory_slot *slot)
{
        return 0;
}

static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
                                         struct kvm_memory_slot *slot)
{
}

static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
{
        return 0;
}
#endif

static inline void kvm_guest_enter(void)
{
        unsigned long flags;

        BUG_ON(preemptible());

        local_irq_save(flags);
        guest_enter();
        local_irq_restore(flags);

        /* KVM does not hold any references to rcu protected data when it
         * switches CPU into a guest mode.  In fact switching to a guest mode
         * is very similar to exiting to userspace from rcu point of view.  In
         * addition CPU may stay in a guest mode for quite a long time (up to
         * one time slice).  Lets treat guest mode as quiescent state, just like
         * we do with user-mode execution.
         */
        rcu_virt_note_context_switch(smp_processor_id());
}

static inline void kvm_guest_exit(void)
{
        unsigned long flags;

        local_irq_save(flags);
        guest_exit();
        local_irq_restore(flags);
}

/*
 * search_memslots() and __gfn_to_memslot() are here because they are
 * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
 * gfn_to_memslot() itself isn't here as an inline because that would
 * bloat other code too much.
 */
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
{
        struct kvm_memory_slot *memslot;

        kvm_for_each_memslot(memslot, slots)
                if (gfn >= memslot->base_gfn &&
                    gfn < memslot->base_gfn + memslot->npages)
                        return memslot;

        return NULL;
}

static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
        return search_memslots(slots, gfn);
}

static inline unsigned long
__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
        return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}
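
/*
 * Worked example: for a slot with base_gfn 0x100 and userspace_addr
 * 0x7f0000000000, gfn 0x105 yields hva 0x7f0000000000 + 5 * PAGE_SIZE
 * (0x7f0000005000 with 4KiB pages).
 */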

static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
        return gfn_to_memslot(kvm, gfn)->id;
}

static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
        /* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
        return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
                (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}

static inline gfn_t
hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
{
        gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

        return slot->base_gfn + gfn_offset;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
        return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
        return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(pfn_t pfn)
{
        return (hpa_t)pfn << PAGE_SHIFT;
}
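
/*
 * Worked example: with 4KiB pages, gpa 0x12345 lies in gfn 0x12
 * (0x12345 >> 12) and gfn_to_gpa(0x12) returns the page base 0x12000;
 * pfn_to_hpa() is the same shift on the host physical side.
 */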

static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
        set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
}

enum kvm_stat_kind {
        KVM_STAT_VM,
        KVM_STAT_VCPU,
};

struct kvm_stats_debugfs_item {
        const char *name;
        int offset;
        enum kvm_stat_kind kind;
        struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
{
        if (unlikely(kvm->mmu_notifier_count))
                return 1;
        /*
         * Ensure the read of mmu_notifier_count happens before the read
         * of mmu_notifier_seq.  This interacts with the smp_wmb() in
         * mmu_notifier_invalidate_range_end to make sure that the caller
         * either sees the old (non-zero) value of mmu_notifier_count or
         * the new (incremented) value of mmu_notifier_seq.
         */
        smp_rmb();
        if (kvm->mmu_notifier_seq != mmu_seq)
                return 1;
        return 0;
}
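
/*
 * Usage sketch (illustrative): fault paths sample mmu_notifier_seq
 * before translating a gfn and retry under mmu_lock if an invalidation
 * may have run in between.
 *
 *      mmu_seq = kvm->mmu_notifier_seq;
 *      smp_rmb();
 *      pfn = gfn_to_pfn(kvm, gfn);
 *
 *      spin_lock(&kvm->mmu_lock);
 *      if (mmu_notifier_retry(kvm, mmu_seq))
 *              goto retry;
 */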
#endif

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING

#define KVM_MAX_IRQ_ROUTES 1024

int kvm_setup_default_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
                        const struct kvm_irq_routing_entry *entries,
                        unsigned nr,
                        unsigned flags);
int kvm_set_routing_entry(struct kvm_irq_routing_table *rt,
                          struct kvm_kernel_irq_routing_entry *e,
                          const struct kvm_irq_routing_entry *ue);
void kvm_free_irq_routing(struct kvm *kvm);

int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#ifdef CONFIG_HAVE_KVM_IRQCHIP
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
#else
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
        return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}
#endif

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
        return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm,
                                          struct kvm_irq_routing_table *irq_rt)
{
        rcu_assign_pointer(kvm->irq_routing, irq_rt);
}
#endif

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
        return -ENOSYS;
}

#endif

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
        return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
}

bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);

#else

static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }

#endif

#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT

long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
                                  unsigned long arg);

void kvm_free_all_assigned_devices(struct kvm *kvm);

#else

static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
                                                unsigned long arg)
{
        return -ENOTTY;
}

static inline void kvm_free_all_assigned_devices(struct kvm *kvm) {}

#endif

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
        set_bit(req, &vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
        if (test_bit(req, &vcpu->requests)) {
                clear_bit(req, &vcpu->requests);
                return true;
        } else {
                return false;
        }
}
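
/*
 * Usage sketch (illustrative; the handler name is hypothetical): the
 * vcpu entry loop drains requests posted via kvm_make_request() plus a
 * kick from other contexts.
 *
 *      if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
 *              handle_tlb_flush(vcpu);
 */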

extern bool kvm_rebooting;

struct kvm_device_ops;

struct kvm_device {
        struct kvm_device_ops *ops;
        struct kvm *kvm;
        void *private;
        struct list_head vm_node;
};

/* create, destroy, and name are mandatory */
struct kvm_device_ops {
        const char *name;
        int (*create)(struct kvm_device *dev, u32 type);

        /*
         * Destroy is responsible for freeing dev.
         *
         * Destroy may be called before or after destructors are called
         * on emulated I/O regions, depending on whether a reference is
         * held by a vcpu or other kvm component that gets destroyed
         * after the emulated I/O.
         */
        void (*destroy)(struct kvm_device *dev);

        int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
        int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
        int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
        long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
                      unsigned long arg);
};

void kvm_device_get(struct kvm_device *dev);
void kvm_device_put(struct kvm_device *dev);
struct kvm_device *kvm_device_from_filp(struct file *filp);

extern struct kvm_device_ops kvm_mpic_ops;
extern struct kvm_device_ops kvm_xics_ops;

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
        vcpu->spin_loop.in_spin_loop = val;
}
static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
        vcpu->spin_loop.dy_eligible = val;
}

#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}

static inline bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
{
        return true;
}

#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
#endif /* __KVM_HOST_H */