#ifndef __KVM_HOST_H
#define __KVM_HOST_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/context_tracking.h>
#include <linux/irqbypass.h>
#include <linux/swait.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

#ifndef KVM_MAX_VCPU_ID
#define KVM_MAX_VCPU_ID KVM_MAX_VCPUS
#endif

/*
 * Bits 16..31 of kvm_memory_region::flags are used internally by kvm;
 * the remaining bits are visible to userspace and are defined in
 * include/uapi/linux/kvm.h.
 */
#define KVM_MEMSLOT_INVALID	(1UL << 16)
#define KVM_MEMSLOT_INCOHERENT	(1UL << 17)

/* Large or cross-page MMIO is broken up into at most two fragments. */
#define KVM_MAX_MMIO_FRAGMENTS	2

#ifndef KVM_ADDRESS_SPACE_NUM
#define KVM_ADDRESS_SPACE_NUM	1
#endif

/*
 * For the normal pfn, the highest 12 bits should be zero,
 * so we can mask bit 62 ~ bit 52 to indicate the error pfn,
 * mask bit 63 to indicate the noslot pfn.
 */
#define KVM_PFN_ERR_MASK	(0x7ffULL << 52)
#define KVM_PFN_ERR_NOSLOT_MASK	(0xfffULL << 52)
#define KVM_PFN_NOSLOT		(0x1ULL << 63)

#define KVM_PFN_ERR_FAULT	(KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON	(KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT	(KVM_PFN_ERR_MASK + 2)

/*
 * error pfns indicate that the gfn is in slot but failed to
 * translate it to pfn on host.
 */
static inline bool is_error_pfn(kvm_pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_MASK);
}

/*
 * error_noslot pfns indicate that the gfn can not be
 * translated to pfn - it is not in slot or failed to
 * translate it to pfn.
 */
static inline bool is_error_noslot_pfn(kvm_pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
}

/* noslot pfn indicates that the gfn is not in slot. */
static inline bool is_noslot_pfn(kvm_pfn_t pfn)
{
	return pfn == KVM_PFN_NOSLOT;
}

/*
 * architectures with KVM_HVA_ERR_BAD other than PAGE_OFFSET (e.g. s390)
 * provide their own defines and kvm_is_error_hva
 */
#ifndef KVM_HVA_ERR_BAD

#define KVM_HVA_ERR_BAD		(PAGE_OFFSET)
#define KVM_HVA_ERR_RO_BAD	(PAGE_OFFSET + PAGE_SIZE)

static inline bool kvm_is_error_hva(unsigned long addr)
{
	return addr >= PAGE_OFFSET;
}

#endif

#define KVM_ERR_PTR_BAD_PAGE	(ERR_PTR(-ENOENT))

static inline bool is_error_page(struct page *page)
{
	return IS_ERR(page);
}

/*
 * Architecture-independent vcpu->requests bit members; the remaining
 * bits are available for architecture-specific requests.
 */
#define KVM_REQ_TLB_FLUSH	0
#define KVM_REQ_MMU_RELOAD	1
#define KVM_REQ_PENDING_TIMER	2
#define KVM_REQ_UNHALT		3

#define KVM_USERSPACE_IRQ_SOURCE_ID		0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1

extern struct kmem_cache *kvm_vcpu_cache;

extern spinlock_t kvm_lock;
extern struct list_head vm_list;

struct kvm_io_range {
	gpa_t addr;
	int len;
	struct kvm_io_device *dev;
};

#define NR_IOBUS_DEVS 1000

struct kvm_io_bus {
	int dev_count;
	int ioeventfd_count;
	struct kvm_io_range range[];
};

enum kvm_bus {
	KVM_MMIO_BUS,
	KVM_PIO_BUS,
	KVM_VIRTIO_CCW_NOTIFY_BUS,
	KVM_FAST_MMIO_BUS,
	KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val);
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
			    gpa_t addr, int len, const void *val, long cookie);
int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		    int len, void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
			    int len, struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			      struct kvm_io_device *dev);
struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
					 gpa_t addr);
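
/*
 * Registration sketch (assumes a hypothetical my_dev wrapping a
 * struct kvm_io_device initialized with kvm_iodevice_init() from
 * include/kvm/iodev.h; kvm->slots_lock must be held across the call):
 *
 *	kvm_iodevice_init(&my_dev->dev, &my_dev_ops);
 *	mutex_lock(&kvm->slots_lock);
 *	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, addr, len,
 *				      &my_dev->dev);
 *	mutex_unlock(&kvm->slots_lock);
 */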

#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
	struct work_struct work;
	struct list_head link;
	struct list_head queue;
	struct kvm_vcpu *vcpu;
	struct mm_struct *mm;
	gva_t gva;
	unsigned long addr;
	struct kvm_arch_async_pf arch;
	bool wakeup_all;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
		       struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

enum {
	OUTSIDE_GUEST_MODE,
	IN_GUEST_MODE,
	EXITING_GUEST_MODE,
	READING_SHADOW_PAGE_TABLES,
};

/*
 * Sometimes a large or cross-page mmio needs to be broken up into separate
 * exits for userspace servicing.
 */
struct kvm_mmio_fragment {
	gpa_t gpa;
	void *data;
	unsigned len;
};

struct kvm_vcpu {
	struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct preempt_notifier preempt_notifier;
#endif
	int cpu;
	int vcpu_id;
	int srcu_idx;
	int mode;
	unsigned long requests;
	unsigned long guest_debug;

	int pre_pcpu;
	struct list_head blocked_vcpu_list;

	struct mutex mutex;
	struct kvm_run *run;

	int fpu_active;
	int guest_fpu_loaded, guest_xcr0_loaded;
	unsigned char fpu_counter;
	struct swait_queue_head wq;
	struct pid *pid;
	int sigset_active;
	sigset_t sigset;
	struct kvm_vcpu_stat stat;
	unsigned int halt_poll_ns;
	bool valid_wakeup;

#ifdef CONFIG_HAS_IOMEM
	int mmio_needed;
	int mmio_read_completed;
	int mmio_is_write;
	int mmio_cur_fragment;
	int mmio_nr_fragments;
	struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
	struct {
		u32 queued;
		struct list_head queue;
		struct list_head done;
		spinlock_t lock;
	} async_pf;
#endif

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
	/*
	 * Cpu relax intercept or pause loop exit optimization
	 * in_spin_loop: set when a vcpu does a pause loop exit
	 *  or cpu relax intercepted.
	 * dy_eligible: indicates whether vcpu is eligible for directed yield.
	 */
	struct {
		bool in_spin_loop;
		bool dy_eligible;
	} spin_loop;
#endif
	bool preempted;
	struct kvm_vcpu_arch arch;
	struct dentry *debugfs_dentry;
};

static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
	/* Atomically flip IN_GUEST_MODE to EXITING_GUEST_MODE; returns the old mode. */
	return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}

/*
 * Some of the bitops functions do not support too long bitmaps.
 * This number must be determined not to exceed such limits.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long *dirty_bitmap;
	struct kvm_arch_memory_slot arch;
	unsigned long userspace_addr;
	u32 flags;
	short id;
};

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}

struct kvm_s390_adapter_int {
	u64 ind_addr;
	u64 summary_addr;
	u64 ind_offset;
	u32 summary_offset;
	u32 adapter_id;
};

struct kvm_hv_sint {
	u32 vcpu;
	u32 sint;
};

struct kvm_kernel_irq_routing_entry {
	u32 gsi;
	u32 type;
	int (*set)(struct kvm_kernel_irq_routing_entry *e,
		   struct kvm *kvm, int irq_source_id, int level,
		   bool line_status);
	union {
		struct {
			unsigned irqchip;
			unsigned pin;
		} irqchip;
		struct {
			u32 address_lo;
			u32 address_hi;
			u32 data;
			u32 flags;
			u32 devid;
		} msi;
		struct kvm_s390_adapter_int adapter;
		struct kvm_hv_sint hv_sint;
	};
	struct hlist_node link;
};

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
struct kvm_irq_routing_table {
	int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
	u32 nr_rt_entries;
	/*
	 * Array indexed by gsi. Each entry contains list of irq chips
	 * the gsi is connected to.
	 */
	struct hlist_head map[0];
};
#endif

#ifndef KVM_PRIVATE_MEM_SLOTS
#define KVM_PRIVATE_MEM_SLOTS 0
#endif

#ifndef KVM_MEM_SLOTS_NUM
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif

#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
{
	return 0;
}
#endif

/*
 * Note:
 * memslots are not sorted by id anymore, please use id_to_memslot()
 * to get the memslot by its id.
 */
struct kvm_memslots {
	u64 generation;
	struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
	/* The mapping table from slot id to the index in memslots[]. */
	short id_to_index[KVM_MEM_SLOTS_NUM];
	atomic_t lru_slot;
	int used_slots;
};

struct kvm {
	spinlock_t mmu_lock;
	struct mutex slots_lock;
	struct mm_struct *mm; /* userspace tied to this vm */
	struct kvm_memslots *memslots[KVM_ADDRESS_SPACE_NUM];
	struct srcu_struct srcu;
	struct srcu_struct irq_srcu;
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];

	/*
	 * created_vcpus is protected by kvm->lock, and is incremented
	 * at the beginning of KVM_CREATE_VCPU.  online_vcpus is only
	 * incremented after storing the kvm_vcpu pointer in vcpus,
	 * and is accessed atomically.
	 */
	atomic_t online_vcpus;
	int created_vcpus;
	int last_boosted_vcpu;
	struct list_head vm_list;
	struct mutex lock;
	struct kvm_io_bus *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
	struct {
		spinlock_t lock;
		struct list_head items;
		struct list_head resampler_list;
		struct mutex resampler_lock;
	} irqfds;
	struct list_head ioeventfds;
#endif
	struct kvm_vm_stat stat;
	struct kvm_arch arch;
	atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
	spinlock_t ring_lock;
	struct list_head coalesced_zones;
#endif

	struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	/*
	 * Update side is protected by irq_lock.
	 */
	struct kvm_irq_routing_table __rcu *irq_routing;
#endif
#ifdef CONFIG_HAVE_KVM_IRQFD
	struct hlist_head irq_ack_notifier_list;
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	struct mmu_notifier mmu_notifier;
	unsigned long mmu_notifier_seq;
	long mmu_notifier_count;
#endif
	long tlbs_dirty;
	struct list_head devices;
	struct dentry *debugfs_dentry;
	struct kvm_stat_data **debugfs_stat_data;
};

#define kvm_err(fmt, ...) \
	pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_info(fmt, ...) \
	pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
	pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
	pr_err_ratelimited("kvm [%i]: " fmt, \
			   task_tgid_nr(current), ## __VA_ARGS__)

/* The guest did something we don't support. */
#define vcpu_unimpl(vcpu, fmt, ...)					\
	kvm_pr_unimpl("vcpu%i, guest rIP: 0x%lx " fmt,			\
			(vcpu)->vcpu_id, kvm_rip_read(vcpu), ## __VA_ARGS__)

#define vcpu_debug(vcpu, fmt, ...)					\
	kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
#define vcpu_err(vcpu, fmt, ...)					\
	kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
	/* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu, in case
	 * the caller has read kvm->online_vcpus before (as is the case
	 * for kvm_for_each_vcpu, for example).
	 */
	smp_rmb();
	return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
	for (idx = 0; \
	     idx < atomic_read(&kvm->online_vcpus) && \
	     (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
	     idx++)
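
/*
 * Usage sketch: kick every online vcpu.
 *
 *	struct kvm_vcpu *vcpu;
 *	int i;
 *
 *	kvm_for_each_vcpu(i, vcpu, kvm)
 *		kvm_vcpu_kick(vcpu);
 */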

static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
{
	struct kvm_vcpu *vcpu = NULL;
	int i;

	if (id < 0)
		return NULL;
	/* Fast path: vcpu_id often equals the index into vcpus[]. */
	if (id < KVM_MAX_VCPUS)
		vcpu = kvm_get_vcpu(kvm, id);
	if (vcpu && vcpu->vcpu_id == id)
		return vcpu;
	/* Slow path: the id is not the index, scan all online vcpus. */
	kvm_for_each_vcpu(i, vcpu, kvm)
		if (vcpu->vcpu_id == id)
			return vcpu;
	return NULL;
}

#define kvm_for_each_memslot(memslot, slots)	\
	for (memslot = &slots->memslots[0];	\
	      memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
		memslot++)
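
/*
 * Usage sketch: sum guest pages over all used slots (the caller must
 * hold the SRCU read lock protecting the memslots, see kvm_memslots()).
 *
 *	struct kvm_memslots *slots = kvm_memslots(kvm);
 *	struct kvm_memory_slot *memslot;
 *	unsigned long pages = 0;
 *
 *	kvm_for_each_memslot(memslot, slots)
 *		pages += memslot->npages;
 */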

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

int __must_check vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

#ifdef __KVM_HAVE_IOAPIC
void kvm_vcpu_request_scan_ioapic(struct kvm *kvm);
void kvm_arch_post_irq_routing_update(struct kvm *kvm);
#else
static inline void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
{
}
static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm)
{
}
#endif

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
#else
static inline int kvm_irqfd_init(void)
{
	return 0;
}

static inline void kvm_irqfd_exit(void)
{
}
#endif
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
		  struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);

static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{
	/* Callers must hold the SRCU read lock or kvm->slots_lock. */
	return rcu_dereference_check(kvm->memslots[as_id],
			srcu_read_lock_held(&kvm->srcu)
			|| lockdep_is_held(&kvm->slots_lock));
}

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
	return __kvm_memslots(kvm, 0);
}

static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
{
	int as_id = kvm_arch_vcpu_memslots_id(vcpu);

	return __kvm_memslots(vcpu->kvm, as_id);
}

static inline struct kvm_memory_slot *
id_to_memslot(struct kvm_memslots *slots, int id)
{
	int index = slots->id_to_index[id];
	struct kvm_memory_slot *slot;

	slot = &slots->memslots[index];

	WARN_ON(slot->id != id);
	return slot;
}

/*
 * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
 * - create a new memory slot
 * - delete an existing memory slot
 * - modify an existing memory slot
 *   -- move it in the guest physical memory space
 *   -- just change its flags
 *
 * Since flags can be changed by some of these operations, the following
 * differentiation is the best we can do for __kvm_set_memory_region():
 */
enum kvm_mr_change {
	KVM_MR_CREATE,
	KVM_MR_DELETE,
	KVM_MR_MOVE,
	KVM_MR_FLAGS_ONLY,
};

int kvm_set_memory_region(struct kvm *kvm,
			  const struct kvm_userspace_memory_region *mem);
int __kvm_set_memory_region(struct kvm *kvm,
			    const struct kvm_userspace_memory_region *mem);
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont);
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages);
void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem,
				enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change);
bool kvm_largepages_enabled(void);
void kvm_disable_largepages(void);
/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);
/* flush memory translations pointing to 'slot' */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot);

int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
			    struct page **pages, int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
				      bool *writable);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

kvm_pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
			  bool *writable);
kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
			       bool atomic, bool *async, bool write_fault,
			       bool *writable);

void kvm_release_pfn_clean(kvm_pfn_t pfn);
void kvm_set_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_accessed(kvm_pfn_t pfn);
void kvm_get_pfn(kvm_pfn_t pfn);

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			  void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa, unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
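
/*
 * Sketch of the cached-access pattern: initialize a gfn_to_hva_cache
 * once for a fixed gpa, then reuse it for repeated writes ('val' and
 * 'gpa' stand in for caller state):
 *
 *	struct gfn_to_hva_cache ghc;
 *
 *	if (!kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa, sizeof(val)))
 *		kvm_write_guest_cached(kvm, &ghc, &val, sizeof(val));
 */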

struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
			     int len);
int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
			       unsigned long len);
int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
			unsigned long len);
int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
			      int offset, int len);
int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
			 unsigned long len);
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
void kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);

int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty);

int kvm_get_dirty_log_protect(struct kvm *kvm,
			struct kvm_dirty_log *log, bool *is_dirty);

void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
					struct kvm_memory_slot *slot,
					gfn_t gfn_offset,
					unsigned long mask);

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
				struct kvm_dirty_log *log);

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
			bool line_status);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				    struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

bool kvm_arch_has_vcpu_debugfs(void);
int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu);

int kvm_arch_hardware_enable(void);
void kvm_arch_hardware_disable(void);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);

void *kvm_kvzalloc(unsigned long size);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
	kfree(kvm);
}
#endif

#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
#else
static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{
}

static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{
	return false;
}
#endif
#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
void kvm_arch_start_assignment(struct kvm *kvm);
void kvm_arch_end_assignment(struct kvm *kvm);
bool kvm_arch_has_assigned_device(struct kvm *kvm);
#else
static inline void kvm_arch_start_assignment(struct kvm *kvm)
{
}

static inline void kvm_arch_end_assignment(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
{
	return false;
}
#endif

static inline struct swait_queue_head *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
	return vcpu->arch.wqp;
#else
	return &vcpu->wq;
#endif
}

#ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED
/*
 * returns true if the virtual interrupt controller is initialized and
 * ready to accept virtual IRQ. On some architectures the virtual interrupt
 * controller is dynamically instantiated and this is not always true.
 */
bool kvm_arch_intc_initialized(struct kvm *kvm);
#else
static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
{
	return true;
}
#endif

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

bool kvm_is_reserved_pfn(kvm_pfn_t pfn);

struct kvm_irq_ack_notifier {
	struct hlist_node link;
	unsigned gsi;
	void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries, int gsi);
int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);

int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
		int irq_source_id, int level, bool line_status);
int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
			      struct kvm *kvm, int irq_source_id,
			      int level, bool line_status);
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_notify_acked_gsi(struct kvm *kvm, int gsi);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				     struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);

#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
#else
static inline int kvm_iommu_map_pages(struct kvm *kvm,
				      struct kvm_memory_slot *slot)
{
	return 0;
}

static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
					 struct kvm_memory_slot *slot)
{
}
#endif

/*
 * search_memslots() and __gfn_to_memslot() perform the same lookup as
 * gfn_to_memslot(), but can be faster on hot paths.
 *
 * The slot array is kept sorted by base_gfn in descending order, and
 * the most recently used slot is cached in lru_slot to short-circuit
 * the binary search.
 */
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
{
	int start = 0, end = slots->used_slots;
	int slot = atomic_read(&slots->lru_slot);
	struct kvm_memory_slot *memslots = slots->memslots;

	if (gfn >= memslots[slot].base_gfn &&
	    gfn < memslots[slot].base_gfn + memslots[slot].npages)
		return &memslots[slot];

	while (start < end) {
		slot = start + (end - start) / 2;

		if (gfn >= memslots[slot].base_gfn)
			end = slot;
		else
			start = slot + 1;
	}

	if (gfn >= memslots[start].base_gfn &&
	    gfn < memslots[start].base_gfn + memslots[start].npages) {
		atomic_set(&slots->lru_slot, start);
		return &memslots[start];
	}

	return NULL;
}

static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
	return search_memslots(slots, gfn);
}

static inline unsigned long
__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_memslot(kvm, gfn)->id;
}

static inline gfn_t
hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
{
	gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

	return slot->base_gfn + gfn_offset;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
{
	return (hpa_t)pfn << PAGE_SHIFT;
}

static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
{
	unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));

	return kvm_is_error_hva(hva);
}

enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};

struct kvm_stat_data {
	int offset;
	struct kvm *kvm;
};

struct kvm_stats_debugfs_item {
	const char *name;
	int offset;
	enum kvm_stat_kind kind;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
{
	if (unlikely(kvm->mmu_notifier_count))
		return 1;
	/*
	 * Ensure the read of mmu_notifier_count happens before the read
	 * of mmu_notifier_seq.  This interacts with the smp_wmb() in
	 * mmu_notifier_invalidate_range_end to make sure that the caller
	 * either sees the old (non-zero) value of mmu_notifier_count or
	 * the new (incremented) value of mmu_notifier_seq.
	 * PowerPC Book3s HV KVM calls this under a per-page lock
	 * rather than under kvm->mmu_lock, for scalability, so
	 * can't rely on kvm->mmu_lock to keep things ordered.
	 */
	smp_rmb();
	if (kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}
#endif
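
/*
 * Typical caller pattern (a sketch of how arch fault handlers use it):
 * snapshot mmu_notifier_seq before translating the gfn, then retry the
 * fault if an invalidation ran concurrently.
 *
 *	mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(kvm, gfn);
 *	spin_lock(&kvm->mmu_lock);
 *	if (mmu_notifier_retry(kvm, mmu_seq))
 *		goto retry;
 */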

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING

#ifdef CONFIG_S390
#define KVM_MAX_IRQ_ROUTES 4096
#elif defined(CONFIG_ARM64)
#define KVM_MAX_IRQ_ROUTES 4096
#else
#define KVM_MAX_IRQ_ROUTES 1024
#endif

int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *entries,
			unsigned nr,
			unsigned flags);
int kvm_set_routing_entry(struct kvm *kvm,
			  struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue);
void kvm_free_irq_routing(struct kvm *kvm);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *);
#else
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}
#endif

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm)
{
}
#endif
void kvm_arch_irq_routing_update(struct kvm *kvm);

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	return -ENOSYS;
}

#endif

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
	/*
	 * Ensure the rest of the request is published to kvm_check_request's
	 * caller.  Paired with the smp_mb__after_atomic in kvm_check_request.
	 */
	smp_wmb();
	set_bit(req, &vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
	if (test_bit(req, &vcpu->requests)) {
		clear_bit(req, &vcpu->requests);

		/*
		 * Ensure the rest of the request is visible to
		 * kvm_check_request's caller.  Paired with the smp_wmb
		 * in kvm_make_request.
		 */
		smp_mb__after_atomic();
		return true;
	} else {
		return false;
	}
}

extern bool kvm_rebooting;

struct kvm_device {
	struct kvm_device_ops *ops;
	struct kvm *kvm;
	void *private;
	struct list_head vm_node;
};

/* create, destroy, and name are mandatory */
struct kvm_device_ops {
	const char *name;

	/*
	 * create is called holding kvm->lock and any operations not suitable
	 * to do while holding the lock should be deferred to init (see
	 * below).
	 */
	int (*create)(struct kvm_device *dev, u32 type);

	/*
	 * init is called after create if create is successful and is called
	 * outside of holding kvm->lock.
	 */
	void (*init)(struct kvm_device *dev);

	/*
	 * Destroy is responsible for freeing dev.
	 *
	 * Destroy may be called before or after destructors are called
	 * on emulated I/O regions, depending on whether a reference is
	 * held by a vcpu or other kvm component that gets destroyed
	 * after the emulated I/O.
	 */
	void (*destroy)(struct kvm_device *dev);

	int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
		      unsigned long arg);
};

void kvm_device_get(struct kvm_device *dev);
void kvm_device_put(struct kvm_device *dev);
struct kvm_device *kvm_device_from_filp(struct file *filp);
int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type);
void kvm_unregister_device_ops(u32 type);

extern struct kvm_device_ops kvm_mpic_ops;
extern struct kvm_device_ops kvm_xics_ops;
extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
extern struct kvm_device_ops kvm_arm_vgic_v3_ops;

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.in_spin_loop = val;
}
static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.dy_eligible = val;
}

#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}
#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
bool kvm_arch_has_irq_bypass(void);
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
			   struct irq_bypass_producer *);
void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *,
			   struct irq_bypass_producer *);
void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *);
void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
				  uint32_t guest_irq, bool set);
#endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */

#ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS
/* If we wake up during the poll time, was it a successful poll? */
static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
{
	return vcpu->valid_wakeup;
}

#else
static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
{
	return true;
}
#endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */

#endif