#ifndef __KVM_HOST_H
#define __KVM_HOST_H

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/context_tracking.h>
#include <linux/irqbypass.h>
#include <linux/swait.h>
#include <linux/refcount.h>
#include <linux/nospec.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

#ifndef KVM_MAX_VCPU_ID
#define KVM_MAX_VCPU_ID KVM_MAX_VCPUS
#endif
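
/*
 * Bits 0-15 of kvm_memory_slot.flags are visible to userspace (the
 * KVM_MEM_* flags in the uapi); bit 16 and up are reserved for internal
 * use.  KVM_MEMSLOT_INVALID marks a slot that is being deleted or moved
 * and must not be used to serve guest accesses in the meantime.
 */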
#define KVM_MEMSLOT_INVALID	(1UL << 16)
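
/*
 * Bit 63 of the memslot generation number is an "update in-progress"
 * flag: it is set for the duration of a memslot update so that consumers
 * of the generation (e.g. cached MMIO translations) can tell that the
 * value they sampled may be about to become stale and must be rechecked.
 */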
#define KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS	BIT_ULL(63)

#define KVM_MAX_MMIO_FRAGMENTS	2

#ifndef KVM_ADDRESS_SPACE_NUM
#define KVM_ADDRESS_SPACE_NUM	1
#endif
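
/*
 * For a normal pfn the physical address is well below bit 52, so the
 * upper bits of a kvm_pfn_t are free to encode error values: bits 52-62
 * form the error-pfn space, and bit 63 marks a "noslot" pfn, i.e. a gfn
 * that is not covered by any memslot.
 */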
#define KVM_PFN_ERR_MASK	(0x7ffULL << 52)
#define KVM_PFN_ERR_NOSLOT_MASK	(0xfffULL << 52)
#define KVM_PFN_NOSLOT		(0x1ULL << 63)

#define KVM_PFN_ERR_FAULT	(KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON	(KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT	(KVM_PFN_ERR_MASK + 2)

/*
 * error pfns indicate that the gfn can not be translated to a pfn via
 * gfn_to_pfn().
 */
static inline bool is_error_pfn(kvm_pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_MASK);
}

/*
 * error_noslot pfns indicate that the gfn can not be translated to a pfn:
 * either the gfn is not in any slot, or the translation itself failed.
 */
static inline bool is_error_noslot_pfn(kvm_pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
}

/* noslot pfn indicates that the gfn is not in any slot. */
static inline bool is_noslot_pfn(kvm_pfn_t pfn)
{
	return pfn == KVM_PFN_NOSLOT;
}

/*
 * By default any hva at or above PAGE_OFFSET (i.e. a kernel address) is
 * treated as a bad hva; architectures may override these definitions.
 */
#ifndef KVM_HVA_ERR_BAD

#define KVM_HVA_ERR_BAD		(PAGE_OFFSET)
#define KVM_HVA_ERR_RO_BAD	(PAGE_OFFSET + PAGE_SIZE)

static inline bool kvm_is_error_hva(unsigned long addr)
{
	return addr >= PAGE_OFFSET;
}

#endif

#define KVM_ERR_PTR_BAD_PAGE	(ERR_PTR(-ENOENT))

static inline bool is_error_page(struct page *page)
{
	return IS_ERR(page);
}

#define KVM_REQUEST_MASK	GENMASK(7, 0)
#define KVM_REQUEST_NO_WAKEUP	BIT(8)
#define KVM_REQUEST_WAIT	BIT(9)
/*
 * Architecture-independent vcpu->requests bit members.
 * Bits 4-7 are reserved for more arch-independent bits.
 */
#define KVM_REQ_TLB_FLUSH	(0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_MMU_RELOAD	(1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_PENDING_TIMER	2
#define KVM_REQ_UNHALT		3
#define KVM_REQUEST_ARCH_BASE	8

#define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
	BUILD_BUG_ON((unsigned)(nr) >= (sizeof_field(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \
	(unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \
})
#define KVM_ARCH_REQ(nr)	KVM_ARCH_REQ_FLAGS(nr, 0)
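
/*
 * Example (sketch, hypothetical request name): an architecture defines
 * its private requests on top of KVM_REQUEST_ARCH_BASE, e.g.
 *
 *	#define KVM_REQ_EXAMPLE		KVM_ARCH_REQ(0)
 *
 * and then raises/consumes it with kvm_make_request()/kvm_check_request().
 */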

#define KVM_USERSPACE_IRQ_SOURCE_ID		0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1

extern struct mutex kvm_lock;
extern struct list_head vm_list;

struct kvm_io_range {
	gpa_t addr;
	int len;
	struct kvm_io_device *dev;
};

#define NR_IOBUS_DEVS 1000

struct kvm_io_bus {
	int dev_count;
	int ioeventfd_count;
	struct kvm_io_range range[];
};

enum kvm_bus {
	KVM_MMIO_BUS,
	KVM_PIO_BUS,
	KVM_VIRTIO_CCW_NOTIFY_BUS,
	KVM_FAST_MMIO_BUS,
	KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val);
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
			    gpa_t addr, int len, const void *val, long cookie);
int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		    int len, void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
			    int len, struct kvm_io_device *dev);
void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			       struct kvm_io_device *dev);
struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
					 gpa_t addr);

#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
	struct work_struct work;
	struct list_head link;
	struct list_head queue;
	struct kvm_vcpu *vcpu;
	struct mm_struct *mm;
	gpa_t cr2_or_gpa;
	unsigned long addr;
	struct kvm_arch_async_pf arch;
	bool wakeup_all;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
		       unsigned long hva, struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

enum {
	OUTSIDE_GUEST_MODE,
	IN_GUEST_MODE,
	EXITING_GUEST_MODE,
	READING_SHADOW_PAGE_TABLES,
};

#define KVM_UNMAPPED_PAGE	((void *) 0x500 + POISON_POINTER_DELTA)

struct kvm_host_map {
	/*
	 * Only valid if the pfn is managed by the host kernel (i.e. there
	 * is a 'struct page' for it).  If the pfn is not managed by the
	 * host kernel, this field is initialized to KVM_UNMAPPED_PAGE.
	 */
	struct page *page;
	void *hva;
	kvm_pfn_t pfn;
	kvm_pfn_t gfn;
};

/*
 * Used to check whether a mapping established by kvm_vcpu_map() is valid;
 * never inspect struct kvm_host_map fields directly for that.
 */
static inline bool kvm_vcpu_mapped(struct kvm_host_map *map)
{
	return !!map->hva;
}
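
/*
 * Example (sketch): temporarily map a guest page, scribble on it, then
 * unmap it again, marking it dirty.  Error handling trimmed for brevity.
 *
 *	struct kvm_host_map map;
 *
 *	if (!kvm_vcpu_map(vcpu, gpa, &map)) {
 *		memset(map.hva, 0, PAGE_SIZE);
 *		kvm_vcpu_unmap(vcpu, &map, true);
 *	}
 */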

/*
 * Sometimes a large or cross-page mmio needs to be broken up into separate
 * exits for userspace servicing.
 */
struct kvm_mmio_fragment {
	gpa_t gpa;
	void *data;
	unsigned len;
};

struct kvm_vcpu {
	struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct preempt_notifier preempt_notifier;
#endif
	int cpu;
	int vcpu_id;
	int vcpu_idx;
	int srcu_idx;
	int mode;
	u64 requests;
	unsigned long guest_debug;

	int pre_pcpu;
	struct list_head blocked_vcpu_list;

	struct mutex mutex;
	struct kvm_run *run;

	struct swait_queue_head wq;
	struct pid __rcu *pid;
	int sigset_active;
	sigset_t sigset;
	struct kvm_vcpu_stat stat;
	unsigned int halt_poll_ns;
	bool valid_wakeup;

#ifdef CONFIG_HAS_IOMEM
	int mmio_needed;
	int mmio_read_completed;
	int mmio_is_write;
	int mmio_cur_fragment;
	int mmio_nr_fragments;
	struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
	struct {
		u32 queued;
		struct list_head queue;
		struct list_head done;
		spinlock_t lock;
	} async_pf;
#endif

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
	/*
	 * Pause loop exit / cpu relax intercept optimization:
	 * in_spin_loop is set when a vcpu takes a pause-loop exit (or the
	 * cpu-relax intercept fires); dy_eligible says whether the vcpu
	 * is eligible for directed yield.
	 */
	struct {
		bool in_spin_loop;
		bool dy_eligible;
	} spin_loop;
#endif
	bool preempted;
	bool ready;
	struct kvm_vcpu_arch arch;
	struct dentry *debugfs_dentry;
};

static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
	/*
	 * The memory barrier ensures a previous write to vcpu->requests
	 * cannot be reordered with the read of vcpu->mode.  It pairs with
	 * the general memory barrier following the write of vcpu->mode in
	 * VCPU RUN.
	 */
	smp_mb__before_atomic();
	return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}

/*
 * Some of the bitops functions do not support too long bitmaps.
 * This number must be determined not to exceed such limits.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long *dirty_bitmap;
	struct kvm_arch_memory_slot arch;
	unsigned long userspace_addr;
	u32 flags;
	short id;
};

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}

static inline unsigned long *kvm_second_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	unsigned long len = kvm_dirty_bitmap_bytes(memslot);

	return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap);
}
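
/*
 * Note (summary, not verbatim from upstream docs): the dirty bitmap is
 * allocated at twice the nominal size; the second half serves as scratch
 * space when fetching and clearing dirty bits for KVM_GET_DIRTY_LOG.
 */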

#ifndef KVM_DIRTY_LOG_MANUAL_CAPS
#define KVM_DIRTY_LOG_MANUAL_CAPS KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE
#endif

struct kvm_s390_adapter_int {
	u64 ind_addr;
	u64 summary_addr;
	u64 ind_offset;
	u32 summary_offset;
	u32 adapter_id;
};

struct kvm_hv_sint {
	u32 vcpu;
	u32 sint;
};

struct kvm_kernel_irq_routing_entry {
	u32 gsi;
	u32 type;
	int (*set)(struct kvm_kernel_irq_routing_entry *e,
		   struct kvm *kvm, int irq_source_id, int level,
		   bool line_status);
	union {
		struct {
			unsigned irqchip;
			unsigned pin;
		} irqchip;
		struct {
			u32 address_lo;
			u32 address_hi;
			u32 data;
			u32 flags;
			u32 devid;
		} msi;
		struct kvm_s390_adapter_int adapter;
		struct kvm_hv_sint hv_sint;
	};
	struct hlist_node link;
};

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
struct kvm_irq_routing_table {
	int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
	u32 nr_rt_entries;
	/*
	 * Array indexed by gsi.  Each entry contains the list of irqchips
	 * the gsi is connected to.
	 */
	struct hlist_head map[];
};
#endif

#ifndef KVM_PRIVATE_MEM_SLOTS
#define KVM_PRIVATE_MEM_SLOTS 0
#endif

#ifndef KVM_MEM_SLOTS_NUM
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif

#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
{
	return 0;
}
#endif

/*
 * Note: memslots are no longer sorted by id; use id_to_memslot() to look
 * up a memslot by its id.
 */
struct kvm_memslots {
	u64 generation;
	/* The mapping table from slot id to the index in memslots[]. */
	short id_to_index[KVM_MEM_SLOTS_NUM];
	atomic_t lru_slot;
	int used_slots;
	struct kvm_memory_slot memslots[];
};

struct kvm {
	spinlock_t mmu_lock;
	struct mutex slots_lock;
	struct mm_struct *mm; /* userspace tied to this vm */
	struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM];
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];

	/*
	 * created_vcpus is protected by kvm->lock, and is incremented
	 * at the beginning of KVM_CREATE_VCPU.  online_vcpus is only
	 * incremented after storing the kvm_vcpu pointer in vcpus,
	 * and is accessed atomically.
	 */
	atomic_t online_vcpus;
	int created_vcpus;
	int last_boosted_vcpu;
	struct list_head vm_list;
	struct mutex lock;
	struct kvm_io_bus __rcu *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
	struct {
		spinlock_t lock;
		struct list_head items;
		struct list_head resampler_list;
		struct mutex resampler_lock;
	} irqfds;
	struct list_head ioeventfds;
#endif
	struct kvm_vm_stat stat;
	struct kvm_arch arch;
	refcount_t users_count;
#ifdef CONFIG_KVM_MMIO
	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
	spinlock_t ring_lock;
	struct list_head coalesced_zones;
#endif

	struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	/*
	 * Update side is protected by irq_lock.
	 */
	struct kvm_irq_routing_table __rcu *irq_routing;
#endif
#ifdef CONFIG_HAVE_KVM_IRQFD
	struct hlist_head irq_ack_notifier_list;
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	struct mmu_notifier mmu_notifier;
	unsigned long mmu_notifier_seq;
	long mmu_notifier_count;
#endif
	long tlbs_dirty;
	struct list_head devices;
	u64 manual_dirty_log_protect;
	struct dentry *debugfs_dentry;
	struct kvm_stat_data **debugfs_stat_data;
	struct srcu_struct srcu;
	struct srcu_struct irq_srcu;
	pid_t userspace_pid;
};

#define kvm_err(fmt, ...) \
	pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_info(fmt, ...) \
	pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
	pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug_ratelimited(fmt, ...) \
	pr_debug_ratelimited("kvm [%i]: " fmt, task_pid_nr(current), \
			     ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
	pr_err_ratelimited("kvm [%i]: " fmt, \
			   task_tgid_nr(current), ## __VA_ARGS__)

/* The guest did something we don't support. */
#define vcpu_unimpl(vcpu, fmt, ...) \
	kvm_pr_unimpl("vcpu%i, guest rIP: 0x%lx " fmt, \
			(vcpu)->vcpu_id, kvm_rip_read(vcpu), ## __VA_ARGS__)

#define vcpu_debug(vcpu, fmt, ...) \
	kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
#define vcpu_debug_ratelimited(vcpu, fmt, ...) \
	kvm_debug_ratelimited("vcpu%i " fmt, (vcpu)->vcpu_id, \
			      ## __VA_ARGS__)
#define vcpu_err(vcpu, fmt, ...) \
	kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)

static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm)
{
	return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET);
}

static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
{
	return srcu_dereference_check(kvm->buses[idx], &kvm->srcu,
				      lockdep_is_held(&kvm->slots_lock) ||
				      !refcount_read(&kvm->users_count));
}

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
	int num_vcpus = atomic_read(&kvm->online_vcpus);
	i = array_index_nospec(i, num_vcpus);

	/*
	 * Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu, in case the
	 * caller has read kvm->online_vcpus before (as is the case for
	 * kvm_for_each_vcpu, for example).
	 */
	smp_rmb();
	return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
	for (idx = 0; \
	     idx < atomic_read(&kvm->online_vcpus) && \
	     (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
	     idx++)
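
/*
 * Example (sketch): kick every online vcpu out of guest mode.
 *
 *	int i;
 *	struct kvm_vcpu *vcpu;
 *
 *	kvm_for_each_vcpu(i, vcpu, kvm)
 *		kvm_vcpu_kick(vcpu);
 */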

static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
{
	struct kvm_vcpu *vcpu = NULL;
	int i;

	if (id < 0)
		return NULL;
	if (id < KVM_MAX_VCPUS)
		vcpu = kvm_get_vcpu(kvm, id);
	if (vcpu && vcpu->vcpu_id == id)
		return vcpu;
	kvm_for_each_vcpu(i, vcpu, kvm)
		if (vcpu->vcpu_id == id)
			return vcpu;
	return NULL;
}

static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)
{
	return vcpu->vcpu_idx;
}

#define kvm_for_each_memslot(memslot, slots)				\
	for (memslot = &slots->memslots[0];				\
	     memslot < slots->memslots + slots->used_slots; memslot++)	\
		if (WARN_ON_ONCE(!memslot->npages)) {			\
		} else
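
/*
 * Example (sketch): count the pages covered by all used memslots.
 *
 *	struct kvm_memory_slot *memslot;
 *	unsigned long total = 0;
 *
 *	kvm_for_each_memslot(memslot, slots)
 *		total += memslot->npages;
 */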

void kvm_vcpu_destroy(struct kvm_vcpu *vcpu);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

#ifdef __KVM_HAVE_IOAPIC
void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm);
void kvm_arch_post_irq_routing_update(struct kvm *kvm);
#else
static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
{
}
static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm)
{
}
#endif

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
#else
static inline int kvm_irqfd_init(void)
{
	return 0;
}

static inline void kvm_irqfd_exit(void)
{
}
#endif
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
	     struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
void kvm_put_kvm_no_destroy(struct kvm *kvm);

static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{
	as_id = array_index_nospec(as_id, KVM_ADDRESS_SPACE_NUM);
	return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
				      lockdep_is_held(&kvm->slots_lock) ||
				      !refcount_read(&kvm->users_count));
}

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
	return __kvm_memslots(kvm, 0);
}

static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
{
	int as_id = kvm_arch_vcpu_memslots_id(vcpu);

	return __kvm_memslots(vcpu->kvm, as_id);
}

static inline
struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id)
{
	int index = slots->id_to_index[id];
	struct kvm_memory_slot *slot;

	if (index < 0)
		return NULL;

	slot = &slots->memslots[index];

	WARN_ON(slot->id != id);
	return slot;
}

/*
 * The KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
 * - create a new memory slot
 * - delete an existing memory slot
 * - modify an existing memory slot
 *   -- move it in the guest physical memory space
 *   -- just change its flags
 *
 * Since flags can be changed by some of these operations, the following
 * differentiation is the best we can do for __kvm_set_memory_region():
 */
enum kvm_mr_change {
	KVM_MR_CREATE,
	KVM_MR_DELETE,
	KVM_MR_MOVE,
	KVM_MR_FLAGS_ONLY,
};

int kvm_set_memory_region(struct kvm *kvm,
			  const struct kvm_userspace_memory_region *mem);
int __kvm_set_memory_region(struct kvm *kvm,
			    const struct kvm_userspace_memory_region *mem);
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem,
				enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change);
/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);
/* flush memory translations pointing to 'slot' */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot);

int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
			    struct page **pages, int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
				      bool *writable);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
			  bool *writable);
kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
			       bool atomic, bool *async, bool write_fault,
			       bool *writable);

void kvm_release_pfn_clean(kvm_pfn_t pfn);
void kvm_release_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_accessed(kvm_pfn_t pfn);
void kvm_get_pfn(kvm_pfn_t pfn);

void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache);
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			  void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
				  void *data, unsigned int offset,
				  unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa, unsigned long len);

#define __kvm_put_guest(kvm, gfn, offset, value, type)			\
({									\
	unsigned long __addr = gfn_to_hva(kvm, gfn);			\
	type __user *__uaddr = (type __user *)(__addr + offset);	\
	int __ret = -EFAULT;						\
									\
	if (!kvm_is_error_hva(__addr))					\
		__ret = put_user(value, __uaddr);			\
	if (!__ret)							\
		mark_page_dirty(kvm, gfn);				\
	__ret;								\
})

#define kvm_put_guest(kvm, gpa, value, type)				\
({									\
	gpa_t __gpa = gpa;						\
	struct kvm *__kvm = kvm;					\
	__kvm_put_guest(__kvm, __gpa >> PAGE_SHIFT,			\
			offset_in_page(__gpa), (value), type);		\
})
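
/*
 * Example (sketch): store a u32 status word at a guest physical address,
 * marking the page dirty on success.
 *
 *	int ret = kvm_put_guest(kvm, gpa, 1, u32);
 *	if (ret)
 *		return ret;	// -EFAULT if the gpa has no usable hva
 */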

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);

struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
		struct gfn_to_pfn_cache *cache, bool atomic);
struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
		  struct gfn_to_pfn_cache *cache, bool dirty, bool atomic);
unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
			     int len);
int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
			       unsigned long len);
int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
			unsigned long len);
int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
			      int offset, int len);
int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
			 unsigned long len);
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);

void kvm_sigset_activate(struct kvm_vcpu *vcpu);
void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);

bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
				 struct kvm_vcpu *except,
				 unsigned long *vcpu_bitmap, cpumask_var_t tmp);
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
				      struct kvm_vcpu *except);
bool kvm_make_cpus_request_mask(struct kvm *kvm, unsigned int req,
				unsigned long *vcpu_bitmap);

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);

void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
					     struct kvm_memory_slot *slot,
					     gfn_t gfn_offset,
					     unsigned long mask);
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot);

#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
					struct kvm_memory_slot *memslot);
#else /* !CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log);
int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
		      int *is_dirty, struct kvm_memory_slot **memslot);
#endif

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
			  bool line_status);
int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
			    struct kvm_enable_cap *cap);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu);
#endif

int kvm_arch_hardware_enable(void);
void kvm_arch_hardware_disable(void);
int kvm_arch_hardware_setup(void *opaque);
void kvm_arch_hardware_unsetup(void);
int kvm_arch_check_processor_compat(void *opaque);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
int kvm_arch_post_init_vm(struct kvm *kvm);
void kvm_arch_pre_destroy_vm(struct kvm *kvm);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
/*
 * All architectures that want to use vzalloc currently also
 * need their own kvm_arch_alloc_vm implementation.
 */
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
	kfree(kvm);
}
#endif

#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
{
	return -ENOTSUPP;
}
#endif

#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
#else
static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{
}

static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{
	return false;
}
#endif
#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
void kvm_arch_start_assignment(struct kvm *kvm);
void kvm_arch_end_assignment(struct kvm *kvm);
bool kvm_arch_has_assigned_device(struct kvm *kvm);
#else
static inline void kvm_arch_start_assignment(struct kvm *kvm)
{
}

static inline void kvm_arch_end_assignment(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
{
	return false;
}
#endif

static inline struct swait_queue_head *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
	return vcpu->arch.wqp;
#else
	return &vcpu->wq;
#endif
}

#ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED
/*
 * returns true if the virtual interrupt controller is initialized and
 * ready to accept virtual IRQ. On some architectures the virtual interrupt
 * controller is dynamically instantiated and this is not always true.
 */
bool kvm_arch_intc_initialized(struct kvm *kvm);
#else
static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
{
	return true;
}
#endif

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);

bool kvm_is_reserved_pfn(kvm_pfn_t pfn);
bool kvm_is_zone_device_pfn(kvm_pfn_t pfn);
bool kvm_is_transparent_hugepage(kvm_pfn_t pfn);

struct kvm_irq_ack_notifier {
	struct hlist_node link;
	unsigned gsi;
	void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries, int gsi);
int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);

int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
		int irq_source_id, int level, bool line_status);
int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
			      struct kvm *kvm, int irq_source_id,
			      int level, bool line_status);
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_notify_acked_gsi(struct kvm *kvm, int gsi);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				     struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);

/*
 * search_memslots() and __gfn_to_memslot() are here because they are
 * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
 * gfn_to_memslot() itself isn't here as an inline because that would
 * bloat other code too much.
 *
 * IMPORTANT: Slots are sorted from highest GFN to lowest GFN!
 */
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
{
	int start = 0, end = slots->used_slots;
	int slot = atomic_read(&slots->lru_slot);
	struct kvm_memory_slot *memslots = slots->memslots;

	if (unlikely(!slots->used_slots))
		return NULL;

	/* Fast path: the most recently used slot often hits again. */
	if (gfn >= memslots[slot].base_gfn &&
	    gfn < memslots[slot].base_gfn + memslots[slot].npages)
		return &memslots[slot];

	/* Binary search over slots sorted by descending base_gfn. */
	while (start < end) {
		slot = start + (end - start) / 2;

		if (gfn >= memslots[slot].base_gfn)
			end = slot;
		else
			start = slot + 1;
	}

	if (start < slots->used_slots && gfn >= memslots[start].base_gfn &&
	    gfn < memslots[start].base_gfn + memslots[start].npages) {
		atomic_set(&slots->lru_slot, start);
		return &memslots[start];
	}

	return NULL;
}

static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
	return search_memslots(slots, gfn);
}

static inline unsigned long
__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_memslot(kvm, gfn)->id;
}

static inline gfn_t
hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
{
	gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

	return slot->base_gfn + gfn_offset;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
{
	return (hpa_t)pfn << PAGE_SHIFT;
}
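
/*
 * Worked example: with 4 KiB pages (PAGE_SHIFT == 12), gpa 0x12345 maps
 * to gfn 0x12, and gfn_to_gpa(0x12) gives back the page-aligned base
 * 0x12000; the in-page offset 0x345 is recovered via offset_in_page().
 */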

static inline struct page *kvm_vcpu_gpa_to_page(struct kvm_vcpu *vcpu,
						gpa_t gpa)
{
	return kvm_vcpu_gfn_to_page(vcpu, gpa_to_gfn(gpa));
}

static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
{
	unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));

	return kvm_is_error_hva(hva);
}

enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};

struct kvm_stat_data {
	struct kvm *kvm;
	struct kvm_stats_debugfs_item *dbgfs_item;
};

struct kvm_stats_debugfs_item {
	const char *name;
	int offset;
	enum kvm_stat_kind kind;
	int mode;
};

#define KVM_DBGFS_GET_MODE(dbgfs_item)					\
	((dbgfs_item)->mode ? (dbgfs_item)->mode : 0644)

extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
{
	if (unlikely(kvm->mmu_notifier_count))
		return 1;
	/*
	 * Ensure the read of mmu_notifier_count happens before the read
	 * of mmu_notifier_seq.  This interacts with the smp_wmb() in
	 * mmu_notifier_invalidate_range_end to make sure that the caller
	 * either sees the old (non-zero) value of mmu_notifier_count or
	 * the new (incremented) value of mmu_notifier_seq.
	 * PowerPC Book3s HV KVM calls this under a per-page lock rather
	 * than under kvm->mmu_lock, for scalability, so can't rely on
	 * kvm->mmu_lock to keep things ordered.
	 */
	smp_rmb();
	if (kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}
#endif

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING

#define KVM_MAX_IRQ_ROUTES 4096 /* might need extension/rework in the future */

bool kvm_arch_can_set_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *entries,
			unsigned nr,
			unsigned flags);
int kvm_set_routing_entry(struct kvm *kvm,
			  struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue);
void kvm_free_irq_routing(struct kvm *kvm);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *);
#else
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}
#endif

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm)
{
}
#endif

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

void kvm_arch_irq_routing_update(struct kvm *kvm);

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
	/*
	 * Ensure the rest of the request is published to kvm_check_request's
	 * caller.  Paired with the smp_mb__after_atomic in kvm_check_request.
	 */
	smp_wmb();
	set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
{
	return READ_ONCE(vcpu->requests);
}

static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu)
{
	return test_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

static inline void kvm_clear_request(int req, struct kvm_vcpu *vcpu)
{
	clear_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
	if (kvm_test_request(req, vcpu)) {
		kvm_clear_request(req, vcpu);

		/*
		 * Ensure the rest of the request is visible to
		 * kvm_check_request's caller.  Paired with the smp_wmb in
		 * kvm_make_request.
		 */
		smp_mb__after_atomic();
		return true;
	} else {
		return false;
	}
}
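
/*
 * Example (sketch): the typical raise/consume pattern.  The raiser does
 * kvm_make_request() followed by kvm_vcpu_kick(); the vcpu run loop then
 * consumes the request:
 *
 *	if (kvm_request_pending(vcpu)) {
 *		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
 *			flush_guest_tlb(vcpu);	// hypothetical handler
 *	}
 */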

extern bool kvm_rebooting;

extern unsigned int halt_poll_ns;
extern unsigned int halt_poll_ns_grow;
extern unsigned int halt_poll_ns_grow_start;
extern unsigned int halt_poll_ns_shrink;

struct kvm_device {
	const struct kvm_device_ops *ops;
	struct kvm *kvm;
	void *private;
	struct list_head vm_node;
};

/* create, destroy, and name are mandatory */
struct kvm_device_ops {
	const char *name;

	/*
	 * create is called holding kvm->lock and any operations not suitable
	 * to do while holding the lock should be deferred to init (see
	 * below).
	 */
	int (*create)(struct kvm_device *dev, u32 type);

	/*
	 * init is called after create if create is successful and is called
	 * outside of holding kvm->lock.
	 */
	void (*init)(struct kvm_device *dev);

	/*
	 * Destroy is responsible for freeing dev.
	 *
	 * Destroy may be called before or after destructors are called
	 * on emulated I/O regions, depending on whether a reference is
	 * held by a vcpu or other kvm component that gets destroyed
	 * after the emulated I/O.
	 */
	void (*destroy)(struct kvm_device *dev);

	/*
	 * Release is an alternative method to free the device. It is
	 * called when the device file descriptor is closed. Once
	 * release is called, the destroy method will not be called
	 * anymore as the device is removed from the device list of
	 * the VM. kvm->lock is held.
	 */
	void (*release)(struct kvm_device *dev);

	int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
		      unsigned long arg);
	int (*mmap)(struct kvm_device *dev, struct vm_area_struct *vma);
};

void kvm_device_get(struct kvm_device *dev);
void kvm_device_put(struct kvm_device *dev);
struct kvm_device *kvm_device_from_filp(struct file *filp);
int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type);
void kvm_unregister_device_ops(u32 type);

extern struct kvm_device_ops kvm_mpic_ops;
extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
extern struct kvm_device_ops kvm_arm_vgic_v3_ops;

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.in_spin_loop = val;
}
static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.dy_eligible = val;
}

#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}
#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

struct kvm_vcpu *kvm_get_running_vcpu(void);
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
bool kvm_arch_has_irq_bypass(void);
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
				     struct irq_bypass_producer *);
void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *,
				      struct irq_bypass_producer *);
void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *);
void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
				  uint32_t guest_irq, bool set);
#endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */

#ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS
/* If we wakeup during the poll time, was it a successful poll? */
static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
{
	return vcpu->valid_wakeup;
}

#else
static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
{
	return true;
}
#endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */

#ifdef CONFIG_HAVE_KVM_NO_POLL
/* Callback that tells if we must not poll */
bool kvm_arch_no_poll(struct kvm_vcpu *vcpu);
#else
static inline bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
{
	return false;
}
#endif /* CONFIG_HAVE_KVM_NO_POLL */

#ifdef CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL
long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg);
#else
static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
					     unsigned int ioctl,
					     unsigned long arg)
{
	return -ENOIOCTLCMD;
}
#endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */

int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
		unsigned long start, unsigned long end, bool blockable);

#ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
#else
static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
	return 0;
}
#endif /* CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE */

typedef int (*kvm_vm_thread_fn_t)(struct kvm *kvm, uintptr_t data);

int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
				uintptr_t data, const char *name,
				struct task_struct **thread_ptr);

#endif /* __KVM_HOST_H */