/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __KVM_HOST_H
#define __KVM_HOST_H


#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/stat.h>
#include <linux/bug.h>
#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/context_tracking.h>
#include <linux/irqbypass.h>
#include <linux/rcuwait.h>
#include <linux/refcount.h>
#include <linux/nospec.h>
#include <linux/notifier.h>
#include <linux/ftrace.h>
#include <linux/hashtable.h>
#include <linux/instrumentation.h>
#include <linux/interval_tree.h>
#include <linux/rbtree.h>
#include <linux/xarray.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>
#include <linux/kvm_dirty_ring.h>
#ifndef KVM_MAX_VCPU_IDS
#define KVM_MAX_VCPU_IDS KVM_MAX_VCPUS
#endif

/*
 * The bit 16 ~ bit 31 of kvm_memory_region::flags are internally used
 * in kvm, other bits are visible for userspace which are defined in
 * include/linux/kvm.h.
 */
#define KVM_MEMSLOT_INVALID (1UL << 16)

/*
 * Bit 63 of the memslot generation number is an "update in-progress flag",
 * e.g. is temporarily set for the duration of install_new_memslots().
 * This flag effectively creates a unique generation number that is used to
 * mark cached memslot data, e.g. MMIO accesses, as potentially being stale,
 * i.e. may (or may not) have come from the previous memslots generation.
 *
 * This is necessary because the actual memslots update is not atomic with
 * respect to the generation number update.  Updating the generation number
 * first would allow a vCPU to cache data from the old memslots using the
 * new generation number, and updating it after switching to the new memslots
 * would allow cache hits using the old generation number to reference the
 * defunct memslots.  Setting this flag for the whole update window prevents
 * cache hits in either direction while an update is in-progress.
 */
#define KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS BIT_ULL(63)

/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS 2

#ifndef KVM_ADDRESS_SPACE_NUM
#define KVM_ADDRESS_SPACE_NUM 1
#endif

/*
 * For the normal pfn, the highest 12 bits should be zero,
 * so we can mask bit 62 ~ bit 52 to indicate an error pfn,
 * and mask bit 63 to indicate the noslot pfn.
 */
#define KVM_PFN_ERR_MASK (0x7ffULL << 52)
#define KVM_PFN_ERR_NOSLOT_MASK (0xfffULL << 52)
#define KVM_PFN_NOSLOT (0x1ULL << 63)

#define KVM_PFN_ERR_FAULT (KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON (KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT (KVM_PFN_ERR_MASK + 2)

/*
 * error pfns indicate that the gfn is in slot but failed to
 * translate it to pfn on host.
 */
static inline bool is_error_pfn(kvm_pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_MASK);
}

/*
 * error_noslot pfns indicate that the gfn can not be
 * translated to pfn - it is not in slot or failed to
 * translate it to pfn.
 */
static inline bool is_error_noslot_pfn(kvm_pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
}

/* noslot pfn indicates that the gfn is not in slot. */
static inline bool is_noslot_pfn(kvm_pfn_t pfn)
{
	return pfn == KVM_PFN_NOSLOT;
}

/*
 * architectures with KVM_HVA_ERR_BAD other than PAGE_OFFSET (e.g. s390)
 * provide own defines and kvm_is_error_hva
 */
#ifndef KVM_HVA_ERR_BAD

#define KVM_HVA_ERR_BAD (PAGE_OFFSET)
#define KVM_HVA_ERR_RO_BAD (PAGE_OFFSET + PAGE_SIZE)

static inline bool kvm_is_error_hva(unsigned long addr)
{
	return addr >= PAGE_OFFSET;
}

#endif

#define KVM_ERR_PTR_BAD_PAGE (ERR_PTR(-ENOENT))

static inline bool is_error_page(struct page *page)
{
	return IS_ERR(page);
}
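
/*
 * Illustrative sketch (not part of the API above): a caller translating a
 * gfn is expected to classify the result before use; is_noslot_pfn()
 * distinguishes "no memslot backs this gfn" from other translation failures:
 *
 *	kvm_pfn_t pfn = gfn_to_pfn(kvm, gfn);
 *
 *	if (is_error_noslot_pfn(pfn))
 *		return is_noslot_pfn(pfn) ? -ENOENT : -EFAULT;
 */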

#define KVM_REQUEST_MASK	GENMASK(7,0)
#define KVM_REQUEST_NO_WAKEUP	BIT(8)
#define KVM_REQUEST_WAIT	BIT(9)

/*
 * Architecture-independent vcpu->requests bit members.
 * Bits 6-7 are reserved for more arch-independent bits; arch-specific
 * requests start at KVM_REQUEST_ARCH_BASE.
 */
#define KVM_REQ_TLB_FLUSH (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_MMU_RELOAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_UNBLOCK 2
#define KVM_REQ_UNHALT 3
#define KVM_REQ_VM_DEAD (4 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_GPC_INVALIDATE (5 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQUEST_ARCH_BASE 8

#define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
	BUILD_BUG_ON((unsigned)(nr) >= (sizeof_field(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \
	(unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \
})
#define KVM_ARCH_REQ(nr) KVM_ARCH_REQ_FLAGS(nr, 0)
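
/*
 * Illustrative sketch: an architecture defines its private requests on top
 * of KVM_REQUEST_ARCH_BASE (the request names below are hypothetical):
 *
 *	#define KVM_REQ_EXAMPLE_EVENT	KVM_ARCH_REQ(0)
 *	#define KVM_REQ_EXAMPLE_SYNC	KVM_ARCH_REQ_FLAGS(1, KVM_REQUEST_WAIT)
 *
 * KVM_ARCH_REQ(0) maps to bit 8, leaving bits 0-7 for the common requests
 * defined above; the BUILD_BUG_ON rejects request numbers that would not
 * fit in vcpu->requests.
 */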

bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
				 unsigned long *vcpu_bitmap);
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
				      struct kvm_vcpu *except);

#define KVM_USERSPACE_IRQ_SOURCE_ID 0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
extern struct mutex kvm_lock;
extern struct list_head vm_list;

struct kvm_io_range {
	gpa_t addr;
	int len;
	struct kvm_io_device *dev;
};

#define NR_IOBUS_DEVS 1000

struct kvm_io_bus {
	int dev_count;
	int ioeventfd_count;
	struct kvm_io_range range[];
};

enum kvm_bus {
	KVM_MMIO_BUS,
	KVM_PIO_BUS,
	KVM_VIRTIO_CCW_NOTIFY_BUS,
	KVM_FAST_MMIO_BUS,
	KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val);
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
			    gpa_t addr, int len, const void *val, long cookie);
int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		    int len, void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
			    int len, struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			      struct kvm_io_device *dev);
struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
					 gpa_t addr);

#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
	struct work_struct work;
	struct list_head link;
	struct list_head queue;
	struct kvm_vcpu *vcpu;
	struct mm_struct *mm;
	gpa_t cr2_or_gpa;
	unsigned long addr;
	struct kvm_arch_async_pf arch;
	bool wakeup_all;
	bool notpresent_injected;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
			unsigned long hva, struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
struct kvm_gfn_range {
	struct kvm_memory_slot *slot;
	gfn_t start;
	gfn_t end;
	pte_t pte;
	bool may_block;
};
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
#endif

enum {
	OUTSIDE_GUEST_MODE,
	IN_GUEST_MODE,
	EXITING_GUEST_MODE,
	READING_SHADOW_PAGE_TABLES,
};

#define KVM_UNMAPPED_PAGE ((void *) 0x500 + POISON_POINTER_DELTA)

struct kvm_host_map {
	/*
	 * Only valid if the 'pfn' is managed by the host kernel (i.e. there is
	 * a 'struct page' for it; when using the mem= kernel parameter some
	 * memory can be used as guest memory but is not managed by the host
	 * kernel).
	 * If 'pfn' is not managed by the host kernel, this field is
	 * initialized to KVM_UNMAPPED_PAGE.
	 */
	struct page *page;
	void *hva;
	kvm_pfn_t pfn;
	kvm_pfn_t gfn;
};

/*
 * Used to check if the mapping is valid or not. Never use 'kvm_host_map'
 * directly to check for that.
 */
static inline bool kvm_vcpu_mapped(struct kvm_host_map *map)
{
	return !!map->hva;
}

static inline bool kvm_vcpu_can_poll(ktime_t cur, ktime_t stop)
{
	return single_task_running() && !need_resched() && ktime_before(cur, stop);
}

/*
 * Sometimes a large or cross-page mmio needs to be broken up into separate
 * exits for userspace servicing.
 */
struct kvm_mmio_fragment {
	gpa_t gpa;
	void *data;
	unsigned len;
};

struct kvm_vcpu {
	struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct preempt_notifier preempt_notifier;
#endif
	int cpu;
	int vcpu_id; /* id given by userspace at creation */
	int vcpu_idx; /* index into kvm->vcpu_array */
	int srcu_idx;
	int mode;
	u64 requests;
	unsigned long guest_debug;

	struct mutex mutex;
	struct kvm_run *run;

#ifndef __KVM_HAVE_ARCH_WQP
	struct rcuwait wait;
#endif
	struct pid __rcu *pid;
	int sigset_active;
	sigset_t sigset;
	unsigned int halt_poll_ns;
	bool valid_wakeup;

#ifdef CONFIG_HAS_IOMEM
	int mmio_needed;
	int mmio_read_completed;
	int mmio_is_write;
	int mmio_cur_fragment;
	int mmio_nr_fragments;
	struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
	struct {
		u32 queued;
		struct list_head queue;
		struct list_head done;
		spinlock_t lock;
	} async_pf;
#endif

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
	/*
	 * Cpu relax intercept or pause loop exit optimization
	 * in_spin_loop: set when a vcpu does a pause loop exit
	 *  or cpu relax intercepted.
	 * dy_eligible: indicates whether vcpu is eligible for directed yield.
	 */
	struct {
		bool in_spin_loop;
		bool dy_eligible;
	} spin_loop;
#endif
	bool preempted;
	bool ready;
	struct kvm_vcpu_arch arch;
	struct kvm_vcpu_stat stat;
	char stats_id[KVM_STATS_NAME_SIZE];
	struct kvm_dirty_ring dirty_ring;

	/*
	 * The most recently used memslot by this vCPU and the slots generation
	 * for which it is valid.
	 * No wraparound protection is needed since generations won't overflow
	 * in thousands of years, even if usages roll over every few seconds.
	 */
	struct kvm_memory_slot *last_used_slot;
	u64 last_used_slot_gen;
};

/*
 * Start accounting time towards a guest.
 * Must be called before entering guest context.
 */
static __always_inline void guest_timing_enter_irqoff(void)
{
	/*
	 * This is running in ioctl context so it's safe to assume that it's
	 * the stime pending cputime to flush.
	 */
	instrumentation_begin();
	vtime_account_guest_enter();
	instrumentation_end();
}

/*
 * Enter guest context and enter an RCU extended quiescent state.
 *
 * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is
 * unsafe to use any code which may directly or indirectly use RCU, tracing
 * (including IRQ flag tracing), or lockdep. All code in this period must be
 * non-instrumentable.
 */
static __always_inline void guest_context_enter_irqoff(void)
{
	/*
	 * KVM does not hold any references to rcu protected data when it
	 * switches CPU into a guest mode. In fact switching to a guest mode
	 * is very similar to exiting to userspace from rcu point of view. In
	 * addition CPU may stay in a guest mode for quite a long time (up to
	 * one time slice). Lets treat guest mode as quiescent state, just like
	 * we do with user-mode execution.
	 */
	if (!context_tracking_guest_enter()) {
		instrumentation_begin();
		rcu_virt_note_context_switch(smp_processor_id());
		instrumentation_end();
	}
}

/*
 * Deprecated. Architectures should move to guest_timing_enter_irqoff() and
 * guest_state_enter_irqoff().
 */
static __always_inline void guest_enter_irqoff(void)
{
	guest_timing_enter_irqoff();
	guest_context_enter_irqoff();
}

/**
 * guest_state_enter_irqoff - Fixup state when entering a guest
 *
 * Entry to a guest will enable interrupts, but the kernel state is interrupts
 * disabled when this is invoked. Also tell RCU about it.
 *
 * 1) Trace interrupts on state
 * 2) Invoke context tracking if enabled to adjust RCU state
 * 3) Tell lockdep that interrupts are enabled
 *
 * Invoked from architecture specific code before entering a guest.
 * Must be called with interrupts disabled and the caller must be
 * non-instrumentable.
 * The caller has to invoke guest_timing_enter_irqoff() before this.
 *
 * Note: this is analogous to exit_to_user_mode().
 */
static __always_inline void guest_state_enter_irqoff(void)
{
	instrumentation_begin();
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	instrumentation_end();

	guest_context_enter_irqoff();
	lockdep_hardirqs_on(CALLER_ADDR0);
}

/*
 * Exit guest context and exit an RCU extended quiescent state.
 *
 * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is
 * unsafe to use any code which may directly or indirectly use RCU, tracing
 * (including IRQ flag tracing), or lockdep. All code in this period must be
 * non-instrumentable.
 */
static __always_inline void guest_context_exit_irqoff(void)
{
	context_tracking_guest_exit();
}

/*
 * Stop accounting time towards a guest.
 * Must be called after exiting guest context.
 */
static __always_inline void guest_timing_exit_irqoff(void)
{
	instrumentation_begin();
	/* Flush the guest cputime we spent on the guest */
	vtime_account_guest_exit();
	instrumentation_end();
}

/*
 * Deprecated. Architectures should move to guest_state_exit_irqoff() and
 * guest_timing_exit_irqoff().
 */
static __always_inline void guest_exit_irqoff(void)
{
	guest_context_exit_irqoff();
	guest_timing_exit_irqoff();
}

static inline void guest_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	guest_exit_irqoff();
	local_irq_restore(flags);
}

/**
 * guest_state_exit_irqoff - Establish state when returning from guest mode
 *
 * Entry from a guest disables interrupts, but guest mode is traced as
 * interrupts enabled. Also with NO_HZ_FULL RCU might be idle.
 *
 * 1) Tell lockdep that interrupts are disabled
 * 2) Invoke context tracking if enabled to reactivate RCU
 * 3) Trace interrupts off state
 *
 * Invoked from architecture specific code after exiting a guest.
 * Must be invoked with interrupts disabled and the caller must be
 * non-instrumentable.
 * The caller has to invoke guest_timing_exit_irqoff() after this.
 *
 * Note: this is analogous to enter_from_user_mode().
 */
static __always_inline void guest_state_exit_irqoff(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);
	guest_context_exit_irqoff();

	instrumentation_begin();
	trace_hardirqs_off_finish();
	instrumentation_end();
}
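
/*
 * Illustrative sketch of the intended call ordering, per the kernel-doc
 * comments above (arch_enter_guest() is a hypothetical arch-specific
 * low-level entry routine, not a real KVM function):
 *
 *	local_irq_disable();
 *	guest_timing_enter_irqoff();
 *	guest_state_enter_irqoff();
 *	arch_enter_guest(vcpu);
 *	guest_state_exit_irqoff();
 *	guest_timing_exit_irqoff();
 *	local_irq_enable();
 */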

static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
	/*
	 * The memory barrier ensures a previous write to vcpu->requests cannot
	 * be reordered with the read of vcpu->mode.  It pairs with the general
	 * memory barrier following the write of vcpu->mode in VCPU RUN.
	 */
	smp_mb__before_atomic();
	return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}

/*
 * Some of the bitops functions do not support too long bitmaps.
 * This number must be determined not to exceed such limits.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

/*
 * Since at idle each memslot belongs to two memslot sets it has to contain
 * two embedded nodes for each data structure which it is a part of.
 *
 * Two memslot sets (one active and one inactive) are necessary so the VM
 * continues to run on one memslot set while the other is being modified.
 *
 * These two memslot sets normally point to the same set of memslots.
 * They can, however, be desynchronized when performing a memslot management
 * operation by replacing the memslot to be modified by its copy.
 * After the operation is complete, both memslot sets once again point to
 * the same, common set of memslot data.
 *
 * The memslots themselves are independent of each other so they can be
 * individually added or deleted.
 */
struct kvm_memory_slot {
	struct hlist_node id_node[2];
	struct interval_tree_node hva_node[2];
	struct rb_node gfn_node[2];
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long *dirty_bitmap;
	struct kvm_arch_memory_slot arch;
	unsigned long userspace_addr;
	u32 flags;
	short id;
	u16 as_id;
};

static inline bool kvm_slot_dirty_track_enabled(const struct kvm_memory_slot *slot)
{
	return slot->flags & KVM_MEM_LOG_DIRTY_PAGES;
}

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}

static inline unsigned long *kvm_second_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	unsigned long len = kvm_dirty_bitmap_bytes(memslot);

	return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap);
}

#ifndef KVM_DIRTY_LOG_MANUAL_CAPS
#define KVM_DIRTY_LOG_MANUAL_CAPS KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE
#endif

struct kvm_s390_adapter_int {
	u64 ind_addr;
	u64 summary_addr;
	u64 ind_offset;
	u32 summary_offset;
	u32 adapter_id;
};

struct kvm_hv_sint {
	u32 vcpu;
	u32 sint;
};

struct kvm_xen_evtchn {
	u32 port;
	u32 vcpu;
	u32 priority;
};

struct kvm_kernel_irq_routing_entry {
	u32 gsi;
	u32 type;
	int (*set)(struct kvm_kernel_irq_routing_entry *e,
		   struct kvm *kvm, int irq_source_id, int level,
		   bool line_status);
	union {
		struct {
			unsigned irqchip;
			unsigned pin;
		} irqchip;
		struct {
			u32 address_lo;
			u32 address_hi;
			u32 data;
			u32 flags;
			u32 devid;
		} msi;
		struct kvm_s390_adapter_int adapter;
		struct kvm_hv_sint hv_sint;
		struct kvm_xen_evtchn xen_evtchn;
	};
	struct hlist_node link;
};

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
struct kvm_irq_routing_table {
	int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
	u32 nr_rt_entries;
	/*
	 * Array indexed by gsi. Each entry contains list of irq chips
	 * the gsi is connected to.
	 */
	struct hlist_head map[];
};
#endif

#ifndef KVM_PRIVATE_MEM_SLOTS
#define KVM_PRIVATE_MEM_SLOTS 0
#endif

#define KVM_MEM_SLOTS_NUM SHRT_MAX
#define KVM_USER_MEM_SLOTS (KVM_MEM_SLOTS_NUM - KVM_PRIVATE_MEM_SLOTS)

#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
{
	return 0;
}
#endif

struct kvm_memslots {
	u64 generation;
	atomic_long_t last_used_slot;
	struct rb_root_cached hva_tree;
	struct rb_root gfn_tree;
	/*
	 * The mapping table from slot id to memslot.
	 *
	 * 7-bit bucket count matches the size of the old id to index array for
	 * 512 slots, while giving good performance with this slot count.
	 * Higher bucket counts bring only small performance improvements but
	 * always result in higher memory usage (even for lower memslot counts).
	 */
	DECLARE_HASHTABLE(id_hash, 7);
	int node_idx;
};

struct kvm {
#ifdef KVM_HAVE_MMU_RWLOCK
	rwlock_t mmu_lock;
#else
	spinlock_t mmu_lock;
#endif

	struct mutex slots_lock;

	/*
	 * Protects the arch-specific fields of struct kvm_memory_slots in
	 * use by the VM. To be used under the slots_lock (above) or in a
	 * kvm->srcu critical section where acquiring the slots_lock would
	 * lead to deadlock with the synchronize_srcu in
	 * install_new_memslots.
	 */
	struct mutex slots_arch_lock;
	struct mm_struct *mm; /* userspace tied to this vm */
	unsigned long nr_memslot_pages;
	/* The two memslot sets - active and inactive (per address space) */
	struct kvm_memslots __memslots[KVM_ADDRESS_SPACE_NUM][2];
	/* The current active memslot set for each address space */
	struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM];
	struct xarray vcpu_array;

	/* Used to wait for completion of MMU notifiers.  */
	spinlock_t mn_invalidate_lock;
	unsigned long mn_active_invalidate_count;
	struct rcuwait mn_memslots_update_rcuwait;

	/* For management / invalidation of gfn_to_pfn_caches */
	spinlock_t gpc_lock;
	struct list_head gpc_list;

	/*
	 * created_vcpus is protected by kvm->lock, and is incremented
	 * at the beginning of KVM_CREATE_VCPU.  online_vcpus is only
	 * incremented after storing the kvm_vcpu pointer in vcpus,
	 * and is accessed atomically.
	 */
	atomic_t online_vcpus;
	int created_vcpus;
	int last_boosted_vcpu;
	struct list_head vm_list;
	struct mutex lock;
	struct kvm_io_bus __rcu *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
	struct {
		spinlock_t lock;
		struct list_head items;
		struct list_head resampler_list;
		struct mutex resampler_lock;
	} irqfds;
	struct list_head ioeventfds;
#endif
	struct kvm_vm_stat stat;
	struct kvm_arch arch;
	refcount_t users_count;
#ifdef CONFIG_KVM_MMIO
	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
	spinlock_t ring_lock;
	struct list_head coalesced_zones;
#endif

	struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	/*
	 * Update side is protected by irq_lock.
	 * Read side is protected by irq_srcu.
	 */
	struct kvm_irq_routing_table __rcu *irq_routing;
#endif
#ifdef CONFIG_HAVE_KVM_IRQFD
	struct hlist_head irq_ack_notifier_list;
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	struct mmu_notifier mmu_notifier;
	unsigned long mmu_notifier_seq;
	long mmu_notifier_count;
	unsigned long mmu_notifier_range_start;
	unsigned long mmu_notifier_range_end;
#endif
	struct list_head devices;
	u64 manual_dirty_log_protect;
	struct dentry *debugfs_dentry;
	struct kvm_stat_data **debugfs_stat_data;
	struct srcu_struct srcu;
	struct srcu_struct irq_srcu;
	pid_t userspace_pid;
	unsigned int max_halt_poll_ns;
	u32 dirty_ring_size;
	bool vm_bugged;
	bool vm_dead;

#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
	struct notifier_block pm_notifier;
#endif
	char stats_id[KVM_STATS_NAME_SIZE];
};

#define kvm_err(fmt, ...) \
	pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_info(fmt, ...) \
	pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
	pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug_ratelimited(fmt, ...) \
	pr_debug_ratelimited("kvm [%i]: " fmt, task_pid_nr(current), \
			     ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
	pr_err_ratelimited("kvm [%i]: " fmt, \
			   task_tgid_nr(current), ## __VA_ARGS__)

/* The guest did something we don't support. */
#define vcpu_unimpl(vcpu, fmt, ...) \
	kvm_pr_unimpl("vcpu%i, guest rIP: 0x%lx " fmt, \
		      (vcpu)->vcpu_id, kvm_rip_read(vcpu), ## __VA_ARGS__)

#define vcpu_debug(vcpu, fmt, ...) \
	kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
#define vcpu_debug_ratelimited(vcpu, fmt, ...) \
	kvm_debug_ratelimited("vcpu%i " fmt, (vcpu)->vcpu_id, \
			      ## __VA_ARGS__)
#define vcpu_err(vcpu, fmt, ...) \
	kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)

static inline void kvm_vm_dead(struct kvm *kvm)
{
	kvm->vm_dead = true;
	kvm_make_all_cpus_request(kvm, KVM_REQ_VM_DEAD);
}

static inline void kvm_vm_bugged(struct kvm *kvm)
{
	kvm->vm_bugged = true;
	kvm_vm_dead(kvm);
}


#define KVM_BUG(cond, kvm, fmt...) \
({ \
	int __ret = (cond); \
 \
	if (WARN_ONCE(__ret && !(kvm)->vm_bugged, fmt)) \
		kvm_vm_bugged(kvm); \
	unlikely(__ret); \
})

#define KVM_BUG_ON(cond, kvm) \
({ \
	int __ret = (cond); \
 \
	if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged)) \
		kvm_vm_bugged(kvm); \
	unlikely(__ret); \
})

static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm)
{
	return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET);
}

static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
{
	return srcu_dereference_check(kvm->buses[idx], &kvm->srcu,
				      lockdep_is_held(&kvm->slots_lock) ||
				      !refcount_read(&kvm->users_count));
}

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
	int num_vcpus = atomic_read(&kvm->online_vcpus);
	i = array_index_nospec(i, num_vcpus);

	/* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu.  */
	smp_rmb();
	return xa_load(&kvm->vcpu_array, i);
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
	xa_for_each_range(&kvm->vcpu_array, idx, vcpup, 0, \
			  (atomic_read(&kvm->online_vcpus) - 1))

static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
{
	struct kvm_vcpu *vcpu = NULL;
	unsigned long i;

	if (id < 0)
		return NULL;
	if (id < KVM_MAX_VCPUS)
		vcpu = kvm_get_vcpu(kvm, id);
	if (vcpu && vcpu->vcpu_id == id)
		return vcpu;
	kvm_for_each_vcpu(i, vcpu, kvm)
		if (vcpu->vcpu_id == id)
			return vcpu;
	return NULL;
}
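
/*
 * Illustrative sketch: visiting every online vCPU with the iterator above,
 * e.g. to kick all of them out of guest mode (the index must be an
 * unsigned long, as required by xa_for_each_range()):
 *
 *	unsigned long i;
 *	struct kvm_vcpu *vcpu;
 *
 *	kvm_for_each_vcpu(i, vcpu, kvm)
 *		kvm_vcpu_kick(vcpu);
 */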

static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)
{
	return vcpu->vcpu_idx;
}

void kvm_destroy_vcpus(struct kvm *kvm);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

#ifdef __KVM_HAVE_IOAPIC
void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm);
void kvm_arch_post_irq_routing_update(struct kvm *kvm);
#else
static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
{
}
static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm)
{
}
#endif

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
#else
static inline int kvm_irqfd_init(void)
{
	return 0;
}

static inline void kvm_irqfd_exit(void)
{
}
#endif
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
	     struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
bool kvm_get_kvm_safe(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
bool file_is_kvm(struct file *file);
void kvm_put_kvm_no_destroy(struct kvm *kvm);

static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{
	as_id = array_index_nospec(as_id, KVM_ADDRESS_SPACE_NUM);
	return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
				      lockdep_is_held(&kvm->slots_lock) ||
				      !refcount_read(&kvm->users_count));
}

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
	return __kvm_memslots(kvm, 0);
}

static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
{
	int as_id = kvm_arch_vcpu_memslots_id(vcpu);

	return __kvm_memslots(vcpu->kvm, as_id);
}

static inline bool kvm_memslots_empty(struct kvm_memslots *slots)
{
	return RB_EMPTY_ROOT(&slots->gfn_tree);
}

#define kvm_for_each_memslot(memslot, bkt, slots) \
	hash_for_each(slots->id_hash, bkt, memslot, id_node[slots->node_idx]) \
		if (WARN_ON_ONCE(!memslot->npages)) { \
		} else

static inline
struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id)
{
	struct kvm_memory_slot *slot;
	int idx = slots->node_idx;

	hash_for_each_possible(slots->id_hash, slot, id_node[idx], id) {
		if (slot->id == id)
			return slot;
	}

	return NULL;
}

/* Iterator used for walking memslots that overlap a gfn range. */
struct kvm_memslot_iter {
	struct kvm_memslots *slots;
	struct rb_node *node;
	struct kvm_memory_slot *slot;
};

static inline void kvm_memslot_iter_next(struct kvm_memslot_iter *iter)
{
	iter->node = rb_next(iter->node);
	if (!iter->node)
		return;

	iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[iter->slots->node_idx]);
}

static inline void kvm_memslot_iter_start(struct kvm_memslot_iter *iter,
					  struct kvm_memslots *slots,
					  gfn_t start)
{
	int idx = slots->node_idx;
	struct rb_node *tmp;
	struct kvm_memory_slot *slot;

	iter->slots = slots;

	/*
	 * Find the so called "upper bound" of a key - the first node that has
	 * its key strictly greater than the searched one (the start gfn in our case).
	 */
	iter->node = NULL;
	for (tmp = slots->gfn_tree.rb_node; tmp; ) {
		slot = container_of(tmp, struct kvm_memory_slot, gfn_node[idx]);
		if (start < slot->base_gfn) {
			iter->node = tmp;
			tmp = tmp->rb_left;
		} else {
			tmp = tmp->rb_right;
		}
	}

	/*
	 * Find the slot with the lowest gfn that can possibly intersect with
	 * the range, so we'll ideally have slot start <= range start
	 */
	if (iter->node) {
		/*
		 * A NULL previous node means that the very first slot
		 * already has a higher start gfn.
		 * In this case slot start > range start.
		 */
		tmp = rb_prev(iter->node);
		if (tmp)
			iter->node = tmp;
	} else {
		/* a NULL node below means no slots */
		iter->node = rb_last(&slots->gfn_tree);
	}

	if (iter->node) {
		iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[idx]);

		/*
		 * It is possible in the slot start < range start case that the
		 * found slot ends before or at range start (slot end <= range start)
		 * and so it does not overlap the requested range.
		 *
		 * In such non-overlapping case the next slot (if it exists) will
		 * already have slot start > range start, otherwise the logic above
		 * would have found it instead of the current slot.
		 */
		if (iter->slot->base_gfn + iter->slot->npages <= start)
			kvm_memslot_iter_next(iter);
	}
}

static inline bool kvm_memslot_iter_is_valid(struct kvm_memslot_iter *iter, gfn_t end)
{
	if (!iter->node)
		return false;

	/*
	 * If this slot starts beyond or at the end of the range so does
	 * every next one
	 */
	return iter->slot->base_gfn < end;
}

/* Iterate over each memslot at least partially intersecting [start, end) range */
#define kvm_for_each_memslot_in_gfn_range(iter, slots, start, end) \
	for (kvm_memslot_iter_start(iter, slots, start); \
	     kvm_memslot_iter_is_valid(iter, end); \
	     kvm_memslot_iter_next(iter))
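
/*
 * Illustrative sketch: walking all memslots that overlap a gfn range, e.g.
 * from an invalidation-style handler (a sketch only; 'start' and 'end' are
 * assumed gfn_t bounds supplied by the caller):
 *
 *	struct kvm_memslot_iter iter;
 *
 *	kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
 *		struct kvm_memory_slot *slot = iter.slot;
 *
 *		... operate on the overlap of [start, end) with this slot ...
 *	}
 */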

/*
 * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
 * - create a new memory slot
 * - delete an existing memory slot
 * - modify an existing memory slot
 *   -- move it in the guest physical memory space
 *   -- just change its flags
 *
 * Since flags can be changed by some of these operations, the following
 * differentiation is the best we can do for __kvm_set_memory_region():
 */
enum kvm_mr_change {
	KVM_MR_CREATE,
	KVM_MR_DELETE,
	KVM_MR_MOVE,
	KVM_MR_FLAGS_ONLY,
};

int kvm_set_memory_region(struct kvm *kvm,
			  const struct kvm_userspace_memory_region *mem);
int __kvm_set_memory_region(struct kvm *kvm,
			    const struct kvm_userspace_memory_region *mem);
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   const struct kvm_memory_slot *old,
				   struct kvm_memory_slot *new,
				   enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change);
/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);
/* flush memory translations pointing to 'slot' */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot);

int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
			    struct page **pages, int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
				      bool *writable);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
			  bool *writable);
kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
			       bool atomic, bool *async, bool write_fault,
			       bool *writable, hva_t *hva);

void kvm_release_pfn_clean(kvm_pfn_t pfn);
void kvm_release_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_accessed(kvm_pfn_t pfn);

void kvm_release_pfn(kvm_pfn_t pfn, bool dirty);
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			  void *data, unsigned long len);
int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
				 void *data, unsigned int offset,
				 unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
				  void *data, unsigned int offset,
				  unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa, unsigned long len);

#define __kvm_get_guest(kvm, gfn, offset, v) \
({ \
	unsigned long __addr = gfn_to_hva(kvm, gfn); \
	typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \
	int __ret = -EFAULT; \
 \
	if (!kvm_is_error_hva(__addr)) \
		__ret = get_user(v, __uaddr); \
	__ret; \
})

#define kvm_get_guest(kvm, gpa, v) \
({ \
	gpa_t __gpa = gpa; \
	struct kvm *__kvm = kvm; \
 \
	__kvm_get_guest(__kvm, __gpa >> PAGE_SHIFT, \
			offset_in_page(__gpa), v); \
})

#define __kvm_put_guest(kvm, gfn, offset, v) \
({ \
	unsigned long __addr = gfn_to_hva(kvm, gfn); \
	typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \
	int __ret = -EFAULT; \
 \
	if (!kvm_is_error_hva(__addr)) \
		__ret = put_user(v, __uaddr); \
	if (!__ret) \
		mark_page_dirty(kvm, gfn); \
	__ret; \
})

#define kvm_put_guest(kvm, gpa, v) \
({ \
	gpa_t __gpa = gpa; \
	struct kvm *__kvm = kvm; \
 \
	__kvm_put_guest(__kvm, __gpa >> PAGE_SHIFT, \
			offset_in_page(__gpa), v); \
})
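
/*
 * Illustrative sketch: kvm_get_guest()/kvm_put_guest() read or write a
 * single value at a guest physical address; both return 0 on success and
 * -EFAULT otherwise, and kvm_put_guest() marks the page dirty on success:
 *
 *	u32 val;
 *
 *	if (kvm_get_guest(kvm, gpa, val))
 *		return -EFAULT;
 *	val |= 1;
 *	if (kvm_put_guest(kvm, gpa, val))
 *		return -EFAULT;
 */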

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
void mark_page_dirty_in_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);

struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
			     int len);
int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
			       unsigned long len);
int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
			unsigned long len);
int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
			      int offset, int len);
int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
			 unsigned long len);
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);

/**
 * kvm_gfn_to_pfn_cache_init - prepare a cached kernel mapping and HPA for a
 *                             given guest physical address.
 *
 * @kvm:	   pointer to kvm instance.
 * @gpc:	   struct gfn_to_pfn_cache object.
 * @vcpu:	   vCPU to be used for marking pages dirty and to be woken on
 *		   invalidation.
 * @guest_uses_pa: indicates that the resulting host physical PFN is used while
 *		   @vcpu is IN_GUEST_MODE so invalidations should wake it.
 * @kernel_map:    requests a kernel virtual mapping (kmap / memremap).
 * @gpa:	   guest physical address to map.
 * @len:	   sanity check; the range being accessed must fit a single page.
 * @dirty:	   mark the cache dirty immediately.
 *
 * @return:	   0 for success.
 *		   -EINVAL for a mapping which would cross a page boundary.
 *		   -EFAULT for an untranslatable guest physical address.
 *
 * This primes a gfn_to_pfn_cache and links it into the @kvm's list for
 * invalidations to be processed.  Callers are required to use
 * kvm_gfn_to_pfn_cache_check() to ensure that the cache is valid before
 * accessing the target page.
 */
int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
			      struct kvm_vcpu *vcpu, bool guest_uses_pa,
			      bool kernel_map, gpa_t gpa, unsigned long len,
			      bool dirty);

/**
 * kvm_gfn_to_pfn_cache_check - check validity of a gfn_to_pfn_cache.
 *
 * @kvm:	   pointer to kvm instance.
 * @gpc:	   struct gfn_to_pfn_cache object.
 * @gpa:	   current guest physical address to map.
 * @len:	   sanity check; the range being accessed must fit a single page.
 *
 * @return:	   %true if the cache is still valid and the address matches.
 *		   %false if the cache is not valid.
 *
 * Callers outside IN_GUEST_MODE context should hold a read lock on @gpc->lock
 * while calling this function, and then continue to hold the lock until the
 * access is complete.
 *
 * Callers in IN_GUEST_MODE may do so without locking, although they should
 * still hold a read lock on kvm->srcu for the memslot checks.
 */
bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
				gpa_t gpa, unsigned long len);

/**
 * kvm_gfn_to_pfn_cache_refresh - update a previously initialized cache.
 *
 * @kvm:	   pointer to kvm instance.
 * @gpc:	   struct gfn_to_pfn_cache object.
 * @gpa:	   updated guest physical address to map.
 * @len:	   sanity check; the range being accessed must fit a single page.
 * @dirty:	   mark the cache dirty immediately.
 *
 * @return:	   0 for success.
 *		   -EINVAL for a mapping which would cross a page boundary.
 *		   -EFAULT for an untranslatable guest physical address.
 *
 * This will attempt to refresh a gfn_to_pfn_cache. Note that a successful
 * return from this function does not mean the page can be immediately
 * accessed because it may have raced with an invalidation. Callers must
 * still lock and check the cache status, as this function does not return
 * with the lock still held to permit access.
 */
int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
				 gpa_t gpa, unsigned long len, bool dirty);

/**
 * kvm_gfn_to_pfn_cache_unmap - temporarily unmap a gfn_to_pfn_cache.
 *
 * @kvm:	   pointer to kvm instance.
 * @gpc:	   struct gfn_to_pfn_cache object.
 *
 * This unmaps the referenced page and marks it dirty, if appropriate. The
 * cache is left in the invalid state but at least the mapping from GPA to
 * userspace HVA will remain cached and can be reused on a subsequent
 * refresh.
 */
void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);

/**
 * kvm_gfn_to_pfn_cache_destroy - destroy and unlink a gfn_to_pfn_cache.
 *
 * @kvm:	   pointer to kvm instance.
 * @gpc:	   struct gfn_to_pfn_cache object.
 *
 * This removes a cache from the @kvm's list to be processed on MMU notifier
 * invocation.
 */
void kvm_gfn_to_pfn_cache_destroy(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);
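
/*
 * Illustrative sketch of the cache lifecycle (a sketch only; it assumes
 * gpc->lock is the rwlock and gpc->khva the kernel mapping declared in
 * struct gfn_to_pfn_cache):
 *
 *	kvm_gfn_to_pfn_cache_init(kvm, gpc, vcpu, true, true, gpa, len, false);
 *	...
 *	read_lock(&gpc->lock);
 *	while (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpa, len)) {
 *		read_unlock(&gpc->lock);
 *		if (kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa, len, false))
 *			goto err;
 *		read_lock(&gpc->lock);
 *	}
 *	... access the page via gpc->khva ...
 *	read_unlock(&gpc->lock);
 *	...
 *	kvm_gfn_to_pfn_cache_destroy(kvm, gpc);
 */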

void kvm_sigset_activate(struct kvm_vcpu *vcpu);
void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);

void kvm_vcpu_halt(struct kvm_vcpu *vcpu);
bool kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);

#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);
int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc);
void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
#endif

void kvm_inc_notifier_count(struct kvm *kvm, unsigned long start,
			    unsigned long end);
void kvm_dec_notifier_count(struct kvm *kvm, unsigned long start,
			    unsigned long end);

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);

void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
					     struct kvm_memory_slot *slot,
					     gfn_t gfn_offset,
					     unsigned long mask);
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot);

#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
					const struct kvm_memory_slot *memslot);
#else
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log);
int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
		      int *is_dirty, struct kvm_memory_slot **memslot);
#endif

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
			  bool line_status);
int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
			    struct kvm_enable_cap *cap);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state);
#endif

#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry);
#endif

int kvm_arch_hardware_enable(void);
void kvm_arch_hardware_disable(void);
int kvm_arch_hardware_setup(void *opaque);
void kvm_arch_hardware_unsetup(void);
int kvm_arch_check_processor_compat(void *opaque);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_post_init_vm(struct kvm *kvm);
void kvm_arch_pre_destroy_vm(struct kvm *kvm);
int kvm_arch_create_vm_debugfs(struct kvm *kvm);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
/*
 * All architectures that want to use vzalloc currently also
 * need their own kvm_arch_alloc_vm implementation.
 */
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return kzalloc(sizeof(struct kvm), GFP_KERNEL_ACCOUNT);
}
#endif

static inline void __kvm_arch_free_vm(struct kvm *kvm)
{
	kvfree(kvm);
}

#ifndef __KVM_HAVE_ARCH_VM_FREE
static inline void kvm_arch_free_vm(struct kvm *kvm)
{
	__kvm_arch_free_vm(kvm);
}
#endif

#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
{
	return -ENOTSUPP;
}
#endif

#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
#else
static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{
}

static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{
	return false;
}
#endif
#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
void kvm_arch_start_assignment(struct kvm *kvm);
void kvm_arch_end_assignment(struct kvm *kvm);
bool kvm_arch_has_assigned_device(struct kvm *kvm);
#else
static inline void kvm_arch_start_assignment(struct kvm *kvm)
{
}

static inline void kvm_arch_end_assignment(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
{
	return false;
}
#endif

static inline struct rcuwait *kvm_arch_vcpu_get_wait(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
	return vcpu->arch.waitp;
#else
	return &vcpu->wait;
#endif
}

/*
 * Wake a vCPU if necessary, but don't do any stats/metadata updates.  Returns
 * true if the vCPU was blocking and was awakened, false otherwise.
 */
static inline bool __kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
{
	return !!rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu));
}

static inline bool kvm_vcpu_is_blocking(struct kvm_vcpu *vcpu)
{
	return rcuwait_active(kvm_arch_vcpu_get_wait(vcpu));
}

#ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED
/*
 * returns true if the virtual interrupt controller is initialized and
 * ready to accept virtual IRQ. On some architectures the virtual interrupt
 * controller is dynamically instantiated and this is not always true.
 */
bool kvm_arch_intc_initialized(struct kvm *kvm);
#else
static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
{
	return true;
}
#endif

#ifdef CONFIG_GUEST_PERF_EVENTS
unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu);

void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void));
void kvm_unregister_perf_callbacks(void);
#else
static inline void kvm_register_perf_callbacks(void *ign) {}
static inline void kvm_unregister_perf_callbacks(void) {}
#endif

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);

bool kvm_is_reserved_pfn(kvm_pfn_t pfn);
bool kvm_is_zone_device_pfn(kvm_pfn_t pfn);

struct kvm_irq_ack_notifier {
	struct hlist_node link;
	unsigned gsi;
	void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries, int gsi);
int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);

int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
		int irq_source_id, int level, bool line_status);
int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
			      struct kvm *kvm, int irq_source_id,
			      int level, bool line_status);
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_notify_acked_gsi(struct kvm *kvm, int gsi);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				     struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);

/*
 * Returns a pointer to the memslot if it contains gfn.
 * Otherwise returns NULL.
 */
static inline struct kvm_memory_slot *
try_get_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
	if (!slot)
		return NULL;

	if (gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages)
		return slot;
	else
		return NULL;
}

/*
 * Returns a pointer to the memslot that contains gfn. Otherwise returns NULL.
 *
 * With "approx" set returns the memslot also when the address falls
 * in a hole. In that case one of the memslots bordering the hole is
 * returned.
 */
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn, bool approx)
{
	struct kvm_memory_slot *slot;
	struct rb_node *node;
	int idx = slots->node_idx;

	slot = NULL;
	for (node = slots->gfn_tree.rb_node; node; ) {
		slot = container_of(node, struct kvm_memory_slot, gfn_node[idx]);
		if (gfn >= slot->base_gfn) {
			if (gfn < slot->base_gfn + slot->npages)
				return slot;
			node = node->rb_right;
		} else
			node = node->rb_left;
	}

	return approx ? slot : NULL;
}

static inline struct kvm_memory_slot *
____gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn, bool approx)
{
	struct kvm_memory_slot *slot;

	slot = (struct kvm_memory_slot *)atomic_long_read(&slots->last_used_slot);
	slot = try_get_memslot(slot, gfn);
	if (slot)
		return slot;

	slot = search_memslots(slots, gfn, approx);
	if (slot) {
		atomic_long_set(&slots->last_used_slot, (unsigned long)slot);
		return slot;
	}

	return NULL;
}

/*
 * __gfn_to_memslot() and its descendants are here to allow arch code to inline
 * the lookups in hot paths.
 *
 * gfn_to_memslot() itself isn't here as an inline because that would
 * bloat other code too much.
 */
static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
	return ____gfn_to_memslot(slots, gfn, false);
}

static inline unsigned long
__gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
{
	/*
	 * The index was checked originally in search_memslots.  To avoid
	 * that a malicious guest builds a Spectre gadget out of e.g. page
	 * table walks, do not let the processor speculate loads outside
	 * the guest's registered memslots.
	 */
	unsigned long offset = gfn - slot->base_gfn;
	offset = array_index_nospec(offset, slot->npages);
	return slot->userspace_addr + offset * PAGE_SIZE;
}

static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_memslot(kvm, gfn)->id;
}

static inline gfn_t
hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
{
	gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

	return slot->base_gfn + gfn_offset;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
{
	return (hpa_t)pfn << PAGE_SHIFT;
}

static inline struct page *kvm_vcpu_gpa_to_page(struct kvm_vcpu *vcpu,
						gpa_t gpa)
{
	return kvm_vcpu_gfn_to_page(vcpu, gpa_to_gfn(gpa));
}

static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
{
	unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));

	return kvm_is_error_hva(hva);
}

enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};

struct kvm_stat_data {
	struct kvm *kvm;
	const struct _kvm_stats_desc *desc;
	enum kvm_stat_kind kind;
};

struct _kvm_stats_desc {
	struct kvm_stats_desc desc;
	char name[KVM_STATS_NAME_SIZE];
};

#define STATS_DESC_COMMON(type, unit, base, exp, sz, bsz) \
	.flags = type | unit | base | \
		 BUILD_BUG_ON_ZERO(type & ~KVM_STATS_TYPE_MASK) | \
		 BUILD_BUG_ON_ZERO(unit & ~KVM_STATS_UNIT_MASK) | \
		 BUILD_BUG_ON_ZERO(base & ~KVM_STATS_BASE_MASK), \
	.exponent = exp, \
	.size = sz, \
	.bucket_size = bsz

#define VM_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
	{ \
		{ \
			STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
			.offset = offsetof(struct kvm_vm_stat, generic.stat) \
		}, \
		.name = #stat, \
	}
#define VCPU_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
	{ \
		{ \
			STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
			.offset = offsetof(struct kvm_vcpu_stat, generic.stat) \
		}, \
		.name = #stat, \
	}
#define VM_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
	{ \
		{ \
			STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
			.offset = offsetof(struct kvm_vm_stat, stat) \
		}, \
		.name = #stat, \
	}
#define VCPU_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
	{ \
		{ \
			STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
			.offset = offsetof(struct kvm_vcpu_stat, stat) \
		}, \
		.name = #stat, \
	}

#define STATS_DESC(SCOPE, stat, type, unit, base, exp, sz, bsz) \
	SCOPE##_STATS_DESC(stat, type, unit, base, exp, sz, bsz)

#define STATS_DESC_CUMULATIVE(SCOPE, name, unit, base, exponent) \
	STATS_DESC(SCOPE, name, KVM_STATS_TYPE_CUMULATIVE, \
		   unit, base, exponent, 1, 0)
#define STATS_DESC_INSTANT(SCOPE, name, unit, base, exponent) \
	STATS_DESC(SCOPE, name, KVM_STATS_TYPE_INSTANT, \
		   unit, base, exponent, 1, 0)
#define STATS_DESC_PEAK(SCOPE, name, unit, base, exponent) \
	STATS_DESC(SCOPE, name, KVM_STATS_TYPE_PEAK, \
		   unit, base, exponent, 1, 0)
#define STATS_DESC_LINEAR_HIST(SCOPE, name, unit, base, exponent, sz, bsz) \
	STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LINEAR_HIST, \
		   unit, base, exponent, sz, bsz)
#define STATS_DESC_LOG_HIST(SCOPE, name, unit, base, exponent, sz) \
	STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LOG_HIST, \
		   unit, base, exponent, sz, 0)

/* Cumulative counter, read/write */
#define STATS_DESC_COUNTER(SCOPE, name) \
	STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_NONE, \
			      KVM_STATS_BASE_POW10, 0)
/* Instantaneous counter, read only */
#define STATS_DESC_ICOUNTER(SCOPE, name) \
	STATS_DESC_INSTANT(SCOPE, name, KVM_STATS_UNIT_NONE, \
			   KVM_STATS_BASE_POW10, 0)
/* Peak counter, read/write */
#define STATS_DESC_PCOUNTER(SCOPE, name) \
	STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_NONE, \
			KVM_STATS_BASE_POW10, 0)

/* Cumulative time in nanosecond */
#define STATS_DESC_TIME_NSEC(SCOPE, name) \
	STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_SECONDS, \
			      KVM_STATS_BASE_POW10, -9)
/* Linear histogram for time in nanosecond */
#define STATS_DESC_LINHIST_TIME_NSEC(SCOPE, name, sz, bsz) \
	STATS_DESC_LINEAR_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS, \
			       KVM_STATS_BASE_POW10, -9, sz, bsz)
/* Logarithmic histogram for time in nanosecond */
#define STATS_DESC_LOGHIST_TIME_NSEC(SCOPE, name, sz) \
	STATS_DESC_LOG_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS, \
			    KVM_STATS_BASE_POW10, -9, sz)

#define KVM_GENERIC_VM_STATS() \
	STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush), \
	STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush_requests)

#define KVM_GENERIC_VCPU_STATS() \
	STATS_DESC_COUNTER(VCPU_GENERIC, halt_successful_poll), \
	STATS_DESC_COUNTER(VCPU_GENERIC, halt_attempted_poll), \
	STATS_DESC_COUNTER(VCPU_GENERIC, halt_poll_invalid), \
	STATS_DESC_COUNTER(VCPU_GENERIC, halt_wakeup), \
	STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_success_ns), \
	STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_ns), \
	STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_wait_ns), \
	STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_success_hist, \
				     HALT_POLL_HIST_COUNT), \
	STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_hist, \
				     HALT_POLL_HIST_COUNT), \
	STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_wait_hist, \
				     HALT_POLL_HIST_COUNT), \
	STATS_DESC_ICOUNTER(VCPU_GENERIC, blocking)
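
/*
 * Illustrative sketch: an architecture builds its descriptor table by
 * combining the generic block with its own entries ('example_exits' is a
 * hypothetical u64 field in its struct kvm_vcpu_stat):
 *
 *	const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
 *		KVM_GENERIC_VCPU_STATS(),
 *		STATS_DESC_COUNTER(VCPU, example_exits)
 *	};
 */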

extern struct dentry *kvm_debugfs_dir;

ssize_t kvm_stats_read(char *id, const struct kvm_stats_header *header,
		       const struct _kvm_stats_desc *desc,
		       void *stats, size_t size_stats,
		       char __user *user_buffer, size_t size, loff_t *offset);

/**
 * kvm_stats_linear_hist_update() - Update bucket value for linear histogram
 * statistics data.
 *
 * @data: start address of the stats data
 * @size: the number of buckets of the stats data
 * @value: the new value used to update the linear histogram's bucket
 * @bucket_size: the size (width) of a bucket
 */
static inline void kvm_stats_linear_hist_update(u64 *data, size_t size,
						u64 value, size_t bucket_size)
{
	size_t index = div64_u64(value, bucket_size);

	index = min(index, size - 1);
	++data[index];
}

/**
 * kvm_stats_log_hist_update() - Update bucket value for logarithmic histogram
 * statistics data.
 *
 * @data: start address of the stats data
 * @size: the number of buckets of the stats data
 * @value: the new value used to update the logarithmic histogram's bucket
 */
static inline void kvm_stats_log_hist_update(u64 *data, size_t size, u64 value)
{
	size_t index = fls64(value);

	index = min(index, size - 1);
	++data[index];
}

#define KVM_STATS_LINEAR_HIST_UPDATE(array, value, bsize) \
	kvm_stats_linear_hist_update(array, ARRAY_SIZE(array), value, bsize)
#define KVM_STATS_LOG_HIST_UPDATE(array, value) \
	kvm_stats_log_hist_update(array, ARRAY_SIZE(array), value)
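
/*
 * Illustrative sketch: updating a histogram stat declared with
 * STATS_DESC_LOGHIST_TIME_NSEC(), e.g. the generic halt_wait_hist bucket
 * array ('start' and 'end' are assumed ktime_t timestamps from the caller):
 *
 *	u64 ns = ktime_to_ns(ktime_sub(end, start));
 *
 *	KVM_STATS_LOG_HIST_UPDATE(vcpu->stat.generic.halt_wait_hist, ns);
 */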

extern const struct kvm_stats_header kvm_vm_stats_header;
extern const struct _kvm_stats_desc kvm_vm_stats_desc[];
extern const struct kvm_stats_header kvm_vcpu_stats_header;
extern const struct _kvm_stats_desc kvm_vcpu_stats_desc[];

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
{
	if (unlikely(kvm->mmu_notifier_count))
		return 1;
	/*
	 * Ensure the read of mmu_notifier_count happens before the read
	 * of mmu_notifier_seq.  This interacts with the smp_wmb() in
	 * mmu_notifier_invalidate_range_end to make sure that the caller
	 * either sees the old (non-zero) value of mmu_notifier_count or
	 * the new (incremented) value of mmu_notifier_seq.
	 * PowerPC Book3s HV KVM calls this under a per-page lock
	 * rather than under kvm->mmu_lock, for scalability, so
	 * can't rely on kvm->mmu_lock to keep things ordered.
	 */
	smp_rmb();
	if (kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}

static inline int mmu_notifier_retry_hva(struct kvm *kvm,
					 unsigned long mmu_seq,
					 unsigned long hva)
{
	lockdep_assert_held(&kvm->mmu_lock);
	/*
	 * If mmu_notifier_count is non-zero, then the range maintained by
	 * kvm_mmu_notifier_invalidate_range_start contains all addresses that
	 * might be being invalidated. Note that it may include some false
	 * positives, due to shortcuts when handling concurrent invalidations.
	 */
	if (unlikely(kvm->mmu_notifier_count) &&
	    hva >= kvm->mmu_notifier_range_start &&
	    hva < kvm->mmu_notifier_range_end)
		return 1;
	if (kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}
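
/*
 * Illustrative sketch of the retry pattern (simplified from how arch page
 * fault handlers use these helpers; error handling elided):
 *
 *	mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(kvm, gfn);	(may sleep, so mmu_lock is not held)
 *	...
 *	write_lock(&kvm->mmu_lock);
 *	if (mmu_notifier_retry_hva(kvm, mmu_seq, hva))
 *		goto out_unlock;	(raced with an invalidation; retry)
 *	... safe to install the translation ...
 */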
#endif

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING

#define KVM_MAX_IRQ_ROUTES 4096 /* might need extension/rework in the future */

bool kvm_arch_can_set_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *entries,
			unsigned nr,
			unsigned flags);
int kvm_set_routing_entry(struct kvm *kvm,
			  struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue);
void kvm_free_irq_routing(struct kvm *kvm);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *);
#else
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}
#endif

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm)
{
}
#endif

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

void kvm_arch_irq_routing_update(struct kvm *kvm);

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
	/*
	 * Ensure the rest of the request is published to kvm_check_request's
	 * caller.  Paired with the smp_mb__after_atomic in kvm_check_request.
	 */
	smp_wmb();
	set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
{
	return READ_ONCE(vcpu->requests);
}

static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu)
{
	return test_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

static inline void kvm_clear_request(int req, struct kvm_vcpu *vcpu)
{
	clear_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
	if (kvm_test_request(req, vcpu)) {
		kvm_clear_request(req, vcpu);

		/*
		 * Ensure the rest of the request is visible to
		 * kvm_check_request's caller.  Paired with the smp_wmb in
		 * kvm_make_request.
		 */
		smp_mb__after_atomic();
		return true;
	} else {
		return false;
	}
}
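
/*
 * Illustrative sketch of the request/response pattern: a producer posts a
 * request and kicks the vCPU; the vCPU consumes it from its run loop
 * (KVM_REQ_UNBLOCK is used here purely as an example request):
 *
 *	producer:
 *		kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
 *		kvm_vcpu_kick(vcpu);
 *
 *	consumer (vcpu run loop):
 *		if (kvm_request_pending(vcpu)) {
 *			if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu))
 *				... handle it ...
 *		}
 */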

extern bool kvm_rebooting;

extern unsigned int halt_poll_ns;
extern unsigned int halt_poll_ns_grow;
extern unsigned int halt_poll_ns_grow_start;
extern unsigned int halt_poll_ns_shrink;

struct kvm_device {
	const struct kvm_device_ops *ops;
	struct kvm *kvm;
	void *private;
	struct list_head vm_node;
};

/* create, destroy, and name are mandatory */
struct kvm_device_ops {
	const char *name;

	/*
	 * create is called holding kvm->lock and any operations not suitable
	 * to do while holding the lock should be deferred to init (see
	 * below).
	 */
	int (*create)(struct kvm_device *dev, u32 type);

	/*
	 * init is called after create if create is successful and is called
	 * outside of holding kvm->lock.
	 */
	void (*init)(struct kvm_device *dev);

	/*
	 * Destroy is responsible for freeing dev.
	 *
	 * Destroy may be called before or after destructors are called
	 * on emulated I/O regions, depending on whether a reference is
	 * held by a vcpu or other kvm component that gets destroyed
	 * after the emulated I/O.
	 */
	void (*destroy)(struct kvm_device *dev);

	/*
	 * Release is an alternative method to free the device. It is
	 * called when the device file descriptor is closed. Once
	 * release is called, the destroy method will not be called
	 * anymore as the device is removed from the device list of
	 * the VM. kvm->lock is held.
	 */
	void (*release)(struct kvm_device *dev);

	int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
		      unsigned long arg);
	int (*mmap)(struct kvm_device *dev, struct vm_area_struct *vma);
};
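
/*
 * Illustrative sketch of a minimal device (hypothetical; per the comment
 * above, create, destroy and name are the mandatory hooks, and destroy
 * must free dev):
 *
 *	static int example_dev_create(struct kvm_device *dev, u32 type)
 *	{
 *		dev->private = NULL;
 *		return 0;
 *	}
 *
 *	static void example_dev_destroy(struct kvm_device *dev)
 *	{
 *		kfree(dev);
 *	}
 *
 *	static struct kvm_device_ops example_dev_ops = {
 *		.name = "example",
 *		.create = example_dev_create,
 *		.destroy = example_dev_destroy,
 *	};
 *
 * registered via kvm_register_device_ops(&example_dev_ops, type).
 */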

void kvm_device_get(struct kvm_device *dev);
void kvm_device_put(struct kvm_device *dev);
struct kvm_device *kvm_device_from_filp(struct file *filp);
int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type);
void kvm_unregister_device_ops(u32 type);

extern struct kvm_device_ops kvm_mpic_ops;
extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
extern struct kvm_device_ops kvm_arm_vgic_v3_ops;

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.in_spin_loop = val;
}
static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.dy_eligible = val;
}

#else

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}
#endif

static inline bool kvm_is_visible_memslot(struct kvm_memory_slot *memslot)
{
	return (memslot && memslot->id < KVM_USER_MEM_SLOTS &&
		!(memslot->flags & KVM_MEMSLOT_INVALID));
}

struct kvm_vcpu *kvm_get_running_vcpu(void);
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
bool kvm_arch_has_irq_bypass(void);
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
				     struct irq_bypass_producer *);
void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *,
				      struct irq_bypass_producer *);
void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *);
void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
				  uint32_t guest_irq, bool set);
bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *,
				  struct kvm_kernel_irq_routing_entry *);
#endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */

#ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS
/* If we wakeup during the poll time, was it a successful poll? */
static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
{
	return vcpu->valid_wakeup;
}

#else
static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
{
	return true;
}
#endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */

#ifdef CONFIG_HAVE_KVM_NO_POLL
/* Callback that tells if we must not poll */
bool kvm_arch_no_poll(struct kvm_vcpu *vcpu);
#else
static inline bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
{
	return false;
}
#endif /* CONFIG_HAVE_KVM_NO_POLL */

#ifdef CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL
long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg);
#else
static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
					     unsigned int ioctl,
					     unsigned long arg)
{
	return -ENOIOCTLCMD;
}
#endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */

void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
					    unsigned long start, unsigned long end);

#ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
#else
static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
	return 0;
}
#endif /* CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE */

typedef int (*kvm_vm_thread_fn_t)(struct kvm *kvm, uintptr_t data);

int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
				uintptr_t data, const char *name,
				struct task_struct **thread_ptr);

#ifdef CONFIG_KVM_XFER_TO_GUEST_WORK
static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu)
{
	vcpu->run->exit_reason = KVM_EXIT_INTR;
	vcpu->stat.signal_exits++;
}
#endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */

/*
 * This defines how many reserved entries we want to keep before we
 * kick the vcpu to the userspace to avoid dirty ring full.  This
 * value can be tuned to higher if e.g. PML is enabled on the host.
 */
#define KVM_DIRTY_RING_RSVD_ENTRIES 64

/* Max number of entries allowed for each kvm dirty ring */
#define KVM_DIRTY_RING_MAX_ENTRIES 65536

#endif