/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASM_KVM_CACHE_REGS_H
#define ASM_KVM_CACHE_REGS_H

#include <linux/kvm_host.h>

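/*
 * Control register bits that KVM may let the guest own, i.e. that the guest
 * can modify without triggering a VM-Exit.  A read of a possibly guest-owned
 * bit must refresh the cache from hardware before it can be trusted.
 */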
#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
#define KVM_POSSIBLE_CR4_GUEST_BITS				  \
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
	 | X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD | X86_CR4_FSGSBASE)

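/*
 * Generate kvm_<reg>_read()/kvm_<reg>_write() accessors for a general
 * purpose register.  Note that these access vcpu->arch.regs directly and do
 * not touch the regs_avail/regs_dirty bookkeeping used by the cached
 * accessors below.
 */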
#define BUILD_KVM_GPR_ACCESSORS(lname, uname)				      \
static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
{									      \
	return vcpu->arch.regs[VCPU_REGS_##uname];			      \
}									      \
static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu,	      \
						unsigned long val)	      \
{									      \
	vcpu->arch.regs[VCPU_REGS_##uname] = val;			      \
}
BUILD_KVM_GPR_ACCESSORS(rax, RAX)
BUILD_KVM_GPR_ACCESSORS(rbx, RBX)
BUILD_KVM_GPR_ACCESSORS(rcx, RCX)
BUILD_KVM_GPR_ACCESSORS(rdx, RDX)
BUILD_KVM_GPR_ACCESSORS(rbp, RBP)
BUILD_KVM_GPR_ACCESSORS(rsi, RSI)
BUILD_KVM_GPR_ACCESSORS(rdi, RDI)
#ifdef CONFIG_X86_64
BUILD_KVM_GPR_ACCESSORS(r8,  R8)
BUILD_KVM_GPR_ACCESSORS(r9,  R9)
BUILD_KVM_GPR_ACCESSORS(r10, R10)
BUILD_KVM_GPR_ACCESSORS(r11, R11)
BUILD_KVM_GPR_ACCESSORS(r12, R12)
BUILD_KVM_GPR_ACCESSORS(r13, R13)
BUILD_KVM_GPR_ACCESSORS(r14, R14)
BUILD_KVM_GPR_ACCESSORS(r15, R15)
#endif

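/*
 * regs_avail tracks whether a register's cached value is up to date with
 * respect to hardware, regs_dirty whether the cached value must be written
 * back to hardware.  A dirty register is by definition also available, so
 * marking a register dirty sets both bits and clearing availability clears
 * both bits.
 */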
static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
					     enum kvm_reg reg)
{
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu,
					 enum kvm_reg reg)
{
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}

static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
					       enum kvm_reg reg)
{
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline void kvm_register_clear_available(struct kvm_vcpu *vcpu,
						enum kvm_reg reg)
{
	__clear_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
	__clear_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}

static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
					   enum kvm_reg reg)
{
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}
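
/*
 * The "raw" helpers read and write the full unsigned long value of a
 * register irrespective of the vCPU's current mode, lazily filling the
 * cache from hardware via the kvm_x86_cache_reg() static call when the
 * register isn't yet available.  Writes mark the register dirty so that it
 * is propagated back to hardware before the next VM-Entry.
 */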
static inline unsigned long kvm_register_read_raw(struct kvm_vcpu *vcpu, int reg)
{
	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
		return 0;

	if (!kvm_register_is_available(vcpu, reg))
		static_call(kvm_x86_cache_reg)(vcpu, reg);

	return vcpu->arch.regs[reg];
}

static inline void kvm_register_write_raw(struct kvm_vcpu *vcpu, int reg,
					  unsigned long val)
{
	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
		return;

	vcpu->arch.regs[reg] = val;
	kvm_register_mark_dirty(vcpu, reg);
}

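/*
 * RIP and RSP may live in hardware state (e.g. the VMCS on Intel CPUs) and
 * so go through the cached "raw" accessors rather than being read directly
 * from vcpu->arch.regs.
 */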
static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read_raw(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write_raw(vcpu, VCPU_REGS_RIP, val);
}

static inline unsigned long kvm_rsp_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read_raw(vcpu, VCPU_REGS_RSP);
}

static inline void kvm_rsp_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write_raw(vcpu, VCPU_REGS_RSP, val);
}

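/*
 * PDPTRs (page-directory-pointer-table entries) are used with PAE paging.
 * The might_sleep() documents that refilling the PDPTR cache must happen in
 * a sleepable context.
 */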
static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
	might_sleep();  /* on svm pages are in memory */

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_PDPTR);

	return vcpu->arch.walk_mmu->pdptrs[index];
}

static inline void kvm_pdptr_write(struct kvm_vcpu *vcpu, int index, u64 value)
{
	vcpu->arch.walk_mmu->pdptrs[index] = value;
}

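/*
 * Reads of CR0/CR4 hit hardware only when a requested bit is possibly
 * guest-owned (i.e. the guest may toggle it without a VM-Exit) and the
 * cached value is stale; all other bits are always valid in the cache.
 */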
static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;

	if ((tmask & vcpu->arch.cr0_guest_owned_bits) &&
	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR0))
		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR0);

	return vcpu->arch.cr0 & mask;
}

static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, ~0UL);
}

static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;

	if ((tmask & vcpu->arch.cr4_guest_owned_bits) &&
	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR4))
		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR4);

	return vcpu->arch.cr4 & mask;
}

static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
	if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR3);

	return vcpu->arch.cr3;
}

static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, ~0UL);
}

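/*
 * Assemble the 64-bit EDX:EAX operand pair used by instructions such as
 * WRMSR; the "& -1u" masks each GPR down to its low 32 bits.
 */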
static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
{
	return (kvm_rax_read(vcpu) & -1u)
		| ((u64)(kvm_rdx_read(vcpu) & -1u) << 32);
}

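/*
 * "Guest mode" refers to nested virtualization: HF_GUEST_MASK is set while
 * the vCPU is running an L2 guest on behalf of the L1 hypervisor.  Loading
 * the EOI exit bitmap is deferred while in guest mode and performed on exit
 * via KVM_REQ_LOAD_EOI_EXITMAP.
 */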
static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags |= HF_GUEST_MASK;
	vcpu->stat.guest_mode = 1;
}

static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags &= ~HF_GUEST_MASK;

	if (vcpu->arch.load_eoi_exitmap_pending) {
		vcpu->arch.load_eoi_exitmap_pending = false;
		kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
	}

	vcpu->stat.guest_mode = 0;
}

static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_GUEST_MASK;
}

static inline bool is_smm(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_SMM_MASK;
}

#endif /* ASM_KVM_CACHE_REGS_H */