// SPDX-License-Identifier: GPL-2.0
/*
 * Emulation helpers for 32-bit (AArch32) guests: condition code
 * checking, IT-state advancement and exception injection.
 *
 * Based on arch/arm/kvm/emulate.c.
 */
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
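/*
 * Condition code lookup table (as in arch/arm/kernel/opcodes.c).
 *
 * The index into the table is the instruction's 4-bit condition code
 * (EQ, NE, CS, CC, MI, PL, VS, VC, HI, LS, GE, LT, GT, LE, AL, NV);
 * the bit position within each 16-bit entry is the current NZCV
 * flags nibble from the CPSR. A set bit means the condition passes.
 */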
static const unsigned short cc_map[16] = {
	0xF0F0,			/* EQ == Z set            */
	0x0F0F,			/* NE                     */
	0xCCCC,			/* CS == C set            */
	0x3333,			/* CC                     */
	0xFF00,			/* MI == N set            */
	0x00FF,			/* PL                     */
	0xAAAA,			/* VS == V set            */
	0x5555,			/* VC                     */
	0x0C0C,			/* HI == C set && Z clear */
	0xF3F3,			/* LS == C clear || Z set */
	0xAA55,			/* GE == (N==V)           */
	0x55AA,			/* LT == (N!=V)           */
	0x0A05,			/* GT == (!Z && (N==V))   */
	0xF5FA,			/* LE == (Z || (N!=V))    */
	0xFFFF,			/* AL always              */
	0			/* NV                     */
};
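/*
 * Check whether a trapped 32-bit instruction passed its condition
 * check, i.e. whether it should have been executed at all.
 */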
bool __hyp_text kvm_condition_valid32(const struct kvm_vcpu *vcpu)
{
	unsigned long cpsr;
	u32 cpsr_cond;
	int cond;

	/* Top two bits non-zero?  Unconditional. */
	if (kvm_vcpu_get_hsr(vcpu) >> 30)
		return true;

	/* Is the condition field valid? */
	cond = kvm_vcpu_get_condition(vcpu);
	if (cond == 0xE)
		return true;

	cpsr = *vcpu_cpsr(vcpu);

	if (cond < 0) {
		/* This can happen in Thumb mode: examine the IT state. */
		unsigned long it;

		it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);

		/* it == 0 => unconditional. */
		if (it == 0)
			return true;

		/* The cond for this insn works out as the top 4 bits. */
		cond = (it >> 4);
	}

	cpsr_cond = cpsr >> 28;

	if (!((cc_map[cond] >> cpsr_cond) & 1))
		return false;

	return true;
}
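/**
 * kvm_adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
 * @vcpu:	The VCPU pointer
 *
 * When exceptions occur while instructions are executed in Thumb IF-THEN
 * blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have
 * to do this little bit of work manually. The fields map like this:
 *
 * IT[7:0] -> CPSR[26:25],CPSR[15:10]
 */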
static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
{
	unsigned long itbits, cond;
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	bool is_arm = !(cpsr & PSR_AA32_T_BIT);

	if (is_arm || !(cpsr & PSR_AA32_IT_MASK))
		return;

	cond = (cpsr & 0xe000) >> 13;
	itbits = (cpsr & 0x1c00) >> (10 - 2);
	itbits |= (cpsr & (0x3 << 25)) >> 25;

	/* Perform ITAdvance (see the ITAdvance() pseudocode in the ARM ARM) */
	if ((itbits & 0x7) == 0)
		itbits = cond = 0;
	else
		itbits = (itbits << 1) & 0x1f;

	cpsr &= ~PSR_AA32_IT_MASK;
	cpsr |= cond << 13;
	cpsr |= (itbits & 0x1c) << (10 - 2);
	cpsr |= (itbits & 0x3) << 25;
	*vcpu_cpsr(vcpu) = cpsr;
}
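/**
 * kvm_skip_instr32 - skip a trapped instruction and proceed to the next
 * @vcpu:		The VCPU pointer
 * @is_wide_instr:	true if the trapped instruction is 32 bits wide
 */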
void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
	bool is_thumb;

	is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_AA32_T_BIT);
	if (is_thumb && !is_wide_instr)
		*vcpu_pc(vcpu) += 2;
	else
		*vcpu_pc(vcpu) += 4;
	kvm_adjust_itstate(vcpu);
}
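/*
 * Offsets added to the faulting PC to form the exception return
 * address placed in LR, indexed by vector offset / 4 and by whether
 * the vCPU was in Thumb state (table G1-10 in the ARMv8 ARM, DDI 0487).
 */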
static const u8 return_offsets[8][2] = {
	[0] = { 0, 0 },		/* Reset, unused */
	[1] = { 4, 2 },		/* Undefined */
	[2] = { 0, 0 },		/* SVC, unused */
	[3] = { 4, 4 },		/* Prefetch abort */
	[4] = { 8, 8 },		/* Data abort */
	[5] = { 0, 0 },		/* HVC, unused */
	[6] = { 4, 4 },		/* IRQ, unused */
	[7] = { 4, 4 },		/* FIQ, unused */
};

static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
{
	unsigned long cpsr;
	unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
	bool is_thumb = (new_spsr_value & PSR_AA32_T_BIT);
	u32 return_offset = return_offsets[vect_offset >> 2][is_thumb];
	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);

	/* Enter the target exception mode with IRQs masked. */
	cpsr = mode | PSR_AA32_I_BIT;

	if (sctlr & (1 << 30))	/* SCTLR.TE: take exceptions in Thumb state */
		cpsr |= PSR_AA32_T_BIT;
	if (sctlr & (1 << 25))	/* SCTLR.EE: big-endian exception entry */
		cpsr |= PSR_AA32_E_BIT;

	*vcpu_cpsr(vcpu) = cpsr;

	/* Note: These now point to the banked copies */
	vcpu_write_spsr(vcpu, new_spsr_value);
	*vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;

	/* Branch to the exception vector */
	if (sctlr & (1 << 13))	/* SCTLR.V: high exception vectors */
		vect_offset += 0xffff0000;
	else
		vect_offset += vcpu_cp15(vcpu, c12_VBAR);

	*vcpu_pc(vcpu) = vect_offset;
}

void kvm_inject_undef32(struct kvm_vcpu *vcpu)
{
	prepare_fault32(vcpu, PSR_AA32_MODE_UND, 4);
}
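/*
 * Modelled after the TakeDataAbortException() and
 * TakePrefetchAbortException() pseudocode in the ARM ARM.
 */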
static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
			 unsigned long addr)
{
	u32 vect_offset;
	u32 *far, *fsr;
	bool is_lpae;

	if (is_pabt) {
		/* Prefetch abort: use the IFAR/IFSR pair. */
		vect_offset = 12;
		far = &vcpu_cp15(vcpu, c6_IFAR);
		fsr = &vcpu_cp15(vcpu, c5_IFSR);
	} else {
		/* Data abort: use the DFAR/DFSR pair. */
		vect_offset = 16;
		far = &vcpu_cp15(vcpu, c6_DFAR);
		fsr = &vcpu_cp15(vcpu, c5_DFSR);
	}

	prepare_fault32(vcpu, PSR_AA32_MODE_ABT | PSR_AA32_A_BIT, vect_offset);

	*far = addr;

	/* Give the guest an IMPLEMENTATION DEFINED exception */
	is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);	/* TTBCR.EAE */
	if (is_lpae)
		*fsr = 1 << 9 | 0x34;	/* long-descriptor FSR format */
	else
		*fsr = 0x14;		/* short-descriptor FSR format */
}

void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr)
{
	inject_abt32(vcpu, false, addr);
}

void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr)
{
	inject_abt32(vcpu, true, addr);
}