#include <linux/kvm_host.h>
#include <linux/random.h>
#include <linux/memblock.h>
#include <asm/alternative.h>
#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/kvm_mmu.h>
#include <asm/memory.h>

/* The LSB of the HYP VA tag */
static u8 tag_lsb;
/* The HYP VA tag value, including the region bit */
static u64 tag_val;
static u64 va_mask;

/*
 * Compute a HYP VA from a kernel linear-map VA, using the same
 * mask-and-tag scheme as kern_hyp_va().
 */
static u64 __early_kern_hyp_va(u64 addr)
{
	addr &= va_mask;
	addr |= tag_val << tag_lsb;
	return addr;
}

/*
 * Store the hyp VA <-> PA offset in hyp_physvirt_offset.
 */
static void init_hyp_physvirt_offset(void)
{
	u64 kern_va, hyp_va;

	/* Compute the offset from the hyp VA and PA of a hyp text symbol. */
	kern_va = (u64)lm_alias(__hyp_text_start);
	hyp_va = __early_kern_hyp_va(kern_va);
	hyp_physvirt_offset = (s64)__pa(kern_va) - (s64)hyp_va;
}

/*
 * The generated hyp VAs have the following layout, with V == vabits_actual:
 *
 *  63 ... V |     V-1    | V-2 .. tag_lsb | tag_lsb - 1 .. 0
 *  ---------------------------------------------------------
 * | 0000000 | hyp_va_msb |   random tag   |  kern linear VA |
 *           |--------- tag_val -----------|----- va_mask ---|
 *
 * Bit (V-1) is the opposite of the corresponding bit of the idmap
 * address, so the HYP VA range cannot overlap the idmap.
 */
__init void kvm_compute_layout(void)
{
	phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
	u64 hyp_va_msb;

	/* Put the HYP VAs in the half of the VA space not used by the idmap. */
	hyp_va_msb  = idmap_addr & BIT(vabits_actual - 1);
	hyp_va_msb ^= BIT(vabits_actual - 1);

	tag_lsb = fls64((u64)phys_to_virt(memblock_start_of_DRAM()) ^
			(u64)(high_memory - 1));

	va_mask = GENMASK_ULL(tag_lsb - 1, 0);
	tag_val = hyp_va_msb;

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && tag_lsb != (vabits_actual - 1)) {
		/* We have some free bits to insert a random tag. */
		tag_val |= get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb);
	}
	tag_val >>= tag_lsb;

	init_hyp_physvirt_offset();
}

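/*
 * Generate the n-th instruction of the kern_hyp_va() patching sequence:
 * mask the kernel VA, rotate the tag field down to bit 0, add the tag
 * value in two 12-bit halves, then rotate everything back into place.
 */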
static u32 compute_instruction(int n, u32 rd, u32 rn)
{
	u32 insn = AARCH64_BREAK_FAULT;

	switch (n) {
	case 0:
		insn = aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_AND,
							  AARCH64_INSN_VARIANT_64BIT,
							  rn, rd, va_mask);
		break;

	case 1:
		/* ROR is a variant of EXTR with Rm = Rn */
		insn = aarch64_insn_gen_extr(AARCH64_INSN_VARIANT_64BIT,
					     rn, rn, rd,
					     tag_lsb);
		break;

	case 2:
		insn = aarch64_insn_gen_add_sub_imm(rd, rn,
						    tag_val & GENMASK(11, 0),
						    AARCH64_INSN_VARIANT_64BIT,
						    AARCH64_INSN_ADSB_ADD);
		break;

	case 3:
		insn = aarch64_insn_gen_add_sub_imm(rd, rn,
						    tag_val & GENMASK(23, 12),
						    AARCH64_INSN_VARIANT_64BIT,
						    AARCH64_INSN_ADSB_ADD);
		break;

	case 4:
		/* ROR is a variant of EXTR with Rm = Rn */
		insn = aarch64_insn_gen_extr(AARCH64_INSN_VARIANT_64BIT,
					     rn, rn, rd, 64 - tag_lsb);
		break;
	}

	return insn;
}

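/*
 * Alternative callback for kern_hyp_va(): patch the 5-instruction
 * sequence at the call site, reusing the registers decoded from the
 * original instructions.
 */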
void __init kvm_update_va_mask(struct alt_instr *alt,
			       __le32 *origptr, __le32 *updptr, int nr_inst)
{
	int i;

	BUG_ON(nr_inst != 5);

	for (i = 0; i < nr_inst; i++) {
		u32 rd, rn, insn, oinsn;

		/*
		 * VHE does not need any address translation: NOP the
		 * whole sequence.
		 *
		 * Likewise, if the tag is zero (because the layout
		 * leaves no spare bits in the address), NOP everything
		 * after masking the kernel VA.
		 */
		if (has_vhe() || (!tag_val && i > 0)) {
			updptr[i] = cpu_to_le32(aarch64_insn_gen_nop());
			continue;
		}

		oinsn = le32_to_cpu(origptr[i]);
		rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, oinsn);
		rn = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RN, oinsn);

		insn = compute_instruction(i, rd, rn);
		BUG_ON(insn == AARCH64_BREAK_FAULT);

		updptr[i] = cpu_to_le32(insn);
	}
}

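/*
 * For the Spectre-v3a hardened vectors, patch in a 4-instruction
 * sequence that builds the address of the matching vector in
 * __kvm_hyp_vector in x0 (movz/movk) and branches to it.
 */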
void kvm_patch_vector_branch(struct alt_instr *alt,
			     __le32 *origptr, __le32 *updptr, int nr_inst)
{
	u64 addr;
	u32 insn;

	BUG_ON(nr_inst != 4);

	if (!cpus_have_const_cap(ARM64_SPECTRE_V3A) || WARN_ON_ONCE(has_vhe()))
		return;

	/*
	 * Compute HYP VA by using the same computation as kern_hyp_va().
	 */
	addr = __early_kern_hyp_va((u64)kvm_ksym_ref(__kvm_hyp_vector));

	/* Use PC[10:7] to branch to the same vector in KVM */
	addr |= ((u64)origptr & GENMASK_ULL(10, 7));

	/*
	 * The hardened vector stub has already done the preamble work,
	 * so branch past the preamble of the real vector.
	 */
	addr += KVM_VECTOR_PREAMBLE;

	/* movz x0, #(addr & 0xffff) */
	insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
					 (u16)addr,
					 0,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_ZERO);
	*updptr++ = cpu_to_le32(insn);

	/* movk x0, #((addr >> 16) & 0xffff), lsl #16 */
	insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
					 (u16)(addr >> 16),
					 16,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_KEEP);
	*updptr++ = cpu_to_le32(insn);

	/* movk x0, #((addr >> 32) & 0xffff), lsl #32 */
	insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
					 (u16)(addr >> 32),
					 32,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_KEEP);
	*updptr++ = cpu_to_le32(insn);

	/* br x0 */
	insn = aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_0,
					   AARCH64_INSN_BRANCH_NOLINK);
	*updptr++ = cpu_to_le32(insn);
}

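/*
 * Replace the 4 instructions at the alternative site with a movz/movk
 * sequence that loads the 64-bit constant @val into the destination
 * register of the original first instruction.
 */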
static void generate_mov_q(u64 val, __le32 *origptr, __le32 *updptr, int nr_inst)
{
	u32 insn, oinsn, rd;

	BUG_ON(nr_inst != 4);

	/* Compute target register */
	oinsn = le32_to_cpu(*origptr);
	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, oinsn);

	/* movz rd, #(val & 0xffff) */
	insn = aarch64_insn_gen_movewide(rd,
					 (u16)val,
					 0,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_ZERO);
	*updptr++ = cpu_to_le32(insn);

	/* movk rd, #((val >> 16) & 0xffff), lsl #16 */
	insn = aarch64_insn_gen_movewide(rd,
					 (u16)(val >> 16),
					 16,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_KEEP);
	*updptr++ = cpu_to_le32(insn);

	/* movk rd, #((val >> 32) & 0xffff), lsl #32 */
	insn = aarch64_insn_gen_movewide(rd,
					 (u16)(val >> 32),
					 32,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_KEEP);
	*updptr++ = cpu_to_le32(insn);

	/* movk rd, #((val >> 48) & 0xffff), lsl #48 */
	insn = aarch64_insn_gen_movewide(rd,
					 (u16)(val >> 48),
					 48,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_KEEP);
	*updptr++ = cpu_to_le32(insn);
}

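/* Patch in kimage_voffset + PHYS_OFFSET as a 64-bit immediate. */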
void kvm_update_kimg_phys_offset(struct alt_instr *alt,
				 __le32 *origptr, __le32 *updptr, int nr_inst)
{
	generate_mov_q(kimage_voffset + PHYS_OFFSET, origptr, updptr, nr_inst);
}

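/* Patch in kimage_voffset as a 64-bit immediate. */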
void kvm_get_kimage_voffset(struct alt_instr *alt,
			    __le32 *origptr, __le32 *updptr, int nr_inst)
{
	generate_mov_q(kimage_voffset, origptr, updptr, nr_inst);
}