/*
 * alternative runtime patching
 * inspired by the x86 version
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) "alternatives: " fmt

#include <linux/init.h>
#include <linux/cpu.h>
#include <asm/cacheflush.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/insn.h>
#include <asm/sections.h>
#include <linux/stop_machine.h>

#define __ALT_PTR(a, f)		((void *)&(a)->f + (a)->f)
#define ALT_ORIG_PTR(a)		__ALT_PTR(a, orig_offset)
#define ALT_REPL_PTR(a)		__ALT_PTR(a, alt_offset)
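
/*
 * struct alt_instr stores self-relative offsets rather than absolute
 * pointers, so the entries remain valid no matter where the kernel
 * image is mapped; __ALT_PTR() turns such an offset back into an
 * absolute address. Entries are emitted into the .altinstructions
 * section by the ALTERNATIVE() macros, roughly along these lines
 * (the capability name below is a placeholder, for illustration only):
 *
 *	asm volatile(ALTERNATIVE("nop", "add x0, x0, #1", ARM64_SOME_CAP));
 */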

static int all_alternatives_applied;

static DECLARE_BITMAP(applied_alternatives, ARM64_NCAPS);

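/*
 * A region delimits a run of struct alt_instr entries to patch: the
 * whole .altinstructions section for the core kernel image, or the
 * corresponding section of a module being loaded.
 */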
struct alt_region {
	struct alt_instr *begin;
	struct alt_instr *end;
};

bool alternative_is_applied(u16 cpufeature)
{
	if (WARN_ON(cpufeature >= ARM64_NCAPS))
		return false;

	return test_bit(cpufeature, applied_alternatives);
}

/*
 * Check if the target PC is within an alternative block.
 */
static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
{
	unsigned long replptr;

	if (kernel_text_address(pc))
		return true;

	replptr = (unsigned long)ALT_REPL_PTR(alt);
	if (pc >= replptr && pc <= (replptr + alt->alt_len))
		return false;

	/*
	 * Branching into *another* alternate sequence is doomed, and
	 * we're not even trying to fix it up.
	 */
	BUG();
}

#define align_down(x, a)	((unsigned long)(x) & ~(((unsigned long)(a)) - 1))

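/*
 * PC-relative instructions encode their target as an offset from their
 * own address, so an instruction copied verbatim from the replacement
 * sequence to the original location would resolve to the wrong target.
 * get_alt_insn() rewrites the encoded offset of immediate branches and
 * adrp so that the relocated instruction still hits the intended target.
 */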
static u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnptr)
{
	u32 insn;

	insn = le32_to_cpu(*altinsnptr);

	if (aarch64_insn_is_branch_imm(insn)) {
		s32 offset = aarch64_get_branch_offset(insn);
		unsigned long target;

		target = (unsigned long)altinsnptr + offset;

		/*
		 * If we're branching inside the alternate sequence,
		 * do not rewrite the instruction, as it is already
		 * correct. Otherwise, generate the new instruction.
		 */
		if (branch_insn_requires_update(alt, target)) {
			offset = target - (unsigned long)insnptr;
			insn = aarch64_set_branch_offset(insn, offset);
		}
	} else if (aarch64_insn_is_adrp(insn)) {
		s32 orig_offset, new_offset;
		unsigned long target;

		/*
		 * If we're replacing an adrp instruction, which uses
		 * PC-relative immediate addressing, adjust the offset
		 * to reflect the new PC. adrp operates on 4K aligned
		 * addresses.
		 */
		orig_offset = aarch64_insn_adrp_get_offset(insn);
		target = align_down(altinsnptr, SZ_4K) + orig_offset;
		new_offset = target - align_down(insnptr, SZ_4K);
		insn = aarch64_insn_adrp_set_offset(insn, new_offset);
	} else if (aarch64_insn_uses_literal(insn)) {
		/*
		 * Disallow patching unhandled instructions using PC
		 * relative literal addresses.
		 */
		BUG();
	}

	return insn;
}

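/*
 * Default patching callback: copy nr_inst instructions from the
 * replacement sequence over the (possibly aliased) original location,
 * fixing up PC-relative instructions along the way.
 */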
static void patch_alternative(struct alt_instr *alt,
			      __le32 *origptr, __le32 *updptr, int nr_inst)
{
	__le32 *replptr;
	int i;

	replptr = ALT_REPL_PTR(alt);
	for (i = 0; i < nr_inst; i++) {
		u32 insn;

		insn = get_alt_insn(alt, origptr + i, replptr + i);
		updptr[i] = cpu_to_le32(insn);
	}
}

/*
 * We provide our own, private D-cache cleaning function so that we don't
 * accidentally call into the cache.S code, which is patched by us at
 * runtime.
 */
static void clean_dcache_range_nopatch(u64 start, u64 end)
{
	u64 cur, d_size, ctr_el0;

	ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
	d_size = 4 << cpuid_feature_extract_unsigned_field(ctr_el0,
							   CTR_DMINLINE_SHIFT);
	cur = start & ~(d_size - 1);
	do {
		/*
		 * We must clean+invalidate to the PoC in order to avoid
		 * Cortex-A53 errata 826319, 827319, 824069 and 819472
		 * (this corresponds to ARM64_WORKAROUND_CLEAN_CACHE)
		 */
		asm volatile("dc civac, %0" : : "r" (cur) : "memory");
	} while (cur += d_size, cur < end);
}

static void __apply_alternatives(void *alt_region, bool is_module,
				 unsigned long *feature_mask)
{
	struct alt_instr *alt;
	struct alt_region *region = alt_region;
	__le32 *origptr, *updptr;
	alternative_cb_t alt_cb;

	for (alt = region->begin; alt < region->end; alt++) {
		int nr_inst;

		if (!test_bit(alt->cpufeature, feature_mask))
			continue;

		/* Use ARM64_CB_PATCH as an unconditional patch */
		if (alt->cpufeature < ARM64_CB_PATCH &&
		    !cpus_have_cap(alt->cpufeature))
			continue;

		if (alt->cpufeature == ARM64_CB_PATCH)
			BUG_ON(alt->alt_len != 0);
		else
			BUG_ON(alt->alt_len != alt->orig_len);

		pr_info_once("patching kernel code\n");

		origptr = ALT_ORIG_PTR(alt);
		updptr = is_module ? origptr : lm_alias(origptr);
		nr_inst = alt->orig_len / AARCH64_INSN_SIZE;

		if (alt->cpufeature < ARM64_CB_PATCH)
			alt_cb = patch_alternative;
		else
			alt_cb = ALT_REPL_PTR(alt);

		alt_cb(alt, origptr, updptr, nr_inst);

		if (!is_module) {
			clean_dcache_range_nopatch((u64)origptr,
						   (u64)(origptr + nr_inst));
		}
	}

	/*
	 * The core module code takes care of cache maintenance in
	 * flush_module_icache().
	 */
	if (!is_module) {
		dsb(ish);
		__flush_icache_all();
		isb();

		/* Ignore ARM64_CB bit from feature mask */
		bitmap_or(applied_alternatives, applied_alternatives,
			  feature_mask, ARM64_NCAPS);
		bitmap_and(applied_alternatives, applied_alternatives,
			   cpu_hwcaps, ARM64_NCAPS);
	}
}

/*
 * We might be patching the stop_machine state machine, so implement a
 * really simple polling protocol here.
 */
static int __apply_alternatives_multi_stop(void *unused)
{
	struct alt_region region = {
		.begin	= (struct alt_instr *)__alt_instructions,
		.end	= (struct alt_instr *)__alt_instructions_end,
	};

	/* We always have a CPU 0 at this point (__init) */
	if (smp_processor_id()) {
		while (!READ_ONCE(all_alternatives_applied))
			cpu_relax();
		isb();
	} else {
		DECLARE_BITMAP(remaining_capabilities, ARM64_NPATCHABLE);

		bitmap_complement(remaining_capabilities, boot_capabilities,
				  ARM64_NPATCHABLE);

		BUG_ON(all_alternatives_applied);
		__apply_alternatives(&region, false, remaining_capabilities);
		/* Barriers provided by the cache flushing */
		WRITE_ONCE(all_alternatives_applied, 1);
	}

	return 0;
}

void __init apply_alternatives_all(void)
{
	/* better not try code patching on a live SMP system */
	stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask);
}

/*
 * This is called very early in the boot process (directly after we run
 * a feature detect on the boot CPU). No need to worry about CPU hotplug.
 */
void __init apply_boot_alternatives(void)
{
	struct alt_region region = {
		.begin	= (struct alt_instr *)__alt_instructions,
		.end	= (struct alt_instr *)__alt_instructions_end,
	};

	/* If called on non-boot cpu things could go wrong */
	WARN_ON(smp_processor_id() != 0);

	__apply_alternatives(&region, false, &boot_capabilities[0]);
}

#ifdef CONFIG_MODULES
void apply_alternatives_module(void *start, size_t length)
{
	struct alt_region region = {
		.begin	= start,
		.end	= start + length,
	};
	DECLARE_BITMAP(all_capabilities, ARM64_NPATCHABLE);

	bitmap_fill(all_capabilities, ARM64_NPATCHABLE);

	__apply_alternatives(&region, true, &all_capabilities[0]);
}
#endif /* CONFIG_MODULES */