// SPDX-License-Identifier: GPL-2.0
#include <linux/kasan.h>
#include <linux/sched/task.h>
#include <linux/memblock.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/kasan.h>
#include <asm/mem_detect.h>
#include <asm/processor.h>
#include <asm/sclp.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/setup.h>

static unsigned long segment_pos __initdata;
static unsigned long segment_low __initdata;
static unsigned long pgalloc_pos __initdata;
static unsigned long pgalloc_low __initdata;
static unsigned long pgalloc_freeable __initdata;
static bool has_edat __initdata;
static bool has_nx __initdata;

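/* translate a kernel address into its kasan shadow address */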
#define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x))

static pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

static void __init kasan_early_panic(const char *reason)
{
	sclp_early_printk("The Linux kernel failed to boot with the KernelAddressSanitizer:\n");
	sclp_early_printk(reason);
	disabled_wait();
}
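
/*
 * Simple bump allocators: segments and pages are carved downward from
 * the top of detected memory; hitting the low watermark is fatal.
 */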

static void * __init kasan_early_alloc_segment(void)
{
	segment_pos -= _SEGMENT_SIZE;

	if (segment_pos < segment_low)
		kasan_early_panic("out of memory during initialisation\n");

	return (void *)segment_pos;
}

static void * __init kasan_early_alloc_pages(unsigned int order)
{
	pgalloc_pos -= (PAGE_SIZE << order);

	if (pgalloc_pos < pgalloc_low)
		kasan_early_panic("out of memory during initialisation\n");

	return (void *)pgalloc_pos;
}

static void * __init kasan_early_crst_alloc(unsigned long val)
{
	unsigned long *table;

	table = kasan_early_alloc_pages(CRST_ALLOC_ORDER);
	if (table)
		crst_table_init(table, val);
	return table;
}

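/*
 * s390 page tables are 2K, half of a 4K page, so each page allocated
 * here yields two pte tables; the unused half is kept as leftover.
 */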
static pte_t * __init kasan_early_pte_alloc(void)
{
	static void *pte_leftover;
	pte_t *pte;

	BUILD_BUG_ON(_PAGE_TABLE_SIZE * 2 != PAGE_SIZE);

	if (!pte_leftover) {
		pte_leftover = kasan_early_alloc_pages(0);
		pte = pte_leftover + _PAGE_TABLE_SIZE;
	} else {
		pte = pte_leftover;
		pte_leftover = NULL;
	}
	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	return pte;
}
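
/*
 * POPULATE_ONE2ONE: identity-map [address, end) itself;
 * POPULATE_MAP: back the shadow range with freshly allocated zeroed pages;
 * POPULATE_ZERO_SHADOW: map the shadow range read-only to the zero shadow page;
 * POPULATE_SHALLOW: create top-level tables only (used with KASAN_VMALLOC).
 */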

enum populate_mode {
	POPULATE_ONE2ONE,
	POPULATE_MAP,
	POPULATE_ZERO_SHADOW,
	POPULATE_SHALLOW
};

static void __init kasan_early_vmemmap_populate(unsigned long address,
						unsigned long end,
						enum populate_mode mode)
{
	unsigned long pgt_prot_zero, pgt_prot, sgt_prot;
	pgd_t *pg_dir;
	p4d_t *p4_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;

	pgt_prot_zero = pgprot_val(PAGE_KERNEL_RO);
	if (!has_nx)
		pgt_prot_zero &= ~_PAGE_NOEXEC;
	pgt_prot = pgprot_val(PAGE_KERNEL_EXEC);
	sgt_prot = pgprot_val(SEGMENT_KERNEL_EXEC);

	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, PGDIR_SIZE) &&
			    end - address >= PGDIR_SIZE) {
				pgd_populate(&init_mm, pg_dir,
					     kasan_early_shadow_p4d);
				address = (address + PGDIR_SIZE) & PGDIR_MASK;
				continue;
			}
			p4_dir = kasan_early_crst_alloc(_REGION2_ENTRY_EMPTY);
			pgd_populate(&init_mm, pg_dir, p4_dir);
		}

		if (IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING) &&
		    mode == POPULATE_SHALLOW) {
			address = (address + P4D_SIZE) & P4D_MASK;
			continue;
		}

		p4_dir = p4d_offset(pg_dir, address);
		if (p4d_none(*p4_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, P4D_SIZE) &&
			    end - address >= P4D_SIZE) {
				p4d_populate(&init_mm, p4_dir,
					     kasan_early_shadow_pud);
				address = (address + P4D_SIZE) & P4D_MASK;
				continue;
			}
			pu_dir = kasan_early_crst_alloc(_REGION3_ENTRY_EMPTY);
			p4d_populate(&init_mm, p4_dir, pu_dir);
		}

		if (!IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING) &&
		    mode == POPULATE_SHALLOW) {
			address = (address + PUD_SIZE) & PUD_MASK;
			continue;
		}

		pu_dir = pud_offset(p4_dir, address);
		if (pud_none(*pu_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, PUD_SIZE) &&
			    end - address >= PUD_SIZE) {
				pud_populate(&init_mm, pu_dir,
					     kasan_early_shadow_pmd);
				address = (address + PUD_SIZE) & PUD_MASK;
				continue;
			}
			pm_dir = kasan_early_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, PMD_SIZE) &&
			    end - address >= PMD_SIZE) {
				pmd_populate(&init_mm, pm_dir,
					     kasan_early_shadow_pte);
				address = (address + PMD_SIZE) & PMD_MASK;
				continue;
			}

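			/* map a whole segment at once if EDAT1 is available */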
			if (has_edat && address && end - address >= PMD_SIZE &&
			    mode != POPULATE_ZERO_SHADOW) {
				void *page;

				if (mode == POPULATE_ONE2ONE) {
					page = (void *)address;
				} else {
					page = kasan_early_alloc_segment();
					memset(page, 0, _SEGMENT_SIZE);
				}
				pmd_val(*pm_dir) = __pa(page) | sgt_prot;
				address = (address + PMD_SIZE) & PMD_MASK;
				continue;
			}

			pt_dir = kasan_early_pte_alloc();
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			void *page;

			switch (mode) {
			case POPULATE_ONE2ONE:
				page = (void *)address;
				pte_val(*pt_dir) = __pa(page) | pgt_prot;
				break;
			case POPULATE_MAP:
				page = kasan_early_alloc_pages(0);
				memset(page, 0, PAGE_SIZE);
				pte_val(*pt_dir) = __pa(page) | pgt_prot;
				break;
			case POPULATE_ZERO_SHADOW:
				page = kasan_early_shadow_page;
				pte_val(*pt_dir) = __pa(page) | pgt_prot_zero;
				break;
			case POPULATE_SHALLOW:
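				/* should never happen */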
				break;
			}
		}
		address += PAGE_SIZE;
	}
}

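/* install pgd as the kernel and user ASCE in control registers 1, 7 and 13 */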
static void __init kasan_set_pgd(pgd_t *pgd, unsigned long asce_type)
{
	unsigned long asce_bits;

	asce_bits = asce_type | _ASCE_TABLE_LENGTH;
	S390_lowcore.kernel_asce = (__pa(pgd) & PAGE_MASK) | asce_bits;
	S390_lowcore.user_asce = S390_lowcore.kernel_asce;

	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
}

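/* turn on DAT in the PSW and switch to the home address space */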
static void __init kasan_enable_dat(void)
{
	psw_t psw;

	psw.mask = __extract_psw();
	psw_bits(psw).dat = 1;
	psw_bits(psw).as = PSW_BITS_AS_HOME;
	__load_psw_mask(psw.mask);
}

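/* facility 8: EDAT1 (large pages), facility 130: instruction-execution protection */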
static void __init kasan_early_detect_facilities(void)
{
	if (test_facility(8)) {
		has_edat = true;
		__ctl_set_bit(0, 23);
	}
	if (!noexec_disabled && test_facility(130)) {
		has_nx = true;
		__ctl_set_bit(0, 20);
	}
}

void __init kasan_early_init(void)
{
	unsigned long untracked_mem_end;
	unsigned long shadow_alloc_size;
	unsigned long initrd_end;
	unsigned long asce_type;
	unsigned long memsize;
	unsigned long vmax;
	unsigned long pgt_prot = pgprot_val(PAGE_KERNEL_RO);
	pte_t pte_z;
	pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
	pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
	p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);

	kasan_early_detect_facilities();
	if (!has_nx)
		pgt_prot &= ~_PAGE_NOEXEC;
	pte_z = __pte(__pa(kasan_early_shadow_page) | pgt_prot);

	memsize = get_mem_detect_end();
	if (!memsize)
		kasan_early_panic("cannot detect physical memory size\n");
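	/* respect mem= cmdline parameter */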
	if (memory_end_set && memsize > memory_end)
		memsize = memory_end;
	if (IS_ENABLED(CONFIG_CRASH_DUMP) && OLDMEM_BASE)
		memsize = min(memsize, OLDMEM_SIZE);
	memsize = min(memsize, KASAN_SHADOW_START);

	if (IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING)) {
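		/* 4 level paging */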
		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, P4D_SIZE));
		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE));
		crst_table_init((unsigned long *)early_pg_dir,
				_REGION2_ENTRY_EMPTY);
		untracked_mem_end = vmax = _REGION1_SIZE;
		asce_type = _ASCE_TYPE_REGION2;
	} else {
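		/* 3 level paging */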
		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PUD_SIZE));
		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PUD_SIZE));
		crst_table_init((unsigned long *)early_pg_dir,
				_REGION3_ENTRY_EMPTY);
		untracked_mem_end = vmax = _REGION2_SIZE;
		asce_type = _ASCE_TYPE_REGION3;
	}

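	/* init kasan zero shadow */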
	crst_table_init((unsigned long *)kasan_early_shadow_p4d,
			p4d_val(p4d_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pud,
			pud_val(pud_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pmd,
			pmd_val(pmd_z));
	memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);

	shadow_alloc_size = memsize >> KASAN_SHADOW_SCALE_SHIFT;
	pgalloc_low = round_up((unsigned long)_end, _SEGMENT_SIZE);
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD)) {
		initrd_end =
		    round_up(INITRD_START + INITRD_SIZE, _SEGMENT_SIZE);
		pgalloc_low = max(pgalloc_low, initrd_end);
	}

	if (pgalloc_low + shadow_alloc_size > memsize)
		kasan_early_panic("out of memory during initialisation\n");

	if (has_edat) {
		segment_pos = round_down(memsize, _SEGMENT_SIZE);
		segment_low = segment_pos - shadow_alloc_size;
		pgalloc_pos = segment_low;
	} else {
		pgalloc_pos = memsize;
	}
	init_mm.pgd = early_pg_dir;
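
	/*
	 * Populate the shadow memory: the shadow for [0, memsize) is
	 * backed with real pages, the shadow for the vmalloc and modules
	 * areas is populated shallowly (top-level tables only, when
	 * KASAN_VMALLOC is enabled), and all remaining shadow is mapped
	 * read-only to the single kasan zero shadow page.
	 */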
	kasan_early_vmemmap_populate(__sha(0), __sha(memsize), POPULATE_MAP);
	if (IS_ENABLED(CONFIG_MODULES))
		untracked_mem_end = vmax - MODULES_LEN;
	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
		untracked_mem_end = vmax - vmalloc_size - MODULES_LEN;
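		/* shallowly populate kasan shadow for vmalloc/modules */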
		kasan_early_vmemmap_populate(__sha(untracked_mem_end),
					     __sha(vmax), POPULATE_SHALLOW);
	}

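	/* populate kasan shadow for untracked memory */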
	kasan_early_vmemmap_populate(__sha(max_physmem_end),
				     __sha(untracked_mem_end),
				     POPULATE_ZERO_SHADOW);
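	/* memory allocated for identity mapping structs will be freed later */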
	pgalloc_freeable = pgalloc_pos;
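	/* populate identity mapping */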
	kasan_early_vmemmap_populate(0, memsize, POPULATE_ONE2ONE);
	kasan_set_pgd(early_pg_dir, asce_type);
	kasan_enable_dat();
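	/* enable kasan */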
	init_task.kasan_depth = 0;
	memblock_reserve(pgalloc_pos, memsize - pgalloc_pos);
	sclp_early_printk("KernelAddressSanitizer initialized\n");
}

void __init kasan_copy_shadow(pgd_t *pg_dir)
{
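	/*
	 * Copy the shadow region entries from early_pg_dir into the new
	 * page table. With 4 level paging the shadow is covered by p4d
	 * entries, with 3 level paging (folded p4d) by pud entries.
	 */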
	pgd_t *pg_dir_src;
	pgd_t *pg_dir_dst;
	p4d_t *p4_dir_src;
	p4d_t *p4_dir_dst;
	pud_t *pu_dir_src;
	pud_t *pu_dir_dst;

	pg_dir_src = pgd_offset_raw(early_pg_dir, KASAN_SHADOW_START);
	pg_dir_dst = pgd_offset_raw(pg_dir, KASAN_SHADOW_START);
	p4_dir_src = p4d_offset(pg_dir_src, KASAN_SHADOW_START);
	p4_dir_dst = p4d_offset(pg_dir_dst, KASAN_SHADOW_START);
	if (!p4d_folded(*p4_dir_src)) {
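		/* 4 level paging */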
		memcpy(p4_dir_dst, p4_dir_src,
		       (KASAN_SHADOW_SIZE >> P4D_SHIFT) * sizeof(p4d_t));
		return;
	}
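	/* 3 level paging */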
	pu_dir_src = pud_offset(p4_dir_src, KASAN_SHADOW_START);
	pu_dir_dst = pud_offset(p4_dir_dst, KASAN_SHADOW_START);
	memcpy(pu_dir_dst, pu_dir_src,
	       (KASAN_SHADOW_SIZE >> PUD_SHIFT) * sizeof(pud_t));
}

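/* return the page tables that only served the early identity mapping */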
void __init kasan_free_early_identity(void)
{
	memblock_free(pgalloc_pos, pgalloc_freeable - pgalloc_pos);
}