#define DISABLE_BRANCH_PROFILING
#define pr_fmt(fmt) "kasan: " fmt

/* cpu_feature_enabled() cannot be used this early */
#define USE_EARLY_PGTABLE_L5

#include <linux/memblock.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/vmalloc.h>

#include <asm/e820/types.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/cpu_entry_area.h>

extern struct range pfn_mapped[E820_MAX_ENTRIES];

static p4d_t tmp_p4d_table[MAX_PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);

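/*
 * Allocate @size bytes, aligned to @size, above MAX_DMA_ADDRESS from
 * memblock, preferably on node @nid. The @panic variant is expected not to
 * fail; the non-panicking variant may return NULL, which callers handle.
 */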
static __init void *early_alloc(size_t size, int nid, bool panic)
{
	if (panic)
		return memblock_alloc_try_nid(size, size,
			__pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	else
		return memblock_alloc_try_nid_nopanic(size, size,
			__pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}

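/*
 * Populate the shadow at the PMD level. If PSE is available and the range
 * covers exactly one aligned PMD, try to map it with a 2M page; when that
 * fails, give the memory back to memblock and fall back to a page table of
 * 4K pages, leaving PTEs that are already present untouched.
 */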
static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
				      unsigned long end, int nid)
{
	pte_t *pte;

	if (pmd_none(*pmd)) {
		void *p;

		if (boot_cpu_has(X86_FEATURE_PSE) &&
		    ((end - addr) == PMD_SIZE) &&
		    IS_ALIGNED(addr, PMD_SIZE)) {
			p = early_alloc(PMD_SIZE, nid, false);
			if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL))
				return;
			else if (p)
				memblock_free(__pa(p), PMD_SIZE);
		}

		p = early_alloc(PAGE_SIZE, nid, true);
		pmd_populate_kernel(&init_mm, pmd, p);
	}

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t entry;
		void *p;

		if (!pte_none(*pte))
			continue;

		p = early_alloc(PAGE_SIZE, nid, true);
		entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

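/*
 * Same idea one level up: when gbpages are supported and the range covers
 * exactly one aligned PUD, try a 1G mapping first, otherwise descend into
 * the PMDs. Entries already mapped as large pages are skipped.
 */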
static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
				      unsigned long end, int nid)
{
	pmd_t *pmd;
	unsigned long next;

	if (pud_none(*pud)) {
		void *p;

		if (boot_cpu_has(X86_FEATURE_GBPAGES) &&
		    ((end - addr) == PUD_SIZE) &&
		    IS_ALIGNED(addr, PUD_SIZE)) {
			p = early_alloc(PUD_SIZE, nid, false);
			if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
				return;
			else if (p)
				memblock_free(__pa(p), PUD_SIZE);
		}

		p = early_alloc(PAGE_SIZE, nid, true);
		pud_populate(&init_mm, pud, p);
	}

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_large(*pmd))
			kasan_populate_pmd(pmd, addr, next, nid);
	} while (pmd++, addr = next, addr != end);
}

static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr,
				      unsigned long end, int nid)
{
	pud_t *pud;
	unsigned long next;

	if (p4d_none(*p4d)) {
		void *p = early_alloc(PAGE_SIZE, nid, true);

		p4d_populate(&init_mm, p4d, p);
	}

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (!pud_large(*pud))
			kasan_populate_pud(pud, addr, next, nid);
	} while (pud++, addr = next, addr != end);
}

static void __init kasan_populate_pgd(pgd_t *pgd, unsigned long addr,
				      unsigned long end, int nid)
{
	void *p;
	p4d_t *p4d;
	unsigned long next;

	if (pgd_none(*pgd)) {
		p = early_alloc(PAGE_SIZE, nid, true);
		pgd_populate(&init_mm, pgd, p);
	}

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		kasan_populate_p4d(p4d, addr, next, nid);
	} while (p4d++, addr = next, addr != end);
}

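/*
 * Populate real shadow memory for [addr, end): round out to whole pages
 * and walk the kernel page tables from the top level down, allocating on
 * node @nid.
 */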
static void __init kasan_populate_shadow(unsigned long addr, unsigned long end,
					 int nid)
{
	pgd_t *pgd;
	unsigned long next;

	addr = addr & PAGE_MASK;
	end = round_up(end, PAGE_SIZE);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_populate_pgd(pgd, addr, next, nid);
	} while (pgd++, addr = next, addr != end);
}

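/*
 * Populate the shadow for one physically contiguous chunk of the direct
 * mapping (one pfn_mapped[] entry), allocating from the node that owns
 * the underlying memory.
 */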
static void __init map_range(struct range *range)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
	end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));

	kasan_populate_shadow(start, end, early_pfn_to_nid(range->start));
}

static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	pgd_t *pgd;
	/*
	 * With 5-level paging KASAN_SHADOW_END is not PGDIR_SIZE aligned,
	 * so the last pgd entry is only partially covered by the shadow
	 * and must be cleared at the p4d level (see kasan_init()).
	 */
	unsigned long pgd_end = end & PGDIR_MASK;

	for (; start < pgd_end; start += PGDIR_SIZE) {
		pgd = pgd_offset_k(start);
		/*
		 * With folded p4d, pgd_clear() is nop, use p4d_clear()
		 * instead.
		 */
		if (pgtable_l5_enabled())
			pgd_clear(pgd);
		else
			p4d_clear(p4d_offset(pgd, start));
	}

	pgd = pgd_offset_k(start);
	for (; start < end; start += P4D_SIZE)
		p4d_clear(p4d_offset(pgd, start));
}

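/*
 * Early variant of p4d_offset(): with folded p4d the pgd entry is the p4d,
 * and with 5-level paging the p4d page is reached through the kernel image
 * mapping (__START_KERNEL_map - phys_base), since the direct mapping is not
 * available this early in boot.
 */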
static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr)
{
	unsigned long p4d;

	if (!pgtable_l5_enabled())
		return (p4d_t *)pgd;

	p4d = pgd_val(*pgd) & PTE_PFN_MASK;
	p4d += __START_KERNEL_map - phys_base;
	return (p4d_t *)p4d + p4d_index(addr);
}

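/*
 * Hook the early, all-zero shadow page tables into every pgd/p4d entry in
 * the range: each level points at the shared early shadow table one level
 * down, so every shadow access resolves to kasan_early_shadow_page.
 * Entries that are already populated are left untouched.
 */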
static void __init kasan_early_p4d_populate(pgd_t *pgd,
					    unsigned long addr,
					    unsigned long end)
{
	pgd_t pgd_entry;
	p4d_t *p4d, p4d_entry;
	unsigned long next;

	if (pgd_none(*pgd)) {
		pgd_entry = __pgd(_KERNPG_TABLE |
				  __pa_nodebug(kasan_early_shadow_p4d));
		set_pgd(pgd, pgd_entry);
	}

	p4d = early_p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);

		if (!p4d_none(*p4d))
			continue;

		p4d_entry = __p4d(_KERNPG_TABLE |
				  __pa_nodebug(kasan_early_shadow_pud));
		set_p4d(p4d, p4d_entry);
	} while (p4d++, addr = next, addr != end && p4d_none(*p4d));
}

static void __init kasan_map_early_shadow(pgd_t *pgd)
{
	/* See comment in kasan_init() */
	unsigned long addr = KASAN_SHADOW_START & PGDIR_MASK;
	unsigned long end = KASAN_SHADOW_END;
	unsigned long next;

	pgd += pgd_index(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_early_p4d_populate(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

#ifdef CONFIG_KASAN_INLINE
static int kasan_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	if (val == DIE_GPF) {
		pr_emerg("CONFIG_KASAN_INLINE enabled\n");
		pr_emerg("GPF could be caused by NULL-ptr deref or user memory access\n");
	}
	return NOTIFY_OK;
}

static struct notifier_block kasan_die_notifier = {
	.notifier_call = kasan_die_handler,
};
#endif

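/*
 * Set up the early shadow: every PTE of kasan_early_shadow_pte points at
 * the single kasan_early_shadow_page, and each higher level points at the
 * shared table below it, so the whole shadow range reads as zero until
 * kasan_init() populates the real shadow.
 */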
void __init kasan_early_init(void)
{
	int i;
	pteval_t pte_val = __pa_nodebug(kasan_early_shadow_page) |
				__PAGE_KERNEL | _PAGE_ENC;
	pmdval_t pmd_val = __pa_nodebug(kasan_early_shadow_pte) | _KERNPG_TABLE;
	pudval_t pud_val = __pa_nodebug(kasan_early_shadow_pmd) | _KERNPG_TABLE;
	p4dval_t p4d_val = __pa_nodebug(kasan_early_shadow_pud) | _KERNPG_TABLE;

	/* Mask out unsupported __PAGE_KERNEL bits: */
	pte_val &= __default_kernel_pte_mask;
	pmd_val &= __default_kernel_pte_mask;
	pud_val &= __default_kernel_pte_mask;
	p4d_val &= __default_kernel_pte_mask;

	for (i = 0; i < PTRS_PER_PTE; i++)
		kasan_early_shadow_pte[i] = __pte(pte_val);

	for (i = 0; i < PTRS_PER_PMD; i++)
		kasan_early_shadow_pmd[i] = __pmd(pmd_val);

	for (i = 0; i < PTRS_PER_PUD; i++)
		kasan_early_shadow_pud[i] = __pud(pud_val);

	for (i = 0; pgtable_l5_enabled() && i < PTRS_PER_P4D; i++)
		kasan_early_shadow_p4d[i] = __p4d(p4d_val);

	kasan_map_early_shadow(early_top_pgt);
	kasan_map_early_shadow(init_top_pgt);
}

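/*
 * Replace the early, all-zero shadow with properly allocated shadow memory
 * for everything KASAN needs to track: the direct mapping, the cpu entry
 * area and the kernel image. The gaps in between keep the read-only zero
 * shadow.
 */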
void __init kasan_init(void)
{
	int i;
	void *shadow_cpu_entry_begin, *shadow_cpu_entry_end;

#ifdef CONFIG_KASAN_INLINE
	register_die_notifier(&kasan_die_notifier);
#endif

	memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));

	/*
	 * We use the same shadow offset for 4- and 5-level paging to
	 * facilitate no-KASAN builds with CONFIG_X86_5LEVEL=y.
	 *
	 * With 5-level paging KASAN_SHADOW_END is therefore not PGDIR_SIZE
	 * aligned: the last pgd entry covers both the tail of the shadow and
	 * the kernel image/modules mappings, and early_top_pgt (copied above)
	 * shares the p4d page behind that entry with init_top_pgt. The shadow
	 * is about to be torn down and rebuilt in init_top_pgt while the CPU
	 * runs on early_top_pgt, so give early_top_pgt its own copy of that
	 * p4d page (tmp_p4d_table); otherwise clear_pgds() below would also
	 * rip the early shadow out of the live page tables.
	 */
	if (pgtable_l5_enabled()) {
		void *ptr;

		ptr = (void *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END));
		memcpy(tmp_p4d_table, (void *)ptr, sizeof(tmp_p4d_table));
		set_pgd(&early_top_pgt[pgd_index(KASAN_SHADOW_END)],
			__pgd(__pa(tmp_p4d_table) | _KERNPG_TABLE));
	}

	load_cr3(early_top_pgt);
	__flush_tlb_all();

	clear_pgds(KASAN_SHADOW_START & PGDIR_MASK, KASAN_SHADOW_END);

	kasan_populate_early_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK),
			kasan_mem_to_shadow((void *)PAGE_OFFSET));

	for (i = 0; i < E820_MAX_ENTRIES; i++) {
		if (pfn_mapped[i].end == 0)
			break;

		map_range(&pfn_mapped[i]);
	}

	shadow_cpu_entry_begin = (void *)CPU_ENTRY_AREA_BASE;
	shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin);
	shadow_cpu_entry_begin = (void *)round_down(
			(unsigned long)shadow_cpu_entry_begin, PAGE_SIZE);

	shadow_cpu_entry_end = (void *)(CPU_ENTRY_AREA_BASE +
					CPU_ENTRY_AREA_MAP_SIZE);
	shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end);
	shadow_cpu_entry_end = (void *)round_up(
			(unsigned long)shadow_cpu_entry_end, PAGE_SIZE);

	kasan_populate_early_shadow(
		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
		shadow_cpu_entry_begin);

	kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
			      (unsigned long)shadow_cpu_entry_end, 0);

	kasan_populate_early_shadow(shadow_cpu_entry_end,
			kasan_mem_to_shadow((void *)__START_KERNEL_map));

	kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
			      (unsigned long)kasan_mem_to_shadow(_end),
			      early_pfn_to_nid(__pa(_stext)));

	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)MODULES_END),
			(void *)KASAN_SHADOW_END);

	load_cr3(init_top_pgt);
	__flush_tlb_all();

	/*
	 * kasan_early_shadow_page has been used as early shadow memory, thus
	 * it may contain some garbage. Now we can clear and write protect it,
	 * since after the TLB flush no one should write to it.
	 */
	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte_t pte;
		pgprot_t prot;

		prot = __pgprot(__PAGE_KERNEL_RO | _PAGE_ENC);
		pgprot_val(prot) &= __default_kernel_pte_mask;

		pte = __pte(__pa(kasan_early_shadow_page) | pgprot_val(prot));
		set_pte(&kasan_early_shadow_pte[i], pte);
	}
	/* Flush TLBs again to be sure that write protection applied. */
	__flush_tlb_all();

	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}