/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>

#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>

#include "mm.h"

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

pgprot_t pgprot_default;
EXPORT_SYMBOL(pgprot_default);

static pmdval_t prot_sect_kernel;
struct cachepolicy {
	const char	policy[16];
	u64		mair;
	u64		tcr;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy		= "uncached",
		.mair		= 0x44,		/* inner, outer non-cacheable */
		.tcr		= TCR_IRGN_NC | TCR_ORGN_NC,
	}, {
		.policy		= "writethrough",
		.mair		= 0xaa,		/* inner, outer write-through, read-allocate */
		.tcr		= TCR_IRGN_WT | TCR_ORGN_WT,
	}, {
		.policy		= "writeback",
		.mair		= 0xee,		/* inner, outer write-back, read-allocate */
		.tcr		= TCR_IRGN_WBnWA | TCR_ORGN_WBnWA,
	}
};
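
/*
 * Reference (added; per the ARMv8 MAIR encoding, not from the original
 * source): each attribute byte splits into outer[7:4]/inner[3:0] fields,
 * so e.g. 0xee is 0b1110_1110: Normal memory, inner and outer write-back,
 * read-allocate, no write-allocate, which is consistent with the
 * TCR_IRGN_WBnWA | TCR_ORGN_WBnWA page-table-walk attributes above.
 */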

/*
 * These are useful for identifying cache coherency problems by allowing the
 * cache or the cache and writebuffer to be turned off. It changes the Normal
 * memory caching attributes in the MAIR_EL1 register.
 */
static int __init early_cachepolicy(char *p)
{
	int i;
	u64 tmp;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(p, cache_policies[i].policy, len) == 0)
			break;
	}
	if (i == ARRAY_SIZE(cache_policies)) {
		pr_err("ERROR: unknown or unsupported cache policy: %s\n", p);
		return 0;
	}

	flush_cache_all();

	/*
	 * Modify MT_NORMAL attributes in MAIR_EL1.
	 */
	asm volatile(
	"	mrs	%0, mair_el1\n"
	"	bfi	%0, %1, #%2, #8\n"
	"	msr	mair_el1, %0\n"
	"	isb\n"
	: "=&r" (tmp)
	: "r" (cache_policies[i].mair), "i" (MT_NORMAL * 8));

	/*
	 * Modify TCR PTW cacheability attributes.
	 */
	asm volatile(
	"	mrs	%0, tcr_el1\n"
	"	bic	%0, %0, %2\n"
	"	orr	%0, %0, %1\n"
	"	msr	tcr_el1, %0\n"
	"	isb\n"
	: "=&r" (tmp)
	: "r" (cache_policies[i].tcr), "r" (TCR_IRGN_MASK | TCR_ORGN_MASK));

	flush_cache_all();

	return 0;
}
early_param("cachepolicy", early_cachepolicy);
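
/*
 * Example (added, illustrative): booting with "cachepolicy=writethrough"
 * on the kernel command line matches the second entry in cache_policies[]
 * and rewrites the MT_NORMAL attributes in MAIR_EL1 and the page-table-walk
 * cacheability bits in TCR_EL1 accordingly.
 */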

/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init init_mem_pgprot(void)
{
	pteval_t default_pgprot;
	int i;

	default_pgprot = PTE_ATTRINDX(MT_NORMAL);
	prot_sect_kernel = PMD_TYPE_SECT | PMD_SECT_AF | PMD_ATTRINDX(MT_NORMAL);

#ifdef CONFIG_SMP
	/*
	 * Mark memory with the "shared" attribute for SMP systems.
	 */
	default_pgprot |= PTE_SHARED;
	prot_sect_kernel |= PMD_SECT_S;
#endif

	for (i = 0; i < 16; i++) {
		unsigned long v = pgprot_val(protection_map[i]);
		protection_map[i] = __pgprot(v | default_pgprot);
	}

	pgprot_default = __pgprot(PTE_TYPE_PAGE | PTE_AF | default_pgprot);
}

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
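
/*
 * Note (added): this hook is consulted on the /dev/mem mmap path, picking
 * uncached attributes for physical addresses outside RAM (pfn_valid()
 * fails) and write-combining for files opened with O_SYNC.
 */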

static void __init *early_alloc(unsigned long sz)
{
	void *ptr = __va(memblock_alloc(sz, sz));
	memset(ptr, 0, sz);
	return ptr;
}

static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn)
{
	pte_t *pte;

	if (pmd_none(*pmd)) {
		pte = early_alloc(PTRS_PER_PTE * sizeof(pte_t));
		__pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
	}
	BUG_ON(pmd_bad(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	do {
		set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
				  unsigned long end, phys_addr_t phys)
{
	pmd_t *pmd;
	unsigned long next;

	/*
	 * Check for initial section mappings in the pgd/pud and remove them.
	 */
	if (pud_none(*pud) || pud_bad(*pud)) {
		pmd = early_alloc(PTRS_PER_PMD * sizeof(pmd_t));
		pud_populate(&init_mm, pud, pmd);
	}

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0)
			set_pmd(pmd, __pmd(phys | prot_sect_kernel));
		else
			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys));
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);
}
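
/*
 * Worked example (added; assumes 4KB pages, where SECTION_SIZE is 2MB):
 * mapping virt 0xffffffc000200000 to phys 0x80200000 for one 2MB step
 * gives ((addr | next | phys) & ~SECTION_MASK) == 0, so a single pmd
 * section entry is written instead of allocating a 512-entry pte table.
 */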

static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
				  unsigned long end, unsigned long phys)
{
	pud_t *pud = pud_offset(pgd, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		alloc_init_pmd(pud, addr, next, phys);
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}

/*
 * Create the page directory entries and any necessary page tables for the
 * mapping of the given physical range at the given virtual address.
 */
static void __init create_mapping(phys_addr_t phys, unsigned long virt,
				  phys_addr_t size)
{
	unsigned long addr, length, end, next;
	pgd_t *pgd;

	if (virt < VMALLOC_START) {
		pr_warning("BUG: not creating mapping for 0x%016llx at 0x%016lx - outside kernel range\n",
			   phys, virt);
		return;
	}

	addr = virt & PAGE_MASK;
	length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(pgd, addr, next, phys);
		phys += next - addr;
	} while (pgd++, addr = next, addr != end);
}
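
/*
 * Example call (added, illustrative values):
 *
 *	create_mapping(0x80000000UL, __phys_to_virt(0x80000000UL), SZ_16M);
 *
 * maps 16MB of RAM into the kernel linear mapping, walking pgd -> pud ->
 * pmd and using 2MB section entries wherever the physical and virtual
 * alignment allows.
 */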

#ifdef CONFIG_EARLY_PRINTK
/*
 * Create an early I/O mapping using the pgd/pmd entries already populated
 * in head.S as this function is called too early to allocate any memory.
 * The mapping size is 2MB with 4KB pages, or 64KB with 64KB pages.
 */
void __iomem * __init early_io_map(phys_addr_t phys, unsigned long virt)
{
	unsigned long size, mask;
	bool page64k = IS_ENABLED(CONFIG_ARM64_64K_PAGES);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/*
	 * No early pte entries with !ARM64_64K_PAGES configuration, so using
	 * sections (pmd).
	 */
	size = page64k ? PAGE_SIZE : SECTION_SIZE;
	mask = ~(size - 1);

	pgd = pgd_offset_k(virt);
	pud = pud_offset(pgd, virt);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, virt);

	if (page64k) {
		if (pmd_none(*pmd))
			return NULL;
		pte = pte_offset_kernel(pmd, virt);
		set_pte(pte, __pte((phys & mask) | PROT_DEVICE_nGnRE));
	} else {
		set_pmd(pmd, __pmd((phys & mask) | PROT_SECT_DEVICE_nGnRE));
	}

	return (void __iomem *)((virt & mask) + (phys & ~mask));
}
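
/*
 * Example (added): the arm64 early_printk code maps its UART with
 * something like
 *
 *	base = early_io_map(uart_phys, EARLYCON_IOBASE);
 *
 * where uart_phys is parsed from the earlyprintk= option; the fixed
 * virtual slot (EARLYCON_IOBASE here) must lie within the range already
 * covered by the head.S page tables.
 */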
#endif

static void __init map_mem(void)
{
	struct memblock_region *reg;

	/*
	 * Temporarily limit the memblock range. We need to do this as
	 * create_mapping requires puds, pmds and ptes to be allocated from
	 * memory addressable from the initial direct kernel mapping.
	 *
	 * The initial direct kernel mapping, located at swapper_pg_dir,
	 * gives us PGDIR_SIZE memory starting from PHYS_OFFSET (aligned).
	 */
	memblock_set_current_limit((PHYS_OFFSET & PGDIR_MASK) + PGDIR_SIZE);

	/* map all the memory banks */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;

		if (start >= end)
			break;

		create_mapping(start, __phys_to_virt(start), end - start);
	}

	/* Limit no longer required. */
	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps and sets up the zero page.
 */
void __init paging_init(void)
{
	void *zero_page;

	init_mem_pgprot();
	map_mem();

	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state.
	 */
	flush_cache_all();
	flush_tlb_all();

	/* allocate the zero page. */
	zero_page = early_alloc(PAGE_SIZE);

	bootmem_init();

	empty_zero_page = virt_to_page(zero_page);

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to zero page to avoid speculatively fetching new entries.
	 */
	cpu_set_reserved_ttbr0();
	flush_tlb_all();
}

/*
 * Enable the identity mapping to allow the MMU disabling.
 */
void setup_mm_for_reboot(void)
{
	cpu_switch_mm(idmap_pg_dir, &init_mm);
	flush_tlb_all();
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if ((((long)addr) >> VA_BITS) != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}
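
/*
 * Note (added): callers such as the /proc/kcore code use this check to
 * validate a kernel virtual address before reading through it.
 */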

#ifdef CONFIG_SPARSEMEM_VMEMMAP
#ifdef CONFIG_ARM64_64K_PAGES
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	return vmemmap_populate_basepages(start, end, node);
}
#else	/* !CONFIG_ARM64_64K_PAGES */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	do {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p = NULL;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (!p)
				return -ENOMEM;

			set_pmd(pmd, __pmd(__pa(p) | prot_sect_kernel));
		} else
			vmemmap_verify((pte_t *)pmd, node, addr, next);
	} while (addr = next, addr != end);

	return 0;
}
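
/*
 * Sizing note (added; assumes 4KB pages and a 64-byte struct page): each
 * 2MB PMD_SIZE block holds 32768 struct pages, so one vmemmap block
 * describes 128MB of physical memory.
 */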
#endif	/* CONFIG_ARM64_64K_PAGES */

void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif	/* CONFIG_SPARSEMEM_VMEMMAP */