#include <common.h>
#include <cpu_func.h>
#include <hang.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>

DECLARE_GLOBAL_DATA_PTR;

#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)

/*
 * With a 4k page granule, a virtual address is split into 4 lookup parts
 * spanning 9 bits each:
 *
 *   _______________________________________________
 *  |       |       |       |       |       |       |
 *  |   0   |  Lv0  |  Lv1  |  Lv2  |  Lv3  |  off  |
 *  |_______|_______|_______|_______|_______|_______|
 *    63-48   47-39   38-30   29-21   20-12   11-00
 *
 *             mask        page size
 *
 *    Lv0: FF8000000000       --
 *    Lv1:   7FC0000000       1G
 *    Lv2:     3FE00000       2M
 *    Lv3:       1FF000       4K
 *    off:          FFF
 */

u64 get_tcr(int el, u64 *pips, u64 *pva_bits)
{
	u64 max_addr = 0;
	u64 ips, va_bits;
	u64 tcr;
	int i;

	/* Find the largest address we need to support */
	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
		max_addr = max(max_addr, mem_map[i].virt + mem_map[i].size);

	/* Calculate the maximum physical (and thus virtual) address */
	if (max_addr > (1ULL << 44)) {
		ips = 5;
		va_bits = 48;
	} else if (max_addr > (1ULL << 42)) {
		ips = 4;
		va_bits = 44;
	} else if (max_addr > (1ULL << 40)) {
		ips = 3;
		va_bits = 42;
	} else if (max_addr > (1ULL << 36)) {
		ips = 2;
		va_bits = 40;
	} else if (max_addr > (1ULL << 32)) {
		ips = 1;
		va_bits = 36;
	} else {
		ips = 0;
		va_bits = 32;
	}

	if (el == 1) {
		tcr = TCR_EL1_RSVD | (ips << 32) | TCR_EPD1_DISABLE;
	} else if (el == 2) {
		tcr = TCR_EL2_RSVD | (ips << 16);
	} else {
		tcr = TCR_EL3_RSVD | (ips << 16);
	}

	/* PTWs cacheable, inner/outer WBWA and inner shareable */
	tcr |= TCR_TG0_4K | TCR_SHARED_INNER | TCR_ORGN_WBWA | TCR_IRGN_WBWA;
	tcr |= TCR_T0SZ(va_bits);

	if (pips)
		*pips = ips;
	if (pva_bits)
		*pva_bits = va_bits;

	return tcr;
}
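
/*
 * Illustrative sketch (not part of the driver): with a single 2 GiB
 * mem_map entry, max_addr is 0x80000000, so the ladder above picks
 * ips = 0 and va_bits = 32. A caller that only needs the VA width can
 * ignore the returned TCR value:
 *
 *	u64 va_bits;
 *
 *	get_tcr(0, NULL, &va_bits);
 */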

#define MAX_PTE_ENTRIES 512

static int pte_type(u64 *pte)
{
	return *pte & PTE_TYPE_MASK;
}

/* Returns the LSB number for a PTE on level <level> */
static int level2shift(int level)
{
	/* Page is 12 bits wide, every level translates 9 bits */
	return 12 + 9 * (3 - level);
}
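
/*
 * Worked values of the formula above, for reference (4k granule):
 *
 *	level2shift(0) = 39	(512 GiB per Lv0 entry)
 *	level2shift(1) = 30	(  1 GiB per Lv1 entry)
 *	level2shift(2) = 21	(  2 MiB per Lv2 entry)
 *	level2shift(3) = 12	(  4 KiB per Lv3 entry)
 */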

static u64 *find_pte(u64 addr, int level)
{
	int start_level = 0;
	u64 *pte;
	u64 idx;
	u64 va_bits;
	int i;

	debug("addr=%llx level=%d\n", addr, level);

	get_tcr(0, NULL, &va_bits);
	if (va_bits < 39)
		start_level = 1;

	if (level < start_level)
		return NULL;

	/* Walk through all page table levels to find our PTE */
	pte = (u64 *)gd->arch.tlb_addr;
	for (i = start_level; i < 4; i++) {
		idx = (addr >> level2shift(i)) & 0x1FF;
		pte += idx;
		debug("idx=%llx PTE %p at level %d: %llx\n", idx, pte, i, *pte);

		/* Found it */
		if (i == level)
			return pte;

		/* PTE is no table (either invalid or block), can't traverse */
		if (pte_type(pte) != PTE_TYPE_TABLE)
			return NULL;

		/* Off to the next level */
		pte = (u64 *)(*pte & 0x0000fffffffff000ULL);
	}

	/* Should never reach here */
	return NULL;
}
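
/*
 * Illustrative walk (hypothetical address): looking up level 2 for
 * addr = 0x40200000 with a level-0 start indexes the tables with
 *
 *	Lv0 idx = (0x40200000 >> 39) & 0x1FF = 0
 *	Lv1 idx = (0x40200000 >> 30) & 0x1FF = 1
 *	Lv2 idx = (0x40200000 >> 21) & 0x1FF = 1
 *
 * and returns a pointer to that Lv2 entry, or NULL if an intermediate
 * entry is not a table descriptor.
 */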

/* Returns a freshly allocated, zeroed table of 512 entries */
static u64 *create_table(void)
{
	u64 *new_table = (u64 *)gd->arch.tlb_fillptr;
	u64 pt_len = MAX_PTE_ENTRIES * sizeof(u64);

	/* Allocate MAX_PTE_ENTRIES pte entries */
	gd->arch.tlb_fillptr += pt_len;

	if (gd->arch.tlb_fillptr - gd->arch.tlb_addr > gd->arch.tlb_size)
		panic("Insufficient RAM for page table: 0x%lx > 0x%lx. "
		      "Please increase the size in get_page_table_size()",
		      gd->arch.tlb_fillptr - gd->arch.tlb_addr,
		      gd->arch.tlb_size);

	/* Mark all entries as invalid */
	memset(new_table, 0, pt_len);

	return new_table;
}

static void set_pte_table(u64 *pte, u64 *table)
{
	/* Point *pte to the new table */
	debug("Setting %p to addr=%p\n", pte, table);
	*pte = PTE_TYPE_TABLE | (ulong)table;
}

/* Splits a block PTE into a table of subpage PTEs spanning the old block */
static void split_block(u64 *pte, int level)
{
	u64 old_pte = *pte;
	u64 *new_table;
	u64 i = 0;
	/* level describes the parent level, we need the child ones */
	int levelshift = level2shift(level + 1);

	if (pte_type(pte) != PTE_TYPE_BLOCK)
		panic("PTE %p (%llx) is not a block. Some driver code wants to "
		      "modify dcache settings for a range not covered in "
		      "mem_map.", pte, old_pte);

	new_table = create_table();
	debug("Splitting pte %p (%llx) into %p\n", pte, old_pte, new_table);

	for (i = 0; i < MAX_PTE_ENTRIES; i++) {
		new_table[i] = old_pte | (i << levelshift);

		/* Level 3 block PTEs have the table type */
		if ((level + 1) == 3)
			new_table[i] |= PTE_TYPE_TABLE;

		debug("Setting new_table[%lld] = %llx\n", i, new_table[i]);
	}

	/* Set the new table into effect */
	set_pte_table(pte, new_table);
}
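
/*
 * Illustrative example: splitting a 2 MiB block PTE at level 2 creates
 * one new level-3 table whose 512 entries each cover 4 KiB:
 *
 *	new_table[0] = old_pte | (0 << 12) | PTE_TYPE_TABLE;
 *	new_table[1] = old_pte | (1 << 12) | PTE_TYPE_TABLE;
 *	...
 *
 * The attributes are inherited unchanged; only the output address and
 * (for level 3) the descriptor type differ per entry.
 */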

/* Add one mm_region map entry to the page tables */
static void add_map(struct mm_region *map)
{
	u64 *pte;
	u64 virt = map->virt;
	u64 phys = map->phys;
	u64 size = map->size;
	u64 attrs = map->attrs | PTE_TYPE_BLOCK | PTE_BLOCK_AF;
	u64 blocksize;
	int level;
	u64 *new_table;

	while (size) {
		pte = find_pte(virt, 0);
		if (pte && (pte_type(pte) == PTE_TYPE_FAULT)) {
			debug("Creating table for virt 0x%llx\n", virt);
			new_table = create_table();
			set_pte_table(pte, new_table);
		}

		for (level = 1; level < 4; level++) {
			pte = find_pte(virt, level);
			if (!pte)
				panic("pte not found\n");

			blocksize = 1ULL << level2shift(level);
			debug("Checking if pte fits for virt=%llx size=%llx blocksize=%llx\n",
			      virt, size, blocksize);
			if (size >= blocksize && !(virt & (blocksize - 1))) {
				/* Page fits, create block PTE */
				debug("Setting PTE %p to block virt=%llx\n",
				      pte, virt);
				if (level == 3)
					*pte = phys | attrs | PTE_TYPE_PAGE;
				else
					*pte = phys | attrs;
				virt += blocksize;
				phys += blocksize;
				size -= blocksize;
				break;
			} else if (pte_type(pte) == PTE_TYPE_FAULT) {
				/* Page doesn't fit, create subpages */
				debug("Creating subtable for virt 0x%llx blksize=%llx\n",
				      virt, blocksize);
				new_table = create_table();
				set_pte_table(pte, new_table);
			} else if (pte_type(pte) == PTE_TYPE_BLOCK) {
				debug("Split block into subtable for virt 0x%llx blksize=0x%llx\n",
				      virt, blocksize);
				split_block(pte, level);
			}
		}
	}
}
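
/*
 * Illustrative only: a board describes its address space in a mem_map
 * array like the hypothetical one below (terminated by an all-zero
 * entry); setup_pgtables() then calls add_map() once per region.
 *
 *	struct mm_region example_mem_map[] = {
 *		{
 *			.virt = 0x0UL,
 *			.phys = 0x0UL,
 *			.size = 0x80000000UL,
 *			.attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
 *				 PTE_BLOCK_INNER_SHARE,
 *		}, {
 *			.virt = 0x80000000UL,
 *			.phys = 0x80000000UL,
 *			.size = 0x40000000UL,
 *			.attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
 *				 PTE_BLOCK_NON_SHARE |
 *				 PTE_BLOCK_PXN | PTE_BLOCK_UXN,
 *		}, {
 *			0,
 *		}
 *	};
 */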

enum pte_type {
	PTE_INVAL,
	PTE_BLOCK,
	PTE_LEVEL,
};

/*
 * This is a recursively called function to count the number of
 * page tables we need to cover a particular PTE range. If you
 * call this with level = -1 you basically get the full 48 bit
 * coverage.
 */
static int count_required_pts(u64 addr, int level, u64 maxaddr)
{
	int levelshift = level2shift(level);
	u64 levelsize = 1ULL << levelshift;
	u64 levelmask = levelsize - 1;
	u64 levelend = addr + levelsize;
	int r = 0;
	int i;
	enum pte_type pte_type = PTE_INVAL;

	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++) {
		struct mm_region *map = &mem_map[i];
		u64 start = map->virt;
		u64 end = start + map->size;

		/* Check if the PTE would overlap with the map */
		if (max(addr, start) <= min(levelend, end)) {
			start = max(addr, start);
			end = min(levelend, end);

			/* We need a sub-pt for this level */
			if ((start & levelmask) || (end & levelmask)) {
				pte_type = PTE_LEVEL;
				break;
			}

			/* Lv0 can not do block PTEs, so do levels here too */
			if (level <= 0) {
				pte_type = PTE_LEVEL;
				break;
			}

			/* PTE is active, but fits into a block */
			pte_type = PTE_BLOCK;
		}
	}

	/*
	 * Block PTEs at this level are already covered by the parent page
	 * table, so we only need to count sub page tables.
	 */
	if (pte_type == PTE_LEVEL) {
		int sublevel = level + 1;
		u64 sublevelsize = 1ULL << level2shift(sublevel);

		/* Account for the new sub page table ... */
		r = 1;

		/* ... and for all child page tables that one might cover */
		for (i = 0; i < MAX_PTE_ENTRIES; i++) {
			r += count_required_pts(addr, sublevel, maxaddr);
			addr += sublevelsize;

			if (addr >= maxaddr) {
				/*
				 * We reached the end of address space, no
				 * need to look any further.
				 */
				break;
			}
		}
	}

	return r;
}

/* Returns the estimated required size of all page tables */
__weak u64 get_page_table_size(void)
{
	u64 one_pt = MAX_PTE_ENTRIES * sizeof(u64);
	u64 size = 0;
	u64 va_bits;
	int start_level = 0;

	get_tcr(0, NULL, &va_bits);
	if (va_bits < 39)
		start_level = 1;

	/* Account for all page tables we would need for the memory map */
	size = one_pt * count_required_pts(0, start_level - 1, 1ULL << va_bits);

	/*
	 * We need to duplicate our page table once to have an emergency pt to
	 * resort to when splitting page tables later on
	 */
	size *= 2;

	/*
	 * We may need to split page tables later on if dcache settings change,
	 * so reserve up to 4 (random pick) page tables for that.
	 */
	size += one_pt * 4;

	return size;
}
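
/*
 * Worked example (hypothetical memory map): one page table is
 * 512 * 8 = 4 KiB. If count_required_pts() reports 3 tables, the
 * estimate is 3 * 4 KiB, doubled to 24 KiB for the emergency copy,
 * plus 4 spare tables (16 KiB) for later splits: 40 KiB in total.
 */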

void setup_pgtables(void)
{
	int i;

	if (!gd->arch.tlb_fillptr || !gd->arch.tlb_addr)
		panic("Page table pointer not setup.");

	/*
	 * Allocate the first level we're on with invalid entries.
	 * If the starting level is 0 (va_bits >= 39), then this is our
	 * Lv0 page table, otherwise it's the entry Lv1 page table.
	 */
	create_table();

	/* Now add all MMU table entries one after another to the table */
	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
		add_map(&mem_map[i]);
}

static void setup_all_pgtables(void)
{
	u64 tlb_addr = gd->arch.tlb_addr;
	u64 tlb_size = gd->arch.tlb_size;

	/* Reset the fill ptr */
	gd->arch.tlb_fillptr = tlb_addr;

	/* Create normal system page tables */
	setup_pgtables();

	/* Create emergency page tables in the space that is left over */
	gd->arch.tlb_size -= (uintptr_t)gd->arch.tlb_fillptr -
			     (uintptr_t)gd->arch.tlb_addr;
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	setup_pgtables();
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	gd->arch.tlb_addr = tlb_addr;
	gd->arch.tlb_size = tlb_size;
}

/* To activate the MMU we need to set up virtual memory */
__weak void mmu_setup(void)
{
	int el;

	/* Set up page tables only once */
	if (!gd->arch.tlb_fillptr)
		setup_all_pgtables();

	el = current_el();
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
			  MEMORY_ATTRIBUTES);

	/* Enable the mmu */
	set_sctlr(get_sctlr() | CR_M);
}
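
/*
 * Typical call flow, for orientation: board code calls enable_caches()
 * (see the bottom of this file), which calls dcache_enable(); if the
 * MMU is still off, dcache_enable() invalidates the caches and TLBs
 * and calls mmu_setup() before finally setting SCTLR.C.
 */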

/*
 * Performs an invalidation of the entire data cache at all levels
 */
void invalidate_dcache_all(void)
{
	__asm_invalidate_dcache_all();
	__asm_invalidate_l3_dcache();
}

/*
 * Performs a clean & invalidation of the entire data cache at all
 * levels. This function needs to be inline to avoid using the stack.
 * __asm_flush_l3_dcache returns a timeout status.
 */
inline void flush_dcache_all(void)
{
	int ret;

	__asm_flush_dcache_all();
	ret = __asm_flush_l3_dcache();
	if (ret)
		debug("flushing dcache returned 0x%x\n", ret);
	else
		debug("flushed dcache successfully\n");
}

#ifndef CONFIG_SYS_DISABLE_DCACHE_OPS
/*
 * Invalidates range in all levels of D-cache/unified cache
 */
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_invalidate_dcache_range(start, stop);
}

/*
 * Flush range (clean & invalidate) from all levels of D-cache/unified cache
 */
void flush_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_flush_dcache_range(start, stop);
}
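
/*
 * Illustrative sketch (not from this file): a driver handing a buffer
 * to a non-coherent DMA engine would typically bracket the transfer as
 * below, where buf, len and start_dma()/wait_dma() are hypothetical:
 *
 *	flush_dcache_range((ulong)buf, (ulong)buf + len);	// CPU -> device
 *	start_dma(buf, len);
 *	wait_dma();
 *	invalidate_dcache_range((ulong)buf, (ulong)buf + len);	// device -> CPU
 */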
#else
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
}

void flush_dcache_range(unsigned long start, unsigned long stop)
{
}
#endif

void dcache_enable(void)
{
	/* The data cache is not active unless the mmu is enabled */
	if (!(get_sctlr() & CR_M)) {
		invalidate_dcache_all();
		__asm_invalidate_tlb_all();
		mmu_setup();
	}

	set_sctlr(get_sctlr() | CR_C);
}

void dcache_disable(void)
{
	uint32_t sctlr;

	sctlr = get_sctlr();

	/* If the cache is already disabled there is nothing to do */
	if (!(sctlr & CR_C))
		return;

	set_sctlr(sctlr & ~(CR_C | CR_M));

	flush_dcache_all();
	__asm_invalidate_tlb_all();
}

int dcache_status(void)
{
	return (get_sctlr() & CR_C) != 0;
}

u64 *__weak arch_get_page_table(void)
{
	puts("No page table offset defined\n");

	return NULL;
}

static bool is_aligned(u64 addr, u64 size, u64 align)
{
	return !(addr & (align - 1)) && !(size & (align - 1));
}

/* Use flag to indicate if attrs has more than d-cache attributes */
static u64 set_one_region(u64 start, u64 size, u64 attrs, bool flag, int level)
{
	int levelshift = level2shift(level);
	u64 levelsize = 1ULL << levelshift;
	u64 *pte = find_pte(start, level);

	/* Can we just modify the current level block PTE? */
	if (is_aligned(start, size, levelsize)) {
		if (flag) {
			*pte &= ~PMD_ATTRMASK;
			*pte |= attrs & PMD_ATTRMASK;
		} else {
			*pte &= ~PMD_ATTRINDX_MASK;
			*pte |= attrs & PMD_ATTRINDX_MASK;
		}
		debug("Set attrs=%llx pte=%p level=%d\n", attrs, pte, level);

		return levelsize;
	}

	/* The region is unaligned for this level */
	debug("addr=%llx level=%d pte=%p (%llx)\n", start, level, pte, *pte);

	/* Maybe we need to split the block into a table */
	if (pte_type(pte) == PTE_TYPE_BLOCK)
		split_block(pte, level);

	/* And then double-check it became a table or already is one */
	if (pte_type(pte) != PTE_TYPE_TABLE)
		panic("PTE %p (%llx) for addr=%llx should be a table",
		      pte, *pte, start);

	/* Roll on to the next level */
	return 0;
}

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
	u64 attrs = PMD_ATTRINDX(option);
	u64 real_start = start;
	u64 real_size = size;

	debug("start=%lx size=%lx\n", (ulong)start, (ulong)size);

	if (!gd->arch.tlb_emerg)
		panic("Emergency page table not setup.");

	/*
	 * We can not modify page tables that we're currently running on,
	 * so we first need to switch to the "emergency" page tables where
	 * we can safely modify our primary page tables and then switch back
	 */
	__asm_switch_ttbr(gd->arch.tlb_emerg);

	/*
	 * Loop through the address range until we find a page granule that
	 * fits our alignment constraints, then set it to the new cache
	 * attributes
	 */
	while (size > 0) {
		int level;
		u64 r;

		for (level = 1; level < 4; level++) {
			/* Set d-cache attributes only */
			r = set_one_region(start, size, attrs, false, level);
			if (r) {
				/* PTE successfully replaced */
				size -= r;
				start += r;
				break;
			}
		}
	}

	/* We're done modifying page tables, switch back to our primary ones */
	__asm_switch_ttbr(gd->arch.tlb_addr);

	/*
	 * Make sure there's nothing stale in the d-cache for a region that
	 * might have changed cache attributes
	 */
	flush_dcache_range(real_start, real_start + real_size);
}
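
/*
 * Illustrative use (buf_base and buf_size are hypothetical): turn the
 * cache off for a buffer shared with a device that is not coherent:
 *
 *	mmu_set_region_dcache_behaviour(buf_base, buf_size, DCACHE_OFF);
 *
 * Address and size should be aligned to at least a 4 KiB page; blocks
 * covering the range are split on demand via split_block().
 */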

/*
 * Modify MMU table for a region with updated PXN/UXN/Memory type/valid bits.
 * The procedure is break-before-make: the region is first set to invalid in
 * the page tables and the TLBs, then rewritten with the new attributes. The
 * target virtual address must be aligned to a page size larger than or equal
 * to the size of the region specified.
 */
void mmu_change_region_attr(phys_addr_t addr, size_t siz, u64 attrs)
{
	int level;
	u64 r, size, start;

	start = addr;
	size = siz;
	/*
	 * Loop through the address range until we find a page granule that
	 * fits our alignment constraints, then set it to "invalid".
	 */
	while (size > 0) {
		for (level = 1; level < 4; level++) {
			/* Set PTE to fault */
			r = set_one_region(start, size, PTE_TYPE_FAULT, true,
					   level);
			if (r) {
				/* PTE successfully invalidated */
				size -= r;
				start += r;
				break;
			}
		}
	}

	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);
	__asm_invalidate_tlb_all();

	/*
	 * Loop through the address range until we find a page granule that
	 * fits our alignment constraints, then set it to the new attributes.
	 */
	start = addr;
	size = siz;
	while (size > 0) {
		for (level = 1; level < 4; level++) {
			/* Set PTE to new attributes */
			r = set_one_region(start, size, attrs, true, level);
			if (r) {
				/* PTE successfully updated */
				size -= r;
				start += r;
				break;
			}
		}
	}
	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);
	__asm_invalidate_tlb_all();
}
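
/*
 * Illustrative only (hypothetical region): mark a 2 MiB region of
 * normal memory execute-never after loading untrusted data into it:
 *
 *	mmu_change_region_attr(addr, 0x200000,
 *			       PTE_BLOCK_MEMTYPE(MT_NORMAL) |
 *			       PTE_BLOCK_INNER_SHARE | PTE_BLOCK_AF |
 *			       PTE_TYPE_BLOCK | PTE_BLOCK_PXN |
 *			       PTE_BLOCK_UXN);
 */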

#else	/* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */

/*
 * For SPL builds, we may want to not have dcache enabled. Any real U-Boot
 * run, however, really wants to have dcache and the MMU active. Check that
 * everything is sane and give the developer a hint if it isn't.
 */
#ifndef CONFIG_SPL_BUILD
#error Please describe your MMU layout in CONFIG_SYS_MEM_MAP and enable dcache.
#endif

void invalidate_dcache_all(void)
{
}

void flush_dcache_all(void)
{
}

void dcache_enable(void)
{
}

void dcache_disable(void)
{
}

int dcache_status(void)
{
	return 0;
}

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
}

#endif

#if !CONFIG_IS_ENABLED(SYS_ICACHE_OFF)

void icache_enable(void)
{
	invalidate_icache_all();
	set_sctlr(get_sctlr() | CR_I);
}

void icache_disable(void)
{
	set_sctlr(get_sctlr() & ~CR_I);
}

int icache_status(void)
{
	return (get_sctlr() & CR_I) != 0;
}

void invalidate_icache_all(void)
{
	__asm_invalidate_icache_all();
	__asm_invalidate_l3_icache();
}

#else	/* !CONFIG_IS_ENABLED(SYS_ICACHE_OFF) */

void icache_enable(void)
{
}

void icache_disable(void)
{
}

int icache_status(void)
{
	return 0;
}

void invalidate_icache_all(void)
{
}

#endif

/*
 * Enable the instruction and data caches. Boards that need a different
 * bring-up sequence can override this.
 */
void __weak enable_caches(void)
{
	icache_enable();
	dcache_enable();
}