linux/arch/metag/mm/mmu-meta2.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008,2009,2010,2011 Imagination Technologies Ltd.
 *
 * Meta 2 enhanced mode MMU handling code.
 *
 */

#include <linux/mm.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/bootmem.h>
#include <linux/syscore_ops.h>

#include <asm/mmu.h>
#include <asm/mmu_context.h>

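/*
 * Read the first level (pgd) entry that covers @vaddr. Returns the raw
 * entry value, or 0 if @vaddr falls outside the linear window described
 * by the MMCU_TnLocal_TABLE_PHYS0 register.
 */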
unsigned long mmu_read_first_level_page(unsigned long vaddr)
{
        unsigned int cpu = hard_processor_id();
        unsigned long offset, linear_base, linear_limit;
        unsigned int phys0;
        pgd_t *pgd, entry;

        /* Global space addresses carry the top bit set; strip it to get
         * the table offset. */
        if (is_global_space(vaddr))
                vaddr &= ~0x80000000;

        offset = vaddr >> PGDIR_SHIFT;

        phys0 = metag_in32(mmu_phys0_addr(cpu));

        /* Top bit of linear base is always zero. */
        linear_base = (phys0 >> PGDIR_SHIFT) & 0x1ff;

        /* The limit field encodes a power of two size: 0 (4MB) up to 9 (2GB). */
        linear_limit = 1 << ((phys0 >> 8) & 0xf);
        linear_limit += linear_base;

        /*
         * If offset is below linear base or above the limit then no
         * mapping exists.
         */
        if (offset < linear_base || offset > linear_limit)
                return 0;

        offset -= linear_base;
        pgd = (pgd_t *)mmu_get_base();
        entry = pgd[offset];

        return pgd_val(entry);
}

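/*
 * Read the second level (pte) entry for @vaddr. The meta2 CACHERD
 * builtin returns the MMU table entry used to translate the given
 * linear address.
 */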
unsigned long mmu_read_second_level_page(unsigned long vaddr)
{
        return __builtin_meta2_cacherd((void *)(vaddr & PAGE_MASK));
}

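/*
 * Return the virtual address of this hardware thread's top level pgd
 * table within the LINSYSMEM region, as described by the
 * MMCU_TnLocal_TABLE_PHYS1 register.
 */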
unsigned long mmu_get_base(void)
{
        unsigned int cpu = hard_processor_id();
        unsigned long stride;

        stride = cpu * LINSYSMEMTnX_STRIDE;

        /*
         * Bits 18:2 of the MMCU_TnLocal_TABLE_PHYS1 register should be
         * used as an offset to the start of the top-level pgd table.
         */
        stride += (metag_in32(mmu_phys1_addr(cpu)) & 0x7fffc);

        if (is_global_space(PAGE_OFFSET))
                stride += LINSYSMEMTXG_OFFSET;

        return LINSYSMEMT0L_BASE + stride;
}

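/*
 * First level entries hold the 64 byte aligned address of a second level
 * table, hence the coarser mask; second level entries hold a page aligned
 * physical address.
 */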
#define FIRST_LEVEL_MASK        0xffffffc0
#define SECOND_LEVEL_MASK       0xfffff000
#define SECOND_LEVEL_ALIGN      64

static void repriv_mmu_tables(void)
{
        unsigned long phys0_addr;
        unsigned int g;

        /*
         * Check that all the mmu table regions are priv protected, and if not
         * fix them and emit a warning. If we left them without priv protection
         * then userland processes would have access to a 2M window into
         * physical memory near where the page tables are.
         */
        phys0_addr = MMCU_T0LOCAL_TABLE_PHYS0;
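        /* Walk the local then the global PHYS0 register of each hardware
         * thread. */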
        for (g = 0; g < 2; ++g) {
                unsigned int t, phys0;
                unsigned long flags;
                for (t = 0; t < 4; ++t) {
                        __global_lock2(flags);
                        phys0 = metag_in32(phys0_addr);
                        if ((phys0 & _PAGE_PRESENT) && !(phys0 & _PAGE_PRIV)) {
                                pr_warn("Fixing priv protection on T%d %s MMU table region\n",
                                        t,
                                        g ? "global" : "local");
                                phys0 |= _PAGE_PRIV;
                                metag_out32(phys0, phys0_addr);
                        }
                        __global_unlock2(flags);

                        phys0_addr += MMCU_TnX_TABLE_PHYSX_STRIDE;
                }

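                /* Rewind over this bank's four per-thread registers and
                 * move on to the global bank. */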
                phys0_addr += MMCU_TXG_TABLE_PHYSX_OFFSET
                            - 4*MMCU_TnX_TABLE_PHYSX_STRIDE;
        }
}

#ifdef CONFIG_METAG_SUSPEND_MEM
static void mmu_resume(void)
{
        /*
         * If a full suspend to RAM has happened then the original,
         * unprivileged MMU table settings may have been restored, so apply
         * the priv protection again.
         */
        repriv_mmu_tables();
}
#else
#define mmu_resume NULL
#endif  /* CONFIG_METAG_SUSPEND_MEM */

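/* Only resume needs handling; nothing is saved at suspend time. */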
static struct syscore_ops mmu_syscore_ops = {
        .resume  = mmu_resume,
};

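/*
 * Set up the kernel's boot page tables: mirror the hardware's existing
 * first level entries into swapper_pg_dir so that every process inherits
 * the kernel mappings, then optionally remap the kernel with 4MB pages.
 */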
void __init mmu_init(unsigned long mem_end)
{
        unsigned long entry, addr;
        pgd_t *p_swapper_pg_dir;
#ifdef CONFIG_KERNEL_4M_PAGES
        unsigned long mem_size = mem_end - PAGE_OFFSET;
        unsigned int pages = DIV_ROUND_UP(mem_size, 1 << 22);
        unsigned int second_level_entry = 0;
        unsigned long *second_level_table;
#endif

        /*
         * Copy any MMU pgd entries already in the mmu page tables over to
         * our root init process (swapper_pg_dir) map. This map is then
         * inherited by all other processes, which means all processes
         * inherit a map of the kernel space.
         */
        addr = META_MEMORY_BASE;
        entry = pgd_index(META_MEMORY_BASE);
        p_swapper_pg_dir = pgd_offset_k(0) + entry;

        while (entry < (PTRS_PER_PGD - pgd_index(META_MEMORY_BASE))) {
                unsigned long pgd_entry;
                /* copy over the current MMU value */
                pgd_entry = mmu_read_first_level_page(addr);
                pgd_val(*p_swapper_pg_dir) = pgd_entry;

                p_swapper_pg_dir++;
                addr += PGDIR_SIZE;
                entry++;
        }

#ifdef CONFIG_KERNEL_4M_PAGES
        /*
         * At this point we can also map the kernel with 4MB pages to
         * reduce TLB pressure.
         */
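        /*
         * Each 4MB mapping needs only a single second level entry, but
         * second level tables must be 64 byte aligned, so reserve
         * SECOND_LEVEL_ALIGN bytes per page.
         */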
        second_level_table = alloc_bootmem_pages(SECOND_LEVEL_ALIGN * pages);

        addr = PAGE_OFFSET;
        entry = pgd_index(PAGE_OFFSET);
        p_swapper_pg_dir = pgd_offset_k(0) + entry;

        while (pages > 0) {
                unsigned long phys_addr, second_level_phys;
                pte_t *pte = (pte_t *)&second_level_table[second_level_entry];

                phys_addr = __pa(addr);

                second_level_phys = __pa(pte);

                /* Point the pgd at the second level table and select 4MB
                 * pages. */
                pgd_val(*p_swapper_pg_dir) = ((second_level_phys &
                                               FIRST_LEVEL_MASK) |
                                              _PAGE_SZ_4M |
                                              _PAGE_PRESENT);

                /* A single pte maps the whole 4MB, cacheable and writable. */
                pte_val(*pte) = ((phys_addr & SECOND_LEVEL_MASK) |
                                 _PAGE_PRESENT | _PAGE_DIRTY |
                                 _PAGE_ACCESSED | _PAGE_WRITE |
                                 _PAGE_CACHEABLE | _PAGE_KERNEL);

                p_swapper_pg_dir++;
                addr += PGDIR_SIZE;
                /* Second level tables must be 64 byte aligned. */
                second_level_entry += (SECOND_LEVEL_ALIGN /
                                       sizeof(unsigned long));
                pages--;
        }
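        /* Switch to the updated table and discard any stale translations. */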
        load_pgd(swapper_pg_dir, hard_processor_id());
        flush_tlb_all();
#endif

        repriv_mmu_tables();
        register_syscore_ops(&mmu_syscore_ops);
}