linux/arch/m68k/mm/mcfmmu.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Based upon linux/arch/m68k/mm/sun3mmu.c
 * Based upon linux/arch/ppc/mm/mmu_context.c
 *
 * Implementations of mm routines specific to the ColdFire MMU.
 *
 * Copyright (c) 2008 Freescale Semiconductor, Inc.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/memblock.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/mcf_pgalloc.h>
#include <asm/tlbflush.h>

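/*
 * KMAPAREA() tests whether an address falls in the kernel mapping
 * window between VMALLOC_START and KMAP_END; faults on such addresses
 * are resolved against init_mm rather than the current task's mm.
 */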
#define KMAPAREA(x)	(((x) >= VMALLOC_START) && ((x) < KMAP_END))

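/*
 * Context (ASID) allocation state: a bitmap of contexts in use, the
 * next context number to hand out, a count of free contexts, and a
 * reverse map from context number to the mm that owns it.
 */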
mm_context_t next_mmu_context;
unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
atomic_t nr_free_contexts;
struct mm_struct *context_mm[LAST_CONTEXT+1];
unsigned long num_pages;

/*
 * ColdFire paging_init, derived from the sun3 version: allocate the
 * empty zero page, build kernel page tables covering PAGE_OFFSET up
 * to high_memory, and hand the zone sizes to the core mm.
 */
void __init paging_init(void)
{
	pgd_t *pg_dir;
	pte_t *pg_table;
	unsigned long address, size;
	unsigned long next_pgtable, bootmem_end;
	unsigned long zones_size[MAX_NR_ZONES];
	enum zone_type zone;
	int i;

	empty_zero_page = (void *) memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!empty_zero_page)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	pg_dir = swapper_pg_dir;
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/* One PTE per physical page, rounded up to whole pages. */
	size = num_pages * sizeof(pte_t);
	size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
	next_pgtable = (unsigned long) memblock_alloc(size, PAGE_SIZE);
	if (!next_pgtable)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, size, PAGE_SIZE);

	bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
	pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;

	address = PAGE_OFFSET;
	while (address < (unsigned long)high_memory) {
		pg_table = (pte_t *) next_pgtable;
		next_pgtable += PTRS_PER_PTE * sizeof(pte_t);
		pgd_val(*pg_dir) = (unsigned long) pg_table;
		pg_dir++;

		/* Fill the page table, clearing entries past high_memory. */
		for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
			pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
			if (address >= (unsigned long) high_memory)
				pte_val(pte) = 0;

			set_pte(pg_table, pte);
			address += PAGE_SIZE;
		}
	}

	current->mm = NULL;

	/* All memory goes into ZONE_DMA; the other zones stay empty. */
	for (zone = 0; zone < MAX_NR_ZONES; zone++)
		zones_size[zone] = 0x0;
	zones_size[ZONE_DMA] = num_pages;
	free_area_init(zones_size);
}

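/*
 * Handle a TLB miss: walk the page tables for the faulting address
 * and, if a valid mapping exists, load a new entry into the ITLB or
 * DTLB via the MMUTR/MMUDR/MMUOR registers. Returns 0 on success, or
 * -1 to make the caller fall back to the full page fault path.
 */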
int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
{
	unsigned long flags, mmuar, mmutr;
	struct mm_struct *mm;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int asid;

	local_irq_save(flags);

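	/*
	 * For a data miss the faulting address is read from the MMUAR
	 * register; for an instruction miss it is derived from the
	 * fault PC plus the extension word scaled to longwords.
	 */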
	mmuar = (dtlb) ? mmu_read(MMUAR) :
		regs->pc + (extension_word * sizeof(long));

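	/*
	 * Kernel addresses in the KMAP area live in init_mm; everything
	 * else is looked up in the faulting task's mm.
	 */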
	mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm;
	if (!mm) {
		local_irq_restore(flags);
		return -1;
	}

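	/*
	 * Walk the two-level page table; any missing level means there
	 * is no mapping, so punt to the full page fault handler.
	 */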
	pgd = pgd_offset(mm, mmuar);
	if (pgd_none(*pgd)) {
		local_irq_restore(flags);
		return -1;
	}

	pmd = pmd_offset(pgd, mmuar);
	if (pmd_none(*pmd)) {
		local_irq_restore(flags);
		return -1;
	}

	pte = (KMAPAREA(mmuar)) ? pte_offset_kernel(pmd, mmuar)
				: pte_offset_map(pmd, mmuar);
	if (pte_none(*pte) || !pte_present(*pte)) {
		local_irq_restore(flags);
		return -1;
	}

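	/*
	 * On a write fault, refuse read-only mappings so the generic
	 * fault code can sort them out (e.g. COW); otherwise mark the
	 * page dirty up front.
	 */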
	if (write) {
		if (!pte_write(*pte)) {
			local_irq_restore(flags);
			return -1;
		}
		set_pte(pte, pte_mkdirty(*pte));
	}

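	/*
	 * Mark the page referenced. Clean user pages are loaded
	 * write-protected so that the first write faults again and
	 * sets the dirty bit.
	 */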
	set_pte(pte, pte_mkyoung(*pte));
	asid = mm->context & 0xff;
	if (!pte_dirty(*pte) && !KMAPAREA(mmuar))
		set_pte(pte, pte_wrprotect(*pte));

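	/*
	 * Load the TLB entry: MMUTR takes the virtual page, ASID and
	 * valid bit, MMUDR takes the physical page and protection bits,
	 * and the MMUOR write commits the entry to the ITLB or DTLB.
	 */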
	mmutr = (mmuar & PAGE_MASK) | (asid << MMUTR_IDN) | MMUTR_V;
	if ((mmuar < TASK_UNMAPPED_BASE) || (mmuar >= TASK_SIZE))
		mmutr |= (pte->pte & CF_PAGE_MMUTR_MASK) >> CF_PAGE_MMUTR_SHIFT;
	mmu_write(MMUTR, mmutr);

	mmu_write(MMUDR, (pte_val(*pte) & PAGE_MASK) |
		((pte->pte) & CF_PAGE_MMUDR_MASK) | MMUDR_SZ_8KB | MMUDR_X);

	if (dtlb)
		mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA);
	else
		mmu_write(MMUOR, MMUOR_ITLB | MMUOR_ACC | MMUOR_UAA);

	local_irq_restore(flags);
	return 0;
}

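/*
 * Register the RAM range with memblock, reserve the kernel image, and
 * set up the pfn limits and node data used by the core mm.
 */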
void __init cf_bootmem_alloc(void)
{
	unsigned long memstart;

	/* _rambase and _ramend will be naturally page aligned */
	m68k_memory[0].addr = _rambase;
	m68k_memory[0].size = _ramend - _rambase;

	memblock_add(m68k_memory[0].addr, m68k_memory[0].size);

	/* compute total pages in system */
	num_pages = PFN_DOWN(_ramend - _rambase);

	/* page frame number limits */
	memstart = PAGE_ALIGN(_ramstart);
	min_low_pfn = PFN_DOWN(_rambase);
	max_pfn = max_low_pfn = PFN_DOWN(_ramend);
	high_memory = (void *)_ramend;

	/* Reserve kernel text/data/bss */
	memblock_reserve(_rambase, memstart - _rambase);

	m68k_virt_to_node_shift = fls(_ramend - 1) - 6;
	module_fixup(NULL, __start_fixup, __stop_fixup);

	/* setup node data */
	m68k_setup_node(0);
}


/*
 * Initialize the MMU context management state.
 * The following was taken from arch/ppc/mmu_context.c
 */
void __init cf_mmu_context_init(void)
{
	/*
	 * Some processors have too few contexts to reserve one for
	 * init_mm, and require using context 0 for a normal task.
	 * Other processors reserve the use of context zero for the kernel.
	 * This code assumes FIRST_CONTEXT < 32.
	 */
	/* Mark all contexts below FIRST_CONTEXT as permanently in use. */
	context_map[0] = (1 << FIRST_CONTEXT) - 1;
	next_mmu_context = FIRST_CONTEXT;
	atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
}

/*
 * Steal a context from a task that has one at the moment.
 * This is only used on 8xx and 4xx and we presently assume that
 * they don't do SMP.  If they do then this will have to check
 * whether the MM we steal is in use.
 * We also assume that this is only used on systems that don't
 * use an MMU hash table - this is true for 8xx and 4xx.
 * This isn't an LRU system, it just frees up each context in
 * turn (sort-of pseudo-random replacement :).  This would be the
 * place to implement an LRU scheme if anyone was motivated to do it.
 *  -- paulus
 */
void steal_context(void)
{
	struct mm_struct *mm;
	/*
	 * free up context `next_mmu_context'
	 * if we shouldn't free context 0, don't...
	 */
	if (next_mmu_context < FIRST_CONTEXT)
		next_mmu_context = FIRST_CONTEXT;
	mm = context_mm[next_mmu_context];
	flush_tlb_mm(mm);
	destroy_context(mm);
}