linux/arch/xtensa/mm/mmu.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Xtensa MMU support
 *
 * Extracted from init.c
 */
#include <linux/memblock.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/cache.h>

#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/initialize_mmu.h>
#include <asm/io.h>

#if defined(CONFIG_HIGHMEM)
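/*
 * Memblock-allocate one PTE per page for a virtually contiguous range of
 * n_pages pages starting at vaddr (rounded up to whole PTE tables), clear
 * the PTEs, and hook them into the covering PMD entries. Returns a pointer
 * to the first PTE so the caller can keep a handle on the new table.
 */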
static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages)
{
        pmd_t *pmd = pmd_off_k(vaddr);
        pte_t *pte;
        unsigned long i;

        n_pages = ALIGN(n_pages, PTRS_PER_PTE);

        pr_debug("%s: vaddr: 0x%08lx, n_pages: %ld\n",
                 __func__, vaddr, n_pages);

        pte = memblock_alloc_low(n_pages * sizeof(pte_t), PAGE_SIZE);
        if (!pte)
                panic("%s: Failed to allocate %lu bytes align=%lx\n",
                      __func__, n_pages * sizeof(pte_t), PAGE_SIZE);

        for (i = 0; i < n_pages; ++i)
                pte_clear(NULL, 0, pte + i);

        for (i = 0; i < n_pages; i += PTRS_PER_PTE, ++pmd) {
                pte_t *cur_pte = pte + i;

                BUG_ON(!pmd_none(*pmd));
                set_pmd(pmd, __pmd(((unsigned long)cur_pte) & PAGE_MASK));
                BUG_ON(cur_pte != pte_offset_kernel(pmd, 0));
                pr_debug("%s: pmd: 0x%p, pte: 0x%p\n",
                         __func__, pmd, cur_pte);
        }
        return pte;
}
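/*
 * Populate page tables for the fixmap region; the BUILD_BUG_ON below
 * ensures the fixmap starts above the TLBTEMP scratch area.
 */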
static void __init fixedrange_init(void)
{
        BUILD_BUG_ON(FIXADDR_START < TLBTEMP_BASE_1 + TLBTEMP_SIZE);
        init_pmd(FIXADDR_START, __end_of_fixed_addresses);
}
#endif

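/*
 * paging_init() only has work to do when HIGHMEM is configured: it sets up
 * the fixmap and persistent-kmap (PKMAP) page tables and initializes kmap.
 */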
void __init paging_init(void)
{
#ifdef CONFIG_HIGHMEM
        fixedrange_init();
        pkmap_page_table = init_pmd(PKMAP_BASE, LAST_PKMAP);
        kmap_init();
#endif
}

/*
 * Flush the MMU and reset associated registers to default values.
 */
void init_mmu(void)
{
#if !(XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY)
        /*
         * Writing zeros to the instruction and data TLBCFG special
         * registers ensures that valid values exist in each register.
         *
         * For existing PGSZID<w> fields, zero selects the first element
         * of the page-size array.  For nonexistent PGSZID<w> fields,
         * zero is the best value to write.  Also, when changing PGSZID<w>
         * fields, the corresponding TLB must be flushed.
         */
        set_itlbcfg_register(0);
        set_dtlbcfg_register(0);
#endif
        init_kio();
        local_flush_tlb_all();

        /* Set the RASID register to a known value. */

        set_rasid_register(ASID_INSERT(ASID_USER_FIRST));

        /*
         * Set the PTEVADDR special register to the start of the page
         * table, which is in kernel-mappable space (i.e. not statically
         * mapped).  This register's value is undefined on reset.
         */
        set_ptevaddr_register(XCHAL_PAGE_TABLE_VADDR);
}

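/*
 * Rewrite the cached and bypass KIO mappings so they point at the current
 * xtensa_kio_paddr. The low bits of each entry address select the TLB way;
 * the "+ 6" here is assumed to pick the way that initialize_mmu.h uses for
 * the large KIO entries.
 */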
void init_kio(void)
{
#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_USE_OF)
        /*
         * Update the IO area mapping in case xtensa_kio_paddr has changed.
         */
        write_dtlb_entry(__pte(xtensa_kio_paddr + CA_WRITEBACK),
                        XCHAL_KIO_CACHED_VADDR + 6);
        write_itlb_entry(__pte(xtensa_kio_paddr + CA_WRITEBACK),
                        XCHAL_KIO_CACHED_VADDR + 6);
        write_dtlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS),
                        XCHAL_KIO_BYPASS_VADDR + 6);
        write_itlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS),
                        XCHAL_KIO_BYPASS_VADDR + 6);
#endif
}