// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/nommu.c
 *
 * ARM uCLinux supporting functions.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/kernel.h>

#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/sections.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/mach/arch.h>
#include <asm/cputype.h>
#include <asm/mpu.h>
#include <asm/procinfo.h>

#include "mm.h"

unsigned long vectors_base;

#ifdef CONFIG_ARM_MPU
struct mpu_rgn_info mpu_rgn_info;
#endif

#ifdef CONFIG_CPU_CP15
#ifdef CONFIG_CPU_HIGH_VECTOR
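/*
 * With CONFIG_CPU_HIGH_VECTOR the exception vectors live at the high
 * address: set SCTLR.V so the CPU traps to 0xffff0000.
 */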
unsigned long setup_vectors_base(void)
{
	unsigned long reg = get_cr();

	set_cr(reg | CR_V);
	return 0xffff0000;
}
#else /* CONFIG_CPU_HIGH_VECTOR */
/* Write exception base address to VBAR */
static inline void set_vbar(unsigned long val)
{
	asm("mcr p15, 0, %0, c12, c0, 0" : : "r" (val) : "cc");
}

/*
 * Security Extensions field, ID_PFR1 bits [7:4]; permitted values:
 * 0b0000 - not implemented, 0b0001/0b0010 - implemented.  Bits
 * [23:20], also checked below, hold the fractional Security field.
 */
static inline bool security_extensions_enabled(void)
{
	/* ID_PFR1 only exists on CPUs using the new CPUID identification scheme */
	if ((read_cpuid_id() & 0x000f0000) == 0x000f0000)
		return cpuid_feature_extract(CPUID_EXT_PFR1, 4) ||
			cpuid_feature_extract(CPUID_EXT_PFR1, 20);
	return false;
}

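/*
 * Without high vectors the base is normally 0.  When the Security
 * Extensions are implemented, VBAR exists and the vectors can instead
 * be remapped to the start of RAM (CONFIG_REMAP_VECTORS_TO_RAM);
 * without them VBAR is unavailable and remapping is impossible.
 */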
unsigned long setup_vectors_base(void)
{
	unsigned long base = 0, reg = get_cr();

	set_cr(reg & ~CR_V);
	if (security_extensions_enabled()) {
		if (IS_ENABLED(CONFIG_REMAP_VECTORS_TO_RAM))
			base = CONFIG_DRAM_BASE;
		set_vbar(base);
	} else if (IS_ENABLED(CONFIG_REMAP_VECTORS_TO_RAM)) {
		if (CONFIG_DRAM_BASE != 0)
			pr_err("Security extensions not enabled, vectors cannot be remapped to RAM, vectors base will be 0x00000000\n");
	}

	return base;
}
#endif /* CONFIG_CPU_HIGH_VECTOR */
#endif /* CONFIG_CPU_CP15 */

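/*
 * Reserve the regions the !MMU kernel relies on before the page
 * allocator takes over: the vector page(s) and physical address 0.
 */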
void __init arm_mm_memblock_reserve(void)
{
#ifndef CONFIG_CPU_V7M
	vectors_base = IS_ENABLED(CONFIG_CPU_CP15) ? setup_vectors_base() : 0;
	/*
	 * Reserve the exception vector page.  On platforms where the
	 * vectors sit at the start of DRAM, handing that page to the
	 * allocator would let alloc_page() return address 0, which
	 * callers would mistake for failure even though the page is
	 * perfectly valid.
	 */
	memblock_reserve(vectors_base, 2 * PAGE_SIZE);
#else /* ifndef CONFIG_CPU_V7M */
	/*
	 * There is no dedicated vector page on V7-M, so nothing needs to
	 * be reserved here.
	 */
#endif
	/*
	 * In any case, always ensure address 0 is never used as many things
	 * get very confused if 0 is returned as a legitimate address.
	 */
	memblock_reserve(0, 1);
}

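/*
 * The two MPU helpers below dispatch on the PMSA version advertised in
 * the PMSA field of ID_MMFR0: PMSAv7 and PMSAv8 cores each get their
 * own implementation, anything else is silently ignored.
 */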
static void __init adjust_lowmem_bounds_mpu(void)
{
	unsigned long pmsa = read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA;

	switch (pmsa) {
	case MMFR0_PMSAv7:
		pmsav7_adjust_lowmem_bounds();
		break;
	case MMFR0_PMSAv8:
		pmsav8_adjust_lowmem_bounds();
		break;
	default:
		break;
	}
}

static void __init mpu_setup(void)
{
	unsigned long pmsa = read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA;

	switch (pmsa) {
	case MMFR0_PMSAv7:
		pmsav7_setup();
		break;
	case MMFR0_PMSAv8:
		pmsav8_setup();
		break;
	default:
		break;
	}
}

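/*
 * Without paging, "lowmem" is all of DRAM: let the MPU code trim the
 * bounds it can actually cover, then derive high_memory and the
 * memblock allocation limit from the end of DRAM.
 */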
void __init adjust_lowmem_bounds(void)
{
	phys_addr_t end;

	adjust_lowmem_bounds_mpu();
	end = memblock_end_of_DRAM();
	high_memory = __va(end - 1) + 1;
	memblock_set_current_limit(end);
}

/*
 * paging_init() on !MMU: there are no page tables to set up.  Install
 * the exception vectors, program the MPU when one is present, and let
 * bootmem_init() initialise the zone memory maps.
 */
void __init paging_init(const struct machine_desc *mdesc)
{
	early_trap_init((void *)vectors_base);
	mpu_setup();
	bootmem_init();
}

/*
 * We don't need to do anything here for nommu machines.
 */
void setup_mm_for_reboot(void)
{
}

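/*
 * Cache maintenance.  With a single flat mapping there are no virtual
 * aliases to worry about, so flushing a page reduces to D-cache
 * maintenance on its kernel address.
 */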
void flush_dcache_page(struct page *page)
{
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_kernel_dcache_page(struct page *page)
{
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
EXPORT_SYMBOL(flush_kernel_dcache_page);

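/*
 * Write to a user page (e.g. on behalf of ptrace): after the copy,
 * executable mappings additionally need the I- and D-caches made
 * coherent over the written range so stale instructions are not run.
 */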
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
	memcpy(dst, src, len);
	if (vma->vm_flags & VM_EXEC)
		__cpuc_coherent_user_range(uaddr, uaddr + len);
}

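/*
 * I/O remapping on !MMU is an identity transformation: the "cookie"
 * handed back is the physical address itself.  The only check is that
 * the target PFN fits below 4GiB, since the cookie is a 32-bit
 * pointer.
 */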
void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset,
				size_t size, unsigned int mtype)
{
	if (pfn >= (0x100000000ULL >> PAGE_SHIFT))
		return NULL;
	return (void __iomem *) (offset + (pfn << PAGE_SHIFT));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);

void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
				   unsigned int mtype, void *caller)
{
	return (void __iomem *)phys_addr;
}

void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *);

void __iomem *ioremap(resource_size_t res_cookie, size_t size)
{
	return __arm_ioremap_caller(res_cookie, size, MT_DEVICE,
				    __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap);

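/*
 * A minimal usage sketch (hypothetical device: the 0x40000000 base and
 * CTRL_OFFSET register are made-up names, not defined in this file).
 * On !MMU the returned cookie is just the physical address, but
 * drivers keep the same portable pattern as with an MMU:
 *
 *	void __iomem *regs = ioremap(0x40000000, SZ_4K);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(BIT(0), regs + CTRL_OFFSET);
 *	iounmap(regs);
 */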
void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
	__alias(ioremap_cached);

void __iomem *ioremap_cached(resource_size_t res_cookie, size_t size)
{
	return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
				    __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
EXPORT_SYMBOL(ioremap_cached);

void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
{
	return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
				    __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);

#ifdef CONFIG_PCI

#include <asm/mach/map.h>

void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_UNCACHED,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(pci_remap_cfgspace);
#endif

void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
{
	return (void *)phys_addr;
}

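/* Nothing was really mapped, so unmapping is a no-op. */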
void __iounmap(volatile void __iomem *addr)
{
}
EXPORT_SYMBOL(__iounmap);

void (*arch_iounmap)(volatile void __iomem *);

void iounmap(volatile void __iomem *addr)
{
}
EXPORT_SYMBOL(iounmap);