/*
 *  linux/arch/arm/mm/nommu.c
 *
 * ARM uCLinux supporting functions.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/io.h>
#include <linux/memblock.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/mach/arch.h>

#include "mm.h"

void __init arm_mm_memblock_reserve(void)
{
        /*
         * Register the exception vector page.  On some platforms the
         * exception vectors are trapped in DRAM; if that page were left
         * available, alloc_page() would appear to fail because the page
         * it hands out has an address of 0, even though the returned
         * pointer is not NULL.  Reserve it so it is never allocated.
         */
        memblock_reserve(CONFIG_VECTORS_BASE, PAGE_SIZE);
}

/*
 * paging_init() initialises the zone memory maps.  With no MMU there
 * are no page tables, zero page or bad pages to set up, so this
 * reduces to bootmem_init().
 */
void __init paging_init(struct machine_desc *mdesc)
{
        bootmem_init();
}

/*
 * We don't need to do anything here for nommu machines.
 */
void setup_mm_for_reboot(char mode)
{
}

void flush_dcache_page(struct page *page)
{
        /*
         * Without an MMU there are no user-space aliases of this page;
         * simply clean and invalidate its kernel mapping in the D-cache.
         */
        __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
EXPORT_SYMBOL(flush_dcache_page);
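
/*
 * Illustrative sketch only (not part of the original file): the usual
 * caller pattern is to modify a page through its kernel mapping and
 * then call flush_dcache_page() so the new contents are pushed out of
 * the D-cache.  The helper name and arguments below are made up.
 */
static inline void example_fill_page(struct page *page,
                                     const void *buf, size_t len)
{
        memcpy(page_address(page), buf, len);   /* write via kernel mapping */
        flush_dcache_page(page);                /* then flush the D-cache */
}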

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                       unsigned long uaddr, void *dst, const void *src,
                       unsigned long len)
{
        memcpy(dst, src, len);
        /*
         * If the destination may be executed (e.g. a debugger planting a
         * breakpoint), make the I-cache coherent with the new contents.
         */
        if (vma->vm_flags & VM_EXEC)
                __cpuc_coherent_user_range(uaddr, uaddr + len);
}
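
/*
 * Illustrative sketch only (not part of the original file):
 * copy_to_user_page() is normally reached via access_process_vm(), e.g.
 * when a debugger plants a breakpoint, which is why the VM_EXEC branch
 * above maintains I-cache coherency.  The helper below is hypothetical.
 */
static inline int example_poke_text(struct task_struct *tsk,
                                    unsigned long addr, u32 insn)
{
        /* write one instruction into another task's text mapping */
        return access_process_vm(tsk, addr, &insn, sizeof(insn), 1);
}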

void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset,
                                size_t size, unsigned int mtype)
{
        /*
         * With no MMU there is no remapping: physical addresses above
         * 4GiB cannot be reached, everything else is used as-is.
         */
        if (pfn >= (0x100000000ULL >> PAGE_SHIFT))
                return NULL;
        return (void __iomem *) (offset + (pfn << PAGE_SHIFT));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);

void __iomem *__arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset,
                           size_t size, unsigned int mtype, void *caller)
{
        return __arm_ioremap_pfn(pfn, offset, size, mtype);
}

void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size,
                            unsigned int mtype)
{
        /* Identity "mapping": the physical address is the virtual one. */
        return (void __iomem *)phys_addr;
}
EXPORT_SYMBOL(__arm_ioremap);
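
/*
 * Illustrative sketch only (not part of the original file): with no MMU
 * the "mapping" is the physical address itself, so a driver's usual
 * map/read/unmap sequence still works.  The base address and register
 * offset are placeholders; mtype is ignored by __arm_ioremap() here.
 */
static inline u32 example_read_reg(void)
{
        void __iomem *regs = __arm_ioremap(0x40000000, 0x1000, 0);
        u32 val;

        if (!regs)
                return 0;
        val = readl(regs + 0x10);       /* placeholder register offset */
        __iounmap(regs);
        return val;
}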

void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
                                   unsigned int mtype, void *caller)
{
        return __arm_ioremap(phys_addr, size, mtype);
}

/*
 * Nothing was actually mapped, so there is nothing to tear down.
 */
void __iounmap(volatile void __iomem *addr)
{
}
EXPORT_SYMBOL(__iounmap);