/*
 *  arch/arm/include/asm/pgtable.h
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_PGTABLE_H
#define _ASMARM_PGTABLE_H

#include <linux/const.h>
#include <asm-generic/4level-fixup.h>
#include <asm/proc-fns.h>

#ifndef CONFIG_MMU

#include "pgtable-nommu.h"

#else

#include <asm/memory.h>
#include <mach/vmalloc.h>
#include <asm/pgtable-hwdef.h>

/*
 * Just an arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after
 * physical memory until the kernel virtual memory starts.  That means
 * that any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * Note that platforms may override VMALLOC_START, but they must provide
 * VMALLOC_END.  VMALLOC_END defines the (exclusive) limit of this space,
 * which may not overlap IO space.
 */
#ifndef VMALLOC_START
#define VMALLOC_OFFSET          (8*1024*1024)
#define VMALLOC_START           (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#endif
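
/*
 * Worked example (illustrative only; the high_memory value is made up,
 * and platforms may override VMALLOC_START entirely): if high_memory
 * were 0xc8000000, then
 *
 *   (0xc8000000 + 0x00800000) & ~0x007fffff = 0xc8800000
 *
 * so VMALLOC_START would be 0xc8800000, leaving the intended 8MB hole
 * at 0xc8000000-0xc87fffff between physical memory and vmalloc space.
 */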

/*
 * Hardware-wise, we have a two level page table structure, where the first
 * level has 4096 entries, and the second level has 256 entries.  Each entry
 * is one 32-bit word.  Most of the bits in the second level entry are used
 * by hardware, and there aren't any "accessed" and "dirty" bits.
 *
 * Linux on the other hand has a three level page table structure, which can
 * be wrapped to fit a two level page table structure easily - using the PGD
 * and PTE only.  However, Linux also expects one "PTE" table per page, and
 * at least a "dirty" bit.
 *
 * Therefore, we tweak the implementation slightly - we tell Linux that we
 * have 2048 entries in the first level, each of which is 8 bytes (iow, two
 * hardware pointers to the second level.)  The second level contains two
 * hardware PTE tables arranged contiguously, preceded by Linux versions
 * which contain the state information Linux needs.  We, therefore, end up
 * with 512 entries in the "PTE" level.
 *
 * This leads to the page tables having the following layout:
 *
 *    pgd             pte
 * |        |
 * +--------+
 * |        |       +------------+ +0
 * +- - - - +       | Linux pt 0 |
 * |        |       +------------+ +1024
 * +--------+ +0    | Linux pt 1 |
 * |        |-----> +------------+ +2048
 * +- - - - + +4    |  h/w pt 0  |
 * |        |-----> +------------+ +3072
 * +--------+ +8    |  h/w pt 1  |
 * |        |       +------------+ +4096
 *
 * See L_PTE_xxx below for definitions of bits in the "Linux pt", and
 * PTE_xxx for definitions of bits appearing in the "h/w pt".
 *
 * PMD_xxx definitions refer to bits in the first level page table.
 *
 * The "dirty" bit is emulated by only granting hardware write permission
 * iff the page is marked "writable" and "dirty" in the Linux PTE.  This
 * means that a write to a clean page will cause a permission fault, and
 * the Linux MM layer will mark the page dirty via handle_pte_fault().
 * For the hardware to notice the permission change, the TLB entry must
 * be flushed, and ptep_set_access_flags() does that for us.
 *
 * The "accessed" or "young" bit is emulated by a similar method; we only
 * allow accesses to the page if the "young" bit is set.  Accesses to the
 * page will cause a fault, and handle_pte_fault() will set the young bit
 * for us as long as the page is marked present in the corresponding Linux
 * PTE entry.  Again, ptep_set_access_flags() will ensure that the TLB is
 * up to date.
 *
 * However, when the "young" bit is cleared, we deny access to the page
 * by clearing the hardware PTE.  Currently Linux does not flush the TLB
 * for us in this case, which means the TLB will retain the translation
 * until either the TLB entry is evicted under pressure, or a context
 * switch which changes the user space mapping occurs.
 */
#define PTRS_PER_PTE            512
#define PTRS_PER_PMD            1
#define PTRS_PER_PGD            2048

#define PTE_HWTABLE_PTRS        (PTRS_PER_PTE)
#define PTE_HWTABLE_OFF         (PTE_HWTABLE_PTRS * sizeof(pte_t))
#define PTE_HWTABLE_SIZE        (PTRS_PER_PTE * sizeof(u32))
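
/*
 * Worked out (with the 32-bit pte_t used here), these numbers describe
 * one 4kB page exactly, matching the diagram above: PTE_HWTABLE_OFF =
 * 512 * 4 = 2048 bytes of Linux PTEs (two 1kB Linux tables), followed
 * by PTE_HWTABLE_SIZE = 512 * 4 = 2048 bytes holding the two 1kB
 * hardware tables, for 4096 bytes in total.
 */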

/*
 * PMD_SHIFT determines the size of the area a second-level page table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#define PMD_SHIFT               21
#define PGDIR_SHIFT             21

#define LIBRARY_TEXT_START      0x0c000000

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, pte_t);
extern void __pmd_error(const char *file, int line, pmd_t);
extern void __pgd_error(const char *file, int line, pgd_t);

#define pte_ERROR(pte)          __pte_error(__FILE__, __LINE__, pte)
#define pmd_ERROR(pmd)          __pmd_error(__FILE__, __LINE__, pmd)
#define pgd_ERROR(pgd)          __pgd_error(__FILE__, __LINE__, pgd)
#endif /* !__ASSEMBLY__ */

#define PMD_SIZE                (1UL << PMD_SHIFT)
#define PMD_MASK                (~(PMD_SIZE-1))
#define PGDIR_SIZE              (1UL << PGDIR_SHIFT)
#define PGDIR_MASK              (~(PGDIR_SIZE-1))
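
/*
 * With PMD_SHIFT == PGDIR_SHIFT == 21, each pgd entry (one pair of
 * hardware first-level descriptors, per the layout comment above)
 * covers 1UL << 21 = 2MB of virtual address space, and 2048 such
 * entries span the full 4GB.
 */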

/*
 * This is the lowest virtual address we can permit any user space
 * mapping to be mapped at.  This is particularly important for
 * non-high vector CPUs.
 */
#define FIRST_USER_ADDRESS      PAGE_SIZE

#define USER_PTRS_PER_PGD       (TASK_SIZE / PGDIR_SIZE)

/*
 * section address mask and size definitions.
 */
#define SECTION_SHIFT           20
#define SECTION_SIZE            (1UL << SECTION_SHIFT)
#define SECTION_MASK            (~(SECTION_SIZE-1))

/*
 * ARMv6 supersection address mask and size definitions.
 */
#define SUPERSECTION_SHIFT      24
#define SUPERSECTION_SIZE       (1UL << SUPERSECTION_SHIFT)
#define SUPERSECTION_MASK       (~(SUPERSECTION_SIZE-1))

/*
 * "Linux" PTE definitions.
 *
 * We keep two sets of PTEs - the hardware version and the Linux version.
 * This allows greater flexibility in the way we map the Linux bits
 * onto the hardware tables, and allows us to have YOUNG and DIRTY
 * bits.
 *
 * The PTE table pointer refers to the hardware entries; the "Linux"
 * entries are stored 1024 bytes below.
 */
#define L_PTE_PRESENT           (_AT(pteval_t, 1) << 0)
#define L_PTE_YOUNG             (_AT(pteval_t, 1) << 1)
#define L_PTE_FILE              (_AT(pteval_t, 1) << 2) /* only when !PRESENT */
#define L_PTE_DIRTY             (_AT(pteval_t, 1) << 6)
#define L_PTE_RDONLY            (_AT(pteval_t, 1) << 7)
#define L_PTE_USER              (_AT(pteval_t, 1) << 8)
#define L_PTE_XN                (_AT(pteval_t, 1) << 9)
#define L_PTE_SHARED            (_AT(pteval_t, 1) << 10)        /* shared(v6), coherent(xsc3) */

/*
 * These are the memory types, defined to be compatible with
 * pre-ARMv6 CPUs cacheable and bufferable bits:   XXCB
 */
#define L_PTE_MT_UNCACHED       (_AT(pteval_t, 0x00) << 2)      /* 0000 */
#define L_PTE_MT_BUFFERABLE     (_AT(pteval_t, 0x01) << 2)      /* 0001 */
#define L_PTE_MT_WRITETHROUGH   (_AT(pteval_t, 0x02) << 2)      /* 0010 */
#define L_PTE_MT_WRITEBACK      (_AT(pteval_t, 0x03) << 2)      /* 0011 */
#define L_PTE_MT_MINICACHE      (_AT(pteval_t, 0x06) << 2)      /* 0110 (sa1100, xscale) */
#define L_PTE_MT_WRITEALLOC     (_AT(pteval_t, 0x07) << 2)      /* 0111 */
#define L_PTE_MT_DEV_SHARED     (_AT(pteval_t, 0x04) << 2)      /* 0100 */
#define L_PTE_MT_DEV_NONSHARED  (_AT(pteval_t, 0x0c) << 2)      /* 1100 */
#define L_PTE_MT_DEV_WC         (_AT(pteval_t, 0x09) << 2)      /* 1001 */
#define L_PTE_MT_DEV_CACHED     (_AT(pteval_t, 0x0b) << 2)      /* 1011 */
#define L_PTE_MT_MASK           (_AT(pteval_t, 0x0f) << 2)

#ifndef __ASSEMBLY__

/*
 * The pgprot_* and protection_map entries will be fixed up at runtime
 * to include the cacheable and bufferable bits based on memory policy,
 * as well as any architecture dependent bits like global/ASID and SMP
 * shared mapping bits.
 */
#define _L_PTE_DEFAULT  (L_PTE_PRESENT | L_PTE_YOUNG)

extern pgprot_t         pgprot_user;
extern pgprot_t         pgprot_kernel;

#define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b))

#define PAGE_NONE               _MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY)
#define PAGE_SHARED             _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN)
#define PAGE_SHARED_EXEC        _MOD_PROT(pgprot_user, L_PTE_USER)
#define PAGE_COPY               _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_COPY_EXEC          _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_READONLY           _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_READONLY_EXEC      _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_KERNEL             _MOD_PROT(pgprot_kernel, L_PTE_XN)
#define PAGE_KERNEL_EXEC        pgprot_kernel

#define __PAGE_NONE             __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_SHARED           __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
#define __PAGE_SHARED_EXEC      __pgprot(_L_PTE_DEFAULT | L_PTE_USER)
#define __PAGE_COPY             __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_COPY_EXEC        __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
#define __PAGE_READONLY         __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_READONLY_EXEC    __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)

#define __pgprot_modify(prot,mask,bits)         \
        __pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define pgprot_noncached(prot) \
        __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)

#define pgprot_writecombine(prot) \
        __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)

#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#define pgprot_dmacoherent(prot) \
        __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot);
#else
#define pgprot_dmacoherent(prot) \
        __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
#endif

#endif /* __ASSEMBLY__ */

/*
 * The table below defines the page protection levels that we insert into our
 * Linux page table version.  These get translated into the best that the
 * architecture can perform.  Note that on most ARM hardware:
 *  1) We cannot do execute protection
 *  2) If we could do execute protection, then read is implied
 *  3) write implies read permissions
 */
#define __P000  __PAGE_NONE
#define __P001  __PAGE_READONLY
#define __P010  __PAGE_COPY
#define __P011  __PAGE_COPY
#define __P100  __PAGE_READONLY_EXEC
#define __P101  __PAGE_READONLY_EXEC
#define __P110  __PAGE_COPY_EXEC
#define __P111  __PAGE_COPY_EXEC

#define __S000  __PAGE_NONE
#define __S001  __PAGE_READONLY
#define __S010  __PAGE_SHARED
#define __S011  __PAGE_SHARED
#define __S100  __PAGE_READONLY_EXEC
#define __S101  __PAGE_READONLY_EXEC
#define __S110  __PAGE_SHARED_EXEC
#define __S111  __PAGE_SHARED_EXEC
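
/*
 * Example of how the table is indexed (__Pxwr for private mappings,
 * __Sxwr for shared ones): a private PROT_READ|PROT_WRITE mapping
 * selects __P011, i.e. __PAGE_COPY, so the page starts out read-only
 * and the first store faults into copy-on-write; the same protection
 * on a MAP_SHARED mapping selects __S011 == __PAGE_SHARED, which is
 * genuinely writable.
 */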

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)        (empty_zero_page)


extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* to find an entry in a page-table-directory */
#define pgd_index(addr)         ((addr) >> PGDIR_SHIFT)

#define pgd_offset(mm, addr)    ((mm)->pgd + pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)      pgd_offset(&init_mm, addr)

/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
#define pgd_none(pgd)           (0)
#define pgd_bad(pgd)            (0)
#define pgd_present(pgd)        (1)
#define pgd_clear(pgdp)         do { } while (0)
#define set_pgd(pgd,pgdp)       do { } while (0)


/* Find an entry in the second-level page table.. */
#define pmd_offset(dir, addr)   ((pmd_t *)(dir))

#define pmd_none(pmd)           (!pmd_val(pmd))
#define pmd_present(pmd)        (pmd_val(pmd))
#define pmd_bad(pmd)            (pmd_val(pmd) & 2)

#define copy_pmd(pmdpd,pmdps)           \
        do {                            \
                pmdpd[0] = pmdps[0];    \
                pmdpd[1] = pmdps[1];    \
                flush_pmd_entry(pmdpd); \
        } while (0)

#define pmd_clear(pmdp)                 \
        do {                            \
                pmdp[0] = __pmd(0);     \
                pmdp[1] = __pmd(0);     \
                clean_pmd_entry(pmdp);  \
        } while (0)

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
        return __va(pmd_val(pmd) & PAGE_MASK);
}

#define pmd_page(pmd)           pfn_to_page(__phys_to_pfn(pmd_val(pmd)))

/* we don't need complex calculations here as the pmd is folded into the pgd */
#define pmd_addr_end(addr,end)  (end)


#ifndef CONFIG_HIGHPTE
#define __pte_map(pmd)          pmd_page_vaddr(*(pmd))
#define __pte_unmap(pte)        do { } while (0)
#else
#define __pte_map(pmd)          (pte_t *)kmap_atomic(pmd_page(*(pmd)))
#define __pte_unmap(pte)        kunmap_atomic(pte)
#endif
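
/*
 * With CONFIG_HIGHPTE, page table pages may be allocated from highmem,
 * which has no permanent kernel mapping; __pte_map() therefore has to
 * create a temporary mapping with kmap_atomic(), and every __pte_map()
 * must be paired with a __pte_unmap().  Without CONFIG_HIGHPTE the PTE
 * page always lives in lowmem, so __va() arithmetic is enough and
 * unmapping is a no-op.
 */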

#define pte_index(addr)         (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_kernel(pmd,addr)     (pmd_page_vaddr(*(pmd)) + pte_index(addr))

#define pte_offset_map(pmd,addr)        (__pte_map(pmd) + pte_index(addr))
#define pte_unmap(pte)                  __pte_unmap(pte)

#define pte_pfn(pte)            (pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)       __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define pte_page(pte)           pfn_to_page(pte_pfn(pte))
#define mk_pte(page,prot)       pfn_pte(page_to_pfn(page), prot)

#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
#define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0)

#if __LINUX_ARM_ARCH__ < 6
static inline void __sync_icache_dcache(pte_t pteval)
{
}
#else
extern void __sync_icache_dcache(pte_t pteval);
#endif

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pteval)
{
        if (addr >= TASK_SIZE)
                set_pte_ext(ptep, pteval, 0);
        else {
                __sync_icache_dcache(pteval);
                set_pte_ext(ptep, pteval, PTE_EXT_NG);
        }
}
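
/*
 * The split on TASK_SIZE above matters for TLB management: user
 * mappings (addr < TASK_SIZE) are installed with the hardware
 * "not global" bit (PTE_EXT_NG) so their TLB entries are tagged per
 * address space, while kernel mappings are global and shared by every
 * process.
 */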

#define pte_none(pte)           (!pte_val(pte))
#define pte_present(pte)        (pte_val(pte) & L_PTE_PRESENT)
#define pte_write(pte)          (!(pte_val(pte) & L_PTE_RDONLY))
#define pte_dirty(pte)          (pte_val(pte) & L_PTE_DIRTY)
#define pte_young(pte)          (pte_val(pte) & L_PTE_YOUNG)
#define pte_exec(pte)           (!(pte_val(pte) & L_PTE_XN))
#define pte_special(pte)        (0)

#define pte_present_user(pte) \
        ((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
         (L_PTE_PRESENT | L_PTE_USER))

#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(wrprotect, |= L_PTE_RDONLY);
PTE_BIT_FUNC(mkwrite,   &= ~L_PTE_RDONLY);
PTE_BIT_FUNC(mkclean,   &= ~L_PTE_DIRTY);
PTE_BIT_FUNC(mkdirty,   |= L_PTE_DIRTY);
PTE_BIT_FUNC(mkold,     &= ~L_PTE_YOUNG);
PTE_BIT_FUNC(mkyoung,   |= L_PTE_YOUNG);
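
/*
 * For reference, the first instantiation above expands to:
 *
 *   static inline pte_t pte_wrprotect(pte_t pte)
 *   { pte_val(pte) |= L_PTE_RDONLY; return pte; }
 *
 * i.e. each helper returns a modified copy of the PTE value; it never
 * writes the page table itself.
 */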

static inline pte_t pte_mkspecial(pte_t pte) { return pte; }

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER;
        pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
        return pte;
}
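
/*
 * Note that pte_modify() takes only the protection bits (XN, RDONLY,
 * USER) from the new pgprot: the memory type and the emulated DIRTY
 * and YOUNG bits of the existing PTE are preserved, so e.g. an
 * mprotect() that makes a dirty page read-only does not lose the
 * dirty state.
 */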

/*
 * Encode and decode a swap entry.  Swap entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <--------------- offset --------------------> <- type --> 0 0 0
 *
 * This gives us up to 63 swap files and 32GB per swap file.  Note that
 * the offset field is always non-zero.
 */
#define __SWP_TYPE_SHIFT        3
#define __SWP_TYPE_BITS         6
#define __SWP_TYPE_MASK         ((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT      (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define __swp_type(x)           (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)         ((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp) ((pte_t) { (swp).val })
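
/*
 * Worked example: __swp_entry(2, 0x1234) yields
 * (2 << 3) | (0x1234 << 9) = 0x00246810; decoding gives
 * __swp_type() == ((0x00246810 >> 3) & 0x3f) == 2 and
 * __swp_offset() == (0x00246810 >> 9) == 0x1234.  The offset occupies
 * bits 9..31 (23 bits), and 2^23 pages * 4kB = 32GB per swap file,
 * matching the comment above.
 */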

/*
 * It is an error for the kernel to have more swap files than we can
 * encode in the PTEs.  This ensures that we know when MAX_SWAPFILES
 * is increased beyond what we presently support.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

/*
 * Encode and decode a file entry.  File entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <----------------------- offset ------------------------> 1 0 0
 */
#define pte_file(pte)           (pte_val(pte) & L_PTE_FILE)
#define pte_to_pgoff(x)         (pte_val(x) >> 3)
#define pgoff_to_pte(x)         __pte(((x) << 3) | L_PTE_FILE)

#define PTE_FILE_MAX_BITS       29
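
/*
 * The file offset is stored in bits 3..31, i.e. 32 - 3 = 29 usable
 * bits, which is exactly what PTE_FILE_MAX_BITS advertises to the core
 * VM.  Bit 0 (present) is clear and bit 2 (L_PTE_FILE) is set, which
 * is how a file entry is told apart from a swap entry, whose low three
 * bits are all zero per the diagram further up.
 */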

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
/* FIXME: this is not correct */
#define kern_addr_valid(addr)   (1)

#include <asm-generic/pgtable.h>

/*
 * We provide our own arch_get_unmapped_area to cope with VIPT caches.
 */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * remap a physical page `pfn' of size `size' with page protection `prot'
 * into virtual address `from'
 */
#define io_remap_pfn_range(vma,from,pfn,size,prot) \
                remap_pfn_range(vma, from, pfn, size, prot)

#define pgtable_cache_init() do { } while (0)

void identity_mapping_add(pgd_t *, unsigned long, unsigned long);
void identity_mapping_del(pgd_t *, unsigned long, unsigned long);

#endif /* !__ASSEMBLY__ */

#endif /* CONFIG_MMU */

#endif /* _ASMARM_PGTABLE_H */