linux/arch/microblaze/include/asm/pgtable.h
/*
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#ifndef _ASM_MICROBLAZE_PGTABLE_H
#define _ASM_MICROBLAZE_PGTABLE_H

#include <asm/setup.h>

#ifndef __ASSEMBLY__
extern int mem_init_done;
#endif

#ifndef CONFIG_MMU

#define pgd_present(pgd)	(1) /* pages are always present on non MMU */
#define pgd_none(pgd)		(0)
#define pgd_bad(pgd)		(0)
#define pgd_clear(pgdp)
#define kern_addr_valid(addr)	(1)
#define pmd_offset(a, b)	((void *) 0)

#define PAGE_NONE		__pgprot(0) /* these mean nothing to non MMU */
#define PAGE_SHARED		__pgprot(0) /* these mean nothing to non MMU */
#define PAGE_COPY		__pgprot(0) /* these mean nothing to non MMU */
#define PAGE_READONLY		__pgprot(0) /* these mean nothing to non MMU */
#define PAGE_KERNEL		__pgprot(0) /* these mean nothing to non MMU */

#define pgprot_noncached(x)	(x)
#define pgprot_writecombine	pgprot_noncached
#define pgprot_device		pgprot_noncached

#define __swp_type(x)		(0)
#define __swp_offset(x)		(0)
#define __swp_entry(typ, off)	((swp_entry_t) { ((typ) | ((off) << 7)) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#define ZERO_PAGE(vaddr)	({ BUG(); NULL; })

#define swapper_pg_dir ((pgd_t *) NULL)

#define pgtable_cache_init()	do {} while (0)

#define arch_enter_lazy_cpu_mode()	do {} while (0)

#define pgprot_noncached_wc(prot)	prot

/*
 * All 32-bit addresses are effectively valid for vmalloc...
 * Sort of meaningless for non-VM targets.
 */
#define VMALLOC_START	0
#define VMALLOC_END	0xffffffff

#else /* CONFIG_MMU */

#include <asm-generic/4level-fixup.h>

#define __PAGETABLE_PMD_FOLDED

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>

#define FIRST_USER_ADDRESS	0UL

extern unsigned long va_to_phys(unsigned long address);
extern pte_t *va_to_pte(unsigned long address);

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */

static inline int pte_special(pte_t pte)	{ return 0; }

static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }

/* Start and end of the vmalloc area. */
/* Make sure to map the vmalloc area above the pinned kernel memory area
   of 32MB. */
#define VMALLOC_START	(CONFIG_KERNEL_START + CONFIG_LOWMEM_SIZE)
#define VMALLOC_END	ioremap_bot

#endif /* __ASSEMBLY__ */

/*
 * Macros to mark a page protection value as "uncacheable".
 */

#define _PAGE_CACHE_CTL	(_PAGE_GUARDED | _PAGE_NO_CACHE | \
						_PAGE_WRITETHRU)

#define pgprot_noncached(prot) \
		(__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
				_PAGE_NO_CACHE | _PAGE_GUARDED))

#define pgprot_noncached_wc(prot) \
		(__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
						_PAGE_NO_CACHE))
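
/*
 * A minimal usage sketch (illustrative, not part of this header's API):
 * pgprot_noncached() first clears all three cache-control bits in
 * _PAGE_CACHE_CTL and then sets I and G, so the result is uncached and
 * guarded no matter what the input protection contained:
 *
 *	pgprot_t prot = PAGE_KERNEL;
 *	pgprot_t io_prot = pgprot_noncached(prot);
 *	// pgprot_val(io_prot) now has _PAGE_NO_CACHE and _PAGE_GUARDED
 *	// set and _PAGE_WRITETHRU clear.
 *
 * pgprot_noncached_wc() differs only in leaving _PAGE_GUARDED clear,
 * which still inhibits caching but permits prefetch on the bus.
 */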

/*
 * The MicroBlaze MMU is identical to the PPC-40x MMU, and uses a hash
 * table containing PTEs, together with a set of 16 segment registers, to
 * define the virtual to physical address mapping.
 *
 * We use the hash table as an extended TLB, i.e. a cache of currently
 * active mappings.  We maintain a two-level page table tree, much
 * like that used by the i386, for the sake of the Linux memory
 * management code.  Low-level assembler code in hashtable.S
 * (procedure hash_page) is responsible for extracting ptes from the
 * tree and putting them into the hash table when necessary, and
 * updating the accessed and modified bits in the page table tree.
 */

/*
 * The MicroBlaze processor has a TLB architecture identical to PPC-40x. The
 * instruction and data sides share a unified, 64-entry, semi-associative
 * TLB which is maintained totally under software control. In addition, the
 * instruction side has a hardware-managed, 2-, 4-, or 8-entry,
 * fully-associative TLB which serves as a first level to the shared TLB.
 * These two TLBs are known as the UTLB and ITLB, respectively (see "mmu.h"
 * for definitions).
 */

/*
 * The normal case is that PTEs are 32 bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 */

/* PMD_SHIFT determines the size of the area mapped by the PTE pages */
#define PMD_SHIFT	(PAGE_SHIFT + PTE_SHIFT)
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	PMD_SHIFT
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * entries per page directory level: our page-table tree is two-level, so
 * we don't really have any PMD directory.
 */
#define PTRS_PER_PTE	(1 << PTE_SHIFT)
#define PTRS_PER_PMD	1
#define PTRS_PER_PGD	(1 << (32 - PGDIR_SHIFT))
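
/*
 * Worked example (a sketch, assuming the usual PAGE_SHIFT = 12 and
 * PTE_SHIFT = 10 configuration): PMD_SHIFT = PGDIR_SHIFT = 22, so each of
 * the 1024 pgd entries maps a 4MB region, covered by one 1024-entry PTE
 * page.  A 32-bit virtual address then decomposes as:
 *
 *	unsigned long va      = 0x12345678;
 *	unsigned long pgd_idx = va >> PGDIR_SHIFT;		// 0x048
 *	unsigned long pte_idx = (va >> PAGE_SHIFT) &
 *					(PTRS_PER_PTE - 1);	// 0x345
 *	unsigned long offset  = va & ~PAGE_MASK;		// 0x678
 */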

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_PGD_NR	0

#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)

#define pte_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pte "PTE_FMT".\n", \
		__FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pmd %08lx.\n", \
		__FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \
		__FILE__, __LINE__, pgd_val(e))

/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PTE as closely as possible.
 */

/* There are several potential gotchas here.  The hardware TLBLO
 * field looks like this:
 *
 * 0  1  2  3  4  ... 18 19 20 21 22 23 24 25 26 27 28 29 30 31
 * RPN.....................  0  0 EX WR ZSEL.......  W  I  M  G
 *
 * Where possible we make the Linux PTE bits match up with this:
 *
 * - bits 20 and 21 must be cleared, because we use 4k pages (4xx can
 * support down to 1k pages), this is done in the TLBMiss exception
 * handler.
 * - We use only zones 0 (for kernel pages) and 1 (for user pages)
 * of the 16 available.  Bits 24-26 of the TLB are cleared in the TLB
 * miss handler.  Bit 27 is PAGE_USER, thus selecting the correct
 * zone.
 * - PRESENT *must* be in the bottom two bits because swap cache
 * entries use the top 30 bits.  Because 4xx doesn't support SMP
 * anyway, M is irrelevant so we borrow it for PAGE_PRESENT.  Bit 30
 * is cleared in the TLB miss handler before the TLB entry is loaded.
 * - All other bits of the PTE are loaded into TLBLO without
 * modification, leaving us only bits 20, 21, 24, 25, 26, and 30 for
 * software PTE bits.  We actually use bits 21, 24, 25, and
 * 30 respectively for the software bits: ACCESSED, DIRTY, RW, and
 * PRESENT.
 */
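
/*
 * A quick sanity check on the numbering above (TLBLO bits count from the
 * MSB, so TLBLO bit n corresponds to the mask 1 << (31 - n) in a 32-bit
 * word): the software bits named above line up with the Linux PTE masks
 * defined below:
 *
 *	_PAGE_ACCESSED = 0x400 = 1 << 10  ->  TLBLO bit 21
 *	_PAGE_DIRTY    = 0x080 = 1 << 7   ->  TLBLO bit 24
 *	_PAGE_RW       = 0x040 = 1 << 6   ->  TLBLO bit 25
 *	_PAGE_PRESENT  = 0x002 = 1 << 1   ->  TLBLO bit 30
 */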

/* Definitions for MicroBlaze. */
#define _PAGE_GUARDED	0x001	/* G: page is guarded from prefetch */
#define _PAGE_PRESENT	0x002	/* software: PTE contains a translation */
#define _PAGE_NO_CACHE	0x004	/* I: caching is inhibited */
#define _PAGE_WRITETHRU	0x008	/* W: caching is write-through */
#define _PAGE_USER	0x010	/* matches one of the zone permission bits */
#define _PAGE_RW	0x040	/* software: Writes permitted */
#define _PAGE_DIRTY	0x080	/* software: dirty page */
#define _PAGE_HWWRITE	0x100	/* hardware: Dirty & RW, set in exception */
#define _PAGE_HWEXEC	0x200	/* hardware: EX permission */
#define _PAGE_ACCESSED	0x400	/* software: R: page referenced */
#define _PMD_PRESENT	PAGE_MASK

/*
 * Some bits are unused...
 */
#ifndef _PAGE_HASHPTE
#define _PAGE_HASHPTE	0
#endif
#ifndef _PTE_NONE_MASK
#define _PTE_NONE_MASK	0
#endif
#ifndef _PAGE_SHARED
#define _PAGE_SHARED	0
#endif
#ifndef _PAGE_EXEC
#define _PAGE_EXEC	0
#endif

#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

/*
 * Note: the _PAGE_COHERENT bit automatically gets set in the hardware
 * PTE if CONFIG_SMP is defined (hash_page does this); there is no need
 * to have it in the Linux PTE, and in fact the bit could be reused for
 * another purpose.  -- paulus.
 */
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_WRENABLE	(_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)

#define _PAGE_KERNEL \
	(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED | _PAGE_HWEXEC)

#define _PAGE_IO	(_PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED)

#define PAGE_NONE	__pgprot(_PAGE_BASE)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
#define PAGE_SHARED_X \
		__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)

#define PAGE_KERNEL	__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_SHARED)
#define PAGE_KERNEL_CI	__pgprot(_PAGE_IO)

/*
 * We consider execute permission the same as read.
 * Also, write permissions imply read permissions.
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY_X
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY_X
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY_X
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY_X

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY_X
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED_X
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY_X
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED_X
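
/*
 * These tables are indexed by a vma's VM_READ/VM_WRITE/VM_EXEC bits; the
 * digits in __Pxwr/__Sxwr read as (exec, write, read).  For example, a
 * PROT_READ|PROT_WRITE mapping resolves as follows (a sketch of the
 * generic mm lookup, not anything defined in this header):
 *
 *	// private mapping: protection_map index 011 -> __P011
 *	pgprot_t prot_priv = __P011;	// PAGE_COPY_X: user, no _PAGE_RW
 *	// shared mapping:  index 011 -> __S011
 *	pgprot_t prot_shrd = __S011;	// PAGE_SHARED_X: user + _PAGE_RW
 *
 * The private variant omits _PAGE_RW so the first write faults and the
 * page can be copied (copy-on-write).
 */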

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[1024];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#endif /* __ASSEMBLY__ */

#define pte_none(pte)		((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_clear(mm, addr, ptep) \
	do { set_pte_at((mm), (addr), (ptep), __pte(0)); } while (0)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		((pmd_val(pmd) & _PMD_PRESENT) == 0)
#define pmd_present(pmd)	((pmd_val(pmd) & _PMD_PRESENT) != 0)
#define pmd_clear(pmdp)		do { pmd_val(*(pmdp)) = 0; } while (0)

#define pte_page(x)		(mem_map + (unsigned long) \
				((pte_val(x) - memory_start) >> PAGE_SHIFT))
#define PFN_SHIFT_OFFSET	(PAGE_SHIFT)

#define pte_pfn(x)		(pte_val(x) >> PFN_SHIFT_OFFSET)

#define pfn_pte(pfn, prot) \
	__pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) | pgprot_val(prot))
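
/*
 * Round-trip sketch (illustrative only): pfn_pte() packs a page frame
 * number above the protection bits, and pte_pfn() recovers it, since all
 * protection flags live below bit PFN_SHIFT_OFFSET:
 *
 *	pte_t pte = pfn_pte(0x1234, PAGE_KERNEL);
 *	unsigned long pfn = pte_pfn(pte);	// pfn == 0x1234
 */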

#ifndef __ASSEMBLY__
/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
static inline int pgd_none(pgd_t pgd)		{ return 0; }
static inline int pgd_bad(pgd_t pgd)		{ return 0; }
static inline int pgd_present(pgd_t pgd)	{ return 1; }
#define pgd_clear(xp)				do { } while (0)
#define pgd_page(pgd) \
	((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)  { return pte_val(pte) & _PAGE_USER; }
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
static inline int pte_exec(pte_t pte)  { return pte_val(pte) & _PAGE_EXEC; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }

static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
static inline void pte_cache(pte_t pte)   { pte_val(pte) &= ~_PAGE_NO_CACHE; }

static inline pte_t pte_rdprotect(pte_t pte)
	{ pte_val(pte) &= ~_PAGE_USER; return pte; }
static inline pte_t pte_wrprotect(pte_t pte)
	{ pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_exprotect(pte_t pte)
	{ pte_val(pte) &= ~_PAGE_EXEC; return pte; }
static inline pte_t pte_mkclean(pte_t pte)
	{ pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkold(pte_t pte)
	{ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }

static inline pte_t pte_mkread(pte_t pte)
	{ pte_val(pte) |= _PAGE_USER; return pte; }
static inline pte_t pte_mkexec(pte_t pte)
	{ pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)
	{ pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)
	{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)
	{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

static inline pte_t mk_pte_phys(phys_addr_t physpage, pgprot_t pgprot)
{
	pte_t pte;
	pte_val(pte) = physpage | pgprot_val(pgprot);
	return pte;
}

#define mk_pte(page, pgprot) \
({									   \
	pte_t pte;							   \
	pte_val(pte) = (((page - mem_map) << PAGE_SHIFT) + memory_start) | \
			pgprot_val(pgprot);				   \
	pte;								   \
})
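
/*
 * Note the symmetry with pte_page() above: mk_pte() stores the physical
 * address ((page - mem_map) << PAGE_SHIFT) + memory_start, and pte_page()
 * inverts exactly that computation, so (a sketch):
 *
 *	pte_t pte = mk_pte(page, PAGE_KERNEL);
 *	// pte_page(pte) == page
 */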

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}

/*
 * Atomic PTE updates.
 *
 * pte_update clears and sets bits atomically, and returns
 * the old pte value.
 * The ((unsigned long)(p+1) - 4) hack is to get to the least-significant
 * 32 bits of the PTE regardless of whether PTEs are 32 or 64 bits.
 */
static inline unsigned long pte_update(pte_t *p, unsigned long clr,
				unsigned long set)
{
	unsigned long flags, old, tmp;

	raw_local_irq_save(flags);

	__asm__ __volatile__(	"lw	%0, %2, r0	\n"
				"andn	%1, %0, %3	\n"
				"or	%1, %1, %4	\n"
				"sw	%1, %2, r0	\n"
			: "=&r" (old), "=&r" (tmp)
			: "r" ((unsigned long)(p + 1) - 4), "r" (clr), "r" (set)
			: "cc");

	raw_local_irq_restore(flags);

	return old;
}
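
/*
 * For reference, the assembly above performs the following C sequence,
 * made atomic with respect to this CPU by the surrounding irq-save (a
 * sketch; the real code must use asm so the compiler cannot reorder or
 * tear the load/modify/store):
 *
 *	u32 *word = (u32 *)((unsigned long)(p + 1) - 4);
 *	old = *word;			// lw
 *	tmp = (old & ~clr) | set;	// andn + or
 *	*word = tmp;			// sw
 */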

/*
 * set_pte stores a linux PTE into the linux page table.
 */
static inline void set_pte(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
		unsigned long address, pte_t *ptep)
{
	return (pte_update(ptep, _PAGE_ACCESSED, 0) & _PAGE_ACCESSED) != 0;
}

static inline int ptep_test_and_clear_dirty(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	return (pte_update(ptep,
		(_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}

/*static inline void ptep_set_wrprotect(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0);
}*/

static inline void ptep_mkdirty(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	pte_update(ptep, 0, _PAGE_DIRTY);
}

/*#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)*/

/* Convert pmd entry to page */
/* our pmd entry is the effective address of the pte table */
/* returns the effective address of the pte table the pmd entry points to */
#define pmd_page_kernel(pmd)	((unsigned long) (pmd_val(pmd) & PAGE_MASK))

/* returns the struct page of the pmd entry */
#define pmd_page(pmd)	(pfn_to_page(__pa(pmd_val(pmd)) >> PAGE_SHIFT))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)	 ((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
{
	return (pmd_t *) dir;
}

/* Find an entry in the third-level page table.. */
#define pte_index(address)		\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr)	\
	((pte_t *) pmd_page_kernel(*(dir)) + pte_index(addr))
#define pte_offset_map(dir, addr)		\
	((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))

#define pte_unmap(pte)		kunmap_atomic(pte)
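
/*
 * Putting the lookup helpers together, a software walk of a kernel
 * address looks like this (a minimal sketch; real callers also take the
 * relevant locks and check pte_present() before using the result):
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	if (!pgd_none(*pgd)) {
 *		pmd_t *pmd = pmd_offset(pgd, addr); // folded: same entry
 *		if (!pmd_none(*pmd)) {
 *			pte_t *pte = pte_offset_kernel(pmd, addr);
 *			...
 *		}
 *	}
 */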

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit, or the _PAGE_HASHPTE bit
 * (if used).  -- paulus
 */
#define __swp_type(entry)	((entry).val & 0x3f)
#define __swp_offset(entry)	((entry).val >> 6)
#define __swp_entry(type, offset) \
		((swp_entry_t) { (type) | ((offset) << 6) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) >> 2 })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val << 2 })
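
/*
 * The << 2 / >> 2 in the PTE conversions keeps the bottom two PTE bits
 * clear (where _PAGE_PRESENT lives), as the note above requires.  A
 * round trip looks like this (illustrative sketch):
 *
 *	swp_entry_t swp = __swp_entry(3, 0x1000); // type 3, offset 0x1000
 *	pte_t pte = __swp_entry_to_pte(swp);	  // pte_val(pte) & 3 == 0
 *	swp = __pte_to_swp_entry(pte);
 *	// __swp_type(swp) == 3, __swp_offset(swp) == 0x1000
 */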

extern unsigned long iopa(unsigned long addr);

/* Values for nocacheflag and cmode */
/* These are not used by the APUS kernel_map, but prevent
 * compilation errors.
 */
#define IOMAP_FULL_CACHING	0
#define IOMAP_NOCACHE_SER	1
#define IOMAP_NOCACHE_NONSER	2
#define IOMAP_NO_COPYBACK	3

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr)	(1)

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

void do_page_fault(struct pt_regs *regs, unsigned long address,
		   unsigned long error_code);

void mapin_ram(void);
int map_page(unsigned long va, phys_addr_t pa, int flags);

extern int mem_init_done;

asmlinkage void __init mmu_init(void);

void __init *early_get_page(void);

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */

#endif /* CONFIG_MMU */

#ifndef __ASSEMBLY__
#include <asm-generic/pgtable.h>

extern unsigned long ioremap_bot, ioremap_base;

void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle);
void consistent_free(size_t size, void *vaddr);
void consistent_sync(void *vaddr, size_t size, int direction);
void consistent_sync_page(struct page *page, unsigned long offset,
	size_t size, int direction);
unsigned long consistent_virt_to_pfn(void *vaddr);

void setup_memory(void);
#endif /* __ASSEMBLY__ */

#endif /* _ASM_MICROBLAZE_PGTABLE_H */