linux/arch/microblaze/include/asm/pgtable.h
/*
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#ifndef _ASM_MICROBLAZE_PGTABLE_H
#define _ASM_MICROBLAZE_PGTABLE_H

#include <asm/setup.h>

#ifndef __ASSEMBLY__
extern int mem_init_done;
#endif

#ifndef CONFIG_MMU

#define pgd_present(pgd)        (1) /* pages are always present on non-MMU */
#define pgd_none(pgd)           (0)
#define pgd_bad(pgd)            (0)
#define pgd_clear(pgdp)
#define kern_addr_valid(addr)   (1)
#define pmd_offset(a, b)        ((void *) 0)

#define PAGE_NONE               __pgprot(0) /* these mean nothing on non-MMU */
#define PAGE_SHARED             __pgprot(0) /* these mean nothing on non-MMU */
#define PAGE_COPY               __pgprot(0) /* these mean nothing on non-MMU */
#define PAGE_READONLY           __pgprot(0) /* these mean nothing on non-MMU */
#define PAGE_KERNEL             __pgprot(0) /* these mean nothing on non-MMU */

#define pgprot_noncached(x)     (x)

#define __swp_type(x)           (0)
#define __swp_offset(x)         (0)
#define __swp_entry(typ, off)   ((swp_entry_t) { ((typ) | ((off) << 7)) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)   ((pte_t) { (x).val })

#ifndef __ASSEMBLY__
static inline int pte_file(pte_t pte) { return 0; }
#endif /* __ASSEMBLY__ */

#define ZERO_PAGE(vaddr)        ({ BUG(); NULL; })

#define swapper_pg_dir ((pgd_t *) NULL)

#define pgtable_cache_init()    do {} while (0)

#define arch_enter_lazy_cpu_mode()      do {} while (0)

#define pgprot_noncached_wc(prot)       prot

/*
 * All 32-bit addresses are effectively valid for vmalloc...
 * Sort of meaningless for non-VM targets.
 */
#define VMALLOC_START   0
#define VMALLOC_END     0xffffffff

#else /* CONFIG_MMU */

#include <asm-generic/4level-fixup.h>

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/processor.h>              /* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>

#define FIRST_USER_ADDRESS      0

extern unsigned long va_to_phys(unsigned long address);
extern pte_t *va_to_pte(unsigned long address);

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */

static inline int pte_special(pte_t pte)        { return 0; }

static inline pte_t pte_mkspecial(pte_t pte)    { return pte; }

/* Start and end of the vmalloc area. */
/*
 * Make sure to map the vmalloc area above the pinned kernel memory
 * area of 32MB.
 */
#define VMALLOC_START   (CONFIG_KERNEL_START + CONFIG_LOWMEM_SIZE)
#define VMALLOC_END     ioremap_bot
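
/*
 * Worked example (values are illustrative, not mandated by this header):
 * with CONFIG_KERNEL_START = 0xc0000000 and CONFIG_LOWMEM_SIZE =
 * 0x02000000 (32MB), VMALLOC_START is 0xc2000000, and vmalloc mappings
 * run from there up to the runtime ioremap_bot watermark.
 */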

#endif /* __ASSEMBLY__ */

/*
 * Macro to mark a page protection value as "uncacheable".
 */

#define _PAGE_CACHE_CTL (_PAGE_GUARDED | _PAGE_NO_CACHE | \
                                                        _PAGE_WRITETHRU)

#define pgprot_noncached(prot) \
                        (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
                                        _PAGE_NO_CACHE | _PAGE_GUARDED))

#define pgprot_noncached_wc(prot) \
                         (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
                                                        _PAGE_NO_CACHE))
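
/*
 * Usage sketch (caller-side, assumed; not defined in this header):
 * a driver exposing device registers through mmap() would typically do
 *
 *      vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *      remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot);
 *
 * so the mapping is created with caching inhibited and guarded.
 */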

/*
 * The MicroBlaze MMU is identical to the PPC-40x MMU, and uses a hash
 * table containing PTEs, together with a set of 16 segment registers, to
 * define the virtual to physical address mapping.
 *
 * We use the hash table as an extended TLB, i.e. a cache of currently
 * active mappings.  We maintain a two-level page table tree, much
 * like that used by the i386, for the sake of the Linux memory
 * management code.  Low-level assembler code in hashtable.S
 * (procedure hash_page) is responsible for extracting ptes from the
 * tree and putting them into the hash table when necessary, and
 * updating the accessed and modified bits in the page table tree.
 */

/*
 * The MicroBlaze processor has a TLB architecture identical to PPC-40x. The
 * instruction and data sides share a unified, 64-entry, semi-associative
 * TLB which is maintained totally under software control. In addition, the
 * instruction side has a hardware-managed, 2-, 4-, or 8-entry,
 * fully-associative TLB which serves as a first level to the shared TLB.
 * These two TLBs are known as the UTLB and ITLB, respectively (see "mmu.h"
 * for definitions).
 */

/*
 * The normal case is that PTEs are 32 bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 */

/* PMD_SHIFT determines the size of the area mapped by the PTE pages */
#define PMD_SHIFT       (PAGE_SHIFT + PTE_SHIFT)
#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT     PMD_SHIFT
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))

/*
 * entries per page directory level: our page-table tree is two-level, so
 * we don't really have any PMD directory.
 */
#define PTRS_PER_PTE    (1 << PTE_SHIFT)
#define PTRS_PER_PMD    1
#define PTRS_PER_PGD    (1 << (32 - PGDIR_SHIFT))
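
/*
 * Worked example (assuming the usual 4K pages, i.e. PAGE_SHIFT == 12,
 * and PTE_SHIFT == 10; both come from <asm/page.h>, not this header):
 * PMD_SHIFT == PGDIR_SHIFT == 22, so each pgd entry maps a 4MB region;
 * PTRS_PER_PTE == 1024 and PTRS_PER_PGD == 1 << (32 - 22) == 1024,
 * i.e. one 4K page each for the pgdir and for every PTE page.
 */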

#define USER_PTRS_PER_PGD       (TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_PGD_NR       0

#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)

#define pte_ERROR(e) \
        printk(KERN_ERR "%s:%d: bad pte "PTE_FMT".\n", \
                __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
        printk(KERN_ERR "%s:%d: bad pmd %08lx.\n", \
                __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
        printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \
                __FILE__, __LINE__, pgd_val(e))

/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PTE as closely as possible.
 */

/* There are several potential gotchas here.  The hardware TLBLO
 * field looks like this:
 *
 * 0  1  2  3  4  ... 18 19 20 21 22 23 24 25 26 27 28 29 30 31
 * RPN.....................  0  0 EX WR ZSEL.......  W  I  M  G
 *
 * Where possible we make the Linux PTE bits match up with this
 *
 * - bits 20 and 21 must be cleared, because we use 4k pages (4xx can
 * support down to 1k pages); this is done in the TLBMiss exception
 * handler.
 * - We use only zones 0 (for kernel pages) and 1 (for user pages)
 * of the 16 available.  Bits 24-26 of the TLB are cleared in the TLB
 * miss handler.  Bit 27 is PAGE_USER, thus selecting the correct
 * zone.
 * - PRESENT *must* be in the bottom two bits because swap cache
 * entries use the top 30 bits.  Because 4xx doesn't support SMP
 * anyway, M is irrelevant so we borrow it for PAGE_PRESENT.  Bit 30
 * is cleared in the TLB miss handler before the TLB entry is loaded.
 * - All other bits of the PTE are loaded into TLBLO without
 * modification, leaving us only bits 20, 21, 24, 25, 26, and 30 for
 * software PTE bits.  We actually use bits 21, 24, 25, and 30
 * respectively for the software bits ACCESSED, DIRTY, RW, and PRESENT.
 */

/* Definitions for MicroBlaze. */
#define _PAGE_GUARDED   0x001   /* G: page is guarded from prefetch */
#define _PAGE_FILE      0x001   /* when !present: nonlinear file mapping */
#define _PAGE_PRESENT   0x002   /* software: PTE contains a translation */
#define _PAGE_NO_CACHE  0x004   /* I: caching is inhibited */
#define _PAGE_WRITETHRU 0x008   /* W: caching is write-through */
#define _PAGE_USER      0x010   /* matches one of the zone permission bits */
#define _PAGE_RW        0x040   /* software: Writes permitted */
#define _PAGE_DIRTY     0x080   /* software: dirty page */
#define _PAGE_HWWRITE   0x100   /* hardware: Dirty & RW, set in exception */
#define _PAGE_HWEXEC    0x200   /* hardware: EX permission */
#define _PAGE_ACCESSED  0x400   /* software: R: page referenced */
#define _PMD_PRESENT    PAGE_MASK

/*
 * Some bits are unused...
 */
#ifndef _PAGE_HASHPTE
#define _PAGE_HASHPTE   0
#endif
#ifndef _PTE_NONE_MASK
#define _PTE_NONE_MASK  0
#endif
#ifndef _PAGE_SHARED
#define _PAGE_SHARED    0
#endif
#ifndef _PAGE_EXEC
#define _PAGE_EXEC      0
#endif

#define _PAGE_CHG_MASK  (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

/*
 * Note: the _PAGE_COHERENT bit automatically gets set in the hardware
 * PTE if CONFIG_SMP is defined (hash_page does this); there is no need
 * to have it in the Linux PTE, and in fact the bit could be reused for
 * another purpose.  -- paulus.
 */
#define _PAGE_BASE      (_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_WRENABLE  (_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)

#define _PAGE_KERNEL \
        (_PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED | _PAGE_HWEXEC)

#define _PAGE_IO        (_PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED)

#define PAGE_NONE       __pgprot(_PAGE_BASE)
#define PAGE_READONLY   __pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_SHARED     __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
#define PAGE_SHARED_X \
                __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY       __pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X     __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)

#define PAGE_KERNEL     __pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_RO  __pgprot(_PAGE_BASE | _PAGE_SHARED)
#define PAGE_KERNEL_CI  __pgprot(_PAGE_IO)

/*
 * We consider execute permission the same as read.
 * Also, write permissions imply read permissions.
 */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY_X
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY_X
#define __P100  PAGE_READONLY
#define __P101  PAGE_READONLY_X
#define __P110  PAGE_COPY
#define __P111  PAGE_COPY_X

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY_X
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED_X
#define __S100  PAGE_READONLY
#define __S101  PAGE_READONLY_X
#define __S110  PAGE_SHARED
#define __S111  PAGE_SHARED_X
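
/*
 * Example (illustrative): a private mmap() with PROT_READ | PROT_WRITE
 * selects __P110 == PAGE_COPY, i.e. _PAGE_USER without _PAGE_RW, so the
 * first write faults and can be resolved as copy-on-write; the shared
 * equivalent selects __S110 == PAGE_SHARED, which carries _PAGE_RW.
 */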

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[1024];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#endif /* __ASSEMBLY__ */

#define pte_none(pte)           ((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
#define pte_present(pte)        (pte_val(pte) & _PAGE_PRESENT)
#define pte_clear(mm, addr, ptep) \
        do { set_pte_at((mm), (addr), (ptep), __pte(0)); } while (0)

#define pmd_none(pmd)           (!pmd_val(pmd))
#define pmd_bad(pmd)            ((pmd_val(pmd) & _PMD_PRESENT) == 0)
#define pmd_present(pmd)        ((pmd_val(pmd) & _PMD_PRESENT) != 0)
#define pmd_clear(pmdp)         do { pmd_val(*(pmdp)) = 0; } while (0)

#define pte_page(x)             (mem_map + (unsigned long) \
                                ((pte_val(x) - memory_start) >> PAGE_SHIFT))
#define PFN_SHIFT_OFFSET        (PAGE_SHIFT)

#define pte_pfn(x)              (pte_val(x) >> PFN_SHIFT_OFFSET)

#define pfn_pte(pfn, prot) \
        __pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) | pgprot_val(prot))
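
/*
 * Worked example (assuming PAGE_SHIFT == 12): pfn_pte(0x12345, PAGE_KERNEL)
 * yields a PTE value of 0x12345000 | pgprot_val(PAGE_KERNEL), and pte_pfn()
 * on that PTE recovers 0x12345, since all protection bits defined above
 * live below PFN_SHIFT_OFFSET.
 */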

#ifndef __ASSEMBLY__
/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
static inline int pgd_none(pgd_t pgd)           { return 0; }
static inline int pgd_bad(pgd_t pgd)            { return 0; }
static inline int pgd_present(pgd_t pgd)        { return 1; }
#define pgd_clear(xp)                           do { } while (0)
#define pgd_page(pgd) \
        ((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)  { return pte_val(pte) & _PAGE_USER; }
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
static inline int pte_exec(pte_t pte)  { return pte_val(pte) & _PAGE_EXEC; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)  { return pte_val(pte) & _PAGE_FILE; }

/* Note: these take the pte by value and so modify only a local copy. */
static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
static inline void pte_cache(pte_t pte)   { pte_val(pte) &= ~_PAGE_NO_CACHE; }

static inline pte_t pte_rdprotect(pte_t pte)
        { pte_val(pte) &= ~_PAGE_USER; return pte; }
static inline pte_t pte_wrprotect(pte_t pte)
        { pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_exprotect(pte_t pte)
        { pte_val(pte) &= ~_PAGE_EXEC; return pte; }
static inline pte_t pte_mkclean(pte_t pte)
        { pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkold(pte_t pte)
        { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }

static inline pte_t pte_mkread(pte_t pte)
        { pte_val(pte) |= _PAGE_USER; return pte; }
static inline pte_t pte_mkexec(pte_t pte)
        { pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)
        { pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)
        { pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)
        { pte_val(pte) |= _PAGE_ACCESSED; return pte; }

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

static inline pte_t mk_pte_phys(phys_addr_t physpage, pgprot_t pgprot)
{
        pte_t pte;
        pte_val(pte) = physpage | pgprot_val(pgprot);
        return pte;
}

#define mk_pte(page, pgprot) \
({                                                                         \
        pte_t pte;                                                         \
        pte_val(pte) = (((page - mem_map) << PAGE_SHIFT) + memory_start) | \
                        pgprot_val(pgprot);                                \
        pte;                                                               \
})

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
        return pte;
}

/*
 * Atomic PTE updates.
 *
 * pte_update clears and sets bits atomically, and returns
 * the old pte value.
 * The ((unsigned long)(p+1) - 4) hack is to get to the least-significant
 * 32 bits of the PTE regardless of whether PTEs are 32 or 64 bits.
 */
static inline unsigned long pte_update(pte_t *p, unsigned long clr,
                                unsigned long set)
{
        unsigned long flags, old, tmp;

        raw_local_irq_save(flags);

        __asm__ __volatile__(   "lw     %0, %2, r0      \n"
                                "andn   %1, %0, %3      \n"
                                "or     %1, %1, %4      \n"
                                "sw     %1, %2, r0      \n"
                        : "=&r" (old), "=&r" (tmp)
                        : "r" ((unsigned long)(p + 1) - 4), "r" (clr), "r" (set)
                        : "cc");

        raw_local_irq_restore(flags);

        return old;
}

/*
 * set_pte stores a linux PTE into the linux page table.
 */
static inline void set_pte(struct mm_struct *mm, unsigned long addr,
                pte_t *ptep, pte_t pte)
{
        *ptep = pte;
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                pte_t *ptep, pte_t pte)
{
        *ptep = pte;
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
                unsigned long address, pte_t *ptep)
{
        return (pte_update(ptep, _PAGE_ACCESSED, 0) & _PAGE_ACCESSED) != 0;
}

static inline int ptep_test_and_clear_dirty(struct mm_struct *mm,
                unsigned long addr, pte_t *ptep)
{
        return (pte_update(ptep,
                (_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
                unsigned long addr, pte_t *ptep)
{
        return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}

/*static inline void ptep_set_wrprotect(struct mm_struct *mm,
                unsigned long addr, pte_t *ptep)
{
        pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0);
}*/

static inline void ptep_mkdirty(struct mm_struct *mm,
                unsigned long addr, pte_t *ptep)
{
        pte_update(ptep, 0, _PAGE_DIRTY);
}

/*#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)*/

/* Convert pmd entry to page */
/* our pmd entry is an effective address of a pte table */
/* pmd_page_kernel() returns the effective address of that pte table */
#define pmd_page_kernel(pmd)    ((unsigned long) (pmd_val(pmd) & PAGE_MASK))

/* returns the struct page backing the pte table the pmd entry points to */
#define pmd_page(pmd)   (pfn_to_page(__pa(pmd_val(pmd)) >> PAGE_SHIFT))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)       ((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)  ((mm)->pgd + pgd_index(address))

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
{
        return (pmd_t *) dir;
}

/* Find an entry in the third-level page table.. */
#define pte_index(address)              \
        (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr)    \
        ((pte_t *) pmd_page_kernel(*(dir)) + pte_index(addr))
#define pte_offset_map(dir, addr)               \
        ((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))

#define pte_unmap(pte)          kunmap_atomic(pte)
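
/*
 * Example walk (a sketch; assumes the mapping already exists):
 *
 *      pgd_t *pgd = pgd_offset(mm, addr);      (or pgd_offset_k(addr))
 *      pmd_t *pmd = pmd_offset(pgd, addr);
 *      pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * With the folded pmd level, pmd_offset() just reinterprets the pgd
 * entry, so the walk costs a single real level of indirection.
 */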

/* Encode and decode a nonlinear file mapping entry */
#define PTE_FILE_MAX_BITS       29
#define pte_to_pgoff(pte)       (pte_val(pte) >> 3)
#define pgoff_to_pte(off)       ((pte_t) { ((off) << 3) | _PAGE_FILE })
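
/*
 * Worked example: pgoff_to_pte(5) produces the PTE value (5 << 3) | 1
 * = 0x29.  _PAGE_PRESENT (bit 1) stays clear, _PAGE_FILE (bit 0) marks
 * the entry as a nonlinear mapping, and the file offset survives in the
 * upper 29 bits, hence PTE_FILE_MAX_BITS.
 */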

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit, or the _PAGE_HASHPTE bit
 * (if used).  -- paulus
 */
#define __swp_type(entry)               ((entry).val & 0x3f)
#define __swp_offset(entry)     ((entry).val >> 6)
#define __swp_entry(type, offset) \
                ((swp_entry_t) { (type) | ((offset) << 6) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 2 })
#define __swp_entry_to_pte(x)   ((pte_t) { (x).val << 2 })
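
/*
 * Worked example: __swp_entry(2, 100) has val (100 << 6) | 2 = 0x1902.
 * __swp_entry_to_pte() shifts that left by 2, so the PTE holds 0x6408
 * and its low two bits (_PAGE_FILE, _PAGE_PRESENT) stay clear, as the
 * note above requires; __pte_to_swp_entry() shifts back, and
 * __swp_type()/__swp_offset() recover 2 and 100 exactly.
 */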

extern unsigned long iopa(unsigned long addr);

/* Values for nocacheflag and cmode */
/* These are not used by the APUS kernel_map, but prevent
 * compilation errors.
 */
#define IOMAP_FULL_CACHING      0
#define IOMAP_NOCACHE_SER       1
#define IOMAP_NOCACHE_NONSER    2
#define IOMAP_NO_COPYBACK       3

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr)   (1)

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()    do { } while (0)

void do_page_fault(struct pt_regs *regs, unsigned long address,
                   unsigned long error_code);

void mapin_ram(void);
int map_page(unsigned long va, phys_addr_t pa, int flags);

extern int mem_init_done;

asmlinkage void __init mmu_init(void);

void __init *early_get_page(void);

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */

#endif /* CONFIG_MMU */

#ifndef __ASSEMBLY__
#include <asm-generic/pgtable.h>

extern unsigned long ioremap_bot, ioremap_base;

void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle);
void consistent_free(size_t size, void *vaddr);
void consistent_sync(void *vaddr, size_t size, int direction);
void consistent_sync_page(struct page *page, unsigned long offset,
        size_t size, int direction);
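
/*
 * Usage sketch for the consistent_* helpers (caller-side, assumed):
 *
 *      dma_addr_t dma;
 *      void *buf = consistent_alloc(GFP_KERNEL, size, &dma);
 *      ...program the device with "dma", access "buf" from the CPU...
 *      consistent_free(size, buf);
 *
 * These are arch-internal; generic code is expected to reach them
 * through the DMA mapping API rather than calling them directly.
 */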

void setup_memory(void);
#endif /* __ASSEMBLY__ */

#endif /* _ASM_MICROBLAZE_PGTABLE_H */