linux/arch/microblaze/include/asm/pgtable.h
/*
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#ifndef _ASM_MICROBLAZE_PGTABLE_H
#define _ASM_MICROBLAZE_PGTABLE_H

#include <asm/setup.h>

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)         \
                remap_pfn_range(vma, vaddr, pfn, size, prot)

#ifndef CONFIG_MMU

#define pgd_present(pgd)        (1) /* pages are always present on non MMU */
#define pgd_none(pgd)           (0)
#define pgd_bad(pgd)            (0)
#define pgd_clear(pgdp)
#define kern_addr_valid(addr)   (1)
#define pmd_offset(a, b)        ((void *) 0)

#define PAGE_NONE               __pgprot(0) /* these mean nothing to non MMU */
#define PAGE_SHARED             __pgprot(0) /* these mean nothing to non MMU */
#define PAGE_COPY               __pgprot(0) /* these mean nothing to non MMU */
#define PAGE_READONLY           __pgprot(0) /* these mean nothing to non MMU */
#define PAGE_KERNEL             __pgprot(0) /* these mean nothing to non MMU */

#define pgprot_noncached(x)     (x)

#define __swp_type(x)           (0)
#define __swp_offset(x)         (0)
#define __swp_entry(typ, off)   ((swp_entry_t) { ((typ) | ((off) << 7)) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)   ((pte_t) { (x).val })

#ifndef __ASSEMBLY__
static inline int pte_file(pte_t pte) { return 0; }
#endif /* __ASSEMBLY__ */

#define ZERO_PAGE(vaddr)        ({ BUG(); NULL; })

#define swapper_pg_dir ((pgd_t *) NULL)

#define pgtable_cache_init()    do {} while (0)

#define arch_enter_lazy_cpu_mode()      do {} while (0)

#else /* CONFIG_MMU */

#include <asm-generic/4level-fixup.h>

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/processor.h>              /* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>

#define FIRST_USER_ADDRESS      0

extern unsigned long va_to_phys(unsigned long address);
extern pte_t *va_to_pte(unsigned long address);
extern unsigned long ioremap_bot, ioremap_base;

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */

static inline int pte_special(pte_t pte)        { return 0; }

static inline pte_t pte_mkspecial(pte_t pte)    { return pte; }

/* Start and end of the vmalloc area. */
/* Make sure to map the vmalloc area above the pinned kernel memory area
   of 32MB. */
#define VMALLOC_START   (CONFIG_KERNEL_START + \
                                max(32 * 1024 * 1024UL, memory_size))
#define VMALLOC_END     ioremap_bot
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
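
/*
 * Worked example (a sketch with illustrative values, not taken from
 * this file): with CONFIG_KERNEL_START = 0xC0000000 and
 * memory_size = 64 MB, the pinned region is max(32 MB, 64 MB) = 64 MB,
 * so VMALLOC_START = 0xC0000000 + 0x04000000 = 0xC4000000, and the
 * vmalloc area extends up to ioremap_bot.
 */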

#endif /* __ASSEMBLY__ */

/*
 * The MicroBlaze MMU is identical to the PPC-40x MMU, and uses a hash
 * table containing PTEs, together with a set of 16 segment registers, to
 * define the virtual to physical address mapping.
 *
 * We use the hash table as an extended TLB, i.e. a cache of currently
 * active mappings.  We maintain a two-level page table tree, much
 * like that used by the i386, for the sake of the Linux memory
 * management code.  Low-level assembler code in hashtable.S
 * (procedure hash_page) is responsible for extracting ptes from the
 * tree and putting them into the hash table when necessary, and
 * updating the accessed and modified bits in the page table tree.
 */

/*
 * The MicroBlaze processor has a TLB architecture identical to PPC-40x. The
 * instruction and data sides share a unified, 64-entry, semi-associative
 * TLB which is maintained totally under software control. In addition, the
 * instruction side has a hardware-managed, 2-, 4-, or 8-entry, fully-associative
 * TLB which serves as a first level to the shared TLB. These two TLBs are
 * known as the UTLB and ITLB, respectively (see "mmu.h" for definitions).
 */

/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 */

/* PMD_SHIFT determines the size of the area mapped by the PTE pages */
#define PMD_SHIFT       (PAGE_SHIFT + PTE_SHIFT)
#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT     PMD_SHIFT
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))

/*
 * entries per page directory level: our page-table tree is two-level, so
 * we don't really have any PMD directory.
 */
#define PTRS_PER_PTE    (1 << PTE_SHIFT)
#define PTRS_PER_PMD    1
#define PTRS_PER_PGD    (1 << (32 - PGDIR_SHIFT))

#define USER_PTRS_PER_PGD       (TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_PGD_NR       0

#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
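
/*
 * Worked example (a sketch with typical values, not taken from this
 * file): assuming 4K pages (PAGE_SHIFT = 12) and PTE_SHIFT = 10,
 * PMD_SHIFT = PGDIR_SHIFT = 22, so each PGD entry maps 4 MB and
 * PTRS_PER_PTE = PTRS_PER_PGD = 1024.  With an illustrative
 * PAGE_OFFSET of 0xC0000000, USER_PGD_PTRS = 0xC0000000 >> 22 = 768
 * and KERNEL_PGD_PTRS = 1024 - 768 = 256.
 */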

#define pte_ERROR(e) \
        printk(KERN_ERR "%s:%d: bad pte "PTE_FMT".\n", \
                __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
        printk(KERN_ERR "%s:%d: bad pmd %08lx.\n", \
                __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
        printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \
                __FILE__, __LINE__, pgd_val(e))

/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PTE as closely as possible.
 */

/* There are several potential gotchas here.  The hardware TLBLO
 * field looks like this:
 *
 * 0  1  2  3  4  ... 18 19 20 21 22 23 24 25 26 27 28 29 30 31
 * RPN.....................  0  0 EX WR ZSEL.......  W  I  M  G
 *
 * Where possible we make the Linux PTE bits match up with this
 *
 * - bits 20 and 21 must be cleared, because we use 4k pages (4xx can
 * support down to 1k pages), this is done in the TLBMiss exception
 * handler.
 * - We use only zones 0 (for kernel pages) and 1 (for user pages)
 * of the 16 available.  Bits 24-26 of the TLB are cleared in the TLB
 * miss handler.  Bit 27 is PAGE_USER, thus selecting the correct
 * zone.
 * - PRESENT *must* be in the bottom two bits because swap cache
 * entries use the top 30 bits.  Because 4xx doesn't support SMP
 * anyway, M is irrelevant so we borrow it for PAGE_PRESENT.  Bit 30
 * is cleared in the TLB miss handler before the TLB entry is loaded.
 * - All other bits of the PTE are loaded into TLBLO without
 * modification, leaving us only the bits 20, 21, 24, 25, 26, 30 for
 * software PTE bits.  We actually use bits 21, 24, 25, and
 * 30 respectively for the software bits: ACCESSED, DIRTY, RW, and
 * PRESENT.
 */

/* Definitions for MicroBlaze. */
#define _PAGE_GUARDED   0x001   /* G: page is guarded from prefetch */
#define _PAGE_FILE      0x001   /* when !present: nonlinear file mapping */
#define _PAGE_PRESENT   0x002   /* software: PTE contains a translation */
#define _PAGE_NO_CACHE  0x004   /* I: caching is inhibited */
#define _PAGE_WRITETHRU 0x008   /* W: caching is write-through */
#define _PAGE_USER      0x010   /* matches one of the zone permission bits */
#define _PAGE_RW        0x040   /* software: Writes permitted */
#define _PAGE_DIRTY     0x080   /* software: dirty page */
#define _PAGE_HWWRITE   0x100   /* hardware: Dirty & RW, set in exception */
#define _PAGE_HWEXEC    0x200   /* hardware: EX permission */
#define _PAGE_ACCESSED  0x400   /* software: R: page referenced */
#define _PMD_PRESENT    PAGE_MASK

/*
 * Some bits are unused...
 */
#ifndef _PAGE_HASHPTE
#define _PAGE_HASHPTE   0
#endif
#ifndef _PTE_NONE_MASK
#define _PTE_NONE_MASK  0
#endif
#ifndef _PAGE_SHARED
#define _PAGE_SHARED    0
#endif
#ifndef _PAGE_HWWRITE
#define _PAGE_HWWRITE   0
#endif
#ifndef _PAGE_HWEXEC
#define _PAGE_HWEXEC    0
#endif
#ifndef _PAGE_EXEC
#define _PAGE_EXEC      0
#endif

#define _PAGE_CHG_MASK  (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

/*
 * Note: the _PAGE_COHERENT bit automatically gets set in the hardware
 * PTE if CONFIG_SMP is defined (hash_page does this); there is no need
 * to have it in the Linux PTE, and in fact the bit could be reused for
 * another purpose.  -- paulus.
 */
#define _PAGE_BASE      (_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_WRENABLE  (_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)

#define _PAGE_KERNEL \
        (_PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED | _PAGE_HWEXEC)

#define _PAGE_IO        (_PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED)
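
/*
 * Worked example with the values above (where _PAGE_SHARED and
 * _PAGE_EXEC fall back to 0): _PAGE_KERNEL = _PAGE_PRESENT |
 * _PAGE_ACCESSED | _PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE |
 * _PAGE_HWEXEC = 0x002 | 0x400 | 0x040 | 0x080 | 0x100 | 0x200
 * = 0x7c2, and _PAGE_IO adds _PAGE_NO_CACHE | _PAGE_GUARDED
 * (0x004 | 0x001), giving 0x7c7.
 */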

#define PAGE_NONE       __pgprot(_PAGE_BASE)
#define PAGE_READONLY   __pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_SHARED     __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
#define PAGE_SHARED_X \
                __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY       __pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X     __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)

#define PAGE_KERNEL     __pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_RO  __pgprot(_PAGE_BASE | _PAGE_SHARED)
#define PAGE_KERNEL_CI  __pgprot(_PAGE_IO)

/*
 * We consider execute permission the same as read.
 * Also, write permissions imply read permissions.
 */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY_X
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY_X
#define __P100  PAGE_READONLY
#define __P101  PAGE_READONLY_X
#define __P110  PAGE_COPY
#define __P111  PAGE_COPY_X

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY_X
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED_X
#define __S100  PAGE_READONLY
#define __S101  PAGE_READONLY_X
#define __S110  PAGE_SHARED
#define __S111  PAGE_SHARED_X
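
/*
 * Note that none of the __P* (private) entries include _PAGE_RW: a
 * private writable mapping such as __P011 gets PAGE_COPY_X, so the
 * first write faults and the generic mm code resolves it by
 * copy-on-write, only then installing a writable PTE.
 */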

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[1024];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#endif /* __ASSEMBLY__ */

#define pte_none(pte)           ((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
#define pte_present(pte)        (pte_val(pte) & _PAGE_PRESENT)
#define pte_clear(mm, addr, ptep) \
        do { set_pte_at((mm), (addr), (ptep), __pte(0)); } while (0)

#define pmd_none(pmd)           (!pmd_val(pmd))
#define pmd_bad(pmd)            ((pmd_val(pmd) & _PMD_PRESENT) == 0)
#define pmd_present(pmd)        ((pmd_val(pmd) & _PMD_PRESENT) != 0)
#define pmd_clear(pmdp)         do { pmd_val(*(pmdp)) = 0; } while (0)

#define pte_page(x)             (mem_map + (unsigned long) \
                                ((pte_val(x) - memory_start) >> PAGE_SHIFT))
#define PFN_SHIFT_OFFSET        (PAGE_SHIFT)

#define pte_pfn(x)              (pte_val(x) >> PFN_SHIFT_OFFSET)

#define pfn_pte(pfn, prot) \
        __pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) | pgprot_val(prot))
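
/*
 * Illustrative round trip (a sketch, assuming 4K pages and the 0x7c2
 * example value for PAGE_KERNEL computed above):
 * pfn_pte(0x48000, PAGE_KERNEL) yields a PTE of
 * (0x48000 << 12) | 0x7c2 = 0x480007c2, and pte_pfn() on that PTE
 * recovers 0x48000 again.
 */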

#ifndef __ASSEMBLY__
/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
static inline int pgd_none(pgd_t pgd)           { return 0; }
static inline int pgd_bad(pgd_t pgd)            { return 0; }
static inline int pgd_present(pgd_t pgd)        { return 1; }
#define pgd_clear(xp)                           do { } while (0)
#define pgd_page(pgd) \
        ((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)  { return pte_val(pte) & _PAGE_USER; }
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
static inline int pte_exec(pte_t pte)  { return pte_val(pte) & _PAGE_EXEC; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)  { return pte_val(pte) & _PAGE_FILE; }

static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
static inline void pte_cache(pte_t pte)   { pte_val(pte) &= ~_PAGE_NO_CACHE; }
static inline pte_t pte_rdprotect(pte_t pte)
        { pte_val(pte) &= ~_PAGE_USER; return pte; }
static inline pte_t pte_wrprotect(pte_t pte)
        { pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_exprotect(pte_t pte)
        { pte_val(pte) &= ~_PAGE_EXEC; return pte; }
static inline pte_t pte_mkclean(pte_t pte)
        { pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkold(pte_t pte)
        { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }

static inline pte_t pte_mkread(pte_t pte)
        { pte_val(pte) |= _PAGE_USER; return pte; }
static inline pte_t pte_mkexec(pte_t pte)
        { pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)
        { pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)
        { pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)
        { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
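
/*
 * Usage sketch (hypothetical call site): the helpers above take and
 * return a pte_t by value, so they compose, e.g. marking an entry
 * referenced and dirty before installing it:
 *
 *      set_pte_at(mm, addr, ptep, pte_mkdirty(pte_mkyoung(pte)));
 */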

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

static inline pte_t mk_pte_phys(phys_addr_t physpage, pgprot_t pgprot)
{
        pte_t pte;
        pte_val(pte) = physpage | pgprot_val(pgprot);
        return pte;
}

#define mk_pte(page, pgprot) \
({                                                                         \
        pte_t pte;                                                         \
        pte_val(pte) = (((page - mem_map) << PAGE_SHIFT) + memory_start) | \
                        pgprot_val(pgprot);                                \
        pte;                                                               \
})

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
        return pte;
}

/*
 * Atomic PTE updates.
 *
 * pte_update clears and sets bits atomically, and returns
 * the old pte value.
 * The ((unsigned long)(p+1) - 4) hack is to get to the least-significant
 * 32 bits of the PTE regardless of whether PTEs are 32 or 64 bits.
 */
static inline unsigned long pte_update(pte_t *p, unsigned long clr,
                                unsigned long set)
{
        unsigned long old, tmp, msr;
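
        /*
         * Sketch of the sequence below: msrclr clears the MSR
         * interrupt-enable bit (0x2), saving the old MSR in %2, so the
         * load/andn/or/store on the PTE word cannot be interrupted;
         * mts restores the saved MSR afterwards.
         */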
 389
 390        __asm__ __volatile__("\
 391        msrclr  %2, 0x2\n\
 392        nop\n\
 393        lw      %0, %4, r0\n\
 394        andn    %1, %0, %5\n\
 395        or      %1, %1, %6\n\
 396        sw      %1, %4, r0\n\
 397        mts     rmsr, %2\n\
 398        nop"
 399        : "=&r" (old), "=&r" (tmp), "=&r" (msr), "=m" (*p)
 400        : "r" ((unsigned long)(p+1) - 4), "r" (clr), "r" (set), "m" (*p)
 401        : "cc");
 402
 403        return old;
 404}
 405
 406/*
 407 * set_pte stores a linux PTE into the linux page table.
 408 */
 409static inline void set_pte(struct mm_struct *mm, unsigned long addr,
 410                pte_t *ptep, pte_t pte)
 411{
 412        *ptep = pte;
 413}
 414
 415static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 416                pte_t *ptep, pte_t pte)
 417{
 418        *ptep = pte;
 419}
 420
 421static inline int ptep_test_and_clear_young(struct mm_struct *mm,
 422                unsigned long addr, pte_t *ptep)
 423{
 424        return (pte_update(ptep, _PAGE_ACCESSED, 0) & _PAGE_ACCESSED) != 0;
 425}
 426
 427static inline int ptep_test_and_clear_dirty(struct mm_struct *mm,
 428                unsigned long addr, pte_t *ptep)
 429{
 430        return (pte_update(ptep, \
 431                (_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0;
 432}
 433
 434static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
 435                unsigned long addr, pte_t *ptep)
 436{
 437        return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
 438}
 439
 440/*static inline void ptep_set_wrprotect(struct mm_struct *mm,
 441                unsigned long addr, pte_t *ptep)
 442{
 443        pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0);
 444}*/
 445
 446static inline void ptep_mkdirty(struct mm_struct *mm,
 447                unsigned long addr, pte_t *ptep)
 448{
 449        pte_update(ptep, 0, _PAGE_DIRTY);
 450}
 451
 452/*#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)*/
 453
 454/* Convert pmd entry to page */
 455/* our pmd entry is an effective address of pte table*/
 456/* returns effective address of the pmd entry*/
 457#define pmd_page_kernel(pmd)    ((unsigned long) (pmd_val(pmd) & PAGE_MASK))
 458
 459/* returns struct *page of the pmd entry*/
 460#define pmd_page(pmd)   (pfn_to_page(__pa(pmd_val(pmd)) >> PAGE_SHIFT))
 461
 462/* to find an entry in a kernel page-table-directory */
 463#define pgd_offset_k(address) pgd_offset(&init_mm, address)
 464
 465/* to find an entry in a page-table-directory */
 466#define pgd_index(address)       ((address) >> PGDIR_SHIFT)
 467#define pgd_offset(mm, address)  ((mm)->pgd + pgd_index(address))
 468
 469/* Find an entry in the second-level page table.. */
 470static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
 471{
 472        return (pmd_t *) dir;
 473}
 474
 475/* Find an entry in the third-level page table.. */
 476#define pte_index(address)              \
 477        (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 478#define pte_offset_kernel(dir, addr)    \
 479        ((pte_t *) pmd_page_kernel(*(dir)) + pte_index(addr))
 480#define pte_offset_map(dir, addr)               \
 481        ((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr))
 482#define pte_offset_map_nested(dir, addr)        \
 483        ((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(addr))
 484
 485#define pte_unmap(pte)          kunmap_atomic(pte, KM_PTE0)
 486#define pte_unmap_nested(pte)   kunmap_atomic(pte, KM_PTE1)
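
/*
 * Example lookup (a sketch): walking the two-level tree for a kernel
 * address with the helpers above:
 *
 *      pgd_t *pgd = pgd_offset_k(addr);
 *      pmd_t *pmd = pmd_offset(pgd, addr);
 *      pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * pmd_offset() just casts the pgd entry, since the PMD level is folded.
 */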

/* Encode and decode a nonlinear file mapping entry */
#define PTE_FILE_MAX_BITS       29
#define pte_to_pgoff(pte)       (pte_val(pte) >> 3)
#define pgoff_to_pte(off)       ((pte_t) { ((off) << 3) | _PAGE_FILE })
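
/*
 * Worked example: pgoff_to_pte(0x1234) stores the file offset in bits
 * 3..31 and tags bit 0 with _PAGE_FILE, giving (0x1234 << 3) | 0x001
 * = 0x91a1 while leaving _PAGE_PRESENT clear; pte_to_pgoff() shifts it
 * back down.  Bits 3..31 are what give PTE_FILE_MAX_BITS = 29.
 */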

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * When flushing the tlb entry for a page, we also need to flush the hash
 * table entry.  flush_hash_page is assembler (for speed) in hashtable.S.
 */
extern int flush_hash_page(unsigned context, unsigned long va, pte_t *ptep);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va, pte_t *ptep);

/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit, or the _PAGE_HASHPTE bit
 * (if used).  -- paulus
 */
#define __swp_type(entry)       ((entry).val & 0x3f)
#define __swp_offset(entry)     ((entry).val >> 6)
#define __swp_entry(type, offset) \
                ((swp_entry_t) { (type) | ((offset) << 6) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 2 })
#define __swp_entry_to_pte(x)   ((pte_t) { (x).val << 2 })
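
/*
 * Worked example: __swp_entry(3, 0x100) gives val = 3 | (0x100 << 6)
 * = 0x4003; __swp_entry_to_pte() shifts that left by 2, producing a
 * PTE of 0x1000c in which the low two bits (and hence _PAGE_PRESENT)
 * stay clear.  __pte_to_swp_entry() reverses the shift, after which
 * __swp_type() and __swp_offset() recover 3 and 0x100.
 */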

/* CONFIG_APUS */
/* For virtual address to physical address conversion */
extern void cache_clear(__u32 addr, int length);
extern void cache_push(__u32 addr, int length);
extern int mm_end_of_chunk(unsigned long addr, int len);
extern unsigned long iopa(unsigned long addr);
/* extern unsigned long mm_ptov(unsigned long addr)
        __attribute__ ((const)); TBD */

/* Values for nocacheflag and cmode */
/* These are not used by the APUS kernel_map, but prevent
 * compilation errors.
 */
#define IOMAP_FULL_CACHING      0
#define IOMAP_NOCACHE_SER       1
#define IOMAP_NOCACHE_NONSER    2
#define IOMAP_NO_COPYBACK       3

/*
 * Map some physical address range into the kernel address space.
 */
extern unsigned long kernel_map(unsigned long paddr, unsigned long size,
                                int nocacheflag, unsigned long *memavailp);

/*
 * Set cache mode of (kernel space) address range.
 */
extern void kernel_set_cachemode(unsigned long address, unsigned long size,
                                unsigned int cmode);

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr)   (1)

#define io_remap_page_range remap_page_range

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()    do { } while (0)

void do_page_fault(struct pt_regs *regs, unsigned long address,
                   unsigned long error_code);

void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
                             unsigned int size, int flags);

void __init adjust_total_lowmem(void);
void mapin_ram(void);
int map_page(unsigned long va, phys_addr_t pa, int flags);

extern int mem_init_done;
extern unsigned long ioremap_base;
extern unsigned long ioremap_bot;

asmlinkage void __init mmu_init(void);

void __init *early_get_page(void);

void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle);
void consistent_free(void *vaddr);
void consistent_sync(void *vaddr, size_t size, int direction);
void consistent_sync_page(struct page *page, unsigned long offset,
        size_t size, int direction);
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */

#endif /* CONFIG_MMU */

#ifndef __ASSEMBLY__
#include <asm-generic/pgtable.h>

void setup_memory(void);
#endif /* __ASSEMBLY__ */

#endif /* _ASM_MICROBLAZE_PGTABLE_H */