linux/arch/powerpc/include/asm/book3s/32/pgtable.h
#ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H
#define _ASM_POWERPC_BOOK3S_32_PGTABLE_H

#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>

#include <asm/book3s/32/hash.h>

/* And here we include common definitions */
#include <asm/pte-common.h>

#define PTE_INDEX_SIZE  PTE_SHIFT
#define PMD_INDEX_SIZE  0
#define PUD_INDEX_SIZE  0
#define PGD_INDEX_SIZE  (32 - PGDIR_SHIFT)

#define PMD_CACHE_INDEX PMD_INDEX_SIZE

#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE  (sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE  0
#define PUD_TABLE_SIZE  0
#define PGD_TABLE_SIZE  (sizeof(pgd_t) << PGD_INDEX_SIZE)
#endif  /* __ASSEMBLY__ */

#define PTRS_PER_PTE    (1 << PTE_INDEX_SIZE)
#define PTRS_PER_PGD    (1 << PGD_INDEX_SIZE)

/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two level page table layout where the pgdir is 8KB and the MS 13 bits
 * are an index to the second level table.  The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT     (PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))
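
/*
 * Worked example (illustrative, assuming the usual 4k pages): with
 * PAGE_SHIFT = 12 and PTE_INDEX_SIZE = 10 (32-bit PTEs, 1024-entry PTE
 * pages as described above), PGDIR_SHIFT = 12 + 10 = 22, so each of the
 * 1 << (32 - 22) = 1024 PGD entries maps 1 << 22 = 4MB, covering the
 * full 4GB of 32-bit address space.
 */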

#define USER_PTRS_PER_PGD       (TASK_SIZE / PGDIR_SIZE)
/*
 * This is the bottom of the PKMAP area with HIGHMEM, or an arbitrary
 * value (for now) on others; from here we can start laying out the
 * kernel virtual space that goes below PKMAP and FIXMAP.
 */
#ifdef CONFIG_HIGHMEM
#define KVIRT_TOP       PKMAP_BASE
#else
#define KVIRT_TOP       (0xfe000000UL)  /* for now, could be FIXMAP_BASE ? */
#endif

/*
 * ioremap_bot starts at that address. Early ioremaps move down from there
 * until mem_init(), at which point this becomes the top of the vmalloc
 * and ioremap space.
 */
#ifdef CONFIG_NOT_COHERENT_CACHE
#define IOREMAP_TOP     ((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
#else
#define IOREMAP_TOP     KVIRT_TOP
#endif

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be a 64MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about clashes between our early calls to ioremap() that start growing down
 * from ioremap_base being run into the VM area allocations (growing upwards
 * from VMALLOC_START).  For this reason we have ioremap_bot to check when
 * we actually run into our mappings set up in early boot with the VM
 * system.  This really does become a problem for machines with good amounts
 * of RAM.  -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#ifdef PPC_PIN_SIZE
#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
#define VMALLOC_END     ioremap_bot

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/io.h>                     /* For sub-arch specific PPC_PIN_SIZE */

extern unsigned long ioremap_bot;

/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS         0

#define pte_ERROR(e) \
        pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
                (unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
        pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#define pte_clear(mm, addr, ptep) \
        do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)

#define pmd_none(pmd)           (!pmd_val(pmd))
#define pmd_bad(pmd)            (pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd)        (pmd_val(pmd) & _PMD_PRESENT_MASK)
static inline void pmd_clear(pmd_t *pmdp)
{
        *pmdp = __pmd(0);
}


/*
 * When flushing the tlb entry for a page, we also need to flush the hash
 * table entry.  flush_hash_pages is assembler (for speed) in hashtable.S.
 */
extern int flush_hash_pages(unsigned context, unsigned long va,
                            unsigned long pmdval, int count);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va,
                          unsigned long pmdval);

/* Flush an entry from the TLB/hash table */
extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
                             unsigned long address);

/*
 * PTE updates. This function is called whenever an existing
 * valid PTE is updated. This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide. In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x, we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
 */
#ifndef CONFIG_PTE_64BIT
static inline unsigned long pte_update(pte_t *p,
                                       unsigned long clr,
                                       unsigned long set)
{
        unsigned long old, tmp;

        __asm__ __volatile__("\
1:      lwarx   %0,0,%3\n\
        andc    %1,%0,%4\n\
        or      %1,%1,%5\n"
        PPC405_ERR77(0,%3)
"       stwcx.  %1,0,%3\n\
        bne-    1b"
        : "=&r" (old), "=&r" (tmp), "=m" (*p)
        : "r" (p), "r" (clr), "r" (set), "m" (*p)
        : "cc" );

        return old;
}
#else /* CONFIG_PTE_64BIT */
static inline unsigned long long pte_update(pte_t *p,
                                            unsigned long clr,
                                            unsigned long set)
{
        unsigned long long old;
        unsigned long tmp;

        __asm__ __volatile__("\
1:      lwarx   %L0,0,%4\n\
        lwzx    %0,0,%3\n\
        andc    %1,%L0,%5\n\
        or      %1,%1,%6\n"
        PPC405_ERR77(0,%3)
"       stwcx.  %1,0,%4\n\
        bne-    1b"
        : "=&r" (old), "=&r" (tmp), "=m" (*p)
        : "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
        : "cc" );

        return old;
}
#endif /* CONFIG_PTE_64BIT */

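/*
 * Illustrative sketch (not part of the kernel API): ignoring atomicity,
 * the lwarx/stwcx. loops above compute the read-modify-write below; the
 * reservation pair just retries until the store lands without a
 * conflicting update from another CPU.
 */
#if 0
static inline unsigned long pte_update_sketch(pte_t *p, unsigned long clr,
                                              unsigned long set)
{
        unsigned long old = pte_val(*p);        /* lwarx  */
        *p = __pte((old & ~clr) | set);         /* stwcx., retried on failure */
        return old;
}
#endif
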
/*
 * 2.6 calls this without flushing the TLB entry; that is wrong
 * for our hash-based implementation, so we fix it up here.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
{
        unsigned long old;
        old = pte_update(ptep, _PAGE_ACCESSED, 0);
        if (old & _PAGE_HASHPTE) {
                unsigned long ptephys = __pa(ptep) & PAGE_MASK;
                flush_hash_pages(context, addr, ptephys, 1);
        }
        return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
        __ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)

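/*
 * The hash flush above is what makes page aging work here: if the stale
 * hash PTE were left in place, later accesses would be translated without
 * faulting, _PAGE_ACCESSED would never be set again, and the page would
 * look idle even while in active use.
 */
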
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                                       pte_t *ptep)
{
        return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
                                      pte_t *ptep)
{
        pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), _PAGE_RO);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
                                           unsigned long addr, pte_t *ptep)
{
        ptep_set_wrprotect(mm, addr, ptep);
}


static inline void __ptep_set_access_flags(struct mm_struct *mm,
                                           pte_t *ptep, pte_t entry,
                                           unsigned long address)
{
        unsigned long set = pte_val(entry) &
                (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
        unsigned long clr = ~pte_val(entry) & _PAGE_RO;

        pte_update(ptep, clr, set);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)   (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
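
/*
 * _PAGE_HASHPTE is masked out above because it is bookkeeping owned by
 * the hash-fault code rather than part of the logical mapping: two PTEs
 * differing only in that bit map the same page with the same permissions.
 */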

/*
 * Note that on Book E processors, the pmd contains the kernel virtual
 * (lowmem) address of the pte page.  The physical address is less useful
 * because everything runs with translation enabled (even the TLB miss
 * handler).  On everything else the pmd contains the physical address
 * of the pte page.  -- paulus
 */
#ifndef CONFIG_BOOKE
#define pmd_page_vaddr(pmd)     \
        ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)           \
        pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
#else
#define pmd_page_vaddr(pmd)     \
        ((unsigned long) (pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)           \
        pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif

/* To find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* To find an entry in a page-table-directory */
#define pgd_index(address)       ((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)  ((mm)->pgd + pgd_index(address))

/* Find an entry in the third-level page table. */
#define pte_index(address)              \
        (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr)    \
        ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir, addr)               \
        ((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
#define pte_unmap(pte)          kunmap_atomic(pte)

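/*
 * Illustrative walk (hypothetical helper, not part of this header): from
 * an mm and address down to the PTE using the accessors above. The pud
 * and pmd levels are folded by the generic pgtable-nopmd.h include, so
 * the middle steps just re-cast the same entry.
 */
#if 0
static inline pte_t *walk_to_pte_sketch(struct mm_struct *mm,
                                        unsigned long addr)
{
        pgd_t *pgd = pgd_offset(mm, addr);
        pud_t *pud = pud_offset(pgd, addr);     /* folded level */
        pmd_t *pmd = pmd_offset(pud, addr);     /* folded level */

        if (pmd_none(*pmd) || pmd_bad(*pmd))
                return NULL;
        return pte_offset_kernel(pmd, addr);    /* lowmem PTEs only */
}
#endif
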
/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit or the _PAGE_HASHPTE bit (if used).
 *   -- paulus
 */
#define __swp_type(entry)               ((entry).val & 0x1f)
#define __swp_offset(entry)             ((entry).val >> 5)
#define __swp_entry(type, offset)       ((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)         ((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)           ((pte_t) { (x).val << 3 })

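/*
 * Worked example (illustrative): type 2 at offset 100 encodes as
 * 2 | (100 << 5) = 0xc82, and __swp_entry_to_pte() shifts that left by 3,
 * keeping the low PTE bits (including _PAGE_PRESENT and _PAGE_HASHPTE)
 * clear as required above.  Decoding reverses the steps exactly.
 */
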
extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
                      pmd_t **pmdp);

int map_kernel_page(unsigned long va, phys_addr_t pa, int flags);

/* Generic accessors to PTE bits */
static inline int pte_write(pte_t pte)          { return !!(pte_val(pte) & _PAGE_RW);}
static inline int pte_dirty(pte_t pte)          { return !!(pte_val(pte) & _PAGE_DIRTY); }
static inline int pte_young(pte_t pte)          { return !!(pte_val(pte) & _PAGE_ACCESSED); }
static inline int pte_special(pte_t pte)        { return !!(pte_val(pte) & _PAGE_SPECIAL); }
static inline int pte_none(pte_t pte)           { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline pgprot_t pte_pgprot(pte_t pte)    { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }

static inline int pte_present(pte_t pte)
{
        return pte_val(pte) & _PAGE_PRESENT;
}

/* Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
        return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
                     pgprot_val(pgprot));
}

static inline unsigned long pte_pfn(pte_t pte)
{
        return pte_val(pte) >> PTE_RPN_SHIFT;
}

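/*
 * Sketch (illustrative): pfn_pte()/pte_pfn() are exact inverses as long
 * as the pfn fits in the RPN field, since the protection bits live below
 * PTE_RPN_SHIFT:
 */
#if 0
static inline int pfn_round_trips(unsigned long pfn, pgprot_t prot)
{
        return pte_pfn(pfn_pte(pfn, prot)) == pfn;
}
#endif
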
/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_RW);
}

static inline pte_t pte_mkclean(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_RW);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
        return pte;
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

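/*
 * Usage sketch (hypothetical helper): the modifiers are pure functions on
 * the PTE value, so they compose freely, e.g. marking a PTE both dirty
 * and young after a write access:
 */
#if 0
static inline pte_t pte_mkdirty_young(pte_t pte)
{
        return pte_mkdirty(pte_mkyoung(pte));
}
#endif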

/*
 * This low level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors; it's
 * a horrible mess that I'm not going to try to clean up now, but
 * I'm keeping it in one place rather than spreading it around.
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
                                pte_t *ptep, pte_t pte, int percpu)
{
#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
        /* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
         * helper pte_update() which does an atomic update. We need to do that
         * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
         * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
         * the hash bits instead (ie, same as the non-SMP case)
         */
        if (percpu)
                *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
                              | (pte_val(pte) & ~_PAGE_HASHPTE));
        else
                pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));

#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
        /* Second case is 32-bit with 64-bit PTE.  In this case, we
         * can just store as long as we do the two halves in the right order
         * with a barrier in between. This is possible because we take care,
         * in the hash code, to pre-invalidate if the PTE was already hashed,
         * which synchronizes us with any concurrent invalidation.
         * In the percpu case, we also fall back to the simple update
         * preserving the hash bits.
         */
        if (percpu) {
                *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
                              | (pte_val(pte) & ~_PAGE_HASHPTE));
                return;
        }
        if (pte_val(*ptep) & _PAGE_HASHPTE)
                flush_hash_entry(mm, ptep, addr);
        __asm__ __volatile__("\
                stw%U0%X0 %2,%0\n\
                eieio\n\
                stw%U0%X0 %L2,%1"
        : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
        : "r" (pte) : "memory");

#elif defined(CONFIG_PPC_STD_MMU_32)
        /* Third case is 32-bit hash table in UP mode: we need to preserve
         * the _PAGE_HASHPTE bit since we may not have invalidated the previous
         * translation in the hash yet (that is done in a subsequent
         * flush_tlb_xxx()), so we need to keep track that this PTE still
         * needs invalidating.
         */
        *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
                      | (pte_val(pte) & ~_PAGE_HASHPTE));

#else
#error "Not supported"
#endif
}

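/*
 * Usage sketch (illustrative): the common set_pte_at() entry point is
 * expected to pass percpu = 0; percpu = 1 is for strictly per-CPU
 * mappings such as kmap_atomic slots, where no other CPU can race with
 * the update.
 */
#if 0
static inline void set_pte_at_sketch(struct mm_struct *mm, unsigned long addr,
                                     pte_t *ptep, pte_t pte)
{
        __set_pte_at(mm, addr, ptep, pte, 0);
}
#endif
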
/*
 * Macro to mark a page protection value as "uncacheable".
 */

#define _PAGE_CACHE_CTL (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
                         _PAGE_WRITETHRU)

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
        return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
                        _PAGE_NO_CACHE | _PAGE_GUARDED);
}

#define pgprot_noncached_wc pgprot_noncached_wc
static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
{
        return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
                        _PAGE_NO_CACHE);
}

#define pgprot_cached pgprot_cached
static inline pgprot_t pgprot_cached(pgprot_t prot)
{
        return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
                        _PAGE_COHERENT);
}

#define pgprot_cached_wthru pgprot_cached_wthru
static inline pgprot_t pgprot_cached_wthru(pgprot_t prot)
{
        return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
                        _PAGE_COHERENT | _PAGE_WRITETHRU);
}

#define pgprot_cached_noncoherent pgprot_cached_noncoherent
static inline pgprot_t pgprot_cached_noncoherent(pgprot_t prot)
{
        return __pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL);
}

#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t prot)
{
        return pgprot_noncached_wc(prot);
}

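/*
 * Usage sketch (hypothetical driver code): a device mmap() handler would
 * typically combine these helpers with io_remap_pfn_range().  mmio_pfn
 * below is a made-up placeholder for the device's page frame number.
 */
#if 0
static int mmio_mmap_sketch(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        return io_remap_pfn_range(vma, vma->vm_start,
                                  mmio_pfn, /* hypothetical pfn */
                                  vma->vm_end - vma->vm_start,
                                  vma->vm_page_prot);
}
#endif
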
#endif /* !__ASSEMBLY__ */

#endif /*  _ASM_POWERPC_BOOK3S_32_PGTABLE_H */