linux/arch/powerpc/include/asm/book3s/32/pgtable.h
#ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H
#define _ASM_POWERPC_BOOK3S_32_PGTABLE_H

#include <asm-generic/pgtable-nopmd.h>

#include <asm/book3s/32/hash.h>

/* And here we include common definitions */
#include <asm/pte-common.h>

/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two-level page table layout where the pgdir is 8KB and the MS 13 bits
 * are an index to the second level table.  The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT     (PAGE_SHIFT + PTE_SHIFT)
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))

#define PTRS_PER_PTE    (1 << PTE_SHIFT)
#define PTRS_PER_PMD    1
#define PTRS_PER_PGD    (1 << (32 - PGDIR_SHIFT))
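
/*
 * A worked example (a sketch, assuming the common 4K-page configuration
 * with 32-bit PTEs, i.e. PAGE_SHIFT == 12 and PTE_SHIFT == 10):
 *
 *      PGDIR_SHIFT  = 12 + 10 = 22, so one PGD entry maps
 *      PGDIR_SIZE   = 1UL << 22 = 4MB of virtual space;
 *      PTRS_PER_PTE = 1 << 10 = 1024 PTEs per page table;
 *      PTRS_PER_PGD = 1 << (32 - 22) = 1024 entries covering 4GB.
 *
 * This matches the "1-page 1024-entry pgdir pointing to 1-page
 * 1024-entry PTE pages" case described above.
 */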

#define USER_PTRS_PER_PGD       (TASK_SIZE / PGDIR_SIZE)
/*
 * This is the bottom of the PKMAP area with HIGHMEM, or an arbitrary
 * value (for now) on other configurations; kernel virtual space below
 * PKMAP and FIXMAP is laid out downward from here.
 */
#ifdef CONFIG_HIGHMEM
#define KVIRT_TOP       PKMAP_BASE
#else
#define KVIRT_TOP       (0xfe000000UL)  /* for now, could be FIXMAP_BASE ? */
#endif

/*
 * ioremap_bot starts out at IOREMAP_TOP. Early ioremaps move it down
 * from there, until mem_init(), at which point it becomes the top of
 * the vmalloc and ioremap space.
 */
#ifdef CONFIG_NOT_COHERENT_CACHE
#define IOREMAP_TOP     ((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
#else
#define IOREMAP_TOP     KVIRT_TOP
#endif

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be at least a 16MB
 * "hole" after the physical memory until the kernel virtual memory
 * starts.  That means that any out-of-bounds memory accesses will
 * hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to
 * worry about our early calls to ioremap(), which grow down from
 * IOREMAP_TOP, running into the VM area allocations (which grow upwards
 * from VMALLOC_START).  For this reason we keep ioremap_bot, so that we
 * can check when the VM system runs into the mappings set up during
 * early boot.  This really does become a problem for machines with
 * good amounts of RAM.  -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#ifdef PPC_PIN_SIZE
#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
#define VMALLOC_END     ioremap_bot
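
/*
 * Putting the above together, the kernel virtual address space is laid
 * out roughly as follows (a sketch; the exact boundaries depend on the
 * configuration and on how much RAM is mapped as lowmem):
 *
 *      PAGE_OFFSET .. high_memory    linear mapping of lowmem
 *      VMALLOC_START .. ioremap_bot  vmalloc/ioremap space (grows up)
 *      ioremap_bot .. IOREMAP_TOP    early ioremaps (grow down)
 *      IOREMAP_TOP .. KVIRT_TOP      consistent DMA, if not cache-coherent
 *      KVIRT_TOP and above           PKMAP (with HIGHMEM) and FIXMAP
 */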

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/io.h>                     /* For sub-arch specific PPC_PIN_SIZE */

extern unsigned long ioremap_bot;

/*
 * entries per page directory level: our page-table tree is two-level, so
 * we don't really have any PMD directory.
 */
#define PTE_TABLE_SIZE  (sizeof(pte_t) << PTE_SHIFT)
#define PGD_TABLE_SIZE  (sizeof(pgd_t) << (32 - PGDIR_SHIFT))

#define pte_ERROR(e) \
        pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
                (unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
        pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible; see
 * <asm/book3s/32/hash.h> for the actual definitions.
 */

#define pte_clear(mm, addr, ptep) \
        do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)

#define pmd_none(pmd)           (!pmd_val(pmd))
#define pmd_bad(pmd)            (pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd)        (pmd_val(pmd) & _PMD_PRESENT_MASK)
static inline void pmd_clear(pmd_t *pmdp)
{
        *pmdp = __pmd(0);
}

/*
 * When flushing the tlb entry for a page, we also need to flush the hash
 * table entry.  flush_hash_pages is assembler (for speed) in hashtable.S.
 */
extern int flush_hash_pages(unsigned context, unsigned long va,
                            unsigned long pmdval, int count);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va,
                          unsigned long pmdval);

/* Flush an entry from the TLB/hash table */
extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
                             unsigned long address);

/*
 * PTE updates. This function is called whenever an existing
 * valid PTE is updated. This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide. In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x, we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
 */
#ifndef CONFIG_PTE_64BIT
static inline unsigned long pte_update(pte_t *p,
                                       unsigned long clr,
                                       unsigned long set)
{
        unsigned long old, tmp;

        __asm__ __volatile__("\
1:      lwarx   %0,0,%3\n\
        andc    %1,%0,%4\n\
        or      %1,%1,%5\n"
        PPC405_ERR77(0,%3)
"       stwcx.  %1,0,%3\n\
        bne-    1b"
        : "=&r" (old), "=&r" (tmp), "=m" (*p)
        : "r" (p), "r" (clr), "r" (set), "m" (*p)
        : "cc" );

        return old;
}
#else /* CONFIG_PTE_64BIT */
static inline unsigned long long pte_update(pte_t *p,
                                            unsigned long clr,
                                            unsigned long set)
{
        unsigned long long old;
        unsigned long tmp;

        __asm__ __volatile__("\
1:      lwarx   %L0,0,%4\n\
        lwzx    %0,0,%3\n\
        andc    %1,%L0,%5\n\
        or      %1,%1,%6\n"
        PPC405_ERR77(0,%3)
"       stwcx.  %1,0,%4\n\
        bne-    1b"
        : "=&r" (old), "=&r" (tmp), "=m" (*p)
        : "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
        : "cc" );

        return old;
}
#endif /* CONFIG_PTE_64BIT */
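
/*
 * Conceptually, both variants above perform this read-modify-write,
 * made atomic against other CPUs by the lwarx/stwcx. loop (a
 * non-atomic C sketch, not the real implementation):
 *
 *      old = pte_val(*p);
 *      *p = __pte((old & ~clr) | set);
 *      return old;
 *
 * In the 64-bit PTE variant only the low word, which holds all the
 * permission and status bits, is updated atomically; the high word is
 * simply read alongside it.
 */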

/*
 * 2.6 calls this without flushing the TLB entry; that is wrong for our
 * hash-based implementation, so we fix it up here.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
{
        unsigned long old;
        old = pte_update(ptep, _PAGE_ACCESSED, 0);
        if (old & _PAGE_HASHPTE) {
                unsigned long ptephys = __pa(ptep) & PAGE_MASK;
                flush_hash_pages(context, addr, ptephys, 1);
        }
        return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
        __ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                                       pte_t *ptep)
{
        return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
                                      pte_t *ptep)
{
        pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), _PAGE_RO);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
                                           unsigned long addr, pte_t *ptep)
{
        ptep_set_wrprotect(mm, addr, ptep);
}

static inline void __ptep_set_access_flags(struct mm_struct *mm,
                                           pte_t *ptep, pte_t entry)
{
        unsigned long set = pte_val(entry) &
                (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
        unsigned long clr = ~pte_val(entry) & _PAGE_RO;

        pte_update(ptep, clr, set);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)   (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)

/*
 * Note that on Book E processors, the pmd contains the kernel virtual
 * (lowmem) address of the pte page.  The physical address is less useful
 * because everything runs with translation enabled (even the TLB miss
 * handler).  On everything else the pmd contains the physical address
 * of the pte page.  -- paulus
 */
#ifndef CONFIG_BOOKE
#define pmd_page_vaddr(pmd)     \
        ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)           \
        pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
#else
#define pmd_page_vaddr(pmd)     \
        ((unsigned long) (pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)           \
        pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)       ((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)  ((mm)->pgd + pgd_index(address))

/* Find an entry in the third-level page table. */
#define pte_index(address)              \
        (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr)    \
        ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir, addr)               \
        ((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
#define pte_unmap(pte)          kunmap_atomic(pte)
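
/*
 * A lookup sketch: finding the kernel PTE for a kernel virtual address.
 * The pud and pmd levels are folded by <asm-generic/pgtable-nopmd.h>,
 * so the intermediate steps below are essentially casts:
 *
 *      pgd_t *pgd = pgd_offset_k(addr);
 *      pud_t *pud = pud_offset(pgd, addr);
 *      pmd_t *pmd = pmd_offset(pud, addr);
 *
 *      if (pmd_present(*pmd))
 *              pte = pte_offset_kernel(pmd, addr);
 */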

/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit or the _PAGE_HASHPTE bit (if used).
 *   -- paulus
 */
#define __swp_type(entry)               ((entry).val & 0x1f)
#define __swp_offset(entry)             ((entry).val >> 5)
#define __swp_entry(type, offset)       ((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)         ((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)           ((pte_t) { (x).val << 3 })
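
/*
 * The resulting PTE image of a swap entry (a sketch, assuming the usual
 * hash.h layout where _PAGE_PRESENT and _PAGE_HASHPTE sit in the low
 * bits): the low 3 bits stay clear, bits 3-7 hold the swap type and
 * bits 8-31 the swap offset.
 *
 *      |           offset            | type  |0 0 0|
 *       31                          8 7     3 2   0
 */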

#ifndef CONFIG_PPC_4K_PAGES
void pgtable_cache_init(void);
#else
/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()    do { } while (0)
#endif

extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
                      pmd_t **pmdp);

/* Generic accessors to PTE bits */
static inline int pte_write(pte_t pte)          { return !!(pte_val(pte) & _PAGE_RW); }
static inline int pte_dirty(pte_t pte)          { return !!(pte_val(pte) & _PAGE_DIRTY); }
static inline int pte_young(pte_t pte)          { return !!(pte_val(pte) & _PAGE_ACCESSED); }
static inline int pte_special(pte_t pte)        { return !!(pte_val(pte) & _PAGE_SPECIAL); }
static inline int pte_none(pte_t pte)           { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline pgprot_t pte_pgprot(pte_t pte)    { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }

static inline int pte_present(pte_t pte)
{
        return pte_val(pte) & _PAGE_PRESENT;
}

/* Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
        return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
                     pgprot_val(pgprot));
}

static inline unsigned long pte_pfn(pte_t pte)
{
        return pte_val(pte) >> PTE_RPN_SHIFT;
}
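
/*
 * Usage sketch: the two helpers above are inverses for any pfn that
 * fits in the PTE's RPN field, e.g.:
 *
 *      pte_t pte = pfn_pte(pfn, PAGE_KERNEL);
 *
 *      BUG_ON(pte_pfn(pte) != pfn);
 */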

/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_RW);
}

static inline pte_t pte_mkclean(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_RW);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
        return pte;
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

/*
 * This low level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors.  It's
 * a horrible mess that I'm not going to try to clean up now but
 * I'm keeping it in one place rather than spread around.
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
                                pte_t *ptep, pte_t pte, int percpu)
{
#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
        /* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
         * helper pte_update() which does an atomic update. We need to do that
         * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
         * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
         * the hash bits instead (i.e., same as the non-SMP case).
         */
        if (percpu)
                *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
                              | (pte_val(pte) & ~_PAGE_HASHPTE));
        else
                pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));

#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
        /* Second case is 32-bit with 64-bit PTE.  In this case, we
         * can just store as long as we do the two halves in the right order
         * with a barrier in between. This is possible because we take care,
         * in the hash code, to pre-invalidate if the PTE was already hashed,
         * which synchronizes us with any concurrent invalidation.
         * In the percpu case, we also fall back to the simple update
         * preserving the hash bits.
         */
        if (percpu) {
                *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
                              | (pte_val(pte) & ~_PAGE_HASHPTE));
                return;
        }
        if (pte_val(*ptep) & _PAGE_HASHPTE)
                flush_hash_entry(mm, ptep, addr);
        __asm__ __volatile__("\
                stw%U0%X0 %2,%0\n\
                eieio\n\
                stw%U0%X0 %L2,%1"
        : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
        : "r" (pte) : "memory");

#elif defined(CONFIG_PPC_STD_MMU_32)
        /* Third case is 32-bit hash table in UP mode. We need to preserve
         * the _PAGE_HASHPTE bit since we may not have invalidated the previous
         * translation in the hash yet (done in a subsequent flush_tlb_xxx())
         * and so we need to keep track that this PTE needs invalidating.
         */
        *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
                      | (pte_val(pte) & ~_PAGE_HASHPTE));

#else
#error "Not supported"
#endif
}
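
/*
 * A usage sketch (not part of this header): the common powerpc code
 * provides set_pte_at(), which funnels into this helper with
 * percpu == 0, roughly:
 *
 *      void set_pte_at(struct mm_struct *mm, unsigned long addr,
 *                      pte_t *ptep, pte_t pte)
 *      {
 *              __set_pte_at(mm, addr, ptep, pte, 0);
 *      }
 *
 * The percpu == 1 path is for strictly per-CPU mappings such as the
 * kmap_atomic() slots, where no concurrent hash invalidation can occur.
 */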

/*
 * Helpers to mark a page protection value as "uncacheable" or to
 * otherwise adjust its cache-control bits.
 */

#define _PAGE_CACHE_CTL (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
                         _PAGE_WRITETHRU)

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
        return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
                        _PAGE_NO_CACHE | _PAGE_GUARDED);
}

#define pgprot_noncached_wc pgprot_noncached_wc
static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
{
        return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
                        _PAGE_NO_CACHE);
}

#define pgprot_cached pgprot_cached
static inline pgprot_t pgprot_cached(pgprot_t prot)
{
        return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
                        _PAGE_COHERENT);
}

#define pgprot_cached_wthru pgprot_cached_wthru
static inline pgprot_t pgprot_cached_wthru(pgprot_t prot)
{
        return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
                        _PAGE_COHERENT | _PAGE_WRITETHRU);
}

#define pgprot_cached_noncoherent pgprot_cached_noncoherent
static inline pgprot_t pgprot_cached_noncoherent(pgprot_t prot)
{
        return __pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL);
}

#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t prot)
{
        return pgprot_noncached_wc(prot);
}
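
/*
 * A usage sketch (the address and size are hypothetical): combining
 * these helpers with ioremap_prot() to map a device register block
 * uncached and guarded:
 *
 *      void __iomem *regs;
 *
 *      regs = ioremap_prot(0xf0000000, 0x1000,
 *                          pgprot_val(pgprot_noncached(PAGE_KERNEL)));
 */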

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_32_PGTABLE_H */