linux/arch/powerpc/include/asm/book3s/64/pgtable.h
#ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
#define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
/*
 * This file contains the functions and defines necessary to modify and use
 * the ppc64 hashed page table.
 */

#include <asm/book3s/64/hash.h>
#include <asm/barrier.h>

/*
 * The second half of the kernel virtual space is used for IO mappings;
 * it is itself carved into the PIO region (ISA and PHB IO space) and
 * the ioremap space:
 *
 *  ISA_IO_BASE = KERN_IO_START, 64K reserved area
 *  PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces
 * IOREMAP_BASE = ISA_IO_BASE + 2G to VMALLOC_START + PGTABLE_RANGE
 */
#define KERN_IO_START   (KERN_VIRT_START + (KERN_VIRT_SIZE >> 1))
#define FULL_IO_SIZE    0x80000000ul
#define  ISA_IO_BASE    (KERN_IO_START)
#define  ISA_IO_END     (KERN_IO_START + 0x10000ul)
#define  PHB_IO_BASE    (ISA_IO_END)
#define  PHB_IO_END     (KERN_IO_START + FULL_IO_SIZE)
#define IOREMAP_BASE    (PHB_IO_END)
#define IOREMAP_END     (KERN_VIRT_START + KERN_VIRT_SIZE)
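
/*
 * The carve-up above, spelled out (an illustrative sketch, not part of
 * the original file): the 64K ISA window sits at the bottom of the IO
 * half, PHB IO space fills the remainder of the first 2G, and ioremap
 * gets everything above that up to the end of the kernel virtual
 * space. A hypothetical compile-time check of these relations could
 * read:
 *
 *      BUILD_BUG_ON(ISA_IO_END - ISA_IO_BASE != 0x10000ul);
 *      BUILD_BUG_ON(PHB_IO_END - PHB_IO_BASE != FULL_IO_SIZE - 0x10000ul);
 *      BUILD_BUG_ON(IOREMAP_BASE != ISA_IO_BASE + FULL_IO_SIZE);
 */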

#define vmemmap                 ((struct page *)VMEMMAP_BASE)
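
/*
 * With a virtually mapped mem_map, pfn <-> struct page conversion is
 * plain pointer arithmetic off vmemmap. A minimal sketch of what the
 * generic pfn_to_page()/page_to_pfn() reduce to under
 * CONFIG_SPARSEMEM_VMEMMAP (not a definition made by this file):
 *
 *      page = vmemmap + pfn;           (pfn_to_page)
 *      pfn  = page - vmemmap;          (page_to_pfn)
 */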

/* Advertise special mapping type for AGP */
#define HAVE_PAGE_AGP

/* Advertise support for _PAGE_SPECIAL */
#define __HAVE_ARCH_PTE_SPECIAL

#ifndef __ASSEMBLY__

/*
 * This is the default implementation of various PTE accessors. It's
 * used in all cases except Book3S with 64K pages, where we have a
 * concept of sub-pages.
 */
#ifndef __real_pte

#define __real_pte(e,p)         ((real_pte_t){(e)})
#define __rpte_to_pte(r)        ((r).pte)
#define __rpte_to_hidx(r,index) (pte_val(__rpte_to_pte(r)) >> _PAGE_F_GIX_SHIFT)

#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift)       \
        do {                                                             \
                index = 0;                                               \
                shift = mmu_psize_defs[psize].shift;                     \

#define pte_iterate_hashed_end() } while(0)

/*
 * We expect this to be called only for user addresses or kernel virtual
 * addresses other than the linear mapping.
 */
#define pte_pagesize_index(mm, addr, pte)       MMU_PAGE_4K

#endif /* __real_pte */
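
/*
 * The iterator above is an open/close macro pair:
 * pte_iterate_hashed_subpages() opens a do-loop over the hashed
 * sub-pages of @rpte and pte_iterate_hashed_end() closes it. A sketch
 * of the intended use, loosely modelled on the hash MMU flush paths
 * (hash, hidx and ssize are illustrative locals, not defined here):
 *
 *      pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift) {
 *              hash = hpt_hash(vpn, shift, ssize);
 *              hidx = __rpte_to_hidx(rpte, index);
 *              ...
 *      } pte_iterate_hashed_end();
 */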

static inline void pmd_set(pmd_t *pmdp, unsigned long val)
{
        *pmdp = __pmd(val);
}

static inline void pmd_clear(pmd_t *pmdp)
{
        *pmdp = __pmd(0);
}

#define pmd_none(pmd)           (!pmd_val(pmd))
#define pmd_present(pmd)        (!pmd_none(pmd))

static inline void pud_set(pud_t *pudp, unsigned long val)
{
        *pudp = __pud(val);
}

static inline void pud_clear(pud_t *pudp)
{
        *pudp = __pud(0);
}

#define pud_none(pud)           (!pud_val(pud))
#define pud_present(pud)        (pud_val(pud) != 0)

extern struct page *pud_page(pud_t pud);
extern struct page *pmd_page(pmd_t pmd);
static inline pte_t pud_pte(pud_t pud)
{
        return __pte(pud_val(pud));
}

static inline pud_t pte_pud(pte_t pte)
{
        return __pud(pte_val(pte));
}
#define pud_write(pud)          pte_write(pud_pte(pud))
#define pgd_write(pgd)          pte_write(pgd_pte(pgd))
static inline void pgd_set(pgd_t *pgdp, unsigned long val)
{
        *pgdp = __pgd(val);
}

static inline void pgd_clear(pgd_t *pgdp)
{
        *pgdp = __pgd(0);
}

#define pgd_none(pgd)           (!pgd_val(pgd))
#define pgd_present(pgd)        (!pgd_none(pgd))

static inline pte_t pgd_pte(pgd_t pgd)
{
        return __pte(pgd_val(pgd));
}

static inline pgd_t pte_pgd(pte_t pte)
{
        return __pgd(pte_val(pte));
}

extern struct page *pgd_page(pgd_t pgd);

/*
 * Find an entry in a page-table-directory.  We combine the address region
 * (the high order N bits) and the pgd portion of the address.
 */

#define pgd_offset(mm, address)  ((mm)->pgd + pgd_index(address))

#define pud_offset(pgdp, addr)  \
        (((pud_t *) pgd_page_vaddr(*(pgdp))) + pud_index(addr))
#define pmd_offset(pudp,addr) \
        (((pmd_t *) pud_page_vaddr(*(pudp))) + pmd_index(addr))
#define pte_offset_kernel(dir,addr) \
        (((pte_t *) pmd_page_vaddr(*(dir))) + pte_index(addr))

#define pte_offset_map(dir,addr)        pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)                  do { } while(0)

/* to find an entry in a kernel page-table-directory */
/* This now only contains the vmalloc pages */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
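
/*
 * A sketch of a full software walk using the helpers above, for a
 * kernel virtual address (each step assumes the previous level is
 * present, i.e. !p?d_none(); error handling omitted):
 *
 *      pgd_t *pgd = pgd_offset_k(addr);
 *      pud_t *pud = pud_offset(pgd, addr);
 *      pmd_t *pmd = pmd_offset(pud, addr);
 *      pte_t *pte = pte_offset_kernel(pmd, addr);
 */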

#define pte_ERROR(e) \
        pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
        pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pud_ERROR(e) \
        pr_err("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
#define pgd_ERROR(e) \
        pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/* Encode and de-code a swap entry */
#define MAX_SWAPFILES_CHECK() do { \
        BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS); \
        /*                                                      \
         * Don't have overlapping bits with _PAGE_HPTEFLAGS.    \
         * We filter HPTEFLAGS on set_pte.                      \
         */                                                     \
        BUILD_BUG_ON(_PAGE_HPTEFLAGS & (0x1f << _PAGE_BIT_SWAP_TYPE)); \
        BUILD_BUG_ON(_PAGE_HPTEFLAGS & _PAGE_SWP_SOFT_DIRTY);   \
        } while (0)
/*
 * On ptes we don't need to handle RADIX_TREE_EXCEPTIONAL_SHIFT.
 */
#define SWP_TYPE_BITS 5
#define __swp_type(x)           (((x).val >> _PAGE_BIT_SWAP_TYPE) \
                                & ((1UL << SWP_TYPE_BITS) - 1))
#define __swp_offset(x)         (((x).val & PTE_RPN_MASK) >> PTE_RPN_SHIFT)
#define __swp_entry(type, offset)       ((swp_entry_t) { \
                                ((type) << _PAGE_BIT_SWAP_TYPE) \
                                | (((offset) << PTE_RPN_SHIFT) & PTE_RPN_MASK)})
/*
 * swp_entry_t must be independent of pte bits. We build a swp_entry_t
 * from the swap type and offset we get from swap, and convert that to a
 * pte to find a matching pte in the linux page table.
 * Clear bits not found in swap entries here.
 */
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) & ~_PAGE_PTE })
#define __swp_entry_to_pte(x)   __pte((x).val | _PAGE_PTE)
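
/*
 * Round trip of the encoding above (a sketch; type and offset come from
 * the swap core): the swap type lands in the SWP_TYPE_BITS bits at
 * _PAGE_BIT_SWAP_TYPE and the offset in the RPN field, so a swap pte
 * never aliases the bits filtered out in __pte_to_swp_entry():
 *
 *      swp_entry_t entry = __swp_entry(type, offset);
 *      pte_t swp_pte = __swp_entry_to_pte(entry);
 *      BUG_ON(__swp_type(__pte_to_swp_entry(swp_pte)) != type);
 *      BUG_ON(__swp_offset(__pte_to_swp_entry(swp_pte)) != offset);
 */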

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SWP_SOFT_DIRTY   (1UL << (SWP_TYPE_BITS + _PAGE_BIT_SWAP_TYPE))
#else
#define _PAGE_SWP_SOFT_DIRTY    0UL
#endif /* CONFIG_MEM_SOFT_DIRTY */

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_SWP_SOFT_DIRTY);
}
static inline bool pte_swp_soft_dirty(pte_t pte)
{
        return !!(pte_val(pte) & _PAGE_SWP_SOFT_DIRTY);
}
static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_SWP_SOFT_DIRTY);
}
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
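
/*
 * The helpers above let generic mm code carry the soft-dirty bit across
 * swap-out; the usual pattern (a sketch of what e.g. the unmap path
 * does, not code from this file) is:
 *
 *      pte_t swp_pte = swp_entry_to_pte(entry);
 *      if (pte_soft_dirty(old_pte))
 *              swp_pte = pte_swp_mksoft_dirty(swp_pte);
 *      set_pte_at(mm, address, ptep, swp_pte);
 */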

void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
void pgtable_cache_init(void);

struct page *realmode_pfn_to_page(unsigned long pfn);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                       pmd_t *pmdp, pmd_t pmd);
extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
                                 pmd_t *pmd);
extern int has_transparent_hugepage(void);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pte_t pmd_pte(pmd_t pmd)
{
        return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
        return __pmd(pte_val(pte));
}

static inline pte_t *pmdp_ptep(pmd_t *pmd)
{
        return (pte_t *)pmd;
}

#define pmd_pfn(pmd)            pte_pfn(pmd_pte(pmd))
#define pmd_dirty(pmd)          pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)          pte_young(pmd_pte(pmd))
#define pmd_mkold(pmd)          pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_wrprotect(pmd)      pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)        pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkclean(pmd)        pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)        pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)        pte_pmd(pte_mkwrite(pmd_pte(pmd)))

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
#define pmd_soft_dirty(pmd)    pte_soft_dirty(pmd_pte(pmd))
#define pmd_mksoft_dirty(pmd)  pte_pmd(pte_mksoft_dirty(pmd_pte(pmd)))
#define pmd_clear_soft_dirty(pmd) pte_pmd(pte_clear_soft_dirty(pmd_pte(pmd)))
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

#ifdef CONFIG_NUMA_BALANCING
static inline int pmd_protnone(pmd_t pmd)
{
        return pte_protnone(pmd_pte(pmd));
}
#endif /* CONFIG_NUMA_BALANCING */

#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd)          pte_write(pmd_pte(pmd))

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
        return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_THP_HUGE));
}
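
/*
 * A huge pmd is built by composing the pte-level helpers wrapped above
 * and then tagging the result with _PAGE_PTE | _PAGE_THP_HUGE. A sketch
 * of what a THP fault path would do, loosely modelled on the generic
 * THP code (locals are illustrative):
 *
 *      pmd_t entry = mk_pmd(page, vma->vm_page_prot);
 *      entry = pmd_mkhuge(pmd_mkdirty(pmd_mkwrite(entry)));
 *      set_pmd_at(mm, haddr, pmdp, entry);
 */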

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
                                 unsigned long address, pmd_t *pmdp,
                                 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                                     unsigned long address, pmd_t *pmdp);
#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
                                  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
extern pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
                                     unsigned long addr, pmd_t *pmdp);

extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
                                 unsigned long address, pmd_t *pmdp);
#define pmdp_collapse_flush pmdp_collapse_flush

#define __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                       pgtable_t pgtable);
#define __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
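
/*
 * Deposit/withdraw keep a preallocated pte page glued to a huge pmd for
 * THP: generic code deposits it when installing a huge pmd and
 * withdraws it when the pmd is split or zapped, e.g. (a sketch):
 *
 *      pgtable_trans_huge_deposit(mm, pmdp, pgtable);    (at collapse)
 *      ...
 *      pgtable = pgtable_trans_huge_withdraw(mm, pmdp);  (at split/zap)
 *
 * On ppc64 the deposited page is also used to store per-pmd
 * information, which is why pmd_move_must_withdraw() below returns
 * true.
 */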

#define __HAVE_ARCH_PMDP_INVALIDATE
extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                            pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_HUGE_SPLIT_PREPARE
extern void pmdp_huge_split_prepare(struct vm_area_struct *vma,
                                    unsigned long address, pmd_t *pmdp);

#define pmd_move_must_withdraw pmd_move_must_withdraw
struct spinlock;
static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
                                         struct spinlock *old_pmd_ptl)
{
        /*
         * Archs like ppc64 use the deposited pgtable to store per-pmd
         * specific information. So when we switch the pmd, we should
         * also withdraw and re-deposit the pgtable.
         */
        return true;
}
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */