linux/arch/x86/include/asm/pgtable_64.h
#ifndef _ASM_X86_PGTABLE_64_H
#define _ASM_X86_PGTABLE_64_H

#include <linux/const.h>
#include <asm/pgtable_64_types.h>

#ifndef __ASSEMBLY__

/*
 * This file contains the functions and defines necessary to modify and use
 * the x86-64 page table tree.
 */
#include <asm/processor.h>
#include <linux/bitops.h>
#include <linux/threads.h>

extern pud_t level3_kernel_pgt[512];
extern pud_t level3_ident_pgt[512];
extern pmd_t level2_kernel_pgt[512];
extern pmd_t level2_fixmap_pgt[512];
extern pmd_t level2_ident_pgt[512];
extern pgd_t init_level4_pgt[];

#define swapper_pg_dir init_level4_pgt
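
/*
 * On x86-64 there is no separate swapper_pg_dir array: the kernel's
 * reference page table for kernel threads is simply the boot-time
 * level-4 table, init_level4_pgt, set up in head_64.S.
 */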

extern void paging_init(void);

#define pte_ERROR(e)                                    \
        printk("%s:%d: bad pte %p(%016lx).\n",          \
               __FILE__, __LINE__, &(e), pte_val(e))
#define pmd_ERROR(e)                                    \
        printk("%s:%d: bad pmd %p(%016lx).\n",          \
               __FILE__, __LINE__, &(e), pmd_val(e))
#define pud_ERROR(e)                                    \
        printk("%s:%d: bad pud %p(%016lx).\n",          \
               __FILE__, __LINE__, &(e), pud_val(e))
#define pgd_ERROR(e)                                    \
        printk("%s:%d: bad pgd %p(%016lx).\n",          \
               __FILE__, __LINE__, &(e), pgd_val(e))

struct mm_struct;

void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);

static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
                                    pte_t *ptep)
{
        *ptep = native_make_pte(0);
}

static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
        *ptep = pte;
}

static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
        native_set_pte(ptep, pte);
}

static inline pte_t native_ptep_get_and_clear(pte_t *xp)
{
#ifdef CONFIG_SMP
        return native_make_pte(xchg(&xp->pte, 0));
#else
        /*
         * native_local_ptep_get_and_clear, but duplicated because of
         * cyclic dependency.
         */
        pte_t ret = *xp;
        native_pte_clear(NULL, 0, xp);
        return ret;
#endif
}
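
/*
 * On SMP the clear must be one atomic xchg: another CPU's hardware page
 * walker may set the accessed/dirty bits in this PTE at any moment, and
 * a separate read-then-write could silently lose those updates.  The
 * returned old value lets the caller act on them, e.g. (hypothetical
 * caller):
 *
 *      pte_t old = native_ptep_get_and_clear(ptep);
 *      if (pte_dirty(old))
 *              set_page_dirty(pte_page(old));
 */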

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
        *pmdp = pmd;
}

static inline void native_pmd_clear(pmd_t *pmd)
{
        native_set_pmd(pmd, native_make_pmd(0));
}

static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
        *pudp = pud;
}

static inline void native_pud_clear(pud_t *pud)
{
        native_set_pud(pud, native_make_pud(0));
}

static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
        *pgdp = pgd;
}

static inline void native_pgd_clear(pgd_t *pgd)
{
        native_set_pgd(pgd, native_make_pgd(0));
}
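
/*
 * The native_* helpers above are the bare-metal backends for the
 * generic set_pte()/set_pmd()/set_pud()/set_pgd() accessors: with
 * CONFIG_PARAVIRT those names dispatch through pv_mmu_ops so a
 * hypervisor can intercept them, otherwise they compile down to these
 * plain stores.  Since every x86-64 entry is a naturally aligned
 * 64-bit word, a single store is atomic; no cmpxchg tricks are needed
 * as in the 32-bit PAE case.
 */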

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

/*
 * Level 4 access.
 */
static inline int pgd_large(pgd_t pgd) { return 0; }
#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)

/* PUD - Level 3 access */

/* PMD - Level 2 access */
#define pte_to_pgoff(pte) ((pte_val((pte)) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
#define pgoff_to_pte(off) ((pte_t) { .pte = ((off) << PAGE_SHIFT) |     \
                                            _PAGE_FILE })
#define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT
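
/*
 * A "file PTE" is one kind of not-present PTE: for non-linear file
 * mappings (remap_file_pages()) the PTE holds the page's offset into
 * the file rather than a physical frame, with _PAGE_FILE set to
 * distinguish it from a swap entry.
 */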

/* PTE - Level 1 access. */

/* x86-64 always has all page tables mapped. */
#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
#define pte_offset_map_nested(dir, address) pte_offset_kernel((dir), (address))
#define pte_unmap(pte) /* NOP */
#define pte_unmap_nested(pte) /* NOP */
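
/*
 * On 32-bit kernels with highmem, pte_offset_map() may need to
 * kmap_atomic() the page-table page and pte_unmap() undoes it.  Here
 * the direct mapping always covers all page-table pages, so the map
 * variants reduce to pte_offset_kernel() and the unmap variants to
 * nothing.
 */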

#define update_mmu_cache(vma, address, pte) do { } while (0)

/* Encode and de-code a swap entry */
#if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE
#define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1)
#define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 1)
#else
#define SWP_TYPE_BITS (_PAGE_BIT_PROTNONE - _PAGE_BIT_PRESENT - 1)
#define SWP_OFFSET_SHIFT (_PAGE_BIT_FILE + 1)
#endif

#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)

#define __swp_type(x)                   (((x).val >> (_PAGE_BIT_PRESENT + 1)) \
                                         & ((1U << SWP_TYPE_BITS) - 1))
#define __swp_offset(x)                 ((x).val >> SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset)       ((swp_entry_t) { \
                                         ((type) << (_PAGE_BIT_PRESENT + 1)) \
                                         | ((offset) << SWP_OFFSET_SHIFT) })
#define __pte_to_swp_entry(pte)         ((swp_entry_t) { pte_val((pte)) })
#define __swp_entry_to_pte(x)           ((pte_t) { .pte = (x).val })
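
/*
 * Resulting layout of a swap PTE, assuming the usual bit assignments
 * (_PAGE_BIT_PRESENT = 0, _PAGE_BIT_FILE = 6, _PAGE_BIT_PROTNONE = 8,
 * i.e. the first branch of the #if above):
 *
 *      bit  0    : _PAGE_PRESENT, must be zero
 *      bits 1-5  : swap type (SWP_TYPE_BITS = 5)
 *      bits 6-8  : skipped so _PAGE_FILE and _PAGE_PROTNONE stay clear
 *      bits 9-63 : swap offset
 *
 * The lower of _PAGE_BIT_FILE/_PAGE_BIT_PROTNONE caps the type field
 * and the higher one sets where the offset begins, so both software
 * bits stay out of the encoding in either branch.
 */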

extern int kern_addr_valid(unsigned long addr);
extern void cleanup_highmap(void);

#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define pgtable_cache_init()   do { } while (0)
#define check_pgt_cache()      do { } while (0)

#define PAGE_AGP    PAGE_KERNEL_NOCACHE
#define HAVE_PAGE_AGP 1

/* fs/proc/kcore.c */
#define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
#define kc_offset_to_vaddr(o) ((o) | ~__VIRTUAL_MASK)
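
/*
 * Kernel virtual addresses are canonical: the unimplemented upper bits
 * are sign-extended copies of the top implemented bit, so they cannot
 * serve directly as dense file offsets in /proc/kcore.
 * kc_vaddr_to_offset() strips those copies and kc_offset_to_vaddr()
 * restores them.
 */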

#define __HAVE_ARCH_PTE_SAME
#endif /* !__ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_64_H */