linux/arch/ia64/include/asm/pgtable.h
#ifndef _ASM_IA64_PGTABLE_H
#define _ASM_IA64_PGTABLE_H

/*
 * This file contains the functions and defines necessary to modify and use
 * the IA-64 page table tree.
 *
 * This hopefully works with any (fixed) IA-64 page-size, as defined
 * in <asm/page.h>.
 *
 * Copyright (C) 1998-2005 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 */


#include <asm/mman.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/types.h>

#define IA64_MAX_PHYS_BITS      50      /* max. number of physical address bits (architected) */

/*
 * First, define the various bits in a PTE.  Note that the PTE format
 * matches the VHPT short format, the first doubleword of the VHPT long
 * format, and the first doubleword of the TLB insertion format.
 */
#define _PAGE_P_BIT             0
#define _PAGE_A_BIT             5
#define _PAGE_D_BIT             6

#define _PAGE_P                 (1 << _PAGE_P_BIT)      /* page present bit */
#define _PAGE_MA_WB             (0x0 <<  2)     /* write back memory attribute */
#define _PAGE_MA_UC             (0x4 <<  2)     /* uncacheable memory attribute */
#define _PAGE_MA_UCE            (0x5 <<  2)     /* UC exported attribute */
#define _PAGE_MA_WC             (0x6 <<  2)     /* write coalescing memory attribute */
#define _PAGE_MA_NAT            (0x7 <<  2)     /* not-a-thing attribute */
#define _PAGE_MA_MASK           (0x7 <<  2)
#define _PAGE_PL_0              (0 <<  7)       /* privilege level 0 (kernel) */
#define _PAGE_PL_1              (1 <<  7)       /* privilege level 1 (unused) */
#define _PAGE_PL_2              (2 <<  7)       /* privilege level 2 (unused) */
#define _PAGE_PL_3              (3 <<  7)       /* privilege level 3 (user) */
#define _PAGE_PL_MASK           (3 <<  7)
#define _PAGE_AR_R              (0 <<  9)       /* read only */
#define _PAGE_AR_RX             (1 <<  9)       /* read & execute */
#define _PAGE_AR_RW             (2 <<  9)       /* read & write */
#define _PAGE_AR_RWX            (3 <<  9)       /* read, write & execute */
#define _PAGE_AR_R_RW           (4 <<  9)       /* read / read & write */
#define _PAGE_AR_RX_RWX         (5 <<  9)       /* read & exec / read, write & exec */
#define _PAGE_AR_RWX_RW         (6 <<  9)       /* read, write & exec / read & write */
#define _PAGE_AR_X_RX           (7 <<  9)       /* exec & promote / read & exec */
#define _PAGE_AR_MASK           (7 <<  9)
#define _PAGE_AR_SHIFT          9
#define _PAGE_A                 (1 << _PAGE_A_BIT)      /* page accessed bit */
#define _PAGE_D                 (1 << _PAGE_D_BIT)      /* page dirty bit */
#define _PAGE_PPN_MASK          (((__IA64_UL(1) << IA64_MAX_PHYS_BITS) - 1) & ~0xfffUL)
#define _PAGE_ED                (__IA64_UL(1) << 52)    /* exception deferral */
#define _PAGE_PROTNONE          (__IA64_UL(1) << 63)

/* Valid only for a PTE with the present bit cleared: */
#define _PAGE_FILE              (1 << 1)                /* see swap & file pte remarks below */

#define _PFN_MASK               _PAGE_PPN_MASK
/* Mask of bits which may be changed by pte_modify(); the odd bits are there for _PAGE_PROTNONE */
#define _PAGE_CHG_MASK  (_PAGE_P | _PAGE_PROTNONE | _PAGE_PL_MASK | _PAGE_AR_MASK | _PAGE_ED)

#define _PAGE_SIZE_4K   12
#define _PAGE_SIZE_8K   13
#define _PAGE_SIZE_16K  14
#define _PAGE_SIZE_64K  16
#define _PAGE_SIZE_256K 18
#define _PAGE_SIZE_1M   20
#define _PAGE_SIZE_4M   22
#define _PAGE_SIZE_16M  24
#define _PAGE_SIZE_64M  26
#define _PAGE_SIZE_256M 28
#define _PAGE_SIZE_1G   30
#define _PAGE_SIZE_4G   32

#define __ACCESS_BITS           _PAGE_ED | _PAGE_A | _PAGE_P | _PAGE_MA_WB
#define __DIRTY_BITS_NO_ED      _PAGE_A | _PAGE_P | _PAGE_D | _PAGE_MA_WB
#define __DIRTY_BITS            _PAGE_ED | __DIRTY_BITS_NO_ED
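
/*
 * Illustrative sketch (not used by the kernel itself): a complete PTE
 * for physical page frame "pfn", mapped cacheable, accessed and dirty,
 * kernel-only, read/write, would be assembled from the bits above as:
 *
 *      unsigned long pteval = ((pfn << PAGE_SHIFT) & _PAGE_PPN_MASK)
 *                             | __DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RW;
 *
 * pfn_pte()/mk_pte() below build PTEs this way from a pfn and a pgprot_t.
 */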

/*
 * Number of pointers a page-table level can hold, expressed as a shift
 * (each entry is 8 bytes, hence PAGE_SHIFT - 3):
 */
#define PTRS_PER_PTD_SHIFT      (PAGE_SHIFT-3)

/*
 * Definitions for fourth level:
 */
#define PTRS_PER_PTE    (__IA64_UL(1) << (PTRS_PER_PTD_SHIFT))

/*
 * Definitions for third level:
 *
 * PMD_SHIFT determines the size of the area a third-level page table
 * can map.
 */
#define PMD_SHIFT       (PAGE_SHIFT + (PTRS_PER_PTD_SHIFT))
#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))
#define PTRS_PER_PMD    (1UL << (PTRS_PER_PTD_SHIFT))

#ifdef CONFIG_PGTABLE_4
/*
 * Definitions for second level:
 *
 * PUD_SHIFT determines the size of the area a second-level page table
 * can map.
 */
#define PUD_SHIFT       (PMD_SHIFT + (PTRS_PER_PTD_SHIFT))
#define PUD_SIZE        (1UL << PUD_SHIFT)
#define PUD_MASK        (~(PUD_SIZE-1))
#define PTRS_PER_PUD    (1UL << (PTRS_PER_PTD_SHIFT))
#endif

/*
 * Definitions for first level:
 *
 * PGDIR_SHIFT determines what a first-level page table entry can map.
 */
#ifdef CONFIG_PGTABLE_4
#define PGDIR_SHIFT             (PUD_SHIFT + (PTRS_PER_PTD_SHIFT))
#else
#define PGDIR_SHIFT             (PMD_SHIFT + (PTRS_PER_PTD_SHIFT))
#endif
#define PGDIR_SIZE              (__IA64_UL(1) << PGDIR_SHIFT)
#define PGDIR_MASK              (~(PGDIR_SIZE-1))
#define PTRS_PER_PGD_SHIFT      PTRS_PER_PTD_SHIFT
#define PTRS_PER_PGD            (1UL << PTRS_PER_PGD_SHIFT)
#define USER_PTRS_PER_PGD       (5*PTRS_PER_PGD/8)      /* regions 0-4 are user regions */
#define FIRST_USER_ADDRESS      0
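
/*
 * Worked example (a sketch, assuming the default 16KB pages, i.e.
 * PAGE_SHIFT = 14, and the three-level layout):
 *
 *      PTRS_PER_PTD_SHIFT = 14 - 3 = 11        (2048 entries per level)
 *      PMD_SHIFT   = 14 + 11 = 25              (one PMD entry maps 32MB)
 *      PGDIR_SHIFT = 25 + 11 = 36              (one PGD entry maps 64GB)
 *
 * Since the top 3 bits of the PGD index are consumed by the region
 * number (see RGN_MAP_SHIFT below), each region can map at most
 * 2^(36 + 11 - 3) = 2^44 bytes.
 */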

/*
 * All the normal masks have the "page accessed" bits on, as any time
 * they are used, the page is accessed. They are cleared only by the
 * page-out routines.
 */
#define PAGE_NONE       __pgprot(_PAGE_PROTNONE | _PAGE_A)
#define PAGE_SHARED     __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
#define PAGE_READONLY   __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
#define PAGE_COPY       __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
#define PAGE_COPY_EXEC  __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
#define PAGE_GATE       __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
#define PAGE_KERNEL     __pgprot(__DIRTY_BITS  | _PAGE_PL_0 | _PAGE_AR_RWX)
#define PAGE_KERNELRX   __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
#define PAGE_KERNEL_UC  __pgprot(__DIRTY_BITS  | _PAGE_PL_0 | _PAGE_AR_RWX | \
                                 _PAGE_MA_UC)

# ifndef __ASSEMBLY__

#include <linux/sched.h>        /* for mm_struct */
#include <linux/bitops.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/*
 * Next come the mappings that determine how mmap() protection bits
 * (PROT_EXEC, PROT_READ, PROT_WRITE, PROT_NONE) get implemented.  The
 * _P version gets used for a private (copy-on-write) mapping, the _S
 * version for a shared mapping created with MAP_SHARED.  For a private
 * mapping, we do a copy-on-write if a task attempts to write to the
 * page; see the example after the tables below.
 */
        /* xwr */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_READONLY   /* write to priv pg -> copy & make writable */
#define __P011  PAGE_READONLY   /* ditto */
#define __P100  __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
#define __P101  __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
#define __P110  PAGE_COPY_EXEC
#define __P111  PAGE_COPY_EXEC

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED     /* we don't have (and don't need) write-only */
#define __S011  PAGE_SHARED
#define __S100  __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
#define __S101  __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
#define __S110  __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
#define __S111  __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)

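/*
 * Example (illustrative only): mmap(..., PROT_READ|PROT_WRITE,
 * MAP_PRIVATE, ...) starts out with __P011, i.e. PAGE_READONLY; the
 * first write faults and the COW handler installs a writable private
 * copy.  The same protection with MAP_SHARED yields __S011, i.e.
 * PAGE_SHARED, which is writable from the start.
 */
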
#define pgd_ERROR(e)    printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
#ifdef CONFIG_PGTABLE_4
#define pud_ERROR(e)    printk("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
#endif
#define pmd_ERROR(e)    printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pte_ERROR(e)    printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))


/*
 * Some definitions to translate between mem_map, PTEs, and page addresses:
 */


/* Quick test to see if ADDR is a (potentially) valid physical address. */
static inline long
ia64_phys_addr_valid (unsigned long addr)
{
        return (addr & (local_cpu_data->unimpl_pa_mask)) == 0;
}

/*
 * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
 * memory.  For the return value to be meaningful, ADDR must be >=
 * PAGE_OFFSET.  This operation can be relatively expensive (e.g.,
 * require a hash-, or multi-level tree-lookup or something of that
 * sort) but it guarantees to return TRUE only if accessing the page
 * at that address does not cause an error.  Note that there may be
 * addresses for which kern_addr_valid() returns FALSE even though an
 * access would not cause an error (e.g., this is typically true for
 * memory mapped I/O regions).
 *
 * XXX Need to implement this for IA-64.
 */
#define kern_addr_valid(addr)   (1)


/*
 * Now come the defines and routines to manage and access the
 * three- or four-level (with CONFIG_PGTABLE_4) page table.
 */


#define VMALLOC_START           (RGN_BASE(RGN_GATE) + 0x200000000UL)
#ifdef CONFIG_VIRTUAL_MEM_MAP
# define VMALLOC_END_INIT       (RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
extern unsigned long VMALLOC_END;
#else
#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_SPARSEMEM_VMEMMAP)
/* SPARSEMEM_VMEMMAP uses half of vmalloc... */
# define VMALLOC_END            (RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 10)))
# define vmemmap                ((struct page *)VMALLOC_END)
#else
# define VMALLOC_END            (RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
#endif
#endif
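
/*
 * Concrete numbers (a sketch, assuming 16KB pages, PAGE_SHIFT = 14):
 * vmalloc space starts 8GB (0x200000000) into the gate region and ends
 * 1UL << (4*14 - 9) = 2^47 bytes into it; when SPARSEMEM_VMEMMAP is
 * enabled, the upper half of that space is claimed for the virtual
 * mem_map, so VMALLOC_END drops to 2^46.
 */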

/* fs/proc/kcore.c */
#define kc_vaddr_to_offset(v) ((v) - RGN_BASE(RGN_GATE))
#define kc_offset_to_vaddr(o) ((o) + RGN_BASE(RGN_GATE))

#define RGN_MAP_SHIFT (PGDIR_SHIFT + PTRS_PER_PGD_SHIFT - 3)
#define RGN_MAP_LIMIT   ((1UL << RGN_MAP_SHIFT) - PAGE_SIZE)    /* per region addr limit */

/*
 * Conversion functions: convert page frame number (pfn) and a protection value to a page
 * table entry (pte).
 */
#define pfn_pte(pfn, pgprot) \
({ pte_t __pte; pte_val(__pte) = ((pfn) << PAGE_SHIFT) | pgprot_val(pgprot); __pte; })

/* Extract pfn from pte.  */
#define pte_pfn(_pte)           ((pte_val(_pte) & _PFN_MASK) >> PAGE_SHIFT)

#define mk_pte(page, pgprot)    pfn_pte(page_to_pfn(page), (pgprot))

/* This takes a physical page address that is used by the remapping functions */
#define mk_pte_phys(physpage, pgprot) \
({ pte_t __pte; pte_val(__pte) = physpage + pgprot_val(pgprot); __pte; })
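
/*
 * Usage sketch (illustrative; "page" is a hypothetical struct page *):
 *
 *      pte_t pte = mk_pte(page, PAGE_READONLY);
 *      unsigned long pfn = pte_pfn(pte);       -> recovers page_to_pfn(page)
 *
 * i.e. pte_pfn() inverts pfn_pte()/mk_pte() on the pfn field.
 */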

#define pte_modify(_pte, newprot) \
        (__pte((pte_val(_pte) & ~_PAGE_CHG_MASK) | (pgprot_val(newprot) & _PAGE_CHG_MASK)))

#define pte_none(pte)                   (!pte_val(pte))
#define pte_present(pte)                (pte_val(pte) & (_PAGE_P | _PAGE_PROTNONE))
#define pte_clear(mm,addr,pte)          (pte_val(*(pte)) = 0UL)
/* pte_page() returns the "struct page *" corresponding to the PTE: */
#define pte_page(pte)                   virt_to_page(((pte_val(pte) & _PFN_MASK) + PAGE_OFFSET))

#define pmd_none(pmd)                   (!pmd_val(pmd))
#define pmd_bad(pmd)                    (!ia64_phys_addr_valid(pmd_val(pmd)))
#define pmd_present(pmd)                (pmd_val(pmd) != 0UL)
#define pmd_clear(pmdp)                 (pmd_val(*(pmdp)) = 0UL)
#define pmd_page_vaddr(pmd)             ((unsigned long) __va(pmd_val(pmd) & _PFN_MASK))
#define pmd_page(pmd)                   virt_to_page((pmd_val(pmd) + PAGE_OFFSET))

#define pud_none(pud)                   (!pud_val(pud))
#define pud_bad(pud)                    (!ia64_phys_addr_valid(pud_val(pud)))
#define pud_present(pud)                (pud_val(pud) != 0UL)
#define pud_clear(pudp)                 (pud_val(*(pudp)) = 0UL)
#define pud_page_vaddr(pud)             ((unsigned long) __va(pud_val(pud) & _PFN_MASK))
#define pud_page(pud)                   virt_to_page((pud_val(pud) + PAGE_OFFSET))

#ifdef CONFIG_PGTABLE_4
#define pgd_none(pgd)                   (!pgd_val(pgd))
#define pgd_bad(pgd)                    (!ia64_phys_addr_valid(pgd_val(pgd)))
#define pgd_present(pgd)                (pgd_val(pgd) != 0UL)
#define pgd_clear(pgdp)                 (pgd_val(*(pgdp)) = 0UL)
#define pgd_page_vaddr(pgd)             ((unsigned long) __va(pgd_val(pgd) & _PFN_MASK))
#define pgd_page(pgd)                   virt_to_page((pgd_val(pgd) + PAGE_OFFSET))
#endif

/*
 * The following have defined behavior only if pte_present() is true.
 */
/* The AR values 2..6 (RW, RWX, R_RW, RX_RWX, RWX_RW) are exactly the writable ones: */
#define pte_write(pte)  ((unsigned) (((pte_val(pte) & _PAGE_AR_MASK) >> _PAGE_AR_SHIFT) - 2) <= 4)
#define pte_exec(pte)           ((pte_val(pte) & _PAGE_AR_RX) != 0)
#define pte_dirty(pte)          ((pte_val(pte) & _PAGE_D) != 0)
#define pte_young(pte)          ((pte_val(pte) & _PAGE_A) != 0)
#define pte_file(pte)           ((pte_val(pte) & _PAGE_FILE) != 0)
#define pte_special(pte)        0

/*
 * Note: we convert AR_RWX to AR_RX and AR_RW to AR_R by clearing the 2nd bit in the
 * access rights:
 */
#define pte_wrprotect(pte)      (__pte(pte_val(pte) & ~_PAGE_AR_RW))
#define pte_mkwrite(pte)        (__pte(pte_val(pte) | _PAGE_AR_RW))
#define pte_mkold(pte)          (__pte(pte_val(pte) & ~_PAGE_A))
#define pte_mkyoung(pte)        (__pte(pte_val(pte) | _PAGE_A))
#define pte_mkclean(pte)        (__pte(pte_val(pte) & ~_PAGE_D))
#define pte_mkdirty(pte)        (__pte(pte_val(pte) | _PAGE_D))
#define pte_mkhuge(pte)         (__pte(pte_val(pte)))
#define pte_mkspecial(pte)      (pte)

/*
 * Because ia64's Icache and Dcache are not coherent (on a cpu), we need to
 * sync the icache and dcache when we insert a *new* executable page.
 * __ia64_sync_icache_dcache() checks the PG_arch_1 bit and flushes the
 * icache if necessary.
 *
 *  set_pte() is also called by the kernel, but we can expect that the kernel
 *  flushes the icache explicitly if necessary.
 */
#define pte_present_exec_user(pte)\
        ((pte_val(pte) & (_PAGE_P | _PAGE_PL_MASK | _PAGE_AR_RX)) == \
                (_PAGE_P | _PAGE_PL_3 | _PAGE_AR_RX))
extern void __ia64_sync_icache_dcache(pte_t pteval);
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
        /* page is present && page is user && page is executable
         * && (page swapin or new page or page migration
         *      || copy_on_write with page copying.)
         */
        if (pte_present_exec_user(pteval) &&
            (!pte_present(*ptep) ||
                pte_pfn(*ptep) != pte_pfn(pteval)))
                /* load_module() calls flush_icache_range() explicitly */
                __ia64_sync_icache_dcache(pteval);
        *ptep = pteval;
}

#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
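
/*
 * Example (an illustrative sketch): installing a freshly allocated
 * user page; set_pte_at() syncs the caches first when the new PTE is
 * an executable user PTE:
 *
 *      set_pte_at(vma->vm_mm, addr, ptep, mk_pte(page, vma->vm_page_prot));
 */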

/*
 * Make page protection values cacheable, uncacheable, or write-
 * combining.  Note that "protection" is really a misnomer here as the
 * protection value contains the memory attribute bits, dirty bits, and
 * various other bits as well.
 */
#define pgprot_cacheable(prot)          __pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WB)
#define pgprot_noncached(prot)          __pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_UC)
#define pgprot_writecombine(prot)       __pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WC)
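
/*
 * Typical use (a hypothetical driver's mmap method), mapping device
 * registers uncacheably:
 *
 *      vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 */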

struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

static inline unsigned long
pgd_index (unsigned long address)
{
        unsigned long region = address >> 61;
        unsigned long l1index = (address >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1);

        return (region << (PAGE_SHIFT - 6)) | l1index;
}
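
/*
 * Worked example (a sketch, assuming 16KB pages): PTRS_PER_PGD = 2048,
 * so each of the 8 regions gets 2048/8 = 256 first-level slots.  The 3
 * region bits (address >> 61) form the top 3 bits of the PGD index,
 * and l1index fills in the low PAGE_SHIFT - 6 = 8 bits.
 */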

/* The offset in the 1-level directory is given by the 3 region bits
   (61..63) and the level-1 bits.  */
static inline pgd_t*
pgd_offset (const struct mm_struct *mm, unsigned long address)
{
        return mm->pgd + pgd_index(address);
}

/* In the kernel's mapped region we completely ignore the region number
   (since we know it's in region number 5). */
#define pgd_offset_k(addr) \
        (init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)))

/* Look up a pgd entry in the gate area.  On IA-64, the gate-area
   resides in the kernel-mapped segment, hence we use pgd_offset_k()
   here.  */
#define pgd_offset_gate(mm, addr)       pgd_offset_k(addr)

#ifdef CONFIG_PGTABLE_4
/* Find an entry in the second-level page table.. */
#define pud_offset(dir,addr) \
        ((pud_t *) pgd_page_vaddr(*(dir)) + (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
#endif

/* Find an entry in the third-level page table.. */
#define pmd_offset(dir,addr) \
        ((pmd_t *) pud_page_vaddr(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))

/*
 * Find an entry in the fourth-level (PTE) page table.  This looks more
 * complicated than it should be because some platforms place page tables
 * in high memory.
 */
#define pte_index(addr)         (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir,addr)     ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir,addr)        pte_offset_kernel(dir, addr)
#define pte_unmap(pte)                  do { } while (0)

/* atomic versions of some of the PTE manipulations: */

static inline int
ptep_test_and_clear_young (struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
        if (!pte_young(*ptep))
                return 0;
        return test_and_clear_bit(_PAGE_A_BIT, ptep);
#else
        pte_t pte = *ptep;
        if (!pte_young(pte))
                return 0;
        set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
        return 1;
#endif
}

static inline pte_t
ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
        return __pte(xchg((long *) ptep, 0));
#else
        pte_t pte = *ptep;
        pte_clear(mm, addr, ptep);
        return pte;
#endif
}

static inline void
ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
        unsigned long new, old;

        do {
                old = pte_val(*ptep);
                new = pte_val(pte_wrprotect(__pte (old)));
        } while (cmpxchg((unsigned long *) ptep, old, new) != old);
#else
        pte_t old_pte = *ptep;
        set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
#endif
}
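
/*
 * Note on the SMP variant above: it is a classic read-modify-write
 * loop; re-read the PTE, clear the write bit, and retry if another
 * CPU changed the PTE (e.g. turned on the A or D bit) between the
 * load and the cmpxchg().
 */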

static inline int
pte_same (pte_t a, pte_t b)
{
        return pte_val(a) == pte_val(b);
}

#define update_mmu_cache(vma, address, ptep) do { } while (0)

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init (void);

/*
 * Note: The macros below rely on the fact that MAX_SWAPFILES_SHIFT <= number of
 *       bits in the swap-type field of the swap pte.  It would be nice to
 *       enforce that, but we can't easily include <linux/swap.h> here.
 *       (Of course, better still would be to define MAX_SWAPFILES_SHIFT here...).
 *
 * Format of swap pte:
 *      bit   0   : present bit (must be zero)
 *      bit   1   : _PAGE_FILE (must be zero)
 *      bits  2- 8: swap-type
 *      bits  9-62: swap offset
 *      bit  63   : _PAGE_PROTNONE bit
 *
 * Format of file pte:
 *      bit   0   : present bit (must be zero)
 *      bit   1   : _PAGE_FILE (must be one)
 *      bits  2-62: file_offset/PAGE_SIZE
 *      bit  63   : _PAGE_PROTNONE bit
 */
#define __swp_type(entry)               (((entry).val >> 2) & 0x7f)
#define __swp_offset(entry)             (((entry).val << 1) >> 10)
#define __swp_entry(type,offset)        ((swp_entry_t) { ((type) << 2) | ((long) (offset) << 9) })
#define __pte_to_swp_entry(pte)         ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)           ((pte_t) { (x).val })

#define PTE_FILE_MAX_BITS               61
#define pte_to_pgoff(pte)               ((pte_val(pte) << 1) >> 3)
#define pgoff_to_pte(off)               ((pte_t) { ((off) << 2) | _PAGE_FILE })
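
/*
 * Round-trip sketch (illustrative values): encoding slot 0x1234 of
 * swap device type 3 and decoding it again:
 *
 *      swp_entry_t e = __swp_entry(3, 0x1234);
 *      pte_t pte = __swp_entry_to_pte(e);      (present bit is zero)
 *
 * Decoding: __swp_type(e) == 3, __swp_offset(e) == 0x1234.
 */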

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
extern struct page *zero_page_memmap_ptr;
#define ZERO_PAGE(vaddr) (zero_page_memmap_ptr)

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

#ifdef CONFIG_HUGETLB_PAGE
#define HUGETLB_PGDIR_SHIFT     (HPAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define HUGETLB_PGDIR_SIZE      (__IA64_UL(1) << HUGETLB_PGDIR_SHIFT)
#define HUGETLB_PGDIR_MASK      (~(HUGETLB_PGDIR_SIZE-1))
#endif


#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Update PTEP with ENTRY, which is guaranteed to be a less
 * restrictive PTE.  That is, ENTRY may have the ACCESSED, DIRTY, and
 * WRITABLE bits turned on, when the value at PTEP did not.  The
 * WRITABLE bit may only be turned on if SAFELY_WRITABLE is TRUE.
 *
 * SAFELY_WRITABLE is TRUE if we can update the value at PTEP without
 * having to worry about races.  On SMP machines, there are only two
 * cases where this is true:
 *
 *      (1) *PTEP has the PRESENT bit turned OFF
 *      (2) ENTRY has the DIRTY bit turned ON
 *
 * On ia64, we could implement this routine with a cmpxchg()-loop
 * which ORs in the _PAGE_A/_PAGE_D bit if they're set in ENTRY.
 * However, like on x86, we can get a more streamlined version by
 * observing that it is OK to drop ACCESSED bit updates when
 * SAFELY_WRITABLE is FALSE.  Besides being rare, all that would do is
 * result in an extra Access-bit fault, which would then turn on the
 * ACCESSED bit in the low-level fault handler (iaccess_bit or
 * daccess_bit in ivt.S).
 */
#ifdef CONFIG_SMP
# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
({                                                                      \
        int __changed = !pte_same(*(__ptep), __entry);                  \
        if (__changed && __safely_writable) {                           \
                set_pte(__ptep, __entry);                               \
                flush_tlb_page(__vma, __addr);                          \
        }                                                               \
        __changed;                                                      \
})
#else
# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
({                                                                      \
        int __changed = !pte_same(*(__ptep), __entry);                  \
        if (__changed) {                                                \
                set_pte_at((__vma)->vm_mm, (__addr), __ptep, __entry);  \
                flush_tlb_page(__vma, __addr);                          \
        }                                                               \
        __changed;                                                      \
})
#endif

#  ifdef CONFIG_VIRTUAL_MEM_MAP
  /* arch mem_map init routine is needed due to holes in a virtual mem_map */
#   define __HAVE_ARCH_MEMMAP_INIT
    extern void memmap_init (unsigned long size, int nid, unsigned long zone,
                             unsigned long start_pfn);
#  endif /* CONFIG_VIRTUAL_MEM_MAP */
# endif /* !__ASSEMBLY__ */

/*
 * Identity-mapped regions use a large page size.  We'll call such large pages
 * "granules".  If you can think of a better name that's unambiguous, let me
 * know...
 */
#if defined(CONFIG_IA64_GRANULE_64MB)
# define IA64_GRANULE_SHIFT     _PAGE_SIZE_64M
#elif defined(CONFIG_IA64_GRANULE_16MB)
# define IA64_GRANULE_SHIFT     _PAGE_SIZE_16M
#endif
#define IA64_GRANULE_SIZE       (1 << IA64_GRANULE_SHIFT)
/*
 * log2() of the page size we use to map the kernel image (IA64_TR_KERNEL):
 */
#define KERNEL_TR_PAGE_SHIFT    _PAGE_SIZE_64M
#define KERNEL_TR_PAGE_SIZE     (1 << KERNEL_TR_PAGE_SHIFT)

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()    do { } while (0)

/* These tell get_user_pages() that the first gate page is accessible from user-level.  */
#define FIXADDR_USER_START      GATE_ADDR
#ifdef HAVE_BUGGY_SEGREL
# define FIXADDR_USER_END       (GATE_ADDR + 2*PAGE_SIZE)
#else
# define FIXADDR_USER_END       (GATE_ADDR + 2*PERCPU_PAGE_SIZE)
#endif

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
#define __HAVE_ARCH_PGD_OFFSET_GATE


#ifndef CONFIG_PGTABLE_4
#include <asm-generic/pgtable-nopud.h>
#endif
#include <asm-generic/pgtable.h>

#endif /* _ASM_IA64_PGTABLE_H */