/* linux/arch/ia64/include/asm/page.h */
#ifndef _ASM_IA64_PAGE_H
#define _ASM_IA64_PAGE_H
/*
 * Pagetable related stuff.
 *
 * Copyright (C) 1998, 1999, 2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <asm/intrinsics.h>
#include <asm/types.h>

/*
 * The top three bits of an IA64 address are its Region Number.
 * Different regions are assigned to different purposes.
 */
#define RGN_SHIFT	(61)
#define RGN_BASE(r)	(__IA64_UL_CONST(r)<<RGN_SHIFT)	/* base address of region r */
#define RGN_BITS	(RGN_BASE(-1))			/* mask of the region-number bits */

#define RGN_KERNEL	7	/* Identity mapped region */
#define RGN_UNCACHED	6	/* Identity mapped I/O region */
#define RGN_GATE	5	/* Gate page, Kernel text, etc */
#define RGN_HPAGE	4	/* For Huge TLB pages */

/*
 * PAGE_SHIFT determines the actual kernel page size.
 * Exactly one CONFIG_IA64_PAGE_SIZE_* option must be selected.
 */
#if defined(CONFIG_IA64_PAGE_SIZE_4KB)
# define PAGE_SHIFT	12
#elif defined(CONFIG_IA64_PAGE_SIZE_8KB)
# define PAGE_SHIFT	13
#elif defined(CONFIG_IA64_PAGE_SIZE_16KB)
# define PAGE_SHIFT	14
#elif defined(CONFIG_IA64_PAGE_SIZE_64KB)
# define PAGE_SHIFT	16
#else
# error Unsupported page size!
#endif

#define PAGE_SIZE		(__IA64_UL_CONST(1) << PAGE_SHIFT)
#define PAGE_MASK		(~(PAGE_SIZE - 1))

#define PERCPU_PAGE_SHIFT	18	/* log2() of max. size of per-CPU area */
#define PERCPU_PAGE_SIZE	(__IA64_UL_CONST(1) << PERCPU_PAGE_SHIFT)
  46
  47
#ifdef CONFIG_HUGETLB_PAGE
# define HPAGE_REGION_BASE	RGN_BASE(RGN_HPAGE)
/*
 * HPAGE_SHIFT expands to the variable `hpage_shift' (declared extern
 * further down), so the huge-page size is chosen at runtime rather
 * than compile time.
 */
# define HPAGE_SHIFT		hpage_shift
# define HPAGE_SHIFT_DEFAULT	28	/* check ia64 SDM for architecture supported size */
# define HPAGE_SIZE		(__IA64_UL_CONST(1) << HPAGE_SHIFT)
# define HPAGE_MASK		(~(HPAGE_SIZE - 1))

# define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif /* CONFIG_HUGETLB_PAGE */
  57
#ifdef __ASSEMBLY__
/* For assembly, physical<->virtual is a plain offset from PAGE_OFFSET. */
# define __pa(x)		((x) - PAGE_OFFSET)
# define __va(x)		((x) + PAGE_OFFSET)
#else /* !__ASSEMBLY */
#  define STRICT_MM_TYPECHECKS

extern void clear_page (void *page);
extern void copy_page (void *to, void *from);

/*
 * clear_user_page() and copy_user_page() can't be inline functions because
 * flush_dcache_page() can't be defined until later...
 */
#define clear_user_page(addr, vaddr, page)	\
do {						\
	clear_page(addr);			\
	flush_dcache_page(page);		\
} while (0)

#define copy_user_page(to, from, vaddr, page)	\
do {						\
	copy_page((to), (from));		\
	flush_dcache_page(page);		\
} while (0)


/*
 * Allocate a zeroed user highmem page and flush its dcache aliases;
 * evaluates to the page pointer (NULL on allocation failure).
 */
#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr)		\
({									\
	struct page *page = alloc_page_vma(				\
		GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr);	\
	if (page)							\
		flush_dcache_page(page);				\
	page;								\
})

#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE

#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
  96
#ifdef CONFIG_VIRTUAL_MEM_MAP
extern int ia64_pfn_valid (unsigned long pfn);
#else
/* Without a virtual memmap every pfn in range is backed. */
# define ia64_pfn_valid(pfn) 1
#endif

#ifdef CONFIG_VIRTUAL_MEM_MAP
extern struct page *vmem_map;
#ifdef CONFIG_DISCONTIGMEM
/* pfn <-> page via the virtually mapped memmap array. */
# define page_to_pfn(page)	((unsigned long) (page - vmem_map))
# define pfn_to_page(pfn)	(vmem_map + (pfn))
#else
# include <asm-generic/memory_model.h>
#endif
#else
# include <asm-generic/memory_model.h>
#endif

#ifdef CONFIG_FLATMEM
# define pfn_valid(pfn)		(((pfn) < max_mapnr) && ia64_pfn_valid(pfn))
#elif defined(CONFIG_DISCONTIGMEM)
extern unsigned long min_low_pfn;
extern unsigned long max_low_pfn;
# define pfn_valid(pfn)		(((pfn) >= min_low_pfn) && ((pfn) < max_low_pfn) && ia64_pfn_valid(pfn))
#endif

#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
 126
/*
 * Decompose a 64-bit address into region number (top 3 bits) and
 * intra-region offset (low 61 bits) via a bitfield overlay.
 */
typedef union ia64_va {
	struct {
		unsigned long off : 61;		/* intra-region offset */
		unsigned long reg :  3;		/* region number */
	} f;
	unsigned long l;
	void *p;
} ia64_va;

/*
 * Note: These macros depend on the fact that PAGE_OFFSET has all
 * region bits set to 1 and all other bits set to zero.  They are
 * expressed in this way to ensure they result in a single "dep"
 * instruction.
 */
#define __pa(x)		({ia64_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;})
#define __va(x)		({ia64_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})

#define REGION_NUMBER(x)	({ia64_va _v; _v.l = (long) (x); _v.f.reg;})
#define REGION_OFFSET(x)	({ia64_va _v; _v.l = (long) (x); _v.f.off;})

#ifdef CONFIG_HUGETLB_PAGE
/* Scale a huge-page address down to the corresponding base-page address. */
# define htlbpage_to_page(x)	(((unsigned long) REGION_NUMBER(x) << 61)			\
				 | (REGION_OFFSET(x) >> (HPAGE_SHIFT-PAGE_SHIFT)))
# define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
extern unsigned int hpage_shift;	/* runtime value behind HPAGE_SHIFT */
#endif
 154
/*
 * Return the allocation order (log2 of the number of pages) needed to
 * hold `size' bytes.  Instead of looping, this loads (size - 1) into a
 * long double and uses the ia64 getf.exp intrinsic to read out its
 * binary exponent in one instruction; subtracting the 0xffff exponent
 * bias and PAGE_SHIFT converts that to a page order.
 */
static __inline__ int
get_order (unsigned long size)
{
	long double d = size - 1;
	long order;

	order = ia64_getf_exp(d);
	/* unbias the exponent and convert from bytes to pages */
	order = order - PAGE_SHIFT - 0xffff + 1;
	if (order < 0)
		order = 0;	/* anything <= one page is order 0 */
	return order;
}
 167
#endif /* !__ASSEMBLY__ */

#ifdef STRICT_MM_TYPECHECKS
  /*
   * These are used to make use of C type-checking..
   * Wrapping each table-entry type in its own struct makes mixing
   * pte/pmd/pgd values a compile error instead of a silent bug.
   */
  typedef struct { unsigned long pte; } pte_t;
  typedef struct { unsigned long pmd; } pmd_t;
#ifdef CONFIG_PGTABLE_4
  typedef struct { unsigned long pud; } pud_t;	/* 4-level page tables only */
#endif
  typedef struct { unsigned long pgd; } pgd_t;
  typedef struct { unsigned long pgprot; } pgprot_t;
  typedef struct page *pgtable_t;

# define pte_val(x)	((x).pte)
# define pmd_val(x)	((x).pmd)
#ifdef CONFIG_PGTABLE_4
# define pud_val(x)	((x).pud)
#endif
# define pgd_val(x)	((x).pgd)
# define pgprot_val(x)	((x).pgprot)

# define __pte(x)	((pte_t) { (x) } )
# define __pmd(x)	((pmd_t) { (x) } )
# define __pgprot(x)	((pgprot_t) { (x) } )

#else /* !STRICT_MM_TYPECHECKS */
  /*
   * .. while these make it easier on the compiler
   */
# ifndef __ASSEMBLY__
    typedef unsigned long pte_t;
    typedef unsigned long pmd_t;
    typedef unsigned long pgd_t;
    typedef unsigned long pgprot_t;
    typedef struct page *pgtable_t;
# endif

# define pte_val(x)	(x)
# define pmd_val(x)	(x)
# define pgd_val(x)	(x)
# define pgprot_val(x)	(x)

# define __pte(x)	(x)
# define __pgd(x)	(x)
# define __pgprot(x)	(x)
#endif /* !STRICT_MM_TYPECHECKS */

/* Kernel identity mapping lives in region 7 (all region bits set). */
#define PAGE_OFFSET			RGN_BASE(RGN_KERNEL)

#define VM_DATA_DEFAULT_FLAGS		(VM_READ | VM_WRITE |					\
					 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC |		\
					 (((current->personality & READ_IMPLIES_EXEC) != 0)	\
					  ? VM_EXEC : 0))

#define GATE_ADDR		RGN_BASE(RGN_GATE)

/*
 * 0xa000000000000000+2*PERCPU_PAGE_SIZE
 * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page)
 */
#define KERNEL_START		 (GATE_ADDR+__IA64_UL_CONST(0x100000000))
#define PERCPU_ADDR		(-PERCPU_PAGE_SIZE)	/* per-CPU area sits at the top of the address space */
#define LOAD_OFFSET		(KERNEL_START - KERNEL_TR_PAGE_SIZE)

#endif /* _ASM_IA64_PAGE_H */
 235