   1/* SPDX-License-Identifier: GPL-2.0 */
   2#ifndef _ASM_IA64_PAGE_H
   3#define _ASM_IA64_PAGE_H
   4/*
   5 * Pagetable related stuff.
   6 *
   7 * Copyright (C) 1998, 1999, 2002 Hewlett-Packard Co
   8 *      David Mosberger-Tang <davidm@hpl.hp.com>
   9 */
  10
  11#include <asm/intrinsics.h>
  12#include <asm/types.h>
  13
/*
 * The top three bits of an IA64 address are its Region Number.
 * Different regions are assigned to different purposes.
 */
#define RGN_SHIFT       (61)
/* Base virtual address of region number 'r'. */
#define RGN_BASE(r)     (__IA64_UL_CONST(r)<<RGN_SHIFT)
/* Mask covering the three region-number bits (RGN_BASE(-1) sets them all). */
#define RGN_BITS        (RGN_BASE(-1))

#define RGN_KERNEL      7       /* Identity mapped region */
#define RGN_UNCACHED    6       /* Identity mapped I/O region */
#define RGN_GATE        5       /* Gate page, Kernel text, etc */
#define RGN_HPAGE       4       /* For Huge TLB pages */
  26
/*
 * PAGE_SHIFT determines the actual kernel page size.
 */
#if defined(CONFIG_IA64_PAGE_SIZE_4KB)
# define PAGE_SHIFT     12
#elif defined(CONFIG_IA64_PAGE_SIZE_8KB)
# define PAGE_SHIFT     13
#elif defined(CONFIG_IA64_PAGE_SIZE_16KB)
# define PAGE_SHIFT     14
#elif defined(CONFIG_IA64_PAGE_SIZE_64KB)
# define PAGE_SHIFT     16
#else
# error Unsupported page size!
#endif

#define PAGE_SIZE               (__IA64_UL_CONST(1) << PAGE_SHIFT)
/* Clears the intra-page offset bits of an address. */
#define PAGE_MASK               (~(PAGE_SIZE - 1))

#define PERCPU_PAGE_SHIFT       18      /* log2() of max. size of per-CPU area */
#define PERCPU_PAGE_SIZE        (__IA64_UL_CONST(1) << PERCPU_PAGE_SHIFT)
  47
  48
#ifdef CONFIG_HUGETLB_PAGE
/* Huge pages get their own identity-mapped region (region 4). */
# define HPAGE_REGION_BASE      RGN_BASE(RGN_HPAGE)
/* Huge page size is chosen at runtime: HPAGE_SHIFT reads a variable. */
# define HPAGE_SHIFT            hpage_shift
# define HPAGE_SHIFT_DEFAULT    28      /* check ia64 SDM for architecture supported size */
# define HPAGE_SIZE             (__IA64_UL_CONST(1) << HPAGE_SHIFT)
# define HPAGE_MASK             (~(HPAGE_SIZE - 1))

# define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif /* CONFIG_HUGETLB_PAGE */
  58
#ifdef __ASSEMBLY__
  /* In assembly, phys<->virt translation is a plain PAGE_OFFSET bias. */
# define __pa(x)                ((x) - PAGE_OFFSET)
# define __va(x)                ((x) + PAGE_OFFSET)
#else /* !__ASSEMBLY__ */
   /* C code uses struct-wrapped page-table types for type checking. */
#  define STRICT_MM_TYPECHECKS
  64
extern void clear_page (void *page);
extern void copy_page (void *to, void *from);

/*
 * clear_user_page() and copy_user_page() can't be inline functions because
 * flush_dcache_page() can't be defined until later...
 */
#define clear_user_page(addr, vaddr, page)      \
do {                                            \
        clear_page(addr);                       \
        flush_dcache_page(page);                \
} while (0)

#define copy_user_page(to, from, vaddr, page)   \
do {                                            \
        copy_page((to), (from));                \
        flush_dcache_page(page);                \
} while (0)


/*
 * Allocate a zeroed highmem page for a user mapping and flush its
 * dcache alias before handing it out; evaluates to the page (or NULL).
 */
#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr)          \
({                                                                      \
        struct page *page = alloc_page_vma(                             \
                GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr);  \
        if (page)                                                       \
                flush_dcache_page(page);                                \
        page;                                                           \
})

#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
  95
/* A kernel virtual address is valid iff its underlying pfn is valid. */
#define virt_addr_valid(kaddr)  pfn_valid(__pa(kaddr) >> PAGE_SHIFT)

#ifdef CONFIG_VIRTUAL_MEM_MAP
extern int ia64_pfn_valid (unsigned long pfn);
#else
# define ia64_pfn_valid(pfn) 1
#endif

#ifdef CONFIG_VIRTUAL_MEM_MAP
/* Virtually mapped mem_map; with DISCONTIGMEM, pfn<->page is plain
 * pointer arithmetic against it instead of the generic memory model. */
extern struct page *vmem_map;
#ifdef CONFIG_DISCONTIGMEM
# define page_to_pfn(page)      ((unsigned long) (page - vmem_map))
# define pfn_to_page(pfn)       (vmem_map + (pfn))
# define __pfn_to_phys(pfn)     PFN_PHYS(pfn)
#else
# include <asm-generic/memory_model.h>
#endif
#else
# include <asm-generic/memory_model.h>
#endif

#ifdef CONFIG_FLATMEM
# define pfn_valid(pfn)         (((pfn) < max_mapnr) && ia64_pfn_valid(pfn))
#elif defined(CONFIG_DISCONTIGMEM)
extern unsigned long min_low_pfn;
extern unsigned long max_low_pfn;
# define pfn_valid(pfn)         (((pfn) >= min_low_pfn) && ((pfn) < max_low_pfn) && ia64_pfn_valid(pfn))
#endif

#define page_to_phys(page)      (page_to_pfn(page) << PAGE_SHIFT)
#define virt_to_page(kaddr)     pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_kaddr(pfn)       __va((pfn) << PAGE_SHIFT)
 128
/*
 * Overlay view of an IA64 virtual address: 61 low bits of intra-region
 * offset plus the 3-bit region number on top; also accessible as a
 * plain long or pointer.
 */
typedef union ia64_va {
        struct {
                unsigned long off : 61;         /* intra-region offset */
                unsigned long reg :  3;         /* region number */
        } f;
        unsigned long l;
        void *p;
} ia64_va;

/*
 * Note: These macros depend on the fact that PAGE_OFFSET has all
 * region bits set to 1 and all other bits set to zero.  They are
 * expressed in this way to ensure they result in a single "dep"
 * instruction.
 */
#define __pa(x)         ({ia64_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;})
#define __va(x)         ({ia64_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})

/* Extract the region number / intra-region offset of an address. */
#define REGION_NUMBER(x)        ({ia64_va _v; _v.l = (long) (x); _v.f.reg;})
#define REGION_OFFSET(x)        ({ia64_va _v; _v.l = (long) (x); _v.f.off;})
 149
 150#ifdef CONFIG_HUGETLB_PAGE
 151# define htlbpage_to_page(x)    (((unsigned long) REGION_NUMBER(x) << 61)                       \
 152                                 | (REGION_OFFSET(x) >> (HPAGE_SHIFT-PAGE_SHIFT)))
 153# define HUGETLB_PAGE_ORDER     (HPAGE_SHIFT - PAGE_SHIFT)
 154extern unsigned int hpage_shift;
 155#endif
 156
 157static __inline__ int
 158get_order (unsigned long size)
 159{
 160        long double d = size - 1;
 161        long order;
 162
 163        order = ia64_getf_exp(d);
 164        order = order - PAGE_SHIFT - 0xffff + 1;
 165        if (order < 0)
 166                order = 0;
 167        return order;
 168}
 169
 170#endif /* !__ASSEMBLY__ */
 171
#ifdef STRICT_MM_TYPECHECKS
  /*
   * These are used to make use of C type-checking..
   */
  typedef struct { unsigned long pte; } pte_t;
  typedef struct { unsigned long pmd; } pmd_t;
#if CONFIG_PGTABLE_LEVELS == 4
  typedef struct { unsigned long pud; } pud_t;
#endif
  typedef struct { unsigned long pgd; } pgd_t;
  typedef struct { unsigned long pgprot; } pgprot_t;
  typedef struct page *pgtable_t;

# define pte_val(x)     ((x).pte)
# define pmd_val(x)     ((x).pmd)
#if CONFIG_PGTABLE_LEVELS == 4
# define pud_val(x)     ((x).pud)
#endif
# define pgd_val(x)     ((x).pgd)
# define pgprot_val(x)  ((x).pgprot)

  /*
   * NOTE(review): the constructor sets are asymmetric -- this branch
   * provides __pte/__pmd/__pgprot while the non-strict branch provides
   * __pte/__pgd/__pgprot; presumably no caller needs the missing ones,
   * but worth confirming.
   */
# define __pte(x)       ((pte_t) { (x) } )
# define __pmd(x)       ((pmd_t) { (x) } )
# define __pgprot(x)    ((pgprot_t) { (x) } )

#else /* !STRICT_MM_TYPECHECKS */
  /*
   * .. while these make it easier on the compiler
   */
# ifndef __ASSEMBLY__
    typedef unsigned long pte_t;
    typedef unsigned long pmd_t;
    typedef unsigned long pgd_t;
    typedef unsigned long pgprot_t;
    typedef struct page *pgtable_t;
# endif

# define pte_val(x)     (x)
# define pmd_val(x)     (x)
# define pgd_val(x)     (x)
# define pgprot_val(x)  (x)

# define __pte(x)       (x)
# define __pgd(x)       (x)
# define __pgprot(x)    (x)
#endif /* !STRICT_MM_TYPECHECKS */
 218
/* Kernel identity mapping lives in region 7 (all region bits set). */
#define PAGE_OFFSET                     RGN_BASE(RGN_KERNEL)

/* Default VM flags for data mappings; exec only if the personality asks. */
#define VM_DATA_DEFAULT_FLAGS           (VM_READ | VM_WRITE |                                   \
                                         VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC |                \
                                         (((current->personality & READ_IMPLIES_EXEC) != 0)     \
                                          ? VM_EXEC : 0))

/* Gate page area sits at the base of region 5. */
#define GATE_ADDR               RGN_BASE(RGN_GATE)

/*
 * 0xa000000000000000+2*PERCPU_PAGE_SIZE
 * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page)
 */
#define KERNEL_START             (GATE_ADDR+__IA64_UL_CONST(0x100000000))
/* Per-CPU area occupies the last PERCPU_PAGE_SIZE bytes of the address space. */
#define PERCPU_ADDR             (-PERCPU_PAGE_SIZE)
#define LOAD_OFFSET             (KERNEL_START - KERNEL_TR_PAGE_SIZE)

#define __HAVE_ARCH_GATE_AREA   1

#endif /* _ASM_IA64_PAGE_H */
 239