linux/arch/powerpc/include/asm/page.h
#ifndef _ASM_POWERPC_PAGE_H
#define _ASM_POWERPC_PAGE_H

/*
 * Copyright (C) 2001,2005 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/kernel.h>
#else
#include <asm/types.h>
#endif
#include <asm/asm-compat.h>
#include <asm/kdump.h>

/*
 * On regular PPC32 the page size is 4K (but we support 4K/16K/64K/256K
 * pages on PPC44x). For PPC64 we support either 4K or 64K software page
 * size. When using 64K pages, however, whether we really support 64K pages
 * in HW or not is irrelevant to these definitions.
 */
#if defined(CONFIG_PPC_256K_PAGES)
#define PAGE_SHIFT              18
#elif defined(CONFIG_PPC_64K_PAGES)
#define PAGE_SHIFT              16
#elif defined(CONFIG_PPC_16K_PAGES)
#define PAGE_SHIFT              14
#else
#define PAGE_SHIFT              12
#endif

#define PAGE_SIZE               (ASM_CONST(1) << PAGE_SHIFT)
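
/*
 * Illustrative values, assuming the default 4K configuration (an assumption
 * for the sake of example): PAGE_SHIFT == 12, so
 * PAGE_SIZE == ASM_CONST(1) << 12 == 0x1000 (4096 bytes).
 */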

#ifndef __ASSEMBLY__
#ifdef CONFIG_HUGETLB_PAGE
extern unsigned int HPAGE_SHIFT;
#else
#define HPAGE_SHIFT PAGE_SHIFT
#endif
#define HPAGE_SIZE              ((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK              (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER      (HPAGE_SHIFT - PAGE_SHIFT)
#define HUGE_MAX_HSTATE         (MMU_PAGE_COUNT-1)
#endif
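
/*
 * A worked example, assuming 16M huge pages on 4K base pages (one possible
 * configuration, not the only one): HPAGE_SHIFT == 24, so
 * HPAGE_SIZE == 0x1000000, HPAGE_MASK == 0xff000000 (with a 32-bit
 * unsigned long) and HUGETLB_PAGE_ORDER == 24 - 12 == 12.
 */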

/*
 * Subtle: (1 << PAGE_SHIFT) is an int, not an unsigned long. So if we
 * assign PAGE_MASK to a larger type it gets extended the way we want
 * (i.e. with 1s in the high bits).
 */
#define PAGE_MASK      (~((1 << PAGE_SHIFT) - 1))
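
/*
 * A minimal sketch of that sign extension, assuming PAGE_SHIFT == 12:
 *
 *      int mask32 = ~((1 << 12) - 1);  -> 0xfffff000 as a signed int
 *      u64 mask64 = mask32;            -> sign-extends to 0xfffffffffffff000
 *
 * Had the constant been unsigned, the assignment would zero-extend instead
 * and the high bits of mask64 would be 0 rather than 1.
 */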

/*
 * KERNELBASE is the virtual address of the start of the kernel; it is often
 * the same as PAGE_OFFSET, but _might not be_.
 *
 * The kdump dump kernel is one example where KERNELBASE != PAGE_OFFSET.
 *
 * PAGE_OFFSET is the virtual address of the start of lowmem.
 *
 * PHYSICAL_START is the physical address of the start of the kernel.
 *
 * MEMORY_START is the physical address of the start of lowmem.
 *
 * KERNELBASE, PAGE_OFFSET, and PHYSICAL_START are all configurable on
 * ppc32, and based on how they are set we determine MEMORY_START.
 *
 * For the linear mapping the following equation should be true:
 * KERNELBASE - PAGE_OFFSET = PHYSICAL_START - MEMORY_START
 *
 * Also, KERNELBASE >= PAGE_OFFSET and PHYSICAL_START >= MEMORY_START
 *
 * There are two equivalent ways to relate a virtual address to a physical
 * one:
 * va = pa + PAGE_OFFSET - MEMORY_START
 * va = pa + KERNELBASE - PHYSICAL_START
 *
 * If you want to know something's offset from the start of the kernel you
 * should subtract KERNELBASE.
 *
 * If you want to test if something's a kernel address, use is_kernel_addr().
 */

#define KERNELBASE      ASM_CONST(CONFIG_KERNEL_START)
#define PAGE_OFFSET     ASM_CONST(CONFIG_PAGE_OFFSET)
#define LOAD_OFFSET     ASM_CONST((CONFIG_KERNEL_START-CONFIG_PHYSICAL_START))
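
/*
 * A worked instance of the equations above (hypothetical values chosen
 * purely for illustration, not a real configuration): with
 * KERNELBASE = 0xc2000000, PAGE_OFFSET = 0xc0000000 and
 * PHYSICAL_START = 0x02000000,
 *
 *      MEMORY_START = PHYSICAL_START + PAGE_OFFSET - KERNELBASE = 0
 *
 * and indeed KERNELBASE - PAGE_OFFSET == PHYSICAL_START - MEMORY_START
 * == 0x02000000, so both va/pa equations agree.
 */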

#if defined(CONFIG_NONSTATIC_KERNEL)
#ifndef __ASSEMBLY__

extern phys_addr_t memstart_addr;
extern phys_addr_t kernstart_addr;

#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC32)
extern long long virt_phys_offset;
#endif

#endif /* __ASSEMBLY__ */
#define PHYSICAL_START  kernstart_addr

#else   /* !CONFIG_NONSTATIC_KERNEL */
#define PHYSICAL_START  ASM_CONST(CONFIG_PHYSICAL_START)
#endif

/* See the description below for VIRT_PHYS_OFFSET */
#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
#ifdef CONFIG_RELOCATABLE
#define VIRT_PHYS_OFFSET virt_phys_offset
#else
#define VIRT_PHYS_OFFSET (KERNELBASE - PHYSICAL_START)
#endif
#endif

#ifdef CONFIG_PPC64
#define MEMORY_START    0UL
#elif defined(CONFIG_NONSTATIC_KERNEL)
#define MEMORY_START    memstart_addr
#else
#define MEMORY_START    (PHYSICAL_START + PAGE_OFFSET - KERNELBASE)
#endif

#ifdef CONFIG_FLATMEM
#define ARCH_PFN_OFFSET         ((unsigned long)(MEMORY_START >> PAGE_SHIFT))
#ifndef __ASSEMBLY__
extern unsigned long max_mapnr;
static inline bool pfn_valid(unsigned long pfn)
{
        unsigned long min_pfn = ARCH_PFN_OFFSET;

        return pfn >= min_pfn && pfn < max_mapnr;
}
#endif
#endif
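
/*
 * A hedged usage sketch (the loop and do_something() are hypothetical,
 * shown only to illustrate the [ARCH_PFN_OFFSET, max_mapnr) valid range):
 *
 *      for (pfn = start_pfn; pfn < end_pfn; pfn++) {
 *              if (!pfn_valid(pfn))
 *                      continue;
 *              do_something(pfn_to_page(pfn));
 *      }
 */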

#define virt_to_pfn(kaddr)      (__pa(kaddr) >> PAGE_SHIFT)
#define virt_to_page(kaddr)     pfn_to_page(virt_to_pfn(kaddr))
#define pfn_to_kaddr(pfn)       __va((pfn) << PAGE_SHIFT)
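
/*
 * An illustrative round trip, assuming kaddr lies in the linear mapping:
 *
 *      unsigned long pfn = virt_to_pfn(kaddr);  -> __pa(kaddr) >> PAGE_SHIFT
 *      struct page *pg   = virt_to_page(kaddr); -> pfn_to_page(pfn)
 *      void *base        = pfn_to_kaddr(pfn);   -> kaddr rounded down to a
 *                                                  page boundary
 */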

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * On hash the vmalloc and other regions alias to the kernel region when passed
 * through __pa(), which virt_to_pfn() uses. That means virt_addr_valid() can
 * return true for some vmalloc addresses, which is incorrect. So explicitly
 * check that the address is in the kernel region.
 */
#define virt_addr_valid(kaddr) (REGION_ID(kaddr) == KERNEL_REGION_ID && \
                                pfn_valid(virt_to_pfn(kaddr)))
#else
#define virt_addr_valid(kaddr)  pfn_valid(virt_to_pfn(kaddr))
#endif

/*
 * On Book-E parts we need __va to parse the device tree and we can't
 * determine MEMORY_START until then.  However we can determine PHYSICAL_START
 * from information at hand (program counter, TLB lookup).
 *
 * On BookE with RELOCATABLE && PPC32
 *
 *   With RELOCATABLE && PPC32, we support loading the kernel at any physical
 *   address without any restriction on the page alignment.
 *
 *   We find the runtime address of _stext and relocate ourselves based on
 *   the following calculation:
 *
 *        virtual_base = ALIGN_DOWN(KERNELBASE,256M) +
 *                              MODULO(_stext.run,256M)
 *   and create the following mapping:
 *
 *        ALIGN_DOWN(_stext.run,256M) => ALIGN_DOWN(KERNELBASE,256M)
 *
 *   When we process relocations, we cannot depend on the
 *   existing equation for the __va()/__pa() translations:
 *
 *         __va(x) = (x) - PHYSICAL_START + KERNELBASE
 *
 *   Where:
 *       PHYSICAL_START = kernstart_addr = Physical address of _stext
 *       KERNELBASE = Compiled virtual address of _stext.
 *
 *   This formula holds true iff the kernel load address is TLB page aligned.
 *
 *   In our case, we need to also account for the shift in the kernel virtual
 *   address.
 *
 *   E.g.,
 *
 *   Let the kernel be loaded at 64MB and KERNELBASE be 0xc0000000 (same as
 *   PAGE_OFFSET). In this case, we would be mapping 0 to 0xc0000000, and
 *   kernstart_addr = 64M.
 *
 *   Now __va(1MB) = (0x100000) - (0x4000000) + 0xc0000000
 *                 = 0xbc100000, which is wrong.
 *
 *   Rather, it should be: 0xc0000000 + 0x100000 = 0xc0100000
 *   according to our mapping.
 *
 *   Hence we use the following formula to get the translations right:
 *
 *        __va(x) = (x) - [ PHYSICAL_START - Effective KERNELBASE ]
 *
 *        Where:
 *              PHYSICAL_START = dynamic load address (the kernstart_addr
 *                               variable)
 *              Effective KERNELBASE = virtual_base
 *                                   = ALIGN_DOWN(KERNELBASE,256M) +
 *                                              MODULO(PHYSICAL_START,256M)
 *
 *      To make the cost of __va()/__pa() more lightweight, we introduce
 *      a new variable virt_phys_offset, which holds:
 *
 *      virt_phys_offset = Effective KERNELBASE - PHYSICAL_START
 *                       = ALIGN_DOWN(KERNELBASE,256M) -
 *                              ALIGN_DOWN(PHYSICAL_START,256M)
 *
 *      Hence:
 *
 *      __va(x) = x - PHYSICAL_START + Effective KERNELBASE
 *              = x + virt_phys_offset
 *
 *              and
 *      __pa(x) = x + PHYSICAL_START - Effective KERNELBASE
 *              = x - virt_phys_offset
 *
 * On non-Book-E PPC64 PAGE_OFFSET and MEMORY_START are constants, so use
 * the other definitions for __va & __pa.
 */
#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET))
#define __pa(x) ((unsigned long)(x) - VIRT_PHYS_OFFSET)
#else
#ifdef CONFIG_PPC64
/*
 * gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET
 * with -mcmodel=medium, so we use & and | instead of - and + on 64-bit.
 */
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET))
#define __pa(x) ((unsigned long)(x) & 0x0fffffffffffffffUL)
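
/*
 * Why | and & work here (a worked check, assuming the conventional 64-bit
 * PAGE_OFFSET of 0xc000000000000000): linear-map virtual addresses carry
 * 0xc in the top nibble and physical addresses fit entirely below it, so
 * for such inputs
 *
 *      x | PAGE_OFFSET           == x + PAGE_OFFSET
 *      x & 0x0fffffffffffffffUL  == x - PAGE_OFFSET
 *
 * i.e. the bitwise forms compute the same values as the arithmetic ones.
 */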

#else /* 32-bit, non book E */
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START))
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START)
#endif
#endif
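
/*
 * A worked check of the relocatable BookE example above (kernel loaded at
 * 64M, KERNELBASE == PAGE_OFFSET == 0xc0000000):
 *
 *      Effective KERNELBASE = ALIGN_DOWN(0xc0000000,256M)
 *                             + MODULO(0x04000000,256M)
 *                           = 0xc0000000 + 0x04000000 = 0xc4000000
 *      virt_phys_offset     = 0xc4000000 - 0x04000000 = 0xc0000000
 *      __va(0x100000)       = 0x100000 + 0xc0000000   = 0xc0100000
 *
 * which matches the intended mapping of physical 0 to virtual 0xc0000000.
 */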

/*
 * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
 * and needs to be executable.  This means the whole heap ends
 * up being executable.
 */
#define VM_DATA_DEFAULT_FLAGS32 \
        (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
                                 VM_READ | VM_WRITE | \
                                 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
                                 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#ifdef __powerpc64__
#include <asm/page_64.h>
#else
#include <asm/page_32.h>
#endif

/* align addr on a size boundary - adjust address up/down if needed */
#define _ALIGN_UP(addr, size)   __ALIGN_KERNEL(addr, size)
#define _ALIGN_DOWN(addr, size) ((addr)&(~((typeof(addr))(size)-1)))

/* align addr on a size boundary - adjust address up if needed */
#define _ALIGN(addr,size)     _ALIGN_UP(addr,size)
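
/*
 * Illustrative values (size must be a power of two for these mask-based
 * macros):
 *
 *      _ALIGN_UP(0x1234UL, 0x1000)   == 0x2000
 *      _ALIGN_DOWN(0x1234UL, 0x1000) == 0x1000
 *      _ALIGN(0x1000UL, 0x1000)      == 0x1000  (already aligned, unchanged)
 */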

/*
 * Don't compare things with KERNELBASE or PAGE_OFFSET to test for
 * "kernelness"; use is_kernel_addr() - it should do what you want.
 */
#ifdef CONFIG_PPC_BOOK3E_64
#define is_kernel_addr(x)       ((x) >= 0x8000000000000000ul)
#else
#define is_kernel_addr(x)       ((x) >= PAGE_OFFSET)
#endif
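
/*
 * Example, assuming the usual Book3S 64 PAGE_OFFSET of 0xc000000000000000:
 *
 *      is_kernel_addr(0xc000000000001000ul)  -> true  (linear mapping)
 *      is_kernel_addr(0x0000000010000000ul)  -> false (user address)
 */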

#ifndef CONFIG_PPC_BOOK3S_64
/*
 * Use the top bit of the higher-level page table entries to indicate whether
 * the entries we point to contain hugepages.  This works because we know that
 * the page tables live in kernel space.  If we ever decide to support having
 * page tables at arbitrary addresses, this breaks and will have to change.
 */
#ifdef CONFIG_PPC64
#define PD_HUGE 0x8000000000000000
#else
#define PD_HUGE 0x80000000
#endif

#else   /* CONFIG_PPC_BOOK3S_64 */
/*
 * Book3S 64 stores real addresses in the hugepd entries to
 * avoid overlaps with _PAGE_PRESENT and _PAGE_PTE.
 */
#define HUGEPD_ADDR_MASK        (0x0ffffffffffffffful & ~HUGEPD_SHIFT_MASK)
#endif /* CONFIG_PPC_BOOK3S_64 */

/*
 * Some number of bits at the level of the page table that points to
 * a hugepte are used to encode the size.  This masks those bits.
 */
#define HUGEPD_SHIFT_MASK     0x3f
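
/*
 * A hedged sketch of that encoding (the layout below is illustrative, not
 * the kernel's actual hugepd accessors): on a non-Book3S-64 64-bit
 * configuration a directory entry pointing at a hugepte table might look
 * like
 *
 *      entry = PD_HUGE | table_address | size_bits;
 *
 * so a walker can test (entry & PD_HUGE) to detect the hugepage case and
 * read (entry & HUGEPD_SHIFT_MASK) to recover the encoded size bits.
 */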

#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/pgtable-be-types.h>
#else
#include <asm/pgtable-types.h>
#endif

#ifndef CONFIG_HUGETLB_PAGE
#define is_hugepd(pdep)         (0)
#define pgd_huge(pgd)           (0)
#endif /* CONFIG_HUGETLB_PAGE */

struct page;
extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long vaddr,
                struct page *p);
extern int page_is_ram(unsigned long pfn);
extern int devmem_is_allowed(unsigned long pfn);

#ifdef CONFIG_PPC_SMLPAR
void arch_free_page(struct page *page, int order);
#define HAVE_ARCH_FREE_PAGE
#endif

struct vm_area_struct;
#ifdef CONFIG_PPC_BOOK3S_64
/*
 * For Book3S 64 with 4K and 64K Linux page sizes we want to use
 * pointers, because the page tables actually store pfns.
 */
typedef pte_t *pgtable_t;
#else
#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC64)
typedef pte_t *pgtable_t;
#else
typedef struct page *pgtable_t;
#endif
#endif

#include <asm-generic/memory_model.h>
#endif /* __ASSEMBLY__ */
#include <asm/slice.h>

#endif /* _ASM_POWERPC_PAGE_H */