/*
 *  arch/arm/include/asm/pgtable.h
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_PGTABLE_H
#define _ASMARM_PGTABLE_H

#include <linux/const.h>
#include <asm/proc-fns.h>

#ifndef CONFIG_MMU

#include <asm-generic/4level-fixup.h>
#include <asm/pgtable-nommu.h>

#else

#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>

#include <asm/tlbflush.h>

#ifdef CONFIG_ARM_LPAE
#include <asm/pgtable-3level.h>
#else
#include <asm/pgtable-2level.h>
#endif

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET          (8*1024*1024)
#define VMALLOC_START           (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_END             0xff800000UL
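
/*
 * Worked example (illustrative only; addresses are hypothetical): the
 * VMALLOC_START expression computes align_down(high_memory, 8MB) + 8MB.
 * With high_memory at 0xd0000000 (already 8MB-aligned):
 *
 *     (0xd0000000 + 0x00800000) & ~0x007fffff == 0xd0800000
 *
 * and with an unaligned high_memory of 0xcfa00000:
 *
 *     (0xcfa00000 + 0x00800000) & ~0x007fffff == 0xd0000000
 *
 * so VMALLOC_START always lands on an 8MB boundary above high_memory,
 * leaving the guard hole described above.
 */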

#define LIBRARY_TEXT_START      0x0c000000

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, pte_t);
extern void __pmd_error(const char *file, int line, pmd_t);
extern void __pgd_error(const char *file, int line, pgd_t);

#define pte_ERROR(pte)          __pte_error(__FILE__, __LINE__, pte)
#define pmd_ERROR(pmd)          __pmd_error(__FILE__, __LINE__, pmd)
#define pgd_ERROR(pgd)          __pgd_error(__FILE__, __LINE__, pgd)

/*
 * This is the lowest virtual address at which we permit any user space
 * mapping to be placed.  This is particularly important for CPUs whose
 * exception vectors are not mapped at the high address.
 */
#define FIRST_USER_ADDRESS      (PAGE_SIZE * 2)
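
/*
 * Illustrative note: with ARM's 4kB pages this evaluates to 0x2000, so
 * the core mm treats the first two pages as off-limits for user
 * mappings.  On CPUs with the vector page at virtual address 0, this
 * keeps user mappings from shadowing the exception vectors.
 */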

/*
 * Use TASK_SIZE as the ceiling argument for free_pgtables() and
 * free_pgd_range() to avoid freeing the modules pmd when LPAE is enabled (pmd
 * page shared between user and kernel).
 */
#ifdef CONFIG_ARM_LPAE
#define USER_PGTABLES_CEILING   TASK_SIZE
#endif

/*
 * The pgprot_* and protection_map entries will be fixed up at runtime
 * to include the cacheable and bufferable bits based on memory policy,
 * as well as any architecture dependent bits like global/ASID and SMP
 * shared mapping bits.
 */
#define _L_PTE_DEFAULT  L_PTE_PRESENT | L_PTE_YOUNG

extern pgprot_t         pgprot_user;
extern pgprot_t         pgprot_kernel;
extern pgprot_t         pgprot_hyp_device;
extern pgprot_t         pgprot_s2;
extern pgprot_t         pgprot_s2_device;

#define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b))

#define PAGE_NONE               _MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY | L_PTE_NONE)
#define PAGE_SHARED             _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN)
#define PAGE_SHARED_EXEC        _MOD_PROT(pgprot_user, L_PTE_USER)
#define PAGE_COPY               _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_COPY_EXEC          _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_READONLY           _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_READONLY_EXEC      _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_KERNEL             _MOD_PROT(pgprot_kernel, L_PTE_XN)
#define PAGE_KERNEL_EXEC        pgprot_kernel
#define PAGE_HYP                _MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_XN)
#define PAGE_HYP_EXEC           _MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_RDONLY)
#define PAGE_HYP_RO             _MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_HYP_DEVICE         _MOD_PROT(pgprot_hyp_device, L_PTE_HYP)
#define PAGE_S2                 _MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY)
#define PAGE_S2_DEVICE          _MOD_PROT(pgprot_s2_device, L_PTE_S2_RDONLY)

#define __PAGE_NONE             __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
#define __PAGE_SHARED           __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
#define __PAGE_SHARED_EXEC      __pgprot(_L_PTE_DEFAULT | L_PTE_USER)
#define __PAGE_COPY             __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_COPY_EXEC        __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
#define __PAGE_READONLY         __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_READONLY_EXEC    __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
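
/*
 * Illustrative note: the PAGE_* values above are built at runtime from
 * pgprot_user/pgprot_kernel, after the memory-type bits have been fixed
 * up for the CPU, while the __PAGE_* values are compile-time constants
 * used to populate the protection map tables below.  For instance,
 * PAGE_KERNEL expands to
 *
 *     __pgprot(pgprot_val(pgprot_kernel) | L_PTE_XN)
 *
 * i.e. ordinary kernel mappings are non-executable; PAGE_KERNEL_EXEC
 * omits L_PTE_XN for regions that must run code, such as modules.
 */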

#define __pgprot_modify(prot,mask,bits)         \
        __pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define pgprot_noncached(prot) \
        __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)

#define pgprot_writecombine(prot) \
        __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)

#define pgprot_stronglyordered(prot) \
        __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)

#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#define pgprot_dmacoherent(prot) \
        __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot);
#else
#define pgprot_dmacoherent(prot) \
        __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
#endif
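
/*
 * Illustrative sketch of how the modifiers compose (not a new API):
 * __pgprot_modify() first clears the whole Linux memory-type field and
 * then ORs in the replacement type, so for example
 *
 *     pgprot_writecombine(PAGE_KERNEL)
 *         == __pgprot((pgprot_val(PAGE_KERNEL) & ~L_PTE_MT_MASK)
 *                     | L_PTE_MT_BUFFERABLE)
 *
 * leaving the permission bits (L_PTE_XN etc.) untouched while swapping
 * a cacheable mapping for a write-combining one.
 */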

#endif /* __ASSEMBLY__ */

/*
 * The table below defines the page protection levels that we insert into
 * our Linux page tables.  These get translated into the best that the
 * architecture can perform.  Note that on most ARM hardware:
 *  1) We cannot do execute protection
 *  2) If we could do execute protection, then read is implied
 *  3) write implies read permissions
 */
#define __P000  __PAGE_NONE
#define __P001  __PAGE_READONLY
#define __P010  __PAGE_COPY
#define __P011  __PAGE_COPY
#define __P100  __PAGE_READONLY_EXEC
#define __P101  __PAGE_READONLY_EXEC
#define __P110  __PAGE_COPY_EXEC
#define __P111  __PAGE_COPY_EXEC

#define __S000  __PAGE_NONE
#define __S001  __PAGE_READONLY
#define __S010  __PAGE_SHARED
#define __S011  __PAGE_SHARED
#define __S100  __PAGE_READONLY_EXEC
#define __S101  __PAGE_READONLY_EXEC
#define __S110  __PAGE_SHARED_EXEC
#define __S111  __PAGE_SHARED_EXEC
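
/*
 * Worked example (illustrative): a PROT_READ|PROT_WRITE, MAP_PRIVATE
 * mapping indexes the table as __P011, i.e. __PAGE_COPY: the pages start
 * out read-only (L_PTE_RDONLY set) and the first write faults, at which
 * point the fault handler performs copy-on-write and makes the private
 * copy writable.  The shared variant __S011 maps straight to
 * __PAGE_SHARED, which is writable from the outset.
 */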

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas, etc.
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)        (empty_zero_page)

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* to find an entry in a page-table-directory */
#define pgd_index(addr)         ((addr) >> PGDIR_SHIFT)

#define pgd_offset(mm, addr)    ((mm)->pgd + pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)      pgd_offset(&init_mm, addr)
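
/*
 * Worked example (illustrative; the constants depend on configuration):
 * with the classic 2-level tables PGDIR_SHIFT is 21, so each pgd entry
 * covers 2MB and
 *
 *     pgd_index(0xc0000000) == 0xc0000000 >> 21 == 0x600
 *
 * Under CONFIG_ARM_LPAE the 3-level layout uses PGDIR_SHIFT 30, so the
 * same address indexes entry 3 of a 4-entry pgd.
 */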

#define pmd_none(pmd)           (!pmd_val(pmd))

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
        return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
}

#define pmd_page(pmd)           pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))

#ifndef CONFIG_HIGHPTE
#define __pte_map(pmd)          pmd_page_vaddr(*(pmd))
#define __pte_unmap(pte)        do { } while (0)
#else
#define __pte_map(pmd)          (pte_t *)kmap_atomic(pmd_page(*(pmd)))
#define __pte_unmap(pte)        kunmap_atomic(pte)
#endif

#define pte_index(addr)         (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_kernel(pmd,addr)     (pmd_page_vaddr(*(pmd)) + pte_index(addr))

#define pte_offset_map(pmd,addr)        (__pte_map(pmd) + pte_index(addr))
#define pte_unmap(pte)                  __pte_unmap(pte)
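
/*
 * Usage sketch (illustrative only; these are the generic walker helpers,
 * and error handling is elided):
 *
 *     pgd_t *pgd = pgd_offset(mm, addr);
 *     pud_t *pud = pud_offset(pgd, addr);
 *     pmd_t *pmd = pmd_offset(pud, addr);
 *     if (!pmd_none(*pmd)) {
 *             pte_t *pte = pte_offset_map(pmd, addr);
 *             pte_t entry = *pte;
 *             pte_unmap(pte);
 *     }
 *
 * With CONFIG_HIGHPTE the pte page may live in highmem, so every
 * pte_offset_map() must be paired with pte_unmap() to undo the
 * kmap_atomic().
 */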

#define pte_pfn(pte)            ((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)       __pte(__pfn_to_phys(pfn) | pgprot_val(prot))

#define pte_page(pte)           pfn_to_page(pte_pfn(pte))
#define mk_pte(page,prot)       pfn_pte(page_to_pfn(page), prot)

#define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0)

#define pte_isset(pte, val)     ((u32)(val) == (val) ? pte_val(pte) & (val) \
                                                : !!(pte_val(pte) & (val)))
#define pte_isclear(pte, val)   (!(pte_val(pte) & (val)))
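
/*
 * Note (added for clarity): pte_isset() special-cases bit masks that do
 * not fit in 32 bits.  On LPAE, pteval_t is 64-bit and some L_PTE_* bits
 * live above bit 31; for those the !! collapses the result to 0 or 1 so
 * it is not truncated to zero when callers use it as an int.  For masks
 * that fit in a u32 the raw masked value is returned directly.
 */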

#define pte_none(pte)           (!pte_val(pte))
#define pte_present(pte)        (pte_isset((pte), L_PTE_PRESENT))
#define pte_valid(pte)          (pte_isset((pte), L_PTE_VALID))
#define pte_accessible(mm, pte) (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
#define pte_write(pte)          (pte_isclear((pte), L_PTE_RDONLY))
#define pte_dirty(pte)          (pte_isset((pte), L_PTE_DIRTY))
#define pte_young(pte)          (pte_isset((pte), L_PTE_YOUNG))
#define pte_exec(pte)           (pte_isclear((pte), L_PTE_XN))

#define pte_valid_user(pte)     \
        (pte_valid(pte) && pte_isset((pte), L_PTE_USER) && pte_young(pte))

#if __LINUX_ARM_ARCH__ < 6
static inline void __sync_icache_dcache(pte_t pteval)
{
}
#else
extern void __sync_icache_dcache(pte_t pteval);
#endif

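/*
 * Added note (not in the original source): set_pte_at() below is the
 * arch hook the generic mm code uses to install a pte.  For a valid
 * user mapping below TASK_SIZE it synchronises the I- and D-caches as
 * needed (skipped for special ptes) and sets PTE_EXT_NG, the "not
 * global" hardware bit, so the resulting TLB entry is tagged with the
 * mm's ASID rather than shared across address spaces.
 */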
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pteval)
{
        unsigned long ext = 0;

        if (addr < TASK_SIZE && pte_valid_user(pteval)) {
                if (!pte_special(pteval))
                        __sync_icache_dcache(pteval);
                ext |= PTE_EXT_NG;
        }

        set_pte_ext(ptep, pteval, ext);
}

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
        pte_val(pte) &= ~pgprot_val(prot);
        return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
        pte_val(pte) |= pgprot_val(prot);
        return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
        return set_pte_bit(pte, __pgprot(L_PTE_RDONLY));
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        return clear_pte_bit(pte, __pgprot(L_PTE_RDONLY));
}

static inline pte_t pte_mkclean(pte_t pte)
{
        return clear_pte_bit(pte, __pgprot(L_PTE_DIRTY));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        return set_pte_bit(pte, __pgprot(L_PTE_DIRTY));
}

static inline pte_t pte_mkold(pte_t pte)
{
        return clear_pte_bit(pte, __pgprot(L_PTE_YOUNG));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        return set_pte_bit(pte, __pgprot(L_PTE_YOUNG));
}

static inline pte_t pte_mkexec(pte_t pte)
{
        return clear_pte_bit(pte, __pgprot(L_PTE_XN));
}

static inline pte_t pte_mknexec(pte_t pte)
{
        return set_pte_bit(pte, __pgprot(L_PTE_XN));
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
                L_PTE_NONE | L_PTE_VALID;
        pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
        return pte;
}
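
/*
 * Usage sketch (illustrative): pte_modify() is what mprotect() reaches
 * via change_protection(); it swaps only the protection-related bits in
 * the mask above and keeps the pfn plus the accessed/dirty state:
 *
 *     pte_t new = pte_modify(old, vm_get_page_prot(newflags));
 *
 * The pte_mk*() helpers above are likewise pure value transformations;
 * the result only takes effect once written back with set_pte_at().
 */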

/*
 * Encode and decode a swap entry.  Swap entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <--------------- offset ------------------------> < type -> 0 0
 *
 * This gives us up to 31 swap files and 128GB per swap file.  Note that
 * the offset field is always non-zero.
 */
#define __SWP_TYPE_SHIFT        2
#define __SWP_TYPE_BITS         5
#define __SWP_TYPE_MASK         ((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT      (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define __swp_type(x)           (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)         ((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp) ((pte_t) { (swp).val })
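
/*
 * Worked example (illustrative): encoding type 3, offset 0x1000 gives
 *
 *     __swp_entry(3, 0x1000).val == (3 << 2) | (0x1000 << 7) == 0x8000c
 *
 * Bits 1:0 stay zero, so L_PTE_PRESENT is clear and pte_present() sees
 * a non-present entry.  Decoding reverses this: __swp_type() extracts
 * bits 6:2 (= 3) and __swp_offset() shifts the rest down (= 0x1000).
 * With 4kB pages, the 25 offset bits of a 32-bit pte cover 2^25 pages,
 * i.e. the 128GB per swap file quoted above.
 */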

/*
 * It is an error for the kernel to have more swap files than we can
 * encode in the PTEs.  This ensures that we know when MAX_SWAPFILES
 * is increased beyond what we presently support.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
/* FIXME: this is not correct */
#define kern_addr_valid(addr)   (1)

#include <asm-generic/pgtable.h>

/*
 * We provide our own arch_get_unmapped_area to cope with VIPT caches.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define pgtable_cache_init() do { } while (0)

#endif /* !__ASSEMBLY__ */

#endif /* CONFIG_MMU */

#endif /* _ASMARM_PGTABLE_H */