linux/arch/arm/include/asm/pgtable.h
/*
 *  arch/arm/include/asm/pgtable.h
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_PGTABLE_H
#define _ASMARM_PGTABLE_H

#include <asm-generic/4level-fixup.h>
#include <asm/proc-fns.h>

#ifndef CONFIG_MMU

#include "pgtable-nommu.h"

#else

#include <asm/memory.h>
#include <mach/vmalloc.h>
#include <asm/pgtable-hwdef.h>

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * Note that platforms may override VMALLOC_START, but they must provide
 * VMALLOC_END.  VMALLOC_END defines the (exclusive) limit of this space,
 * which may not overlap IO space.
 */
#ifndef VMALLOC_START
#define VMALLOC_OFFSET          (8*1024*1024)
#define VMALLOC_START           (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#endif
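
/*
 * Illustrative sketch (editor's note, not part of the original header):
 * with an assumed high_memory of 0xc0800000, the macro above evaluates as
 *
 *   VMALLOC_START = (0xc0800000 + 0x00800000) & ~0x007fffff
 *                 = 0xc1000000
 *
 * i.e. the vmalloc area begins at the next 8MB boundary above the
 * direct-mapped RAM, giving the guard hole described above.
 */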

/*
 * Hardware-wise, we have a two level page table structure, where the first
 * level has 4096 entries, and the second level has 256 entries.  Each entry
 * is one 32-bit word.  Most of the bits in the second level entry are used
 * by hardware, and there are no "accessed" or "dirty" bits.
 *
 * Linux on the other hand has a three level page table structure, which can
 * be wrapped to fit a two level page table structure easily - using the PGD
 * and PTE only.  However, Linux also expects one "PTE" table per page, and
 * at least a "dirty" bit.
 *
 * Therefore, we tweak the implementation slightly - we tell Linux that we
 * have 2048 entries in the first level, each of which is 8 bytes (i.e. two
 * hardware pointers to the second level).  The second level contains two
 * hardware PTE tables arranged contiguously, followed by Linux versions
 * which contain the state information Linux needs.  We, therefore, end up
 * with 512 entries in the "PTE" level.
 *
 * This leads to the page tables having the following layout:
 *
 *    pgd             pte
 * |        |
 * +--------+ +0
 * |        |-----> +------------+ +0
 * +- - - - + +4    |  h/w pt 0  |
 * |        |-----> +------------+ +1024
 * +--------+ +8    |  h/w pt 1  |
 * |        |       +------------+ +2048
 * +- - - - +       | Linux pt 0 |
 * |        |       +------------+ +3072
 * +--------+       | Linux pt 1 |
 * |        |       +------------+ +4096
 *
 * See L_PTE_xxx below for definitions of bits in the "Linux pt", and
 * PTE_xxx for definitions of bits appearing in the "h/w pt".
 *
 * PMD_xxx definitions refer to bits in the first level page table.
 *
 * The "dirty" bit is emulated by granting hardware write permission only
 * if the page is marked "writable" and "dirty" in the Linux PTE.  This
 * means that a write to a clean page will cause a permission fault, and
 * the Linux MM layer will mark the page dirty via handle_pte_fault().
 * For the hardware to notice the permission change, the TLB entry must
 * be flushed, and ptep_set_access_flags() does that for us.
 *
 * The "accessed" or "young" bit is emulated by a similar method; we only
 * allow accesses to the page if the "young" bit is set.  Accesses to the
 * page will cause a fault, and handle_pte_fault() will set the young bit
 * for us as long as the page is marked present in the corresponding Linux
 * PTE entry.  Again, ptep_set_access_flags() will ensure that the TLB is
 * up to date.
 *
 * However, when the "young" bit is cleared, we deny access to the page
 * by clearing the hardware PTE.  Currently Linux does not flush the TLB
 * for us in this case, which means the TLB will retain the translation
 * until either the TLB entry is evicted under pressure, or a context
 * switch that changes the user space mapping occurs.
 */
#define PTRS_PER_PTE            512
#define PTRS_PER_PMD            1
#define PTRS_PER_PGD            2048
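
/*
 * Illustrative arithmetic (editor's note, not in the original): each
 * Linux PTE table spans PTRS_PER_PTE * PAGE_SIZE = 512 * 4kB = 2MB of
 * virtual space, and PTRS_PER_PGD * 2MB = 2048 * 2MB covers the full
 * 4GB address space, matching the PGDIR_SHIFT of 21 below.
 */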

/*
 * PMD_SHIFT determines the size of the area a second-level page table can map
 * PGDIR_SHIFT determines the size of the area a first-level entry can map
 */
#define PMD_SHIFT               21
#define PGDIR_SHIFT             21

#define LIBRARY_TEXT_START      0x0c000000

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

#define pte_ERROR(pte)          __pte_error(__FILE__, __LINE__, pte_val(pte))
#define pmd_ERROR(pmd)          __pmd_error(__FILE__, __LINE__, pmd_val(pmd))
#define pgd_ERROR(pgd)          __pgd_error(__FILE__, __LINE__, pgd_val(pgd))
#endif /* !__ASSEMBLY__ */

#define PMD_SIZE                (1UL << PMD_SHIFT)
#define PMD_MASK                (~(PMD_SIZE-1))
#define PGDIR_SIZE              (1UL << PGDIR_SHIFT)
#define PGDIR_MASK              (~(PGDIR_SIZE-1))
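
/*
 * Worked values (editor's illustration, not in the original): with
 * PMD_SHIFT = PGDIR_SHIFT = 21,
 *
 *   PMD_SIZE = PGDIR_SIZE = 1UL << 21 = 0x00200000  (2MB)
 *   PMD_MASK = PGDIR_MASK = ~0x001fffff = 0xffe00000
 *
 * so each pgd entry (a pair of hardware first-level descriptors) maps a
 * 2MB, 2MB-aligned region.
 */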

/*
 * This is the lowest virtual address at which any user space mapping
 * may be placed.  This is particularly important for CPUs whose
 * exception vectors sit at the bottom of the address space
 * (non-high-vector CPUs).
 */
#define FIRST_USER_ADDRESS      PAGE_SIZE

#define FIRST_USER_PGD_NR       1
#define USER_PTRS_PER_PGD       ((TASK_SIZE/PGDIR_SIZE) - FIRST_USER_PGD_NR)

/*
 * section address mask and size definitions.
 */
#define SECTION_SHIFT           20
#define SECTION_SIZE            (1UL << SECTION_SHIFT)
#define SECTION_MASK            (~(SECTION_SIZE-1))

/*
 * ARMv6 supersection address mask and size definitions.
 */
#define SUPERSECTION_SHIFT      24
#define SUPERSECTION_SIZE       (1UL << SUPERSECTION_SHIFT)
#define SUPERSECTION_MASK       (~(SUPERSECTION_SIZE-1))

/*
 * "Linux" PTE definitions.
 *
 * We keep two sets of PTEs - the hardware version and the Linux version.
 * This allows greater flexibility in the way we map the Linux bits
 * onto the hardware tables, and allows us to have YOUNG and DIRTY
 * bits.
 *
 * The PTE table pointer refers to the "Linux" entries; the hardware
 * entries are stored 2048 bytes below them (see the layout diagram and
 * pmd_page_vaddr() below).
 */
#define L_PTE_PRESENT           (1 << 0)
#define L_PTE_YOUNG             (1 << 1)
#define L_PTE_FILE              (1 << 2)        /* only when !PRESENT */
#define L_PTE_DIRTY             (1 << 6)
#define L_PTE_WRITE             (1 << 7)
#define L_PTE_USER              (1 << 8)
#define L_PTE_EXEC              (1 << 9)
#define L_PTE_SHARED            (1 << 10)       /* shared(v6), coherent(xsc3) */

/*
 * These are the memory types, defined to be compatible with
 * pre-ARMv6 CPUs' cacheable and bufferable bits:   XXCB
 */
#define L_PTE_MT_UNCACHED       (0x00 << 2)     /* 0000 */
#define L_PTE_MT_BUFFERABLE     (0x01 << 2)     /* 0001 */
#define L_PTE_MT_WRITETHROUGH   (0x02 << 2)     /* 0010 */
#define L_PTE_MT_WRITEBACK      (0x03 << 2)     /* 0011 */
#define L_PTE_MT_MINICACHE      (0x06 << 2)     /* 0110 (sa1100, xscale) */
#define L_PTE_MT_WRITEALLOC     (0x07 << 2)     /* 0111 */
#define L_PTE_MT_DEV_SHARED     (0x04 << 2)     /* 0100 */
#define L_PTE_MT_DEV_NONSHARED  (0x0c << 2)     /* 1100 */
#define L_PTE_MT_DEV_WC         (0x09 << 2)     /* 1001 */
#define L_PTE_MT_DEV_CACHED     (0x0b << 2)     /* 1011 */
#define L_PTE_MT_MASK           (0x0f << 2)
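
/*
 * Usage sketch (editor's illustration, not part of the original header):
 * the memory type is a 4-bit field, so it is tested by masking with
 * L_PTE_MT_MASK rather than by testing individual bits, e.g.
 *
 *   (pte_val(pte) & L_PTE_MT_MASK) == L_PTE_MT_WRITEBACK
 *
 * is true only for normal write-back cached memory.
 */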

#ifndef __ASSEMBLY__

/*
 * The pgprot_* and protection_map entries will be fixed up at runtime
 * to include the cacheable and bufferable bits based on memory policy,
 * as well as any architecture dependent bits like global/ASID and SMP
 * shared mapping bits.
 */
#define _L_PTE_DEFAULT  L_PTE_PRESENT | L_PTE_YOUNG

extern pgprot_t         pgprot_user;
extern pgprot_t         pgprot_kernel;

#define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b))

#define PAGE_NONE               pgprot_user
#define PAGE_SHARED             _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE)
#define PAGE_SHARED_EXEC        _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC)
#define PAGE_COPY               _MOD_PROT(pgprot_user, L_PTE_USER)
#define PAGE_COPY_EXEC          _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_EXEC)
#define PAGE_READONLY           _MOD_PROT(pgprot_user, L_PTE_USER)
#define PAGE_READONLY_EXEC      _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_EXEC)
#define PAGE_KERNEL             pgprot_kernel
#define PAGE_KERNEL_EXEC        _MOD_PROT(pgprot_kernel, L_PTE_EXEC)

#define __PAGE_NONE             __pgprot(_L_PTE_DEFAULT)
#define __PAGE_SHARED           __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE)
#define __PAGE_SHARED_EXEC      __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC)
#define __PAGE_COPY             __pgprot(_L_PTE_DEFAULT | L_PTE_USER)
#define __PAGE_COPY_EXEC        __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_EXEC)
#define __PAGE_READONLY         __pgprot(_L_PTE_DEFAULT | L_PTE_USER)
#define __PAGE_READONLY_EXEC    __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_EXEC)

#endif /* __ASSEMBLY__ */

/*
 * The table below defines the page protection levels that we insert into our
 * Linux page table version.  These get translated into the best that the
 * architecture can perform.  Note that on most ARM hardware:
 *  1) We cannot do execute protection
 *  2) If we could do execute protection, then read is implied
 *  3) write implies read permissions
 */
#define __P000  __PAGE_NONE
#define __P001  __PAGE_READONLY
#define __P010  __PAGE_COPY
#define __P011  __PAGE_COPY
#define __P100  __PAGE_READONLY_EXEC
#define __P101  __PAGE_READONLY_EXEC
#define __P110  __PAGE_COPY_EXEC
#define __P111  __PAGE_COPY_EXEC

#define __S000  __PAGE_NONE
#define __S001  __PAGE_READONLY
#define __S010  __PAGE_SHARED
#define __S011  __PAGE_SHARED
#define __S100  __PAGE_READONLY_EXEC
#define __S101  __PAGE_READONLY_EXEC
#define __S110  __PAGE_SHARED_EXEC
#define __S111  __PAGE_SHARED_EXEC
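
/*
 * Worked example (editor's illustration, not in the original): the __Pxyz
 * and __Sxyz names index protection_map by the PROT_EXEC/PROT_WRITE/
 * PROT_READ bits for private and shared mappings respectively.  An
 * mmap(..., PROT_READ | PROT_WRITE, MAP_PRIVATE, ...) request selects
 * __P011 == __PAGE_COPY, which lacks L_PTE_WRITE: the first write faults
 * and the MM layer performs copy-on-write.  The same request with
 * MAP_SHARED selects __S011 == __PAGE_SHARED, which has L_PTE_WRITE set.
 */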

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)        (empty_zero_page)

#define pte_pfn(pte)            (pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)       (__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))

#define pte_none(pte)           (!pte_val(pte))
#define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0)
#define pte_page(pte)           (pfn_to_page(pte_pfn(pte)))
#define pte_offset_kernel(dir,addr)     (pmd_page_vaddr(*(dir)) + __pte_index(addr))

#define pte_offset_map(dir,addr)        (__pte_map(dir, KM_PTE0) + __pte_index(addr))
#define pte_offset_map_nested(dir,addr) (__pte_map(dir, KM_PTE1) + __pte_index(addr))
#define pte_unmap(pte)                  __pte_unmap(pte, KM_PTE0)
#define pte_unmap_nested(pte)           __pte_unmap(pte, KM_PTE1)

#ifndef CONFIG_HIGHPTE
#define __pte_map(dir,km)       pmd_page_vaddr(*(dir))
#define __pte_unmap(pte,km)     do { } while (0)
#else
#define __pte_map(dir,km)       ((pte_t *)kmap_atomic(pmd_page(*(dir)), km) + PTRS_PER_PTE)
#define __pte_unmap(pte,km)     kunmap_atomic((pte - PTRS_PER_PTE), km)
#endif
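
/*
 * Editor's note (illustrative, not in the original): in the CONFIG_HIGHPTE
 * case the pte page may live in highmem, so it is temporarily mapped with
 * kmap_atomic().  The "+ PTRS_PER_PTE" skips 512 pte_t entries (2048
 * bytes) from the start of the page to reach the Linux tables, mirroring
 * what pmd_page_vaddr() does for lowmem pages; __pte_unmap() undoes the
 * offset before kunmap_atomic().
 */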

#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)

#define set_pte_at(mm,addr,ptep,pteval) do { \
        set_pte_ext(ptep, pteval, (addr) >= TASK_SIZE ? 0 : PTE_EXT_NG); \
} while (0)
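
/*
 * Editor's note (an assumption based on the ARMv6 MMU architecture, not
 * stated in the original): user mappings (addr < TASK_SIZE) are created
 * with PTE_EXT_NG so the TLB entry is tagged "not global" and matches
 * only the current ASID; kernel mappings at or above TASK_SIZE are
 * global and shared by every address space.
 */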

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
#define pte_present(pte)        (pte_val(pte) & L_PTE_PRESENT)
#define pte_write(pte)          (pte_val(pte) & L_PTE_WRITE)
#define pte_dirty(pte)          (pte_val(pte) & L_PTE_DIRTY)
#define pte_young(pte)          (pte_val(pte) & L_PTE_YOUNG)
#define pte_special(pte)        (0)

#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(wrprotect, &= ~L_PTE_WRITE);
PTE_BIT_FUNC(mkwrite,   |= L_PTE_WRITE);
PTE_BIT_FUNC(mkclean,   &= ~L_PTE_DIRTY);
PTE_BIT_FUNC(mkdirty,   |= L_PTE_DIRTY);
PTE_BIT_FUNC(mkold,     &= ~L_PTE_YOUNG);
PTE_BIT_FUNC(mkyoung,   |= L_PTE_YOUNG);
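
/*
 * Expansion sketch (editor's illustration, not in the original): each
 * PTE_BIT_FUNC() use above generates a tiny helper; the first expands to
 *
 *   static inline pte_t pte_wrprotect(pte_t pte)
 *   { pte_val(pte) &= ~L_PTE_WRITE; return pte; }
 *
 * i.e. it returns a copy of the pte with the named Linux bit changed.
 */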

static inline pte_t pte_mkspecial(pte_t pte) { return pte; }

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
        __pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_UNCACHED)
#define pgprot_writecombine(prot) \
        __pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_BUFFERABLE)

#define pmd_none(pmd)           (!pmd_val(pmd))
#define pmd_present(pmd)        (pmd_val(pmd))
#define pmd_bad(pmd)            (pmd_val(pmd) & 2)

#define copy_pmd(pmdpd,pmdps)           \
        do {                            \
                pmdpd[0] = pmdps[0];    \
                pmdpd[1] = pmdps[1];    \
                flush_pmd_entry(pmdpd); \
        } while (0)

#define pmd_clear(pmdp)                 \
        do {                            \
                pmdp[0] = __pmd(0);     \
                pmdp[1] = __pmd(0);     \
                clean_pmd_entry(pmdp);  \
        } while (0)

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
        unsigned long ptr;

        /* Clear the low bits (type/domain flags and the 1kB table
           selector) to get the base of the pte page... */
        ptr = pmd_val(pmd) & ~(PTRS_PER_PTE * sizeof(void *) - 1);
        /* ...then step over the two hardware tables (2 * 1024 bytes)
           to reach the Linux tables; see the layout diagram above. */
        ptr += PTRS_PER_PTE * sizeof(void *);

        return __va(ptr);
}

#define pmd_page(pmd)           pfn_to_page(__phys_to_pfn(pmd_val(pmd)))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)       pfn_pte(page_to_pfn(page),prot)

/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry).
 */
#define pgd_none(pgd)           (0)
#define pgd_bad(pgd)            (0)
#define pgd_present(pgd)        (1)
#define pgd_clear(pgdp)         do { } while (0)
#define set_pgd(pgd,pgdp)       do { } while (0)

/* to find an entry in a page-table-directory */
#define pgd_index(addr)         ((addr) >> PGDIR_SHIFT)

#define pgd_offset(mm, addr)    ((mm)->pgd+pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)      pgd_offset(&init_mm, addr)

/* Find an entry in the second-level page table. */
#define pmd_offset(dir, addr)   ((pmd_t *)(dir))

/* Find an entry in the third-level page table. */
#define __pte_index(addr)       (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        const unsigned long mask = L_PTE_EXEC | L_PTE_WRITE | L_PTE_USER;
        pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
        return pte;
}
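
/*
 * Editor's note (illustrative, not in the original): pte_modify() masks
 * newprot so that only the access bits (EXEC/WRITE/USER) are replaced;
 * everything else, in particular the L_PTE_MT_* memory type and the
 * YOUNG/DIRTY state, is preserved from the old pte.  So an
 * mprotect(addr, len, PROT_READ) on a write-back cached page clears
 * L_PTE_WRITE but leaves the page write-back cached.
 */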

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * Encode and decode a swap entry.  Swap entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <--------------- offset --------------------> <- type --> 0 0 0
 *
 * This gives us up to 63 swap files and 32GB per swap file.  Note that
 * the offset field is always non-zero.
 */
#define __SWP_TYPE_SHIFT        3
#define __SWP_TYPE_BITS         6
#define __SWP_TYPE_MASK         ((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT      (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define __swp_type(x)           (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)         ((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp) ((pte_t) { (swp).val })
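
/*
 * Worked arithmetic (editor's illustration, not in the original): the
 * type field occupies bits [8:3] and the offset field bits [31:9], i.e.
 * 23 bits of offset.  With 4kB pages that is 2^23 * 4kB = 32GB per swap
 * file, matching the comment above.
 */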

/*
 * It is an error for the kernel to have more swap files than we can
 * encode in the PTEs.  This ensures that we know when MAX_SWAPFILES
 * is increased beyond what we presently support.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

/*
 * Encode and decode a file entry.  File entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <----------------------- offset ------------------------> 1 0 0
 */
#define pte_file(pte)           (pte_val(pte) & L_PTE_FILE)
#define pte_to_pgoff(x)         (pte_val(x) >> 3)
#define pgoff_to_pte(x)         __pte(((x) << 3) | L_PTE_FILE)

#define PTE_FILE_MAX_BITS       29
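
/*
 * Editor's note (illustrative, not in the original): a nonlinear file
 * pte keeps the page offset in bits [31:3], hence PTE_FILE_MAX_BITS of
 * 29; with 4kB pages that addresses up to 2^29 * 4kB = 2TB into a file.
 * Bit 2 (L_PTE_FILE) distinguishes file entries from swap entries, and
 * L_PTE_PRESENT is clear for both.
 */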

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
/* FIXME: this is not correct */
#define kern_addr_valid(addr)   (1)

#include <asm-generic/pgtable.h>

/*
 * We provide our own arch_get_unmapped_area to cope with VIPT caches.
 */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * remap a physical page `pfn' of size `size' with page protection `prot'
 * into virtual address `from'
 */
#define io_remap_pfn_range(vma,from,pfn,size,prot) \
                remap_pfn_range(vma, from, pfn, size, prot)

#define pgtable_cache_init() do { } while (0)

#endif /* !__ASSEMBLY__ */

#endif /* CONFIG_MMU */

#endif /* _ASMARM_PGTABLE_H */