/*
 * arch/arm/include/asm/pgtable-2level.h
 *
 * Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASM_PGTABLE_2LEVEL_H
#define _ASM_PGTABLE_2LEVEL_H

#define __PAGETABLE_PMD_FOLDED

/*
 * Hardware-wise, we have a two level page table structure, where the first
 * level has 4096 entries, and the second level has 256 entries.  Each entry
 * is one 32-bit word.  Most of the bits in the second level entry are used
 * by hardware, and there aren't any "accessed" and "dirty" bits.
 *
 * Linux on the other hand has a three level page table structure, which can
 * be wrapped to fit a two level page table structure easily - using the PGD
 * and PTE only.  However, Linux also expects one "PTE" table per page, and
 * at least a "dirty" bit.
 *
 * Therefore, we tweak the implementation slightly - we tell Linux that we
 * have 2048 entries in the first level, each of which is 8 bytes (iow, two
 * hardware pointers to the second level.)  The second level contains two
 * hardware PTE tables arranged contiguously, preceded by Linux versions
 * which contain the state information Linux needs.  We, therefore, end up
 * with 512 entries in the "PTE" level.
 *
 * This leads to the page tables having the following layout:
 *
 *    pgd             pte
 * |        |
 * +--------+
 * |        |       +------------+ +0
 * +- - - - +       | Linux pt 0 |
 * |        |       +------------+ +1024
 * +--------+ +0    | Linux pt 1 |
 * |        |-----> +------------+ +2048
 * +- - - - + +4    |  h/w pt 0  |
 * |        |-----> +------------+ +3072
 * +--------+ +8    |  h/w pt 1  |
 * |        |       +------------+ +4096
 *
 * See L_PTE_xxx below for definitions of bits in the "Linux pt", and
 * PTE_xxx for definitions of bits appearing in the "h/w pt".
 *
 * PMD_xxx definitions refer to bits in the first level page table.
 *
 * The "dirty" bit is emulated by only granting hardware write permission
 * iff the page is marked "writable" and "dirty" in the Linux PTE.  This
 * means that a write to a clean page will cause a permission fault, and
 * the Linux MM layer will mark the page dirty via handle_pte_fault().
 * For the hardware to notice the permission change, the TLB entry must
 * be flushed, and ptep_set_access_flags() does that for us.
 *
 * The "accessed" or "young" bit is emulated by a similar method; we only
 * allow accesses to the page if the "young" bit is set.  Accesses to the
 * page will cause a fault, and handle_pte_fault() will set the young bit
 * for us as long as the page is marked present in the corresponding Linux
 * PTE entry.  Again, ptep_set_access_flags() will ensure that the TLB is
 * up to date.
 *
 * However, when the "young" bit is cleared, we deny access to the page
 * by clearing the hardware PTE.  Currently Linux does not flush the TLB
 * for us in this case, which means the TLB will retain the translation
 * until either the TLB entry is evicted under pressure, or a context
 * switch which changes the user space mapping occurs.
 */
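/*
 * Illustration only (compiled out): a minimal sketch of the arithmetic
 * implied by the layout above.  The helper names are hypothetical and
 * are not part of this header's API; the real lookups are built from
 * the PTRS_PER_xxx / PTE_HWTABLE_xxx definitions below and the generic
 * page table walkers.
 */
#if 0
static inline unsigned long example_pgd_index(unsigned long addr)
{
	return addr >> 21;		/* 2048 first-level "Linux" entries, 2MiB each */
}

static inline unsigned long example_pte_index(unsigned long addr)
{
	return (addr >> 12) & 0x1ff;	/* 512 "Linux" PTEs per first-level entry */
}

static inline u32 *example_hw_pte(u32 *linux_pte)
{
	return linux_pte + 512;		/* h/w table sits 2048 bytes above the Linux table */
}
#endif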
#define PTRS_PER_PTE		512
#define PTRS_PER_PMD		1
#define PTRS_PER_PGD		2048

#define PTE_HWTABLE_PTRS	(PTRS_PER_PTE)
#define PTE_HWTABLE_OFF		(PTE_HWTABLE_PTRS * sizeof(pte_t))
#define PTE_HWTABLE_SIZE	(PTRS_PER_PTE * sizeof(u32))

/*
 * PMD_SHIFT determines the size of the area a second-level page table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#define PMD_SHIFT		21
#define PGDIR_SHIFT		21

#define PMD_SIZE		(1UL << PMD_SHIFT)
#define PMD_MASK		(~(PMD_SIZE-1))
#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE-1))

/*
 * section address mask and size definitions.
 */
#define SECTION_SHIFT		20
#define SECTION_SIZE		(1UL << SECTION_SHIFT)
#define SECTION_MASK		(~(SECTION_SIZE-1))

/*
 * ARMv6 supersection address mask and size definitions.
 */
#define SUPERSECTION_SHIFT	24
#define SUPERSECTION_SIZE	(1UL << SUPERSECTION_SHIFT)
#define SUPERSECTION_MASK	(~(SUPERSECTION_SIZE-1))

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)

/*
 * "Linux" PTE definitions.
 *
 * We keep two sets of PTEs - the hardware and the linux version.
 * This allows greater flexibility in the way we map the Linux bits
 * onto the hardware tables, and allows us to have YOUNG and DIRTY
 * bits.
 *
 * The PTE table pointer refers to the hardware entries; the "Linux"
 * entries are stored 2048 bytes below.
 */
#define L_PTE_VALID		(_AT(pteval_t, 1) << 0)		/* Valid */
#define L_PTE_PRESENT		(_AT(pteval_t, 1) << 0)
#define L_PTE_YOUNG		(_AT(pteval_t, 1) << 1)
#define L_PTE_DIRTY		(_AT(pteval_t, 1) << 6)
#define L_PTE_RDONLY		(_AT(pteval_t, 1) << 7)
#define L_PTE_USER		(_AT(pteval_t, 1) << 8)
#define L_PTE_XN		(_AT(pteval_t, 1) << 9)
#define L_PTE_SHARED		(_AT(pteval_t, 1) << 10)	/* shared(v6), coherent(xsc3) */
#define L_PTE_NONE		(_AT(pteval_t, 1) << 11)
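/*
 * Illustration only (compiled out): a C sketch of the "dirty"/"young"
 * emulation policy described at the top of this file, roughly what the
 * per-CPU set_pte_ext() implementations do in assembly.  The helper
 * names are simplified assumptions, not this header's API; see
 * arch/arm/mm/proc-*.S for the real thing.
 */
#if 0
static inline int example_hw_writable(pteval_t lpte)
{
	/* Hardware write permission is granted only when the Linux PTE
	 * is both writable and dirty, so the first write to a clean page
	 * faults and handle_pte_fault() can set L_PTE_DIRTY. */
	return (lpte & L_PTE_DIRTY) && !(lpte & L_PTE_RDONLY);
}

static inline int example_hw_mapped(pteval_t lpte)
{
	/* A PTE that is not present or not young gets no hardware entry
	 * at all, so any access faults and the fault handler can mark
	 * the page young again. */
	return (lpte & L_PTE_PRESENT) && (lpte & L_PTE_YOUNG);
}
#endif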
/*
 * These are the memory types, defined to be compatible with
 * pre-ARMv6 CPUs cacheable and bufferable bits:  n/a,n/a,C,B
 * ARMv6+ without TEX remapping, they are a table index.
 * ARMv6+ with TEX remapping, they correspond to n/a,TEX(0),C,B
 *
 * MT type		Pre-ARMv6	ARMv6+ type / cacheable status
 * UNCACHED		Uncached	Strongly ordered
 * BUFFERABLE		Bufferable	Normal memory / non-cacheable
 * WRITETHROUGH		Writethrough	Normal memory / write through
 * WRITEBACK		Writeback	Normal memory / write back, read alloc
 * MINICACHE		Minicache	N/A
 * WRITEALLOC		Writeback	Normal memory / write back, write alloc
 * DEV_SHARED		Uncached	Device memory (shared)
 * DEV_NONSHARED	Uncached	Device memory (non-shared)
 * DEV_WC		Bufferable	Normal memory / non-cacheable
 * DEV_CACHED		Writeback	Normal memory / write back, read alloc
 * VECTORS		Variable	Normal memory / variable
 *
 * All normal memory mappings have the following properties:
 * - reads can be repeated with no side effects
 * - repeated reads return the last value written
 * - reads can fetch additional locations without side effects
 * - writes can be repeated (in certain cases) with no side effects
 * - writes can be merged before accessing the target
 * - unaligned accesses can be supported
 *
 * All device mappings have the following properties:
 * - no access speculation
 * - no repetition (eg, on return from an exception)
 * - number, order and size of accesses are maintained
 * - unaligned accesses are "unpredictable"
 */
#define L_PTE_MT_UNCACHED	(_AT(pteval_t, 0x00) << 2)	/* 0000 */
#define L_PTE_MT_BUFFERABLE	(_AT(pteval_t, 0x01) << 2)	/* 0001 */
#define L_PTE_MT_WRITETHROUGH	(_AT(pteval_t, 0x02) << 2)	/* 0010 */
#define L_PTE_MT_WRITEBACK	(_AT(pteval_t, 0x03) << 2)	/* 0011 */
#define L_PTE_MT_MINICACHE	(_AT(pteval_t, 0x06) << 2)	/* 0110 (sa1100, xscale) */
#define L_PTE_MT_WRITEALLOC	(_AT(pteval_t, 0x07) << 2)	/* 0111 */
#define L_PTE_MT_DEV_SHARED	(_AT(pteval_t, 0x04) << 2)	/* 0100 */
#define L_PTE_MT_DEV_NONSHARED	(_AT(pteval_t, 0x0c) << 2)	/* 1100 */
#define L_PTE_MT_DEV_WC		(_AT(pteval_t, 0x09) << 2)	/* 1001 */
#define L_PTE_MT_DEV_CACHED	(_AT(pteval_t, 0x0b) << 2)	/* 1011 */
#define L_PTE_MT_VECTORS	(_AT(pteval_t, 0x0f) << 2)	/* 1111 */
#define L_PTE_MT_MASK		(_AT(pteval_t, 0x0f) << 2)
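/*
 * Illustration only (compiled out): the memory type occupies a 4-bit
 * field in the Linux PTE, so it is compared as a field against
 * L_PTE_MT_MASK rather than tested bit-by-bit.  The helper name below
 * is hypothetical and not part of this header.
 */
#if 0
static inline int example_pte_is_device(pteval_t lpte)
{
	pteval_t mt = lpte & L_PTE_MT_MASK;

	return mt == L_PTE_MT_DEV_SHARED || mt == L_PTE_MT_DEV_NONSHARED;
}
#endif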
#ifndef __ASSEMBLY__

/*
 * The "pud_xxx()" functions here are trivial when the pmd is folded into
 * the pud: the pud entry is never bad, always exists, and can't be set or
 * cleared.
 */
#define pud_none(pud)		(0)
#define pud_bad(pud)		(0)
#define pud_present(pud)	(1)
#define pud_clear(pudp)		do { } while (0)
#define set_pud(pud,pudp)	do { } while (0)

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
{
	return (pmd_t *)pud;
}

#define pmd_large(pmd)		(pmd_val(pmd) & 2)
#define pmd_bad(pmd)		(pmd_val(pmd) & 2)
#define pmd_present(pmd)	(pmd_val(pmd))

#define copy_pmd(pmdpd,pmdps)		\
	do {				\
		pmdpd[0] = pmdps[0];	\
		pmdpd[1] = pmdps[1];	\
		flush_pmd_entry(pmdpd);	\
	} while (0)

#define pmd_clear(pmdp)			\
	do {				\
		pmdp[0] = __pmd(0);	\
		pmdp[1] = __pmd(0);	\
		clean_pmd_entry(pmdp);	\
	} while (0)

/* we don't need complex calculations here as the pmd is folded into the pgd */
#define pmd_addr_end(addr,end)	(end)

#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
#define pte_special(pte)	(0)
static inline pte_t pte_mkspecial(pte_t pte) { return pte; }

/*
 * We don't have huge page support for short descriptors; for the moment,
 * define empty stubs for use by pin_page_for_write.
 */
#define pmd_hugewillfault(pmd)	(0)
#define pmd_thp_or_huge(pmd)	(0)

#endif /* __ASSEMBLY__ */

#endif /* _ASM_PGTABLE_2LEVEL_H */
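/*
 * Illustration only (compiled out): with the pmd folded into the pgd as
 * above, a walk through the page table API degenerates so that the pgd
 * entry and the pmd entry are the same 8-byte slot.  This sketch assumes
 * the usual pgd_offset()/pud_offset()/pte_offset_kernel() helpers
 * provided elsewhere; it is not part of this header.
 */
#if 0
static pte_t *example_walk(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);	/* one of 2048 8-byte entries */
	pud_t *pud = pud_offset(pgd, addr);	/* folded: same entry */
	pmd_t *pmd = pmd_offset(pud, addr);	/* folded: (pmd_t *)pud */

	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;

	return pte_offset_kernel(pmd, addr);	/* points at the "Linux" PTE */
}
#endif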