/* linux/arch/arm64/include/asm/pgtable-hwdef.h */
/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_PGTABLE_HWDEF_H
#define __ASM_PGTABLE_HWDEF_H

/* presumably provides PAGE_SHIFT/PAGE_SIZE and MAX_USER_VA_BITS used below */
#include <asm/memory.h>

/*
 * Number of page-table levels required to address 'va_bits' wide
 * address, without section mapping. We resolve the top (va_bits - PAGE_SHIFT)
 * bits with (PAGE_SHIFT - 3) bits at each page table level. Hence:
 *
 *  levels = DIV_ROUND_UP((va_bits - PAGE_SHIFT), (PAGE_SHIFT - 3))
 *
 * where DIV_ROUND_UP(n, d) => (((n) + (d) - 1) / (d))
 *
 * We cannot include linux/kernel.h which defines DIV_ROUND_UP here
 * due to build issues. So we open code DIV_ROUND_UP here:
 *
 *	((((va_bits) - PAGE_SHIFT) + (PAGE_SHIFT - 3) - 1) / (PAGE_SHIFT - 3))
 *
 * which gets simplified as :
 * (the "- 4" folds together "- PAGE_SHIFT + PAGE_SHIFT - 3 - 1")
 */
#define ARM64_HW_PGTABLE_LEVELS(va_bits) (((va_bits) - 4) / (PAGE_SHIFT - 3))

/*
 * Size mapped by an entry at level n ( 0 <= n <= 3)
 * We map (PAGE_SHIFT - 3) at all translation levels and PAGE_SHIFT bits
 * in the final page. The maximum number of translation levels supported by
 * the architecture is 4. Hence, starting at level n, we have further
 * ((4 - n) - 1) levels of translation excluding the offset within the page.
 * So, the total number of bits mapped by an entry at level n is :
 *
 *  ((4 - n) - 1) * (PAGE_SHIFT - 3) + PAGE_SHIFT
 *
 * Rearranging it a bit we get :
 *   (4 - n) * (PAGE_SHIFT - 3) + 3
 */
#define ARM64_HW_PGTABLE_LEVEL_SHIFT(n)	((PAGE_SHIFT - 3) * (4 - (n)) + 3)

/* One table per page; each descriptor is 8 bytes, hence 2^(PAGE_SHIFT - 3) */
#define PTRS_PER_PTE		(1 << (PAGE_SHIFT - 3))

/*
 * PMD_SHIFT determines the size a level 2 page table entry can map.
 * Only defined when a dedicated PMD level exists (more than 2 levels).
 */
#if CONFIG_PGTABLE_LEVELS > 2
#define PMD_SHIFT		ARM64_HW_PGTABLE_LEVEL_SHIFT(2)
#define PMD_SIZE		(_AC(1, UL) << PMD_SHIFT)
#define PMD_MASK		(~(PMD_SIZE-1))
#define PTRS_PER_PMD		PTRS_PER_PTE
#endif

/*
 * PUD_SHIFT determines the size a level 1 page table entry can map.
 * Only defined when a dedicated PUD level exists (more than 3 levels).
 */
#if CONFIG_PGTABLE_LEVELS > 3
#define PUD_SHIFT		ARM64_HW_PGTABLE_LEVEL_SHIFT(1)
#define PUD_SIZE		(_AC(1, UL) << PUD_SHIFT)
#define PUD_MASK		(~(PUD_SIZE-1))
#define PTRS_PER_PUD		PTRS_PER_PTE
#endif

/*
 * PGDIR_SHIFT determines the size a top-level page table entry can map
 * (depending on the configuration, this level can be 0, 1 or 2).
 */
#define PGDIR_SHIFT		ARM64_HW_PGTABLE_LEVEL_SHIFT(4 - CONFIG_PGTABLE_LEVELS)
#define PGDIR_SIZE		(_AC(1, UL) << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE-1))
/* Top-level table may be smaller than a page when the VA space needs fewer entries */
#define PTRS_PER_PGD		(1 << (MAX_USER_VA_BITS - PGDIR_SHIFT))

/*
 * Section address mask and size definitions.
 * A "section" here is a PMD-level block mapping (SECTION_SHIFT == PMD_SHIFT).
 */
#define SECTION_SHIFT		PMD_SHIFT
#define SECTION_SIZE		(_AC(1, UL) << SECTION_SHIFT)
#define SECTION_MASK		(~(SECTION_SIZE-1))

/*
 * Contiguous page definitions.
 *
 * The shifts are the log2 of the number of adjacent translation entries
 * grouped together by the contiguous hint (PTE_CONT / PMD_SECT_CONT);
 * the value depends on the configured page granule.
 */
#ifdef CONFIG_ARM64_64K_PAGES
#define CONT_PTE_SHIFT		5
#define CONT_PMD_SHIFT		5
#elif defined(CONFIG_ARM64_16K_PAGES)
#define CONT_PTE_SHIFT		7
#define CONT_PMD_SHIFT		5
#else
#define CONT_PTE_SHIFT		4
#define CONT_PMD_SHIFT		4
#endif

#define CONT_PTES		(1 << CONT_PTE_SHIFT)
#define CONT_PTE_SIZE		(CONT_PTES * PAGE_SIZE)
#define CONT_PTE_MASK		(~(CONT_PTE_SIZE - 1))
#define CONT_PMDS		(1 << CONT_PMD_SHIFT)
#define CONT_PMD_SIZE		(CONT_PMDS * PMD_SIZE)
#define CONT_PMD_MASK		(~(CONT_PMD_SIZE - 1))
/* the numerical offset of the PTE within a range of CONT_PTES */
#define CONT_RANGE_OFFSET(addr) (((addr)>>PAGE_SHIFT)&(CONT_PTES-1))

/*
 * Hardware page table definitions.
 *
 * Level 1 descriptor (PUD).
 * Bits [1:0] encode the descriptor type: 0b11 = table, 0b01 = block (sect).
 */
#define PUD_TYPE_TABLE		(_AT(pudval_t, 3) << 0)
#define PUD_TABLE_BIT		(_AT(pudval_t, 1) << 1)
#define PUD_TYPE_MASK		(_AT(pudval_t, 3) << 0)
#define PUD_TYPE_SECT		(_AT(pudval_t, 1) << 0)

/*
 * Level 2 descriptor (PMD).
 * Same type encoding in bits [1:0]; 0b00 marks an invalid (fault) entry.
 */
#define PMD_TYPE_MASK		(_AT(pmdval_t, 3) << 0)
#define PMD_TYPE_FAULT		(_AT(pmdval_t, 0) << 0)
#define PMD_TYPE_TABLE		(_AT(pmdval_t, 3) << 0)
#define PMD_TYPE_SECT		(_AT(pmdval_t, 1) << 0)
#define PMD_TABLE_BIT		(_AT(pmdval_t, 1) << 1)

/*
 * Section (PMD-level block mapping) attribute bits.
 */
#define PMD_SECT_VALID		(_AT(pmdval_t, 1) << 0)
#define PMD_SECT_USER		(_AT(pmdval_t, 1) << 6)		/* AP[1] */
#define PMD_SECT_RDONLY		(_AT(pmdval_t, 1) << 7)		/* AP[2] */
#define PMD_SECT_S		(_AT(pmdval_t, 3) << 8)		/* SH[1:0] */
#define PMD_SECT_AF		(_AT(pmdval_t, 1) << 10)	/* Access Flag */
#define PMD_SECT_NG		(_AT(pmdval_t, 1) << 11)	/* not Global */
#define PMD_SECT_CONT		(_AT(pmdval_t, 1) << 52)	/* Contiguous */
#define PMD_SECT_PXN		(_AT(pmdval_t, 1) << 53)	/* Privileged XN */
#define PMD_SECT_UXN		(_AT(pmdval_t, 1) << 54)	/* User XN */

/*
 * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
 */
#define PMD_ATTRINDX(t)		(_AT(pmdval_t, (t)) << 2)
#define PMD_ATTRINDX_MASK	(_AT(pmdval_t, 7) << 2)

/*
 * Level 3 descriptor (PTE).
 * Bits [1:0]: 0b11 = page, 0b00 = invalid (fault).
 */
#define PTE_TYPE_MASK		(_AT(pteval_t, 3) << 0)
#define PTE_TYPE_FAULT		(_AT(pteval_t, 0) << 0)
#define PTE_TYPE_PAGE		(_AT(pteval_t, 3) << 0)
#define PTE_TABLE_BIT		(_AT(pteval_t, 1) << 1)
#define PTE_USER		(_AT(pteval_t, 1) << 6)		/* AP[1] */
#define PTE_RDONLY		(_AT(pteval_t, 1) << 7)		/* AP[2] */
#define PTE_SHARED		(_AT(pteval_t, 3) << 8)		/* SH[1:0], inner shareable */
#define PTE_AF			(_AT(pteval_t, 1) << 10)	/* Access Flag */
#define PTE_NG			(_AT(pteval_t, 1) << 11)	/* nG */
#define PTE_DBM			(_AT(pteval_t, 1) << 51)	/* Dirty Bit Management */
#define PTE_CONT		(_AT(pteval_t, 1) << 52)	/* Contiguous range */
#define PTE_PXN			(_AT(pteval_t, 1) << 53)	/* Privileged XN */
#define PTE_UXN			(_AT(pteval_t, 1) << 54)	/* User XN */
#define PTE_HYP_XN		(_AT(pteval_t, 1) << 54)	/* HYP XN */

/* Output-address field: bits [47:PAGE_SHIFT] of the descriptor */
#define PTE_ADDR_LOW		(((_AT(pteval_t, 1) << (48 - PAGE_SHIFT)) - 1) << PAGE_SHIFT)
#ifdef CONFIG_ARM64_PA_BITS_52
/* With 52-bit PAs, PA bits [51:48] live in descriptor bits [15:12] */
#define PTE_ADDR_HIGH		(_AT(pteval_t, 0xf) << 12)
#define PTE_ADDR_MASK		(PTE_ADDR_LOW | PTE_ADDR_HIGH)
#else
#define PTE_ADDR_MASK		PTE_ADDR_LOW
#endif

/*
 * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
 */
#define PTE_ATTRINDX(t)		(_AT(pteval_t, (t)) << 2)
#define PTE_ATTRINDX_MASK	(_AT(pteval_t, 7) << 2)

/*
 * 2nd stage PTE definitions.
 * Same bit layout is mirrored for PTE/PMD/PUD descriptor value types.
 */
#define PTE_S2_RDONLY		(_AT(pteval_t, 1) << 6)   /* HAP[2:1] */
#define PTE_S2_RDWR		(_AT(pteval_t, 3) << 6)   /* HAP[2:1] */
#define PTE_S2_XN		(_AT(pteval_t, 2) << 53)  /* XN[1:0] */

#define PMD_S2_RDONLY		(_AT(pmdval_t, 1) << 6)   /* HAP[2:1] */
#define PMD_S2_RDWR		(_AT(pmdval_t, 3) << 6)   /* HAP[2:1] */
#define PMD_S2_XN		(_AT(pmdval_t, 2) << 53)  /* XN[1:0] */

#define PUD_S2_RDONLY		(_AT(pudval_t, 1) << 6)   /* HAP[2:1] */
#define PUD_S2_RDWR		(_AT(pudval_t, 3) << 6)   /* HAP[2:1] */
#define PUD_S2_XN		(_AT(pudval_t, 2) << 53)  /* XN[1:0] */

/*
 * Memory Attribute override for Stage-2 (MemAttr[3:0])
 */
#define PTE_S2_MEMATTR(t)	(_AT(pteval_t, (t)) << 2)
#define PTE_S2_MEMATTR_MASK	(_AT(pteval_t, 0xf) << 2)

/*
 * EL2/HYP PTE/PMD definitions.
 * HYP mappings reuse the AP[1] (user) bit encoding.
 */
#define PMD_HYP			PMD_SECT_USER
#define PTE_HYP			PTE_USER

/*
 * Highest possible physical address supported.
 */
#define PHYS_MASK_SHIFT		(CONFIG_ARM64_PA_BITS)
#define PHYS_MASK		((UL(1) << PHYS_MASK_SHIFT) - 1)

/* Common-not-Private bit in TTBR_ELx (bit 0) */
#define TTBR_CNP_BIT		(UL(1) << 0)

/*
 * TCR flags.
 *
 * TCR_ELx fields come in TTBR0 (T0SZ/IRGN0/ORGN0/SH0/TG0) and TTBR1
 * (T1SZ/IRGN1/ORGN1/SH1/TG1) variants; the combined TCR_*_ macros below
 * OR both halves together.
 */
#define TCR_T0SZ_OFFSET		0
#define TCR_T1SZ_OFFSET		16
/* TxSZ holds (64 - VA width), i.e. the size of the unmapped top region */
#define TCR_T0SZ(x)		((UL(64) - (x)) << TCR_T0SZ_OFFSET)
#define TCR_T1SZ(x)		((UL(64) - (x)) << TCR_T1SZ_OFFSET)
#define TCR_TxSZ(x)		(TCR_T0SZ(x) | TCR_T1SZ(x))
#define TCR_TxSZ_WIDTH		6
#define TCR_T0SZ_MASK		(((UL(1) << TCR_TxSZ_WIDTH) - 1) << TCR_T0SZ_OFFSET)

#define TCR_EPD0_SHIFT		7
#define TCR_EPD0_MASK		(UL(1) << TCR_EPD0_SHIFT)
/* Inner cacheability for TTBR0 walks */
#define TCR_IRGN0_SHIFT		8
#define TCR_IRGN0_MASK		(UL(3) << TCR_IRGN0_SHIFT)
#define TCR_IRGN0_NC		(UL(0) << TCR_IRGN0_SHIFT)
#define TCR_IRGN0_WBWA		(UL(1) << TCR_IRGN0_SHIFT)
#define TCR_IRGN0_WT		(UL(2) << TCR_IRGN0_SHIFT)
#define TCR_IRGN0_WBnWA		(UL(3) << TCR_IRGN0_SHIFT)

#define TCR_EPD1_SHIFT		23
#define TCR_EPD1_MASK		(UL(1) << TCR_EPD1_SHIFT)
/* Inner cacheability for TTBR1 walks */
#define TCR_IRGN1_SHIFT		24
#define TCR_IRGN1_MASK		(UL(3) << TCR_IRGN1_SHIFT)
#define TCR_IRGN1_NC		(UL(0) << TCR_IRGN1_SHIFT)
#define TCR_IRGN1_WBWA		(UL(1) << TCR_IRGN1_SHIFT)
#define TCR_IRGN1_WT		(UL(2) << TCR_IRGN1_SHIFT)
#define TCR_IRGN1_WBnWA		(UL(3) << TCR_IRGN1_SHIFT)

#define TCR_IRGN_NC		(TCR_IRGN0_NC | TCR_IRGN1_NC)
#define TCR_IRGN_WBWA		(TCR_IRGN0_WBWA | TCR_IRGN1_WBWA)
#define TCR_IRGN_WT		(TCR_IRGN0_WT | TCR_IRGN1_WT)
#define TCR_IRGN_WBnWA		(TCR_IRGN0_WBnWA | TCR_IRGN1_WBnWA)
#define TCR_IRGN_MASK		(TCR_IRGN0_MASK | TCR_IRGN1_MASK)

/* Outer cacheability for TTBR0/TTBR1 walks */
#define TCR_ORGN0_SHIFT		10
#define TCR_ORGN0_MASK		(UL(3) << TCR_ORGN0_SHIFT)
#define TCR_ORGN0_NC		(UL(0) << TCR_ORGN0_SHIFT)
#define TCR_ORGN0_WBWA		(UL(1) << TCR_ORGN0_SHIFT)
#define TCR_ORGN0_WT		(UL(2) << TCR_ORGN0_SHIFT)
#define TCR_ORGN0_WBnWA		(UL(3) << TCR_ORGN0_SHIFT)

#define TCR_ORGN1_SHIFT		26
#define TCR_ORGN1_MASK		(UL(3) << TCR_ORGN1_SHIFT)
#define TCR_ORGN1_NC		(UL(0) << TCR_ORGN1_SHIFT)
#define TCR_ORGN1_WBWA		(UL(1) << TCR_ORGN1_SHIFT)
#define TCR_ORGN1_WT		(UL(2) << TCR_ORGN1_SHIFT)
#define TCR_ORGN1_WBnWA		(UL(3) << TCR_ORGN1_SHIFT)

#define TCR_ORGN_NC		(TCR_ORGN0_NC | TCR_ORGN1_NC)
#define TCR_ORGN_WBWA		(TCR_ORGN0_WBWA | TCR_ORGN1_WBWA)
#define TCR_ORGN_WT		(TCR_ORGN0_WT | TCR_ORGN1_WT)
#define TCR_ORGN_WBnWA		(TCR_ORGN0_WBnWA | TCR_ORGN1_WBnWA)
#define TCR_ORGN_MASK		(TCR_ORGN0_MASK | TCR_ORGN1_MASK)

/* Shareability for TTBR0/TTBR1 walks */
#define TCR_SH0_SHIFT		12
#define TCR_SH0_MASK		(UL(3) << TCR_SH0_SHIFT)
#define TCR_SH0_INNER		(UL(3) << TCR_SH0_SHIFT)

#define TCR_SH1_SHIFT		28
#define TCR_SH1_MASK		(UL(3) << TCR_SH1_SHIFT)
#define TCR_SH1_INNER		(UL(3) << TCR_SH1_SHIFT)
#define TCR_SHARED		(TCR_SH0_INNER | TCR_SH1_INNER)

/* Translation granule size; note the TG0 and TG1 encodings differ */
#define TCR_TG0_SHIFT		14
#define TCR_TG0_MASK		(UL(3) << TCR_TG0_SHIFT)
#define TCR_TG0_4K		(UL(0) << TCR_TG0_SHIFT)
#define TCR_TG0_64K		(UL(1) << TCR_TG0_SHIFT)
#define TCR_TG0_16K		(UL(2) << TCR_TG0_SHIFT)

#define TCR_TG1_SHIFT		30
#define TCR_TG1_MASK		(UL(3) << TCR_TG1_SHIFT)
#define TCR_TG1_16K		(UL(1) << TCR_TG1_SHIFT)
#define TCR_TG1_4K		(UL(2) << TCR_TG1_SHIFT)
#define TCR_TG1_64K		(UL(3) << TCR_TG1_SHIFT)

#define TCR_IPS_SHIFT		32
#define TCR_IPS_MASK		(UL(7) << TCR_IPS_SHIFT)
#define TCR_A1			(UL(1) << 22)
#define TCR_ASID16		(UL(1) << 36)
#define TCR_TBI0		(UL(1) << 37)	/* top-byte ignore, TTBR0 */
#define TCR_TBI1		(UL(1) << 38)	/* top-byte ignore, TTBR1 */
#define TCR_HA			(UL(1) << 39)	/* hardware Access flag update */
#define TCR_HD			(UL(1) << 40)	/* hardware Dirty flag update */
#define TCR_NFD0		(UL(1) << 53)
#define TCR_NFD1		(UL(1) << 54)

/*
 * TTBR.
 */
#ifdef CONFIG_ARM64_PA_BITS_52
/*
 * This should be GENMASK_ULL(47, 2), i.e. 46 ones covering bits [47:2].
 * TTBR_ELx[1] is RES0 in this configuration.
 */
#define TTBR_BADDR_MASK_52	(((UL(1) << 46) - 1) << 2)
#endif

#ifdef CONFIG_ARM64_USER_VA_BITS_52
/* Must be at least 64-byte aligned to prevent corruption of the TTBR */
#define TTBR1_BADDR_4852_OFFSET	(((UL(1) << (52 - PGDIR_SHIFT)) - \
				 (UL(1) << (48 - PGDIR_SHIFT))) * 8)
#endif

#endif	/* __ASM_PGTABLE_HWDEF_H */