linux/arch/arc/include/asm/pgtable.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * vineetg: May 2011
 *  -Folded PAGE_PRESENT (used by VM) and PAGE_VALID (used by MMU) into 1.
 *     They are semantically the same although used in different contexts:
 *     VALID marks that a TLB entry exists, which can only happen if PRESENT
 *  - Utilise some unused free bits to confine PTE flags to 12 bits
 *     This is a must for 4k pg-sz
 *
 * vineetg: Mar 2011 - changes to accommodate MMU TLB Page Descriptor mods
 *  -TLB Locking never really existed, except for initial specs
 *  -SILENT_xxx not needed for our port
 *  -Per my request, MMU V3 changes the layout of some of the bits
 *     to avoid a few shifts in TLB Miss handlers.
 *
 * vineetg: April 2010
 *  -PGD entry no longer contains any flags. If empty it is 0, otherwise it
 *   has the Pg-Tbl ptr. Thus pmd_present(), pmd_valid(), pmd_set( ) become
 *   simpler
 *
 * vineetg: April 2010
 *  -Switched from 8:11:13 split for page table lookup to 11:8:13
 *  -this speeds up page table allocation itself as we now have to memset 1K
 *    instead of 8K per page table.
 * -TODO: Right now page table alloc is 8K and the rest 7K is unused;
 *    need to optimise it
 *
 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */

#ifndef _ASM_ARC_PGTABLE_H
#define _ASM_ARC_PGTABLE_H

#include <linux/bits.h>
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#include <asm/page.h>
#include <asm/mmu.h>    /* to propagate CONFIG_ARC_MMU_VER <n> */

/**************************************************************************
 * Page Table Flags
 *
 * ARC700 MMU only deals with software managed TLB entries.
 * Page Tables are purely for Linux VM's consumption and the bits below are
 * suited to that (uniqueness). Hence some are not implemented in the TLB and
 * some have different value in TLB.
 * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live
 *      in separate PD0 and PD1, which combined form a translation entry)
 *      while from the PTE perspective, they are 8 and 9 respectively
 * with MMU v3: Most bits (except SHARED) represent the exact hardware pos
 *      (saves some bit shift ops in TLB Miss hdlrs)
 */

#if (CONFIG_ARC_MMU_VER <= 2)

#define _PAGE_ACCESSED      (1<<1)      /* Page is accessed (S) */
#define _PAGE_CACHEABLE     (1<<2)      /* Page is cached (H) */
#define _PAGE_EXECUTE       (1<<3)      /* Page has user execute perm (H) */
#define _PAGE_WRITE         (1<<4)      /* Page has user write perm (H) */
#define _PAGE_READ          (1<<5)      /* Page has user read perm (H) */
#define _PAGE_DIRTY         (1<<6)      /* Page modified (dirty) (S) */
#define _PAGE_SPECIAL       (1<<7)
#define _PAGE_GLOBAL        (1<<8)      /* Page is global (H) */
#define _PAGE_PRESENT       (1<<10)     /* TLB entry is valid (H) */

#else   /* MMU v3 onwards */

#define _PAGE_CACHEABLE     (1<<0)      /* Page is cached (H) */
#define _PAGE_EXECUTE       (1<<1)      /* Page has user execute perm (H) */
#define _PAGE_WRITE         (1<<2)      /* Page has user write perm (H) */
#define _PAGE_READ          (1<<3)      /* Page has user read perm (H) */
#define _PAGE_ACCESSED      (1<<4)      /* Page is accessed (S) */
#define _PAGE_DIRTY         (1<<5)      /* Page modified (dirty) (S) */
#define _PAGE_SPECIAL       (1<<6)

#if (CONFIG_ARC_MMU_VER >= 4)
#define _PAGE_WTHRU         (1<<7)      /* Page cache mode write-thru (H) */
#endif

#define _PAGE_GLOBAL        (1<<8)      /* Page is global (H) */
#define _PAGE_PRESENT       (1<<9)      /* TLB entry is valid (H) */

#if (CONFIG_ARC_MMU_VER >= 4)
#define _PAGE_HW_SZ         (1<<10)     /* Page Size indicator (H): 0 normal, 1 super */
#endif

#define _PAGE_SHARED_CODE   (1<<11)     /* Shared Code page with cmn vaddr
                                           usable for shared TLB entries (H) */

#define _PAGE_UNUSED_BIT    (1<<12)
#endif
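
/*
 * e.g. (illustrative, MMU v3 layout): a present, cached, user read/write
 * data page would carry
 *     _PAGE_PRESENT | _PAGE_CACHEABLE | _PAGE_READ | _PAGE_WRITE
 * i.e. (1<<9) | (1<<0) | (1<<3) | (1<<2) = 0x20d in the PTE flag bits.
 */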

/* vmalloc permissions */
#define _K_PAGE_PERMS  (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \
                        _PAGE_GLOBAL | _PAGE_PRESENT)

#ifndef CONFIG_ARC_CACHE_PAGES
#undef _PAGE_CACHEABLE
#define _PAGE_CACHEABLE 0
#endif

#ifndef _PAGE_HW_SZ
#define _PAGE_HW_SZ     0
#endif

/* Defaults for every user page */
#define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK  (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)

/* More abbreviated helpers */
#define PAGE_U_NONE     __pgprot(___DEF)
#define PAGE_U_R        __pgprot(___DEF | _PAGE_READ)
#define PAGE_U_W_R      __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE)
#define PAGE_U_X_R      __pgprot(___DEF | _PAGE_READ | _PAGE_EXECUTE)
#define PAGE_U_X_W_R    __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE | \
                                                       _PAGE_EXECUTE)

#define PAGE_SHARED     PAGE_U_W_R

/* While the kernel executes out of untranslated space, vmalloc/modules use a
 * chunk of user vaddr space - visible in all addr spaces, but kernel mode only.
 * Thus Global, all-kernel-access, no-user-access, cached
 */
#define PAGE_KERNEL          __pgprot(_K_PAGE_PERMS | _PAGE_CACHEABLE)

/* ioremap */
#define PAGE_KERNEL_NO_CACHE __pgprot(_K_PAGE_PERMS)

/* Masks for actual TLB "PD"s */
#define PTE_BITS_IN_PD0         (_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_HW_SZ)
#define PTE_BITS_RWX            (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)

#ifdef CONFIG_ARC_HAS_PAE40
#define PTE_BITS_NON_RWX_IN_PD1 (0xff00000000 | PAGE_MASK | _PAGE_CACHEABLE)
#else
#define PTE_BITS_NON_RWX_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE)
#endif
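
/*
 * Under PAE40 the physical address is 40 bits wide: the 0xff00000000 term
 * above keeps PFN bits [39:32] in PD1, in addition to the bits covered by
 * PAGE_MASK.
 */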

/**************************************************************************
 * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
 *
 * Certain cases have 1:1 mapping
 *  e.g. __P101 means VM_READ, VM_EXEC and !VM_SHARED
 *       which directly corresponds to  PAGE_U_X_R
 *
 * Other rules which cause the divergence from 1:1 mapping
 *
 *  1. Although ARC700 can do exclusive execute/write protection (meaning R
 *     can be tracked independently of X/W, unlike some other CPUs), still to
 *     keep things consistent with other archs:
 *      -Write implies Read:   W => R
 *      -Execute implies Read: X => R
 *
 *  2. Pvt Writable doesn't have Write Enabled initially: Pvt-W => !W
 *     This is to enable the COW mechanism
 */
        /* xwr */
#define __P000  PAGE_U_NONE
#define __P001  PAGE_U_R
#define __P010  PAGE_U_R        /* Pvt-W => !W */
#define __P011  PAGE_U_R        /* Pvt-W => !W */
#define __P100  PAGE_U_X_R      /* X => R */
#define __P101  PAGE_U_X_R
#define __P110  PAGE_U_X_R      /* Pvt-W => !W and X => R */
#define __P111  PAGE_U_X_R      /* Pvt-W => !W */

#define __S000  PAGE_U_NONE
#define __S001  PAGE_U_R
#define __S010  PAGE_U_W_R      /* W => R */
#define __S011  PAGE_U_W_R
#define __S100  PAGE_U_X_R      /* X => R */
#define __S101  PAGE_U_X_R
#define __S110  PAGE_U_X_W_R    /* X => R */
#define __S111  PAGE_U_X_W_R

/****************************************************************
 * 2 tier (PGD:PTE) software page walker
 *
 * [31]            32 bit virtual address               [0]
 * -------------------------------------------------------
 * |               | <------------ PGDIR_SHIFT ---------> |
 * |               |                                      |
 * | BITS_FOR_PGD  |  BITS_FOR_PTE  |  <-- PAGE_SHIFT --> |
 * -------------------------------------------------------
 *       |                  |                |
 *       |                  |                --> off in page frame
 *       |                  ---> index into Page Table
 *       ----> index into Page Directory
 *
 * In a single page size configuration, only PAGE_SHIFT is fixed
 * So both PGD and PTE sizing can be tweaked
 *  e.g. 8K page (PAGE_SHIFT 13) can have
 *  - PGDIR_SHIFT 21  -> 11:8:13 address split
 *  - PGDIR_SHIFT 24  -> 8:11:13 address split
 *
 * If Super Page is configured, PGDIR_SHIFT becomes fixed too,
 * so the sizing flexibility is gone.
 */

#if defined(CONFIG_ARC_HUGEPAGE_16M)
#define PGDIR_SHIFT     24
#elif defined(CONFIG_ARC_HUGEPAGE_2M)
#define PGDIR_SHIFT     21
#else
/*
 * Only Normal page support so "hackable" (see comment above)
 * Default value provides 11:8:13 (8K), 11:9:12 (4K)
 */
#define PGDIR_SHIFT     21
#endif

#define BITS_FOR_PTE    (PGDIR_SHIFT - PAGE_SHIFT)
#define BITS_FOR_PGD    (32 - PGDIR_SHIFT)

#define PGDIR_SIZE      BIT(PGDIR_SHIFT)        /* vaddr span, not PGD sz */
#define PGDIR_MASK      (~(PGDIR_SIZE-1))

#define PTRS_PER_PTE    BIT(BITS_FOR_PTE)
#define PTRS_PER_PGD    BIT(BITS_FOR_PGD)
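
/*
 * Worked example, default PGDIR_SHIFT of 21 with 8K pages (PAGE_SHIFT 13):
 * BITS_FOR_PTE = 8 and BITS_FOR_PGD = 11, i.e. the 11:8:13 split above,
 * giving 2048 PGD entries, 256 PTEs per page table and a 2M vaddr span
 * (PGDIR_SIZE) per PGD entry.
 */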

/*
 * Number of PGD entries a userland program can use.
 * TASK_SIZE is the maximum vaddr that can be used by a userland program.
 */
#define USER_PTRS_PER_PGD       (TASK_SIZE / PGDIR_SIZE)

/*
 * No special requirement for the lowest virtual address at which we permit
 * a user space mapping.
 */
#define FIRST_USER_ADDRESS      0UL


/****************************************************************
 * Bucket load of VM Helpers
 */

#ifndef __ASSEMBLY__

#define pte_ERROR(e) \
        pr_crit("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
        pr_crit("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/* the zero page used for uninitialized and anonymous pages */
extern char empty_zero_page[PAGE_SIZE];
#define ZERO_PAGE(vaddr)        (virt_to_page(empty_zero_page))

#define pte_unmap(pte)          do { } while (0)
#define pte_unmap_nested(pte)   do { } while (0)

#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)

/* find the page descriptor of the Page Tbl ref by PMD entry */
#define pmd_page(pmd)           virt_to_page(pmd_val(pmd) & PAGE_MASK)

/* find the logical addr (phy for ARC) of the Page Tbl ref by PMD entry */
#define pmd_page_vaddr(pmd)     (pmd_val(pmd) & PAGE_MASK)

/* In a 2 level sys, setup the PGD entry with PTE value */
static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
        pmd_val(*pmdp) = (unsigned long)ptep;
}
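
/*
 * Note: with <asm-generic/pgtable-nopmd.h> included above, the pmd level is
 * folded into the pgd, so the "pmd" here is really the PGD entry itself;
 * pmd_set() just stores the logical addr (phy for ARC) of the PTE table.
 */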

#define pte_none(x)                     (!pte_val(x))
#define pte_present(x)                  (pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm, addr, ptep)       set_pte_at(mm, addr, ptep, __pte(0))

#define pmd_none(x)                     (!pmd_val(x))
#define pmd_bad(x)                      ((pmd_val(x) & ~PAGE_MASK))
#define pmd_present(x)                  (pmd_val(x))
#define pmd_clear(xp)                   do { pmd_val(*(xp)) = 0; } while (0)

#define pte_page(pte)           pfn_to_page(pte_pfn(pte))
#define mk_pte(page, prot)      pfn_pte(page_to_pfn(page), prot)
#define pfn_pte(pfn, prot)      __pte(__pfn_to_phys(pfn) | pgprot_val(prot))

/* Don't use virt_to_pfn for macros below: could cause truncations for PAE40 */
#define pte_pfn(pte)            (pte_val(pte) >> PAGE_SHIFT)
#define __pte_index(addr)       (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

/*
 * pte_offset gets a @ptr to PMD entry (PGD in our 2-tier paging system)
 * and returns ptr to PTE entry corresponding to @addr
 */
#define pte_offset(dir, addr) ((pte_t *)(pmd_page_vaddr(*dir)) +\
                                         __pte_index(addr))

/* No mapping of Page Tables in high mem etc, so following same as above */
#define pte_offset_kernel(dir, addr)            pte_offset(dir, addr)
#define pte_offset_map(dir, addr)               pte_offset(dir, addr)
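
/*
 * Thus a full software walk is just two lookups (sketch, assuming a
 * populated mapping; pmd_none()/pmd_bad() checks omitted):
 *
 *      pgd_t *pgd = pgd_offset(mm, addr);
 *      pte_t *pte = pte_offset((pmd_t *)pgd, addr);
 */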

/* Zoo of pte_xxx functions */
#define pte_read(pte)           (pte_val(pte) & _PAGE_READ)
#define pte_write(pte)          (pte_val(pte) & _PAGE_WRITE)
#define pte_dirty(pte)          (pte_val(pte) & _PAGE_DIRTY)
#define pte_young(pte)          (pte_val(pte) & _PAGE_ACCESSED)
#define pte_special(pte)        (pte_val(pte) & _PAGE_SPECIAL)

#define PTE_BIT_FUNC(fn, op) \
        static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(mknotpresent,      &= ~(_PAGE_PRESENT));
PTE_BIT_FUNC(wrprotect, &= ~(_PAGE_WRITE));
PTE_BIT_FUNC(mkwrite,   |= (_PAGE_WRITE));
PTE_BIT_FUNC(mkclean,   &= ~(_PAGE_DIRTY));
PTE_BIT_FUNC(mkdirty,   |= (_PAGE_DIRTY));
PTE_BIT_FUNC(mkold,     &= ~(_PAGE_ACCESSED));
PTE_BIT_FUNC(mkyoung,   |= (_PAGE_ACCESSED));
PTE_BIT_FUNC(exprotect, &= ~(_PAGE_EXECUTE));
PTE_BIT_FUNC(mkexec,    |= (_PAGE_EXECUTE));
PTE_BIT_FUNC(mkspecial, |= (_PAGE_SPECIAL));
PTE_BIT_FUNC(mkhuge,    |= (_PAGE_HW_SZ));
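
/*
 * e.g. PTE_BIT_FUNC(wrprotect, &= ~(_PAGE_WRITE)) above expands to:
 *
 *      static inline pte_t pte_wrprotect(pte_t pte)
 *      {
 *              pte_val(pte) &= ~(_PAGE_WRITE);
 *              return pte;
 *      }
 */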

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
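
/*
 * e.g. on an mprotect() to read-only, generic VM rewrites each PTE as
 * pte_modify(pte, PAGE_U_R): the RWX permission bits are replaced wholesale
 * while the pfn (via PAGE_MASK) and _PAGE_ACCESSED/_PAGE_DIRTY/_PAGE_SPECIAL
 * survive, courtesy of _PAGE_CHG_MASK.
 */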

/* Macro to mark a page protection as uncacheable */
#define pgprot_noncached(prot)  (__pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE))

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pteval)
{
        set_pte(ptep, pteval);
}

/*
 * All kernel related VM pages are in init's mm.
 */
#define pgd_offset_k(address)   pgd_offset(&init_mm, address)
#define pgd_index(addr)         ((addr) >> PGDIR_SHIFT)
#define pgd_offset(mm, addr)    (((mm)->pgd)+pgd_index(addr))

/*
 * Macro to quickly access the PGD entry, utilising the fact that some
 * arches may cache the pointer to the Page Directory of the "current" task
 * in an MMU register
 *
 * Thus task->mm->pgd (3 pointer dereferences, cache misses etc.) simply
 * becomes a register read
 *
 * ********CAUTION*******:
 * Kernel code might be dealing with some mm_struct of a NON "current" task
 * Thus use this macro only when you are certain that "current" is current
 * e.g. when dealing with signal frame setup code etc
 */
#ifndef CONFIG_SMP
#define pgd_offset_fast(mm, addr)       \
({                                      \
        pgd_t *pgd_base = (pgd_t *) read_aux_reg(ARC_REG_SCRATCH_DATA0);  \
        pgd_base + pgd_index(addr);     \
})
#else
#define pgd_offset_fast(mm, addr)       pgd_offset(mm, addr)
#endif
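
/*
 * e.g. signal frame setup runs in the context of "current" by definition,
 * so it can safely use pgd_offset_fast(current->mm, addr) and skip the
 * 3 pointer dereferences of pgd_offset().
 */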

extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
                      pte_t *ptep);

/* Encode swap {type,off} tuple into PTE
 * We reserve the low 13 bits for the 5-bit @type, keeping bits 12-5 zero,
 * ensuring that _PAGE_PRESENT is zero in a PTE holding a swap "identifier"
 */
#define __swp_entry(type, off)  ((swp_entry_t) { \
                                        ((type) & 0x1f) | ((off) << 13) })

/* Decode a PTE containing a swap "identifier" into its constituents */
#define __swp_type(pte_lookalike)       (((pte_lookalike).val) & 0x1f)
#define __swp_offset(pte_lookalike)     ((pte_lookalike).val >> 13)
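
/*
 * e.g. __swp_entry(3, 0x1234) yields 0x2468003: type 3 in bits [4:0],
 * offset 0x1234 starting at bit 13, and bits [12:5] - which include
 * _PAGE_PRESENT - all zero.
 */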

/* NOPs, to keep generic kernel happy */
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)   ((pte_t) { (x).val })

#define kern_addr_valid(addr)   (1)

/*
 * remap a physical page `pfn' of size `size' with page protection `prot'
 * into virtual address `from'
 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#include <asm/hugepage.h>
#endif

#include <asm-generic/pgtable.h>

/* to cope with aliasing VIPT cache */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()   do { } while (0)

#endif /* __ASSEMBLY__ */

#endif /* _ASM_ARC_PGTABLE_H */