linux/arch/nds32/include/asm/pgtable.h
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation

#ifndef _ASMNDS32_PGTABLE_H
#define _ASMNDS32_PGTABLE_H

#define __PAGETABLE_PMD_FOLDED 1
#include <asm-generic/4level-fixup.h>
#include <linux/sizes.h>

#include <asm/memory.h>
#include <asm/nds32.h>
#ifndef __ASSEMBLY__
#include <asm/fixmap.h>
#include <asm/io.h>
#include <nds32_intrinsic.h>
#endif

#ifdef CONFIG_ANDES_PAGE_SIZE_4KB
#define PGDIR_SHIFT      22
#define PTRS_PER_PGD     1024
#define PMD_SHIFT        22
#define PTRS_PER_PMD     1
#define PTRS_PER_PTE     1024
#endif

#ifdef CONFIG_ANDES_PAGE_SIZE_8KB
#define PGDIR_SHIFT      24
#define PTRS_PER_PGD     256
#define PMD_SHIFT        24
#define PTRS_PER_PMD     1
#define PTRS_PER_PTE     2048
#endif

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

#define pte_ERROR(pte)          __pte_error(__FILE__, __LINE__, pte_val(pte))
#define pmd_ERROR(pmd)          __pmd_error(__FILE__, __LINE__, pmd_val(pmd))
#define pgd_ERROR(pgd)          __pgd_error(__FILE__, __LINE__, pgd_val(pgd))
#endif /* !__ASSEMBLY__ */

#define PMD_SIZE                (1UL << PMD_SHIFT)
#define PMD_MASK                (~(PMD_SIZE-1))
#define PGDIR_SIZE              (1UL << PGDIR_SHIFT)
#define PGDIR_MASK              (~(PGDIR_SIZE-1))

/*
 * This is the lowest virtual address we can permit any user space
 * mapping to be mapped at.  This is particularly important for
 * non-high vector CPUs.
 */
#define FIRST_USER_ADDRESS      0x8000

#ifdef CONFIG_HIGHMEM
#define CONSISTENT_BASE         ((PKMAP_BASE) - (SZ_2M))
#define CONSISTENT_END          (PKMAP_BASE)
#else
#define CONSISTENT_BASE         (FIXADDR_START - SZ_2M)
#define CONSISTENT_END          (FIXADDR_START)
#endif
#define CONSISTENT_OFFSET(x)    (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
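
/*
 * The 2 MB window reserved just below PKMAP_BASE/FIXADDR_START is,
 * judging by the naming (an assumption, mirroring the classic ARM
 * layout), the remap area for DMA-coherent ("consistent") allocations;
 * CONSISTENT_OFFSET() turns an address in that window into a page
 * index relative to CONSISTENT_BASE.
 */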

#ifdef CONFIG_HIGHMEM
#ifndef __ASSEMBLY__
#include <asm/highmem.h>
#endif
#endif

#define VMALLOC_RESERVE         SZ_128M
#define VMALLOC_END             (CONSISTENT_BASE - PAGE_SIZE)
#define VMALLOC_START           ((VMALLOC_END) - VMALLOC_RESERVE)
#define VMALLOC_VMADDR(x)       ((unsigned long)(x))
#define MAXMEM                  __pa(VMALLOC_START)
#define MAXMEM_PFN              PFN_DOWN(MAXMEM)

#define FIRST_USER_PGD_NR       0
#define USER_PTRS_PER_PGD       ((TASK_SIZE/PGDIR_SIZE) + FIRST_USER_PGD_NR)

/* L2 PTE */
#define _PAGE_V                 (1UL << 0)

#define _PAGE_M_XKRW            (0UL << 1)
#define _PAGE_M_UR_KR           (1UL << 1)
#define _PAGE_M_UR_KRW          (2UL << 1)
#define _PAGE_M_URW_KRW         (3UL << 1)
#define _PAGE_M_KR              (5UL << 1)
#define _PAGE_M_KRW             (7UL << 1)

#define _PAGE_D                 (1UL << 4)
#define _PAGE_E                 (1UL << 5)
#define _PAGE_A                 (1UL << 6)
#define _PAGE_G                 (1UL << 7)

#define _PAGE_C_DEV             (0UL << 8)
#define _PAGE_C_DEV_WB          (1UL << 8)
#define _PAGE_C_MEM             (2UL << 8)
#define _PAGE_C_MEM_SHRD_WB     (4UL << 8)
#define _PAGE_C_MEM_SHRD_WT     (5UL << 8)
#define _PAGE_C_MEM_WB          (6UL << 8)
#define _PAGE_C_MEM_WT          (7UL << 8)

#define _PAGE_L                 (1UL << 11)

#define _HAVE_PAGE_L            (_PAGE_L)
#define _PAGE_FILE              (1UL << 1)
#define _PAGE_YOUNG             0
#define _PAGE_M_MASK            _PAGE_M_KRW
#define _PAGE_C_MASK            _PAGE_C_MEM_WT
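
/*
 * From the masks above: the access-mode (M) field occupies PTE bits
 * [3:1] (_PAGE_M_MASK == 7UL << 1) and the cacheability (C) field
 * occupies bits [10:8] (_PAGE_C_MASK == 7UL << 8).  The UR/KRW-style
 * suffixes name the user and kernel access a mode grants, e.g.
 * _PAGE_M_UR_KRW is user read-only, kernel read-write.
 */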

#ifdef CONFIG_SMP
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
#define _PAGE_CACHE_SHRD        _PAGE_C_MEM_SHRD_WT
#else
#define _PAGE_CACHE_SHRD        _PAGE_C_MEM_SHRD_WB
#endif
#else
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
#define _PAGE_CACHE_SHRD        _PAGE_C_MEM_WT
#else
#define _PAGE_CACHE_SHRD        _PAGE_C_MEM_WB
#endif
#endif

#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
#define _PAGE_CACHE             _PAGE_C_MEM_WT
#else
#define _PAGE_CACHE             _PAGE_C_MEM_WB
#endif

/*
 * + Level 1 descriptor (PMD)
 */
#define PMD_TYPE_TABLE          0

#ifndef __ASSEMBLY__

#define _PAGE_USER_TABLE     PMD_TYPE_TABLE
#define _PAGE_KERNEL_TABLE   PMD_TYPE_TABLE

#define PAGE_EXEC       __pgprot(_PAGE_V | _PAGE_M_XKRW | _PAGE_E)
#define PAGE_NONE       __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_A)
#define PAGE_READ       __pgprot(_PAGE_V | _PAGE_M_UR_KR)
#define PAGE_RDWR       __pgprot(_PAGE_V | _PAGE_M_URW_KRW | _PAGE_D)
#define PAGE_COPY       __pgprot(_PAGE_V | _PAGE_M_UR_KR)

#define PAGE_UXKRWX_V1  __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD)
#define PAGE_UXKRWX_V2  __pgprot(_PAGE_V | _PAGE_M_XKRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD)
#define PAGE_URXKRWX_V2 __pgprot(_PAGE_V | _PAGE_M_UR_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD)
#define PAGE_CACHE_L1   __pgprot(_HAVE_PAGE_L | _PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE)
#define PAGE_MEMORY     __pgprot(_HAVE_PAGE_L | _PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD)
#define PAGE_KERNEL     __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD)
#define PAGE_SHARED     __pgprot(_PAGE_V | _PAGE_M_URW_KRW | _PAGE_D | _PAGE_CACHE_SHRD)
#define PAGE_DEVICE     __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_G | _PAGE_C_DEV)
#endif /* __ASSEMBLY__ */

/*         xwr */
#define __P000  (PAGE_NONE | _PAGE_CACHE_SHRD)
#define __P001  (PAGE_READ | _PAGE_CACHE_SHRD)
#define __P010  (PAGE_COPY | _PAGE_CACHE_SHRD)
#define __P011  (PAGE_COPY | _PAGE_CACHE_SHRD)
#define __P100  (PAGE_EXEC | _PAGE_CACHE_SHRD)
#define __P101  (PAGE_READ | _PAGE_E | _PAGE_CACHE_SHRD)
#define __P110  (PAGE_COPY | _PAGE_E | _PAGE_CACHE_SHRD)
#define __P111  (PAGE_COPY | _PAGE_E | _PAGE_CACHE_SHRD)

#define __S000  (PAGE_NONE | _PAGE_CACHE_SHRD)
#define __S001  (PAGE_READ | _PAGE_CACHE_SHRD)
#define __S010  (PAGE_RDWR | _PAGE_CACHE_SHRD)
#define __S011  (PAGE_RDWR | _PAGE_CACHE_SHRD)
#define __S100  (PAGE_EXEC | _PAGE_CACHE_SHRD)
#define __S101  (PAGE_READ | _PAGE_E | _PAGE_CACHE_SHRD)
#define __S110  (PAGE_RDWR | _PAGE_E | _PAGE_CACHE_SHRD)
#define __S111  (PAGE_RDWR | _PAGE_E | _PAGE_CACHE_SHRD)
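
/*
 * __Pxxx are the protections for private (copy-on-write) mappings and
 * __Sxxx for shared ones; the three digits are the xwr permission bits
 * requested by userspace.  That is why writable private mappings map
 * to PAGE_COPY (user read-only until the write fault copies the page)
 * while the shared variants map straight to PAGE_RDWR.
 */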

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern struct page *empty_zero_page;
extern void paging_init(void);
#define ZERO_PAGE(vaddr)        (empty_zero_page)

#define pte_pfn(pte)            (pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)       (__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))

#define pte_none(pte)           !(pte_val(pte))
#define pte_clear(mm,addr,ptep) set_pte_at((mm),(addr),(ptep), __pte(0))
#define pte_page(pte)           (pfn_to_page(pte_pfn(pte)))

#define pte_index(address)                   (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address)      ((pte_t *)pmd_page_kernel(*(dir)) + pte_index(address))
#define pte_offset_map(dir, address)         ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
#define pte_offset_map_nested(dir, address)  pte_offset_map(dir, address)
#define pmd_page_kernel(pmd)                 ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

#define pte_unmap(pte)          do { } while (0)
#define pte_unmap_nested(pte)   do { } while (0)

#define pmd_off_k(address)      pmd_offset(pgd_offset_k(address), address)

#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
/*
 * Set a level 1 translation table entry, and clean it out of
 * any caches such that the MMUs can load it correctly.
 */
static inline void set_pmd(pmd_t * pmdp, pmd_t pmd)
{
        *pmdp = pmd;
#if !defined(CONFIG_CPU_DCACHE_DISABLE) && !defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
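        /*
         * With a write-back D-cache the new entry may still sit in the
         * cache while the hardware table walker fetches from memory:
         * write back the line holding the entry (cctl L1D_VA_WB), then
         * msync/dsb so the update is visible before any walk uses it.
         */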
        __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (pmdp):"memory");
        __nds32__msync_all();
        __nds32__dsb();
#endif
}

/*
 * Set a PTE and flush it out
 */
static inline void set_pte(pte_t * ptep, pte_t pte)
{
        *ptep = pte;
#if !defined(CONFIG_CPU_DCACHE_DISABLE) && !defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
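        /* Same writeback-and-sync sequence as set_pmd() above. */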
        __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (ptep):"memory");
        __nds32__msync_all();
        __nds32__dsb();
#endif
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */

/*
 * pte_write:        this page is writeable for user mode
 * pte_read:         this page is readable for user mode
 * pte_kernel_write: this page is writeable for kernel mode
 *
 * We don't have pte_kernel_read because the kernel can always read.
 */

#define pte_present(pte)        (pte_val(pte) & _PAGE_V)
#define pte_write(pte)          ((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_URW_KRW)
#define pte_read(pte)           (((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_UR_KR) || \
                                ((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_UR_KRW) || \
                                ((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_URW_KRW))
#define pte_kernel_write(pte)   (((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_URW_KRW) || \
                                ((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_UR_KRW) || \
                                ((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_KRW) || \
                                (((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_XKRW) && pte_exec(pte)))
#define pte_exec(pte)           (pte_val(pte) & _PAGE_E)
#define pte_dirty(pte)          (pte_val(pte) & _PAGE_D)
#define pte_young(pte)          (pte_val(pte) & _PAGE_YOUNG)

/*
 * The following only works if pte_present() is not true.
 */
#define pte_file(pte)           (pte_val(pte) & _PAGE_FILE)
#define pte_to_pgoff(x)         (pte_val(x) >> 2)
#define pgoff_to_pte(x)         __pte(((x) << 2) | _PAGE_FILE)

#define PTE_FILE_MAX_BITS       29

#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

static inline pte_t pte_wrprotect(pte_t pte)
{
        pte_val(pte) = pte_val(pte) & ~_PAGE_M_MASK;
        pte_val(pte) = pte_val(pte) | _PAGE_M_UR_KR;
        return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        pte_val(pte) = pte_val(pte) & ~_PAGE_M_MASK;
        pte_val(pte) = pte_val(pte) | _PAGE_M_URW_KRW;
        return pte;
}

PTE_BIT_FUNC(exprotect, &=~_PAGE_E);
PTE_BIT_FUNC(mkexec, |=_PAGE_E);
PTE_BIT_FUNC(mkclean, &=~_PAGE_D);
PTE_BIT_FUNC(mkdirty, |=_PAGE_D);
PTE_BIT_FUNC(mkold, &=~_PAGE_YOUNG);
PTE_BIT_FUNC(mkyoung, |=_PAGE_YOUNG);
static inline int pte_special(pte_t pte)
{
        return 0;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
        return pte;
}

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot)     __pgprot((pgprot_val(prot)&~_PAGE_C_MASK) | _PAGE_C_DEV)
#define pgprot_writecombine(prot)  __pgprot((pgprot_val(prot)&~_PAGE_C_MASK) | _PAGE_C_DEV_WB)
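
/*
 * A hedged usage sketch: driver code mapping MMIO would typically pass
 * e.g. pgprot_noncached(PAGE_KERNEL) (or start from PAGE_DEVICE above)
 * so the C field selects the device attribute rather than cached memory.
 */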

#define pmd_none(pmd)         (pmd_val(pmd)&0x1)
#define pmd_present(pmd)      (!pmd_none(pmd))
#define pmd_bad(pmd)          pmd_none(pmd)

#define copy_pmd(pmdpd,pmdps)   set_pmd((pmdpd), *(pmdps))
#define pmd_clear(pmdp)         set_pmd((pmdp), __pmd(1))
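
/*
 * Note the inverted encoding relative to PTEs: a level-1 entry that
 * points at a PTE table has bit 0 clear (PMD_TYPE_TABLE is 0), so
 * pmd_none() tests whether bit 0 is set, and pmd_clear() stores the
 * value 1.
 */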

static inline pmd_t __mk_pmd(pte_t * ptep, unsigned long prot)
{
        unsigned long ptr = (unsigned long)ptep;
        pmd_t pmd;

        /*
         * The pmd must be loaded with the physical
         * address of the PTE table
         */
        pmd_val(pmd) = __virt_to_phys(ptr) | prot;
        return pmd;
}
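
/*
 * A hedged usage sketch (illustrative; there is no such caller in this
 * header): populating a level-1 slot with a freshly allocated PTE table
 * would look roughly like
 *
 *	set_pmd(pmdp, __mk_pmd(new_ptep, _PAGE_KERNEL_TABLE));
 *
 * where new_ptep is a hypothetical pointer to the new table.
 */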

#define pmd_page(pmd)        virt_to_page(__va(pmd_val(pmd)))

/*
 * Permanent address of a page.
 */
#define pages_to_mb(x)       ((x) >> (20 - PAGE_SHIFT))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)       pfn_pte(page_to_pfn(page),prot)

/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
#define pgd_none(pgd)           (0)
#define pgd_bad(pgd)            (0)
#define pgd_present(pgd)        (1)
#define pgd_clear(pgdp)         do { } while (0)

#define page_pte_prot(page,prot)        mk_pte(page, prot)
#define page_pte(page)                  mk_pte(page, __pgprot(0))
/*
 *     L1PTE = $mr1 + ((virt >> PMD_SHIFT) << 2);
 *     L2PTE = (((virt >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) << 2);
 *     PPN = (phys & 0xfffff000);
 */
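
/*
 * A worked example under CONFIG_ANDES_PAGE_SIZE_4KB (PMD_SHIFT == 22,
 * PAGE_SHIFT == 12, PTRS_PER_PTE == 1024), for virt == 0xbf012345:
 *
 *	level-1 index = virt >> 22          = 0x2fc, entry at $mr1 + (0x2fc << 2)
 *	level-2 index = (virt >> 12) & 1023 = 0x012, byte offset 0x012 << 2
 *	PPN           = phys & 0xfffff000
 *
 * i.e. each table holds 1024 four-byte entries and one level-1 slot
 * covers 4 MB of virtual address space.
 */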

/* to find an entry in a page-table-directory */
#define pgd_index(address)      (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)      pgd_offset(&init_mm, addr)

/* Find an entry in the second-level page table. */
#define pmd_offset(dir, addr)   ((pmd_t *)(dir))

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        const unsigned long mask = 0xfff;
        pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
        return pte;
}

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* Encode and decode a swap entry.
 *
 * We support up to 32GB of swap on 4k machines
 */
#define __swp_type(x)                (((x).val >> 2) & 0x7f)
#define __swp_offset(x)              ((x).val >> 9)
#define __swp_entry(type,offset)     ((swp_entry_t) { ((type) << 2) | ((offset) << 9) })
#define __pte_to_swp_entry(pte)      ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)      ((pte_t) { (swp).val })
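
/*
 * Layout implied by the macros above: the swap type occupies PTE bits
 * [8:2] (7 bits) and the offset bits [31:9] (23 bits).  2^23 pages at
 * 4 KB each = 32 GB, which is where the 32GB figure in the comment
 * above comes from.
 */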

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr)   (1)

#include <asm-generic/pgtable.h>

/*
 * We provide our own arch_get_unmapped_area to cope with VIPT caches.
 */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * remap a physical address `phys' of size `size' with page protection `prot'
 * into virtual address `from'
 */

#endif /* !__ASSEMBLY__ */

#endif /* _ASMNDS32_PGTABLE_H */