linux/arch/csky/include/asm/pgtable.h
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ASM_CSKY_PGTABLE_H
#define __ASM_CSKY_PGTABLE_H

#include <asm/fixmap.h>
#include <asm/memory.h>
#include <asm/addrspace.h>
#include <abi/pgtable-bits.h>
#include <asm-generic/pgtable-nopmd.h>

#define PGDIR_SHIFT             22
#define PGDIR_SIZE              (1UL << PGDIR_SHIFT)
#define PGDIR_MASK              (~(PGDIR_SIZE-1))

#define USER_PTRS_PER_PGD       (PAGE_OFFSET/PGDIR_SIZE)

/*
 * C-SKY uses a two-level paging structure:
 */
#define PGD_ORDER       0
#define PTE_ORDER       0

#define PTRS_PER_PGD    ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
#define PTRS_PER_PMD    1
#define PTRS_PER_PTE    ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))

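/*
 * Illustrative sketch, not part of the upstream header: assuming the usual
 * csky setup of 4 KiB pages (PAGE_SHIFT == 12) and 4-byte table entries,
 * a 32-bit virtual address splits into a 10-bit pgd index, a 10-bit pte
 * index and a 12-bit page offset, so both tables hold 1024 entries and one
 * pgd entry covers PGDIR_SIZE == 4 MiB.  For example, vaddr 0x80401234:
 *
 *	pgd index = vaddr >> PGDIR_SHIFT           = 0x201
 *	pte index = (vaddr >> PAGE_SHIFT) & 0x3ff  = 0x001
 *	offset    = vaddr & ~PAGE_MASK             = 0x234
 */
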
#define pte_ERROR(e) \
        pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
#define pgd_ERROR(e) \
        pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

#define pmd_page(pmd)   (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#define pte_clear(mm, addr, ptep)       set_pte((ptep), \
        (((unsigned int) addr >= PAGE_OFFSET) ? __pte(_PAGE_GLOBAL) : __pte(0)))
#define pte_none(pte)           (!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)        (pte_val(pte) & _PAGE_PRESENT)
#define pte_pfn(x)      ((unsigned long)((x).pte_low >> PAGE_SHIFT))
#define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << PAGE_SHIFT) \
                                | pgprot_val(prot))

#define __pte_to_swp_entry(pte)         ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)           ((pte_t) { (x).val })

#define pte_page(x)                     pfn_to_page(pte_pfn(x))
#define __mk_pte(page_nr, pgprot)       __pte(((page_nr) << PAGE_SHIFT) | \
                                        pgprot_val(pgprot))

/*
 * C-SKY only has VALID and DIRTY bits in hardware. So we need to use these
 * two bits to emulate PRESENT, READ, WRITE, EXEC, MODIFIED and ACCESSED.
 */
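
/*
 * Illustrative sketch of that emulation (an interpretation, not text from
 * the upstream header): a freshly mapped, writable, clean page carries
 * _PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE but neither _PAGE_VALID nor
 * _PAGE_DIRTY, so its first access is expected to fault.  The fault path
 * then uses pte_mkyoung()/pte_mkdirty() below, which turn on _PAGE_VALID
 * once _PAGE_READ is set and _PAGE_DIRTY once _PAGE_WRITE is set, after
 * which the hardware TLB entry works without further faults.
 */
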
#define _PAGE_BASE      (_PAGE_PRESENT | _PAGE_ACCESSED)

#define PAGE_NONE       __pgprot(_PAGE_PROT_NONE)
#define PAGE_READ       __pgprot(_PAGE_BASE | _PAGE_READ | \
                                _CACHE_CACHED)
#define PAGE_WRITE      __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE | \
                                _CACHE_CACHED)
#define PAGE_SHARED PAGE_WRITE

#define PAGE_KERNEL     __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_VALID | \
                                _PAGE_WRITE | _PAGE_DIRTY | _PAGE_MODIFIED | \
                                _PAGE_GLOBAL | \
                                _CACHE_CACHED)

#define _PAGE_IOREMAP           (_PAGE_BASE | _PAGE_READ | _PAGE_VALID | \
                                _PAGE_WRITE | _PAGE_DIRTY | _PAGE_MODIFIED | \
                                _PAGE_GLOBAL | \
                                _CACHE_UNCACHED | _PAGE_SO)

#define _PAGE_CHG_MASK  (~(unsigned long) \
                                (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
                                _CACHE_MASK | _PAGE_GLOBAL))

#define MAX_SWAPFILES_CHECK() \
                BUILD_BUG_ON(MAX_SWAPFILES_SHIFT != 5)

#define __P000  PAGE_NONE
#define __P001  PAGE_READ
#define __P010  PAGE_READ
#define __P011  PAGE_READ
#define __P100  PAGE_READ
#define __P101  PAGE_READ
#define __P110  PAGE_READ
#define __P111  PAGE_READ

#define __S000  PAGE_NONE
#define __S001  PAGE_READ
#define __S010  PAGE_WRITE
#define __S011  PAGE_WRITE
#define __S100  PAGE_READ
#define __S101  PAGE_READ
#define __S110  PAGE_WRITE
#define __S111  PAGE_WRITE
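
/*
 * Note (interpretation, not from the upstream header): the private __P*
 * entries above never include _PAGE_WRITE, even for PROT_WRITE mappings,
 * so private writable pages start out read-only and get copied on the
 * first write fault (copy-on-write); only the shared __S* entries map
 * PROT_WRITE combinations to PAGE_WRITE.
 */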

extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)        (virt_to_page(empty_zero_page))

extern void load_pgd(unsigned long pg_dir);
extern pte_t invalid_pte_table[PTRS_PER_PTE];

static inline void set_pte(pte_t *p, pte_t pte)
{
        *p = pte;
#if defined(CONFIG_CPU_NEED_TLBSYNC)
        dcache_wb_line((u32)p);
#endif
        /* prevent out-of-order execution */
        smp_mb();
}
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
        unsigned long ptr;

        ptr = pmd_val(pmd);

        return __va(ptr);
}

#define pmd_phys(pmd) pmd_val(pmd)

static inline void set_pmd(pmd_t *p, pmd_t pmd)
{
        *p = pmd;
#if defined(CONFIG_CPU_NEED_TLBSYNC)
        dcache_wb_line((u32)p);
#endif
        /* prevent speculative execution */
        smp_mb();
}

static inline int pmd_none(pmd_t pmd)
{
        return pmd_val(pmd) == __pa(invalid_pte_table);
}

#define pmd_bad(pmd)    (pmd_val(pmd) & ~PAGE_MASK)

static inline int pmd_present(pmd_t pmd)
{
        return (pmd_val(pmd) != __pa(invalid_pte_table));
}

static inline void pmd_clear(pmd_t *p)
{
        pmd_val(*p) = (__pa(invalid_pte_table));
#if defined(CONFIG_CPU_NEED_TLBSYNC)
        dcache_wb_line((u32)p);
#endif
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_read(pte_t pte)
{
        return pte.pte_low & _PAGE_READ;
}

static inline int pte_write(pte_t pte)
{
        return (pte).pte_low & _PAGE_WRITE;
}

static inline int pte_dirty(pte_t pte)
{
        return (pte).pte_low & _PAGE_MODIFIED;
}

static inline int pte_young(pte_t pte)
{
        return (pte).pte_low & _PAGE_ACCESSED;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
        pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
        return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
        pte_val(pte) &= ~(_PAGE_MODIFIED|_PAGE_DIRTY);
        return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
        pte_val(pte) &= ~(_PAGE_ACCESSED|_PAGE_VALID);
        return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        pte_val(pte) |= _PAGE_WRITE;
        if (pte_val(pte) & _PAGE_MODIFIED)
                pte_val(pte) |= _PAGE_DIRTY;
        return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        pte_val(pte) |= _PAGE_MODIFIED;
        if (pte_val(pte) & _PAGE_WRITE)
                pte_val(pte) |= _PAGE_DIRTY;
        return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        pte_val(pte) |= _PAGE_ACCESSED;
        if (pte_val(pte) & _PAGE_READ)
                pte_val(pte) |= _PAGE_VALID;
        return pte;
}

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot);

/*
 * Macro to mark a page protection value as "uncacheable".  Note
 * that "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
        unsigned long prot = pgprot_val(_prot);

        prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED | _PAGE_SO;

        return __pgprot(prot);
}

#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
        unsigned long prot = pgprot_val(_prot);

        prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

        return __pgprot(prot);
}
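
/*
 * Usage sketch (illustrative only, not part of this header): a driver's
 * mmap() implementation would typically pick one of the helpers above
 * before remapping device memory, e.g.
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return io_remap_pfn_range(vma, vma->vm_start, pfn,
 *				  vma->vm_end - vma->vm_start,
 *				  vma->vm_page_prot);
 *
 * The two helpers differ only in _PAGE_SO: pgprot_noncached() sets it in
 * addition to _CACHE_UNCACHED, while pgprot_writecombine() leaves it clear.
 */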

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)    pfn_pte(page_to_pfn(page), (pgprot))
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
                     (pgprot_val(newprot)));
}
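
/*
 * Sketch of the intended use (an interpretation, not upstream text): on a
 * protection change such as mprotect(), the new pte is built roughly as
 *
 *	pte = pte_modify(pte, newprot);
 *
 * Because _PAGE_CHG_MASK above masks out only _PAGE_PRESENT, _PAGE_READ,
 * _PAGE_WRITE, the cache attribute bits and _PAGE_GLOBAL, the pfn and the
 * emulated ACCESSED/MODIFIED state survive while the protection and cache
 * bits are taken from newprot.
 */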

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init(void);

void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
                      pte_t *pte);

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr)   (1)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
        remap_pfn_range(vma, vaddr, pfn, size, prot)

#endif /* __ASM_CSKY_PGTABLE_H */