linux/arch/sparc/include/asm/pgtable_32.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SPARC_PGTABLE_H
#define _SPARC_PGTABLE_H

/*  asm/pgtable.h:  Defines and functions used to work
 *                        with Sparc page tables.
 *
 *  Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/const.h>

#define PMD_SHIFT               18
#define PMD_SIZE                (1UL << PMD_SHIFT)
#define PMD_MASK                (~(PMD_SIZE-1))
#define PMD_ALIGN(__addr)       (((__addr) + ~PMD_MASK) & PMD_MASK)

#define PGDIR_SHIFT             24
#define PGDIR_SIZE              (1UL << PGDIR_SHIFT)
#define PGDIR_MASK              (~(PGDIR_SIZE-1))
#define PGDIR_ALIGN(__addr)     (((__addr) + ~PGDIR_MASK) & PGDIR_MASK)
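
/* A worked view of the split implied by the shifts above, assuming the
 * usual sparc32 PAGE_SHIFT of 12: bits 31..24 index the pgd, bits
 * 23..18 the pmd, bits 17..12 the pte table, and bits 11..0 are the
 * page offset, so one pmd entry maps 1 << 18 = 256 KB and one pgd
 * entry maps 1 << 24 = 16 MB of virtual space.
 */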

#ifndef __ASSEMBLY__
#include <asm-generic/pgtable-nopud.h>

#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <asm/types.h>
#include <asm/pgtsrmmu.h>
#include <asm/vaddrs.h>
#include <asm/oplib.h>
#include <asm/cpu_type.h>


struct vm_area_struct;
struct page;

void load_mmu(void);
unsigned long calc_highpages(void);
unsigned long __init bootmem_init(unsigned long *pages_avail);

#define pte_ERROR(e)   __builtin_trap()
#define pmd_ERROR(e)   __builtin_trap()
#define pgd_ERROR(e)   __builtin_trap()

#define PTRS_PER_PTE            64
#define PTRS_PER_PMD            64
#define PTRS_PER_PGD            256
#define USER_PTRS_PER_PGD       PAGE_OFFSET / PGDIR_SIZE
#define FIRST_USER_ADDRESS      0UL
#define PTE_SIZE                (PTRS_PER_PTE*4)
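
/* A quick size check on the constants above: each pte table is
 * PTRS_PER_PTE * 4 = 256 bytes of entries (PTE_SIZE), and the
 * 256-entry pgd covers 256 * PGDIR_SIZE = 4 GB, i.e. the whole 32-bit
 * address space; USER_PTRS_PER_PGD is simply the number of pgd slots
 * below PAGE_OFFSET.
 */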

#define PAGE_NONE       SRMMU_PAGE_NONE
#define PAGE_SHARED     SRMMU_PAGE_SHARED
#define PAGE_COPY       SRMMU_PAGE_COPY
#define PAGE_READONLY   SRMMU_PAGE_RDONLY
#define PAGE_KERNEL     SRMMU_PAGE_KERNEL

/* Top-level page directory - dummy used by init-mm.
 * srmmu.c will assign the real one (which is dynamically sized) */
#define swapper_pg_dir NULL

void paging_init(void);

extern unsigned long ptr_in_current_pgd;

/*         xwr */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY
#define __P101  PAGE_READONLY
#define __P110  PAGE_COPY
#define __P111  PAGE_COPY

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY
#define __S101  PAGE_READONLY
#define __S110  PAGE_SHARED
#define __S111  PAGE_SHARED
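
/* The two tables above are indexed by the xwr (execute/write/read)
 * bits of the requested protection: the __PXXX entries are used for
 * MAP_PRIVATE mappings, which is why writable requests degrade to
 * PAGE_COPY so writes can be handled as copy-on-write, while the
 * __SXXX entries are used for MAP_SHARED mappings and keep
 * PAGE_SHARED for writable requests.
 */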

/* The first physical page can be anywhere; the following is needed so
 * that va-->pa and vice versa conversions work properly without a
 * performance hit for all __pa()/__va() operations.
 */
extern unsigned long phys_base;
extern unsigned long pfn_base;
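
/* A minimal sketch of how these are typically used; the real
 * __pa()/__va() definitions live in asm/page.h, so treat the exact
 * form below as an assumption rather than the authoritative one:
 *
 *   __pa(vaddr) ~ (unsigned long)(vaddr) - PAGE_OFFSET + phys_base
 *   __va(paddr) ~ (void *)((unsigned long)(paddr) - phys_base + PAGE_OFFSET)
 *
 * i.e. phys_base lets the kernel's linear mapping start at an
 * arbitrary physical address, and pfn_base is the pfn of that first
 * page.
 */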

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];

#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

/*
 * In general all page table modifications should use the V8 atomic
 * swap instruction.  This ensures the mmu and the cpu are in sync
 * with respect to ref/mod bits in the page tables.
 */
static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
{
        __asm__ __volatile__("swap [%2], %0" :
                        "=&r" (value) : "0" (value), "r" (addr) : "memory");
        return value;
}

/* Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
        srmmu_swap((unsigned long *)ptep, pte_val(pteval));
}

#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

static inline int srmmu_device_memory(unsigned long x)
{
        return ((x & 0xF0000000) != 0);
}

static inline struct page *pmd_page(pmd_t pmd)
{
        if (srmmu_device_memory(pmd_val(pmd)))
                BUG();
        return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4));
}

static inline unsigned long __pmd_page(pmd_t pmd)
{
        unsigned long v;

        if (srmmu_device_memory(pmd_val(pmd)))
                BUG();

        v = pmd_val(pmd) & SRMMU_PTD_PMASK;
        return (unsigned long)__nocache_va(v << 4);
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
        unsigned long v = pmd_val(pmd) & SRMMU_PTD_PMASK;
        return (unsigned long)__nocache_va(v << 4);
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
        if (srmmu_device_memory(pud_val(pud))) {
                return ~0;
        } else {
                unsigned long v = pud_val(pud) & SRMMU_PTD_PMASK;
                return (unsigned long)__nocache_va(v << 4);
        }
}

static inline int pte_present(pte_t pte)
{
        return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE);
}

static inline int pte_none(pte_t pte)
{
        return !pte_val(pte);
}

static inline void __pte_clear(pte_t *ptep)
{
        set_pte(ptep, __pte(0));
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        __pte_clear(ptep);
}

static inline int pmd_bad(pmd_t pmd)
{
        return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pmd_present(pmd_t pmd)
{
        return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline int pmd_none(pmd_t pmd)
{
        return !pmd_val(pmd);
}

static inline void pmd_clear(pmd_t *pmdp)
{
        set_pte((pte_t *)&pmd_val(*pmdp), __pte(0));
}

static inline int pud_none(pud_t pud)
{
        return !(pud_val(pud) & 0xFFFFFFF);
}

static inline int pud_bad(pud_t pud)
{
        return (pud_val(pud) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pud_present(pud_t pud)
{
        return ((pud_val(pud) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline void pud_clear(pud_t *pudp)
{
        set_pte((pte_t *)pudp, __pte(0));
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
        return pte_val(pte) & SRMMU_WRITE;
}

static inline int pte_dirty(pte_t pte)
{
        return pte_val(pte) & SRMMU_DIRTY;
}

static inline int pte_young(pte_t pte)
{
        return pte_val(pte) & SRMMU_REF;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
        return __pte(pte_val(pte) & ~SRMMU_WRITE);
}

static inline pte_t pte_mkclean(pte_t pte)
{
        return __pte(pte_val(pte) & ~SRMMU_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
        return __pte(pte_val(pte) & ~SRMMU_REF);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        return __pte(pte_val(pte) | SRMMU_WRITE);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        return __pte(pte_val(pte) | SRMMU_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        return __pte(pte_val(pte) | SRMMU_REF);
}

#define pfn_pte(pfn, prot)              mk_pte(pfn_to_page(pfn), prot)

static inline unsigned long pte_pfn(pte_t pte)
{
        if (srmmu_device_memory(pte_val(pte))) {
                /* Just return something that will cause
                 * pfn_valid() to return false.  This makes
                 * copy_one_pte() just copy the PTE over
                 * directly.
                 */
                return ~0UL;
        }
        return (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT-4);
}

#define pte_page(pte)   pfn_to_page(pte_pfn(pte))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
        return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_phys(unsigned long page, pgprot_t pgprot)
{
        return __pte(((page) >> 4) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
{
        return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot));
}
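
/* The >> 4 and << (PAGE_SHIFT-4) shifts above and in pte_pfn() reflect
 * the SRMMU convention of storing the physical address right-shifted
 * by four in the entry.  A worked example, assuming the usual 4 KB
 * pages (PAGE_SHIFT - 4 == 8): physical address 0x12345000 is pfn
 * 0x12345; mk_pte_phys() stores 0x12345000 >> 4 == 0x01234500 in the
 * pte, and pte_pfn() recovers 0x01234500 >> 8 == 0x12345.
 * mk_pte_io() additionally places the 4-bit IO space number in the
 * top nibble of the entry.
 */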

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
        pgprot_val(prot) &= ~pgprot_val(__pgprot(SRMMU_CACHE));
        return prot;
}

static pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__;
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        return __pte((pte_val(pte) & SRMMU_CHG_MASK) |
                pgprot_val(newprot));
}

struct seq_file;
void mmu_info(struct seq_file *m);

/* Fault handler stuff... */
#define FAULT_CODE_PROT     0x1
#define FAULT_CODE_WRITE    0x2
#define FAULT_CODE_USER     0x4

#define update_mmu_cache(vma, address, ptep) do { } while (0)

void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
                      unsigned long xva, unsigned int len);
void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len);

/* Encode and de-code a swap entry */
static inline unsigned long __swp_type(swp_entry_t entry)
{
        return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
        return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
        return (swp_entry_t) {
                (type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT
                | (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT };
}

#define __pte_to_swp_entry(pte)         ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)           ((pte_t) { (x).val })
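
/* A hedged usage sketch (the field positions and widths come from the
 * SRMMU_SWP_* constants in asm/pgtsrmmu.h, included above): a swap
 * location is parked in a non-present pte with something like
 *
 *   pte_t pte = __swp_entry_to_pte(__swp_entry(type, offset));
 *
 * and __swp_type()/__swp_offset() applied to __pte_to_swp_entry(pte)
 * recover the two fields again.
 */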

static inline unsigned long
__get_phys (unsigned long addr)
{
        switch (sparc_cpu_model){
        case sun4m:
        case sun4d:
                return ((srmmu_get_pte (addr) & 0xffffff00) << 4);
        default:
                return 0;
        }
}

static inline int
__get_iospace (unsigned long addr)
{
        switch (sparc_cpu_model){
        case sun4m:
        case sun4d:
                return (srmmu_get_pte (addr) >> 28);
        default:
                return -1;
        }
}

extern unsigned long *sparc_valid_addr_bitmap;

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr) \
        (test_bit(__pa((unsigned long)(addr))>>20, sparc_valid_addr_bitmap))
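
/* Note the >> 20 above: sparc_valid_addr_bitmap holds one bit per
 * megabyte of physical address space, so kern_addr_valid() answers at
 * 1 MB granularity.
 */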

/*
 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
 * its high 4 bits.  These macros/functions put it there or get it from there.
 */
#define MK_IOSPACE_PFN(space, pfn)      (pfn | (space << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn)                (pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn)                    (pfn & 0x0fffffffUL)
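
/* Worked example with sparc32's 32-bit BITS_PER_LONG: for IO space 0x3
 * and pfn 0x12345, MK_IOSPACE_PFN(0x3, 0x12345) == 0x12345 | (0x3 << 28)
 * == 0x30012345; GET_IOSPACE() shifts the top nibble back out (0x3)
 * and GET_PFN() masks it off (0x12345).
 */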

int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
                    unsigned long, pgprot_t);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
                                     unsigned long from, unsigned long pfn,
                                     unsigned long size, pgprot_t prot)
{
        unsigned long long offset, space, phys_base;

        offset = ((unsigned long long) GET_PFN(pfn)) << PAGE_SHIFT;
        space = GET_IOSPACE(pfn);
        phys_base = offset | (space << 32ULL);

        return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({                                                                        \
        int __changed = !pte_same(*(__ptep), __entry);                    \
        if (__changed) {                                                  \
                set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
                flush_tlb_page(__vma, __address);                         \
        }                                                                 \
        __changed;                                                        \
})

#endif /* !(__ASSEMBLY__) */

#define VMALLOC_START           _AC(0xfe600000,UL)
#define VMALLOC_END             _AC(0xffc00000,UL)

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

#endif /* !(_SPARC_PGTABLE_H) */