linux/arch/sparc/include/asm/pgtable_32.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SPARC_PGTABLE_H
#define _SPARC_PGTABLE_H

/*  asm/pgtable.h:  Defines and functions used to work
 *                        with Sparc page tables.
 *
 *  Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/const.h>

#ifndef __ASSEMBLY__
#include <asm-generic/4level-fixup.h>

#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <asm/types.h>
#include <asm/pgtsrmmu.h>
#include <asm/vaddrs.h>
#include <asm/oplib.h>
#include <asm/cpu_type.h>


struct vm_area_struct;
struct page;

void load_mmu(void);
unsigned long calc_highpages(void);
unsigned long __init bootmem_init(unsigned long *pages_avail);

#define pte_ERROR(e)   __builtin_trap()
#define pmd_ERROR(e)   __builtin_trap()
#define pgd_ERROR(e)   __builtin_trap()

#define PMD_SHIFT               22
#define PMD_SIZE                (1UL << PMD_SHIFT)
#define PMD_MASK                (~(PMD_SIZE-1))
#define PMD_ALIGN(__addr)       (((__addr) + ~PMD_MASK) & PMD_MASK)
#define PGDIR_SHIFT             SRMMU_PGDIR_SHIFT
#define PGDIR_SIZE              SRMMU_PGDIR_SIZE
#define PGDIR_MASK              SRMMU_PGDIR_MASK
#define PTRS_PER_PTE            1024
#define PTRS_PER_PMD            SRMMU_PTRS_PER_PMD
#define PTRS_PER_PGD            SRMMU_PTRS_PER_PGD
#define USER_PTRS_PER_PGD       PAGE_OFFSET / SRMMU_PGDIR_SIZE
#define FIRST_USER_ADDRESS      0UL
#define PTE_SIZE                (PTRS_PER_PTE*4)

#define PAGE_NONE       SRMMU_PAGE_NONE
#define PAGE_SHARED     SRMMU_PAGE_SHARED
#define PAGE_COPY       SRMMU_PAGE_COPY
#define PAGE_READONLY   SRMMU_PAGE_RDONLY
#define PAGE_KERNEL     SRMMU_PAGE_KERNEL

/* Top-level page directory - dummy used by init-mm.
 * srmmu.c will assign the real one (which is dynamically sized) */
#define swapper_pg_dir NULL

void paging_init(void);

extern unsigned long ptr_in_current_pgd;

/*         xwr */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY
#define __P101  PAGE_READONLY
#define __P110  PAGE_COPY
#define __P111  PAGE_COPY

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY
#define __S101  PAGE_READONLY
#define __S110  PAGE_SHARED
#define __S111  PAGE_SHARED

/* The first physical page can be anywhere; the following is needed so that
 * va-->pa and vice versa conversions work properly without a performance
 * hit for all __pa()/__va() operations.
 */
extern unsigned long phys_base;
extern unsigned long pfn_base;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];

#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

/*
 * In general all page table modifications should use the V8 atomic
 * swap instruction.  This ensures the mmu and the cpu are in sync
 * with respect to ref/mod bits in the page tables.
 */
static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
{
        __asm__ __volatile__("swap [%2], %0" :
                        "=&r" (value) : "0" (value), "r" (addr) : "memory");
        return value;
}

/* Certain architectures need to do special things when ptes
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
        srmmu_swap((unsigned long *)ptep, pte_val(pteval));
}

#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

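/* Illustrative usage only (mm, addr, ptep and page are hypothetical,
 * caller-provided values); set_pte() goes through srmmu_swap() so the
 * update is atomic with respect to MMU-initiated ref/mod updates of the
 * same word, as described above:
 *
 *        pte_t pte = mk_pte(page, PAGE_KERNEL);
 *        set_pte_at(mm, addr, ptep, pte);
 */
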
static inline int srmmu_device_memory(unsigned long x)
{
        return ((x & 0xF0000000) != 0);
}

static inline struct page *pmd_page(pmd_t pmd)
{
        if (srmmu_device_memory(pmd_val(pmd)))
                BUG();
        return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4));
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
        if (srmmu_device_memory(pgd_val(pgd))) {
                return ~0;
        } else {
                unsigned long v = pgd_val(pgd) & SRMMU_PTD_PMASK;
                return (unsigned long)__nocache_va(v << 4);
        }
}

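/* Note on the shifts above: SRMMU descriptors pack a 36-bit physical
 * address right-shifted by 4 into a 32-bit word, with the low bits used
 * for the entry type and permissions.  So pgd_page_vaddr() shifts left
 * by 4 to recover the physical address of the next-level table (e.g. a
 * table at physical 0x00fe1000 is stored as 0x000fe100 plus the type
 * bits), and pmd_page() shifts right by PAGE_SHIFT-4 to turn the packed
 * value into a pfn.  Device/IO mappings have a nonzero value in physical
 * address bits 35:32, which is exactly what srmmu_device_memory() tests
 * on the packed value.
 */
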
static inline int pte_present(pte_t pte)
{
        return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE);
}

static inline int pte_none(pte_t pte)
{
        return !pte_val(pte);
}

static inline void __pte_clear(pte_t *ptep)
{
        set_pte(ptep, __pte(0));
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        __pte_clear(ptep);
}

static inline int pmd_bad(pmd_t pmd)
{
        return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pmd_present(pmd_t pmd)
{
        return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline int pmd_none(pmd_t pmd)
{
        return !pmd_val(pmd);
}

static inline void pmd_clear(pmd_t *pmdp)
{
        int i;
        for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++)
                set_pte((pte_t *)&pmdp->pmdv[i], __pte(0));
}

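/* pmd_clear() loops because a Linux-level pte table (PTRS_PER_PTE == 1024
 * entries) is built from several smaller hardware tables of
 * SRMMU_REAL_PTRS_PER_PTE entries each; pmdv[] holds one hardware table
 * pointer per such table, and all of them have to be zapped together.
 */
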
static inline int pgd_none(pgd_t pgd)
{
        return !(pgd_val(pgd) & 0xFFFFFFF);
}

static inline int pgd_bad(pgd_t pgd)
{
        return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pgd_present(pgd_t pgd)
{
        return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline void pgd_clear(pgd_t *pgdp)
{
        set_pte((pte_t *)pgdp, __pte(0));
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
        return pte_val(pte) & SRMMU_WRITE;
}

static inline int pte_dirty(pte_t pte)
{
        return pte_val(pte) & SRMMU_DIRTY;
}

static inline int pte_young(pte_t pte)
{
        return pte_val(pte) & SRMMU_REF;
}

static inline int pte_special(pte_t pte)
{
        return 0;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
        return __pte(pte_val(pte) & ~SRMMU_WRITE);
}

static inline pte_t pte_mkclean(pte_t pte)
{
        return __pte(pte_val(pte) & ~SRMMU_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
        return __pte(pte_val(pte) & ~SRMMU_REF);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        return __pte(pte_val(pte) | SRMMU_WRITE);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        return __pte(pte_val(pte) | SRMMU_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        return __pte(pte_val(pte) | SRMMU_REF);
}

#define pte_mkspecial(pte)    (pte)

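/* These accessors are typically chained; a purely illustrative fault-path
 * style update, where vma, address and ptep are hypothetical, might look
 * like:
 *
 *        pte_t pte = *ptep;
 *        pte = pte_mkyoung(pte_mkdirty(pte));
 *        set_pte_at(vma->vm_mm, address, ptep, pte);
 */
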
#define pfn_pte(pfn, prot)              mk_pte(pfn_to_page(pfn), prot)

static inline unsigned long pte_pfn(pte_t pte)
{
        if (srmmu_device_memory(pte_val(pte))) {
                /* Just return something that will cause
                 * pfn_valid() to return false.  This makes
                 * copy_one_pte() just copy the PTE over
                 * directly.
                 */
                return ~0UL;
        }
        return (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT-4);
}

#define pte_page(pte)   pfn_to_page(pte_pfn(pte))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
        return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_phys(unsigned long page, pgprot_t pgprot)
{
        return __pte(((page) >> 4) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
{
        return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot));
}

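/* Worked example (illustrative): with 4K pages, PAGE_SHIFT - 4 == 8, so a
 * page at physical address 0x02345000 (pfn 0x2345) packs into the pte as
 * 0x2345 << 8 == 0x00234500, i.e. the physical address shifted right by 4,
 * with the protection bits in the low byte; pte_pfn() above undoes this.
 * mk_pte_io() additionally ORs the 4-bit I/O space into what would be
 * physical address bits 35:32 of the packed value.
 */
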
#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
        pgprot_val(prot) &= ~pgprot_val(__pgprot(SRMMU_CACHE));
        return prot;
}

static pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__;
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        return __pte((pte_val(pte) & SRMMU_CHG_MASK) |
                pgprot_val(newprot));
}

#define pgd_index(address) ((address) >> PGDIR_SHIFT)

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pgd_t * dir, unsigned long address)
{
        return (pmd_t *) pgd_page_vaddr(*dir) +
                ((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
}

/* Find an entry in the third-level page table.. */
pte_t *pte_offset_kernel(pmd_t * dir, unsigned long address);

/*
 * This shortcut works on sun4m (and sun4d) because the nocache area is static.
 */
#define pte_offset_map(d, a)            pte_offset_kernel(d,a)
#define pte_unmap(pte)          do{}while(0)

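/* Minimal sketch of a software page-table walk with the helpers above
 * (illustrative only; mm and addr are hypothetical, and real callers also
 * take the appropriate page table lock):
 *
 *        pgd_t *pgd = pgd_offset(mm, addr);
 *        pmd_t *pmd;
 *        pte_t *pte;
 *
 *        if (pgd_none(*pgd) || pgd_bad(*pgd))
 *                return NULL;
 *        pmd = pmd_offset(pgd, addr);
 *        if (pmd_none(*pmd) || pmd_bad(*pmd))
 *                return NULL;
 *        pte = pte_offset_kernel(pmd, addr);
 *        return pte;
 */
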
struct seq_file;
void mmu_info(struct seq_file *m);

/* Fault handler stuff... */
#define FAULT_CODE_PROT     0x1
#define FAULT_CODE_WRITE    0x2
#define FAULT_CODE_USER     0x4

#define update_mmu_cache(vma, address, ptep) do { } while (0)

void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
                      unsigned long xva, unsigned int len);
void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len);

/* Encode and de-code a swap entry */
static inline unsigned long __swp_type(swp_entry_t entry)
{
        return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
        return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
        return (swp_entry_t) {
                (type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT
                | (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT };
}

#define __pte_to_swp_entry(pte)         ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)           ((pte_t) { (x).val })

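/* Illustrative round trip through the swap encoding (the type and offset
 * values are hypothetical):
 *
 *        swp_entry_t ent = __swp_entry(1, 0x1234);
 *        pte_t pte = __swp_entry_to_pte(ent);
 *        ent = __pte_to_swp_entry(pte);
 *        -- now __swp_type(ent) == 1 and __swp_offset(ent) == 0x1234
 */
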
static inline unsigned long
__get_phys (unsigned long addr)
{
        switch (sparc_cpu_model){
        case sun4m:
        case sun4d:
                return ((srmmu_get_pte (addr) & 0xffffff00) << 4);
        default:
                return 0;
        }
}

static inline int
__get_iospace (unsigned long addr)
{
        switch (sparc_cpu_model){
        case sun4m:
        case sun4d:
                return (srmmu_get_pte (addr) >> 28);
        default:
                return -1;
        }
}

extern unsigned long *sparc_valid_addr_bitmap;

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr) \
        (test_bit(__pa((unsigned long)(addr))>>20, sparc_valid_addr_bitmap))

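/* kern_addr_valid() above tests one bit per megabyte of physical address
 * space (the address is reduced with __pa(addr) >> 20 before indexing
 * sparc_valid_addr_bitmap).
 */
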
/*
 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
 * its high 4 bits.  These macros/functions put it there or get it from there.
 */
#define MK_IOSPACE_PFN(space, pfn)      (pfn | (space << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn)                (pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn)                    (pfn & 0x0fffffffUL)

int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
                    unsigned long, pgprot_t);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
                                     unsigned long from, unsigned long pfn,
                                     unsigned long size, pgprot_t prot)
{
        unsigned long long offset, space, phys_base;

        offset = ((unsigned long long) GET_PFN(pfn)) << PAGE_SHIFT;
        space = GET_IOSPACE(pfn);
        phys_base = offset | (space << 32ULL);

        return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range

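/* Worked example (illustrative; BITS_PER_LONG is 32 here): for I/O space 0xe
 * and page frame 0x12345, MK_IOSPACE_PFN(0xe, 0x12345) gives 0xe0012345.
 * GET_IOSPACE() recovers 0xe, GET_PFN() recovers 0x12345, and
 * io_remap_pfn_range() rebuilds the 36-bit physical address as
 * (0x12345 << PAGE_SHIFT) | (0xeULL << 32) == 0xe12345000 before handing
 * the resulting pfn to remap_pfn_range().
 */
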
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({                                                                        \
        int __changed = !pte_same(*(__ptep), __entry);                    \
        if (__changed) {                                                  \
                set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
                flush_tlb_page(__vma, __address);                         \
        }                                                                 \
        __changed;                                                        \
})

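/* ptep_set_access_flags() is used by the generic fault code when it only
 * needs to upgrade an existing mapping (for example, setting the accessed
 * or dirty bit); the TLB flush is skipped when the entry did not change.
 */
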
#include <asm-generic/pgtable.h>

#endif /* !(__ASSEMBLY__) */

#define VMALLOC_START           _AC(0xfe600000,UL)
#define VMALLOC_END             _AC(0xffc00000,UL)

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()    do { } while (0)

#endif /* !(_SPARC_PGTABLE_H) */