linux/arch/sparc/include/asm/pgtable_32.h
#ifndef _SPARC_PGTABLE_H
#define _SPARC_PGTABLE_H

/*  asm/pgtable.h:  Defines and functions used to work
 *                        with Sparc page tables.
 *
 *  Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/const.h>

#ifndef __ASSEMBLY__
#include <asm-generic/4level-fixup.h>

#include <linux/spinlock.h>
#include <linux/swap.h>
#include <asm/types.h>
#include <asm/pgtsrmmu.h>
#include <asm/vaddrs.h>
#include <asm/oplib.h>
#include <asm/cpu_type.h>

struct vm_area_struct;
struct page;

extern void load_mmu(void);
extern unsigned long calc_highpages(void);

#define pte_ERROR(e)   __builtin_trap()
#define pmd_ERROR(e)   __builtin_trap()
#define pgd_ERROR(e)   __builtin_trap()

#define PMD_SHIFT               22
#define PMD_SIZE                (1UL << PMD_SHIFT)
#define PMD_MASK                (~(PMD_SIZE-1))
#define PMD_ALIGN(__addr)       (((__addr) + ~PMD_MASK) & PMD_MASK)
#define PGDIR_SHIFT             SRMMU_PGDIR_SHIFT
#define PGDIR_SIZE              SRMMU_PGDIR_SIZE
#define PGDIR_MASK              SRMMU_PGDIR_MASK
#define PTRS_PER_PTE            1024
#define PTRS_PER_PMD            SRMMU_PTRS_PER_PMD
#define PTRS_PER_PGD            SRMMU_PTRS_PER_PGD
#define USER_PTRS_PER_PGD       (PAGE_OFFSET / SRMMU_PGDIR_SIZE)
#define FIRST_USER_ADDRESS      0
#define PTE_SIZE                (PTRS_PER_PTE*4)
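
/* Illustrative arithmetic (assuming the usual sparc32 PAGE_SHIFT of 12):
 * PMD_SHIFT == 22 means one pmd entry maps PMD_SIZE == 4MB, which is
 * exactly PTRS_PER_PTE == 1024 ptes of 4KB each.  PTE_SIZE == 1024 * 4
 * == 4096 bytes, so one Linux page table occupies exactly one page.
 */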

#define PAGE_NONE       SRMMU_PAGE_NONE
#define PAGE_SHARED     SRMMU_PAGE_SHARED
#define PAGE_COPY       SRMMU_PAGE_COPY
#define PAGE_READONLY   SRMMU_PAGE_RDONLY
#define PAGE_KERNEL     SRMMU_PAGE_KERNEL

/* Top-level page directory - dummy used by init-mm.
 * srmmu.c will assign the real one (which is dynamically sized) */
#define swapper_pg_dir NULL

extern void paging_init(void);

extern unsigned long ptr_in_current_pgd;

/*         xwr */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY
#define __P101  PAGE_READONLY
#define __P110  PAGE_COPY
#define __P111  PAGE_COPY

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY
#define __S101  PAGE_READONLY
#define __S110  PAGE_SHARED
#define __S111  PAGE_SHARED
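
/* The __Pxxx entries cover private (MAP_PRIVATE) mappings, the __Sxxx
 * entries shared (MAP_SHARED) ones.  A private writable mapping
 * (__P010, __P011) gets PAGE_COPY rather than a writable pte, so the
 * first write faults and is resolved by copy-on-write.  SRMMU has no
 * write-without-read permission, hence "w" always implies "r" here.
 */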

/* First physical page can be anywhere, the following is needed so that
 * va-->pa and vice versa conversions work properly without performance
 * hit for all __pa()/__va() operations.
 */
extern unsigned long phys_base;
extern unsigned long pfn_base;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page;

#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))

/*
 * In general all page table modifications should use the V8 atomic
 * swap instruction.  This ensures the mmu and the cpu are in sync
 * with respect to ref/mod bits in the page tables.
 */
static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
{
        __asm__ __volatile__("swap [%2], %0" : "=&r" (value) : "0" (value), "r" (addr));
        return value;
}
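
/* Why swap rather than a plain store: the SRMMU table walker updates
 * REF/DIRTY bits in memory concurrently with the cpu.  With a plain
 * read-modify-write, a hardware update landing between the load and
 * the store would be lost; the atomic exchange closes that window.
 */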

/* Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
        srmmu_swap((unsigned long *)ptep, pte_val(pteval));
}

#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)

static inline int srmmu_device_memory(unsigned long x)
{
        return ((x & 0xF0000000) != 0);
}
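
/* An SRMMU pte/ptd holds a 36-bit physical address shifted right by 4,
 * so the top nibble of the word corresponds to physical address bits
 * 35:32.  A non-zero nibble means the target lies above the low 4GB,
 * which is where sun4m/sun4d put device/IO space - hence the name.
 */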

static inline struct page *pmd_page(pmd_t pmd)
{
        if (srmmu_device_memory(pmd_val(pmd)))
                BUG();
        return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4));
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
        if (srmmu_device_memory(pgd_val(pgd))) {
                return ~0;
        } else {
                unsigned long v = pgd_val(pgd) & SRMMU_PTD_PMASK;
                return (unsigned long)__nocache_va(v << 4);
        }
}
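
/* Table descriptors store a physical address >> 4 as well: shifting the
 * pointer field left by 4 recovers the physical address of the next
 * level table, and >> (PAGE_SHIFT - 4) turns it directly into a pfn.
 * __nocache_va() applies because page tables live in the uncached pool.
 */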

static inline int pte_present(pte_t pte)
{
        return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE);
}

static inline int pte_none(pte_t pte)
{
        return !pte_val(pte);
}

static inline void __pte_clear(pte_t *ptep)
{
        set_pte(ptep, __pte(0));
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        __pte_clear(ptep);
}

static inline int pmd_bad(pmd_t pmd)
{
        return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pmd_present(pmd_t pmd)
{
        return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline int pmd_none(pmd_t pmd)
{
        return !pmd_val(pmd);
}

static inline void pmd_clear(pmd_t *pmdp)
{
        int i;
        for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++)
                set_pte((pte_t *)&pmdp->pmdv[i], __pte(0));
}
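
/* A Linux page table (PTRS_PER_PTE == 1024 entries) is stitched together
 * from several 64-entry hardware tables (SRMMU_REAL_PTRS_PER_PTE), so a
 * pmd_t carries one hardware descriptor per chunk in pmdv[] - 1024/64
 * == 16 of them - and clearing a pmd means clearing each descriptor.
 */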

static inline int pgd_none(pgd_t pgd)
{
        return !(pgd_val(pgd) & 0xFFFFFFF);
}

static inline int pgd_bad(pgd_t pgd)
{
        return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pgd_present(pgd_t pgd)
{
        return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline void pgd_clear(pgd_t *pgdp)
{
        set_pte((pte_t *)pgdp, __pte(0));
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
        return pte_val(pte) & SRMMU_WRITE;
}

static inline int pte_dirty(pte_t pte)
{
        return pte_val(pte) & SRMMU_DIRTY;
}

static inline int pte_young(pte_t pte)
{
        return pte_val(pte) & SRMMU_REF;
}

/*
 * The following only work if pte_present() is not true.
 */
static inline int pte_file(pte_t pte)
{
        return pte_val(pte) & SRMMU_FILE;
}

static inline int pte_special(pte_t pte)
{
        return 0;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
        return __pte(pte_val(pte) & ~SRMMU_WRITE);
}

static inline pte_t pte_mkclean(pte_t pte)
{
        return __pte(pte_val(pte) & ~SRMMU_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
        return __pte(pte_val(pte) & ~SRMMU_REF);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        return __pte(pte_val(pte) | SRMMU_WRITE);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        return __pte(pte_val(pte) | SRMMU_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        return __pte(pte_val(pte) | SRMMU_REF);
}

#define pte_mkspecial(pte)    (pte)

#define pfn_pte(pfn, prot)              mk_pte(pfn_to_page(pfn), prot)

static inline unsigned long pte_pfn(pte_t pte)
{
        if (srmmu_device_memory(pte_val(pte))) {
                /* Just return something that will cause
                 * pfn_valid() to return false.  This makes
                 * copy_one_pte() just copy the pte over
                 * directly.
                 */
                return ~0UL;
        }
        return (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT-4);
}

#define pte_page(pte)   pfn_to_page(pte_pfn(pte))
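
/* Worked example: a pte mapping physical address 0x12345000 carries
 * 0x12345000 >> 4 == 0x01234500 in its pointer field; pte_pfn() then
 * yields 0x01234500 >> (12 - 4) == 0x12345, which is indeed
 * 0x12345000 >> PAGE_SHIFT.  (Again assuming PAGE_SHIFT == 12.)
 */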

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
        return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_phys(unsigned long page, pgprot_t pgprot)
{
        return __pte(((page) >> 4) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
{
        return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot));
}

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
        prot &= ~__pgprot(SRMMU_CACHE);
        return prot;
}

static pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__;
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        return __pte((pte_val(pte) & SRMMU_CHG_MASK) |
                pgprot_val(newprot));
}

#define pgd_index(address) ((address) >> PGDIR_SHIFT)

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
{
        return (pmd_t *) pgd_page_vaddr(*dir) +
                ((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
}

/* Find an entry in the third-level page table.. */
pte_t *pte_offset_kernel(pmd_t *dir, unsigned long address);

/*
 * This shortcut works on sun4m (and sun4d) because the nocache area is static.
 */
#define pte_offset_map(d, a)            pte_offset_kernel(d, a)
#define pte_unmap(pte)                  do { } while (0)
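
/* Putting the pieces together, a software walk of a kernel address
 * looks like this (illustrative only, addr/pfn are local names):
 *
 *      pgd_t *pgd = pgd_offset_k(addr);
 *      pmd_t *pmd = pmd_offset(pgd, addr);
 *      pte_t *pte = pte_offset_kernel(pmd, addr);
 *      if (pte_present(*pte))
 *              pfn = pte_pfn(*pte);
 */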

struct seq_file;
void mmu_info(struct seq_file *m);

/* Fault handler stuff... */
#define FAULT_CODE_PROT     0x1
#define FAULT_CODE_WRITE    0x2
#define FAULT_CODE_USER     0x4

#define update_mmu_cache(vma, address, ptep) do { } while (0)

void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
                      unsigned long xva, unsigned int len);
void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len);

/* Encode and de-code a swap entry */
static inline unsigned long __swp_type(swp_entry_t entry)
{
        return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
        return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
        return (swp_entry_t) {
                (type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT
                | (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT };
}

#define __pte_to_swp_entry(pte)         ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)           ((pte_t) { (x).val })
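
/* The round trip is the identity whenever type and offset fit their
 * masks: __swp_type(__swp_entry(t, o)) == t and
 * __swp_offset(__swp_entry(t, o)) == o.  The SRMMU_SWP_* layout in
 * pgtsrmmu.h is chosen so a swap pte never looks pte_present().
 */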

/* file-offset-in-pte helpers */
static inline unsigned long pte_to_pgoff(pte_t pte)
{
        return pte_val(pte) >> SRMMU_PTE_FILE_SHIFT;
}

static inline pte_t pgoff_to_pte(unsigned long pgoff)
{
        return __pte((pgoff << SRMMU_PTE_FILE_SHIFT) | SRMMU_FILE);
}

/*
 * This is made a constant because mm/fremap.c required a constant.
 */
#define PTE_FILE_MAX_BITS 24

static inline unsigned long
__get_phys(unsigned long addr)
{
        switch (sparc_cpu_model) {
        case sun4m:
        case sun4d:
                return ((srmmu_get_pte(addr) & 0xffffff00) << 4);
        default:
                return 0;
        }
}

static inline int
__get_iospace(unsigned long addr)
{
        switch (sparc_cpu_model) {
        case sun4m:
        case sun4d:
                return (srmmu_get_pte(addr) >> 28);
        default:
                return -1;
        }
}

extern unsigned long *sparc_valid_addr_bitmap;

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr) \
        (test_bit(__pa((unsigned long)(addr))>>20, sparc_valid_addr_bitmap))

/*
 * For sparc32 and sparc64, the pfn in io_remap_pfn_range() carries <iospace>
 * in its high 4 bits.  These macros/functions put it there or get it from there.
 */
#define MK_IOSPACE_PFN(space, pfn)      ((pfn) | ((space) << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn)                ((pfn) >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn)                    ((pfn) & 0x0fffffffUL)
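
/* Example with BITS_PER_LONG == 32: MK_IOSPACE_PFN(0x2, 0x00012345)
 * yields 0x20012345 - iospace 0x2 in bits 31:28, the pfn in the low
 * 28 bits - and GET_IOSPACE()/GET_PFN() split it back apart.
 */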

extern int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
                           unsigned long, pgprot_t);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
                                     unsigned long from, unsigned long pfn,
                                     unsigned long size, pgprot_t prot)
{
        unsigned long long offset, space, phys_base;

        offset = ((unsigned long long) GET_PFN(pfn)) << PAGE_SHIFT;
        space = GET_IOSPACE(pfn);
        phys_base = offset | (space << 32ULL);

        return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range
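
/* The 64-bit arithmetic above is needed because the full physical
 * address is 36 bits: the 4-bit iospace becomes physical address bits
 * 35:32, and only after shifting back down by PAGE_SHIFT does the
 * resulting pfn fit in an unsigned long again.
 */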

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({                                                                        \
        int __changed = !pte_same(*(__ptep), __entry);                    \
        if (__changed) {                                                  \
                set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
                flush_tlb_page(__vma, __address);                         \
        }                                                                 \
        __changed;                                                        \
})
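
/* The pte is only rewritten (and the TLB entry shot down) when the new
 * flags actually differ; the returned value tells the generic fault
 * code whether anything changed, so spurious faults avoid extra work.
 */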

#include <asm-generic/pgtable.h>

#endif /* !(__ASSEMBLY__) */

#define VMALLOC_START           _AC(0xfe600000,UL)
#define VMALLOC_END             _AC(0xffc00000,UL)

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()    do { } while (0)

#endif /* !(_SPARC_PGTABLE_H) */