linux/arch/hexagon/include/asm/pgtable.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Page table support for the Hexagon architecture
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */

#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

/*
 * Page table definitions for Qualcomm Hexagon processor.
 */
#include <asm/page.h>
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>

/* A handy thing to have if one has the RAM. Declared in head.S */
extern unsigned long empty_zero_page;

/*
 * The PTE model described here is that of the Hexagon Virtual Machine,
 * which autonomously walks 2-level page tables.  At a lower level, we
 * also describe the RISCish software-loaded TLB entry structure of
 * the underlying Hexagon processor. A kernel built to run on the
 * virtual machine has no need to know about the underlying hardware.
 */
#include <asm/vm_mmu.h>

/*
 * To maximize the comfort level for the PTE manipulation macros,
 * define the "well known" architecture-specific bits.
 */
#define _PAGE_READ      __HVM_PTE_R
#define _PAGE_WRITE     __HVM_PTE_W
#define _PAGE_EXECUTE   __HVM_PTE_X
#define _PAGE_USER      __HVM_PTE_U

/*
 * We have a total of 4 "soft" bits available in the abstract PTE.
 * The two mandatory software bits are Dirty and Accessed.
 * To make nonlinear swap work according to the more recent
 * model, we want a low order "Present" bit to indicate whether
 * the PTE describes MMU programming or swap space.
 */
#define _PAGE_PRESENT   (1<<0)
#define _PAGE_DIRTY     (1<<1)
#define _PAGE_ACCESSED  (1<<2)

/*
 * For now, let's say that Valid and Present are the same thing.
 * Alternatively, we could say that it's the "or" of R, W, and X
 * permissions.
 */
#define _PAGE_VALID     _PAGE_PRESENT

/*
 * We're not defining _PAGE_GLOBAL here, since there's no concept
 * of global pages or ASIDs exposed to the Hexagon Virtual Machine,
 * and we want to use the same page table structures and macros in
 * the native kernel as we do in the virtual machine kernel.
 * So we'll put up with a bit of inefficiency for now...
 */

/*
 * Top "FOURTH" level (pgd), which for the Hexagon VM is really
 * only the second from the bottom, pgd and pud both being collapsed.
 * Each entry represents 4MB of virtual address space, so a 4K table
 * of 1024 entries maps the full 4GB.
 */
#define PGDIR_SHIFT 22
#define PTRS_PER_PGD 1024

#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))

#ifdef CONFIG_PAGE_SIZE_4KB
#define PTRS_PER_PTE 1024
#endif

#ifdef CONFIG_PAGE_SIZE_16KB
#define PTRS_PER_PTE 256
#endif

#ifdef CONFIG_PAGE_SIZE_64KB
#define PTRS_PER_PTE 64
#endif

#ifdef CONFIG_PAGE_SIZE_256KB
#define PTRS_PER_PTE 16
#endif

#ifdef CONFIG_PAGE_SIZE_1MB
#define PTRS_PER_PTE 4
#endif

/*  Any bigger and the PTE disappears.  */
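/*
 * A quick sanity check on the numbers above: in every configuration,
 * PTRS_PER_PTE * PAGE_SIZE == PGDIR_SIZE (4MB), e.g. with 16KB pages,
 * 256 entries * 16KB = 4MB = 1UL << PGDIR_SHIFT.
 */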
#define pgd_ERROR(e) \
        printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__,\
                pgd_val(e))

/*
 * Page Protection Constants. Includes (in this variant) cache attributes.
 */
extern unsigned long _dflt_cache_att;

#define PAGE_NONE       __pgprot(_PAGE_PRESENT | _PAGE_USER | \
                                _dflt_cache_att)
#define PAGE_READONLY   __pgprot(_PAGE_PRESENT | _PAGE_USER | \
                                _PAGE_READ | _PAGE_EXECUTE | _dflt_cache_att)
#define PAGE_COPY       PAGE_READONLY
#define PAGE_EXEC       __pgprot(_PAGE_PRESENT | _PAGE_USER | \
                                _PAGE_READ | _PAGE_EXECUTE | _dflt_cache_att)
#define PAGE_COPY_EXEC  PAGE_EXEC
#define PAGE_SHARED     __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
                                _PAGE_EXECUTE | _PAGE_WRITE | _dflt_cache_att)
#define PAGE_KERNEL     __pgprot(_PAGE_PRESENT | _PAGE_READ | \
                                _PAGE_WRITE | _PAGE_EXECUTE | _dflt_cache_att)

/*
 * Aliases for mapping mmap() protection bits to page protections.
 * These get used for static initialization, so using the _dflt_cache_att
 * variable for the default cache attribute isn't workable. If the
 * default gets changed at boot time, the boot option code has to
 * update data structures like the protection_map[] array.
 */
#define CACHEDEF        (CACHE_DEFAULT << 6)

/* Private (copy-on-write) page protections. */
#define __P000 __pgprot(_PAGE_PRESENT | _PAGE_USER | CACHEDEF)
#define __P001 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | CACHEDEF)
#define __P010 __P000   /* Write-only copy-on-write */
#define __P011 __P001   /* Read/Write copy-on-write */
#define __P100 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
                        _PAGE_EXECUTE | CACHEDEF)
#define __P101 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_EXECUTE | \
                        _PAGE_READ | CACHEDEF)
#define __P110 __P100   /* Write/execute copy-on-write */
#define __P111 __P101   /* Read/Write/Execute, copy-on-write */

/* Shared page protections. */
#define __S000 __P000
#define __S001 __P001
#define __S010 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
                        _PAGE_WRITE | CACHEDEF)
#define __S011 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
                        _PAGE_WRITE | CACHEDEF)
#define __S100 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
                        _PAGE_EXECUTE | CACHEDEF)
#define __S101 __P101
#define __S110 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
                        _PAGE_EXECUTE | _PAGE_WRITE | CACHEDEF)
#define __S111 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
                        _PAGE_EXECUTE | _PAGE_WRITE | CACHEDEF)
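
/*
 * Worked example: a private PROT_READ|PROT_WRITE mapping resolves to
 * __P011, which is an alias for __P001 (read-only), so the first write
 * faults and the fault handler can perform copy-on-write.  The same
 * request with MAP_SHARED resolves to __S011, which really carries
 * _PAGE_WRITE.
 */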

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];  /* located in head.S */

/* Seems to be zero even in architectures where the zero page is firewalled? */
#define FIRST_USER_ADDRESS 0UL
#define pte_special(pte)        0
#define pte_mkspecial(pte)      (pte)

/*  HUGETLB not working currently  */
#ifdef CONFIG_HUGETLB_PAGE
#define pte_mkhuge(pte) __pte((pte_val(pte) & ~0x3) | HVM_HUGEPAGE_SIZE)
#endif

/*
 * For now, assume that higher-level code will do TLB/MMU invalidations
 * and don't insert that overhead into this low-level function.
 */
extern void sync_icache_dcache(pte_t pte);

#define pte_present_exec_user(pte) \
        ((pte_val(pte) & (_PAGE_EXECUTE | _PAGE_USER)) == \
        (_PAGE_EXECUTE | _PAGE_USER))

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
        /*  should really be using pte_exec, if it weren't declared later. */
        if (pte_present_exec_user(pteval))
                sync_icache_dcache(pteval);

        *ptep = pteval;
}
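
/*
 * Usage sketch (illustrative only): installing an executable user
 * mapping goes through this path, so the I/D caches are synced before
 * the PTE becomes visible, e.g.
 *
 *      set_pte_at(mm, addr, ptep, mk_pte(page, PAGE_EXEC));
 */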

/*
 * For the Hexagon Virtual Machine MMU (or its emulation), a null/invalid
 * L1 PTE (PMD/PGD) has 7 in the least significant bits. For the L2 PTE
 * (Linux PTE), the key is to have bits 11..9 all zero.  We'd use 0x7
 * as a universal null entry, but some of those least significant bits
 * are interpreted by software.
 */
#define _NULL_PMD       0x7
#define _NULL_PTE       0x0

static inline void pmd_clear(pmd_t *pmd_entry_ptr)
{
        pmd_val(*pmd_entry_ptr) = _NULL_PMD;
}

/*
 * Conveniently, a null PTE value is invalid.
 */
static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                                pte_t *ptep)
{
        pte_val(*ptep) = _NULL_PTE;
}

#ifdef NEED_PMD_INDEX_DESPITE_BEING_2_LEVEL
/**
 * pmd_index - returns the index of the entry in the PMD page
 * which would control the given virtual address
 */
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

#endif

/**
 * pgd_index - returns the index of the entry in the PGD page
 * which would control the given virtual address
 *
 * This returns the *index* for the address in the pgd_t
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))

/*
 * pgd_offset - find an offset in a page-table-directory
 */
#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))

/*
 * pgd_offset_k - get kernel (init_mm) pgd entry pointer for addr
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

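/*
 * Worked example: for a virtual address of 0xc0000000, pgd_index()
 * yields 0xc0000000 >> 22 = 0x300, so entry 768 of the 1024-entry pgd
 * covers that address's 4MB region.
 */
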
/**
 * pmd_none - check if pmd_entry is mapped
 * @pmd:  pmd entry
 *
 * MIPS checks it against that "invalid pte table" thing.
 */
static inline int pmd_none(pmd_t pmd)
{
        return pmd_val(pmd) == _NULL_PMD;
}

/**
 * pmd_present - is there a page table behind this?
 * Essentially the inverse of pmd_none.  We maybe
 * save an inline instruction by defining it this
 * way, instead of simply "!pmd_none".
 */
static inline int pmd_present(pmd_t pmd)
{
        return pmd_val(pmd) != (unsigned long)_NULL_PMD;
}

/**
 * pmd_bad - check if a PMD entry is "bad". That might mean swapped out.
 * As we have no known cause of badness, it's null, as it is for many
 * architectures.
 */
static inline int pmd_bad(pmd_t pmd)
{
        return 0;
}

/*
 * pmd_page - converts a PMD entry to a page pointer
 */
#define pmd_page(pmd)  (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
#define pmd_pgtable(pmd) pmd_page(pmd)

/**
 * pte_none - check if pte is mapped
 * @pte: pte_t entry
 */
static inline int pte_none(pte_t pte)
{
        return pte_val(pte) == _NULL_PTE;
}

/*
 * pte_present - check if page is present
 */
static inline int pte_present(pte_t pte)
{
        return pte_val(pte) & _PAGE_PRESENT;
}

/* mk_pte - make a PTE out of a page pointer and protection bits */
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))

/* pte_page - returns a page (frame pointer/descriptor?) based on a PTE */
#define pte_page(x) pfn_to_page(pte_pfn(x))

/* pte_mkold - mark PTE as not recently accessed */
static inline pte_t pte_mkold(pte_t pte)
{
        pte_val(pte) &= ~_PAGE_ACCESSED;
        return pte;
}

/* pte_mkyoung - mark PTE as recently accessed */
static inline pte_t pte_mkyoung(pte_t pte)
{
        pte_val(pte) |= _PAGE_ACCESSED;
        return pte;
}

/* pte_mkclean - mark page as in sync with backing store */
static inline pte_t pte_mkclean(pte_t pte)
{
        pte_val(pte) &= ~_PAGE_DIRTY;
        return pte;
}

/* pte_mkdirty - mark page as modified */
static inline pte_t pte_mkdirty(pte_t pte)
{
        pte_val(pte) |= _PAGE_DIRTY;
        return pte;
}

/* pte_young - "is PTE marked as accessed"? */
static inline int pte_young(pte_t pte)
{
        return pte_val(pte) & _PAGE_ACCESSED;
}

/* pte_dirty - "is PTE dirty?" */
static inline int pte_dirty(pte_t pte)
{
        return pte_val(pte) & _PAGE_DIRTY;
}

/* pte_modify - set protection bits on PTE */
static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
{
        pte_val(pte) &= PAGE_MASK;
        pte_val(pte) |= pgprot_val(prot);
        return pte;
}

/* pte_wrprotect - mark page as not writable */
static inline pte_t pte_wrprotect(pte_t pte)
{
        pte_val(pte) &= ~_PAGE_WRITE;
        return pte;
}

/* pte_mkwrite - mark page as writable */
static inline pte_t pte_mkwrite(pte_t pte)
{
        pte_val(pte) |= _PAGE_WRITE;
        return pte;
}

/* pte_mkexec - mark PTE as executable */
static inline pte_t pte_mkexec(pte_t pte)
{
        pte_val(pte) |= _PAGE_EXECUTE;
        return pte;
}

/* pte_read - "is PTE marked as readable?" */
static inline int pte_read(pte_t pte)
{
        return pte_val(pte) & _PAGE_READ;
}

/* pte_write - "is PTE marked as writable?" */
static inline int pte_write(pte_t pte)
{
        return pte_val(pte) & _PAGE_WRITE;
}

/* pte_exec - "is PTE marked as executable?" */
static inline int pte_exec(pte_t pte)
{
        return pte_val(pte) & _PAGE_EXECUTE;
}

/* __pte_to_swp_entry - extract swap entry from PTE */
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })

/* __swp_entry_to_pte - extract PTE from swap entry */
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })

/* pfn_pte - convert page number and protection value to page table entry */
#define pfn_pte(pfn, pgprot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(pgprot))

/* pte_pfn - convert pte to page frame number */
#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
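
/*
 * Note: pte_pfn() simply undoes the shift in pfn_pte(), discarding the
 * low-order protection/attribute bits, so pte_pfn(pfn_pte(pfn, prot))
 * round-trips to the original pfn.
 */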

/*
 * set_pte_at - update page table and do whatever magic may be
 * necessary to make the underlying hardware/firmware take note.
 *
 * VM may require a virtual instruction to alert the MMU.
 */
#define set_pte_at(mm, addr, ptep, pte) set_pte(ptep, pte)

/*
 * May need to invoke the virtual machine as well...
 */
#define pte_unmap(pte)          do { } while (0)
#define pte_unmap_nested(pte)   do { } while (0)

/*
 * pte_offset_map - returns the linear address of the page table entry
 * corresponding to an address
 */
#define pte_offset_map(dir, address)                                    \
        ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))

#define pte_offset_map_nested(pmd, addr) pte_offset_map(pmd, addr)

/* pte_offset_kernel - kernel version of pte_offset */
#define pte_offset_kernel(dir, address) \
        ((pte_t *) (unsigned long) __va(pmd_val(*(dir)) & PAGE_MASK) \
                                +  __pte_offset(address))

/* ZERO_PAGE - returns the globally shared zero page */
#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))

#define __pte_offset(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

/*  I think this is in case we have page table caches; needed by init/main.c  */
#define pgtable_cache_init()    do { } while (0)

/*
 * Swap/file PTE definitions.  If _PAGE_PRESENT is zero, the rest of the PTE is
 * interpreted as swap information.  The remaining free bits are interpreted as
 * swap type/offset tuple.  Rather than have the TLB fill handler test
 * _PAGE_PRESENT, we're going to reserve the permissions bits and set them to
 * all zeros for swap entries, which speeds up the miss handler at the cost of
 * 3 bits of offset.  That trade-off can be revisited if necessary, but Hexagon
 * processor architecture and target applications suggest a lot of TLB misses
 * and not much swap space.
 *
 * Format of swap PTE:
 *      bit     0:      Present (zero)
 *      bits    1-5:    swap type (arch independent layer uses 5 bits max)
 *      bits    6-9:    bits 3:0 of offset
 *      bits    10-12:  effectively _PAGE_PROTNONE (all zero)
 *      bits    13-31:  bits 22:4 of swap offset
 *
 * The split offset makes some of the following macros a little gnarly,
 * but there's plenty of precedent for this sort of thing.
 */

/* Used for swap PTEs */
#define __swp_type(swp_pte)             (((swp_pte).val >> 1) & 0x1f)

#define __swp_offset(swp_pte) \
        ((((swp_pte).val >> 6) & 0xf) | (((swp_pte).val >> 9) & 0x7ffff0))

#define __swp_entry(type, offset) \
        ((swp_entry_t)  { \
                (((type) << 1) | \
                 (((offset) & 0x7ffff0) << 9) | (((offset) & 0xf) << 6)) })
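
/*
 * Worked example: __swp_entry(2, 0x12345) packs type 2 and offset
 * 0x12345 into the value 0x2468144:
 *
 *      (2 << 1)        = 0x0000004   type in bits 1-5
 *      (0x5 << 6)      = 0x0000140   offset bits 3:0 in PTE bits 6-9
 *      (0x12340 << 9)  = 0x2468000   offset bits 22:4 in PTE bits 13-31
 *
 * __swp_type() and __swp_offset() recover 2 and 0x12345, and bit 0
 * (Present) stays clear, so the TLB handler sees an invalid entry.
 */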

/*  Oh boy.  There are a lot of possible arch overrides found in this file.  */
#include <asm-generic/pgtable.h>

#endif /* _ASM_PGTABLE_H */