linux/arch/hexagon/include/asm/pgtable.h
/*
 * Page table support for the Hexagon architecture
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

/*
 * Page table definitions for Qualcomm Hexagon processor.
 */
#include <asm/page.h>
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>

/* A handy thing to have if one has the RAM. Declared in head.S */
extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

/*
 * The PTE model described here is that of the Hexagon Virtual Machine,
 * which autonomously walks 2-level page tables.  At a lower level, we
 * also describe the RISCish software-loaded TLB entry structure of
 * the underlying Hexagon processor. A kernel built to run on the
 * virtual machine has no need to know about the underlying hardware.
 */
#include <asm/vm_mmu.h>

/*
 * To maximize the comfort level for the PTE manipulation macros,
 * define the "well known" architecture-specific bits.
 */
#define _PAGE_READ      __HVM_PTE_R
#define _PAGE_WRITE     __HVM_PTE_W
#define _PAGE_EXECUTE   __HVM_PTE_X
#define _PAGE_USER      __HVM_PTE_U

/*
 * We have a total of 4 "soft" bits available in the abstract PTE.
 * The two mandatory software bits are Dirty and Accessed.
 * To make nonlinear swap work according to the more recent
 * model, we want a low order "Present" bit to indicate whether
 * the PTE describes MMU programming or swap space.
 */
#define _PAGE_PRESENT   (1<<0)
#define _PAGE_DIRTY     (1<<1)
#define _PAGE_ACCESSED  (1<<2)

/*
 * For now, let's say that Valid and Present are the same thing.
 * Alternatively, we could say that it's the "or" of R, W, and X
 * permissions.
 */
#define _PAGE_VALID     _PAGE_PRESENT

/*
 * We're not defining _PAGE_GLOBAL here, since there's no concept
 * of global pages or ASIDs exposed to the Hexagon Virtual Machine,
 * and we want to use the same page table structures and macros in
 * the native kernel as we do in the virtual machine kernel.
 * So we'll put up with a bit of inefficiency for now...
 */

/*
 * Top "FOURTH" level (pgd), which for the Hexagon VM is really
 * only the second from the bottom, pgd and pud both being collapsed.
 * Each entry represents 4MB of virtual address space, so the 4K
 * table (1024 four-byte entries) maps the full 4GB.
 */
#define PGDIR_SHIFT 22
#define PTRS_PER_PGD 1024

#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))

#ifdef CONFIG_PAGE_SIZE_4KB
#define PTRS_PER_PTE 1024
#endif

#ifdef CONFIG_PAGE_SIZE_16KB
#define PTRS_PER_PTE 256
#endif

#ifdef CONFIG_PAGE_SIZE_64KB
#define PTRS_PER_PTE 64
#endif

#ifdef CONFIG_PAGE_SIZE_256KB
#define PTRS_PER_PTE 16
#endif

#ifdef CONFIG_PAGE_SIZE_1MB
#define PTRS_PER_PTE 4
#endif
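
/*
 * Illustrative note (not in the original source): the values above all
 * follow from one relation, since each PGD entry spans PGDIR_SIZE (4MB)
 * worth of mapped pages:
 *
 *	PTRS_PER_PTE == 1 << (PGDIR_SHIFT - PAGE_SHIFT)
 *
 * e.g. 4KB pages:  1 << (22 - 12) == 1024
 *      64KB pages: 1 << (22 - 16) == 64
 */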

/*  Any bigger and the PTE disappears.  */
#define pgd_ERROR(e) \
        printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__,\
                pgd_val(e))

/*
 * Page Protection Constants. Includes (in this variant) cache attributes.
 */
extern unsigned long _dflt_cache_att;

#define PAGE_NONE       __pgprot(_PAGE_PRESENT | _PAGE_USER | \
                                _dflt_cache_att)
#define PAGE_READONLY   __pgprot(_PAGE_PRESENT | _PAGE_USER | \
                                _PAGE_READ | _PAGE_EXECUTE | _dflt_cache_att)
#define PAGE_COPY       PAGE_READONLY
#define PAGE_EXEC       __pgprot(_PAGE_PRESENT | _PAGE_USER | \
                                _PAGE_READ | _PAGE_EXECUTE | _dflt_cache_att)
#define PAGE_COPY_EXEC  PAGE_EXEC
#define PAGE_SHARED     __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
                                _PAGE_EXECUTE | _PAGE_WRITE | _dflt_cache_att)
#define PAGE_KERNEL     __pgprot(_PAGE_PRESENT | _PAGE_READ | \
                                _PAGE_WRITE | _PAGE_EXECUTE | _dflt_cache_att)

/*
 * Aliases for mapping mmap() protection bits to page protections.
 * These get used for static initialization, so using the _dflt_cache_att
 * variable for the default cache attribute isn't workable. If the
 * default gets changed at boot time, the boot option code has to
 * update data structures like the protection_map[] array.
 */
#define CACHEDEF        (CACHE_DEFAULT << 6)

/* Private (copy-on-write) page protections. */
#define __P000 __pgprot(_PAGE_PRESENT | _PAGE_USER | CACHEDEF)
#define __P001 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | CACHEDEF)
#define __P010 __P000   /* Write-only copy-on-write */
#define __P011 __P001   /* Read/Write copy-on-write */
#define __P100 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
                        _PAGE_EXECUTE | CACHEDEF)
#define __P101 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_EXECUTE | \
                        _PAGE_READ | CACHEDEF)
#define __P110 __P100   /* Write/execute copy-on-write */
#define __P111 __P101   /* Read/Write/Execute, copy-on-write */

/* Shared page protections. */
#define __S000 __P000
#define __S001 __P001
#define __S010 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
                        _PAGE_WRITE | CACHEDEF)
#define __S011 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
                        _PAGE_WRITE | CACHEDEF)
#define __S100 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
                        _PAGE_EXECUTE | CACHEDEF)
#define __S101 __P101
#define __S110 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
                        _PAGE_EXECUTE | _PAGE_WRITE | CACHEDEF)
#define __S111 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
                        _PAGE_EXECUTE | _PAGE_WRITE | CACHEDEF)
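
/*
 * Illustrative note (not in the original source): generic mm code picks
 * one of these via protection_map[], indexed by the VM_READ/VM_WRITE/
 * VM_EXEC/VM_SHARED bits of a mapping.  A private PROT_READ|PROT_WRITE
 * mapping selects __P011, which deliberately aliases the read-only
 * __P001 so the first store faults and triggers copy-on-write; the
 * shared counterpart __S011 really does carry _PAGE_WRITE.
 */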

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];  /* located in head.S */

/* Seems to be zero even in architectures where the zero page is firewalled? */
#define FIRST_USER_ADDRESS 0UL
#define pte_special(pte)        0
#define pte_mkspecial(pte)      (pte)

/*  HUGETLB not working currently  */
#ifdef CONFIG_HUGETLB_PAGE
#define pte_mkhuge(pte) __pte((pte_val(pte) & ~0x3) | HVM_HUGEPAGE_SIZE)
#endif

/*
 * For now, assume that higher-level code will do TLB/MMU invalidations
 * and don't insert that overhead into this low-level function.
 */
extern void sync_icache_dcache(pte_t pte);

#define pte_present_exec_user(pte) \
        ((pte_val(pte) & (_PAGE_EXECUTE | _PAGE_USER)) == \
        (_PAGE_EXECUTE | _PAGE_USER))

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
        /*  should really be using pte_exec, if it weren't declared later. */
        if (pte_present_exec_user(pteval))
                sync_icache_dcache(pteval);

        *ptep = pteval;
}

/*
 * For the Hexagon Virtual Machine MMU (or its emulation), a null/invalid
 * L1 PTE (PMD/PGD) has 7 in the least significant bits. For the L2 PTE
 * (Linux PTE), the key is to have bits 11..9 all zero.  We'd use 0x7
 * as a universal null entry, but some of those least significant bits
 * are interpreted by software.
 */
#define _NULL_PMD       0x7
#define _NULL_PTE       0x0

static inline void pmd_clear(pmd_t *pmd_entry_ptr)
{
        pmd_val(*pmd_entry_ptr) = _NULL_PMD;
}

/*
 * Conveniently, a null PTE value is invalid.
 */
static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                                pte_t *ptep)
{
        pte_val(*ptep) = _NULL_PTE;
}

#ifdef NEED_PMD_INDEX_DESPITE_BEING_2_LEVEL
/**
 * pmd_index - returns the index of the entry in the PMD page
 * which would control the given virtual address
 */
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

#endif

/**
 * pgd_index - returns the index of the entry in the PGD page
 * which would control the given virtual address
 *
 * This returns the *index* for the address in the pgd_t
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))

/*
 * pgd_offset - find an offset in a page-table-directory
 */
#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))

/*
 * pgd_offset_k - get kernel (init_mm) pgd entry pointer for addr
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
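
/*
 * Worked example (illustrative, not from the original source): with
 * PGDIR_SHIFT == 22, an address of 0x00c01000 gives
 * pgd_index() == 3, so pgd_offset(mm, 0x00c01000) points at the fourth
 * pgd_t in mm->pgd, covering virtual addresses 0x00c00000..0x00ffffff.
 */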

/**
 * pmd_none - check if pmd_entry is mapped
 * @pmd_entry:  pmd entry
 *
 * MIPS checks it against that "invalid pte table" thing.
 */
static inline int pmd_none(pmd_t pmd)
{
        return pmd_val(pmd) == _NULL_PMD;
}

/**
 * pmd_present - is there a page table behind this?
 * Essentially the inverse of pmd_none.  We maybe
 * save an inline instruction by defining it this
 * way, instead of simply "!pmd_none".
 */
static inline int pmd_present(pmd_t pmd)
{
        return pmd_val(pmd) != (unsigned long)_NULL_PMD;
}

/**
 * pmd_bad - check if a PMD entry is "bad". That might mean swapped out.
 * As we have no known cause of badness, it's null, as it is for many
 * architectures.
 */
static inline int pmd_bad(pmd_t pmd)
{
        return 0;
}

/*
 * pmd_page - converts a PMD entry to a page pointer
 */
#define pmd_page(pmd)  (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
#define pmd_pgtable(pmd) pmd_page(pmd)

/**
 * pte_none - check if pte is mapped
 * @pte: pte_t entry
 */
static inline int pte_none(pte_t pte)
{
        return pte_val(pte) == _NULL_PTE;
}

/*
 * pte_present - check if page is present
 */
static inline int pte_present(pte_t pte)
{
        return pte_val(pte) & _PAGE_PRESENT;
}

/* mk_pte - make a PTE out of a page pointer and protection bits */
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))

/* pte_page - returns a page (frame pointer/descriptor?) based on a PTE */
#define pte_page(x) pfn_to_page(pte_pfn(x))

/* pte_mkold - mark PTE as not recently accessed */
static inline pte_t pte_mkold(pte_t pte)
{
        pte_val(pte) &= ~_PAGE_ACCESSED;
        return pte;
}

/* pte_mkyoung - mark PTE as recently accessed */
static inline pte_t pte_mkyoung(pte_t pte)
{
        pte_val(pte) |= _PAGE_ACCESSED;
        return pte;
}

/* pte_mkclean - mark page as in sync with backing store */
static inline pte_t pte_mkclean(pte_t pte)
{
        pte_val(pte) &= ~_PAGE_DIRTY;
        return pte;
}

/* pte_mkdirty - mark page as modified */
static inline pte_t pte_mkdirty(pte_t pte)
{
        pte_val(pte) |= _PAGE_DIRTY;
        return pte;
}

/* pte_young - "is PTE marked as accessed"? */
static inline int pte_young(pte_t pte)
{
        return pte_val(pte) & _PAGE_ACCESSED;
}

/* pte_dirty - "is PTE dirty?" */
static inline int pte_dirty(pte_t pte)
{
        return pte_val(pte) & _PAGE_DIRTY;
}

/* pte_modify - set protection bits on PTE */
static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
{
        pte_val(pte) &= PAGE_MASK;
        pte_val(pte) |= pgprot_val(prot);
        return pte;
}
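
/*
 * Illustrative note (not in the original source): PAGE_MASK keeps the
 * page frame number and clears the sub-page bits, so pte_modify()
 * replaces only the protection/cache attributes; e.g.
 * pte_modify(pte, PAGE_READONLY) would preserve the pfn while dropping
 * _PAGE_WRITE.
 */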

/* pte_wrprotect - mark page as not writable */
static inline pte_t pte_wrprotect(pte_t pte)
{
        pte_val(pte) &= ~_PAGE_WRITE;
        return pte;
}

/* pte_mkwrite - mark page as writable */
static inline pte_t pte_mkwrite(pte_t pte)
{
        pte_val(pte) |= _PAGE_WRITE;
        return pte;
}

/* pte_mkexec - mark PTE as executable */
static inline pte_t pte_mkexec(pte_t pte)
{
        pte_val(pte) |= _PAGE_EXECUTE;
        return pte;
}

/* pte_read - "is PTE marked as readable?" */
static inline int pte_read(pte_t pte)
{
        return pte_val(pte) & _PAGE_READ;
}

/* pte_write - "is PTE marked as writable?" */
static inline int pte_write(pte_t pte)
{
        return pte_val(pte) & _PAGE_WRITE;
}

/* pte_exec - "is PTE marked as executable?" */
static inline int pte_exec(pte_t pte)
{
        return pte_val(pte) & _PAGE_EXECUTE;
}

/* __pte_to_swp_entry - extract swap entry from PTE */
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })

/* __swp_entry_to_pte - extract PTE from swap entry */
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })

/* pfn_pte - convert page number and protection value to page table entry */
#define pfn_pte(pfn, pgprot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(pgprot))

/* pte_pfn - convert pte to page frame number */
#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))

/*
 * set_pte_at - update page table and do whatever magic may be
 * necessary to make the underlying hardware/firmware take note.
 *
 * VM may require a virtual instruction to alert the MMU.
 */
#define set_pte_at(mm, addr, ptep, pte) set_pte(ptep, pte)

/*
 * May need to invoke the virtual machine as well...
 */
#define pte_unmap(pte)          do { } while (0)
#define pte_unmap_nested(pte)   do { } while (0)

/*
 * pte_offset_map - returns the linear address of the page table entry
 * corresponding to an address
 */
#define pte_offset_map(dir, address)                                    \
        ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))

#define pte_offset_map_nested(pmd, addr) pte_offset_map(pmd, addr)

/* pte_offset_kernel - kernel version of pte_offset */
#define pte_offset_kernel(dir, address) \
        ((pte_t *) (unsigned long) __va(pmd_val(*dir) & PAGE_MASK) \
                                +  __pte_offset(address))

/* ZERO_PAGE - returns the globally shared zero page */
#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))

#define __pte_offset(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
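
/*
 * Illustrative sketch (not in the original source): how the macros
 * above compose into a software walk of the two-level table.  The
 * hypothetical helper below assumes the folded pud/pmd accessors from
 * the asm-generic nopmd/nopud headers included at the top of this
 * file; it is compiled out and exists for reference only.
 */
#if 0
static pte_t *example_lookup_pte(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd = pgd_offset(mm, addr);      /* index into the pgd */
        pud_t *pud = pud_offset(pgd, addr);     /* folded: same entry */
        pmd_t *pmd = pmd_offset(pud, addr);     /* folded: same entry */

        if (pmd_none(*pmd))                     /* no L2 table behind it */
                return NULL;

        return pte_offset_kernel(pmd, addr);    /* entry within L2 table */
}
#endif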

/*  I think this is in case we have page table caches; needed by init/main.c  */
#define pgtable_cache_init()    do { } while (0)

/*
 * Swap/file PTE definitions.  If _PAGE_PRESENT is zero, the rest of the PTE is
 * interpreted as swap information.  The remaining free bits are interpreted as
 * swap type/offset tuple.  Rather than have the TLB fill handler test
 * _PAGE_PRESENT, we're going to reserve the permissions bits and set them to
 * all zeros for swap entries, which speeds up the miss handler at the cost of
 * 3 bits of offset.  That trade-off can be revisited if necessary, but Hexagon
 * processor architecture and target applications suggest a lot of TLB misses
 * and not much swap space.
 *
 * Format of swap PTE:
 *      bit     0:      Present (zero)
 *      bits    1-5:    swap type (arch independent layer uses 5 bits max)
 *      bits    6-9:    bits 3:0 of offset
 *      bits    10-12:  effectively _PAGE_PROTNONE (all zero)
 *      bits    13-31:  bits 22:4 of swap offset
 *
 * The split offset makes some of the following macros a little gnarly,
 * but there's plenty of precedent for this sort of thing.
 */

/* Used for swap PTEs */
#define __swp_type(swp_pte)             (((swp_pte).val >> 1) & 0x1f)

#define __swp_offset(swp_pte) \
        ((((swp_pte).val >> 6) & 0xf) | (((swp_pte).val >> 9) & 0x7ffff0))

#define __swp_entry(type, offset) \
        ((swp_entry_t)  { \
                (((type) << 1) | \
                 (((offset) & 0x7ffff0) << 9) | (((offset) & 0xf) << 6)) })
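
/*
 * Worked example (illustrative, not from the original source):
 * __swp_entry(2, 0x123) packs to
 *
 *      (2 << 1) | (0x120 << 9) | (0x3 << 6) == 0x000240c4
 *
 * and __swp_type()/__swp_offset() recover 2 and 0x123 from that value.
 */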

/*  Oh boy.  There are a lot of possible arch overrides found in this file.  */
#include <asm-generic/pgtable.h>

#endif