linux/arch/tile/include/asm/pgtable.h
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * This file contains the functions and defines necessary to modify and use
 * the TILE page table tree.
 */

#ifndef _ASM_TILE_PGTABLE_H
#define _ASM_TILE_PGTABLE_H

#include <hv/hypervisor.h>

#ifndef __ASSEMBLY__

#include <linux/bitops.h>
#include <linux/threads.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/pfn.h>
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <asm/page.h>

struct mm_struct;
struct vm_area_struct;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas, etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
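
/*
 * Illustrative usage (a sketch, not part of the original header): a
 * read fault on an untouched anonymous mapping can be satisfied by
 * mapping the shared zero page read-only, along the lines of
 *
 *	pte_t pte = mk_pte(ZERO_PAGE(vaddr), PAGE_READONLY);
 *
 * using mk_pte() and PAGE_READONLY as defined later in this header.
 */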

extern pgd_t swapper_pg_dir[];
extern pgprot_t swapper_pgprot;
extern struct kmem_cache *pgd_cache;
extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

/*
 * The very last slots in the pgd_t are for addresses unusable by Linux
 * (pgd_addr_invalid() returns true).  So we use them for the list structure.
 * The x86 code we are modelled on uses the page->private/index fields
 * (older 2.6 kernels) or the lru list (newer 2.6 kernels), but since
 * our pgds are so much smaller than a page, it seems a waste to
 * spend a whole page on each pgd.
 */
#define PGD_LIST_OFFSET \
  ((PTRS_PER_PGD * sizeof(pgd_t)) - sizeof(struct list_head))
#define pgd_to_list(pgd) \
  ((struct list_head *)((char *)(pgd) + PGD_LIST_OFFSET))
#define list_to_pgd(list) \
  ((pgd_t *)((char *)(list) - PGD_LIST_OFFSET))
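
/*
 * Illustrative usage (a sketch, not part of the original header):
 * with pgd_lock held, every pgd in the system can be visited through
 * the embedded list heads, e.g.
 *
 *	struct list_head *pos;
 *	list_for_each(pos, &pgd_list)
 *		visit_pgd(list_to_pgd(pos));
 *
 * where visit_pgd() is a hypothetical callback.
 */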

extern void pgtable_cache_init(void);
extern void paging_init(void);
extern void set_page_homes(void);

#define FIRST_USER_ADDRESS      0UL

#define _PAGE_PRESENT           HV_PTE_PRESENT
#define _PAGE_HUGE_PAGE         HV_PTE_PAGE
#define _PAGE_SUPER_PAGE        HV_PTE_SUPER
#define _PAGE_READABLE          HV_PTE_READABLE
#define _PAGE_WRITABLE          HV_PTE_WRITABLE
#define _PAGE_EXECUTABLE        HV_PTE_EXECUTABLE
#define _PAGE_ACCESSED          HV_PTE_ACCESSED
#define _PAGE_DIRTY             HV_PTE_DIRTY
#define _PAGE_GLOBAL            HV_PTE_GLOBAL
#define _PAGE_USER              HV_PTE_USER

/*
 * All the "standard" bits.  Cache-control bits are managed elsewhere.
 * This is used to test for valid level-2 page table pointers by checking
 * all the bits, and to mask away the cache control bits for mprotect.
 */
#define _PAGE_ALL (\
  _PAGE_PRESENT | \
  _PAGE_HUGE_PAGE | \
  _PAGE_SUPER_PAGE | \
  _PAGE_READABLE | \
  _PAGE_WRITABLE | \
  _PAGE_EXECUTABLE | \
  _PAGE_ACCESSED | \
  _PAGE_DIRTY | \
  _PAGE_GLOBAL | \
  _PAGE_USER \
)

#define PAGE_NONE \
        __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
#define PAGE_SHARED \
        __pgprot(_PAGE_PRESENT | _PAGE_READABLE | _PAGE_WRITABLE | \
                 _PAGE_USER | _PAGE_ACCESSED)

#define PAGE_SHARED_EXEC \
        __pgprot(_PAGE_PRESENT | _PAGE_READABLE | _PAGE_WRITABLE | \
                 _PAGE_EXECUTABLE | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC \
        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_READABLE)
#define PAGE_COPY_EXEC \
        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | \
                 _PAGE_READABLE | _PAGE_EXECUTABLE)
#define PAGE_COPY \
        PAGE_COPY_NOEXEC
#define PAGE_READONLY \
        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_READABLE)
#define PAGE_READONLY_EXEC \
        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | \
                 _PAGE_READABLE | _PAGE_EXECUTABLE)

#define _PAGE_KERNEL_RO \
 (_PAGE_PRESENT | _PAGE_GLOBAL | _PAGE_READABLE | _PAGE_ACCESSED)
#define _PAGE_KERNEL \
 (_PAGE_KERNEL_RO | _PAGE_WRITABLE | _PAGE_DIRTY)
#define _PAGE_KERNEL_EXEC       (_PAGE_KERNEL_RO | _PAGE_EXECUTABLE)

#define PAGE_KERNEL             __pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_RO          __pgprot(_PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC        __pgprot(_PAGE_KERNEL_EXEC)

#define page_to_kpgprot(p) PAGE_KERNEL

/*
 * We could tighten these up, but for now writable or executable
 * implies readable.
 */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY      /* this is write-only, which we won't support */
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY_EXEC
#define __P101  PAGE_READONLY_EXEC
#define __P110  PAGE_COPY_EXEC
#define __P111  PAGE_COPY_EXEC

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY_EXEC
#define __S101  PAGE_READONLY_EXEC
#define __S110  PAGE_SHARED_EXEC
#define __S111  PAGE_SHARED_EXEC

/*
 * All the normal _PAGE_ALL bits are ignored for PMDs, except
 * _PAGE_PRESENT, which must be one, and _PAGE_HUGE_PAGE, which must
 * be zero.  We set the ignored bits to zero.
 */
#define _PAGE_TABLE     _PAGE_PRESENT

/* Inherit the caching flags from the old protection bits. */
#define pgprot_modify(oldprot, newprot) \
  (pgprot_t) { ((oldprot).val & ~_PAGE_ALL) | (newprot).val }

/* Just setting the PFN to zero suffices. */
#define pte_pgprot(x) hv_pte_set_pa((x), 0)

/*
 * For PTEs and PDEs, we must clear the Present bit first when
 * clearing a page table entry, so clear the bottom half first and
 * enforce ordering with a barrier.
 */
static inline void __pte_clear(pte_t *ptep)
{
#ifdef __tilegx__
        ptep->val = 0;
#else
        u32 *tmp = (u32 *)ptep;
        tmp[0] = 0;
        barrier();
        tmp[1] = 0;
#endif
}
#define pte_clear(mm, addr, ptep) __pte_clear(ptep)

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
#define pte_present hv_pte_get_present
#define pte_mknotpresent hv_pte_clear_present
#define pte_user hv_pte_get_user
#define pte_read hv_pte_get_readable
#define pte_dirty hv_pte_get_dirty
#define pte_young hv_pte_get_accessed
#define pte_write hv_pte_get_writable
#define pte_exec hv_pte_get_executable
#define pte_huge hv_pte_get_page
#define pte_super hv_pte_get_super
#define pte_rdprotect hv_pte_clear_readable
#define pte_exprotect hv_pte_clear_executable
#define pte_mkclean hv_pte_clear_dirty
#define pte_mkold hv_pte_clear_accessed
#define pte_wrprotect hv_pte_clear_writable
#define pte_mksmall hv_pte_clear_page
#define pte_mkread hv_pte_set_readable
#define pte_mkexec hv_pte_set_executable
#define pte_mkdirty hv_pte_set_dirty
#define pte_mkyoung hv_pte_set_accessed
#define pte_mkwrite hv_pte_set_writable
#define pte_mkhuge hv_pte_set_page
#define pte_mksuper hv_pte_set_super

#define pte_special(pte) 0
#define pte_mkspecial(pte) (pte)

/*
 * Use some spare bits in the PTE for user-caching tags.
 */
#define pte_set_forcecache hv_pte_set_client0
#define pte_get_forcecache hv_pte_get_client0
#define pte_clear_forcecache hv_pte_clear_client0
#define pte_set_anyhome hv_pte_set_client1
#define pte_get_anyhome hv_pte_get_client1
#define pte_clear_anyhome hv_pte_clear_client1

/*
 * A migrating PTE has _PAGE_PRESENT clear but all the other bits preserved.
 */
#define pte_migrating hv_pte_get_migrating
#define pte_mkmigrate(x) hv_pte_set_migrating(hv_pte_clear_present(x))
#define pte_donemigrate(x) hv_pte_set_present(hv_pte_clear_migrating(x))
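
/*
 * Illustrative sketch (not part of the original header) of the PTE
 * migration life cycle described above; the actual page copy between
 * homes is elided.
 */
static inline pte_t pte_migrate_example(pte_t pte)
{
        pte = pte_mkmigrate(pte);      /* not present while the page moves */
        /* ... re-home or copy the underlying page here ... */
        return pte_donemigrate(pte);   /* present again at its new home */
}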

#define pte_ERROR(e) \
        pr_err("%s:%d: bad pte 0x%016llx\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
        pr_err("%s:%d: bad pgd 0x%016llx\n", __FILE__, __LINE__, pgd_val(e))

/* Return PA and protection info for a given kernel VA. */
int va_to_cpa_and_pte(void *va, phys_addr_t *cpa, pte_t *pte);

/*
 * __set_pte() ensures we write the 64-bit PTE with 32-bit words in
 * the right order on 32-bit platforms and also allows us to write
 * hooks to check valid PTEs, etc., if we want.
 */
void __set_pte(pte_t *ptep, pte_t pte);

/*
 * set_pte() sets the given PTE and also sanity-checks the
 * requested PTE against the page homecaching.  Unspecified parts
 * of the PTE are filled in when it is written to memory, i.e. all
 * caching attributes if "!forcecache", or the home cpu if "anyhome".
 */
extern void set_pte(pte_t *ptep, pte_t pte);
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
#define set_pte_atomic(pteptr, pteval) set_pte(pteptr, pteval)

#define pte_page(x)             pfn_to_page(pte_pfn(x))

static inline int pte_none(pte_t pte)
{
        return !pte.val;
}

static inline unsigned long pte_pfn(pte_t pte)
{
        return PFN_DOWN(hv_pte_get_pa(pte));
}

/* Set or get the remote cache cpu in a pgprot with remote caching. */
extern pgprot_t set_remote_cache_cpu(pgprot_t prot, int cpu);
extern int get_remote_cache_cpu(pgprot_t prot);
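
/*
 * Illustrative sketch (not part of the original header): build a
 * protection value that is explicitly cached on one remote tile by
 * combining the "forcecache" client bit with a remote cache cpu.
 */
static inline pgprot_t remote_cached_prot_example(pgprot_t prot, int cpu)
{
        prot = pte_set_forcecache(prot);   /* caching is fully specified */
        return set_remote_cache_cpu(prot, cpu);
}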

static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
        return hv_pte_set_pa(prot, PFN_PHYS(pfn));
}

/* Support for priority mappings. */
extern void start_mm_caching(struct mm_struct *mm);
extern void check_mm_caching(struct mm_struct *prev, struct mm_struct *next);

/*
 * Encode and de-code a swap entry (see <linux/swapops.h>).
 * We put the swap file type+offset in the 32 high bits;
 * I believe we can just leave the low bits clear.
 */
#define __swp_type(swp)         ((swp).val & 0x1f)
#define __swp_offset(swp)       ((swp).val >> 5)
#define __swp_entry(type, off)  ((swp_entry_t) { (type) | ((off) << 5) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).val >> 32 })
#define __swp_entry_to_pte(swp) ((pte_t) { (((long long) ((swp).val)) << 32) })
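
/*
 * Worked example (illustrative only): swap type 3 at offset 0x1234
 * encodes as swp.val == 3 | (0x1234 << 5) == 0x24683, so the PTE
 * holds 0x24683 << 32; decoding masks and shifts the same bits back.
 */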

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define mk_pte(page, pgprot)    pfn_pte(page_to_pfn(page), (pgprot))

/*
 * If we are doing an mprotect(), just accept the new vma->vm_page_prot
 * value and combine it with the PFN from the old PTE to get a new PTE.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        return pfn_pte(pte_pfn(pte), newprot);
}
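
/*
 * Illustrative usage (a sketch, not part of the original header): the
 * generic mprotect() path does essentially
 *
 *	ptent = pte_modify(ptent, vma->vm_page_prot);
 *
 * keeping the PFN while swapping in the new protection bits.
 */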

/*
 * The pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * This macro returns the index of the entry in the pgd page which would
 * control the given virtual address.
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
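
/*
 * Worked example (illustrative values only): with PGDIR_SHIFT == 24
 * and PTRS_PER_PGD == 256, pgd_index(0xfd001234) ==
 * (0xfd001234 >> 24) & 255 == 0xfd == 253.
 */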

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's.
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))

/*
 * A shortcut which implies the use of the kernel's pgd, instead
 * of a process's.
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
#define pte_unmap(pte) do { } while (0)

/* Clear a non-executable kernel PTE and flush it from the TLB. */
#define kpte_clear_flush(ptep, vaddr)           \
do {                                            \
        pte_clear(&init_mm, (vaddr), (ptep));   \
        local_flush_tlb_page(FLUSH_NONEXEC, (vaddr), PAGE_SIZE); \
} while (0)
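
/*
 * Illustrative usage (a sketch, not part of the original header):
 * tearing down a temporary kernel mapping in a kmap_atomic()-style
 * unmap path looks like
 *
 *	kpte_clear_flush(ptep, vaddr);
 *
 * after which the VA no longer maps the page on this cpu.
 */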

/*
 * The kernel page tables contain what we need, and we flush when we
 * change specific page table entries.
 */
#define update_mmu_cache(vma, address, pte) do { } while (0)

#ifdef CONFIG_FLATMEM
#define kern_addr_valid(addr)   (1)
#endif /* CONFIG_FLATMEM */

extern void vmalloc_sync_all(void);

#endif /* !__ASSEMBLY__ */

#ifdef __tilegx__
#include <asm/pgtable_64.h>
#else
#include <asm/pgtable_32.h>
#endif

#ifndef __ASSEMBLY__

static inline int pmd_none(pmd_t pmd)
{
        /*
         * Only check the low word on 32-bit platforms, since it might
         * be out of sync with the upper half.
         */
        return (unsigned long)pmd_val(pmd) == 0;
}

static inline int pmd_present(pmd_t pmd)
{
        return pmd_val(pmd) & _PAGE_PRESENT;
}

static inline int pmd_bad(pmd_t pmd)
{
        return ((pmd_val(pmd) & _PAGE_ALL) != _PAGE_TABLE);
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
        return npg >> (20 - PAGE_SHIFT);
}
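
/*
 * Worked example (illustrative page size): with 64 KB pages
 * (PAGE_SHIFT == 16), pages_to_mb(512) == 512 >> (20 - 16) == 32,
 * i.e. 512 pages span 32 MB.
 */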

/*
 * The pmd can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * This function returns the index of the entry in the pmd which would
 * control the given virtual address.
 */
static inline unsigned long pmd_index(unsigned long address)
{
        return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                                            unsigned long address,
                                            pmd_t *pmdp)
{
        return ptep_test_and_clear_young(vma, address, pmdp_ptep(pmdp));
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
                                      unsigned long address, pmd_t *pmdp)
{
        ptep_set_wrprotect(mm, address, pmdp_ptep(pmdp));
}

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
                                       unsigned long address,
                                       pmd_t *pmdp)
{
        return pte_pmd(ptep_get_and_clear(mm, address, pmdp_ptep(pmdp)));
}

static inline void __set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
        set_pte(pmdp_ptep(pmdp), pmd_pte(pmdval));
}

#define set_pmd_at(mm, addr, pmdp, pmdval) __set_pmd(pmdp, pmdval)

/* Create a pmd from a PTFN. */
static inline pmd_t ptfn_pmd(unsigned long ptfn, pgprot_t prot)
{
        return pte_pmd(hv_pte_set_ptfn(prot, ptfn));
}

/* Return the page-table frame number (ptfn) that a pmd_t points at. */
#define pmd_ptfn(pmd) hv_pte_get_ptfn(pmd_pte(pmd))

/*
 * A given kernel pmd_t maps to a specific virtual address (either a
 * kernel huge page or a kernel pte_t table).  Since kernel pte_t
 * tables can be aligned at sub-page granularity, this function can
 * return non-page-aligned pointers, despite its name.
 */
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
        phys_addr_t pa =
                (phys_addr_t)pmd_ptfn(pmd) << HV_LOG2_PAGE_TABLE_ALIGN;
        return (unsigned long)__va(pa);
}

/*
 * A pmd_t points to the base of a huge page or to a pte_t array.
 * If a pte_t array, since we can have multiple per page, we don't
 * have a one-to-one mapping of pmd_t's to pages.  However, this is
 * OK for pte_lockptr(), since we just end up with potentially one
 * lock being used for several pte_t arrays.
 */
#define pmd_page(pmd) pfn_to_page(PFN_DOWN(HV_PTFN_TO_CPA(pmd_ptfn(pmd))))

static inline void pmd_clear(pmd_t *pmdp)
{
        __pte_clear(pmdp_ptep(pmdp));
}

#define pmd_mknotpresent(pmd)   pte_pmd(pte_mknotpresent(pmd_pte(pmd)))
#define pmd_young(pmd)          pte_young(pmd_pte(pmd))
#define pmd_mkyoung(pmd)        pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mkold(pmd)          pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)        pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_write(pmd)          pte_write(pmd_pte(pmd))
#define pmd_wrprotect(pmd)      pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)        pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_huge_page(pmd)      pte_huge(pmd_pte(pmd))
#define pmd_mkhuge(pmd)         pte_pmd(pte_mkhuge(pmd_pte(pmd)))
#define __HAVE_ARCH_PMD_WRITE

#define pfn_pmd(pfn, pgprot)    pte_pmd(pfn_pte((pfn), (pgprot)))
#define pmd_pfn(pmd)            pte_pfn(pmd_pte(pmd))
#define mk_pmd(page, pgprot)    pfn_pmd(page_to_pfn(page), (pgprot))

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
        return pfn_pmd(pmd_pfn(pmd), newprot);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define has_transparent_hugepage() 1
#define pmd_trans_huge pmd_huge_page

static inline pmd_t pmd_mksplitting(pmd_t pmd)
{
        return pte_pmd(hv_pte_set_client2(pmd_pte(pmd)));
}

static inline int pmd_trans_splitting(pmd_t pmd)
{
        return hv_pte_get_client2(pmd_pte(pmd));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * The pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * This function returns the index of the entry in the pte page which would
 * control the given virtual address.
 */
static inline unsigned long pte_index(unsigned long address)
{
        return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
        return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}
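
/*
 * Illustrative sketch (not part of the original header): a software
 * walk from a kernel VA to its pte_t, assuming the usual folded
 * pud/pmd helpers from the headers included here:
 *
 *	pgd_t *pgd = pgd_offset_k(va);
 *	pmd_t *pmd = pmd_offset(pud_offset(pgd, va), va);
 *	pte_t *ptep = (pmd_none(*pmd) || pmd_bad(*pmd)) ?
 *		NULL : pte_offset_kernel(pmd, va);
 */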

#include <asm-generic/pgtable.h>

/* Support /proc/NN/pgtable API. */
struct seq_file;
int arch_proc_pgtable_show(struct seq_file *m, struct mm_struct *mm,
                           unsigned long vaddr, unsigned long pagesize,
                           pte_t *ptep, void **datap);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_PGTABLE_H */