linux/arch/arc/include/asm/pgtable-bits-arcv2.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

/*
 * page table flags for software walked/managed MMUv3 (ARC700) and MMUv4 (HS)
 * These correspond to the analogous bits in the TLB entries
 */

#ifndef _ASM_ARC_PGTABLE_BITS_ARCV2_H
#define _ASM_ARC_PGTABLE_BITS_ARCV2_H

#ifdef CONFIG_ARC_CACHE_PAGES
#define _PAGE_CACHEABLE         (1 << 0)  /* Cached (H) */
#else
#define _PAGE_CACHEABLE         0
#endif

#define _PAGE_EXECUTE           (1 << 1)  /* User Execute  (H) */
#define _PAGE_WRITE             (1 << 2)  /* User Write    (H) */
#define _PAGE_READ              (1 << 3)  /* User Read     (H) */
#define _PAGE_ACCESSED          (1 << 4)  /* Accessed      (s) */
#define _PAGE_DIRTY             (1 << 5)  /* Modified      (s) */
#define _PAGE_SPECIAL           (1 << 6)
#define _PAGE_GLOBAL            (1 << 8)  /* ASID agnostic (H) */
#define _PAGE_PRESENT           (1 << 9)  /* PTE/TLB Valid (H) */

#ifdef CONFIG_ARC_MMU_V4
#define _PAGE_HW_SZ             (1 << 10)  /* Normal/super (H) */
#else
#define _PAGE_HW_SZ             0
#endif
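
/*
 * Illustrative summary of the flag bits defined above (bit 7 carries no
 * flag here; the bits covered by PAGE_MASK_PHYS are assumed to hold the
 * physical page frame number):
 *
 *  bit:   10     9       8      6       5     4        3    2     1       0
 *  flag: HW_SZ PRESENT GLOBAL SPECIAL DIRTY ACCESSED READ WRITE EXECUTE CACHEABLE
 */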

/* Defaults for every user page */
#define ___DEF          (_PAGE_PRESENT | _PAGE_CACHEABLE)

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK  (PAGE_MASK_PHYS | _PAGE_ACCESSED | _PAGE_DIRTY | \
                                                           _PAGE_SPECIAL)

/* More abbreviated helpers */
#define PAGE_U_NONE     __pgprot(___DEF)
#define PAGE_U_R        __pgprot(___DEF | _PAGE_READ)
#define PAGE_U_W_R      __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE)
#define PAGE_U_X_R      __pgprot(___DEF | _PAGE_READ | _PAGE_EXECUTE)
#define PAGE_U_X_W_R    __pgprot(___DEF \
                                | _PAGE_READ | _PAGE_WRITE | _PAGE_EXECUTE)
#define PAGE_KERNEL     __pgprot(___DEF | _PAGE_GLOBAL \
                                | _PAGE_READ | _PAGE_WRITE | _PAGE_EXECUTE)

#define PAGE_SHARED     PAGE_U_W_R

#define pgprot_noncached(prot)  (__pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE))
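
/*
 * Typical use (illustrative sketch, e.g. a driver's mmap() method mapping
 * device memory uncached):
 *
 *   vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *
 * On ARC this simply clears _PAGE_CACHEABLE in the protection bits.
 */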

/*
 * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
 *
 * Certain cases have 1:1 mapping
 *  e.g. __P101 means VM_READ, VM_EXEC and !VM_SHARED
 *       which directly corresponds to PAGE_U_X_R
 *
 * Other rules which cause the divergence from 1:1 mapping
 *
 *  1. Although ARC700 can do exclusive execute/write protection (meaning R
 *     can be tracked independently of X/W unlike some other CPUs), still to
 *     keep things consistent with other archs:
 *      -Write implies Read:   W => R
 *      -Execute implies Read: X => R
 *
 *  2. Pvt Writable doesn't have Write Enabled initially: Pvt-W => !W
 *     This is to enable the COW mechanism
 */
        /* xwr */
#define __P000  PAGE_U_NONE
#define __P001  PAGE_U_R
#define __P010  PAGE_U_R        /* Pvt-W => !W */
#define __P011  PAGE_U_R        /* Pvt-W => !W */
#define __P100  PAGE_U_X_R      /* X => R */
#define __P101  PAGE_U_X_R
#define __P110  PAGE_U_X_R      /* Pvt-W => !W and X => R */
#define __P111  PAGE_U_X_R      /* Pvt-W => !W */

#define __S000  PAGE_U_NONE
#define __S001  PAGE_U_R
#define __S010  PAGE_U_W_R      /* W => R */
#define __S011  PAGE_U_W_R
#define __S100  PAGE_U_X_R      /* X => R */
#define __S101  PAGE_U_X_R
#define __S110  PAGE_U_X_W_R    /* X => R */
#define __S111  PAGE_U_X_W_R
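
/*
 * Illustrative sketch of rule 2 above: a MAP_PRIVATE mapping with
 * PROT_READ|PROT_WRITE selects __P011 == PAGE_U_R, i.e. _PAGE_WRITE is NOT
 * set initially. The first write faults, the generic mm code copies the
 * page (COW) and only then installs a writable PTE, roughly:
 *
 *   entry = mk_pte(new_page, vma->vm_page_prot);
 *   entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 */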

#ifndef __ASSEMBLY__

#define pte_write(pte)          (pte_val(pte) & _PAGE_WRITE)
#define pte_dirty(pte)          (pte_val(pte) & _PAGE_DIRTY)
#define pte_young(pte)          (pte_val(pte) & _PAGE_ACCESSED)
#define pte_special(pte)        (pte_val(pte) & _PAGE_SPECIAL)

#define PTE_BIT_FUNC(fn, op) \
        static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
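
/*
 * For illustration, PTE_BIT_FUNC(mkwrite, |= (_PAGE_WRITE)) expands to:
 *
 *   static inline pte_t pte_mkwrite(pte_t pte)
 *   { pte_val(pte) |= (_PAGE_WRITE); return pte; }
 */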

PTE_BIT_FUNC(mknotpresent,     &= ~(_PAGE_PRESENT));
PTE_BIT_FUNC(wrprotect, &= ~(_PAGE_WRITE));
PTE_BIT_FUNC(mkwrite,   |= (_PAGE_WRITE));
PTE_BIT_FUNC(mkclean,   &= ~(_PAGE_DIRTY));
PTE_BIT_FUNC(mkdirty,   |= (_PAGE_DIRTY));
PTE_BIT_FUNC(mkold,     &= ~(_PAGE_ACCESSED));
PTE_BIT_FUNC(mkyoung,   |= (_PAGE_ACCESSED));
PTE_BIT_FUNC(mkspecial, |= (_PAGE_SPECIAL));
PTE_BIT_FUNC(mkhuge,    |= (_PAGE_HW_SZ));

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
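
/*
 * Illustrative use (the generic mprotect()/change_protection() path does
 * essentially this): apply a new protection while preserving the PFN and
 * the soft bits in _PAGE_CHG_MASK (ACCESSED/DIRTY/SPECIAL):
 *
 *   ptent = pte_modify(oldpte, newprot);
 */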

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pteval)
{
        set_pte(ptep, pteval);
}

void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
                      pte_t *ptep);

/*
 * Encode a swap {type,off} tuple into a PTE.
 * The low 13 bits are reserved for the 5-bit @type; bits 12-5 are kept zero,
 * ensuring that _PAGE_PRESENT (bit 9) is zero in a PTE holding a swap entry.
 */
#define __swp_entry(type, off)          ((swp_entry_t) \
                                        { ((type) & 0x1f) | ((off) << 13) })

/* Decode a PTE containing a swap "identifier" into its constituents */
#define __swp_type(pte_lookalike)       (((pte_lookalike).val) & 0x1f)
#define __swp_offset(pte_lookalike)     ((pte_lookalike).val >> 13)
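
/*
 * Resulting swap PTE layout, as implied by the macros above (illustrative;
 * the overall PTE width depends on the configuration):
 *
 *   bits [4:0]   swap type
 *   bits [12:5]  zero (so _PAGE_PRESENT, bit 9, reads as 0)
 *   bits [13..]  swap offset
 */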

#define __pte_to_swp_entry(pte)         ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)           ((pte_t) { (x).val })

#define kern_addr_valid(addr)   (1)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#include <asm/hugepage.h>
#endif

#endif /* __ASSEMBLY__ */

#endif