linux/arch/x86/include/asm/pgtable-3level.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_3LEVEL_H
#define _ASM_X86_PGTABLE_3LEVEL_H

/*
 * Intel Physical Address Extension (PAE) Mode - three-level page
 * tables on PPro+ CPUs.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#define pte_ERROR(e)                                                    \
        pr_err("%s:%d: bad pte %p(%08lx%08lx)\n",                       \
               __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
#define pmd_ERROR(e)                                                    \
        pr_err("%s:%d: bad pmd %p(%016Lx)\n",                           \
               __FILE__, __LINE__, &(e), pmd_val(e))
#define pgd_ERROR(e)                                                    \
        pr_err("%s:%d: bad pgd %p(%016Lx)\n",                           \
               __FILE__, __LINE__, &(e), pgd_val(e))

/* Rules for using set_pte: the pte being assigned *must* be
 * either not present or in a state where the hardware will
 * not attempt to update the pte.  In places where this is
 * not possible, use ptep_get_and_clear to obtain the old pte
 * value and then use set_pte to update it.  -ben
 */
static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
        ptep->pte_high = pte.pte_high;
        smp_wmb();
        ptep->pte_low = pte.pte_low;
}
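
/*
 * Illustrative sketch (editorial, not part of this header's API): the
 * store order in native_set_pte() matters because only pte_low carries
 * _PAGE_PRESENT.  Writing the high half first means a concurrent
 * lockless reader that observes the new, present pte_low is also
 * guaranteed (by the smp_wmb() pairing with the reader's smp_rmb())
 * to observe the new pte_high:
 *
 *      ptep->pte_high = pte.pte_high;  // ignored by hw while not present
 *      smp_wmb();                      // order the two half-word stores
 *      ptep->pte_low = pte.pte_low;    // present bit becomes visible last
 *
 * If the stores were reversed, a reader could combine the new present
 * low word with the stale high word and compute the wrong pfn.
 */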

#define pmd_read_atomic pmd_read_atomic
/*
 * pte_offset_map_lock() on 32-bit PAE kernels was reading the pmd_t with
 * a plain "*pmdp" dereference generated by gcc. The problem is that in
 * certain places where pte_offset_map_lock() is called, concurrent page
 * faults are allowed if the mmap_sem is held for reading. An example is
 * mincore vs page faults vs MADV_DONTNEED. On the page fault side
 * pmd_populate() rightfully does a set_64bit(), but if we're reading the
 * pmd_t with a "*pmdp" on the mincore side, an SMP race can happen
 * because gcc will not read the 64-bit pmd atomically. To fix this, all
 * places that run pte_offset_map_lock() while holding the mmap_sem in
 * read mode shall read the pmdp pointer using this function, to know if
 * the pmd is null or not, and in turn to know if they can run
 * pte_offset_map_lock() or pmd_trans_huge() or other pmd operations.
 *
 * Without THP, if the mmap_sem is held for reading, the pmd can only
 * transition from null to not null while pmd_read_atomic() runs. So
 * we can always return atomic pmd values with this function.
 *
 * With THP, if the mmap_sem is held for reading, the pmd can become
 * trans_huge or none or point to a pte (and in turn become "stable")
 * at any time under pmd_read_atomic(). We could read it truly
 * atomically here with an atomic64_read() for the THP-enabled case (and
 * it would be a whole lot simpler), but to avoid using cmpxchg8b we
 * only return an atomic pmdval if the low part of the pmdval is later
 * found stable (i.e. pointing to a pte). And we return a none
 * pmdval if the low part of the pmd is none. In some cases the high
 * and low parts of the pmdval returned may not be consistent if THP is
 * enabled (the low part may point to a previously mapped hugepage,
 * while the high part may point to a more recently mapped hugepage),
 * but pmd_none_or_trans_huge_or_clear_bad() only needs the low part
 * of the pmd to be read atomically to decide if the pmd is unstable
 * or not, with the only exception of when the low part of the pmd is
 * zero, in which case we return a none pmd.
 */
static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
{
        pmdval_t ret;
        u32 *tmp = (u32 *)pmdp;

        ret = (pmdval_t) (*tmp);
        if (ret) {
                /*
                 * If the low part is null, we must not read the high part
                 * or we can end up with a partial pmd, so only read the
                 * high part once the low part is known to be non-null.
                 */
                smp_rmb();
                ret |= ((pmdval_t)*(tmp + 1)) << 32;
        }

        return (pmd_t) { ret };
}
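
/*
 * Hedged sketch of the intended call pattern (modeled on
 * pmd_none_or_trans_huge_or_clear_bad() in include/asm-generic/pgtable.h;
 * simplified, error handling omitted):
 *
 *      pmd_t pmdval = pmd_read_atomic(pmdp);
 *      barrier();                      // keep gcc from re-reading *pmdp
 *      if (pmd_none(pmdval))
 *              return 1;               // stable: nothing mapped here
 *      if (pmd_trans_huge(pmdval))
 *              return 1;               // huge or unstable: no pte to map
 *      // otherwise the low part points to a pte page and it is safe
 *      // to proceed with pte_offset_map_lock()
 */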

static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
        set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
        set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
}

static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
        set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
}
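
/*
 * Editorial note: unlike the split store in native_set_pte(), these
 * helpers store the whole 64-bit entry atomically (set_64bit() is a
 * cmpxchg8b loop on 32-bit x86), so a concurrent lockless walker can
 * never observe a torn entry.  A rough user-space analogue, for
 * illustration only:
 *
 *      __atomic_store_n((uint64_t *)pmdp, newval, __ATOMIC_SEQ_CST);
 */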

/*
 * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
 * entry, so clear the bottom half first and enforce ordering with a compiler
 * barrier.
 */
static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
                                    pte_t *ptep)
{
        ptep->pte_low = 0;
        smp_wmb();
        ptep->pte_high = 0;
}

static inline void native_pmd_clear(pmd_t *pmd)
{
        u32 *tmp = (u32 *)pmd;
        *tmp = 0;
        smp_wmb();
        *(tmp + 1) = 0;
}

static inline void native_pud_clear(pud_t *pudp)
{
}

static inline void pud_clear(pud_t *pudp)
{
        set_pud(pudp, __pud(0));

        /*
         * According to Intel App note "TLBs, Paging-Structure Caches,
         * and Their Invalidation", April 2007, document 317080-001,
         * section 8.1: in PAE mode we explicitly have to flush the
         * TLB via cr3 if the top-level pgd is changed...
         *
         * Currently all places where pud_clear() is called are either
         * followed by flush_tlb_mm() or don't need a TLB flush (x86_64
         * code or pud_clear_bad()), so we don't need a TLB flush here.
         */
}

#ifdef CONFIG_SMP
static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
{
        pte_t res;

        /* xchg acts as a barrier before the setting of the high bits */
        res.pte_low = xchg(&ptep->pte_low, 0);
        res.pte_high = ptep->pte_high;
        ptep->pte_high = 0;

        return res;
}
#else
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
#endif
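
/*
 * Why a single xchg() on the low word suffices (editorial sketch of the
 * reasoning, not normative): zeroing pte_low atomically clears
 * _PAGE_PRESENT, after which the hardware will neither walk through
 * this entry nor write Accessed/Dirty bits into it, so the high word
 * can then be read and cleared with plain stores:
 *
 *      res.pte_low = xchg(&ptep->pte_low, 0); // entry now not present
 *      res.pte_high = ptep->pte_high;         // stable: hw ignores it
 *      ptep->pte_high = 0;
 */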

union split_pmd {
        struct {
                u32 pmd_low;
                u32 pmd_high;
        };
        pmd_t pmd;
};

#ifdef CONFIG_SMP
static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
{
        union split_pmd res, *orig = (union split_pmd *)pmdp;

        /* xchg acts as a barrier before setting of the high bits */
        res.pmd_low = xchg(&orig->pmd_low, 0);
        res.pmd_high = orig->pmd_high;
        orig->pmd_high = 0;

        return res.pmd;
}
#else
#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
#endif

#ifndef pmdp_establish
#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
                unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
        pmd_t old;

        /*
         * If the new pmd value has the present bit cleared, we can get
         * away without the expensive cmpxchg64: we can update pmdp
         * half-by-half without racing with anybody.
         */
        if (!(pmd_val(pmd) & _PAGE_PRESENT)) {
                union split_pmd old, new, *ptr;

                ptr = (union split_pmd *)pmdp;

                new.pmd = pmd;

                /* xchg acts as a barrier before setting of the high bits */
                old.pmd_low = xchg(&ptr->pmd_low, new.pmd_low);
                old.pmd_high = ptr->pmd_high;
                ptr->pmd_high = new.pmd_high;
                return old.pmd;
        }

        do {
                old = *pmdp;
        } while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd);

        return old;
}
#endif
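
/*
 * Usage sketch (modeled on pmdp_invalidate() in mm/pgtable-generic.c;
 * simplified): install a new value while handing back the old one, so
 * the caller keeps the hardware-set Accessed/Dirty bits and performs
 * any needed TLB flush itself:
 *
 *      pmd_t old = pmdp_establish(vma, address, pmdp,
 *                                 pmd_mknotpresent(*pmdp));
 *      flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 */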

#ifdef CONFIG_SMP
union split_pud {
        struct {
                u32 pud_low;
                u32 pud_high;
        };
        pud_t pud;
};

static inline pud_t native_pudp_get_and_clear(pud_t *pudp)
{
        union split_pud res, *orig = (union split_pud *)pudp;

        /* xchg acts as a barrier before setting of the high bits */
        res.pud_low = xchg(&orig->pud_low, 0);
        res.pud_high = orig->pud_high;
        orig->pud_high = 0;

        return res.pud;
}
#else
#define native_pudp_get_and_clear(xp) native_local_pudp_get_and_clear(xp)
#endif

/* Encode and de-code a swap entry */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5)
#define __swp_type(x)                   (((x).val) & 0x1f)
#define __swp_offset(x)                 ((x).val >> 5)
#define __swp_entry(type, offset)       ((swp_entry_t){(type) | (offset) << 5})
#define __pte_to_swp_entry(pte)         ((swp_entry_t){ (pte).pte_high })
#define __swp_entry_to_pte(x)           ((pte_t){ { .pte_high = (x).val } })
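
/*
 * Worked example (editorial): the swap type lives in the low 5 bits and
 * the offset in the remaining 27 bits of pte_high, e.g. type 3 at
 * offset 0x1234:
 *
 *      __swp_entry(3, 0x1234).val == (0x1234 << 5) | 3 == 0x24683
 *
 * The entry is stored entirely in pte_high, so pte_low (and with it
 * _PAGE_PRESENT) stays zero and the hardware never treats a swap entry
 * as a valid translation.
 */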

#define gup_get_pte gup_get_pte
/*
 * WARNING: only to be used in the get_user_pages_fast() implementation.
 *
 * With get_user_pages_fast(), we walk down the pagetables without taking
 * any locks.  For this we would like to load the pointers atomically,
 * but that is not possible (without expensive cmpxchg8b) on PAE.  What
 * we do have is the guarantee that a PTE will only either go from not
 * present to present, or present to not present, or both -- it will not
 * switch to a completely different present page without a TLB flush in
 * between; something that we are blocking by holding interrupts off.
 *
 * Setting ptes from not present to present goes:
 *
 *   ptep->pte_high = h;
 *   smp_wmb();
 *   ptep->pte_low = l;
 *
 * And present to not present goes:
 *
 *   ptep->pte_low = 0;
 *   smp_wmb();
 *   ptep->pte_high = 0;
 *
 * We must ensure here that the load of pte_low sees 'l' iff pte_high
 * sees 'h'.  We load pte_high *after* loading pte_low, which ensures we
 * don't see an older value of pte_high.  *Then* we recheck pte_low,
 * which ensures that we haven't picked up a changed pte high.  We might
 * have gotten rubbish values from pte_low and pte_high, but we are
 * guaranteed that pte_low will not have the present bit set *unless*
 * it is 'l'.  Because get_user_pages_fast() only operates on present
 * ptes we're safe.
 */
static inline pte_t gup_get_pte(pte_t *ptep)
{
        pte_t pte;

        do {
                pte.pte_low = ptep->pte_low;
                smp_rmb();
                pte.pte_high = ptep->pte_high;
                smp_rmb();
        } while (unlikely(pte.pte_low != ptep->pte_low));

        return pte;
}
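
/*
 * Call-site sketch (simplified from the get_user_pages_fast() walk in
 * mm/gup.c; details here are assumptions): interrupts are disabled so
 * TLB-flush IPIs cannot complete, which is what keeps a present pte
 * from being freed and reused under us:
 *
 *      local_irq_save(flags);          // block TLB flush IPIs
 *      pte = gup_get_pte(ptep);
 *      if (!pte_present(pte))
 *              goto bail;              // only present ptes are used
 *      ...
 *      local_irq_restore(flags);
 */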

#endif /* _ASM_X86_PGTABLE_3LEVEL_H */