linux/arch/x86/include/asm/tlbflush.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#include <asm/smp.h>
#include <asm/invpcid.h>
#include <asm/pti.h>
#include <asm/processor-flags.h>

void __flush_tlb_all(void);

#define TLB_FLUSH_ALL   -1UL

void cr4_update_irqsoff(unsigned long set, unsigned long clear);
unsigned long cr4_read_shadow(void);

/* Set in this cpu's CR4. */
static inline void cr4_set_bits_irqsoff(unsigned long mask)
{
        cr4_update_irqsoff(mask, 0);
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits_irqsoff(unsigned long mask)
{
        cr4_update_irqsoff(0, mask);
}

/* Set in this cpu's CR4. */
static inline void cr4_set_bits(unsigned long mask)
{
        unsigned long flags;

        local_irq_save(flags);
        cr4_set_bits_irqsoff(mask);
        local_irq_restore(flags);
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits(unsigned long mask)
{
        unsigned long flags;

        local_irq_save(flags);
        cr4_clear_bits_irqsoff(mask);
        local_irq_restore(flags);
}
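
/*
 * Usage sketch only (not part of the upstream header): toggling a CR4
 * feature bit on the local CPU with the helpers above.  X86_CR4_PCE is
 * just an example mask; the _irqsoff variants are for callers that
 * already run with interrupts disabled.
 */
static inline void cr4_toggle_pce_example(bool enable)
{
        if (enable)
                cr4_set_bits(X86_CR4_PCE);      /* saves/restores IRQ flags itself */
        else
                cr4_clear_bits(X86_CR4_PCE);
}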

#ifndef MODULE
/*
 * 6 because 6 should be plenty and struct tlb_state will fit in two cache
 * lines.
 */
#define TLB_NR_DYN_ASIDS        6

struct tlb_context {
        u64 ctx_id;
        u64 tlb_gen;
};

struct tlb_state {
        /*
         * cpu_tlbstate.loaded_mm should match CR3 whenever interrupts
         * are on.  This means that it may not match current->active_mm,
         * which will contain the previous user mm when we're in lazy TLB
         * mode even if we've already switched back to swapper_pg_dir.
         *
         * During switch_mm_irqs_off(), loaded_mm will be set to
         * LOADED_MM_SWITCHING during the brief interrupts-off window
         * when CR3 and loaded_mm would otherwise be inconsistent.  This
         * is for nmi_uaccess_okay()'s benefit.
         */
        struct mm_struct *loaded_mm;

#define LOADED_MM_SWITCHING ((struct mm_struct *)1UL)

        /* Last user mm for optimizing IBPB */
        union {
                struct mm_struct        *last_user_mm;
                unsigned long           last_user_mm_spec;
        };

        u16 loaded_mm_asid;
        u16 next_asid;

        /*
         * If set we changed the page tables in such a way that we
         * needed an invalidation of all contexts (aka. PCIDs / ASIDs).
         * This tells us to go invalidate all the non-loaded ctxs[]
         * on the next context switch.
         *
         * The current ctx was kept up-to-date as it ran and does not
         * need to be invalidated.
         */
        bool invalidate_other;

        /*
         * Mask that contains TLB_NR_DYN_ASIDS+1 bits to indicate
         * the corresponding user PCID needs a flush next time we
         * switch to it; see SWITCH_TO_USER_CR3.
         */
        unsigned short user_pcid_flush_mask;

        /*
         * Access to this CR4 shadow and to H/W CR4 is protected by
         * disabling interrupts when modifying either one.
         */
        unsigned long cr4;

        /*
         * This is a list of all contexts that might exist in the TLB.
         * There is one per ASID that we use, and the ASID (what the
         * CPU calls PCID) is the index into ctxs[].
         *
         * For each context, ctx_id indicates which mm the TLB's user
         * entries came from.  As an invariant, the TLB will never
         * contain entries that are out-of-date with respect to the
         * tlb_gen recorded here for that mm.
         *
         * To be clear, this means that it's legal for the TLB code to
         * flush the TLB without updating tlb_gen.  This can happen
         * (for now, at least) due to paravirt remote flushes.
         *
         * NB: context 0 is a bit special, since it's also used by
         * various bits of init code.  This is fine -- code that
         * isn't aware of PCID will end up harmlessly flushing
         * context 0.
         */
        struct tlb_context ctxs[TLB_NR_DYN_ASIDS];
};
DECLARE_PER_CPU_ALIGNED(struct tlb_state, cpu_tlbstate);
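
/*
 * Illustrative sketch only (hypothetical helper, not in the upstream
 * header): the per-CPU state above is read with this_cpu_read() while
 * the caller cannot migrate, and loaded_mm_asid indexes ctxs[].
 */
static inline u64 example_loaded_ctx_id(void)
{
        u16 asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);

        /* Valid only with preemption (or interrupts) disabled. */
        return this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id);
}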

struct tlb_state_shared {
        /*
         * We can be in one of several states:
         *
         *  - Actively using an mm.  Our CPU's bit will be set in
         *    mm_cpumask(loaded_mm) and is_lazy == false.
         *
         *  - Not using a real mm.  loaded_mm == &init_mm.  Our CPU's bit
         *    will not be set in mm_cpumask(&init_mm) and is_lazy == false.
         *
         *  - Lazily using a real mm.  loaded_mm != &init_mm, our bit
         *    is set in mm_cpumask(loaded_mm), but is_lazy == true.
         *    We're heuristically guessing that the CR3 load we
         *    skipped more than makes up for the overhead added by
         *    lazy mode.
         */
        bool is_lazy;
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state_shared, cpu_tlbstate_shared);

bool nmi_uaccess_okay(void);
#define nmi_uaccess_okay nmi_uaccess_okay
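
/*
 * Conceptual sketch only: the real nmi_uaccess_okay() lives in
 * arch/x86/mm/tlb.c and is stricter; this is shown just to tie the
 * LOADED_MM_SWITCHING marker above to its consumer.
 */
static inline bool example_nmi_uaccess_okay(void)
{
        struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);

        /* CR3 and loaded_mm are transiently inconsistent while switching. */
        if (loaded_mm == LOADED_MM_SWITCHING)
                return false;

        /* User accesses only make sense if the live mm is current's mm. */
        return loaded_mm == current->mm;
}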

/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
        this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

extern void initialize_tlbstate_and_flush(void);

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_multi(cpumask, info) flushes TLBs on multiple CPUs
 *
 * ...but the i386 has somewhat limited TLB flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
struct flush_tlb_info {
        /*
         * We support several kinds of flushes.
         *
         * - Fully flush a single mm.  .mm will be set, .end will be
         *   TLB_FLUSH_ALL, and .new_tlb_gen will be the tlb_gen to
         *   which the IPI sender is trying to catch us up.
         *
         * - Partially flush a single mm.  .mm will be set, .start and
         *   .end will indicate the range, and .new_tlb_gen will be set
         *   such that the changes between generation .new_tlb_gen-1 and
         *   .new_tlb_gen are entirely contained in the indicated range.
         *
         * - Fully flush all mms whose tlb_gens have been updated.  .mm
         *   will be NULL, .end will be TLB_FLUSH_ALL, and .new_tlb_gen
         *   will be zero.
         */
        struct mm_struct        *mm;
        unsigned long           start;
        unsigned long           end;
        u64                     new_tlb_gen;
        unsigned int            initiating_cpu;
        u8                      stride_shift;
        u8                      freed_tables;
};
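
/*
 * Illustrative sketch only (no such constructor exists in this header):
 * how the first kind of flush above, "fully flush a single mm", maps
 * onto the fields of struct flush_tlb_info.
 */
static inline struct flush_tlb_info example_full_mm_flush_info(struct mm_struct *mm)
{
        struct flush_tlb_info info = {
                .mm             = mm,
                .start          = 0,
                .end            = TLB_FLUSH_ALL,        /* full flush, not a range */
                .new_tlb_gen    = atomic64_read(&mm->context.tlb_gen),
                .stride_shift   = PAGE_SHIFT,
                .freed_tables   = false,
        };

        return info;
}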

void flush_tlb_local(void);
void flush_tlb_one_user(unsigned long addr);
void flush_tlb_one_kernel(unsigned long addr);
void flush_tlb_multi(const struct cpumask *cpumask,
                      const struct flush_tlb_info *info);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif

#define flush_tlb_mm(mm)                                                \
                flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL, true)

#define flush_tlb_range(vma, start, end)                                \
        flush_tlb_mm_range((vma)->vm_mm, start, end,                    \
                           ((vma)->vm_flags & VM_HUGETLB)               \
                                ? huge_page_shift(hstate_vma(vma))      \
                                : PAGE_SHIFT, false)

extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                                unsigned long end, unsigned int stride_shift,
                                bool freed_tables);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
{
        flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, PAGE_SHIFT, false);
}
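
/*
 * Usage sketch only: after PTEs in [start, end) have been modified,
 * callers typically use flush_tlb_mm_range() directly or via the
 * flush_tlb_range()/flush_tlb_page() wrappers above.
 */
static inline void example_flush_after_pte_update(struct mm_struct *mm,
                                                  unsigned long start,
                                                  unsigned long end)
{
        /* 4k stride; no page-table pages were freed by this update. */
        flush_tlb_mm_range(mm, start, end, PAGE_SHIFT, false);
}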

static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
{
        /*
         * Bump the generation count.  This also serves as a full barrier
         * that synchronizes with switch_mm(): callers are required to order
         * their read of mm_cpumask after their writes to the paging
         * structures.
         */
        return atomic64_inc_return(&mm->context.tlb_gen);
}
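
/*
 * Ordering sketch only (hypothetical helper): a flush initiator bumps the
 * generation first and samples mm_cpumask() only afterwards, relying on
 * the full barrier described in inc_mm_tlb_gen() above.
 */
static inline bool example_flush_needs_ipis(struct mm_struct *mm, u64 *new_tlb_gen)
{
        /* Bump first: this is the barrier the comment above refers to. */
        *new_tlb_gen = inc_mm_tlb_gen(mm);

        /* Reading mm_cpumask() before the bump could miss CPUs to notify. */
        return !cpumask_empty(mm_cpumask(mm));
}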

static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
                                        struct mm_struct *mm)
{
        inc_mm_tlb_gen(mm);
        cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}

extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
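
/*
 * Usage sketch only: the unmap path batches invalidations by collecting
 * target CPUs per mm and issuing a single flush once all PTEs are gone.
 * The batch is assumed to start out with an empty cpumask.
 */
static inline void example_batched_unmap_flush(struct arch_tlbflush_unmap_batch *batch,
                                               struct mm_struct *mm)
{
        /* Record the CPUs that may hold stale entries for this mm... */
        arch_tlbbatch_add_mm(batch, mm);

        /* ...then invalidate them all with one round of flushes/IPIs. */
        arch_tlbbatch_flush(batch);
}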

#endif /* !MODULE */

#endif /* _ASM_X86_TLBFLUSH_H */