linux/arch/x86/include/asm/tlbflush.h
#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/special_insns.h>

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif

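/*
 * Writing CR3 (even with its current value) flushes all non-global TLB
 * entries on this CPU; entries marked global (PGE) survive and need
 * the CR4-based flush below.
 */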
static inline void __native_flush_tlb(void)
{
	native_write_cr3(native_read_cr3());
}

static inline void __native_flush_tlb_global_irq_disabled(void)
{
	unsigned long cr4;

	cr4 = native_read_cr4();
	/* clear PGE */
	native_write_cr4(cr4 & ~X86_CR4_PGE);
	/* write old PGE again and flush TLBs */
	native_write_cr4(cr4);
}

static inline void __native_flush_tlb_global(void)
{
	unsigned long flags;

	/*
	 * Read-modify-write to CR4 - protect it from preemption and
	 * from interrupts. (Use the raw variant because this code can
	 * be called from deep inside debugging code.)
	 */
	raw_local_irq_save(flags);

	__native_flush_tlb_global_irq_disabled();

	raw_local_irq_restore(flags);
}

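/*
 * INVLPG invalidates the TLB entry for the page containing @addr,
 * including global entries, and leaves the rest of the TLB intact.
 */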
static inline void __native_flush_tlb_single(unsigned long addr)
{
	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}

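/*
 * With global pages enabled a bare CR3 reload is not a full flush, so
 * take the CR4.PGE toggle path; otherwise the CR3 write suffices.
 */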
static inline void __flush_tlb_all(void)
{
	if (cpu_has_pge)
		__flush_tlb_global();
	else
		__flush_tlb();
}

static inline void __flush_tlb_one(unsigned long addr)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
	__flush_tlb_single(addr);
}

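/* Sentinel range bound: request a full flush instead of a ranged one. */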
#define TLB_FLUSH_ALL	-1UL

/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct TLBs
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, mm, start, end) flushes TLBs on other cpus
 *
 * ...but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
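/*
 * Illustrative sketch only, not part of this header: how a hypothetical
 * caller might invalidate a single stale user translation after
 * rewriting a PTE.  vma/addr/ptep/pte come from the caller's page-table
 * walk; set_pte_at() is the generic PTE-update helper.
 *
 *	set_pte_at(vma->vm_mm, addr, ptep, pte);
 *	flush_tlb_page(vma, addr);	// drop the stale TLB entry
 *
 * A ranged update would use flush_tlb_range(vma, start, end) instead.
 */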

#ifndef CONFIG_SMP

/*
 * "_up" is for UniProcessor.
 *
 * This is a helper for other header functions.  *Not* intended to be called
 * directly.  All global TLB flushes need to either call this, or to bump the
 * vm statistics themselves.
 */
static inline void __flush_tlb_up(void)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	__flush_tlb();
}

static inline void flush_tlb_all(void)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	__flush_tlb_all();
}

static inline void flush_tlb(void)
{
	__flush_tlb_up();
}

static inline void local_flush_tlb(void)
{
	__flush_tlb_up();
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		__flush_tlb_up();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long addr)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb_one(addr);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb_up();
}

static inline void flush_tlb_mm_range(struct mm_struct *mm,
	   unsigned long start, unsigned long end, unsigned long vmflag)
{
	if (mm == current->active_mm)
		__flush_tlb_up();
}

static inline void native_flush_tlb_others(const struct cpumask *cpumask,
					   struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
}

static inline void reset_lazy_tlbstate(void)
{
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	flush_tlb_all();
}

#else  /* SMP */

#include <asm/smp.h>

#define local_flush_tlb() __flush_tlb()

#define flush_tlb_mm(mm)	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)

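/*
 * The vma's flags are forwarded so the implementation can pick between
 * page-granular invalidation and a full flush (e.g. for huge pages).
 */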
#define flush_tlb_range(vma, start, end)	\
		flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)

extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

#define flush_tlb()	flush_tlb_current_task()

void native_flush_tlb_others(const struct cpumask *cpumask,
				struct mm_struct *mm,
				unsigned long start, unsigned long end);

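/*
 * Per-CPU TLB state: the mm whose translations this CPU may be caching
 * and whether the CPU is actively using it (TLBSTATE_OK) or merely
 * borrowing it while idle/kernel-only (TLBSTATE_LAZY).  A lazy CPU that
 * receives a flush request drops the mm instead of flushing, sparing it
 * further flush IPIs.
 */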
#define TLBSTATE_OK	1
#define TLBSTATE_LAZY	2

struct tlb_state {
	struct mm_struct *active_mm;
	int state;
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);

static inline void reset_lazy_tlbstate(void)
{
	this_cpu_write(cpu_tlbstate.state, 0);
	this_cpu_write(cpu_tlbstate.active_mm, &init_mm);
}

#endif	/* SMP */

#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, mm, start, end)	\
	native_flush_tlb_others(mask, mm, start, end)
#endif

#endif /* _ASM_X86_TLBFLUSH_H */