linux/arch/x86/include/asm/tlbflush.h
#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/system.h>

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif

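/*
 * Reloading CR3 with its current value flushes all non-global TLB
 * entries for the current address space.
 */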
static inline void __native_flush_tlb(void)
{
        native_write_cr3(native_read_cr3());
}

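/*
 * Flush everything, including global pages, by toggling CR4.PGE:
 * clearing PGE invalidates all TLB entries (global ones included),
 * and restoring the old value re-enables global pages.
 */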
static inline void __native_flush_tlb_global(void)
{
        unsigned long flags;
        unsigned long cr4;

        /*
         * Read-modify-write to CR4 - protect it from preemption and
         * from interrupts. (Use the raw variant because this code can
         * be called from deep inside debugging code.)
         */
        raw_local_irq_save(flags);

        cr4 = native_read_cr4();
        /* clear PGE */
        native_write_cr4(cr4 & ~X86_CR4_PGE);
        /* write old PGE again and flush TLBs */
        native_write_cr4(cr4);

        raw_local_irq_restore(flags);
}

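/* Invalidate the TLB entry for a single linear address with INVLPG. */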
static inline void __native_flush_tlb_single(unsigned long addr)
{
        asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}

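/*
 * Full flush: use the global variant when the CPU supports global
 * pages (PGE), so that global entries are invalidated as well.
 */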
static inline void __flush_tlb_all(void)
{
        if (cpu_has_pge)
                __flush_tlb_global();
        else
                __flush_tlb();
}

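/*
 * Single-address flush, falling back to a full flush on CPUs
 * without INVLPG (i386).
 */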
static inline void __flush_tlb_one(unsigned long addr)
{
        if (cpu_has_invlpg)
                __flush_tlb_single(addr);
        else
                __flush_tlb();
}

#ifdef CONFIG_X86_32
# define TLB_FLUSH_ALL  0xffffffff
#else
# define TLB_FLUSH_ALL  -1ULL
#endif

/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct TLBs
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, mm, va) flushes TLBs on other cpus
 *
 * ... but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 *
 * x86-64 can only flush individual pages or full VMs. For a range flush
 * we always do the full VM. It might be worth checking whether a few
 * INVLPGs in a row are a win for small ranges.
 */

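/*
 * Illustrative sketch only (not part of this header): a typical caller
 * updates a page table entry and then flushes the stale translation.
 * For a single user-space page (ptep and newpte are hypothetical
 * locals) that could look like:
 *
 *	set_pte_at(vma->vm_mm, addr, ptep, newpte);
 *	flush_tlb_page(vma, addr);
 *
 * On SMP, flush_tlb_page() also notifies the other CPUs that may be
 * using the mm.
 */
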
#ifndef CONFIG_SMP

#define flush_tlb() __flush_tlb()
#define flush_tlb_all() __flush_tlb_all()
#define local_flush_tlb() __flush_tlb()

static inline void flush_tlb_mm(struct mm_struct *mm)
{
        if (mm == current->active_mm)
                __flush_tlb();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
                                  unsigned long addr)
{
        if (vma->vm_mm == current->active_mm)
                __flush_tlb_one(addr);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end)
{
        if (vma->vm_mm == current->active_mm)
                __flush_tlb();
}

static inline void native_flush_tlb_others(const struct cpumask *cpumask,
                                           struct mm_struct *mm,
                                           unsigned long va)
{
}

static inline void reset_lazy_tlbstate(void)
{
}

#else  /* SMP */

#include <asm/smp.h>

#define local_flush_tlb() __flush_tlb()

extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);

#define flush_tlb()     flush_tlb_current_task()

static inline void flush_tlb_range(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end)
{
        flush_tlb_mm(vma->vm_mm);
}

void native_flush_tlb_others(const struct cpumask *cpumask,
                             struct mm_struct *mm, unsigned long va);

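/*
 * Per-cpu TLB state: TLBSTATE_OK means the CPU is actively using
 * cpu_tlbstate.active_mm; TLBSTATE_LAZY means it is in lazy TLB mode
 * and need not be interrupted for every flush of that mm.
 */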
#define TLBSTATE_OK     1
#define TLBSTATE_LAZY   2

struct tlb_state {
        struct mm_struct *active_mm;
        int state;
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);

static inline void reset_lazy_tlbstate(void)
{
        percpu_write(cpu_tlbstate.state, 0);
        percpu_write(cpu_tlbstate.active_mm, &init_mm);
}

#endif  /* SMP */

#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, mm, va)  native_flush_tlb_others(mask, mm, va)
#endif

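/*
 * Kernel mappings are shared by all address spaces and may be global,
 * so a kernel-range flush simply does a full flush everywhere.
 */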
static inline void flush_tlb_kernel_range(unsigned long start,
                                          unsigned long end)
{
        flush_tlb_all();
}

extern void zap_low_mappings(bool early);

#endif /* _ASM_X86_TLBFLUSH_H */