linux/arch/x86/mm/tlb.c
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>

DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
                        = { &init_mm, 0, };

/*
 *      Smarter SMP flushing macros.
 *              c/o Linus Torvalds.
 *
 *      These mean you can really definitely utterly forget about
 *      writing to user space from interrupts. (It's not allowed anyway.)
 *
 *      Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 *      More scalable flush, from Andi Kleen
 *
 *      To avoid global state, use 8 different call vectors.
 *      Each CPU uses a specific vector to trigger flushes on other
 *      CPUs. Depending on the received vector the target CPUs look into
 *      the right array slot for the flush data.
 *
 *      With more than 8 CPUs they are hashed to the 8 available
 *      vectors. The limited global vector space forces us to this right now.
 *      In the future, when interrupts are split into per-CPU domains, this
 *      could be fixed, at the cost of triggering multiple IPIs in some cases.
 */

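/*
 * For example, with the 8 vectors described above, CPU 11 hashes to
 * slot 11 % 8 == 3 and sends its flush requests on vector
 * INVALIDATE_TLB_VECTOR_START + 3 (see flush_tlb_others_ipi()).
 */
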
union smp_flush_state {
        struct {
                struct mm_struct *flush_mm;
                unsigned long flush_va;
                spinlock_t tlbstate_lock;
                DECLARE_BITMAP(flush_cpumask, NR_CPUS);
        };
        char pad[CONFIG_X86_INTERNODE_CACHE_BYTES];
} ____cacheline_internodealigned_in_smp;

/*
 * State is put into the per CPU data section, but padded
 * to a full cache line because other CPUs can access it and we don't
 * want false sharing in the per cpu data segment.
 */
static union smp_flush_state flush_state[NUM_INVALIDATE_TLB_VECTORS];

/*
 * We cannot call mmdrop() because we are in interrupt context;
 * instead, update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
        if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
                BUG();
        cpumask_clear_cpu(cpu,
                          mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
        load_cr3(swapper_pg_dir);
}
EXPORT_SYMBOL_GPL(leave_mm);

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *      Stop ipi delivery for the old mm. This is not synchronized with
 *      the other cpus, but smp_invalidate_interrupt() ignores flush ipis
 *      for the wrong mm, and in the worst case we perform a superfluous
 *      tlb flush.
 * 1a2) set cpu mmu_state to TLBSTATE_OK
 *      Now the smp_invalidate_interrupt won't call leave_mm if cpu0
 *      was in lazy tlb mode.
 * 1a3) update cpu active_mm
 *      Now cpu0 accepts tlb flushes for the new mm.
 * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
 *      Now the other cpus will send tlb flush ipis.
 * 1a5) change cr3.
 * 1b) thread switch without mm change
 *      cpu active_mm is correct, cpu0 already handles
 *      flush ipis.
 * 1b1) set cpu mmu_state to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *      Atomically set the bit [other cpus will start sending flush ipis],
 *      and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu mmu_state is local to each cpu, so there
 * are no write/read ordering problems.
 */

/*
 * TLB flush IPI:
 *
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 *
 * Interrupts are disabled.
 */

/*
 * FIXME: use of asmlinkage is not consistent.  On x86_64 it's a no-op
 * kept only for documentation purposes, but the usage is still slightly
 * inconsistent.  On x86_32, asmlinkage is regparm(0), yet the interrupt
 * entry code calls in with the first parameter in %eax.  Maybe define
 * intrlinkage?
 */
#ifdef CONFIG_X86_64
asmlinkage
#endif
void smp_invalidate_interrupt(struct pt_regs *regs)
{
        unsigned int cpu;
        unsigned int sender;
        union smp_flush_state *f;

        cpu = smp_processor_id();
        /*
         * orig_rax contains the negated interrupt vector.
         * Use that to determine where the sender put the data.
         */
        sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
        f = &flush_state[sender];

        if (!cpumask_test_cpu(cpu, to_cpumask(f->flush_cpumask)))
                goto out;
                /*
                 * This was a BUG() but until someone can quote me the
                 * line from the Intel manual that guarantees an IPI to
                 * multiple CPUs is retried _only_ on the erroring CPUs,
                 * it's staying as a return
                 *
                 * BUG();
                 */

        if (f->flush_mm == percpu_read(cpu_tlbstate.active_mm)) {
                if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
                        if (f->flush_va == TLB_FLUSH_ALL)
                                local_flush_tlb();
                        else
                                __flush_tlb_one(f->flush_va);
                } else
                        leave_mm(cpu);
        }
out:
        ack_APIC_irq();
        smp_mb__before_clear_bit();
        cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask));
        smp_mb__after_clear_bit();
        inc_irq_stat(irq_tlb_count);
}

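/*
 * Grab this CPU's flush slot, publish the mm/va to be flushed, send the
 * slot's invalidate vector to every CPU in cpumask except ourselves,
 * and spin until all targets have cleared their bit in flush_cpumask,
 * i.e. have serviced the IPI.
 */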
static void flush_tlb_others_ipi(const struct cpumask *cpumask,
                                 struct mm_struct *mm, unsigned long va)
{
        unsigned int sender;
        union smp_flush_state *f;

        /* Caller has disabled preemption */
        sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
        f = &flush_state[sender];

        /*
         * Could avoid this lock when
         * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
         * probably not worth checking this for a cache-hot lock.
         */
        spin_lock(&f->tlbstate_lock);

        f->flush_mm = mm;
        f->flush_va = va;
        if (cpumask_andnot(to_cpumask(f->flush_cpumask), cpumask, cpumask_of(smp_processor_id()))) {
                /*
                 * We have to send the IPI only to
                 * CPUs affected.
                 */
                apic->send_IPI_mask(to_cpumask(f->flush_cpumask),
                              INVALIDATE_TLB_VECTOR_START + sender);

                while (!cpumask_empty(to_cpumask(f->flush_cpumask)))
                        cpu_relax();
        }

        f->flush_mm = NULL;
        f->flush_va = 0;
        spin_unlock(&f->tlbstate_lock);
}

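/*
 * Native cross-CPU flush implementation.  On SGI UV systems the
 * Broadcast Assist Unit can flush remote TLBs without IPIs;
 * uv_flush_tlb_others() returns the subset of CPUs (if any) that still
 * need the conventional IPI-based flush below.
 */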
void native_flush_tlb_others(const struct cpumask *cpumask,
                             struct mm_struct *mm, unsigned long va)
{
        if (is_uv_system()) {
                unsigned int cpu;

                cpu = get_cpu();
                cpumask = uv_flush_tlb_others(cpumask, mm, va, cpu);
                if (cpumask)
                        flush_tlb_others_ipi(cpumask, mm, va);
                put_cpu();
                return;
        }
        flush_tlb_others_ipi(cpumask, mm, va);
}

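/*
 * Initialize the spinlock in each flush slot.  Runs as a core_initcall()
 * so the locks are ready before the first cross-CPU flush is issued.
 */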
static int __cpuinit init_smp_flush(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(flush_state); i++)
                spin_lock_init(&flush_state[i].tlbstate_lock);

        return 0;
}
core_initcall(init_smp_flush);

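/*
 * Flush all TLB entries for the current task's mm: locally right away,
 * and by IPI on every other CPU that currently has this mm in its
 * cpu_vm_mask.
 */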
void flush_tlb_current_task(void)
{
        struct mm_struct *mm = current->mm;

        preempt_disable();

        local_flush_tlb();
        if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
                flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL);
        preempt_enable();
}

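/*
 * Flush all TLB entries for mm.  If this CPU is actually running mm,
 * flush the local TLB; if it only has mm as a lazy active_mm, leave the
 * mm instead.  Other CPUs in mm's cpumask are flushed by IPI.
 */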
void flush_tlb_mm(struct mm_struct *mm)
{
        preempt_disable();

        if (current->active_mm == mm) {
                if (current->mm)
                        local_flush_tlb();
                else
                        leave_mm(smp_processor_id());
        }
        if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
                flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL);

        preempt_enable();
}

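/*
 * Flush the single page mapped at va in vma's mm, locally and on any
 * other CPU that may still hold a translation for it.
 */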
void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
        struct mm_struct *mm = vma->vm_mm;

        preempt_disable();

        if (current->active_mm == mm) {
                if (current->mm)
                        __flush_tlb_one(va);
                else
                        leave_mm(smp_processor_id());
        }

        if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
                flush_tlb_others(mm_cpumask(mm), mm, va);

        preempt_enable();
}

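/*
 * Helper run on each CPU by flush_tlb_all(): flush every TLB entry
 * (including global pages) and, if this CPU was in lazy TLB mode, leave
 * the borrowed mm as well.
 */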
static void do_flush_tlb_all(void *info)
{
        unsigned long cpu = smp_processor_id();

        __flush_tlb_all();
        if (percpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
                leave_mm(cpu);
}

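/* Flush all TLB entries on every online CPU and wait for completion. */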
void flush_tlb_all(void)
{
        on_each_cpu(do_flush_tlb_all, NULL, 1);
}