linux/arch/x86/mm/tlb.c
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/cpu.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>

DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
                        = { &init_mm, 0, };

/*
 *      Smarter SMP flushing macros.
 *              c/o Linus Torvalds.
 *
 *      These mean you can really definitely utterly forget about
 *      writing to user space from interrupts. (It's not allowed anyway.)
 *
 *      Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 *      More scalable flush, from Andi Kleen
 *
 *      To avoid global state, use 8 different call vectors.
 *      Each CPU uses a specific vector to trigger flushes on other
 *      CPUs. Depending on the received vector, the target CPUs look into
 *      the right array slot for the flush data.
 *
 *      With more than 8 CPUs, they are hashed onto the 8 available
 *      vectors. The limited global vector space forces us to do this for
 *      now. In the future, when interrupts are split into per-CPU domains,
 *      this could be fixed, at the cost of triggering multiple IPIs in
 *      some cases.
 */

union smp_flush_state {
        struct {
                struct mm_struct *flush_mm;
                unsigned long flush_va;
                raw_spinlock_t tlbstate_lock;
                DECLARE_BITMAP(flush_cpumask, NR_CPUS);
        };
        char pad[INTERNODE_CACHE_BYTES];
} ____cacheline_internodealigned_in_smp;

/*
 * State is put into the per-CPU data section, but padded
 * to a full cache line because other CPUs can access it and we don't
 * want false sharing in the per-CPU data segment.
 */
static union smp_flush_state flush_state[NUM_INVALIDATE_TLB_VECTORS];

static DEFINE_PER_CPU_READ_MOSTLY(int, tlb_vector_offset);
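
/*
 * Illustrative sketch (not part of the original file): the padding
 * above only prevents false sharing if each array element really
 * spans whole internode cache lines, which can be asserted at
 * compile time.  The helper name is hypothetical.
 */
static inline void assert_flush_state_padding(void)
{
        /* Each flush_state[] slot must fill complete cache lines. */
        BUILD_BUG_ON(sizeof(union smp_flush_state) % INTERNODE_CACHE_BYTES);
}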

/*
 * We cannot call mmdrop() because we are in interrupt context;
 * instead, update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
        struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);

        if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
                BUG();
        if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
                cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
                load_cr3(swapper_pg_dir);
        }
}
EXPORT_SYMBOL_GPL(leave_mm);
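
/*
 * Sketch (illustrative, not in the original): the usual caller
 * pattern for leave_mm(), as used by the lazy-TLB paths below -
 * detach only when this CPU holds the mm lazily, never while it
 * actively owns it.  The helper name is hypothetical.
 */
static inline void leave_mm_if_lazy(void)
{
        if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
                leave_mm(smp_processor_id());
}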

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *      Stop IPI delivery for the old mm. This is not synchronized with
 *      the other cpus, but smp_invalidate_interrupt ignores flush IPIs
 *      for the wrong mm, and in the worst case we perform a superfluous
 *      tlb flush.
 * 1a2) set cpu mmu_state to TLBSTATE_OK
 *      Now smp_invalidate_interrupt won't call leave_mm if cpu0
 *      was in lazy tlb mode.
 * 1a3) update cpu active_mm
 *      Now cpu0 accepts tlb flushes for the new mm.
 * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
 *      Now the other cpus will send tlb flush IPIs.
 * 1a5) change cr3.
 * 1b) thread switch without mm change
 *      cpu active_mm is correct, cpu0 already handles
 *      flush IPIs.
 * 1b1) set cpu mmu_state to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *      Atomically set the bit [other cpus will start sending flush IPIs],
 *      and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %esp, i.e. current
 *
 * The interrupt handler must handle 2 special cases:
 * - cr3 is changed before %esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu mmu_state is local to each cpu, so there
 * are no write/read ordering problems.
 */
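
/*
 * Condensed sketch (illustrative, not in the original) of the 1a)
 * ordering described above; the real sequence lives in switch_mm():
 *
 *      cpumask_clear_cpu(cpu, mm_cpumask(prev));        // 1a1
 *      this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK); // 1a2
 *      this_cpu_write(cpu_tlbstate.active_mm, next);    // 1a3
 *      cpumask_set_cpu(cpu, mm_cpumask(next));          // 1a4
 *      load_cr3(next->pgd);                             // 1a5
 */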

/*
 * TLB flush IPI:
 *
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in lazy tlb mode.
 *
 * Interrupts are disabled.
 */

/*
 * FIXME: use of asmlinkage is not consistent.  On x86_64 it's a no-op
 * still used for documentation purposes, but even that usage is slightly
 * inconsistent.  On x86_32, asmlinkage is regparm(0), yet interrupt
 * entry calls in with the first parameter in %eax.  Maybe define
 * intrlinkage?
 */
#ifdef CONFIG_X86_64
asmlinkage
#endif
void smp_invalidate_interrupt(struct pt_regs *regs)
{
        unsigned int cpu;
        unsigned int sender;
        union smp_flush_state *f;

        cpu = smp_processor_id();
        /*
         * orig_rax contains the negated interrupt vector.
         * Use that to determine where the sender put the data.
         */
        sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
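        /*
         * Worked example (illustrative): if the sender raised vector
         * INVALIDATE_TLB_VECTOR_START + 3, orig_ax holds
         * ~(INVALIDATE_TLB_VECTOR_START + 3), so ~orig_ax recovers the
         * vector number and sender evaluates to 3.
         */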
        f = &flush_state[sender];

        if (!cpumask_test_cpu(cpu, to_cpumask(f->flush_cpumask)))
                goto out;
                /*
                 * This was a BUG() but until someone can quote me the
                 * line from the Intel manual that guarantees an IPI to
                 * multiple CPUs is retried _only_ on the erroring CPUs,
                 * it's staying as a return.
                 *
                 * BUG();
                 */

        if (f->flush_mm == this_cpu_read(cpu_tlbstate.active_mm)) {
                if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
                        if (f->flush_va == TLB_FLUSH_ALL)
                                local_flush_tlb();
                        else
                                __flush_tlb_one(f->flush_va);
                } else
                        leave_mm(cpu);
        }
out:
        ack_APIC_irq();
        smp_mb__before_clear_bit();
        cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask));
        smp_mb__after_clear_bit();
        inc_irq_stat(irq_tlb_count);
}

static void flush_tlb_others_ipi(const struct cpumask *cpumask,
                                 struct mm_struct *mm, unsigned long va)
{
        unsigned int sender;
        union smp_flush_state *f;

        /* Caller has disabled preemption */
        sender = this_cpu_read(tlb_vector_offset);
        f = &flush_state[sender];

        if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS)
                raw_spin_lock(&f->tlbstate_lock);

        f->flush_mm = mm;
        f->flush_va = va;
        if (cpumask_andnot(to_cpumask(f->flush_cpumask), cpumask,
                           cpumask_of(smp_processor_id()))) {
                /*
                 * We have to send the IPI only to
                 * CPUs affected.
                 */
                apic->send_IPI_mask(to_cpumask(f->flush_cpumask),
                              INVALIDATE_TLB_VECTOR_START + sender);

                while (!cpumask_empty(to_cpumask(f->flush_cpumask)))
                        cpu_relax();
        }

        f->flush_mm = NULL;
        f->flush_va = 0;
        if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS)
                raw_spin_unlock(&f->tlbstate_lock);
}
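
/*
 * Note (illustrative, not in the original): each flush_state[] entry
 * behaves as a single-sender mailbox.  With nr_cpu_ids <=
 * NUM_INVALIDATE_TLB_VECTORS every CPU owns its slot exclusively, so
 * tlbstate_lock above is only taken when CPUs must share vectors.
 */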

void native_flush_tlb_others(const struct cpumask *cpumask,
                             struct mm_struct *mm, unsigned long va)
{
        if (is_uv_system()) {
                unsigned int cpu;

                cpu = smp_processor_id();
                cpumask = uv_flush_tlb_others(cpumask, mm, va, cpu);
                if (cpumask)
                        flush_tlb_others_ipi(cpumask, mm, va);
                return;
        }
        flush_tlb_others_ipi(cpumask, mm, va);
}
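
/*
 * Note (illustrative): on SGI UV systems the Broadcast Assist Unit
 * can flush remote TLBs in hardware; uv_flush_tlb_others() returns
 * the subset of CPUs it could not handle, and only those fall back
 * to the conventional IPI path above.
 */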

static void __cpuinit calculate_tlb_offset(void)
{
        int cpu, node, nr_node_vecs, idx = 0;
        /*
         * We change tlb_vector_offset for each CPU at runtime, but this
         * will not cause inconsistency, as the write is atomic on x86.
         * We might see more lock contention for a short time, but after
         * every CPU's tlb_vector_offset has been changed, everything
         * goes back to normal.
         *
         * Note: if NUM_INVALIDATE_TLB_VECTORS % nr_online_nodes != 0, we
         * might waste some vectors.
         */
        if (nr_online_nodes > NUM_INVALIDATE_TLB_VECTORS)
                nr_node_vecs = 1;
        else
                nr_node_vecs = NUM_INVALIDATE_TLB_VECTORS / nr_online_nodes;

        for_each_online_node(node) {
                int node_offset = (idx % NUM_INVALIDATE_TLB_VECTORS) *
                        nr_node_vecs;
                int cpu_offset = 0;

                for_each_cpu(cpu, cpumask_of_node(node)) {
                        per_cpu(tlb_vector_offset, cpu) = node_offset +
                                cpu_offset;
                        cpu_offset++;
                        cpu_offset = cpu_offset % nr_node_vecs;
                }
                idx++;
        }
}
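
/*
 * Worked example (illustrative): with NUM_INVALIDATE_TLB_VECTORS == 8
 * and 2 online nodes, nr_node_vecs = 8 / 2 = 4.  Node 0's CPUs rotate
 * through offsets 0-3 and node 1's through 4-7, so flushes initiated
 * on different nodes never contend for the same flush_state[] slot.
 */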

static int __cpuinit tlb_cpuhp_notify(struct notifier_block *n,
                unsigned long action, void *hcpu)
{
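        /*
         * The 0xf mask strips CPU_TASKS_FROZEN, so the suspend/resume
         * (_FROZEN) variants of these events are handled as well.
         */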
        switch (action & 0xf) {
        case CPU_ONLINE:
        case CPU_DEAD:
                calculate_tlb_offset();
        }
        return NOTIFY_OK;
}

static int __cpuinit init_smp_flush(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(flush_state); i++)
                raw_spin_lock_init(&flush_state[i].tlbstate_lock);

        calculate_tlb_offset();
        hotcpu_notifier(tlb_cpuhp_notify, 0);
        return 0;
}
core_initcall(init_smp_flush);

void flush_tlb_current_task(void)
{
        struct mm_struct *mm = current->mm;

        preempt_disable();

        local_flush_tlb();
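        /* Flush remotely only if some other CPU is also using this mm. */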
        if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
                flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL);
        preempt_enable();
}

void flush_tlb_mm(struct mm_struct *mm)
{
        preempt_disable();

        if (current->active_mm == mm) {
                if (current->mm)
                        local_flush_tlb();
                else
                        leave_mm(smp_processor_id());
        }
        if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
                flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL);

        preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
        struct mm_struct *mm = vma->vm_mm;

        preempt_disable();

        if (current->active_mm == mm) {
                if (current->mm)
                        __flush_tlb_one(va);
                else
                        leave_mm(smp_processor_id());
        }

        if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
                flush_tlb_others(mm_cpumask(mm), mm, va);

        preempt_enable();
}

static void do_flush_tlb_all(void *info)
{
        __flush_tlb_all();
        if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
                leave_mm(smp_processor_id());
}

void flush_tlb_all(void)
{
        on_each_cpu(do_flush_tlb_all, NULL, 1);
}
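
/*
 * Note (illustrative): on_each_cpu(func, info, 1) runs func on every
 * online CPU, including the caller, and the final argument makes it
 * wait for completion, so flush_tlb_all() returns only after every
 * CPU has flushed its TLB.
 */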