linux/arch/x86/kernel/smp_64.c
/*
 *      Intel SMP support routines.
 *
 *      (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *      (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 *      (c) 2002,2003 Andi Kleen, SuSE Labs.
 *
 *      This code is released under the GNU General Public License version 2 or
 *      later.
 */

#include <linux/init.h>

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/interrupt.h>

#include <asm/mtrr.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mach_apic.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/apicdef.h>
#include <asm/idle.h>

/*
 *      Smarter SMP flushing macros.
 *              c/o Linus Torvalds.
 *
 *      These mean you can really definitely utterly forget about
 *      writing to user space from interrupts. (It's not allowed anyway.)
 *
 *      Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 *      More scalable flush, from Andi Kleen
 *
 *      To avoid global state use 8 different call vectors.
 *      Each CPU uses a specific vector to trigger flushes on other
 *      CPUs. Depending on the received vector the target CPUs look into
 *      the right per cpu variable for the flush data.
 *
 *      With more than 8 CPUs they are hashed to the 8 available
 *      vectors. The limited global vector space forces us to this right now.
 *      In future when interrupts are split into per CPU domains this could be
 *      fixed, at the cost of triggering multiple IPIs in some cases.
 */

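/*
 * Illustrative sketch of the vector hashing described above (not built, and
 * only uses constants this file already relies on): with 8 flush vectors,
 * CPU 3 and CPU 11 both hash to sender slot 3, so they share the vector
 * INVALIDATE_TLB_VECTOR_START + 3 and the corresponding flush_state entry.
 */
#if 0
static int example_flush_vector(int cpu)
{
        /* Same hashing as flush_tlb_others() and smp_invalidate_interrupt(). */
        int sender = cpu % NUM_INVALIDATE_TLB_VECTORS;

        return INVALIDATE_TLB_VECTOR_START + sender;
}
#endif
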
union smp_flush_state {
        struct {
                cpumask_t flush_cpumask;
                struct mm_struct *flush_mm;
                unsigned long flush_va;
#define FLUSH_ALL       -1ULL
                spinlock_t tlbstate_lock;
        };
        char pad[SMP_CACHE_BYTES];
} ____cacheline_aligned;

/*
 * State is put into the per CPU data section, but padded
 * to a full cache line because other CPUs can access it and we don't
 * want false sharing in the per cpu data segment.
 */
static DEFINE_PER_CPU(union smp_flush_state, flush_state);

/*
 * We cannot call mmdrop() because we are in interrupt context;
 * instead update mm->cpu_vm_mask.
 */
static inline void leave_mm(int cpu)
{
        if (read_pda(mmu_state) == TLBSTATE_OK)
                BUG();
        cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
        load_cr3(swapper_pg_dir);
}

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *      Stop ipi delivery for the old mm. This is not synchronized with
 *      the other cpus, but smp_invalidate_interrupt ignores flush ipis
 *      for the wrong mm, and in the worst case we perform a superfluous
 *      tlb flush.
 * 1a2) set cpu mmu_state to TLBSTATE_OK
 *      Now the smp_invalidate_interrupt won't call leave_mm if cpu0
 *      was in lazy tlb mode.
 * 1a3) update cpu active_mm
 *      Now cpu0 accepts tlb flushes for the new mm.
 * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
 *      Now the other cpus will send tlb flush ipis.
 * 1a5) change cr3.
 * 1b) thread switch without mm change
 *      cpu active_mm is correct, cpu0 already handles
 *      flush ipis.
 * 1b1) set cpu mmu_state to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *      Atomically set the bit [other cpus will start sending flush ipis],
 *      and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%rsp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%rsp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu mmu_state is local to each cpu, no
 * write/read ordering problems.
 */

/*
 * TLB flush IPI:
 *
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 *
 * Interrupts are disabled.
 */

asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
{
        int cpu;
        int sender;
        union smp_flush_state *f;

        cpu = smp_processor_id();
        /*
         * orig_rax contains the negated interrupt vector.
         * Use that to determine where the sender put the data.
         */
        sender = ~regs->orig_rax - INVALIDATE_TLB_VECTOR_START;
        f = &per_cpu(flush_state, sender);

        if (!cpu_isset(cpu, f->flush_cpumask))
                goto out;
                /*
                 * This was a BUG() but until someone can quote me the
                 * line from the Intel manual that guarantees an IPI to
                 * multiple CPUs is retried _only_ on the erroring CPUs,
                 * it's staying as a return.
                 *
                 * BUG();
                 */

        if (f->flush_mm == read_pda(active_mm)) {
                if (read_pda(mmu_state) == TLBSTATE_OK) {
                        if (f->flush_va == FLUSH_ALL)
                                local_flush_tlb();
                        else
                                __flush_tlb_one(f->flush_va);
                } else
                        leave_mm(cpu);
        }
out:
        ack_APIC_irq();
        cpu_clear(cpu, f->flush_cpumask);
        add_pda(irq_tlb_count, 1);
}

static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
                                                unsigned long va)
{
        int sender;
        union smp_flush_state *f;

        /* Caller has disabled preemption */
        sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
        f = &per_cpu(flush_state, sender);

        /*
         * Could avoid this lock when
         * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
         * probably not worth checking this for a cache-hot lock.
         */
        spin_lock(&f->tlbstate_lock);

        f->flush_mm = mm;
        f->flush_va = va;
        cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);

        /* We have to send the IPI only to the CPUs affected. */
        send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);

        while (!cpus_empty(f->flush_cpumask))
                cpu_relax();

        f->flush_mm = NULL;
        f->flush_va = 0;
        spin_unlock(&f->tlbstate_lock);
}

int __cpuinit init_smp_flush(void)
{
        int i;

        for_each_cpu_mask(i, cpu_possible_map) {
                spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);
        }
        return 0;
}

core_initcall(init_smp_flush);

void flush_tlb_current_task(void)
{
        struct mm_struct *mm = current->mm;
        cpumask_t cpu_mask;

        preempt_disable();
        cpu_mask = mm->cpu_vm_mask;
        cpu_clear(smp_processor_id(), cpu_mask);

        local_flush_tlb();
        if (!cpus_empty(cpu_mask))
                flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
        preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_current_task);

void flush_tlb_mm(struct mm_struct *mm)
{
        cpumask_t cpu_mask;

        preempt_disable();
        cpu_mask = mm->cpu_vm_mask;
        cpu_clear(smp_processor_id(), cpu_mask);

        if (current->active_mm == mm) {
                if (current->mm)
                        local_flush_tlb();
                else
                        leave_mm(smp_processor_id());
        }
        if (!cpus_empty(cpu_mask))
                flush_tlb_others(cpu_mask, mm, FLUSH_ALL);

        preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);

void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
        struct mm_struct *mm = vma->vm_mm;
        cpumask_t cpu_mask;

        preempt_disable();
        cpu_mask = mm->cpu_vm_mask;
        cpu_clear(smp_processor_id(), cpu_mask);

        if (current->active_mm == mm) {
                if (current->mm)
                        __flush_tlb_one(va);
                else
                        leave_mm(smp_processor_id());
        }

        if (!cpus_empty(cpu_mask))
                flush_tlb_others(cpu_mask, mm, va);

        preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);

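/*
 * Typical caller pattern (illustrative; 'vma' and 'addr' stand for whatever
 * VMA and user address the caller is working on): code that has just changed
 * or cleared a user PTE for 'addr' inside 'vma' follows up with
 *
 *      flush_tlb_page(vma, addr);
 *
 * so the stale translation is dropped locally and, via flush_tlb_others(),
 * on every other CPU currently using vma->vm_mm.
 */
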
static void do_flush_tlb_all(void *info)
{
        unsigned long cpu = smp_processor_id();

        __flush_tlb_all();
        if (read_pda(mmu_state) == TLBSTATE_LAZY)
                leave_mm(cpu);
}

void flush_tlb_all(void)
{
        on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
}

/*
 * This function sends a 'reschedule' IPI to another CPU.
 * It goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */

void smp_send_reschedule(int cpu)
{
        send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
}

/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
        void (*func) (void *info);
        void *info;
        atomic_t started;
        atomic_t finished;
        int wait;
};

static struct call_data_struct *call_data;

void lock_ipi_call_lock(void)
{
        spin_lock_irq(&call_lock);
}

void unlock_ipi_call_lock(void)
{
        spin_unlock_irq(&call_lock);
}

/*
 * This function sends a 'generic call function' IPI to all other CPUs
 * of the system defined in the mask.
 */

static int
__smp_call_function_mask(cpumask_t mask,
                         void (*func)(void *), void *info,
                         int wait)
{
        struct call_data_struct data;
        cpumask_t allbutself;
        int cpus;

        allbutself = cpu_online_map;
        cpu_clear(smp_processor_id(), allbutself);

        cpus_and(mask, mask, allbutself);
        cpus = cpus_weight(mask);

        if (!cpus)
                return 0;

        data.func = func;
        data.info = info;
        atomic_set(&data.started, 0);
        data.wait = wait;
        if (wait)
                atomic_set(&data.finished, 0);

        call_data = &data;
        wmb();

        /* Send a message to other CPUs */
        if (cpus_equal(mask, allbutself))
                send_IPI_allbutself(CALL_FUNCTION_VECTOR);
        else
                send_IPI_mask(mask, CALL_FUNCTION_VECTOR);

        /* Wait for response */
        while (atomic_read(&data.started) != cpus)
                cpu_relax();

        if (!wait)
                return 0;

        while (atomic_read(&data.finished) != cpus)
                cpu_relax();

        return 0;
}

/**
 * smp_call_function_mask(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on.  Must not include the current cpu.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function_mask(cpumask_t mask,
                           void (*func)(void *), void *info,
                           int wait)
{
        int ret;

        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());

        spin_lock(&call_lock);
        ret = __smp_call_function_mask(mask, func, info, wait);
        spin_unlock(&call_lock);
        return ret;
}
EXPORT_SYMBOL(smp_call_function_mask);

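/*
 * Minimal usage sketch (illustrative only, never built; the helper names
 * below are made up for the example): run a fast, non-blocking function on
 * an explicit subset of CPUs and wait until it has finished everywhere.
 */
#if 0
static void example_poke(void *info)
{
        /* Runs in interrupt context on each targeted CPU; must not sleep. */
        printk(KERN_INFO "poked on CPU %d\n", smp_processor_id());
}

static void example_poke_cpus(int first, int second)
{
        cpumask_t mask;

        cpus_clear(mask);
        cpu_set(first, mask);
        cpu_set(second, mask);

        /* Per the documentation above, @mask must not include this CPU. */
        smp_call_function_mask(mask, example_poke, NULL, 1);
}
#endif
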
/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run @func on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @nonatomic: Currently unused.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * Does not return until the remote CPU is nearly ready to execute <func>,
 * or has already executed it.
 */
int smp_call_function_single(int cpu, void (*func)(void *info), void *info,
        int nonatomic, int wait)
{
        /* prevent preemption and reschedule on another processor */
        int ret;
        int me = get_cpu();

        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());

        if (cpu == me) {
                local_irq_disable();
                func(info);
                local_irq_enable();
                put_cpu();
                return 0;
        }

        ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);

        put_cpu();
        return ret;
}
EXPORT_SYMBOL(smp_call_function_single);

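/*
 * Minimal usage sketch (illustrative only, never built; the helpers below
 * are made up for the example): ask one specific CPU to report which
 * processor it is running on, and wait for the answer.
 */
#if 0
static void example_store_cpu(void *info)
{
        /* Runs in interrupt context on the target CPU; must not sleep. */
        *(int *)info = smp_processor_id();
}

static int example_query_cpu(int cpu)
{
        int where = -1;

        /* nonatomic is unused; wait=1 so 'where' is valid on return. */
        smp_call_function_single(cpu, example_store_cpu, &where, 0, 1);
        return where;
}
#endif
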
/*
 * smp_call_function - run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @nonatomic: currently unused.
 * @wait: If true, wait (atomically) until function has completed on other
 *        CPUs.
 *
 * Returns 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute func or have already executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 * Actually there are a few legal cases, like panic.
 */
int smp_call_function(void (*func)(void *info), void *info, int nonatomic,
                        int wait)
{
        return smp_call_function_mask(cpu_online_map, func, info, wait);
}
EXPORT_SYMBOL(smp_call_function);

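/*
 * Minimal usage sketch (illustrative only, never built; the helpers are made
 * up for the example): smp_call_function() skips the calling CPU, so a caller
 * that wants the work done everywhere also invokes the function locally --
 * the same pattern flush_tlb_all() relies on via on_each_cpu() earlier in
 * this file.
 */
#if 0
static void example_sync_state(void *info)
{
        /* Fast, non-blocking work executed on each CPU. */
        printk(KERN_INFO "syncing on CPU %d\n", smp_processor_id());
}

static void example_sync_all_cpus(void)
{
        /* Must be called with interrupts enabled and not from IRQ context. */
        preempt_disable();
        smp_call_function(example_sync_state, NULL, 0, 1);

        local_irq_disable();
        example_sync_state(NULL);
        local_irq_enable();
        preempt_enable();
}
#endif
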
static void stop_this_cpu(void *dummy)
{
        local_irq_disable();
        /*
         * Remove this CPU:
         */
        cpu_clear(smp_processor_id(), cpu_online_map);
        disable_local_APIC();
        for (;;)
                halt();
}

void smp_send_stop(void)
{
        int nolock;
        unsigned long flags;

        if (reboot_force)
                return;

        /* Don't deadlock on the call lock in panic */
        nolock = !spin_trylock(&call_lock);
        local_irq_save(flags);
        __smp_call_function_mask(cpu_online_map, stop_this_cpu, NULL, 0);
        if (!nolock)
                spin_unlock(&call_lock);
        disable_local_APIC();
        local_irq_restore(flags);
}

/*
 * Reschedule callback. Nothing to do,
 * all the work is done automatically when
 * we return from the interrupt.
 */
asmlinkage void smp_reschedule_interrupt(void)
{
        ack_APIC_irq();
        add_pda(irq_resched_count, 1);
}

asmlinkage void smp_call_function_interrupt(void)
{
        void (*func) (void *info) = call_data->func;
        void *info = call_data->info;
        int wait = call_data->wait;

        ack_APIC_irq();
        /*
         * Notify initiating CPU that I've grabbed the data and am
         * about to execute the function
         */
        mb();
        atomic_inc(&call_data->started);
        /*
         * At this point the info structure may be out of scope unless wait==1
         */
        exit_idle();
        irq_enter();
        (*func)(info);
        add_pda(irq_call_count, 1);
        irq_exit();
        if (wait) {
                mb();
                atomic_inc(&call_data->finished);
        }
}