linux/arch/mips/kernel/smp.c
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>

#include <asm/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/time.h>

#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif /* CONFIG_MIPS_MT_SMTC */

cpumask_t phys_cpu_present_map;         /* Bitmask of available CPUs */
volatile cpumask_t cpu_callin_map;      /* Bitmask of started secondaries */
cpumask_t cpu_online_map;               /* Bitmask of currently online CPUs */
int __cpu_number_map[NR_CPUS];          /* Map physical to logical */
int __cpu_logical_map[NR_CPUS];         /* Map logical to physical */

EXPORT_SYMBOL(phys_cpu_present_map);
EXPORT_SYMBOL(cpu_online_map);

extern void __init calibrate_delay(void);
extern void cpu_idle(void);

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage __cpuinit void start_secondary(void)
{
        unsigned int cpu;

#ifdef CONFIG_MIPS_MT_SMTC
        /* Only do cpu_probe for first TC of CPU */
        if ((read_c0_tcbind() & TCBIND_CURTC) == 0)
#endif /* CONFIG_MIPS_MT_SMTC */
        cpu_probe();
        cpu_report();
        per_cpu_trap_init();
        mips_clockevent_init();
        prom_init_secondary();

        /*
         * XXX parity protection should be folded in here when it's converted
         * to an option instead of something based on .cputype
         */

        calibrate_delay();
        preempt_disable();
        cpu = smp_processor_id();
        cpu_data[cpu].udelay_val = loops_per_jiffy;

        prom_smp_finish();

        cpu_set(cpu, cpu_callin_map);

        cpu_idle();
}

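/*
 * smp_call_lock serialises cross-CPU function calls; while a call is in
 * flight, call_data points at the initiating CPU's call_data_struct so the
 * IPI handler can find the function, its argument and the completion
 * counters.
 */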
DEFINE_SPINLOCK(smp_call_lock);

struct call_data_struct *call_data;

/*
 * Run a function on all other CPUs.
 *
 *  <mask>      cpumask_t of all processors to run the function on.
 *  <func>      The function to run. This must be fast and non-blocking.
 *  <info>      An arbitrary pointer to pass to the function.
 *  <retry>     If true, keep retrying until ready.
 *  <wait>      If true, wait until function has completed on other CPUs.
 *  [RETURNS]   0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>
 * or have already executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler:
 *
 * CPU A                               CPU B
 * Disable interrupts
 *                                     smp_call_function()
 *                                     Take call_lock
 *                                     Send IPIs
 *                                     Wait for all cpus to acknowledge IPI
 *                                     CPU A has not responded, spin waiting
 *                                     for cpu A to respond, holding call_lock
 * smp_call_function()
 * Spin waiting for call_lock
 * Deadlock                            Deadlock
 */
int smp_call_function_mask(cpumask_t mask, void (*func) (void *info),
        void *info, int retry, int wait)
{
        struct call_data_struct data;
        int cpu = smp_processor_id();
        int cpus;

        /*
         * Can die spectacularly if this CPU isn't yet marked online
         */
        BUG_ON(!cpu_online(cpu));

        cpu_clear(cpu, mask);
        cpus = cpus_weight(mask);
        if (!cpus)
                return 0;

        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());

        data.func = func;
        data.info = info;
        atomic_set(&data.started, 0);
        data.wait = wait;
        if (wait)
                atomic_set(&data.finished, 0);

        spin_lock(&smp_call_lock);
        call_data = &data;
        smp_mb();

        /* Send a message to all other CPUs and wait for them to respond */
        core_send_ipi_mask(mask, SMP_CALL_FUNCTION);

        /* Wait for response */
        /* FIXME: lock-up detection, backtrace on lock-up */
        while (atomic_read(&data.started) != cpus)
                barrier();

        if (wait)
                while (atomic_read(&data.finished) != cpus)
                        barrier();
        call_data = NULL;
        spin_unlock(&smp_call_lock);

        return 0;
}

int smp_call_function(void (*func) (void *info), void *info, int retry,
        int wait)
{
        return smp_call_function_mask(cpu_online_map, func, info, retry, wait);
}

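/*
 * Usage sketch (illustrative only, not part of the original file): a caller
 * with interrupts enabled can run a fast, non-blocking handler on every
 * other online CPU and wait for completion.  The names below are
 * hypothetical example code, not kernel APIs.
 */
#if 0
static atomic_t example_acks = ATOMIC_INIT(0);

static void example_ack_ipi(void *info)
{
        /* Runs from smp_call_function_interrupt() on each remote CPU. */
        atomic_inc(&example_acks);
}

static void example_ping_other_cpus(void)
{
        atomic_set(&example_acks, 0);
        /* retry = 1, wait = 1: return only once every other CPU ran the handler. */
        smp_call_function(example_ack_ipi, NULL, 1, 1);
}
#endif
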
void smp_call_function_interrupt(void)
{
        void (*func) (void *info) = call_data->func;
        void *info = call_data->info;
        int wait = call_data->wait;

        /*
         * Notify initiating CPU that I've grabbed the data and am
         * about to execute the function.
         */
        smp_mb();
        atomic_inc(&call_data->started);

        /*
         * At this point the info structure may be out of scope unless wait==1.
         */
        irq_enter();
        (*func)(info);
        irq_exit();

        if (wait) {
                smp_mb();
                atomic_inc(&call_data->finished);
        }
}

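/*
 * Run <func> with <info> on one particular CPU.  If that CPU is the caller
 * itself, the function is executed directly with interrupts disabled;
 * otherwise the call is forwarded through smp_call_function_mask().
 */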
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
                             int retry, int wait)
{
        int ret, me;

        /*
         * Can die spectacularly if this CPU isn't yet marked online
         */
        if (!cpu_online(cpu))
                return 0;

        me = get_cpu();
        BUG_ON(!cpu_online(me));

        if (cpu == me) {
                local_irq_disable();
                func(info);
                local_irq_enable();
                put_cpu();
                return 0;
        }

        ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, retry,
                                     wait);

        put_cpu();
        return 0;
}

static void stop_this_cpu(void *dummy)
{
        /*
         * Remove this CPU:
         */
        cpu_clear(smp_processor_id(), cpu_online_map);
        local_irq_enable();     /* May need to service _machine_restart IPI */
        for (;;);               /* Wait if available. */
}

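/*
 * Tell every other online CPU to take itself out of cpu_online_map and
 * spin (see stop_this_cpu() above).  wait = 0: do not wait for the other
 * CPUs to acknowledge.
 */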
void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 1, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
        prom_cpus_done();
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        init_new_context(current, &init_mm);
        current_thread_info()->cpu = 0;
        plat_prepare_cpus(max_cpus);
#ifndef CONFIG_HOTPLUG_CPU
        cpu_present_map = cpu_possible_map;
#endif
}

/* preload SMP state for boot cpu */
void __devinit smp_prepare_boot_cpu(void)
{
        /*
         * This assumes that bootup is always handled by the processor
         * with the logical and physical number 0.
         */
        __cpu_number_map[0] = 0;
        __cpu_logical_map[0] = 0;
        cpu_set(0, phys_cpu_present_map);
        cpu_set(0, cpu_online_map);
        cpu_set(0, cpu_callin_map);
}

/*
 * Called once for each "cpu_possible(cpu)".  Needs to spin up the cpu
 * and keep control until "cpu_online(cpu)" is set.  Note: cpu is
 * physical, not logical.
 */
int __cpuinit __cpu_up(unsigned int cpu)
{
        struct task_struct *idle;

        /*
         * Processor goes to start_secondary(), sets online flag
         * The following code is purely to make sure
         * Linux can schedule processes on this slave.
         */
        idle = fork_idle(cpu);
        if (IS_ERR(idle))
                panic("Fork failed for CPU %d", cpu);

        prom_boot_secondary(cpu, idle);

        /*
         * Trust is futile.  We should really have timeouts ...
         */
        while (!cpu_isset(cpu, cpu_callin_map))
                udelay(100);

        cpu_set(cpu, cpu_online_map);

        return 0;
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

static void flush_tlb_all_ipi(void *info)
{
        local_flush_tlb_all();
}

void flush_tlb_all(void)
{
        on_each_cpu(flush_tlb_all_ipi, NULL, 1, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
        local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * Special Variant of smp_call_function for use by TLB functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 *  o CONFIG_MIPS_MT_SMTC currently implies there is only one physical core.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
#ifndef CONFIG_MIPS_MT_SMTC
        smp_call_function(func, info, 1, 1);
#endif
}

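/*
 * Run <func> against every CPU's TLB: on all other CPUs via
 * smp_on_other_tlbs(), then locally.  Preemption is disabled so the
 * calling CPU cannot migrate between the remote and the local call.
 */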
static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
        preempt_disable();

        smp_on_other_tlbs(func, info);
        func(info);

        preempt_enable();
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
        preempt_disable();

        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
        } else {
                cpumask_t mask = cpu_online_map;
                unsigned int cpu;

                cpu_clear(smp_processor_id(), mask);
                for_each_cpu_mask(cpu, mask)
                        if (cpu_context(cpu, mm))
                                cpu_context(cpu, mm) = 0;
        }
        local_flush_tlb_mm(mm);

        preempt_enable();
}

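/*
 * Argument block handed to the TLB IPI handlers below: the VMA plus the
 * address (or address range) that the receiving CPU should flush from its
 * own TLB.
 */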
struct flush_tlb_data {
        struct vm_area_struct *vma;
        unsigned long addr1;
        unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
        struct flush_tlb_data *fd = info;

        local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        preempt_disable();
        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                struct flush_tlb_data fd = {
                        .vma = vma,
                        .addr1 = start,
                        .addr2 = end,
                };

                smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
        } else {
                cpumask_t mask = cpu_online_map;
                unsigned int cpu;

                cpu_clear(smp_processor_id(), mask);
                for_each_cpu_mask(cpu, mask)
                        if (cpu_context(cpu, mm))
                                cpu_context(cpu, mm) = 0;
        }
        local_flush_tlb_range(vma, start, end);
        preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
        struct flush_tlb_data *fd = info;

        local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        struct flush_tlb_data fd = {
                .addr1 = start,
                .addr2 = end,
        };

        on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1, 1);
}

static void flush_tlb_page_ipi(void *info)
{
        struct flush_tlb_data *fd = info;

        local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        preempt_disable();
        if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
                struct flush_tlb_data fd = {
                        .vma = vma,
                        .addr1 = page,
                };

                smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
        } else {
                cpumask_t mask = cpu_online_map;
                unsigned int cpu;

                cpu_clear(smp_processor_id(), mask);
                for_each_cpu_mask(cpu, mask)
                        if (cpu_context(cpu, vma->vm_mm))
                                cpu_context(cpu, vma->vm_mm) = 0;
        }
        local_flush_tlb_page(vma, page);
        preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
        unsigned long vaddr = (unsigned long) info;

        local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
        smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);