linux/arch/mips/kernel/smp.c
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/ftrace.h>

#include <asm/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/r4k-timer.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/time.h>

#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif /* CONFIG_MIPS_MT_SMTC */

volatile cpumask_t cpu_callin_map;      /* Bitmask of started secondaries */

int __cpu_number_map[NR_CPUS];          /* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS];         /* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);

/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

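/*
 * Mark @cpu as set up and link it with every already set up CPU that
 * shares its physical core, so each sees the other as a sibling.
 */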
static inline void set_cpu_sibling_map(int cpu)
{
        int i;

        cpu_set(cpu, cpu_sibling_setup_map);

        if (smp_num_siblings > 1) {
                for_each_cpu_mask(i, cpu_sibling_setup_map) {
                        if (cpu_data[cpu].core == cpu_data[i].core) {
                                cpu_set(i, cpu_sibling_map[cpu]);
                                cpu_set(cpu, cpu_sibling_map[i]);
                        }
                }
        } else
                cpu_set(cpu, cpu_sibling_map[cpu]);
}

struct plat_smp_ops *mp_ops;

__cpuinit void register_smp_ops(struct plat_smp_ops *ops)
{
        if (mp_ops)
                printk(KERN_WARNING "Overriding previously set SMP ops\n");

        mp_ops = ops;
}

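/*
 * Usage sketch (illustrative only; the ops structure name varies by
 * platform): board setup code registers its SMP methods early, e.g.
 *
 *      extern struct plat_smp_ops vsmp_smp_ops;
 *      register_smp_ops(&vsmp_smp_ops);
 */
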
/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage __cpuinit void start_secondary(void)
{
        unsigned int cpu;

#ifdef CONFIG_MIPS_MT_SMTC
        /* Only do cpu_probe for first TC of CPU */
        if ((read_c0_tcbind() & TCBIND_CURTC) == 0)
#endif /* CONFIG_MIPS_MT_SMTC */
        cpu_probe();
        cpu_report();
        per_cpu_trap_init();
        mips_clockevent_init();
        mp_ops->init_secondary();

        /*
         * XXX parity protection should be folded in here when it's converted
         * to an option instead of something based on .cputype
         */

        calibrate_delay();
        preempt_disable();
        cpu = smp_processor_id();
        cpu_data[cpu].udelay_val = loops_per_jiffy;

        notify_cpu_starting(cpu);

        mp_ops->smp_finish();
        set_cpu_sibling_map(cpu);

        cpu_set(cpu, cpu_callin_map);

        synchronise_count_slave();

        cpu_idle();
}

/*
 * Call into both interrupt handlers, as we share the IPI for them
 */
void __irq_entry smp_call_function_interrupt(void)
{
        irq_enter();
        generic_smp_call_function_single_interrupt();
        generic_smp_call_function_interrupt();
        irq_exit();
}

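/*
 * IPI handler for smp_send_stop(): take this CPU out of the online
 * map, then spin forever, using the platform wait instruction when
 * one is available.
 */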
static void stop_this_cpu(void *dummy)
{
        /*
         * Remove this CPU:
         */
        cpu_clear(smp_processor_id(), cpu_online_map);
        for (;;) {
                if (cpu_wait)
                        (*cpu_wait)();          /* Wait if available. */
        }
}

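/*
 * Stop all other CPUs, e.g. for panic or reboot.  The final argument
 * of 0 means we do not wait for the IPI handlers to complete.
 */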
void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 0);
}

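/*
 * Final bring-up hook: give the platform code a chance to finish, then
 * synchronise the cycle counters of the secondaries with the master.
 */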
void __init smp_cpus_done(unsigned int max_cpus)
{
        mp_ops->cpus_done();
        synchronise_count_master();
}

/* Called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        init_new_context(current, &init_mm);
        current_thread_info()->cpu = 0;
        mp_ops->prepare_cpus(max_cpus);
        set_cpu_sibling_map(0);
#ifndef CONFIG_HOTPLUG_CPU
        init_cpu_present(&cpu_possible_map);
#endif
}

/* Preload SMP state for the boot CPU */
void __devinit smp_prepare_boot_cpu(void)
{
        set_cpu_possible(0, true);
        set_cpu_online(0, true);
        cpu_set(0, cpu_callin_map);
}

/*
 * Called once for each "cpu_possible(cpu)".  Needs to spin up the cpu
 * and keep control until "cpu_online(cpu)" is set.  Note: cpu is
 * physical, not logical.
 */
static struct task_struct *cpu_idle_thread[NR_CPUS];

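/*
 * Bundles the arguments for fork_idle() with a completion so the fork
 * can be handed off to a workqueue and waited on from __cpu_up().
 */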
struct create_idle {
        struct work_struct work;
        struct task_struct *idle;
        struct completion done;
        int cpu;
};

static void __cpuinit do_fork_idle(struct work_struct *work)
{
        struct create_idle *c_idle =
                container_of(work, struct create_idle, work);

        c_idle->idle = fork_idle(c_idle->cpu);
        complete(&c_idle->done);
}

int __cpuinit __cpu_up(unsigned int cpu)
{
        struct task_struct *idle;

        /*
         * The processor goes to start_secondary() and sets its online
         * flag.  The following code merely makes sure Linux can
         * schedule processes on this secondary CPU.
         */
        if (!cpu_idle_thread[cpu]) {
                /*
                 * Do the fork from a work item so the idle thread is
                 * not a child of the current (user) task.  Ported from
                 * arch/x86/kernel/smpboot.c.
                 */
                struct create_idle c_idle = {
                        .cpu    = cpu,
                        .done   = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
                };

                INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
                schedule_work(&c_idle.work);
                wait_for_completion(&c_idle.done);
                idle = cpu_idle_thread[cpu] = c_idle.idle;

                if (IS_ERR(idle))
                        panic("Fork failed for CPU %d", cpu);
        } else {
                idle = cpu_idle_thread[cpu];
                init_idle(idle, cpu);
        }

        mp_ops->boot_secondary(cpu, idle);

        /*
         * Trust is futile.  We should really have timeouts ...
         */
        while (!cpu_isset(cpu, cpu_callin_map))
                udelay(100);

        cpu_set(cpu, cpu_online_map);

        return 0;
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

static void flush_tlb_all_ipi(void *info)
{
        local_flush_tlb_all();
}

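/*
 * Flush the entire TLB on every online CPU.  on_each_cpu() runs the
 * handler on the other CPUs via IPI and calls it locally as well.
 */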
void flush_tlb_all(void)
{
        on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
        local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * Special Variant of smp_call_function for use by TLB functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 *  o CONFIG_MIPS_MT_SMTC currently implies there is only one physical core.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
#ifndef CONFIG_MIPS_MT_SMTC
        smp_call_function(func, info, 1);
#endif
}

static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
        preempt_disable();

        smp_on_other_tlbs(func, info);
        func(info);

        preempt_enable();
}

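/*
 * Usage sketch: each flush_tlb_*() below hands its matching *_ipi()
 * handler to smp_on_other_tlbs() and then performs the same flush
 * locally, e.g.
 *
 *      smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
 *      local_flush_tlb_mm(mm);
 *
 * smp_on_each_tlb() wraps exactly that pattern for handlers whose
 * local and remote forms are identical.
 */
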
/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus is invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
        preempt_disable();

        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
        } else {
                cpumask_t mask = cpu_online_map;
                unsigned int cpu;

                cpu_clear(smp_processor_id(), mask);
                for_each_cpu_mask(cpu, mask)
                        if (cpu_context(cpu, mm))
                                cpu_context(cpu, mm) = 0;
        }
        local_flush_tlb_mm(mm);

        preempt_enable();
}

struct flush_tlb_data {
        struct vm_area_struct *vma;
        unsigned long addr1;
        unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
        struct flush_tlb_data *fd = info;

        local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        preempt_disable();
        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                struct flush_tlb_data fd = {
                        .vma = vma,
                        .addr1 = start,
                        .addr2 = end,
                };

                smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
        } else {
                cpumask_t mask = cpu_online_map;
                unsigned int cpu;

                cpu_clear(smp_processor_id(), mask);
                for_each_cpu_mask(cpu, mask)
                        if (cpu_context(cpu, mm))
                                cpu_context(cpu, mm) = 0;
        }
        local_flush_tlb_range(vma, start, end);
        preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
        struct flush_tlb_data *fd = info;

        local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

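/*
 * Kernel mappings are shared by every address space, so there is no
 * lazy-context shortcut here: the range must be flushed on all CPUs.
 */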
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        struct flush_tlb_data fd = {
                .addr1 = start,
                .addr2 = end,
        };

        on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
        struct flush_tlb_data *fd = info;

        local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        preempt_disable();
        if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
                struct flush_tlb_data fd = {
                        .vma = vma,
                        .addr1 = page,
                };

                smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
        } else {
                cpumask_t mask = cpu_online_map;
                unsigned int cpu;

                cpu_clear(smp_processor_id(), mask);
                for_each_cpu_mask(cpu, mask)
                        if (cpu_context(cpu, vma->vm_mm))
                                cpu_context(cpu, vma->vm_mm) = 0;
        }
        local_flush_tlb_page(vma, page);
        preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
        unsigned long vaddr = (unsigned long) info;

        local_flush_tlb_one(vaddr);
}

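/*
 * Flush the translation for a single virtual address on every CPU;
 * the address itself is passed through the IPI's void pointer.
 */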
void flush_tlb_one(unsigned long vaddr)
{
        smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);