linux/arch/mips/kernel/smp.c
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/ftrace.h>

#include <linux/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/idle.h>
#include <asm/r4k-timer.h>
#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/setup.h>

volatile cpumask_t cpu_callin_map;      /* Bitmask of started secondaries */

int __cpu_number_map[NR_CPUS];          /* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS];         /* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);

/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

cpumask_t cpu_coherent_mask;

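/*
 * Record @cpu in the sibling maps: every CPU already set up that shares
 * @cpu's core becomes its sibling and vice versa.  With a single TC per
 * core, a CPU is simply its own sibling.
 */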
static inline void set_cpu_sibling_map(int cpu)
{
        int i;

        cpu_set(cpu, cpu_sibling_setup_map);

        if (smp_num_siblings > 1) {
                for_each_cpu_mask(i, cpu_sibling_setup_map) {
                        if (cpu_data[cpu].core == cpu_data[i].core) {
                                cpu_set(i, cpu_sibling_map[cpu]);
                                cpu_set(cpu, cpu_sibling_map[i]);
                        }
                }
        } else
                cpu_set(cpu, cpu_sibling_map[cpu]);
}

struct plat_smp_ops *mp_ops;
EXPORT_SYMBOL(mp_ops);

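/*
 * Install the platform's SMP method table.  Registering a second set of
 * ops only warns; the new ops replace the old.  A minimal sketch of a
 * caller, with a hypothetical ops table, would be:
 *
 *      static struct plat_smp_ops my_smp_ops = {
 *              .send_ipi_single = my_send_ipi_single,
 *              ...
 *      };
 *      register_smp_ops(&my_smp_ops);
 *
 * invoked from the platform's early setup code.
 */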
void register_smp_ops(struct plat_smp_ops *ops)
{
        if (mp_ops)
                printk(KERN_WARNING "Overriding previously set SMP ops\n");

        mp_ops = ops;
}

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
        unsigned int cpu;

        cpu_probe();
        cpu_report();
        per_cpu_trap_init(false);
        mips_clockevent_init();
        mp_ops->init_secondary();

        /*
         * XXX parity protection should be folded in here when it's converted
         * to an option instead of something based on .cputype
         */

        calibrate_delay();
        preempt_disable();
        cpu = smp_processor_id();
        cpu_data[cpu].udelay_val = loops_per_jiffy;

        cpu_set(cpu, cpu_coherent_mask);
        notify_cpu_starting(cpu);

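        /*
         * We become visible as online before our bit in cpu_callin_map
         * is set; the latter is what the boot CPU polls in __cpu_up().
         */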
        set_cpu_online(cpu, true);

        set_cpu_sibling_map(cpu);

        cpu_set(cpu, cpu_callin_map);

        synchronise_count_slave(cpu);

        /*
         * IRQs will be enabled in ->smp_finish(); enabling them any
         * earlier than that is dangerous.
         */
        WARN_ON_ONCE(!irqs_disabled());
        mp_ops->smp_finish();

        cpu_startup_entry(CPUHP_ONLINE);
}

/*
 * We share a single IPI for the SMP call-function paths; the generic
 * handler below drains the combined call queue, covering both the
 * multi-CPU and single-CPU call variants.
 */
void __irq_entry smp_call_function_interrupt(void)
{
        irq_enter();
        generic_smp_call_function_interrupt();
        irq_exit();
}

static void stop_this_cpu(void *dummy)
{
        /*
         * Remove this CPU:
         */
        set_cpu_online(smp_processor_id(), false);
        for (;;) {
                if (cpu_wait)
                        (*cpu_wait)();          /* Wait if available. */
        }
}

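/*
 * Park every other CPU in stop_this_cpu(); used on shutdown paths.
 * The final argument 0 means we do not wait for the IPIs to complete.
 */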
void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 0);
}

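/* Generic code calls this once all CPUs are up; nothing to finish on MIPS. */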
void __init smp_cpus_done(unsigned int max_cpus)
{
}

/* called from init/main.c before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        init_new_context(current, &init_mm);
        current_thread_info()->cpu = 0;
        mp_ops->prepare_cpus(max_cpus);
        set_cpu_sibling_map(0);
#ifndef CONFIG_HOTPLUG_CPU
        init_cpu_present(cpu_possible_mask);
#endif
        cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
}

/* preload SMP state for the boot CPU */
void smp_prepare_boot_cpu(void)
{
        set_cpu_possible(0, true);
        set_cpu_online(0, true);
        cpu_set(0, cpu_callin_map);
}

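/*
 * Boot one secondary via the platform ops, spin until it has set its
 * bit in cpu_callin_map, then synchronise the cycle counters with it.
 */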
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
        mp_ops->boot_secondary(cpu, tidle);

        /*
         * Trust is futile.  We should really have timeouts ...
         */
        while (!cpu_isset(cpu, cpu_callin_map))
                udelay(100);

        synchronise_count_master(cpu);
        return 0;
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

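/*
 * Flush the entire TLB on every online CPU.  on_each_cpu() also runs
 * the helper locally, so the calling CPU is covered too.
 */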
static void flush_tlb_all_ipi(void *info)
{
        local_flush_tlb_all();
}

void flush_tlb_all(void)
{
        on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
        local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * Special variant of smp_call_function for use by TLB functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
        smp_call_function(func, info, 1);
}

static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
        preempt_disable();

        smp_on_other_tlbs(func, info);
        func(info);

        preempt_enable();
}

/*
 * The following TLB flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single-threaded
 * address spaces, a new context is obtained on the current cpu, and the
 * tlb contexts on other cpus are invalidated to force a new context
 * allocation at switch_mm time, should the mm ever be used on other
 * cpus. For multithreaded address spaces, inter-cpu interrupts have to
 * be sent. Another case where inter-cpu interrupts are required is when
 * the target mm might be active on another cpu (e.g. debuggers doing
 * the flushes on behalf of debuggees, kswapd stealing pages from
 * another process etc).
 * Kanoj 07/00.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
        preempt_disable();

        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
        } else {
                unsigned int cpu;

                for_each_online_cpu(cpu) {
                        if (cpu != smp_processor_id() && cpu_context(cpu, mm))
                                cpu_context(cpu, mm) = 0;
                }
        }
        local_flush_tlb_mm(mm);

        preempt_enable();
}

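/* Argument block handed to the ranged/page TLB flush IPI helpers below. */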
struct flush_tlb_data {
        struct vm_area_struct *vma;
        unsigned long addr1;
        unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
        struct flush_tlb_data *fd = info;

        local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        preempt_disable();
        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                struct flush_tlb_data fd = {
                        .vma = vma,
                        .addr1 = start,
                        .addr2 = end,
                };

                smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
        } else {
                unsigned int cpu;

                for_each_online_cpu(cpu) {
                        if (cpu != smp_processor_id() && cpu_context(cpu, mm))
                                cpu_context(cpu, mm) = 0;
                }
        }
        local_flush_tlb_range(vma, start, end);
        preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
        struct flush_tlb_data *fd = info;

        local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        struct flush_tlb_data fd = {
                .addr1 = start,
                .addr2 = end,
        };

        on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
        struct flush_tlb_data *fd = info;

        local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        preempt_disable();
        if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
                struct flush_tlb_data fd = {
                        .vma = vma,
                        .addr1 = page,
                };

                smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
        } else {
                unsigned int cpu;

                for_each_online_cpu(cpu) {
                        if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
                                cpu_context(cpu, vma->vm_mm) = 0;
                }
        }
        local_flush_tlb_page(vma, page);
        preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
        unsigned long vaddr = (unsigned long) info;

        local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
        smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);

#if defined(CONFIG_KEXEC)
void (*dump_ipi_function_ptr)(void *) = NULL;
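/*
 * Publish the dump callback through dump_ipi_function_ptr, then raise
 * the SMP_DUMP IPI on every other online CPU.  The smp_mb() ensures the
 * pointer is visible before any of those IPIs can be taken.
 */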
void dump_send_ipi(void (*dump_ipi_callback)(void *))
{
        int i;
        int cpu = smp_processor_id();

        dump_ipi_function_ptr = dump_ipi_callback;
        smp_mb();
        for_each_online_cpu(i)
                if (i != cpu)
                        mp_ops->send_ipi_single(i, SMP_DUMP);
}
EXPORT_SYMBOL(dump_send_ipi);
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST

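/*
 * Tick broadcasts are delivered through async single-CPU IPIs.  The
 * per-cpu counter guarantees that a call_single_data still in flight is
 * never resubmitted: only the 0 -> 1 transition sends the IPI, and the
 * callee resets the counter once the tick has been handled.
 */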
static DEFINE_PER_CPU(atomic_t, tick_broadcast_count);
static DEFINE_PER_CPU(struct call_single_data, tick_broadcast_csd);

void tick_broadcast(const struct cpumask *mask)
{
        atomic_t *count;
        struct call_single_data *csd;
        int cpu;

        for_each_cpu(cpu, mask) {
                count = &per_cpu(tick_broadcast_count, cpu);
                csd = &per_cpu(tick_broadcast_csd, cpu);

                if (atomic_inc_return(count) == 1)
                        smp_call_function_single_async(cpu, csd);
        }
}

static void tick_broadcast_callee(void *info)
{
        int cpu = smp_processor_id();
        tick_receive_broadcast();
        atomic_set(&per_cpu(tick_broadcast_count, cpu), 0);
}

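/* Point each CPU's broadcast CSD at the receive handler, once, at boot. */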
static int __init tick_broadcast_init(void)
{
        struct call_single_data *csd;
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                csd = &per_cpu(tick_broadcast_csd, cpu);
                csd->func = tick_broadcast_callee;
        }

        return 0;
}
early_initcall(tick_broadcast_init);

#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */