linux/arch/sparc/kernel/sun4m_smp.c
/*
 *  sun4m SMP support.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/profile.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/cpu.h>

#include <asm/cacheflush.h>
#include <asm/switch_to.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/oplib.h>

#include "irq.h"
#include "kernel.h"

#define IRQ_IPI_SINGLE          12
#define IRQ_IPI_MASK            13
#define IRQ_IPI_RESCHED         14
#define IRQ_CROSS_CALL          15

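/*
 * Atomically exchange 'val' with the word at 'ptr' using the SPARC v8
 * 'swap' instruction; v8 has no compare-and-swap, so 'swap' (and
 * 'ldstub') are the atomic read-modify-write primitives available here.
 */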
static inline unsigned long
swap_ulong(volatile unsigned long *ptr, unsigned long val)
{
        __asm__ __volatile__("swap [%1], %0\n\t" :
                             "=&r" (val), "=&r" (ptr) :
                             "0" (val), "1" (ptr));
        return val;
}

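/*
 * Nothing needs doing before this CPU starts on sun4m; the empty hook
 * keeps the shared sparc32 bring-up path uniform across platforms.
 */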
void __cpuinit sun4m_cpu_pre_starting(void *arg)
{
}

void __cpuinit sun4m_cpu_pre_online(void *arg)
{
        int cpuid = hard_smp_processor_id();

        /* Allow master to continue. The master will then give us the
         * go-ahead by setting the smp_commenced_mask and will wait without
         * timeouts until our setup is completed fully (signified by
         * our bit being set in the cpu_online_mask).
         */
        swap_ulong(&cpu_callin_map[cpuid], 1);

        /* XXX: What's up with all the flushes? */
        local_ops->cache_all();
        local_ops->tlb_all();

        /* Fix idle thread fields. */
        __asm__ __volatile__("ld [%0], %%g6\n\t"
                             : : "r" (&current_set[cpuid])
                             : "memory" /* paranoid */);

        /* Attach to the address space of init_task. */
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;

        while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
                mb();
}

/*
 * Prepare for SMP bring-up; the individual processors are started
 * one at a time by smp4m_boot_one_cpu() below.
 */
void __init smp4m_boot_cpus(void)
{
        sun4m_unmask_profile_irq();
        local_ops->cache_all();
}

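/*
 * Start one secondary CPU: record its idle thread, point 'entry' at
 * this CPU's slot in the startup trampoline (three words per CPU, see
 * trampoline.S), rebuild the PROM context-table descriptor that
 * prom_startcpu() clobbers, ask the PROM to start the CPU, and then
 * poll cpu_callin_map[] for up to ~2 seconds while it comes up.
 */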
int __cpuinit smp4m_boot_one_cpu(int i, struct task_struct *idle)
{
        unsigned long *entry = &sun4m_cpu_startup;
        int timeout;
        int cpu_node;

        cpu_find_by_mid(i, &cpu_node);
        current_set[i] = task_thread_info(idle);

        /* See trampoline.S for details... */
        entry += ((i - 1) * 3);

        /*
         * Initialize the contexts table.
         * Since the call to prom_startcpu() trashes the structure,
         * we need to re-initialize it for each cpu.
         */
        smp_penguin_ctable.which_io = 0;
        smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys;
        smp_penguin_ctable.reg_size = 0;

        /* whirrr, whirrr, whirrrrrrrrr... */
        printk(KERN_INFO "Starting CPU %d at %p\n", i, entry);
        local_ops->cache_all();
        prom_startcpu(cpu_node, &smp_penguin_ctable, 0, (char *)entry);

        /* wheee... it's going... */
        for (timeout = 0; timeout < 10000; timeout++) {
                if (cpu_callin_map[i])
                        break;
                udelay(200);
        }

        if (!(cpu_callin_map[i])) {
                printk(KERN_ERR "Processor %d is stuck.\n", i);
                return -ENODEV;
        }

        local_ops->cache_all();
        return 0;
}

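/*
 * Link each online CPU's cpu_data(i).next to the next online CPU and
 * close the ring back to the first, giving a circular list used for
 * rotating irq distribution.
 */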
void __init smp4m_smp_done(void)
{
        int i, first;
        int *prev;

        /* Set up the cpu list for irq rotation. */
        first = 0;
        prev = &first;
        for_each_online_cpu(i) {
                *prev = i;
                prev = &cpu_data(i).next;
        }
        *prev = first;
        local_ops->cache_all();

        /* Ok, they are spinning and ready to go. */
}

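/*
 * Raise a software interrupt on a target CPU by writing the bit for
 * 'level' into that CPU's interrupt-controller 'set' register.  The
 * wrappers below map the generic IPI types onto the soft-irq levels
 * defined at the top of this file.
 */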
static void sun4m_send_ipi(int cpu, int level)
{
        sbus_writel(SUN4M_SOFT_INT(level), &sun4m_irq_percpu[cpu]->set);
}

static void sun4m_ipi_resched(int cpu)
{
        sun4m_send_ipi(cpu, IRQ_IPI_RESCHED);
}

static void sun4m_ipi_single(int cpu)
{
        sun4m_send_ipi(cpu, IRQ_IPI_SINGLE);
}

static void sun4m_ipi_mask_one(int cpu)
{
        sun4m_send_ipi(cpu, IRQ_IPI_MASK);
}

static struct smp_funcall {
        smpfunc_t func;
        unsigned long arg1;
        unsigned long arg2;
        unsigned long arg3;
        unsigned long arg4;
        unsigned long arg5;
        unsigned long processors_in[SUN4M_NCPUS];  /* Set when ipi entered. */
        unsigned long processors_out[SUN4M_NCPUS]; /* Set when ipi exited. */
} ccall_info;

static DEFINE_SPINLOCK(cross_call_lock);

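/*
 * Cross-call handshake: the caller fills in ccall_info, sends
 * IRQ_CROSS_CALL to every targeted CPU, then spins until each target
 * has set processors_in[] (entered the handler) and finally
 * processors_out[] (finished running func).
 */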
/* Cross calls must be serialized, at least currently. */
static void sun4m_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
                             unsigned long arg2, unsigned long arg3,
                             unsigned long arg4)
{
        register int ncpus = SUN4M_NCPUS;
        unsigned long flags;

        spin_lock_irqsave(&cross_call_lock, flags);

        /* Init function glue. */
        ccall_info.func = func;
        ccall_info.arg1 = arg1;
        ccall_info.arg2 = arg2;
        ccall_info.arg3 = arg3;
        ccall_info.arg4 = arg4;
        ccall_info.arg5 = 0;

        /* Init receive/complete mapping, plus fire the IPIs off. */
        {
                register int i;

                cpumask_clear_cpu(smp_processor_id(), &mask);
                cpumask_and(&mask, cpu_online_mask, &mask);
                for (i = 0; i < ncpus; i++) {
                        if (cpumask_test_cpu(i, &mask)) {
                                ccall_info.processors_in[i] = 0;
                                ccall_info.processors_out[i] = 0;
                                sun4m_send_ipi(i, IRQ_CROSS_CALL);
                        } else {
                                ccall_info.processors_in[i] = 1;
                                ccall_info.processors_out[i] = 1;
                        }
                }
        }

        {
                register int i;

                /*
                 * Note: 'continue' in a do-while still evaluates the
                 * '++i < ncpus' controlling expression, so i advances.
                 */
                i = 0;
                do {
                        if (!cpumask_test_cpu(i, &mask))
                                continue;
                        while (!ccall_info.processors_in[i])
                                barrier();
                } while (++i < ncpus);

                i = 0;
                do {
                        if (!cpumask_test_cpu(i, &mask))
                                continue;
                        while (!ccall_info.processors_out[i])
                                barrier();
                } while (++i < ncpus);
        }
        spin_unlock_irqrestore(&cross_call_lock, flags);
}

/* Running cross calls. */
void smp4m_cross_call_irq(void)
{
        int i = smp_processor_id();

        ccall_info.processors_in[i] = 1;
        ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3,
                        ccall_info.arg4, ccall_info.arg5);
        ccall_info.processors_out[i] = 1;
}

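/*
 * Per-CPU timer tick: acknowledge the profile timer and hand the
 * event to the generic clockevent layer.
 */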
void smp4m_percpu_timer_interrupt(struct pt_regs *regs)
{
        struct pt_regs *old_regs;
        struct clock_event_device *ce;
        int cpu = smp_processor_id();

        old_regs = set_irq_regs(regs);

        ce = &per_cpu(sparc32_clockevent, cpu);

        /*
         * CLOCK_EVT_MODE_PERIODIC is an enum value, not a bit flag, so
         * test with '==' rather than '&' (which would also match
         * CLOCK_EVT_MODE_ONESHOT).
         */
        if (ce->mode == CLOCK_EVT_MODE_PERIODIC)
                sun4m_clear_profile_irq(cpu);
        else
                sparc_config.load_profile_irq(cpu, 0); /* Is this needless? */

        irq_enter();
        ce->event_handler(ce);
        irq_exit();

        set_irq_regs(old_regs);
}

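/* Wire the sun4m implementations into the generic sparc32 IPI dispatch. */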
static const struct sparc32_ipi_ops sun4m_ipi_ops = {
        .cross_call = sun4m_cross_call,
        .resched    = sun4m_ipi_resched,
        .single     = sun4m_ipi_single,
        .mask_one   = sun4m_ipi_mask_one,
};

void __init sun4m_init_smp(void)
{
        sparc32_ipi_ops = &sun4m_ipi_ops;
}