linux/arch/sparc/kernel/sun4d_smp.c
/* Sparc SS1000/SC2000 SMP support.
 *
 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 *
 * Based on sun4m's smp.c, which is:
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/interrupt.h>
#include <linux/profile.h>
#include <linux/delay.h>
#include <linux/cpu.h>

#include <asm/sbi.h>
#include <asm/mmu.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "kernel.h"
#include "irq.h"

#define IRQ_CROSS_CALL          15

static volatile int smp_processors_ready;
static int smp_highest_cpu;

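/*
 * Atomically exchange a register with a word in memory using the SPARC
 * "swap" instruction.  Used below to publish cpu_callin_map[] entries
 * so the master CPU sees the update as a single atomic store.
 */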
static inline unsigned long sun4d_swap(volatile unsigned long *ptr, unsigned long val)
{
        __asm__ __volatile__("swap [%1], %0\n\t" :
                             "=&r" (val), "=&r" (ptr) :
                             "0" (val), "1" (ptr));
        return val;
}

static void smp_setup_percpu_timer(void);

static unsigned char cpu_leds[32];

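/*
 * Update the CPU LEDs.  The two CPUs of an even/odd pair share one LED
 * register (BB_LEDS), addressed through the even CPU's ECSR space: the
 * even CPU's pattern goes in the high nibble and the odd CPU's in the
 * low nibble, hence the cpuid &= 0x1e below.
 */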
static inline void show_leds(int cpuid)
{
        cpuid &= 0x1e;
        __asm__ __volatile__ ("stba %0, [%1] %2" : :
                              "r" ((cpu_leds[cpuid] << 4) | cpu_leds[cpuid+1]),
                              "r" (ECSR_BASE(cpuid) | BB_LEDS),
                              "i" (ASI_M_CTL));
}

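/*
 * Entry point for a secondary CPU once the PROM has started it: flush
 * its caches and TLB, set up the per-CPU timer and interrupt mask,
 * report in via cpu_callin_map[], then spin until the master adds it
 * to smp_commenced_mask and finally mark itself online.
 */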
void __cpuinit smp4d_callin(void)
{
        int cpuid = hard_smp4d_processor_id();
        unsigned long flags;

        /* Show we are alive */
        cpu_leds[cpuid] = 0x6;
        show_leds(cpuid);

        /* Enable level15 interrupt, disable level14 interrupt for now */
        cc_set_imsk((cc_get_imsk() & ~0x8000) | 0x4000);

        local_flush_cache_all();
        local_flush_tlb_all();

        notify_cpu_starting(cpuid);
        /*
         * Unblock the master CPU _only_ once the scheduler state of
         * all secondary CPUs is up to date, so that after SMP
         * initialization the master can safely enter the scheduler.
         */
        /* Get our local ticker going. */
        smp_setup_percpu_timer();

        calibrate_delay();
        smp_store_cpu_info(cpuid);
        local_flush_cache_all();
        local_flush_tlb_all();

        /* Allow master to continue. */
        sun4d_swap((unsigned long *)&cpu_callin_map[cpuid], 1);
        local_flush_cache_all();
        local_flush_tlb_all();

        cpu_probe();

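        /*
         * Wait until the master has installed this CPU's idle
         * thread_info in current_set[] (a valid kernel address, i.e.
         * >= PAGE_OFFSET) and it really is the entry for this cpuid.
         */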
        while ((unsigned long)current_set[cpuid] < PAGE_OFFSET)
                barrier();

        while (current_set[cpuid]->cpu != cpuid)
                barrier();

        /* Fix idle thread fields. */
        __asm__ __volatile__("ld [%0], %%g6\n\t"
                             : : "r" (&current_set[cpuid])
                             : "memory" /* paranoid */);

        cpu_leds[cpuid] = 0x9;
        show_leds(cpuid);

        /* Attach to the address space of init_task. */
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;

        local_flush_cache_all();
        local_flush_tlb_all();

        local_irq_enable();     /* We don't allow PIL 14 yet */

        while (!cpu_isset(cpuid, smp_commenced_mask))
                barrier();

        spin_lock_irqsave(&sun4d_imsk_lock, flags);
        cc_set_imsk(cc_get_imsk() & ~0x4000); /* Allow PIL 14 as well */
        spin_unlock_irqrestore(&sun4d_imsk_lock, flags);
        set_cpu_online(cpuid, true);
}

/*
 *      Prepare the boot CPU; the secondary processors are started one
 *      at a time by smp4d_boot_one_cpu() below, which asks the PROM to
 *      start each one.
 */
void __init smp4d_boot_cpus(void)
{
        if (boot_cpu_id)
                current_set[0] = NULL;
        smp_setup_percpu_timer();
        local_flush_cache_all();
}

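/*
 * Start one secondary CPU: fork its idle task, point current_set[] at
 * the idle task's thread_info, refresh smp_penguin_ctable with the
 * physical address of the srmmu context table, have the PROM start the
 * CPU at sun4d_cpu_startup, and then wait up to ~2 s (10000 * 200 us)
 * for it to appear in cpu_callin_map[].
 */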
int __cpuinit smp4d_boot_one_cpu(int i)
{
        unsigned long *entry = &sun4d_cpu_startup;
        struct task_struct *p;
        int timeout;
        int cpu_node;

        cpu_find_by_instance(i, &cpu_node, NULL);
        /* Cook up an idler for this guy. */
        p = fork_idle(i);
        current_set[i] = task_thread_info(p);

        /*
         * Initialize the contexts table.  Since the call to
         * prom_startcpu() trashes the structure, we need to
         * re-initialize it for each cpu.
         */
        smp_penguin_ctable.which_io = 0;
        smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys;
        smp_penguin_ctable.reg_size = 0;

        /* whirrr, whirrr, whirrrrrrrrr... */
        printk(KERN_INFO "Starting CPU %d at %p\n", i, entry);
        local_flush_cache_all();
        prom_startcpu(cpu_node,
                      &smp_penguin_ctable, 0, (char *)entry);

        printk(KERN_INFO "prom_startcpu returned :)\n");

        /* wheee... it's going... */
        for (timeout = 0; timeout < 10000; timeout++) {
                if (cpu_callin_map[i])
                        break;
                udelay(200);
        }

        if (!(cpu_callin_map[i])) {
                printk(KERN_ERR "Processor %d is stuck.\n", i);
                return -ENODEV;
        }
        local_flush_cache_all();
        return 0;
}

void __init smp4d_smp_done(void)
{
        int i, first;
        int *prev;

        /* setup cpu list for irq rotation */
        first = 0;
        prev = &first;
        for_each_online_cpu(i) {
                *prev = i;
                prev = &cpu_data(i).next;
        }
        *prev = first;
        local_flush_cache_all();

        /* Ok, they are spinning and ready to go. */
        smp_processors_ready = 1;
        sun4d_distribute_irqs();
}

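/*
 * Cross-call mailbox.  The caller stores the function and its arguments
 * into ccall_info, clears the per-CPU in/out flags for every target,
 * and sends an IRQ_CROSS_CALL IPI to each of them.  Each target sets
 * processors_in[] on entry to the handler and processors_out[] when the
 * function has finished; the caller spins on both arrays before the
 * mailbox may be reused.
 */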
static struct smp_funcall {
        smpfunc_t func;
        unsigned long arg1;
        unsigned long arg2;
        unsigned long arg3;
        unsigned long arg4;
        unsigned long arg5;
        unsigned char processors_in[NR_CPUS];  /* Set when ipi entered. */
        unsigned char processors_out[NR_CPUS]; /* Set when ipi exited. */
} ccall_info __attribute__((aligned(8)));

static DEFINE_SPINLOCK(cross_call_lock);

/* Cross calls must be serialized, at least currently. */
static void smp4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
                             unsigned long arg2, unsigned long arg3,
                             unsigned long arg4)
{
        if (smp_processors_ready) {
                register int high = smp_highest_cpu;
                unsigned long flags;

                spin_lock_irqsave(&cross_call_lock, flags);

                {
                        /*
                         * If you make changes here, make sure gcc still
                         * generates proper code: the "std" stores below
                         * need each value pair in consecutive even/odd
                         * registers, which is why the arguments are
                         * pinned to %i0..%i5.
                         */
                        register smpfunc_t f asm("i0") = func;
                        register unsigned long a1 asm("i1") = arg1;
                        register unsigned long a2 asm("i2") = arg2;
                        register unsigned long a3 asm("i3") = arg3;
                        register unsigned long a4 asm("i4") = arg4;
                        register unsigned long a5 asm("i5") = 0;

                        __asm__ __volatile__(
                                "std %0, [%6]\n\t"
                                "std %2, [%6 + 8]\n\t"
                                "std %4, [%6 + 16]\n\t" : :
                                "r"(f), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5),
                                "r" (&ccall_info.func));
                }

                /* Init receive/complete mapping, plus fire the IPIs off. */
                {
                        register int i;

                        cpu_clear(smp_processor_id(), mask);
                        cpus_and(mask, cpu_online_map, mask);
                        for (i = 0; i <= high; i++) {
                                if (cpu_isset(i, mask)) {
                                        ccall_info.processors_in[i] = 0;
                                        ccall_info.processors_out[i] = 0;
                                        sun4d_send_ipi(i, IRQ_CROSS_CALL);
                                }
                        }
                }

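                /*
                 * Wait for every targeted CPU to enter the handler, then
                 * for every one to finish, before releasing the mailbox.
                 */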
                {
                        register int i;

                        i = 0;
                        do {
                                if (!cpu_isset(i, mask))
                                        continue;
                                while (!ccall_info.processors_in[i])
                                        barrier();
                        } while (++i <= high);

                        i = 0;
                        do {
                                if (!cpu_isset(i, mask))
                                        continue;
                                while (!ccall_info.processors_out[i])
                                        barrier();
                        } while (++i <= high);
                }

                spin_unlock_irqrestore(&cross_call_lock, flags);
        }
}

/* Running cross calls. */
void smp4d_cross_call_irq(void)
{
        int i = hard_smp4d_processor_id();

        ccall_info.processors_in[i] = 1;
        ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3,
                        ccall_info.arg4, ccall_info.arg5);
        ccall_info.processors_out[i] = 1;
}

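/*
 * Per-CPU level-14 profile timer handler: acknowledge the profile
 * interrupt, cycle this CPU's LED pattern once every 16 ticks, and
 * drive profiling and process time accounting via prof_counter() and
 * prof_multiplier().
 */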
void smp4d_percpu_timer_interrupt(struct pt_regs *regs)
{
        struct pt_regs *old_regs;
        int cpu = hard_smp4d_processor_id();
        static int cpu_tick[NR_CPUS];
        static char led_mask[] = { 0xe, 0xd, 0xb, 0x7, 0xb, 0xd };

        old_regs = set_irq_regs(regs);
        bw_get_prof_limit(cpu);
        bw_clear_intr_mask(0, 1);       /* INTR_TABLE[0] & 1 is Profile IRQ */

        cpu_tick[cpu]++;
        if (!(cpu_tick[cpu] & 15)) {
                if (cpu_tick[cpu] == 0x60)
                        cpu_tick[cpu] = 0;
                cpu_leds[cpu] = led_mask[cpu_tick[cpu] >> 4];
                show_leds(cpu);
        }

        profile_tick(CPU_PROFILING);

        if (!--prof_counter(cpu)) {
                int user = user_mode(regs);

                irq_enter();
                update_process_times(user);
                irq_exit();

                prof_counter(cpu) = prof_multiplier(cpu);
        }
        set_irq_regs(old_regs);
}

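/* Program this CPU's level-14 profile timer and reset its profiling counters. */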
static void __cpuinit smp_setup_percpu_timer(void)
{
        int cpu = hard_smp4d_processor_id();

        prof_counter(cpu) = prof_multiplier(cpu) = 1;
        load_profile_irq(cpu, lvl14_resolution);
}

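/*
 * btfixup "blackboxes": these routines patch instruction templates in
 * place at boot.  The destination-register field (bits 29:25) of the
 * template is extracted into 'rd' and merged back into the replacement
 * instructions, so the patched code delivers its result in whatever
 * register the call site expects.
 */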
void __init smp4d_blackbox_id(unsigned *addr)
{
        int rd = *addr & 0x3e000000;

        addr[0] = 0xc0800800 | rd;              /* lda [%g0] ASI_M_VIKING_TMP1, reg */
        addr[1] = 0x01000000;                   /* nop */
        addr[2] = 0x01000000;                   /* nop */
}

void __init smp4d_blackbox_current(unsigned *addr)
{
        int rd = *addr & 0x3e000000;

        addr[0] = 0xc0800800 | rd;              /* lda [%g0] ASI_M_VIKING_TMP1, reg */
        addr[2] = 0x81282002 | rd | (rd >> 11); /* sll reg, 2, reg */
        addr[4] = 0x01000000;                   /* nop */
}

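/*
 * Early sun4d SMP setup: redirect the IPI15 trap entry from the sun4m
 * handler to the sun4d one, register the sun4d implementations with
 * btfixup, and initialize every cross-call in/out slot to "completed",
 * presumably as a safe starting state for the mailbox.
 */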
void __init sun4d_init_smp(void)
{
        int i;

        /* Patch ipi15 trap table */
        t_nmi[1] = t_nmi[1] + (linux_trap_ipi15_sun4d - linux_trap_ipi15_sun4m);

        /* And set btfixup... */
        BTFIXUPSET_BLACKBOX(hard_smp_processor_id, smp4d_blackbox_id);
        BTFIXUPSET_BLACKBOX(load_current, smp4d_blackbox_current);
        BTFIXUPSET_CALL(smp_cross_call, smp4d_cross_call, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(__hard_smp_processor_id, __smp4d_processor_id, BTFIXUPCALL_NORM);

        for (i = 0; i < NR_CPUS; i++) {
                ccall_info.processors_in[i] = 1;
                ccall_info.processors_out[i] = 1;
        }
}