linux/arch/sparc/kernel/smp_32.c
/* smp.c: Sparc SMP support.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 2004 Keith M Wesolowski (wesolows@foobazco.org)
 */

#include <asm/head.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/delay.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>

#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/cpudata.h>

#include "irq.h"

volatile unsigned long cpu_callin_map[NR_CPUS] __cpuinitdata = {0,};
unsigned char boot_cpu_id = 0;
unsigned char boot_cpu_id4 = 0; /* boot_cpu_id << 2 */

cpumask_t smp_commenced_mask = CPU_MASK_NONE;

/* The only guaranteed locking primitive available on all Sparc
 * processors is 'ldstub [%reg + immediate], %dest_reg', which atomically
 * loads the current byte at the effective address into dest_reg and
 * stores 0xff there afterwards.  Pretty lame locking primitive
 * compared to the Alpha and the Intel, no?  Most Sparcs have the 'swap'
 * instruction, which is much better...
 */
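
/* Illustrative sketch only (not part of the original file): a minimal
 * test-and-set spinlock built directly on 'ldstub'.  The function name
 * and the byte-sized lock word are assumptions made for this example;
 * the real locks live in the spinlock headers.
 */
static inline void example_ldstub_spin_lock(volatile unsigned char *lock)
{
        unsigned char tmp;

        do {
                /* Atomically fetch the lock byte and store 0xff in it. */
                __asm__ __volatile__("ldstub [%1], %0"
                                     : "=&r" (tmp)
                                     : "r" (lock)
                                     : "memory");
                /* tmp == 0: we took the lock; tmp == 0xff: already held. */
        } while (tmp);
}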

void __cpuinit smp_store_cpu_info(int id)
{
        int cpu_node;

        cpu_data(id).udelay_val = loops_per_jiffy;

        cpu_find_by_mid(id, &cpu_node);
        cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
                                                     "clock-frequency", 0);
        cpu_data(id).prom_node = cpu_node;
        cpu_data(id).mid = cpu_get_hwmid(cpu_node);

        if (cpu_data(id).mid < 0)
                panic("No MID found for CPU%d at node 0x%08x", id, cpu_node);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
        extern void smp4m_smp_done(void);
        extern void smp4d_smp_done(void);
        unsigned long bogosum = 0;
        int cpu, num = 0;

        for_each_online_cpu(cpu) {
                num++;
                bogosum += cpu_data(cpu).udelay_val;
        }

        printk("Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
                num, bogosum/(500000/HZ),
                (bogosum/(5000/HZ))%100);

        switch(sparc_cpu_model) {
        case sun4:
                printk("SUN4\n");
                BUG();
                break;
        case sun4c:
                printk("SUN4C\n");
                BUG();
                break;
        case sun4m:
                smp4m_smp_done();
                break;
        case sun4d:
                smp4d_smp_done();
                break;
        case sun4e:
                printk("SUN4E\n");
                BUG();
                break;
        case sun4u:
                printk("SUN4U\n");
                BUG();
                break;
        default:
                printk("UNKNOWN!\n");
                BUG();
                break;
        };
}

void cpu_panic(void)
{
        printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
        panic("SMP bolixed\n");
}

struct linux_prom_registers smp_penguin_ctable __cpuinitdata = { 0 };

void smp_send_reschedule(int cpu)
{
        /* See sparc64 */
}

void smp_send_stop(void)
{
}

void smp_flush_cache_all(void)
{
        xc0((smpfunc_t) BTFIXUP_CALL(local_flush_cache_all));
        local_flush_cache_all();
}

void smp_flush_tlb_all(void)
{
        xc0((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_all));
        local_flush_tlb_all();
}

void smp_flush_cache_mm(struct mm_struct *mm)
{
        if(mm->context != NO_CONTEXT) {
                cpumask_t cpu_mask = *mm_cpumask(mm);
                cpu_clear(smp_processor_id(), cpu_mask);
                if (!cpus_empty(cpu_mask))
                        xc1((smpfunc_t) BTFIXUP_CALL(local_flush_cache_mm), (unsigned long) mm);
                local_flush_cache_mm(mm);
        }
}

void smp_flush_tlb_mm(struct mm_struct *mm)
{
        if(mm->context != NO_CONTEXT) {
                cpumask_t cpu_mask = *mm_cpumask(mm);
                cpu_clear(smp_processor_id(), cpu_mask);
                if (!cpus_empty(cpu_mask)) {
                        xc1((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_mm), (unsigned long) mm);
                        if(atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
                                cpumask_copy(mm_cpumask(mm),
                                             cpumask_of(smp_processor_id()));
                }
                local_flush_tlb_mm(mm);
        }
}

void smp_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
                           unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        if (mm->context != NO_CONTEXT) {
                cpumask_t cpu_mask = *mm_cpumask(mm);
                cpu_clear(smp_processor_id(), cpu_mask);
                if (!cpus_empty(cpu_mask))
                        xc3((smpfunc_t) BTFIXUP_CALL(local_flush_cache_range), (unsigned long) vma, start, end);
                local_flush_cache_range(vma, start, end);
        }
}

void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                         unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        if (mm->context != NO_CONTEXT) {
                cpumask_t cpu_mask = *mm_cpumask(mm);
                cpu_clear(smp_processor_id(), cpu_mask);
                if (!cpus_empty(cpu_mask))
                        xc3((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_range), (unsigned long) vma, start, end);
                local_flush_tlb_range(vma, start, end);
        }
}

void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
        struct mm_struct *mm = vma->vm_mm;

        if(mm->context != NO_CONTEXT) {
                cpumask_t cpu_mask = *mm_cpumask(mm);
                cpu_clear(smp_processor_id(), cpu_mask);
                if (!cpus_empty(cpu_mask))
                        xc2((smpfunc_t) BTFIXUP_CALL(local_flush_cache_page), (unsigned long) vma, page);
                local_flush_cache_page(vma, page);
        }
}

void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        struct mm_struct *mm = vma->vm_mm;

        if(mm->context != NO_CONTEXT) {
                cpumask_t cpu_mask = *mm_cpumask(mm);
                cpu_clear(smp_processor_id(), cpu_mask);
                if (!cpus_empty(cpu_mask))
                        xc2((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_page), (unsigned long) vma, page);
                local_flush_tlb_page(vma, page);
        }
}

void smp_reschedule_irq(void)
{
        set_need_resched();
}

void smp_flush_page_to_ram(unsigned long page)
{
        /* Current theory is that those who call this are the ones
         * who have just dirtied their cache with the page's contents
         * in kernel space, therefore we only run this on the local cpu.
         *
         * XXX This experiment failed, research further... -DaveM
         */
#if 1
        xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_to_ram), page);
#endif
        local_flush_page_to_ram(page);
}

void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
        cpumask_t cpu_mask = *mm_cpumask(mm);
        cpu_clear(smp_processor_id(), cpu_mask);
        if (!cpus_empty(cpu_mask))
                xc2((smpfunc_t) BTFIXUP_CALL(local_flush_sig_insns), (unsigned long) mm, insn_addr);
        local_flush_sig_insns(mm, insn_addr);
}

extern unsigned int lvl14_resolution;

/* /proc/profile writes can call this, don't __init it please. */
static DEFINE_SPINLOCK(prof_setup_lock);

int setup_profiling_timer(unsigned int multiplier)
{
        int i;
        unsigned long flags;

        /* Prevent level14 ticker IRQ flooding. */
        if((!multiplier) || (lvl14_resolution / multiplier) < 500)
                return -EINVAL;

        spin_lock_irqsave(&prof_setup_lock, flags);
        for_each_possible_cpu(i) {
                load_profile_irq(i, lvl14_resolution / multiplier);
                prof_multiplier(i) = multiplier;
        }
        spin_unlock_irqrestore(&prof_setup_lock, flags);

        return 0;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
        extern void __init smp4m_boot_cpus(void);
        extern void __init smp4d_boot_cpus(void);
        int i, cpuid, extra;

        printk("Entering SMP Mode...\n");

        extra = 0;
        for (i = 0; !cpu_find_by_instance(i, NULL, &cpuid); i++) {
                if (cpuid >= NR_CPUS)
                        extra++;
        }
        /* i = number of cpus */
        if (extra && max_cpus > i - extra)
                printk("Warning: NR_CPUS is too low to start all cpus\n");

        smp_store_cpu_info(boot_cpu_id);

        switch(sparc_cpu_model) {
        case sun4:
                printk("SUN4\n");
                BUG();
                break;
        case sun4c:
                printk("SUN4C\n");
                BUG();
                break;
        case sun4m:
                smp4m_boot_cpus();
                break;
        case sun4d:
                smp4d_boot_cpus();
                break;
        case sun4e:
                printk("SUN4E\n");
                BUG();
                break;
        case sun4u:
                printk("SUN4U\n");
                BUG();
                break;
        default:
                printk("UNKNOWN!\n");
                BUG();
                break;
        };
}

/* Set this up early so that things like the scheduler can init
 * properly.  We use the same cpu mask for both the present and
 * possible cpu map.
 */
void __init smp_setup_cpu_possible_map(void)
{
        int instance, mid;

        instance = 0;
        while (!cpu_find_by_instance(instance, NULL, &mid)) {
                if (mid < NR_CPUS) {
                        set_cpu_possible(mid, true);
                        set_cpu_present(mid, true);
                }
                instance++;
        }
}

void __init smp_prepare_boot_cpu(void)
{
        int cpuid = hard_smp_processor_id();

        if (cpuid >= NR_CPUS) {
                prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
                prom_halt();
        }
        if (cpuid != 0)
                printk("boot cpu id != 0, this could work but is untested\n");

        current_thread_info()->cpu = cpuid;
        set_cpu_online(cpuid, true);
        set_cpu_possible(cpuid, true);
}

int __cpuinit __cpu_up(unsigned int cpu)
{
        extern int __cpuinit smp4m_boot_one_cpu(int);
        extern int __cpuinit smp4d_boot_one_cpu(int);
        int ret=0;

        switch(sparc_cpu_model) {
        case sun4:
                printk("SUN4\n");
                BUG();
                break;
        case sun4c:
                printk("SUN4C\n");
                BUG();
                break;
        case sun4m:
                ret = smp4m_boot_one_cpu(cpu);
                break;
        case sun4d:
                ret = smp4d_boot_one_cpu(cpu);
                break;
        case sun4e:
                printk("SUN4E\n");
                BUG();
                break;
        case sun4u:
                printk("SUN4U\n");
                BUG();
                break;
        default:
                printk("UNKNOWN!\n");
                BUG();
                break;
        };

        if (!ret) {
                cpu_set(cpu, smp_commenced_mask);
                while (!cpu_online(cpu))
                        mb();
        }
        return ret;
}

void smp_bogo(struct seq_file *m)
{
        int i;

        for_each_online_cpu(i) {
                seq_printf(m,
                           "Cpu%dBogo\t: %lu.%02lu\n",
                           i,
                           cpu_data(i).udelay_val/(500000/HZ),
                           (cpu_data(i).udelay_val/(5000/HZ))%100);
        }
}

void smp_info(struct seq_file *m)
{
        int i;

        seq_printf(m, "State:\n");
        for_each_online_cpu(i)
                seq_printf(m, "CPU%d\t\t: online\n", i);
}