linux/arch/sparc/kernel/smp_32.c
/* smp.c: Sparc SMP support.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 2004 Keith M Wesolowski (wesolows@foobazco.org)
 */

#include <asm/head.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/delay.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>

#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/cpudata.h>
#include <asm/leon.h>

#include "irq.h"

volatile unsigned long cpu_callin_map[NR_CPUS] __cpuinitdata = {0,};

cpumask_t smp_commenced_mask = CPU_MASK_NONE;

/* The only locking primitive guaranteed to be available on all Sparc
 * processors is 'ldstub [%reg + immediate], %dest_reg', which atomically
 * loads the byte currently at the effective address into dest_reg and
 * stores 0xff there afterwards.  A pretty lame locking primitive compared
 * to the Alpha or Intel ones, no?  Most Sparcs have the 'swap' instruction,
 * which is much better...
 */

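/* Illustration only: a minimal sketch of a test-and-set lock built from
 * 'ldstub', to make the comment above concrete.  These helpers are not
 * part of this file (the real arch spinlock is implemented elsewhere in
 * the sparc headers); example_ldstub_lock/unlock are hypothetical names.
 * The atomic returns the old byte, so 0 means we took the lock and 0xff
 * means someone else holds it; while contended we spin on plain loads and
 * only retry the atomic once the byte reads back as 0.
 */
static inline void example_ldstub_lock(volatile unsigned char *lock)
{
        unsigned char old;

        do {
                __asm__ __volatile__("ldstub [%1], %0"
                                     : "=&r" (old)
                                     : "r" (lock)
                                     : "memory");
                if (old) {
                        /* Lock was held: wait until it looks free before
                         * retrying the atomic, to limit bus traffic. */
                        while (*lock)
                                ;
                }
        } while (old);
}

static inline void example_ldstub_unlock(volatile unsigned char *lock)
{
        /* Order the critical section before the releasing store. */
        __asm__ __volatile__("" : : : "memory");
        *lock = 0;
}
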
void __cpuinit smp_store_cpu_info(int id)
{
        int cpu_node;
        int mid;

        cpu_data(id).udelay_val = loops_per_jiffy;

        cpu_find_by_mid(id, &cpu_node);
        cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
                                                     "clock-frequency", 0);
        cpu_data(id).prom_node = cpu_node;
        mid = cpu_get_hwmid(cpu_node);

        if (mid < 0) {
                printk(KERN_NOTICE "No MID found for CPU%d at node 0x%08x\n", id, cpu_node);
                mid = 0;
        }
        cpu_data(id).mid = mid;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
        extern void smp4m_smp_done(void);
        extern void smp4d_smp_done(void);
        unsigned long bogosum = 0;
        int cpu, num = 0;

        for_each_online_cpu(cpu) {
                num++;
                bogosum += cpu_data(cpu).udelay_val;
        }

        printk("Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
                num, bogosum/(500000/HZ),
                (bogosum/(5000/HZ))%100);

        switch (sparc_cpu_model) {
        case sun4:
                printk("SUN4\n");
                BUG();
                break;
        case sun4c:
                printk("SUN4C\n");
                BUG();
                break;
        case sun4m:
                smp4m_smp_done();
                break;
        case sun4d:
                smp4d_smp_done();
                break;
        case sparc_leon:
                leon_smp_done();
                break;
        case sun4e:
                printk("SUN4E\n");
                BUG();
                break;
        case sun4u:
                printk("SUN4U\n");
                BUG();
                break;
        default:
                printk("UNKNOWN!\n");
                BUG();
                break;
        }
}

void cpu_panic(void)
{
        printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
        panic("SMP bolixed\n");
}

struct linux_prom_registers smp_penguin_ctable __cpuinitdata = { 0 };

void smp_send_reschedule(int cpu)
{
        /*
         * CPU model dependent way of implementing IPI generation targeting
         * a single CPU. The trap handler needs only to do trap entry/return
         * to call schedule.
         */
        BTFIXUP_CALL(smp_ipi_resched)(cpu);
}
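
/* Illustrative flow only (not additional code): a reschedule IPI on
 * sparc32 ends up as
 *
 *     smp_send_reschedule(cpu)
 *       -> BTFIXUP_CALL(smp_ipi_resched)(cpu)    platform-specific IPI
 *          ... target CPU takes the trap ...
 *       -> smp_resched_interrupt()               (below)
 *            -> scheduler_ipi()
 *
 * so the per-CPU work is just trap entry/exit plus the scheduler_ipi()
 * call, as the comment above notes.
 */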

void smp_send_stop(void)
{
}

void arch_send_call_function_single_ipi(int cpu)
{
        /* trigger one IPI single call on one CPU */
        BTFIXUP_CALL(smp_ipi_single)(cpu);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        int cpu;

        /* trigger IPI mask call on each CPU */
        for_each_cpu(cpu, mask)
                BTFIXUP_CALL(smp_ipi_mask_one)(cpu);
}

void smp_resched_interrupt(void)
{
        irq_enter();
        scheduler_ipi();
        local_cpu_data().irq_resched_count++;
        irq_exit();
        /* re-schedule routine called by interrupt return code. */
}

void smp_call_function_single_interrupt(void)
{
        irq_enter();
        generic_smp_call_function_single_interrupt();
        local_cpu_data().irq_call_count++;
        irq_exit();
}

void smp_call_function_interrupt(void)
{
        irq_enter();
        generic_smp_call_function_interrupt();
        local_cpu_data().irq_call_count++;
        irq_exit();
}
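
/* A hedged usage sketch, not part of this file: how a caller would reach
 * the hooks above.  The generic smp_call_function() sends IPIs through
 * arch_send_call_function_ipi_mask(); each target CPU then enters
 * smp_call_function_interrupt() above, whose
 * generic_smp_call_function_interrupt() runs the supplied function.
 * example_poke/example_poke_others are illustrative names only.
 */
static void example_poke(void *info)
{
        /* Runs on every targeted CPU, in IPI (hard interrupt) context. */
        pr_info("CPU%d poked\n", smp_processor_id());
}

static void example_poke_others(void)
{
        /* Third argument == 1: wait until all CPUs have run example_poke. */
        smp_call_function(example_poke, NULL, 1);
}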

void smp_flush_cache_all(void)
{
        xc0((smpfunc_t) BTFIXUP_CALL(local_flush_cache_all));
        local_flush_cache_all();
}

void smp_flush_tlb_all(void)
{
        xc0((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_all));
        local_flush_tlb_all();
}

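/* The mm/range/page flushes below all follow the same cross-call pattern:
 * copy mm_cpumask(mm), drop the current CPU from the copy, and only if
 * other CPUs remain issue an xc1/xc2/xc3 cross-call so they run their
 * local flush routine; the local flush is then always done last on this
 * CPU.
 */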
void smp_flush_cache_mm(struct mm_struct *mm)
{
        if (mm->context != NO_CONTEXT) {
                cpumask_t cpu_mask;
                cpumask_copy(&cpu_mask, mm_cpumask(mm));
                cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
                if (!cpumask_empty(&cpu_mask))
                        xc1((smpfunc_t) BTFIXUP_CALL(local_flush_cache_mm), (unsigned long) mm);
                local_flush_cache_mm(mm);
        }
}

void smp_flush_tlb_mm(struct mm_struct *mm)
{
        if (mm->context != NO_CONTEXT) {
                cpumask_t cpu_mask;
                cpumask_copy(&cpu_mask, mm_cpumask(mm));
                cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
                if (!cpumask_empty(&cpu_mask)) {
                        xc1((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_mm), (unsigned long) mm);
                        if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
                                cpumask_copy(mm_cpumask(mm),
                                             cpumask_of(smp_processor_id()));
                }
                local_flush_tlb_mm(mm);
        }
}

void smp_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
                           unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        if (mm->context != NO_CONTEXT) {
                cpumask_t cpu_mask;
                cpumask_copy(&cpu_mask, mm_cpumask(mm));
                cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
                if (!cpumask_empty(&cpu_mask))
                        xc3((smpfunc_t) BTFIXUP_CALL(local_flush_cache_range), (unsigned long) vma, start, end);
                local_flush_cache_range(vma, start, end);
        }
}

void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                         unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        if (mm->context != NO_CONTEXT) {
                cpumask_t cpu_mask;
                cpumask_copy(&cpu_mask, mm_cpumask(mm));
                cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
                if (!cpumask_empty(&cpu_mask))
                        xc3((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_range), (unsigned long) vma, start, end);
                local_flush_tlb_range(vma, start, end);
        }
}

void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
        struct mm_struct *mm = vma->vm_mm;

        if (mm->context != NO_CONTEXT) {
                cpumask_t cpu_mask;
                cpumask_copy(&cpu_mask, mm_cpumask(mm));
                cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
                if (!cpumask_empty(&cpu_mask))
                        xc2((smpfunc_t) BTFIXUP_CALL(local_flush_cache_page), (unsigned long) vma, page);
                local_flush_cache_page(vma, page);
        }
}

void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        struct mm_struct *mm = vma->vm_mm;

        if (mm->context != NO_CONTEXT) {
                cpumask_t cpu_mask;
                cpumask_copy(&cpu_mask, mm_cpumask(mm));
                cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
                if (!cpumask_empty(&cpu_mask))
                        xc2((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_page), (unsigned long) vma, page);
                local_flush_tlb_page(vma, page);
        }
}

void smp_flush_page_to_ram(unsigned long page)
{
        /* Current theory is that those who call this are the ones
         * who have just dirtied their cache with the page's contents
         * in kernel space, therefore we only run this on the local cpu.
         *
         * XXX This experiment failed, research further... -DaveM
         */
#if 1
        xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_to_ram), page);
#endif
        local_flush_page_to_ram(page);
}

void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
        cpumask_t cpu_mask;
        cpumask_copy(&cpu_mask, mm_cpumask(mm));
        cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
        if (!cpumask_empty(&cpu_mask))
                xc2((smpfunc_t) BTFIXUP_CALL(local_flush_sig_insns), (unsigned long) mm, insn_addr);
        local_flush_sig_insns(mm, insn_addr);
}

extern unsigned int lvl14_resolution;

/* /proc/profile writes can call this, don't __init it please. */
static DEFINE_SPINLOCK(prof_setup_lock);

int setup_profiling_timer(unsigned int multiplier)
{
        int i;
        unsigned long flags;

        /* Prevent level14 ticker IRQ flooding. */
        if ((!multiplier) || (lvl14_resolution / multiplier) < 500)
                return -EINVAL;

        spin_lock_irqsave(&prof_setup_lock, flags);
        for_each_possible_cpu(i) {
                load_profile_irq(i, lvl14_resolution / multiplier);
                prof_multiplier(i) = multiplier;
        }
        spin_unlock_irqrestore(&prof_setup_lock, flags);

        return 0;
}
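
/* Worked example with made-up numbers (the real lvl14_resolution is
 * platform dependent and not defined in this file): suppose it were
 * 1,000,000.  Then
 *
 *     multiplier = 2     ->  1000000 / 2    = 500000   accepted
 *     multiplier = 4000  ->  1000000 / 4000 = 250      rejected (-EINVAL)
 *
 * i.e. the profiling rate may only be raised until the reprogrammed
 * level-14 period drops below 500 resolution units, which bounds how
 * hard the ticker IRQ can flood the CPUs.
 */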

void __init smp_prepare_cpus(unsigned int max_cpus)
{
        extern void __init smp4m_boot_cpus(void);
        extern void __init smp4d_boot_cpus(void);
        int i, cpuid, extra;

        printk("Entering SMP Mode...\n");

        extra = 0;
        for (i = 0; !cpu_find_by_instance(i, NULL, &cpuid); i++) {
                if (cpuid >= NR_CPUS)
                        extra++;
        }
        /* i = number of cpus */
        if (extra && max_cpus > i - extra)
                printk("Warning: NR_CPUS is too low to start all cpus\n");

        smp_store_cpu_info(boot_cpu_id);

        switch (sparc_cpu_model) {
        case sun4:
                printk("SUN4\n");
                BUG();
                break;
        case sun4c:
                printk("SUN4C\n");
                BUG();
                break;
        case sun4m:
                smp4m_boot_cpus();
                break;
        case sun4d:
                smp4d_boot_cpus();
                break;
        case sparc_leon:
                leon_boot_cpus();
                break;
        case sun4e:
                printk("SUN4E\n");
                BUG();
                break;
        case sun4u:
                printk("SUN4U\n");
                BUG();
                break;
        default:
                printk("UNKNOWN!\n");
                BUG();
                break;
        }
}

/* Set this up early so that things like the scheduler can init
 * properly.  We use the same cpu mask for both the present and
 * possible cpu map.
 */
void __init smp_setup_cpu_possible_map(void)
{
        int instance, mid;

        instance = 0;
        while (!cpu_find_by_instance(instance, NULL, &mid)) {
                if (mid < NR_CPUS) {
                        set_cpu_possible(mid, true);
                        set_cpu_present(mid, true);
                }
                instance++;
        }
}
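
/* For clarity (describing the loop above, no new behaviour): every CPU the
 * PROM enumerates via cpu_find_by_instance() whose MID fits below NR_CPUS
 * gets identical possible and present bits here.  The online bit is only
 * set later: for the boot CPU in smp_prepare_boot_cpu() below, and for
 * secondaries during bringup once __cpu_up() releases them via
 * smp_commenced_mask.
 */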

void __init smp_prepare_boot_cpu(void)
{
        int cpuid = hard_smp_processor_id();

        if (cpuid >= NR_CPUS) {
                prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
                prom_halt();
        }
        if (cpuid != 0)
                printk("boot cpu id != 0, this could work but is untested\n");

        current_thread_info()->cpu = cpuid;
        set_cpu_online(cpuid, true);
        set_cpu_possible(cpuid, true);
}

int __cpuinit __cpu_up(unsigned int cpu)
{
        extern int __cpuinit smp4m_boot_one_cpu(int);
        extern int __cpuinit smp4d_boot_one_cpu(int);
        int ret = 0;

        switch (sparc_cpu_model) {
        case sun4:
                printk("SUN4\n");
                BUG();
                break;
        case sun4c:
                printk("SUN4C\n");
                BUG();
                break;
        case sun4m:
                ret = smp4m_boot_one_cpu(cpu);
                break;
        case sun4d:
                ret = smp4d_boot_one_cpu(cpu);
                break;
        case sparc_leon:
                ret = leon_boot_one_cpu(cpu);
                break;
        case sun4e:
                printk("SUN4E\n");
                BUG();
                break;
        case sun4u:
                printk("SUN4U\n");
                BUG();
                break;
        default:
                printk("UNKNOWN!\n");
                BUG();
                break;
        }

        if (!ret) {
                cpumask_set_cpu(cpu, &smp_commenced_mask);
                while (!cpu_online(cpu))
                        mb();
        }
        return ret;
}

void smp_bogo(struct seq_file *m)
{
        int i;

        for_each_online_cpu(i) {
                seq_printf(m,
                           "Cpu%dBogo\t: %lu.%02lu\n",
                           i,
                           cpu_data(i).udelay_val/(500000/HZ),
                           (cpu_data(i).udelay_val/(5000/HZ))%100);
        }
}
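
/* Worked example of the BogoMIPS arithmetic above (illustrative numbers):
 * udelay_val holds loops_per_jiffy, so BogoMIPS = udelay_val * HZ / 500000.
 * With HZ = 100 and udelay_val = 1,000,000:
 *
 *     integer part : 1000000 / (500000/100) = 1000000 / 5000 = 200
 *     two decimals : (1000000 / (5000/100)) % 100 = 20000 % 100 = 0
 *
 * which prints as "200.00".  The same formula produces the bogosum total
 * in smp_cpus_done() above.
 */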

void smp_info(struct seq_file *m)
{
        int i;

        seq_printf(m, "State:\n");
        for_each_online_cpu(i)
                seq_printf(m, "CPU%d\t\t: online\n", i);
}