linux/arch/sparc/kernel/smp_32.c
// SPDX-License-Identifier: GPL-2.0
/* smp.c: Sparc SMP support.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 2004 Keith M Wesolowski (wesolows@foobazco.org)
 */

#include <asm/head.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/profile.h>
#include <linux/cpu.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>

#include <asm/irq.h>
#include <asm/page.h>
#include <asm/oplib.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/cpudata.h>
#include <asm/timer.h>
#include <asm/leon.h>

#include "kernel.h"
#include "irq.h"

volatile unsigned long cpu_callin_map[NR_CPUS] = {0,};

cpumask_t smp_commenced_mask = CPU_MASK_NONE;

const struct sparc32_ipi_ops *sparc32_ipi_ops;

/* The only guaranteed locking primitive available on all Sparc
 * processors is 'ldstub [%reg + immediate], %dest_reg', which atomically
 * places the current byte at the effective address into dest_reg and
 * places 0xff there afterwards.  Pretty lame locking primitive
 * compared to the Alpha and the Intel, no?  Most Sparcs have the 'swap'
 * instruction, which is much better...
 */
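
/* Illustrative sketch only (not used by this file): a minimal test-and-set
 * spin loop built on 'ldstub' looks roughly like
 *
 *      1:      ldstub  [%o0], %g2      ! fetch lock byte, store 0xff to it
 *              orcc    %g2, 0x0, %g0   ! non-zero old value -> lock was held
 *              bne     1b              ! so keep spinning
 *               nop
 *
 * See arch/sparc/include/asm/spinlock_32.h for the real implementation.
 */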

void smp_store_cpu_info(int id)
{
        int cpu_node;
        int mid;

        cpu_data(id).udelay_val = loops_per_jiffy;

        cpu_find_by_mid(id, &cpu_node);
        cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
                                                     "clock-frequency", 0);
        cpu_data(id).prom_node = cpu_node;
        mid = cpu_get_hwmid(cpu_node);

        if (mid < 0) {
                printk(KERN_NOTICE "No MID found for CPU%d at node 0x%08x\n", id, cpu_node);
                mid = 0;
        }
        cpu_data(id).mid = mid;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
        unsigned long bogosum = 0;
        int cpu, num = 0;

        for_each_online_cpu(cpu) {
                num++;
                bogosum += cpu_data(cpu).udelay_val;
        }

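        /*
         * udelay_val holds each CPU's loops_per_jiffy, so bogosum/(500000/HZ)
         * below is the summed BogoMIPS integer part (BogoMIPS = lpj * HZ /
         * 500000) and bogosum/(5000/HZ) % 100 supplies the two decimal
         * places; e.g. lpj = 2500000 at HZ = 100 prints as "500.00".
         */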
        printk("Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
                num, bogosum/(500000/HZ),
                (bogosum/(5000/HZ))%100);

        switch(sparc_cpu_model) {
        case sun4m:
                smp4m_smp_done();
                break;
        case sun4d:
                smp4d_smp_done();
                break;
        case sparc_leon:
                leon_smp_done();
                break;
        case sun4e:
                printk("SUN4E\n");
                BUG();
                break;
        case sun4u:
                printk("SUN4U\n");
                BUG();
                break;
        default:
                printk("UNKNOWN!\n");
                BUG();
                break;
        }
}

void cpu_panic(void)
{
        printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
        panic("SMP bolixed\n");
}

struct linux_prom_registers smp_penguin_ctable = { 0 };

void smp_send_reschedule(int cpu)
{
        /*
         * CPU model dependent way of implementing IPI generation targeting
         * a single CPU. The trap handler needs only to do trap entry/return
         * to call schedule.
         */
        sparc32_ipi_ops->resched(cpu);
}
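
/* For illustration only (hypothetical names): a platform plugs its IPI
 * primitives into the hooks used above roughly like
 *
 *      static const struct sparc32_ipi_ops example_ipi_ops = {
 *              .resched  = example_ipi_resched,
 *              .single   = example_ipi_single,
 *              .mask_one = example_ipi_mask_one,
 *      };
 *      ...
 *      sparc32_ipi_ops = &example_ipi_ops;
 *
 * sun4m, sun4d and LEON each install their own ops during platform setup;
 * smp_send_reschedule() and the call-function helpers below only dispatch
 * through these pointers.
 */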

void smp_send_stop(void)
{
}

void arch_send_call_function_single_ipi(int cpu)
{
        /* trigger one IPI single call on one CPU */
        sparc32_ipi_ops->single(cpu);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        int cpu;

        /* trigger IPI mask call on each CPU */
        for_each_cpu(cpu, mask)
                sparc32_ipi_ops->mask_one(cpu);
}

void smp_resched_interrupt(void)
{
        irq_enter();
        scheduler_ipi();
        local_cpu_data().irq_resched_count++;
        irq_exit();
        /* re-schedule routine called by interrupt return code. */
}

void smp_call_function_single_interrupt(void)
{
        irq_enter();
        generic_smp_call_function_single_interrupt();
        local_cpu_data().irq_call_count++;
        irq_exit();
}

void smp_call_function_interrupt(void)
{
        irq_enter();
        generic_smp_call_function_interrupt();
        local_cpu_data().irq_call_count++;
        irq_exit();
}

int setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
        int i, cpuid, extra;

        printk("Entering SMP Mode...\n");

        extra = 0;
        for (i = 0; !cpu_find_by_instance(i, NULL, &cpuid); i++) {
                if (cpuid >= NR_CPUS)
                        extra++;
        }
        /* i = number of cpus */
        if (extra && max_cpus > i - extra)
                printk("Warning: NR_CPUS is too low to start all cpus\n");

        smp_store_cpu_info(boot_cpu_id);

        switch(sparc_cpu_model) {
        case sun4m:
                smp4m_boot_cpus();
                break;
        case sun4d:
                smp4d_boot_cpus();
                break;
        case sparc_leon:
                leon_boot_cpus();
                break;
        case sun4e:
                printk("SUN4E\n");
                BUG();
                break;
        case sun4u:
                printk("SUN4U\n");
                BUG();
                break;
        default:
                printk("UNKNOWN!\n");
                BUG();
                break;
        }
}

/* Set this up early so that things like the scheduler can init
 * properly.  We use the same cpu mask for both the present and
 * possible cpu map.
 */
void __init smp_setup_cpu_possible_map(void)
{
        int instance, mid;

        instance = 0;
        while (!cpu_find_by_instance(instance, NULL, &mid)) {
                if (mid < NR_CPUS) {
                        set_cpu_possible(mid, true);
                        set_cpu_present(mid, true);
                }
                instance++;
        }
}

void __init smp_prepare_boot_cpu(void)
{
        int cpuid = hard_smp_processor_id();

        if (cpuid >= NR_CPUS) {
                prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
                prom_halt();
        }
        if (cpuid != 0)
                printk("boot cpu id != 0, this could work but is untested\n");

        current_thread_info()->cpu = cpuid;
        set_cpu_online(cpuid, true);
        set_cpu_possible(cpuid, true);
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
        int ret=0;

        switch(sparc_cpu_model) {
        case sun4m:
                ret = smp4m_boot_one_cpu(cpu, tidle);
                break;
        case sun4d:
                ret = smp4d_boot_one_cpu(cpu, tidle);
                break;
        case sparc_leon:
                ret = leon_boot_one_cpu(cpu, tidle);
                break;
        case sun4e:
                printk("SUN4E\n");
                BUG();
                break;
        case sun4u:
                printk("SUN4U\n");
                BUG();
                break;
        default:
                printk("UNKNOWN!\n");
                BUG();
                break;
        }

        if (!ret) {
                cpumask_set_cpu(cpu, &smp_commenced_mask);
                while (!cpu_online(cpu))
                        mb();
        }
        return ret;
}
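
/* Bring-up handshake, as implemented in this file: __cpu_up() above marks the
 * new cpu in smp_commenced_mask and then spins until the secondary, running
 * sparc_start_secondary() below, calls set_cpu_online().  The per-model
 * *_boot_one_cpu() helpers only start the cpu; the online wait happens here.
 */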

static void arch_cpu_pre_starting(void *arg)
{
        local_ops->cache_all();
        local_ops->tlb_all();

        switch(sparc_cpu_model) {
        case sun4m:
                sun4m_cpu_pre_starting(arg);
                break;
        case sun4d:
                sun4d_cpu_pre_starting(arg);
                break;
        case sparc_leon:
                leon_cpu_pre_starting(arg);
                break;
        default:
                BUG();
        }
}

static void arch_cpu_pre_online(void *arg)
{
        unsigned int cpuid = hard_smp_processor_id();

        register_percpu_ce(cpuid);

        calibrate_delay();
        smp_store_cpu_info(cpuid);

        local_ops->cache_all();
        local_ops->tlb_all();

        switch(sparc_cpu_model) {
        case sun4m:
                sun4m_cpu_pre_online(arg);
                break;
        case sun4d:
                sun4d_cpu_pre_online(arg);
                break;
        case sparc_leon:
                leon_cpu_pre_online(arg);
                break;
        default:
                BUG();
        }
}

static void sparc_start_secondary(void *arg)
{
        unsigned int cpu;

        /*
         * SMP booting is extremely fragile on some architectures, so run
         * the cpu initialization code before anything else.
         */
        arch_cpu_pre_starting(arg);

        cpu = smp_processor_id();

        notify_cpu_starting(cpu);
        arch_cpu_pre_online(arg);

        /* Set the CPU in the cpu_online_mask */
        set_cpu_online(cpu, true);

        /* Enable local interrupts now */
        local_irq_enable();

        wmb();
        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);

        /* We should never reach here! */
        BUG();
}

void smp_callin(void)
{
        sparc_start_secondary(NULL);
}

void smp_bogo(struct seq_file *m)
{
        int i;

        for_each_online_cpu(i) {
                seq_printf(m,
                           "Cpu%dBogo\t: %lu.%02lu\n",
                           i,
                           cpu_data(i).udelay_val/(500000/HZ),
                           (cpu_data(i).udelay_val/(5000/HZ))%100);
        }
}

void smp_info(struct seq_file *m)
{
        int i;

        seq_printf(m, "State:\n");
        for_each_online_cpu(i)
                seq_printf(m, "CPU%d\t\t: online\n", i);
}