// SPDX-License-Identifier: GPL-2.0-or-later
/*
** SMP Support
**
** Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
** Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
** Copyright (C) 2001,2004 Grant Grundler <grundler@parisc-linux.org>
**
** Lots of stuff stolen from arch/alpha/kernel/smp.c
** ...and then parisc stole from arch/ia64/kernel/smp.c. Thanks David! :^)
**
** Thanks to John Curry and Ullas Ponnadi. I learned a lot from their work.
** -grant (1/12/2001)
**
*/
#include <linux/types.h>
#include <linux/spinlock.h>

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched/mm.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>

#include <linux/atomic.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/tlbflush.h>

#include <asm/io.h>
#include <asm/irq.h>		/* for CPU_IRQ_REGION and friends */
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/unistd.h>
#include <asm/cacheflush.h>
#undef DEBUG_SMP
#ifdef DEBUG_SMP
static int smp_debug_lvl = 0;
/* Wrapped in do/while so the macro expands safely inside if/else bodies. */
#define smp_debug(lvl, printargs...)		\
	do {					\
		if (lvl >= smp_debug_lvl)	\
			printk(printargs);	\
	} while (0)
#else
#define smp_debug(lvl, ...)	do { } while (0)
#endif /* DEBUG_SMP */

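/*
 * Idle task of the CPU currently being brought up; the slave's
 * bootstrap code takes its initial kernel stack from this task
 * (see smp_boot_one_cpu() below).
 */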
volatile struct task_struct *smp_init_current_idle_task;

/* track which CPU is booting */
static volatile int cpu_now_booting;

static int parisc_max_cpus = 1;

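/* Serializes updates to each CPU's pending_ipi word in cpu_data. */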
static DEFINE_PER_CPU(spinlock_t, ipi_lock);

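/*
 * Each message type is a bit position in a CPU's pending_ipi word,
 * so at most BITS_PER_LONG distinct IPI types can be encoded.
 */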
enum ipi_message_type {
	IPI_NOP=0,
	IPI_RESCHEDULE=1,
	IPI_CALL_FUNC,
	IPI_CPU_START,
	IPI_CPU_STOP,
	IPI_CPU_TEST
};


/********** SMP inter processor interrupt and communication routines */

#undef PER_CPU_IRQ_REGION
#ifdef PER_CPU_IRQ_REGION
/* XXX REVISIT Ignore for now.
**    *May* need this "hook" to register IPI handler
**    once we have perCPU ExtIntr switch tables.
*/
static void
ipi_init(int cpuid)
{
#error verify IRQ_OFFSET(IPI_IRQ) is ipi_interrupt() in new IRQ region

	if (cpu_online(cpuid))
	{
		switch_to_idle_task(current);
	}

	return;
}
#endif


/*
** Yoink this CPU from the runnable list...
**
*/
static void
halt_processor(void)
{
	/* REVISIT : redirect I/O Interrupts to another CPU? */
	/* REVISIT : does PM *know* this CPU isn't available? */
	set_cpu_online(smp_processor_id(), false);
	local_irq_disable();
	__pdc_cpu_rendezvous();
	for (;;)
		;
}

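/*
 * IPI_IRQ handler: repeatedly claim this CPU's pending_ipi word under
 * ipi_lock and dispatch every message bit found in it.
 */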
irqreturn_t __irq_entry
ipi_interrupt(int irq, void *dev_id)
{
	int this_cpu = smp_processor_id();
	struct cpuinfo_parisc *p = &per_cpu(cpu_data, this_cpu);
	unsigned long ops;
	unsigned long flags;

	for (;;) {
		spinlock_t *lock = &per_cpu(ipi_lock, this_cpu);
		spin_lock_irqsave(lock, flags);
		ops = p->pending_ipi;
		p->pending_ipi = 0;
		spin_unlock_irqrestore(lock, flags);

		mb(); /* Order bit clearing and data access. */

		if (!ops)
			break;

		while (ops) {
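			/* ffz(~ops) is the index of the lowest set bit in ops. */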
			unsigned long which = ffz(~ops);

			ops &= ~(1UL << which);

			switch (which) {
			case IPI_NOP:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_NOP\n", this_cpu);
				break;

			case IPI_RESCHEDULE:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_RESCHEDULE\n", this_cpu);
				inc_irq_stat(irq_resched_count);
				scheduler_ipi();
				break;

			case IPI_CALL_FUNC:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC\n", this_cpu);
				inc_irq_stat(irq_call_count);
				generic_smp_call_function_interrupt();
				break;

			case IPI_CPU_START:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_START\n", this_cpu);
				break;

			case IPI_CPU_STOP:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_STOP\n", this_cpu);
				halt_processor();
				break;

			case IPI_CPU_TEST:
				smp_debug(100, KERN_DEBUG "CPU%d is alive!\n", this_cpu);
				break;

			default:
				printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n",
					this_cpu, which);
				return IRQ_NONE;
			} /* Switch */

			/* before doing more, let in any pending interrupts */
			if (ops) {
				local_irq_enable();
				local_irq_disable();
			}
		} /* while (ops) */
	}
	return IRQ_HANDLED;
}

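/*
 * Post one IPI message: set its bit in the target's pending_ipi word,
 * then write the IPI vector to the target CPU's HPA so the CPU takes
 * an external interrupt.
 */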
static inline void
ipi_send(int cpu, enum ipi_message_type op)
{
	struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpu);
	spinlock_t *lock = &per_cpu(ipi_lock, cpu);
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	p->pending_ipi |= 1UL << op;
	gsc_writel(IPI_IRQ - CPU_IRQ_BASE, p->hpa);
	spin_unlock_irqrestore(lock, flags);
}

static void
send_IPI_mask(const struct cpumask *mask, enum ipi_message_type op)
{
	int cpu;

	for_each_cpu(cpu, mask)
		ipi_send(cpu, op);
}

static inline void
send_IPI_single(int dest_cpu, enum ipi_message_type op)
{
	BUG_ON(dest_cpu == NO_PROC_ID);

	ipi_send(dest_cpu, op);
}

static inline void
send_IPI_allbutself(enum ipi_message_type op)
{
	int i;

	for_each_online_cpu(i) {
		if (i != smp_processor_id())
			send_IPI_single(i, op);
	}
}


inline void
smp_send_stop(void)	{ send_IPI_allbutself(IPI_CPU_STOP); }

void
smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); }

void
smp_send_all_nop(void)
{
	send_IPI_allbutself(IPI_NOP);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_IPI_mask(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_IPI_single(cpu, IPI_CALL_FUNC);
}

/*
 * Called by secondaries to update state and initialize CPU registers.
 */
static void __init
smp_cpu_init(int cpunum)
{
	extern void init_IRQ(void);	/* arch/parisc/kernel/irq.c */
	extern void start_cpu_itimer(void);	/* arch/parisc/kernel/time.c */

	/* Set modes and Enable floating point coprocessor */
	init_per_cpu(cpunum);

	disable_sr_hashing();

	mb();

	/* Sanity check (kept from the 2.4 boot scheme): this CPU must not already be online. */
	if (cpu_online(cpunum)) {
		extern void machine_halt(void); /* arch/parisc.../process.c */

		printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum);
		machine_halt();
	}

	notify_cpu_starting(cpunum);

	set_cpu_online(cpunum, true);

	/* Initialise the idle task for this CPU */
	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

	init_IRQ();   /* make sure no IRQs are enabled or pending */
	start_cpu_itimer();
}


/*
 * Slaves start using C here. Indirectly called from smp_slave_stext.
 * Do what start_kernel() and main() do for the bootstrap processor
 * (aka monarch).
 */
void __init smp_callin(unsigned long pdce_proc)
{
	int slave_id = cpu_now_booting;

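	/* The slave passes in the PDC address it was started with;
	   check it against the copy recorded in PAGE0. */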
#ifdef CONFIG_64BIT
	WARN_ON(((unsigned long)(PAGE0->mem_pdc_hi) << 32
			| PAGE0->mem_pdc) != pdce_proc);
#endif

	smp_cpu_init(slave_id);

	flush_cache_all_local(); /* start with known state */
	flush_tlb_all_local(NULL);

	local_irq_enable();  /* Interrupts have been off until now */

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);

	/* NOTREACHED */
	panic("smp_callin() AAAAaaaaahhhh....\n");
}

/*
 * Bring one cpu online.
 */
int smp_boot_one_cpu(int cpuid, struct task_struct *idle)
{
	const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid);
	long timeout;

	task_thread_info(idle)->cpu = cpuid;

	/* Let _start know what logical CPU we're booting
	** (offset into init_tasks[], cpu_data[])
	*/
	cpu_now_booting = cpuid;

	/*
	** bootstrap code needs to know the task address since
	** it also contains the process stack.
	*/
	smp_init_current_idle_task = idle;
	mb();

	printk(KERN_INFO "Releasing cpu %d now, hpa=%lx\n", cpuid, p->hpa);

	/*
	** This gets PDC to release the CPU from a very tight loop.
	**
	** From the PA-RISC 2.0 Firmware Architecture Reference Specification:
	** "The MEM_RENDEZ vector specifies the location of OS_RENDEZ which
	** is executed after receiving the rendezvous signal (an interrupt to
	** EIR{0}). MEM_RENDEZ is valid only when it is nonzero and the
	** contents of memory are valid."
	*/
	gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, p->hpa);
	mb();

	/*
	 * OK, wait a bit for that CPU to finish staggering about.
	 * Slave will set a bit when it reaches smp_cpu_init().
	 * Once the "monarch CPU" sees the bit change, it can move on.
	 */
	for (timeout = 0; timeout < 10000; timeout++) {
		if (cpu_online(cpuid)) {
			/* Which implies Slave has started up */
			cpu_now_booting = 0;
			smp_init_current_idle_task = NULL;
			goto alive;
		}
		udelay(100);
		barrier();
	}
	printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
	return -1;

alive:
	/* Remember the Slave data */
	smp_debug(100, KERN_DEBUG "SMP: CPU:%d came alive after %ld us\n",
		cpuid, timeout * 100);
	return 0;
}

void __init smp_prepare_boot_cpu(void)
{
	int bootstrap_processor = per_cpu(cpu_data, 0).cpuid;

	/* Setup BSP mappings */
	printk(KERN_INFO "SMP: bootstrap CPU ID is %d\n", bootstrap_processor);

	set_cpu_online(bootstrap_processor, true);
	set_cpu_present(bootstrap_processor, true);
}


/*
** inventory.c:do_inventory() hasn't yet been run and thus we
** don't 'discover' the additional CPUs until later.
*/
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int cpu;

	for_each_possible_cpu(cpu)
		spin_lock_init(&per_cpu(ipi_lock, cpu));

	init_cpu_present(cpumask_of(0));

	parisc_max_cpus = max_cpus;
	if (!max_cpus)
		printk(KERN_INFO "SMP mode deactivated.\n");
}


void smp_cpus_done(unsigned int cpu_max)
{
}

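/* Arch hook for the generic CPU-hotplug code to bring one CPU online. */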
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	if (cpu != 0 && cpu < parisc_max_cpus && smp_boot_one_cpu(cpu, tidle))
		return -ENOSYS;

	return cpu_online(cpu) ? 0 : -ENOSYS;
}

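/* Changing the profiling timer multiplier is not supported. */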
#ifdef CONFIG_PROC_FS
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
#endif