linux/arch/mn10300/kernel/smp.c
<<
>>
Prefs
   1/* SMP support routines.
   2 *
   3 * Copyright (C) 2006-2008 Panasonic Corporation
   4 * All Rights Reserved.
   5 *
   6 * This program is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU General Public License
   8 * version 2 as published by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope that it will be useful,
  11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13 * GNU General Public License for more details.
  14 */
  15
  16#include <linux/interrupt.h>
  17#include <linux/spinlock.h>
  18#include <linux/init.h>
  19#include <linux/jiffies.h>
  20#include <linux/cpumask.h>
  21#include <linux/err.h>
  22#include <linux/kernel.h>
  23#include <linux/delay.h>
  24#include <linux/sched.h>
  25#include <linux/profile.h>
  26#include <linux/smp.h>
  27#include <linux/cpu.h>
  28#include <asm/tlbflush.h>
  29#include <asm/bitops.h>
  30#include <asm/processor.h>
  31#include <asm/bug.h>
  32#include <asm/exceptions.h>
  33#include <asm/hardirq.h>
  34#include <asm/fpu.h>
  35#include <asm/mmu_context.h>
  36#include <asm/thread_info.h>
  37#include <asm/cpu-regs.h>
  38#include <asm/intctl-regs.h>
  39#include "internal.h"
  40
  41#ifdef CONFIG_HOTPLUG_CPU
  42#include <asm/cacheflush.h>
  43
  44static unsigned long sleep_mode[NR_CPUS];
  45
  46static void run_sleep_cpu(unsigned int cpu);
  47static void run_wakeup_cpu(unsigned int cpu);
  48#endif /* CONFIG_HOTPLUG_CPU */
  49
  50/*
  51 * Debug Message function
  52 */
  53
  54#undef DEBUG_SMP
  55#ifdef DEBUG_SMP
  56#define Dprintk(fmt, ...) printk(KERN_DEBUG fmt, ##__VA_ARGS__)
  57#else
  58#define Dprintk(fmt, ...) no_printk(KERN_DEBUG fmt, ##__VA_ARGS__)
  59#endif
  60
  61/* timeout value in msec for smp_nmi_call_function. zero is no timeout. */
  62#define CALL_FUNCTION_NMI_IPI_TIMEOUT   0
  63
/*
 * Argument block handed from smp_nmi_call_function() to the NMI IPI
 * handler running on the other CPUs (via the nmi_call_data pointer).
 * Padded to a whole number of cache lines so it does not share a line
 * with unrelated data.
 */
struct nmi_call_data_struct {
        smp_call_func_t func;           /* function each CPU should run */
        void            *info;          /* opaque context passed to func */
        cpumask_t       started;        /* CPUs that have not yet picked up the call */
        cpumask_t       finished;       /* CPUs that have not yet finished running func */
        int             wait;           /* non-zero: initiator waits on 'finished' too */
        char            size_alignment[0]
        __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
  76
  77static DEFINE_SPINLOCK(smp_nmi_call_lock);
  78static struct nmi_call_data_struct *nmi_call_data;
  79
  80/*
  81 * Data structures and variables
  82 */
  83static cpumask_t cpu_callin_map;        /* Bitmask of callin CPUs */
  84static cpumask_t cpu_callout_map;       /* Bitmask of callout CPUs */
  85cpumask_t cpu_boot_map;                 /* Bitmask of boot APs */
  86unsigned long start_stack[NR_CPUS - 1];
  87
  88/*
  89 * Per CPU parameters
  90 */
  91struct mn10300_cpuinfo cpu_data[NR_CPUS] __cacheline_aligned;
  92
  93static int cpucount;                    /* The count of boot CPUs */
  94static cpumask_t smp_commenced_mask;
  95cpumask_t cpu_initialized __initdata = CPU_MASK_NONE;
  96
  97/*
  98 * Function Prototypes
  99 */
 100static int do_boot_cpu(int);
 101static void smp_show_cpu_info(int cpu_id);
 102static void smp_callin(void);
 103static void smp_online(void);
 104static void smp_store_cpu_info(int);
 105static void smp_cpu_init(void);
 106static void smp_tune_scheduling(void);
 107static void send_IPI_mask(const cpumask_t *cpumask, int irq);
 108static void init_ipi(void);
 109
 110/*
 111 * IPI Initialization interrupt definitions
 112 */
 113static void mn10300_ipi_disable(unsigned int irq);
 114static void mn10300_ipi_enable(unsigned int irq);
 115static void mn10300_ipi_chip_disable(struct irq_data *d);
 116static void mn10300_ipi_chip_enable(struct irq_data *d);
 117static void mn10300_ipi_ack(struct irq_data *d);
 118static void mn10300_ipi_nop(struct irq_data *d);
 119
 120static struct irq_chip mn10300_ipi_type = {
 121        .name           = "cpu_ipi",
 122        .irq_disable    = mn10300_ipi_chip_disable,
 123        .irq_enable     = mn10300_ipi_chip_enable,
 124        .irq_ack        = mn10300_ipi_ack,
 125        .irq_eoi        = mn10300_ipi_nop
 126};
 127
 128static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id);
 129static irqreturn_t smp_call_function_interrupt(int irq, void *dev_id);
 130
 131static struct irqaction reschedule_ipi = {
 132        .handler        = smp_reschedule_interrupt,
 133        .flags          = IRQF_NOBALANCING,
 134        .name           = "smp reschedule IPI"
 135};
 136static struct irqaction call_function_ipi = {
 137        .handler        = smp_call_function_interrupt,
 138        .flags          = IRQF_NOBALANCING,
 139        .name           = "smp call function IPI"
 140};
 141
 142#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
 143static irqreturn_t smp_ipi_timer_interrupt(int irq, void *dev_id);
 144static struct irqaction local_timer_ipi = {
 145        .handler        = smp_ipi_timer_interrupt,
 146        .flags          = IRQF_NOBALANCING,
 147        .name           = "smp local timer IPI"
 148};
 149#endif
 150
/**
 * init_ipi - Initialise the IPI mechanism
 *
 * Runs once on the boot CPU from smp_prepare_cpus().  Installs irq_chip
 * and handlers for the maskable IPIs (reschedule, call-function-single
 * and, where the local timer is IPI-driven, the local timer IPI), then
 * primes the interrupt control registers for the NMI-level IPIs (cache
 * flush, NMI call function, SMP boot), which are dispatched through the
 * low-level stub rather than the ordinary IRQ path.
 */
static void init_ipi(void)
{
        unsigned long flags;
        u16 tmp16;

        /* set up the reschedule IPI */
        irq_set_chip_and_handler(RESCHEDULE_IPI, &mn10300_ipi_type,
                                 handle_percpu_irq);
        setup_irq(RESCHEDULE_IPI, &reschedule_ipi);
        set_intr_level(RESCHEDULE_IPI, RESCHEDULE_GxICR_LV);
        mn10300_ipi_enable(RESCHEDULE_IPI);

        /* set up the call function IPI */
        irq_set_chip_and_handler(CALL_FUNC_SINGLE_IPI, &mn10300_ipi_type,
                                 handle_percpu_irq);
        setup_irq(CALL_FUNC_SINGLE_IPI, &call_function_ipi);
        set_intr_level(CALL_FUNC_SINGLE_IPI, CALL_FUNCTION_GxICR_LV);
        mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI);

        /* set up the local timer IPI */
#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || \
    defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
        irq_set_chip_and_handler(LOCAL_TIMER_IPI, &mn10300_ipi_type,
                                 handle_percpu_irq);
        setup_irq(LOCAL_TIMER_IPI, &local_timer_ipi);
        set_intr_level(LOCAL_TIMER_IPI, LOCAL_TIMER_GxICR_LV);
        mn10300_ipi_enable(LOCAL_TIMER_IPI);
#endif

#ifdef CONFIG_MN10300_CACHE_ENABLED
        /* set up the cache flush IPI: routed through the low-level IPI
         * handler stub for its interrupt level, not the generic IRQ path
         */
        irq_set_chip(FLUSH_CACHE_IPI, &mn10300_ipi_type);
        flags = arch_local_cli_save();
        __set_intr_stub(NUM2EXCEP_IRQ_LEVEL(FLUSH_CACHE_GxICR_LV),
                        mn10300_low_ipi_handler);
        GxICR(FLUSH_CACHE_IPI) = FLUSH_CACHE_GxICR_LV | GxICR_DETECT;
        mn10300_ipi_enable(FLUSH_CACHE_IPI);
        arch_local_irq_restore(flags);
#endif

        /* set up the NMI call function IPI */
        irq_set_chip(CALL_FUNCTION_NMI_IPI, &mn10300_ipi_type);
        flags = arch_local_cli_save();
        GxICR(CALL_FUNCTION_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
        tmp16 = GxICR(CALL_FUNCTION_NMI_IPI); /* read back to flush the write buffer */
        arch_local_irq_restore(flags);

        /* set up the SMP boot IPI */
        flags = arch_local_cli_save();
        __set_intr_stub(NUM2EXCEP_IRQ_LEVEL(SMP_BOOT_GxICR_LV),
                        mn10300_low_ipi_handler);
        arch_local_irq_restore(flags);

#ifdef CONFIG_KERNEL_DEBUGGER
        irq_set_chip(DEBUGGER_NMI_IPI, &mn10300_ipi_type);
#endif
}
 211
/**
 * mn10300_ipi_shutdown - Shut down handling of an IPI
 * @irq: The IPI to be shut down.
 *
 * Read-modify-write the channel's ICR with interrupts disabled: keep only
 * the level bits (dropping the enable bit) and write the detect bit to
 * clear any latched request.
 */
static void mn10300_ipi_shutdown(unsigned int irq)
{
        unsigned long flags;
        u16 tmp;

        flags = arch_local_cli_save();

        tmp = GxICR(irq);
        GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
        tmp = GxICR(irq);       /* read back to flush the write buffer */

        arch_local_irq_restore(flags);
}
 229
/**
 * mn10300_ipi_enable - Enable an IPI
 * @irq: The IPI to be enabled.
 *
 * Read-modify-write the channel's ICR with interrupts disabled: preserve
 * the level bits and set the enable bit.
 */
static void mn10300_ipi_enable(unsigned int irq)
{
        unsigned long flags;
        u16 tmp;

        flags = arch_local_cli_save();

        tmp = GxICR(irq);
        GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE;
        tmp = GxICR(irq);       /* read back to flush the write buffer */

        arch_local_irq_restore(flags);
}
 247
 248static void mn10300_ipi_chip_enable(struct irq_data *d)
 249{
 250        mn10300_ipi_enable(d->irq);
 251}
 252
/**
 * mn10300_ipi_disable - Disable an IPI
 * @irq: The IPI to be disabled.
 *
 * Read-modify-write the channel's ICR with interrupts disabled, keeping
 * only the level bits (clearing enable, request and detect).
 */
static void mn10300_ipi_disable(unsigned int irq)
{
        unsigned long flags;
        u16 tmp;

        flags = arch_local_cli_save();

        tmp = GxICR(irq);
        GxICR(irq) = tmp & GxICR_LEVEL;
        tmp = GxICR(irq);       /* read back to flush the write buffer */

        arch_local_irq_restore(flags);
}
 270
 271static void mn10300_ipi_chip_disable(struct irq_data *d)
 272{
 273        mn10300_ipi_disable(d->irq);
 274}
 275
 276
/**
 * mn10300_ipi_ack - Acknowledge an IPI interrupt in the PIC
 * @irq: The IPI to be acknowledged.
 *
 * Clear the interrupt detection flag for the IPI on the appropriate interrupt
 * channel in the PIC.  Uses the byte-wide ICR access so only the detect bit
 * is written, then reads the register back to flush the write buffer.
 */
static void mn10300_ipi_ack(struct irq_data *d)
{
        unsigned int irq = d->irq;
        unsigned long flags;
        u16 tmp;

        flags = arch_local_cli_save();
        GxICR_u8(irq) = GxICR_DETECT;
        tmp = GxICR(irq);       /* read back to flush the write buffer */
        arch_local_irq_restore(flags);
}
 295
/**
 * mn10300_ipi_nop - Dummy IPI action
 * @d: The interrupt data for the IPI being acted upon.
 *
 * Deliberately empty: used as the irq_eoi callback since no end-of-interrupt
 * action is needed for these channels.
 */
static void mn10300_ipi_nop(struct irq_data *d)
{
}
 303
 304/**
 305 * send_IPI_mask - Send IPIs to all CPUs in list
 306 * @cpumask: The list of CPUs to target.
 307 * @irq: The IPI request to be sent.
 308 *
 309 * Send the specified IPI to all the CPUs in the list, not waiting for them to
 310 * finish before returning.  The caller is responsible for synchronisation if
 311 * that is needed.
 312 */
 313static void send_IPI_mask(const cpumask_t *cpumask, int irq)
 314{
 315        int i;
 316        u16 tmp;
 317
 318        for (i = 0; i < NR_CPUS; i++) {
 319                if (cpumask_test_cpu(i, cpumask)) {
 320                        /* send IPI */
 321                        tmp = CROSS_GxICR(irq, i);
 322                        CROSS_GxICR(irq, i) =
 323                                tmp | GxICR_REQUEST | GxICR_DETECT;
 324                        tmp = CROSS_GxICR(irq, i); /* flush write buffer */
 325                }
 326        }
 327}
 328
 329/**
 330 * send_IPI_self - Send an IPI to this CPU.
 331 * @irq: The IPI request to be sent.
 332 *
 333 * Send the specified IPI to the current CPU.
 334 */
 335void send_IPI_self(int irq)
 336{
 337        send_IPI_mask(cpumask_of(smp_processor_id()), irq);
 338}
 339
 340/**
 341 * send_IPI_allbutself - Send IPIs to all the other CPUs.
 342 * @irq: The IPI request to be sent.
 343 *
 344 * Send the specified IPI to all CPUs in the system barring the current one,
 345 * not waiting for them to finish before returning.  The caller is responsible
 346 * for synchronisation if that is needed.
 347 */
 348void send_IPI_allbutself(int irq)
 349{
 350        cpumask_t cpumask;
 351
 352        cpumask_copy(&cpumask, cpu_online_mask);
 353        cpumask_clear_cpu(smp_processor_id(), &cpumask);
 354        send_IPI_mask(&cpumask, irq);
 355}
 356
/* Multi-target call-function IPIs are not supported on this platform;
 * only the single-CPU variant below is wired up, so reaching this is a
 * hard bug.  (The commented-out send shows what an implementation would
 * look like.)
 */
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        BUG();
        /*send_IPI_mask(mask, CALL_FUNCTION_IPI);*/
}
 362
 363void arch_send_call_function_single_ipi(int cpu)
 364{
 365        send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI);
 366}
 367
 368/**
 369 * smp_send_reschedule - Send reschedule IPI to a CPU
 370 * @cpu: The CPU to target.
 371 */
 372void smp_send_reschedule(int cpu)
 373{
 374        send_IPI_mask(cpumask_of(cpu), RESCHEDULE_IPI);
 375}
 376
/**
 * smp_nmi_call_function - Send a call function NMI IPI to all CPUs
 * @func: The function to ask to be run.
 * @info: The context data to pass to that function.
 * @wait: If true, wait (atomically) until function is run on all CPUs.
 *
 * Send a non-maskable request to all other online CPUs, asking them to run
 * @func with @info, and, if @wait, spin until every CPU has finished.
 *
 * The argument block lives on this stack frame; smp_nmi_call_lock keeps it
 * (and the nmi_call_data pointer) exclusive until all responders are done.
 *
 * Returns 0 if successful, -ETIMEDOUT if we were asked to wait, but hit the
 * timeout.  Note: with CALL_FUNCTION_NMI_IPI_TIMEOUT defined as 0, only the
 * untimed spin path below is ever taken and 0 is always returned.
 */
int smp_nmi_call_function(smp_call_func_t func, void *info, int wait)
{
        struct nmi_call_data_struct data;
        unsigned long flags;
        unsigned int cnt;
        int cpus, ret = 0;

        /* nothing to do if we're the only online CPU */
        cpus = num_online_cpus() - 1;
        if (cpus < 1)
                return 0;

        data.func = func;
        data.info = info;
        /* 'started' = all online CPUs except us; each responder clears its
         * own bit once it has grabbed the call data
         */
        cpumask_copy(&data.started, cpu_online_mask);
        cpumask_clear_cpu(smp_processor_id(), &data.started);
        data.wait = wait;
        if (wait)
                data.finished = data.started;

        spin_lock_irqsave(&smp_nmi_call_lock, flags);
        nmi_call_data = &data;
        smp_mb();       /* publish the call data before raising the NMI */

        /* Send a message to all other CPUs and wait for them to respond */
        send_IPI_allbutself(CALL_FUNCTION_NMI_IPI);

        /* Wait for response */
        if (CALL_FUNCTION_NMI_IPI_TIMEOUT > 0) {
                /* bounded wait: poll 'started' (then 'finished') in 1ms
                 * steps up to the timeout
                 */
                for (cnt = 0;
                     cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT &&
                             !cpumask_empty(&data.started);
                     cnt++)
                        mdelay(1);

                if (wait && cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT) {
                        for (cnt = 0;
                             cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT &&
                                     !cpumask_empty(&data.finished);
                             cnt++)
                                mdelay(1);
                }

                if (cnt >= CALL_FUNCTION_NMI_IPI_TIMEOUT)
                        ret = -ETIMEDOUT;

        } else {
                /* If timeout value is zero, wait until cpumask has been
                 * cleared */
                while (!cpumask_empty(&data.started))
                        barrier();
                if (wait)
                        while (!cpumask_empty(&data.finished))
                                barrier();
        }

        spin_unlock_irqrestore(&smp_nmi_call_lock, flags);
        return ret;
}
 448
 449/**
 450 * smp_jump_to_debugger - Make other CPUs enter the debugger by sending an IPI
 451 *
 452 * Send a non-maskable request to all other CPUs in the system, instructing
 453 * them to jump into the debugger.  The caller is responsible for checking that
 454 * the other CPUs responded to the instruction.
 455 *
 456 * The caller should make sure that this CPU's debugger IPI is disabled.
 457 */
 458void smp_jump_to_debugger(void)
 459{
 460        if (num_online_cpus() > 1)
 461                /* Send a message to all other CPUs */
 462                send_IPI_allbutself(DEBUGGER_NMI_IPI);
 463}
 464
/**
 * stop_this_cpu - Callback to stop a CPU.
 * @unused: Callback context (ignored).
 *
 * Runs in NMI context on each CPU targeted by smp_send_stop().  Marks the
 * CPU offline and spins with interrupts disabled.
 *
 * NOTE(review): nothing in this file ever sets 'stopflag', so stopped CPUs
 * appear to spin here indefinitely (until reset); confirm whether the
 * re-online path below is reachable at all.
 */
void stop_this_cpu(void *unused)
{
        static volatile int stopflag;
        unsigned long flags;

#ifdef CONFIG_GDBSTUB
        /* In case of single stepping smp_send_stop by other CPU,
         * clear procindebug to avoid deadlock.
         */
        atomic_set(&procindebug[smp_processor_id()], 0);
#endif  /* CONFIG_GDBSTUB */

        flags = arch_local_cli_save();
        set_cpu_online(smp_processor_id(), false);

        while (!stopflag)
                cpu_relax();

        set_cpu_online(smp_processor_id(), true);
        arch_local_irq_restore(flags);
}
 490
/**
 * smp_send_stop - Send a stop request to all CPUs.
 *
 * Uses the NMI call-function mechanism so that CPUs halt even with normal
 * interrupts disabled; does not wait for completion (wait == 0).
 */
void smp_send_stop(void)
{
        smp_nmi_call_function(stop_this_cpu, NULL, 0);
}
 498
/**
 * smp_reschedule_interrupt - Reschedule IPI handler
 * @irq: The interrupt number.
 * @dev_id: The device ID.
 *
 * Hands off to the scheduler's generic IPI entry point.
 *
 * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
 */
static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id)
{
        scheduler_ipi();
        return IRQ_HANDLED;
}
 511
/**
 * smp_call_function_interrupt - Call function IPI handler
 * @irq: The interrupt number.
 * @dev_id: The device ID.
 *
 * Only the single-target call-function path is implemented on this
 * platform (see arch_send_call_function_ipi_mask), hence the multi-target
 * dispatch remains commented out.
 *
 * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
 */
static irqreturn_t smp_call_function_interrupt(int irq, void *dev_id)
{
        /* generic_smp_call_function_interrupt(); */
        generic_smp_call_function_single_interrupt();
        return IRQ_HANDLED;
}
 525
/**
 * smp_nmi_call_function_interrupt - Non-maskable call function IPI handler
 *
 * Responder side of smp_nmi_call_function(): snapshot the call parameters
 * from the shared nmi_call_data block, signal pickup by clearing our bit
 * in 'started', run the function, and, if the initiator is waiting, clear
 * our bit in 'finished' as well.  The parameters must be copied out before
 * 'started' is cleared, as the initiator may reuse the block afterwards.
 */
void smp_nmi_call_function_interrupt(void)
{
        smp_call_func_t func = nmi_call_data->func;
        void *info = nmi_call_data->info;
        int wait = nmi_call_data->wait;

        /* Notify the initiating CPU that I've grabbed the data and am about to
         * execute the function
         */
        smp_mb();       /* order the copies above before clearing 'started' */
        cpumask_clear_cpu(smp_processor_id(), &nmi_call_data->started);
        (*func)(info);

        if (wait) {
                smp_mb();       /* order func's effects before signalling done */
                cpumask_clear_cpu(smp_processor_id(),
                                  &nmi_call_data->finished);
        }
}
 548
 549#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || \
 550    defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
/**
 * smp_ipi_timer_interrupt - Local timer IPI handler
 * @irq: The interrupt number.
 * @dev_id: The device ID.
 *
 * Forwards to the platform's local timer tick handler; only built when the
 * local timer is driven by IPI (no generic clockevents, or broadcast mode).
 *
 * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
 */
static irqreturn_t smp_ipi_timer_interrupt(int irq, void *dev_id)
{
        return local_timer_interrupt();
}
 562#endif
 563
 564void __init smp_init_cpus(void)
 565{
 566        int i;
 567        for (i = 0; i < NR_CPUS; i++) {
 568                set_cpu_possible(i, true);
 569                set_cpu_present(i, true);
 570        }
 571}
 572
/**
 * smp_cpu_init - Initialise AP in start_secondary.
 *
 * For this Application Processor: guard against double initialisation,
 * adopt init_mm as the active mm, reset FPU ownership, then program and
 * enable this CPU's IPI interrupt channels and retire the boot IPI.
 */
static void __init smp_cpu_init(void)
{
        unsigned long flags;
        int cpu_id = smp_processor_id();
        u16 tmp16;

        /* each CPU must only pass through here once; spin forever (with
         * interrupts enabled) if it somehow comes back
         */
        if (test_and_set_bit(cpu_id, &cpu_initialized)) {
                printk(KERN_WARNING "CPU#%d already initialized!\n", cpu_id);
                for (;;)
                        local_irq_enable();
        }
        printk(KERN_INFO "Initializing CPU#%d\n", cpu_id);

        /* borrow init_mm for this idle context */
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;
        BUG_ON(current->mm);

        enter_lazy_tlb(&init_mm, current);

        /* Force FPU initialization */
        clear_using_fpu(current);

        /* program the level and clear any pending detection on each
         * maskable IPI channel, then enable it
         */
        GxICR(CALL_FUNC_SINGLE_IPI) = CALL_FUNCTION_GxICR_LV | GxICR_DETECT;
        mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI);

        GxICR(LOCAL_TIMER_IPI) = LOCAL_TIMER_GxICR_LV | GxICR_DETECT;
        mn10300_ipi_enable(LOCAL_TIMER_IPI);

        GxICR(RESCHEDULE_IPI) = RESCHEDULE_GxICR_LV | GxICR_DETECT;
        mn10300_ipi_enable(RESCHEDULE_IPI);

#ifdef CONFIG_MN10300_CACHE_ENABLED
        GxICR(FLUSH_CACHE_IPI) = FLUSH_CACHE_GxICR_LV | GxICR_DETECT;
        mn10300_ipi_enable(FLUSH_CACHE_IPI);
#endif

        /* the boot IPI has served its purpose */
        mn10300_ipi_shutdown(SMP_BOOT_IRQ);

        /* Set up the non-maskable call function IPI */
        flags = arch_local_cli_save();
        GxICR(CALL_FUNCTION_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
        tmp16 = GxICR(CALL_FUNCTION_NMI_IPI); /* read back to flush the write buffer */
        arch_local_irq_restore(flags);
}
 623
 624/**
 625 * smp_prepare_cpu_init - Initialise CPU in startup_secondary
 626 *
 627 * Set interrupt level 0-6 setting and init ICR of the kernel debugger.
 628 */
 629void smp_prepare_cpu_init(void)
 630{
 631        int loop;
 632
 633        /* Set the interrupt vector registers */
 634        IVAR0 = EXCEP_IRQ_LEVEL0;
 635        IVAR1 = EXCEP_IRQ_LEVEL1;
 636        IVAR2 = EXCEP_IRQ_LEVEL2;
 637        IVAR3 = EXCEP_IRQ_LEVEL3;
 638        IVAR4 = EXCEP_IRQ_LEVEL4;
 639        IVAR5 = EXCEP_IRQ_LEVEL5;
 640        IVAR6 = EXCEP_IRQ_LEVEL6;
 641
 642        /* Disable all interrupts and set to priority 6 (lowest) */
 643        for (loop = 0; loop < GxICR_NUM_IRQS; loop++)
 644                GxICR(loop) = GxICR_LEVEL_6 | GxICR_DETECT;
 645
 646#ifdef CONFIG_KERNEL_DEBUGGER
 647        /* initialise the kernel debugger interrupt */
 648        do {
 649                unsigned long flags;
 650                u16 tmp16;
 651
 652                flags = arch_local_cli_save();
 653                GxICR(DEBUGGER_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
 654                tmp16 = GxICR(DEBUGGER_NMI_IPI);
 655                arch_local_irq_restore(flags);
 656        } while (0);
 657#endif
 658}
 659
/**
 * start_secondary - Activate a secondary CPU (AP)
 * @unused: Thread parameter (ignored).
 *
 * C entry point for an AP: initialise this CPU, report in to the boot CPU
 * (smp_callin), then wait for __cpu_up() to give the go-ahead via
 * smp_commenced_mask before marking ourselves online and entering the
 * idle loop.  The ordering of these steps is the boot handshake and must
 * not be changed.
 */
int __init start_secondary(void *unused)
{
        smp_cpu_init();
        smp_callin();
        /* wait for __cpu_up() on the boot CPU to release us */
        while (!cpumask_test_cpu(smp_processor_id(), &smp_commenced_mask))
                cpu_relax();

        local_flush_tlb();
        preempt_disable();
        smp_online();

#ifdef CONFIG_GENERIC_CLOCKEVENTS
        init_clockevents();
#endif
        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
        return 0;
}
 681
/**
 * smp_prepare_cpus - Boot up secondary CPUs (APs)
 * @max_cpus: Maximum number of CPUs to boot.
 *
 * Record the boot CPU's information, initialise the IPI machinery, then
 * boot each secondary CPU (physical IDs > 0) via do_boot_cpu() until
 * @max_cpus CPUs are running.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        int phy_id;

        /* Setup boot CPU information */
        smp_store_cpu_info(0);
        smp_tune_scheduling();

        init_ipi();

        /* If SMP should be disabled, then finish */
        if (max_cpus == 0) {
                printk(KERN_INFO "SMP mode deactivated.\n");
                goto smp_done;
        }

        /* Boot secondary CPUs (for which phy_id > 0) */
        for (phy_id = 0; phy_id < NR_CPUS; phy_id++) {
                /* Don't boot primary CPU */
                /* once the cap is reached, every further iteration is
                 * skipped (continue here is equivalent to break)
                 */
                if (max_cpus <= cpucount + 1)
                        continue;
                if (phy_id != 0)
                        do_boot_cpu(phy_id);
                set_cpu_possible(phy_id, true);
                smp_show_cpu_info(phy_id);
        }

smp_done:
        Dprintk("Boot done.\n");
}
 718
 719/**
 720 * smp_store_cpu_info - Save a CPU's information
 721 * @cpu: The CPU to save for.
 722 *
 723 * Save boot_cpu_data and jiffy for the specified CPU.
 724 */
 725static void __init smp_store_cpu_info(int cpu)
 726{
 727        struct mn10300_cpuinfo *ci = &cpu_data[cpu];
 728
 729        *ci = boot_cpu_data;
 730        ci->loops_per_jiffy = loops_per_jiffy;
 731        ci->type = CPUREV;
 732}
 733
/**
 * smp_tune_scheduling - Set time slice value
 *
 * Intentionally empty: this platform has no scheduler tuning to perform;
 * kept as a hook called from smp_prepare_cpus().
 */
static void __init smp_tune_scheduling(void)
{
}
 742
/**
 * do_boot_cpu: Boot up one CPU
 * @phy_id: Physical ID of CPU to boot.
 *
 * Fork an idle task for the CPU, point it at start_secondary, raise the
 * SMP boot IPI on the target, and run the two-stage handshake: first wait
 * (up to ~100ms) for the AP to consume the boot IPI request bit, then
 * permit it to initialise (cpu_callout_map) and wait (up to ~5s) for it to
 * report in (cpu_callin_map).
 *
 * Returns 0 on success, 1 otherwise (with all bookkeeping rolled back).
 */
static int __init do_boot_cpu(int phy_id)
{
        struct task_struct *idle;
        unsigned long send_status, callin_status;
        int timeout, cpu_id;

        send_status = GxICR_REQUEST;
        callin_status = 0;
        timeout = 0;
        cpu_id = phy_id;        /* logical CPU id == physical id here */

        cpucount++;

        /* Create idle thread for this CPU */
        idle = fork_idle(cpu_id);
        if (IS_ERR(idle))
                panic("Failed fork for CPU#%d.", cpu_id);

        /* the AP will start executing at start_secondary */
        idle->thread.pc = (unsigned long)start_secondary;

        printk(KERN_NOTICE "Booting CPU#%d\n", cpu_id);
        /* stash the AP's initial stack pointer for the low-level boot code
         * (slot 0 belongs to CPU 1, hence the -1)
         */
        start_stack[cpu_id - 1] = idle->thread.sp;

        task_thread_info(idle)->cpu = cpu_id;

        /* Send boot IPI to AP */
        send_IPI_mask(cpumask_of(phy_id), SMP_BOOT_IRQ);

        Dprintk("Waiting for send to finish...\n");

        /* Wait for AP's IPI receive in 100[ms]: the request bit clears in
         * the target's ICR once the AP has taken the interrupt
         */
        do {
                udelay(1000);
                send_status =
                        CROSS_GxICR(SMP_BOOT_IRQ, phy_id) & GxICR_REQUEST;
        } while (send_status == GxICR_REQUEST && timeout++ < 100);

        Dprintk("Waiting for cpu_callin_map.\n");

        if (send_status == 0) {
                /* Allow AP to start initializing */
                cpumask_set_cpu(cpu_id, &cpu_callout_map);

                /* Wait for setting cpu_callin_map */
                timeout = 0;
                do {
                        udelay(1000);
                        callin_status = cpumask_test_cpu(cpu_id,
                                                         &cpu_callin_map);
                } while (callin_status == 0 && timeout++ < 5000);

                if (callin_status == 0)
                        Dprintk("Not responding.\n");
        } else {
                printk(KERN_WARNING "IPI not delivered.\n");
        }

        /* on any failure, undo the handshake state and the CPU count */
        if (send_status == GxICR_REQUEST || callin_status == 0) {
                cpumask_clear_cpu(cpu_id, &cpu_callout_map);
                cpumask_clear_cpu(cpu_id, &cpu_callin_map);
                cpumask_clear_cpu(cpu_id, &cpu_initialized);
                cpucount--;
                return 1;
        }
        return 0;
}
 816
 817/**
 818 * smp_show_cpu_info - Show SMP CPU information
 819 * @cpu: The CPU of interest.
 820 */
 821static void __init smp_show_cpu_info(int cpu)
 822{
 823        struct mn10300_cpuinfo *ci = &cpu_data[cpu];
 824
 825        printk(KERN_INFO
 826               "CPU#%d : ioclk speed: %lu.%02luMHz : bogomips : %lu.%02lu\n",
 827               cpu,
 828               MN10300_IOCLK / 1000000,
 829               (MN10300_IOCLK / 10000) % 100,
 830               ci->loops_per_jiffy / (500000 / HZ),
 831               (ci->loops_per_jiffy / (5000 / HZ)) % 100);
 832}
 833
/**
 * smp_callin - Set cpu_callin_map of the current CPU ID
 *
 * AP side of the boot handshake with do_boot_cpu(): wait up to 2s for the
 * boot CPU to set our bit in cpu_callout_map, calibrate delay loops if
 * configured, save our CPU parameters, and finally report in by setting
 * our bit in cpu_callin_map.
 */
static void __init smp_callin(void)
{
        unsigned long timeout;
        int cpu;

        cpu = smp_processor_id();
        timeout = jiffies + (2 * HZ);

        /* calling in twice means the handshake state is corrupt */
        if (cpumask_test_cpu(cpu, &cpu_callin_map)) {
                printk(KERN_ERR "CPU#%d already present.\n", cpu);
                BUG();
        }
        Dprintk("CPU#%d waiting for CALLOUT\n", cpu);

        /* Wait for AP startup 2s total */
        while (time_before(jiffies, timeout)) {
                if (cpumask_test_cpu(cpu, &cpu_callout_map))
                        break;
                cpu_relax();
        }

        if (!time_before(jiffies, timeout)) {
                printk(KERN_ERR
                       "BUG: CPU#%d started up but did not get a callout!\n",
                       cpu);
                BUG();
        }

#ifdef CONFIG_CALIBRATE_DELAY
        calibrate_delay();              /* Get our bogomips */
#endif

        /* Save our processor parameters */
        smp_store_cpu_info(cpu);

        /* Allow the boot processor to continue */
        cpumask_set_cpu(cpu, &cpu_callin_map);
}
 875
/**
 * smp_online - Set cpu_online_mask
 *
 * Final step of AP bring-up: notify the hotplug chain that this CPU is
 * starting, mark it online, then enable local interrupts.  The ordering
 * (notify, online, irq enable) is deliberate.
 */
static void __init smp_online(void)
{
        int cpu;

        cpu = smp_processor_id();

        notify_cpu_starting(cpu);

        set_cpu_online(cpu, true);

        local_irq_enable();
}
 891
/**
 * smp_cpus_done - Hook called once all CPUs have been booted
 * @max_cpus: Maximum CPU count.
 *
 * Intentionally empty: no post-boot fixups are needed on this platform.
 */
void __init smp_cpus_done(unsigned int max_cpus)
{
}
 901
 902/*
 903 * smp_prepare_boot_cpu - Set up stuff for the boot processor.
 904 *
 905 * Set up the cpu_online_mask, cpu_callout_map and cpu_callin_map of the boot
 906 * processor (CPU 0).
 907 */
 908void smp_prepare_boot_cpu(void)
 909{
 910        cpumask_set_cpu(0, &cpu_callout_map);
 911        cpumask_set_cpu(0, &cpu_callin_map);
 912        current_thread_info()->cpu = 0;
 913}
 914
/*
 * initialize_secondary - Initialise a secondary CPU (Application Processor).
 *
 * Switch to the idle thread's stack and jump to its saved program counter
 * (set to start_secondary by do_boot_cpu).  Never returns.
 */
void initialize_secondary(void)
{
        asm volatile (
                "mov    %0,sp   \n"     /* load the idle thread's stack pointer */
                "jmp    (%1)    \n"     /* jump to its entry point */
                :
                : "a"(current->thread.sp), "a"(current->thread.pc));
}
 928
/**
 * __cpu_up - Set smp_commenced_mask for the nominated CPU
 * @cpu: The target CPU.
 * @tidle: The idle task for the target CPU (unused here; the idle thread
 *         was created in do_boot_cpu).
 *
 * Release the AP spinning in start_secondary() and wait up to 5s for it to
 * mark itself online.  BUG()s if the CPU never comes online.
 *
 * Returns 0 on success.
 */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
        int timeout;

#ifdef CONFIG_HOTPLUG_CPU
        /* a previously-offlined CPU is parked in sleep mode and must be
         * woken rather than re-booted
         */
        if (sleep_mode[cpu])
                run_wakeup_cpu(cpu);
#endif /* CONFIG_HOTPLUG_CPU */

        cpumask_set_cpu(cpu, &smp_commenced_mask);

        /* Wait 5s total for a response */
        for (timeout = 0 ; timeout < 5000 ; timeout++) {
                if (cpu_online(cpu))
                        break;
                udelay(1000);
        }

        BUG_ON(!cpu_online(cpu));
        return 0;
}
 954
/**
 * setup_profiling_timer - Set up the profiling timer
 * @multiplier - The frequency multiplier to use
 *
 * The frequency of the profiling timer can be changed by writing a multiplier
 * value into /proc/profile.  Not supported on this platform.
 *
 * Returns -EINVAL unconditionally.
 */
int setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}
 966
 967/*
 968 * CPU hotplug routines
 969 */
 970#ifdef CONFIG_HOTPLUG_CPU
 971
 972static DEFINE_PER_CPU(struct cpu, cpu_devices);
 973
 974static int __init topology_init(void)
 975{
 976        int cpu, ret;
 977
 978        for_each_cpu(cpu) {
 979                ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL);
 980                if (ret)
 981                        printk(KERN_WARNING
 982                               "topology_init: register_cpu %d failed (%d)\n",
 983                               cpu, ret);
 984        }
 985        return 0;
 986}
 987
 988subsys_initcall(topology_init);
 989
 990int __cpu_disable(void)
 991{
 992        int cpu = smp_processor_id();
 993        if (cpu == 0)
 994                return -EBUSY;
 995
 996        migrate_irqs();
 997        cpumask_clear_cpu(cpu, &mm_cpumask(current->active_mm));
 998        return 0;
 999}
1000
/**
 * __cpu_die - Park the nominated (offlined) CPU
 * @cpu: The dead CPU.
 *
 * Sends the CPU into SLEEP mode; it stays there until __cpu_up() later
 * calls run_wakeup_cpu() for it.
 */
void __cpu_die(unsigned int cpu)
{
        run_sleep_cpu(cpu);
}
1005
1006#ifdef CONFIG_MN10300_CACHE_ENABLED
/**
 * hotplug_cpu_disable_cache - Disable this CPU's icache and dcache
 *
 * Clear the ICEN and DCEN enable bits in the cache control register
 * (CHCTR), then spin until the ICBUSY/DCBUSY bits read back clear,
 * indicating the disable has completed.
 */
static inline void hotplug_cpu_disable_cache(void)
{
        int tmp;
        asm volatile(
                "       movhu   (%1),%0 \n"
                "       and     %2,%0   \n"
                "       movhu   %0,(%1) \n"
                "1:     movhu   (%1),%0 \n"
                "       btst    %3,%0   \n"
                "       bne     1b      \n"
                : "=&r"(tmp)
                : "a"(&CHCTR),
                  "i"(~(CHCTR_ICEN | CHCTR_DCEN)),
                  "i"(CHCTR_ICBUSY | CHCTR_DCBUSY)
                : "memory", "cc");
}
1023
/**
 * hotplug_cpu_enable_cache - Enable this CPU's icache and dcache
 *
 * Set the ICEN and DCEN enable bits in the cache control register (CHCTR)
 * with a read-modify-write sequence.
 */
static inline void hotplug_cpu_enable_cache(void)
{
        int tmp;
        asm volatile(
                "movhu  (%1),%0 \n"
                "or     %2,%0   \n"
                "movhu  %0,(%1) \n"
                : "=&r"(tmp)
                : "a"(&CHCTR),
                  "i"(CHCTR_ICEN | CHCTR_DCEN)
                : "memory", "cc");
}
1036
/**
 * hotplug_cpu_invalidate_cache - Invalidate this CPU's icache and dcache
 *
 * Set the ICINV and DCINV invalidate bits in the cache control register
 * (CHCTR) with a read-modify-write sequence.
 *
 * NOTE(review): unlike the disable/enable variants above, this asm has no
 * "memory" clobber - presumably intentional since the caller flushes dirty
 * data first, but worth confirming.
 */
static inline void hotplug_cpu_invalidate_cache(void)
{
        int tmp;
        asm volatile (
                "movhu  (%1),%0 \n"
                "or     %2,%0   \n"
                "movhu  %0,(%1) \n"
                : "=&r"(tmp)
                : "a"(&CHCTR),
                  "i"(CHCTR_ICINV | CHCTR_DCINV)
                : "cc");
}
1049
1050#else /* CONFIG_MN10300_CACHE_ENABLED */
1051#define hotplug_cpu_disable_cache()     do {} while (0)
1052#define hotplug_cpu_enable_cache()      do {} while (0)
1053#define hotplug_cpu_invalidate_cache()  do {} while (0)
1054#endif /* CONFIG_MN10300_CACHE_ENABLED */
1055
1056/**
1057 * hotplug_cpu_nmi_call_function - Call a function on other CPUs for hotplug
1058 * @cpumask: List of target CPUs.
1059 * @func: The function to call on those CPUs.
1060 * @info: The context data for the function to be called.
1061 * @wait: Whether to wait for the calls to complete.
1062 *
1063 * Non-maskably call a function on another CPU for hotplug purposes.
1064 *
1065 * This function must be called with maskable interrupts disabled.
1066 */
static int hotplug_cpu_nmi_call_function(cpumask_t cpumask,
                                         smp_call_func_t func, void *info,
                                         int wait)
{
        /*
         * The address and the size of nmi_call_func_mask_data
         * need to be aligned on L1_CACHE_BYTES.
         */
        static struct nmi_call_data_struct nmi_call_func_mask_data
                __cacheline_aligned;
        unsigned long start, end;

        /* address range of the call-data record, for the explicit dcache
         * maintenance below (target CPUs may have their caches disabled) */
        start = (unsigned long)&nmi_call_func_mask_data;
        end = start + sizeof(struct nmi_call_data_struct);

        /* fill in the call record; the target set doubles as the
         * started/finished progress masks */
        nmi_call_func_mask_data.func = func;
        nmi_call_func_mask_data.info = info;
        nmi_call_func_mask_data.started = cpumask;
        nmi_call_func_mask_data.wait = wait;
        if (wait)
                nmi_call_func_mask_data.finished = cpumask;

        spin_lock(&smp_nmi_call_lock);
        nmi_call_data = &nmi_call_func_mask_data;
        /* push the record to RAM before sending the NMI so that targets
         * running with caches disabled can read it */
        mn10300_local_dcache_flush_range(start, end);
        smp_wmb();

        send_IPI_mask(cpumask, CALL_FUNCTION_NMI_IPI);

        /* wait for the started mask to empty (presumably each target
         * clears its own bit on pickup - handler is elsewhere), re-reading
         * from RAM each pass; note there is no timeout here, matching
         * CALL_FUNCTION_NMI_IPI_TIMEOUT == 0 (no timeout) */
        do {
                mn10300_local_dcache_inv_range(start, end);
                barrier();
        } while (!cpumask_empty(&nmi_call_func_mask_data.started));

        if (wait) {
                /* likewise wait for every target to finish running func */
                do {
                        mn10300_local_dcache_inv_range(start, end);
                        barrier();
                } while (!cpumask_empty(&nmi_call_func_mask_data.finished));
        }

        spin_unlock(&smp_nmi_call_lock);
        return 0;
}
1111
/**
 * restart_wakeup_cpu - Complete this CPU's revival after a hotplug wakeup
 *
 * Runs on the freshly woken CPU: re-register it in the callin map, flush
 * the local TLB, mark the CPU online, and publish those updates with a
 * write barrier.
 */
static void restart_wakeup_cpu(void)
{
        unsigned int cpu = smp_processor_id();

        cpumask_set_cpu(cpu, &cpu_callin_map);
        local_flush_tlb();
        set_cpu_online(cpu, true);
        smp_wmb();
}
1121
/**
 * prepare_sleep_cpu - Ready the calling CPU for entry into SLEEP mode
 * @unused: Unused (smp_call_func_t signature).
 *
 * Invoked by NMI on the CPU being offlined, before sleep_cpu().
 */
static void prepare_sleep_cpu(void *unused)
{
        /* flag this CPU as sleeping before touching the caches; the full
         * barrier orders the flag write ahead of the cache teardown */
        sleep_mode[smp_processor_id()] = 1;
        smp_mb();
        /* write back all dirty lines, then disable and invalidate the
         * caches so nothing stale survives the sleep */
        mn10300_local_dcache_flush_inv();
        hotplug_cpu_disable_cache();
        hotplug_cpu_invalidate_cache();
}
1130
/* When this function is called, IE=0 and NMID=0.  The CPU loops in SLEEP
 * mode until run_wakeup_cpu() clears its sleep_mode flag, then completes
 * the wakeup via restart_wakeup_cpu(). */
static void sleep_cpu(void *unused)
{
        unsigned int cpu_id = smp_processor_id();
        /*
         * CALL_FUNCTION_NMI_IPI for wakeup_cpu() shall not be requested,
         * before this cpu goes in SLEEP mode.
         */
        do {
                smp_mb();
                __sleep_cpu();
        } while (sleep_mode[cpu_id]);
        restart_wakeup_cpu();
}
1145
1146static void run_sleep_cpu(unsigned int cpu)
1147{
1148        unsigned long flags;
1149        cpumask_t cpumask;
1150
1151        cpumask_copy(&cpumask, &cpumask_of(cpu));
1152        flags = arch_local_cli_save();
1153        hotplug_cpu_nmi_call_function(cpumask, prepare_sleep_cpu, NULL, 1);
1154        hotplug_cpu_nmi_call_function(cpumask, sleep_cpu, NULL, 0);
1155        udelay(1);              /* delay for the cpu to sleep. */
1156        arch_local_irq_restore(flags);
1157}
1158
/**
 * wakeup_cpu - Restore the calling CPU's caches after SLEEP
 *
 * Invoked by NMI on the sleeping CPU (the NMI itself breaks it out of
 * SLEEP mode).  Invalidates and re-enables the caches, then clears the
 * sleep_mode flag so sleep_cpu()'s loop terminates.
 */
static void wakeup_cpu(void)
{
        hotplug_cpu_invalidate_cache();
        hotplug_cpu_enable_cache();
        smp_mb();
        sleep_mode[smp_processor_id()] = 0;
}
1166
/**
 * run_wakeup_cpu - Wake the nominated CPU out of SLEEP mode
 * @cpu: The CPU to wake.
 *
 * With maskable interrupts disabled, flush this CPU's dcache so the woken
 * CPU sees current global data, then NMI it to run wakeup_cpu() and wait
 * for completion.
 */
static void run_wakeup_cpu(unsigned int cpu)
{
        unsigned long flags;

        flags = arch_local_cli_save();
#if NR_CPUS == 2
        /* with only one other CPU, flushing the local dcache suffices */
        mn10300_local_dcache_flush_inv();
#else
        /*
         * Before waking up the cpu,
         * all online cpus should stop and flush D-Cache for global data.
         */
#error not support NR_CPUS > 2, when CONFIG_HOTPLUG_CPU=y.
#endif
        hotplug_cpu_nmi_call_function(cpumask_of(cpu), wakeup_cpu, NULL, 1);
        arch_local_irq_restore(flags);
}
1184
1185#endif /* CONFIG_HOTPLUG_CPU */
1186