linux/arch/mn10300/kernel/smp.c
/* SMP support routines.
 *
 * Copyright (C) 2006-2008 Panasonic Corporation
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/profile.h>
#include <linux/smp.h>
#include <asm/tlbflush.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/processor.h>
#include <asm/bug.h>
#include <asm/exceptions.h>
#include <asm/hardirq.h>
#include <asm/fpu.h>
#include <asm/mmu_context.h>
#include <asm/thread_info.h>
#include <asm/cpu-regs.h>
#include <asm/intctl-regs.h>
#include "internal.h"

#ifdef CONFIG_HOTPLUG_CPU
#include <linux/cpu.h>
#include <asm/cacheflush.h>

static unsigned long sleep_mode[NR_CPUS];

static void run_sleep_cpu(unsigned int cpu);
static void run_wakeup_cpu(unsigned int cpu);
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Debug Message function
 */

#undef DEBUG_SMP
#ifdef DEBUG_SMP
#define Dprintk(fmt, ...) printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#else
#define Dprintk(fmt, ...) no_printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#endif
  61
  62/* timeout value in msec for smp_nmi_call_function. zero is no timeout. */
  63#define CALL_FUNCTION_NMI_IPI_TIMEOUT   0
  64
  65/*
  66 * Structure and data for smp_nmi_call_function().
  67 */
  68struct nmi_call_data_struct {
  69        smp_call_func_t func;
  70        void            *info;
  71        cpumask_t       started;
  72        cpumask_t       finished;
  73        int             wait;
  74        char            size_alignment[0]
  75        __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
  76} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
  77
  78static DEFINE_SPINLOCK(smp_nmi_call_lock);
  79static struct nmi_call_data_struct *nmi_call_data;
  80
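/*
 * A sketch of the NMI call function handshake, as implemented by
 * smp_nmi_call_function() and smp_nmi_call_function_interrupt() below:
 *
 *   initiating CPU                     each other CPU
 *   --------------                     --------------
 *   fill in *nmi_call_data
 *   send CALL_FUNCTION_NMI_IPI  --->   NMI handler invoked
 *   spin until ->started is     <---   clear own bit in ->started
 *     empty                            run ->func(->info)
 *   if ->wait, spin until       <---   if ->wait, clear own bit in
 *     ->finished is empty                ->finished
 */
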
/*
 * Data structures and variables
 */
static cpumask_t cpu_callin_map;        /* Bitmask of callin CPUs */
static cpumask_t cpu_callout_map;       /* Bitmask of callout CPUs */
cpumask_t cpu_boot_map;                 /* Bitmask of boot APs */
unsigned long start_stack[NR_CPUS - 1];

/*
 * Per CPU parameters
 */
struct mn10300_cpuinfo cpu_data[NR_CPUS] __cacheline_aligned;

static int cpucount;                    /* The count of boot CPUs */
static cpumask_t smp_commenced_mask;
cpumask_t cpu_initialized __initdata = CPU_MASK_NONE;

/*
 * Function Prototypes
 */
static int do_boot_cpu(int);
static void smp_show_cpu_info(int cpu_id);
static void smp_callin(void);
static void smp_online(void);
static void smp_store_cpu_info(int);
static void smp_cpu_init(void);
static void smp_tune_scheduling(void);
static void send_IPI_mask(const cpumask_t *cpumask, int irq);
static void init_ipi(void);

/*
 * IPI interrupt definitions
 */
static void mn10300_ipi_disable(unsigned int irq);
static void mn10300_ipi_enable(unsigned int irq);
static void mn10300_ipi_ack(unsigned int irq);
static void mn10300_ipi_nop(unsigned int irq);

static struct irq_chip mn10300_ipi_type = {
        .name           = "cpu_ipi",
        .disable        = mn10300_ipi_disable,
        .enable         = mn10300_ipi_enable,
        .ack            = mn10300_ipi_ack,
        .eoi            = mn10300_ipi_nop
};

static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id);
static irqreturn_t smp_call_function_interrupt(int irq, void *dev_id);

static struct irqaction reschedule_ipi = {
        .handler        = smp_reschedule_interrupt,
        .name           = "smp reschedule IPI"
};
static struct irqaction call_function_ipi = {
        .handler        = smp_call_function_interrupt,
        .name           = "smp call function IPI"
};

#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || \
    defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
static irqreturn_t smp_ipi_timer_interrupt(int irq, void *dev_id);
static struct irqaction local_timer_ipi = {
        .handler        = smp_ipi_timer_interrupt,
        .flags          = IRQF_DISABLED,
        .name           = "smp local timer IPI"
};
#endif

/**
 * init_ipi - Initialise the IPI mechanism
 */
static void init_ipi(void)
{
        unsigned long flags;
        u16 tmp16;

        /* set up the reschedule IPI */
        set_irq_chip_and_handler(RESCHEDULE_IPI,
                                 &mn10300_ipi_type, handle_percpu_irq);
        setup_irq(RESCHEDULE_IPI, &reschedule_ipi);
        set_intr_level(RESCHEDULE_IPI, RESCHEDULE_GxICR_LV);
        mn10300_ipi_enable(RESCHEDULE_IPI);

        /* set up the call function IPI */
        set_irq_chip_and_handler(CALL_FUNC_SINGLE_IPI,
                                 &mn10300_ipi_type, handle_percpu_irq);
        setup_irq(CALL_FUNC_SINGLE_IPI, &call_function_ipi);
        set_intr_level(CALL_FUNC_SINGLE_IPI, CALL_FUNCTION_GxICR_LV);
        mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI);

        /* set up the local timer IPI */
#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || \
    defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
        set_irq_chip_and_handler(LOCAL_TIMER_IPI,
                                 &mn10300_ipi_type, handle_percpu_irq);
        setup_irq(LOCAL_TIMER_IPI, &local_timer_ipi);
        set_intr_level(LOCAL_TIMER_IPI, LOCAL_TIMER_GxICR_LV);
        mn10300_ipi_enable(LOCAL_TIMER_IPI);
#endif

#ifdef CONFIG_MN10300_CACHE_ENABLED
        /* set up the cache flush IPI */
        flags = arch_local_cli_save();
        __set_intr_stub(NUM2EXCEP_IRQ_LEVEL(FLUSH_CACHE_GxICR_LV),
                        mn10300_low_ipi_handler);
        GxICR(FLUSH_CACHE_IPI) = FLUSH_CACHE_GxICR_LV | GxICR_DETECT;
        mn10300_ipi_enable(FLUSH_CACHE_IPI);
        arch_local_irq_restore(flags);
#endif

        /* set up the NMI call function IPI */
        flags = arch_local_cli_save();
        GxICR(CALL_FUNCTION_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
        tmp16 = GxICR(CALL_FUNCTION_NMI_IPI);
        arch_local_irq_restore(flags);

        /* set up the SMP boot IPI */
        flags = arch_local_cli_save();
        __set_intr_stub(NUM2EXCEP_IRQ_LEVEL(SMP_BOOT_GxICR_LV),
                        mn10300_low_ipi_handler);
        arch_local_irq_restore(flags);
}

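/*
 * Note on the ICR accessors below: each write to GxICR is followed by a dummy
 * read-back of the same register.  As with the "flush write buffer" comment
 * in send_IPI_mask(), this appears to be there to push the write out of the
 * CPU's write buffer so that the change has taken effect before interrupts
 * are restored.
 */
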
/**
 * mn10300_ipi_shutdown - Shut down handling of an IPI
 * @irq: The IPI to be shut down.
 */
static void mn10300_ipi_shutdown(unsigned int irq)
{
        unsigned long flags;
        u16 tmp;

        flags = arch_local_cli_save();

        tmp = GxICR(irq);
        GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
        tmp = GxICR(irq);

        arch_local_irq_restore(flags);
}

/**
 * mn10300_ipi_enable - Enable an IPI
 * @irq: The IPI to be enabled.
 */
static void mn10300_ipi_enable(unsigned int irq)
{
        unsigned long flags;
        u16 tmp;

        flags = arch_local_cli_save();

        tmp = GxICR(irq);
        GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE;
        tmp = GxICR(irq);

        arch_local_irq_restore(flags);
}

/**
 * mn10300_ipi_disable - Disable an IPI
 * @irq: The IPI to be disabled.
 */
static void mn10300_ipi_disable(unsigned int irq)
{
        unsigned long flags;
        u16 tmp;

        flags = arch_local_cli_save();

        tmp = GxICR(irq);
        GxICR(irq) = tmp & GxICR_LEVEL;
        tmp = GxICR(irq);

        arch_local_irq_restore(flags);
}

/**
 * mn10300_ipi_ack - Acknowledge an IPI interrupt in the PIC
 * @irq: The IPI to be acknowledged.
 *
 * Clear the interrupt detection flag for the IPI on the appropriate interrupt
 * channel in the PIC.
 */
static void mn10300_ipi_ack(unsigned int irq)
{
        unsigned long flags;
        u16 tmp;

        flags = arch_local_cli_save();
        GxICR_u8(irq) = GxICR_DETECT;
        tmp = GxICR(irq);
        arch_local_irq_restore(flags);
}

/**
 * mn10300_ipi_nop - Dummy IPI action
 * @irq: The IPI to be acted upon.
 */
static void mn10300_ipi_nop(unsigned int irq)
{
}

/**
 * send_IPI_mask - Send IPIs to all CPUs in list
 * @cpumask: The list of CPUs to target.
 * @irq: The IPI request to be sent.
 *
 * Send the specified IPI to all the CPUs in the list, not waiting for them to
 * finish before returning.  The caller is responsible for synchronisation if
 * that is needed.
 */
static void send_IPI_mask(const cpumask_t *cpumask, int irq)
{
        int i;
        u16 tmp;

        for (i = 0; i < NR_CPUS; i++) {
                if (cpu_isset(i, *cpumask)) {
                        /* send IPI */
                        tmp = CROSS_GxICR(irq, i);
                        CROSS_GxICR(irq, i) =
                                tmp | GxICR_REQUEST | GxICR_DETECT;
                        tmp = CROSS_GxICR(irq, i); /* flush write buffer */
                }
        }
}

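/*
 * For illustration only: to send a (hypothetical) IPI "irq" to CPUs 0 and 1,
 * a caller would build a mask and pass a pointer to it, much as the wrapper
 * functions below do:
 *
 *	cpumask_t mask = CPU_MASK_NONE;
 *
 *	cpu_set(0, mask);
 *	cpu_set(1, mask);
 *	send_IPI_mask(&mask, irq);
 */
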
/**
 * send_IPI_self - Send an IPI to this CPU.
 * @irq: The IPI request to be sent.
 *
 * Send the specified IPI to the current CPU.
 */
void send_IPI_self(int irq)
{
        send_IPI_mask(cpumask_of(smp_processor_id()), irq);
}

/**
 * send_IPI_allbutself - Send IPIs to all the other CPUs.
 * @irq: The IPI request to be sent.
 *
 * Send the specified IPI to all CPUs in the system barring the current one,
 * not waiting for them to finish before returning.  The caller is responsible
 * for synchronisation if that is needed.
 */
void send_IPI_allbutself(int irq)
{
        cpumask_t cpumask;

        cpumask = cpu_online_map;
        cpu_clear(smp_processor_id(), cpumask);
        send_IPI_mask(&cpumask, irq);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        BUG();
        /*send_IPI_mask(mask, CALL_FUNCTION_IPI);*/
}

void arch_send_call_function_single_ipi(int cpu)
{
        send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI);
}

/**
 * smp_send_reschedule - Send reschedule IPI to a CPU
 * @cpu: The CPU to target.
 */
void smp_send_reschedule(int cpu)
{
        send_IPI_mask(cpumask_of(cpu), RESCHEDULE_IPI);
}

/**
 * smp_nmi_call_function - Send a call function NMI IPI to all CPUs
 * @func: The function to ask to be run.
 * @info: The context data to pass to that function.
 * @wait: If true, wait (atomically) until function is run on all CPUs.
 *
 * Send a non-maskable request to all CPUs in the system, requesting them to
 * run the specified function with the given context data, and, potentially, to
 * wait for completion of that function on all CPUs.
 *
 * Returns 0 if successful, -ETIMEDOUT if we were asked to wait but hit the
 * timeout.
 */
int smp_nmi_call_function(smp_call_func_t func, void *info, int wait)
{
        struct nmi_call_data_struct data;
        unsigned long flags;
        unsigned int cnt;
        int cpus, ret = 0;

        cpus = num_online_cpus() - 1;
        if (cpus < 1)
                return 0;

        data.func = func;
        data.info = info;
        data.started = cpu_online_map;
        cpu_clear(smp_processor_id(), data.started);
        data.wait = wait;
        if (wait)
                data.finished = data.started;

        spin_lock_irqsave(&smp_nmi_call_lock, flags);
        nmi_call_data = &data;
        smp_mb();

        /* Send a message to all other CPUs and wait for them to respond */
        send_IPI_allbutself(CALL_FUNCTION_NMI_IPI);

        /* Wait for response */
        if (CALL_FUNCTION_NMI_IPI_TIMEOUT > 0) {
                for (cnt = 0;
                     cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT &&
                             !cpus_empty(data.started);
                     cnt++)
                        mdelay(1);

                if (wait && cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT) {
                        for (cnt = 0;
                             cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT &&
                                     !cpus_empty(data.finished);
                             cnt++)
                                mdelay(1);
                }

                if (cnt >= CALL_FUNCTION_NMI_IPI_TIMEOUT)
                        ret = -ETIMEDOUT;

        } else {
                /* If the timeout value is zero, wait until the cpumask has
                 * been cleared */
                while (!cpus_empty(data.started))
                        barrier();
                if (wait)
                        while (!cpus_empty(data.finished))
                                barrier();
        }

        spin_unlock_irqrestore(&smp_nmi_call_lock, flags);
        return ret;
}

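/*
 * Example caller of smp_nmi_call_function() (hypothetical; dump_cpu_state()
 * is not part of this file).  smp_send_stop() below is the one real caller
 * here:
 *
 *	static void dump_cpu_state(void *unused)
 *	{
 *		printk(KERN_INFO "CPU#%d alive\n", smp_processor_id());
 *	}
 *
 *	// run dump_cpu_state() on all other CPUs without waiting
 *	smp_nmi_call_function(dump_cpu_state, NULL, 0);
 */
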
/**
 * stop_this_cpu - Callback to stop a CPU.
 * @unused: Callback context (ignored).
 */
void stop_this_cpu(void *unused)
{
        static volatile int stopflag;
        unsigned long flags;

#ifdef CONFIG_GDBSTUB
        /* In case another CPU is single-stepping through smp_send_stop(),
         * clear procindebug to avoid a deadlock.
         */
        atomic_set(&procindebug[smp_processor_id()], 0);
#endif  /* CONFIG_GDBSTUB */

        flags = arch_local_cli_save();
        cpu_clear(smp_processor_id(), cpu_online_map);

        while (!stopflag)
                cpu_relax();

        cpu_set(smp_processor_id(), cpu_online_map);
        arch_local_irq_restore(flags);
}

/**
 * smp_send_stop - Send a stop request to all CPUs.
 */
void smp_send_stop(void)
{
        smp_nmi_call_function(stop_this_cpu, NULL, 0);
}

/**
 * smp_reschedule_interrupt - Reschedule IPI handler
 * @irq: The interrupt number.
 * @dev_id: The device ID.
 *
 * We need do nothing here, since the rescheduling is effected on our way
 * back through entry.S.
 *
 * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
 */
static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id)
{
        /* do nothing */
        return IRQ_HANDLED;
}

/**
 * smp_call_function_interrupt - Call function IPI handler
 * @irq: The interrupt number.
 * @dev_id: The device ID.
 *
 * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
 */
static irqreturn_t smp_call_function_interrupt(int irq, void *dev_id)
{
        /* generic_smp_call_function_interrupt(); */
        generic_smp_call_function_single_interrupt();
        return IRQ_HANDLED;
}

/**
 * smp_nmi_call_function_interrupt - Non-maskable call function IPI handler
 */
void smp_nmi_call_function_interrupt(void)
{
        smp_call_func_t func = nmi_call_data->func;
        void *info = nmi_call_data->info;
        int wait = nmi_call_data->wait;

        /* Notify the initiating CPU that we've grabbed the data and are about
         * to execute the function
         */
        smp_mb();
        cpu_clear(smp_processor_id(), nmi_call_data->started);
        (*func)(info);

        if (wait) {
                smp_mb();
                cpu_clear(smp_processor_id(), nmi_call_data->finished);
        }
}

#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || \
    defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
/**
 * smp_ipi_timer_interrupt - Local timer IPI handler
 * @irq: The interrupt number.
 * @dev_id: The device ID.
 *
 * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
 */
static irqreturn_t smp_ipi_timer_interrupt(int irq, void *dev_id)
{
        return local_timer_interrupt();
}
#endif

void __init smp_init_cpus(void)
{
        int i;
        for (i = 0; i < NR_CPUS; i++) {
                set_cpu_possible(i, true);
                set_cpu_present(i, true);
        }
}

/**
 * smp_cpu_init - Initialise an AP in start_secondary().
 *
 * For this Application Processor, set up init_mm, initialise the FPU and
 * enable the IPIs.
 */
static void __init smp_cpu_init(void)
{
        unsigned long flags;
        int cpu_id = smp_processor_id();
        u16 tmp16;

        if (test_and_set_bit(cpu_id, &cpu_initialized)) {
                printk(KERN_WARNING "CPU#%d already initialized!\n", cpu_id);
                for (;;)
                        local_irq_enable();
        }
        printk(KERN_INFO "Initializing CPU#%d\n", cpu_id);

        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;
        BUG_ON(current->mm);

        enter_lazy_tlb(&init_mm, current);

        /* Force FPU initialization */
        clear_using_fpu(current);

        GxICR(CALL_FUNC_SINGLE_IPI) = CALL_FUNCTION_GxICR_LV | GxICR_DETECT;
        mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI);

        GxICR(LOCAL_TIMER_IPI) = LOCAL_TIMER_GxICR_LV | GxICR_DETECT;
        mn10300_ipi_enable(LOCAL_TIMER_IPI);

        GxICR(RESCHEDULE_IPI) = RESCHEDULE_GxICR_LV | GxICR_DETECT;
        mn10300_ipi_enable(RESCHEDULE_IPI);

#ifdef CONFIG_MN10300_CACHE_ENABLED
        GxICR(FLUSH_CACHE_IPI) = FLUSH_CACHE_GxICR_LV | GxICR_DETECT;
        mn10300_ipi_enable(FLUSH_CACHE_IPI);
#endif

        mn10300_ipi_shutdown(SMP_BOOT_IRQ);

        /* Set up the non-maskable call function IPI */
        flags = arch_local_cli_save();
        GxICR(CALL_FUNCTION_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
        tmp16 = GxICR(CALL_FUNCTION_NMI_IPI);
        arch_local_irq_restore(flags);
}

/**
 * smp_prepare_cpu_init - Initialise CPU in startup_secondary
 *
 * Set up the interrupt level 0-6 vectors, disable all interrupts and
 * initialise the GDB-stub NMI ICR.
 */
void smp_prepare_cpu_init(void)
{
        int loop;

        /* Set the interrupt vector registers */
        IVAR0 = EXCEP_IRQ_LEVEL0;
        IVAR1 = EXCEP_IRQ_LEVEL1;
        IVAR2 = EXCEP_IRQ_LEVEL2;
        IVAR3 = EXCEP_IRQ_LEVEL3;
        IVAR4 = EXCEP_IRQ_LEVEL4;
        IVAR5 = EXCEP_IRQ_LEVEL5;
        IVAR6 = EXCEP_IRQ_LEVEL6;

        /* Disable all interrupts and set to priority 6 (lowest) */
        for (loop = 0; loop < GxICR_NUM_IRQS; loop++)
                GxICR(loop) = GxICR_LEVEL_6 | GxICR_DETECT;

#ifdef CONFIG_GDBSTUB
        /* initialise the GDB-stub */
        do {
                unsigned long flags;
                u16 tmp16;

                flags = arch_local_cli_save();
                GxICR(GDB_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
                tmp16 = GxICR(GDB_NMI_IPI);
                arch_local_irq_restore(flags);
        } while (0);
#endif
}

/**
 * start_secondary - Activate a secondary CPU (AP)
 * @unused: Thread parameter (ignored).
 */
int __init start_secondary(void *unused)
{
        smp_cpu_init();
        smp_callin();
        while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
                cpu_relax();

        local_flush_tlb();
        preempt_disable();
        smp_online();

#ifdef CONFIG_GENERIC_CLOCKEVENTS
        init_clockevents();
#endif
        cpu_idle();
        return 0;
}

/**
 * smp_prepare_cpus - Boot up secondary CPUs (APs)
 * @max_cpus: Maximum number of CPUs to boot.
 *
 * Call do_boot_cpu() to boot up the APs.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        int phy_id;

        /* Setup boot CPU information */
        smp_store_cpu_info(0);
        smp_tune_scheduling();

        init_ipi();

        /* If SMP should be disabled, then finish */
        if (max_cpus == 0) {
                printk(KERN_INFO "SMP mode deactivated.\n");
                goto smp_done;
        }

        /* Boot secondary CPUs (for which phy_id > 0) */
        for (phy_id = 0; phy_id < NR_CPUS; phy_id++) {
                /* Don't boot the primary CPU */
                if (max_cpus <= cpucount + 1)
                        continue;
                if (phy_id != 0)
                        do_boot_cpu(phy_id);
                set_cpu_possible(phy_id, true);
                smp_show_cpu_info(phy_id);
        }

smp_done:
        Dprintk("Boot done.\n");
}

/**
 * smp_store_cpu_info - Save a CPU's information
 * @cpu: The CPU to save for.
 *
 * Save a copy of boot_cpu_data, plus the current loops_per_jiffy and CPU
 * revision, for the specified CPU.
 */
static void __init smp_store_cpu_info(int cpu)
{
        struct mn10300_cpuinfo *ci = &cpu_data[cpu];

        *ci = boot_cpu_data;
        ci->loops_per_jiffy = loops_per_jiffy;
        ci->type = CPUREV;
}

/**
 * smp_tune_scheduling - Set time slice value
 *
 * Nothing to do here.
 */
static void __init smp_tune_scheduling(void)
{
}

/**
 * do_boot_cpu - Boot up one CPU
 * @phy_id: Physical ID of the CPU to boot.
 *
 * Send an IPI to a secondary CPU to boot it.  Returns 0 on success, 1
 * otherwise.
 */
static int __init do_boot_cpu(int phy_id)
{
        struct task_struct *idle;
        unsigned long send_status, callin_status;
        int timeout, cpu_id;

        send_status = GxICR_REQUEST;
        callin_status = 0;
        timeout = 0;
        cpu_id = phy_id;

        cpucount++;

        /* Create an idle thread for this CPU */
        idle = fork_idle(cpu_id);
        if (IS_ERR(idle))
                panic("Failed fork for CPU#%d.", cpu_id);

        idle->thread.pc = (unsigned long)start_secondary;

        printk(KERN_NOTICE "Booting CPU#%d\n", cpu_id);
        start_stack[cpu_id - 1] = idle->thread.sp;

        task_thread_info(idle)->cpu = cpu_id;

        /* Send boot IPI to AP */
        send_IPI_mask(cpumask_of(phy_id), SMP_BOOT_IRQ);

        Dprintk("Waiting for send to finish...\n");

        /* Wait up to 100ms for the AP to receive the boot IPI */
        do {
                udelay(1000);
                send_status =
                        CROSS_GxICR(SMP_BOOT_IRQ, phy_id) & GxICR_REQUEST;
        } while (send_status == GxICR_REQUEST && timeout++ < 100);

        Dprintk("Waiting for cpu_callin_map.\n");

        if (send_status == 0) {
                /* Allow the AP to start initializing */
                cpu_set(cpu_id, cpu_callout_map);

                /* Wait up to 5s for the AP to set itself in cpu_callin_map */
                timeout = 0;
                do {
                        udelay(1000);
                        callin_status = cpu_isset(cpu_id, cpu_callin_map);
                } while (callin_status == 0 && timeout++ < 5000);

                if (callin_status == 0)
                        Dprintk("Not responding.\n");
        } else {
                printk(KERN_WARNING "IPI not delivered.\n");
        }

        if (send_status == GxICR_REQUEST || callin_status == 0) {
                cpu_clear(cpu_id, cpu_callout_map);
                cpu_clear(cpu_id, cpu_callin_map);
                cpu_clear(cpu_id, cpu_initialized);
                cpucount--;
                return 1;
        }
        return 0;
}

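/*
 * An outline of the boot handshake implemented by do_boot_cpu() above and by
 * smp_callin(), start_secondary() and __cpu_up() elsewhere in this file:
 *
 *   BP (boot processor)                AP (phy_id)
 *   -------------------                -----------
 *   send SMP_BOOT_IRQ           --->   enters start_secondary()
 *   poll for GxICR_REQUEST clear
 *   set AP's bit in
 *     cpu_callout_map           --->   sees the callout, calibrates delay,
 *   poll cpu_callin_map         <---   sets own bit in cpu_callin_map
 *   __cpu_up() sets
 *     smp_commenced_mask        --->   proceeds to smp_online()
 */
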
/**
 * smp_show_cpu_info - Show SMP CPU information
 * @cpu: The CPU of interest.
 */
static void __init smp_show_cpu_info(int cpu)
{
        struct mn10300_cpuinfo *ci = &cpu_data[cpu];

        printk(KERN_INFO
               "CPU#%d : ioclk speed: %lu.%02luMHz : bogomips : %lu.%02lu\n",
               cpu,
               MN10300_IOCLK / 1000000,
               (MN10300_IOCLK / 10000) % 100,
               ci->loops_per_jiffy / (500000 / HZ),
               (ci->loops_per_jiffy / (5000 / HZ)) % 100);
}

/**
 * smp_callin - Set cpu_callin_map for the current CPU ID
 */
static void __init smp_callin(void)
{
        unsigned long timeout;
        int cpu;

        cpu = smp_processor_id();
        timeout = jiffies + (2 * HZ);

        if (cpu_isset(cpu, cpu_callin_map)) {
                printk(KERN_ERR "CPU#%d already present.\n", cpu);
                BUG();
        }
        Dprintk("CPU#%d waiting for CALLOUT\n", cpu);

        /* Wait up to 2s total for the callout from the BP */
        while (time_before(jiffies, timeout)) {
                if (cpu_isset(cpu, cpu_callout_map))
                        break;
                cpu_relax();
        }

        if (!time_before(jiffies, timeout)) {
                printk(KERN_ERR
                       "BUG: CPU#%d started up but did not get a callout!\n",
                       cpu);
                BUG();
        }

#ifdef CONFIG_CALIBRATE_DELAY
        calibrate_delay();              /* Get our bogomips */
#endif

        /* Save our processor parameters */
        smp_store_cpu_info(cpu);

        /* Allow the boot processor to continue */
        cpu_set(cpu, cpu_callin_map);
}

/**
 * smp_online - Set the current CPU in cpu_online_map
 */
static void __init smp_online(void)
{
        int cpu;

        cpu = smp_processor_id();

        local_irq_enable();

        cpu_set(cpu, cpu_online_map);
        smp_wmb();
}

/**
 * smp_cpus_done - Finish the SMP boot process
 * @max_cpus: Maximum CPU count.
 *
 * Do nothing.
 */
void __init smp_cpus_done(unsigned int max_cpus)
{
}

/*
 * smp_prepare_boot_cpu - Set up stuff for the boot processor.
 *
 * Set up the cpu_callout_map and cpu_callin_map of the boot processor
 * (CPU 0).
 */
void __devinit smp_prepare_boot_cpu(void)
{
        cpu_set(0, cpu_callout_map);
        cpu_set(0, cpu_callin_map);
        current_thread_info()->cpu = 0;
}

/*
 * initialize_secondary - Initialise a secondary CPU (Application Processor).
 *
 * Set the SP register and jump to the thread's PC address.
 */
void initialize_secondary(void)
{
        asm volatile (
                "mov    %0,sp   \n"
                "jmp    (%1)    \n"
                :
                : "a"(current->thread.sp), "a"(current->thread.pc));
}

/**
 * __cpu_up - Set smp_commenced_mask for the nominated CPU
 * @cpu: The target CPU.
 */
int __devinit __cpu_up(unsigned int cpu)
{
        int timeout;

#ifdef CONFIG_HOTPLUG_CPU
        if (num_online_cpus() == 1)
                disable_hlt();
        if (sleep_mode[cpu])
                run_wakeup_cpu(cpu);
#endif /* CONFIG_HOTPLUG_CPU */

        cpu_set(cpu, smp_commenced_mask);

        /* Wait 5s total for a response */
        for (timeout = 0 ; timeout < 5000 ; timeout++) {
                if (cpu_isset(cpu, cpu_online_map))
                        break;
                udelay(1000);
        }

        BUG_ON(!cpu_isset(cpu, cpu_online_map));
        return 0;
}

/**
 * setup_profiling_timer - Set up the profiling timer
 * @multiplier: The frequency multiplier to use
 *
 * The frequency of the profiling timer can be changed by writing a multiplier
 * value into /proc/profile.  This is not supported here, so return -EINVAL.
 */
int setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}

/*
 * CPU hotplug routines
 */
#ifdef CONFIG_HOTPLUG_CPU

static DEFINE_PER_CPU(struct cpu, cpu_devices);

static int __init topology_init(void)
{
        int cpu, ret;

        for_each_present_cpu(cpu) {
                ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu);
                if (ret)
                        printk(KERN_WARNING
                               "topology_init: register_cpu %d failed (%d)\n",
                               cpu, ret);
        }
        return 0;
}

subsys_initcall(topology_init);

int __cpu_disable(void)
{
        int cpu = smp_processor_id();
        if (cpu == 0)
                return -EBUSY;

        migrate_irqs();
        cpu_clear(cpu, current->active_mm->cpu_vm_mask);
        return 0;
}

void __cpu_die(unsigned int cpu)
{
        run_sleep_cpu(cpu);

        if (num_online_cpus() == 1)
                enable_hlt();
}

#ifdef CONFIG_MN10300_CACHE_ENABLED
static inline void hotplug_cpu_disable_cache(void)
{
        int tmp;

        /* Clear the icache/dcache enable bits in CHCTR, then spin until the
         * caches report non-busy */
        asm volatile(
                "       movhu   (%1),%0 \n"
                "       and     %2,%0   \n"
                "       movhu   %0,(%1) \n"
                "1:     movhu   (%1),%0 \n"
                "       btst    %3,%0   \n"
                "       bne     1b      \n"
                : "=&r"(tmp)
                : "a"(&CHCTR),
                  "i"(~(CHCTR_ICEN | CHCTR_DCEN)),
                  "i"(CHCTR_ICBUSY | CHCTR_DCBUSY)
                : "memory", "cc");
}

static inline void hotplug_cpu_enable_cache(void)
{
        int tmp;

        /* Set the icache/dcache enable bits in CHCTR */
        asm volatile(
                "movhu  (%1),%0 \n"
                "or     %2,%0   \n"
                "movhu  %0,(%1) \n"
                : "=&r"(tmp)
                : "a"(&CHCTR),
                  "i"(CHCTR_ICEN | CHCTR_DCEN)
                : "memory", "cc");
}

static inline void hotplug_cpu_invalidate_cache(void)
{
        int tmp;

        /* Set the icache/dcache invalidate bits in CHCTR */
        asm volatile (
                "movhu  (%1),%0 \n"
                "or     %2,%0   \n"
                "movhu  %0,(%1) \n"
                : "=&r"(tmp)
                : "a"(&CHCTR),
                  "i"(CHCTR_ICINV | CHCTR_DCINV)
                : "cc");
}

#else /* CONFIG_MN10300_CACHE_ENABLED */
#define hotplug_cpu_disable_cache()     do {} while (0)
#define hotplug_cpu_enable_cache()      do {} while (0)
#define hotplug_cpu_invalidate_cache()  do {} while (0)
#endif /* CONFIG_MN10300_CACHE_ENABLED */

/**
 * hotplug_cpu_nmi_call_function - Call a function on other CPUs for hotplug
 * @cpumask: List of target CPUs.
 * @func: The function to call on those CPUs.
 * @info: The context data for the function to be called.
 * @wait: Whether to wait for the calls to complete.
 *
 * Non-maskably call a function on another CPU for hotplug purposes.
 *
 * This function must be called with maskable interrupts disabled.
 */
static int hotplug_cpu_nmi_call_function(cpumask_t cpumask,
                                         smp_call_func_t func, void *info,
                                         int wait)
{
        /*
         * The address and the size of nmi_call_func_mask_data
         * need to be aligned on L1_CACHE_BYTES.
         */
        static struct nmi_call_data_struct nmi_call_func_mask_data
                __cacheline_aligned;
        unsigned long start, end;

        start = (unsigned long)&nmi_call_func_mask_data;
        end = start + sizeof(struct nmi_call_data_struct);

        nmi_call_func_mask_data.func = func;
        nmi_call_func_mask_data.info = info;
        nmi_call_func_mask_data.started = cpumask;
        nmi_call_func_mask_data.wait = wait;
        if (wait)
                nmi_call_func_mask_data.finished = cpumask;

        spin_lock(&smp_nmi_call_lock);
        nmi_call_data = &nmi_call_func_mask_data;
        mn10300_local_dcache_flush_range(start, end);
        smp_wmb();

        send_IPI_mask(&cpumask, CALL_FUNCTION_NMI_IPI);

        do {
                mn10300_local_dcache_inv_range(start, end);
                barrier();
        } while (!cpus_empty(nmi_call_func_mask_data.started));

        if (wait) {
                do {
                        mn10300_local_dcache_inv_range(start, end);
                        barrier();
                } while (!cpus_empty(nmi_call_func_mask_data.finished));
        }

        spin_unlock(&smp_nmi_call_lock);
        return 0;
}

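/*
 * A note on the explicit flush/invalidate calls in
 * hotplug_cpu_nmi_call_function() above: a CPU being put to sleep runs with
 * its caches disabled (see prepare_sleep_cpu() below), so coherency on
 * nmi_call_func_mask_data must be maintained by hand.  The initiator flushes
 * the structure to RAM before sending the IPI and invalidates its own cached
 * copy before each poll of the ->started/->finished masks.
 */
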
static void restart_wakeup_cpu(void)
{
        unsigned int cpu = smp_processor_id();

        cpu_set(cpu, cpu_callin_map);
        local_flush_tlb();
        cpu_set(cpu, cpu_online_map);
        smp_wmb();
}

static void prepare_sleep_cpu(void *unused)
{
        sleep_mode[smp_processor_id()] = 1;
        smp_mb();
        mn10300_local_dcache_flush_inv();
        hotplug_cpu_disable_cache();
        hotplug_cpu_invalidate_cache();
}

/* When this function is called, IE=0 and NMID=0. */
static void sleep_cpu(void *unused)
{
        unsigned int cpu_id = smp_processor_id();
        /*
         * The CALL_FUNCTION_NMI_IPI for wakeup_cpu() must not be requested
         * before this CPU goes into SLEEP mode.
         */
        do {
                smp_mb();
                __sleep_cpu();
        } while (sleep_mode[cpu_id]);
        restart_wakeup_cpu();
}

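/*
 * A sketch of the hotplug sleep/wakeup sequence implemented below:
 *
 *   __cpu_die()
 *     -> run_sleep_cpu(cpu)
 *          NMI: prepare_sleep_cpu()  - set sleep_mode[], disable the caches
 *          NMI: sleep_cpu()          - loop in __sleep_cpu() until woken
 *
 *   __cpu_up()
 *     -> run_wakeup_cpu(cpu)
 *          NMI: wakeup_cpu()         - re-enable the caches, clear
 *                                      sleep_mode[]
 *        sleep_cpu() then drops out of its loop into restart_wakeup_cpu(),
 *        which puts the CPU back in cpu_callin_map and cpu_online_map.
 */
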
static void run_sleep_cpu(unsigned int cpu)
{
        unsigned long flags;
        cpumask_t cpumask = cpumask_of_cpu(cpu);

        flags = arch_local_cli_save();
        hotplug_cpu_nmi_call_function(cpumask, prepare_sleep_cpu, NULL, 1);
        hotplug_cpu_nmi_call_function(cpumask, sleep_cpu, NULL, 0);
        udelay(1);              /* delay for the cpu to sleep. */
        arch_local_irq_restore(flags);
}

static void wakeup_cpu(void *unused)
{
        hotplug_cpu_invalidate_cache();
        hotplug_cpu_enable_cache();
        smp_mb();
        sleep_mode[smp_processor_id()] = 0;
}

static void run_wakeup_cpu(unsigned int cpu)
{
        unsigned long flags;

        flags = arch_local_cli_save();
#if NR_CPUS == 2
        mn10300_local_dcache_flush_inv();
#else
        /*
         * Before waking up the CPU, all online CPUs should stop and flush the
         * D-cache for global data.
         */
#error NR_CPUS > 2 is not supported when CONFIG_HOTPLUG_CPU=y.
#endif
        hotplug_cpu_nmi_call_function(cpumask_of_cpu(cpu), wakeup_cpu, NULL, 1);
        arch_local_irq_restore(flags);
}

#endif /* CONFIG_HOTPLUG_CPU */
