linux/arch/blackfin/mach-common/smp.c
/*
 * IPI management based on arch/arm/kernel/smp.c (Copyright 2002 ARM Limited)
 *
 * Copyright 2007-2009 Analog Devices Inc.
 *                         Philippe Gerum <rpm@xenomai.org>
 *
 * Licensed under the GPL-2.
 */

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/clockchips.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <asm/cacheflush.h>
#include <asm/irq_handler.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/cpu.h>
#include <asm/time.h>
#include <linux/err.h>

/*
 * Anomaly notes:
 * 05000120 - we always define corelock as 32-bit integer in L2
 */
struct corelock_slot corelock __attribute__ ((__section__(".l2.bss")));

#ifdef CONFIG_ICACHE_FLUSH_L1
unsigned long blackfin_iflush_l1_entry[NR_CPUS];
#endif

struct blackfin_initial_pda __cpuinitdata initial_pda_coreb;

enum ipi_message_type {
	BFIN_IPI_NONE,	/* reserve bit 0: ipi_handler_int1() scans pending bits starting at bit 1 */
	BFIN_IPI_TIMER,
	BFIN_IPI_RESCHEDULE,
	BFIN_IPI_CALL_FUNC,
	BFIN_IPI_CALL_FUNC_SINGLE,
	BFIN_IPI_CPU_STOP,
};

struct blackfin_flush_data {
	unsigned long start;
	unsigned long end;
};

void *secondary_stack;

static struct blackfin_flush_data smp_flush_data;

static DEFINE_SPINLOCK(stop_lock);

/* A magic number - stress test shows this is safe for common cases */
#define BFIN_IPI_MSGQ_LEN 5

/* Per-CPU IPI state: a bitmask of pending message types plus a delivery counter */
struct ipi_data {
	unsigned long count;
	unsigned long bits;
};

static DEFINE_PER_CPU(struct ipi_data, bfin_ipi);
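
/*
 * IPI delivery in a nutshell: send_ipi() sets the bit for the message type
 * in the target CPU's bfin_ipi.bits and raises supplemental interrupt 1;
 * ipi_handler_int1() drains the bits with xchg() and dispatches each pending
 * message. The count field is only a statistics counter bumped by the sender.
 */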

static void ipi_cpu_stop(unsigned int cpu)
{
	spin_lock(&stop_lock);
	printk(KERN_CRIT "CPU%u: stopping\n", cpu);
	dump_stack();
	spin_unlock(&stop_lock);

	set_cpu_online(cpu, false);

	local_irq_disable();

	while (1)
		SSYNC();
}

static void ipi_flush_icache(void *info)
{
	struct blackfin_flush_data *fdata = info;

	/* Invalidate the memory holding the bounds of the flushed region. */
	blackfin_dcache_invalidate_range((unsigned long)fdata,
					 (unsigned long)fdata + sizeof(*fdata));

	/* Make sure all write buffers in the data side of the core
	 * are flushed before trying to invalidate the icache.  This
	 * needs to be after the data flush and before the icache
	 * flush so that the SSYNC does the right thing in preventing
	 * the instruction prefetcher from hitting things in cached
	 * memory at the wrong time -- it runs much further ahead than
	 * the pipeline.
	 */
	SSYNC();

	/* ipi_flush_icache is invoked by generic flush_icache_range,
	 * so call blackfin arch icache flush directly here.
	 */
	blackfin_icache_flush_range(fdata->start, fdata->end);
}

/* Use IRQ_SUPPLE_0 to request reschedule.
 * When returning from interrupt to user space,
 * there is a chance to reschedule. */
static irqreturn_t ipi_handler_int0(int irq, void *dev_instance)
{
	unsigned int cpu = smp_processor_id();

	platform_clear_ipi(cpu, IRQ_SUPPLE_0);
	return IRQ_HANDLED;
}

DECLARE_PER_CPU(struct clock_event_device, coretmr_events);
void ipi_timer(void)
{
	int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(coretmr_events, cpu);
	evt->event_handler(evt);
}

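/*
 * Supplemental interrupt 1 carries all IPI message types. Drain the pending
 * bitmask atomically and dispatch each message; keep looping until no new
 * bits were set while we were processing.
 */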
static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
{
	struct ipi_data *bfin_ipi_data;
	unsigned int cpu = smp_processor_id();
	unsigned long pending;
	unsigned long msg;

	platform_clear_ipi(cpu, IRQ_SUPPLE_1);

	bfin_ipi_data = &__get_cpu_var(bfin_ipi);
	smp_mb();
	while ((pending = xchg(&bfin_ipi_data->bits, 0)) != 0) {
		msg = 0;
		do {
			msg = find_next_bit(&pending, BITS_PER_LONG, msg + 1);
			switch (msg) {
			case BFIN_IPI_TIMER:
				ipi_timer();
				break;
			case BFIN_IPI_RESCHEDULE:
				scheduler_ipi();
				break;
			case BFIN_IPI_CALL_FUNC:
				generic_smp_call_function_interrupt();
				break;

			case BFIN_IPI_CALL_FUNC_SINGLE:
				generic_smp_call_function_single_interrupt();
				break;

			case BFIN_IPI_CPU_STOP:
				ipi_cpu_stop(cpu);
				break;
			}
		} while (msg < BITS_PER_LONG);

		smp_mb();
	}
	return IRQ_HANDLED;
}

static void bfin_ipi_init(void)
{
	unsigned int cpu;
	struct ipi_data *bfin_ipi_data;
	for_each_possible_cpu(cpu) {
		bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
		bfin_ipi_data->bits = 0;
		bfin_ipi_data->count = 0;
	}
}

void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)
{
	unsigned int cpu;
	struct ipi_data *bfin_ipi_data;
	unsigned long flags;

	local_irq_save(flags);
	smp_mb();
	for_each_cpu(cpu, cpumask) {
		bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
		smp_mb();
		set_bit(msg, &bfin_ipi_data->bits);
		bfin_ipi_data->count++;
		platform_send_ipi_cpu(cpu, IRQ_SUPPLE_1);
	}

	local_irq_restore(flags);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi(cpumask_of(cpu), BFIN_IPI_CALL_FUNC_SINGLE);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_ipi(mask, BFIN_IPI_CALL_FUNC);
}

void smp_send_reschedule(int cpu)
{
	send_ipi(cpumask_of(cpu), BFIN_IPI_RESCHEDULE);

	return;
}

void smp_send_msg(const struct cpumask *mask, unsigned long type)
{
	send_ipi(mask, type);
}

void smp_timer_broadcast(const struct cpumask *mask)
{
	smp_send_msg(mask, BFIN_IPI_TIMER);
}

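/*
 * Stop all other online CPUs: each one parks itself in ipi_cpu_stop()
 * above, marking itself offline and spinning with interrupts disabled.
 */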
void smp_send_stop(void)
{
	cpumask_t callmap;

	preempt_disable();
	cpumask_copy(&callmap, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &callmap);
	if (!cpumask_empty(&callmap))
		send_ipi(&callmap, BFIN_IPI_CPU_STOP);

	preempt_enable();

	return;
}

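/*
 * CoreB bring-up: __cpu_up() publishes the top of the idle thread's stack
 * through secondary_stack, platform_boot_secondary() then starts the second
 * core, and the freshly started core enters secondary_start_kernel() below.
 */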
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	secondary_stack = task_stack_page(idle) + THREAD_SIZE;

	ret = platform_boot_secondary(cpu, idle);

	secondary_stack = NULL;

	return ret;
}

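/*
 * Per-core interrupt setup for the secondary CPU: mask everything, clear any
 * latched interrupts, then enable the IVG7-15 levels that the boot CPU has
 * already routed via the IARs.
 */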
static void __cpuinit setup_secondary(unsigned int cpu)
{
	unsigned long ilat;

	bfin_write_IMASK(0);
	CSYNC();
	ilat = bfin_read_ILAT();
	CSYNC();
	bfin_write_ILAT(ilat);
	CSYNC();

	/* Enable interrupt levels IVG7-15. IARs have been already
	 * programmed by the boot CPU.  */
	bfin_irq_flags |= IMASK_IVG15 |
	    IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
	    IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
}

void __cpuinit secondary_start_kernel(void)
{
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = &init_mm;

	if (_bfin_swrst & SWRST_DBL_FAULT_B) {
		printk(KERN_EMERG "CoreB Recovering from DOUBLE FAULT event\n");
#ifdef CONFIG_DEBUG_DOUBLEFAULT
		printk(KERN_EMERG " While handling exception (EXCAUSE = %#x) at %pF\n",
			initial_pda_coreb.seqstat_doublefault & SEQSTAT_EXCAUSE,
			initial_pda_coreb.retx_doublefault);
		printk(KERN_NOTICE "   DCPLB_FAULT_ADDR: %pF\n",
			initial_pda_coreb.dcplb_doublefault_addr);
		printk(KERN_NOTICE "   ICPLB_FAULT_ADDR: %pF\n",
			initial_pda_coreb.icplb_doublefault_addr);
#endif
		printk(KERN_NOTICE " The instruction at %pF caused a double exception\n",
			initial_pda_coreb.retx);
	}

	/*
	 * We want the D-cache to be enabled early, in case the atomic
	 * support code emulates cache coherence (see
	 * __ARCH_SYNC_CORE_DCACHE).
	 */
	init_exception_vectors();

	local_irq_disable();

	/* Attach the new idle task to the global mm. */
	atomic_inc(&mm->mm_users);
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;

	preempt_disable();

	setup_secondary(cpu);

	platform_secondary_init(cpu);

	/* setup local core timer */
	bfin_local_timer_setup();

	local_irq_enable();

	bfin_setup_caches(cpu);

	notify_cpu_starting(cpu);
	/*
	 * Calibrate loops per jiffy value.
	 * IRQs need to be enabled here - D-cache can be invalidated
	 * in timer irq handler, so core B can read correct jiffies.
	 */
	calibrate_delay();

	cpu_idle();
}

void __init smp_prepare_boot_cpu(void)
{
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	platform_prepare_cpus(max_cpus);
	bfin_ipi_init();
	platform_request_ipi(IRQ_SUPPLE_0, ipi_handler_int0);
	platform_request_ipi(IRQ_SUPPLE_1, ipi_handler_int1);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	unsigned int cpu;

	for_each_online_cpu(cpu)
		bogosum += loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}

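/*
 * Cross-core I-cache flush. A typical caller (sketch only, assuming the SMP
 * flush_icache_range() wrapper in the Blackfin cacheflush header) flushes the
 * local I-cache first and then asks the other core(s) to do the same:
 *
 *	blackfin_icache_flush_range(start, end);
 *	smp_icache_flush_range_others(start, end);
 */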
void smp_icache_flush_range_others(unsigned long start, unsigned long end)
{
	smp_flush_data.start = start;
	smp_flush_data.end = end;

	preempt_disable();
	if (smp_call_function(&ipi_flush_icache, &smp_flush_data, 1))
		printk(KERN_WARNING "SMP: failed to run I-cache flush request on other CPUs\n");
	preempt_enable();
}
EXPORT_SYMBOL_GPL(smp_icache_flush_range_others);

#ifdef __ARCH_SYNC_CORE_ICACHE
unsigned long icache_invld_count[NR_CPUS];
void resync_core_icache(void)
{
	unsigned int cpu = get_cpu();
	blackfin_invalidate_entire_icache();
	icache_invld_count[cpu]++;
	put_cpu();
}
EXPORT_SYMBOL(resync_core_icache);
#endif

#ifdef __ARCH_SYNC_CORE_DCACHE
unsigned long dcache_invld_count[NR_CPUS];
unsigned long barrier_mask __attribute__ ((__section__(".l2.bss")));

void resync_core_dcache(void)
{
	unsigned int cpu = get_cpu();
	blackfin_invalidate_entire_dcache();
	dcache_invld_count[cpu]++;
	put_cpu();
}
EXPORT_SYMBOL(resync_core_dcache);
#endif

#ifdef CONFIG_HOTPLUG_CPU
int __cpuexit __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == 0)
		return -EPERM;

	set_cpu_online(cpu, false);
	return 0;
}

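/*
 * CPU hotplug shutdown handshake: the dying core calls cpu_die(), which
 * signals the cpu_killed completion before parking itself in
 * platform_cpu_die(); __cpu_die() runs on the surviving core and waits up
 * to 5000 jiffies for that signal.
 */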
static DECLARE_COMPLETION(cpu_killed);

int __cpuexit __cpu_die(unsigned int cpu)
{
	return wait_for_completion_timeout(&cpu_killed, 5000);
}

void cpu_die(void)
{
	complete(&cpu_killed);

	atomic_dec(&init_mm.mm_users);
	atomic_dec(&init_mm.mm_count);

	local_irq_disable();
	platform_cpu_die();
}
#endif