linux/arch/blackfin/mach-common/smp.c
/*
 * IPI management based on arch/arm/kernel/smp.c (Copyright 2002 ARM Limited)
 *
 * Copyright 2007-2009 Analog Devices Inc.
 *                         Philippe Gerum <rpm@xenomai.org>
 *
 * Licensed under the GPL-2.
 */

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/clockchips.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <asm/cacheflush.h>
#include <asm/irq_handler.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/cpu.h>
#include <asm/time.h>
#include <linux/err.h>

/*
 * Anomaly notes:
 * 05000120 - we always define corelock as 32-bit integer in L2
 */
struct corelock_slot corelock __attribute__ ((__section__(".l2.bss")));

#ifdef CONFIG_ICACHE_FLUSH_L1
unsigned long blackfin_iflush_l1_entry[NR_CPUS];
#endif

struct blackfin_initial_pda initial_pda_coreb;

enum ipi_message_type {
        BFIN_IPI_NONE,
        BFIN_IPI_TIMER,
        BFIN_IPI_RESCHEDULE,
        BFIN_IPI_CALL_FUNC,
        BFIN_IPI_CPU_STOP,
};

struct blackfin_flush_data {
        unsigned long start;
        unsigned long end;
};

void *secondary_stack;

static struct blackfin_flush_data smp_flush_data;

static DEFINE_SPINLOCK(stop_lock);

/* A magic number - stress test shows this is safe for common cases */
#define BFIN_IPI_MSGQ_LEN 5

/* Per-CPU IPI state: a bitmask of pending message types plus a count of
 * queued messages. Bits are set by send_ipi() and consumed atomically in
 * ipi_handler_int1().
 */
struct ipi_data {
        atomic_t count;
        atomic_t bits;
};

static DEFINE_PER_CPU(struct ipi_data, bfin_ipi);

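/* BFIN_IPI_CPU_STOP handler: log the stop request, mark this CPU offline,
 * and spin with interrupts disabled.
 */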
static void ipi_cpu_stop(unsigned int cpu)
{
        spin_lock(&stop_lock);
        printk(KERN_CRIT "CPU%u: stopping\n", cpu);
        dump_stack();
        spin_unlock(&stop_lock);

        set_cpu_online(cpu, false);

        local_irq_disable();

        while (1)
                SSYNC();
}

static void ipi_flush_icache(void *info)
{
        struct blackfin_flush_data *fdata = info;

        /* Invalidate the memory holding the bounds of the flushed region. */
        blackfin_dcache_invalidate_range((unsigned long)fdata,
                                         (unsigned long)fdata + sizeof(*fdata));

        /* Make sure all write buffers in the data side of the core
         * are flushed before trying to invalidate the icache.  This
         * needs to be after the data flush and before the icache
         * flush so that the SSYNC does the right thing in preventing
         * the instruction prefetcher from hitting things in cached
         * memory at the wrong time -- it runs much further ahead than
         * the pipeline.
         */
        SSYNC();

        /* ipi_flush_icache is invoked by the generic flush_icache_range,
         * so call the blackfin arch icache flush directly here.
         */
        blackfin_icache_flush_range(fdata->start, fdata->end);
}

/* Use IRQ_SUPPLE_0 to request a reschedule.
 * When returning from the interrupt to user space,
 * there is a chance to reschedule. */
static irqreturn_t ipi_handler_int0(int irq, void *dev_instance)
{
        unsigned int cpu = smp_processor_id();

        platform_clear_ipi(cpu, IRQ_SUPPLE_0);
        return IRQ_HANDLED;
}

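/* BFIN_IPI_TIMER handler: run this core's clockevent handler so a broadcast
 * tick behaves like a local core timer interrupt.
 */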
DECLARE_PER_CPU(struct clock_event_device, coretmr_events);
void ipi_timer(void)
{
        int cpu = smp_processor_id();
        struct clock_event_device *evt = &per_cpu(coretmr_events, cpu);
        evt->event_handler(evt);
}

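/* IRQ_SUPPLE_1 handler: acknowledge the IPI, atomically grab and clear this
 * CPU's pending message bits, and dispatch each set bit to its handler.
 * New bits can arrive while we are processing, so loop until the exchange
 * returns zero.
 */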
static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
{
        struct ipi_data *bfin_ipi_data;
        unsigned int cpu = smp_processor_id();
        unsigned long pending;
        unsigned long msg;

        platform_clear_ipi(cpu, IRQ_SUPPLE_1);

        smp_rmb();
        bfin_ipi_data = this_cpu_ptr(&bfin_ipi);
        while ((pending = atomic_xchg(&bfin_ipi_data->bits, 0)) != 0) {
                msg = 0;
                do {
                        msg = find_next_bit(&pending, BITS_PER_LONG, msg + 1);
                        switch (msg) {
                        case BFIN_IPI_TIMER:
                                ipi_timer();
                                break;
                        case BFIN_IPI_RESCHEDULE:
                                scheduler_ipi();
                                break;
                        case BFIN_IPI_CALL_FUNC:
                                generic_smp_call_function_interrupt();
                                break;
                        case BFIN_IPI_CPU_STOP:
                                ipi_cpu_stop(cpu);
                                break;
                        default:
                                goto out;
                        }
                        atomic_dec(&bfin_ipi_data->count);
                } while (msg < BITS_PER_LONG);

        }
out:
        return IRQ_HANDLED;
}

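/* Clear each possible CPU's IPI bookkeeping before any IPIs can be sent. */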
static void bfin_ipi_init(void)
{
        unsigned int cpu;
        struct ipi_data *bfin_ipi_data;
        for_each_possible_cpu(cpu) {
                bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
                atomic_set(&bfin_ipi_data->bits, 0);
                atomic_set(&bfin_ipi_data->count, 0);
        }
}

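/* Post a message to each CPU in cpumask: set the message bit and bump the
 * count in the target's per-CPU ipi_data, then raise IRQ_SUPPLE_1 on that
 * core so ipi_handler_int1() runs there.
 */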
void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)
{
        unsigned int cpu;
        struct ipi_data *bfin_ipi_data;
        unsigned long flags;

        local_irq_save(flags);
        for_each_cpu(cpu, cpumask) {
                bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
                atomic_or((1 << msg), &bfin_ipi_data->bits);
                atomic_inc(&bfin_ipi_data->count);
        }
        local_irq_restore(flags);
        smp_wmb();
        for_each_cpu(cpu, cpumask)
                platform_send_ipi_cpu(cpu, IRQ_SUPPLE_1);
}

void arch_send_call_function_single_ipi(int cpu)
{
        send_ipi(cpumask_of(cpu), BFIN_IPI_CALL_FUNC);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        send_ipi(mask, BFIN_IPI_CALL_FUNC);
}

void smp_send_reschedule(int cpu)
{
        send_ipi(cpumask_of(cpu), BFIN_IPI_RESCHEDULE);

        return;
}

void smp_send_msg(const struct cpumask *mask, unsigned long type)
{
        send_ipi(mask, type);
}

void smp_timer_broadcast(const struct cpumask *mask)
{
        smp_send_msg(mask, BFIN_IPI_TIMER);
}

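/* Stop every other online CPU by sending BFIN_IPI_CPU_STOP to all of them. */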
void smp_send_stop(void)
{
        cpumask_t callmap;

        preempt_disable();
        cpumask_copy(&callmap, cpu_online_mask);
        cpumask_clear_cpu(smp_processor_id(), &callmap);
        if (!cpumask_empty(&callmap))
                send_ipi(&callmap, BFIN_IPI_CPU_STOP);

        preempt_enable();

        return;
}

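/* Bring up a secondary core: publish the idle task's stack via
 * secondary_stack for the secondary's startup code, then let the platform
 * code release the core from reset.
 */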
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
        int ret;

        secondary_stack = task_stack_page(idle) + THREAD_SIZE;

        ret = platform_boot_secondary(cpu, idle);

        secondary_stack = NULL;

        return ret;
}

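/* Per-core interrupt setup on the secondary: clear IMASK, acknowledge any
 * latched interrupts in ILAT, and record the interrupt levels to enable in
 * bfin_irq_flags.
 */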
static void setup_secondary(unsigned int cpu)
{
        unsigned long ilat;

        bfin_write_IMASK(0);
        CSYNC();
        ilat = bfin_read_ILAT();
        CSYNC();
        bfin_write_ILAT(ilat);
        CSYNC();

        /* Enable interrupt levels IVG7-15. The IARs have already been
         * programmed by the boot CPU.  */
        bfin_irq_flags |= IMASK_IVG15 |
            IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
            IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
}

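/* C entry point for a secondary core (CoreB). Reports a prior double fault
 * if one is recorded, attaches the idle task to init_mm, sets up local
 * interrupts, caches and the core timer, calibrates loops_per_jiffy, and
 * finally marks the CPU online before entering the idle loop.
 */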
void secondary_start_kernel(void)
{
        unsigned int cpu = smp_processor_id();
        struct mm_struct *mm = &init_mm;

        if (_bfin_swrst & SWRST_DBL_FAULT_B) {
                printk(KERN_EMERG "CoreB Recovering from DOUBLE FAULT event\n");
#ifdef CONFIG_DEBUG_DOUBLEFAULT
                printk(KERN_EMERG " While handling exception (EXCAUSE = %#x) at %pF\n",
                        initial_pda_coreb.seqstat_doublefault & SEQSTAT_EXCAUSE,
                        initial_pda_coreb.retx_doublefault);
                printk(KERN_NOTICE "   DCPLB_FAULT_ADDR: %pF\n",
                        initial_pda_coreb.dcplb_doublefault_addr);
                printk(KERN_NOTICE "   ICPLB_FAULT_ADDR: %pF\n",
                        initial_pda_coreb.icplb_doublefault_addr);
#endif
                printk(KERN_NOTICE " The instruction at %pF caused a double exception\n",
                        initial_pda_coreb.retx);
        }

        /*
         * We want the D-cache to be enabled early, in case the atomic
         * support code emulates cache coherence (see
         * __ARCH_SYNC_CORE_DCACHE).
         */
        init_exception_vectors();

        local_irq_disable();

        /* Attach the new idle task to the global mm. */
        atomic_inc(&mm->mm_users);
        atomic_inc(&mm->mm_count);
        current->active_mm = mm;

        preempt_disable();

        setup_secondary(cpu);

        platform_secondary_init(cpu);
        /* setup local core timer */
        bfin_local_timer_setup();

        local_irq_enable();

        bfin_setup_caches(cpu);

        notify_cpu_starting(cpu);
        /*
         * Calibrate the loops per jiffy value.
         * IRQs need to be enabled here - the D-cache can be invalidated
         * in the timer irq handler, so core B can read correct jiffies.
         */
        calibrate_delay();

        /* We are done with local CPU inits, unblock the boot CPU. */
        set_cpu_online(cpu, true);
        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

void __init smp_prepare_boot_cpu(void)
{
}

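/* Boot-time SMP setup: let the platform prepare the secondary core(s),
 * reset the per-CPU IPI state, and claim the two supplemental interrupts
 * used for IPIs.
 */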
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        platform_prepare_cpus(max_cpus);
        bfin_ipi_init();
        platform_request_ipi(IRQ_SUPPLE_0, ipi_handler_int0);
        platform_request_ipi(IRQ_SUPPLE_1, ipi_handler_int1);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
        unsigned long bogosum = 0;
        unsigned int cpu;

        for_each_online_cpu(cpu)
                bogosum += loops_per_jiffy;

        printk(KERN_INFO "SMP: Total of %d processors activated "
               "(%lu.%02lu BogoMIPS).\n",
               num_online_cpus(),
               bogosum / (500000/HZ),
               (bogosum / (5000/HZ)) % 100);
}

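/* Flush the I-cache for a range on all other CPUs: stash the bounds in the
 * shared smp_flush_data and cross-call ipi_flush_icache() via
 * smp_call_function().
 */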
void smp_icache_flush_range_others(unsigned long start, unsigned long end)
{
        smp_flush_data.start = start;
        smp_flush_data.end = end;

        preempt_disable();
        if (smp_call_function(&ipi_flush_icache, &smp_flush_data, 1))
                printk(KERN_WARNING "SMP: failed to run I-cache flush request on other CPUs\n");
        preempt_enable();
}
EXPORT_SYMBOL_GPL(smp_icache_flush_range_others);

#ifdef __ARCH_SYNC_CORE_ICACHE
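/* Software I-cache coherence helper: invalidate this core's entire I-cache
 * and count how many times it has had to do so.
 */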
unsigned long icache_invld_count[NR_CPUS];
void resync_core_icache(void)
{
        unsigned int cpu = get_cpu();
        blackfin_invalidate_entire_icache();
        icache_invld_count[cpu]++;
        put_cpu();
}
EXPORT_SYMBOL(resync_core_icache);
#endif

#ifdef __ARCH_SYNC_CORE_DCACHE
unsigned long dcache_invld_count[NR_CPUS];
unsigned long barrier_mask __attribute__ ((__section__(".l2.bss")));

void resync_core_dcache(void)
{
        unsigned int cpu = get_cpu();
        blackfin_invalidate_entire_dcache();
        dcache_invld_count[cpu]++;
        put_cpu();
}
EXPORT_SYMBOL(resync_core_dcache);
#endif

#ifdef CONFIG_HOTPLUG_CPU
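/* CPU hotplug: CPU0 cannot be taken down; any other CPU is marked offline
 * here and finishes dying in cpu_die() below.
 */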
int __cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();

        if (cpu == 0)
                return -EPERM;

        set_cpu_online(cpu, false);
        return 0;
}

int __cpu_die(unsigned int cpu)
{
        return cpu_wait_death(cpu, 5);
}

void cpu_die(void)
{
        (void)cpu_report_death();

        atomic_dec(&init_mm.mm_users);
        atomic_dec(&init_mm.mm_count);

        local_irq_disable();
        platform_cpu_die();
}
#endif