linux/arch/blackfin/mach-common/smp.c
/*
 * IPI management based on arch/arm/kernel/smp.c (Copyright 2002 ARM Limited)
 *
 * Copyright 2007-2009 Analog Devices Inc.
 *                         Philippe Gerum <rpm@xenomai.org>
 *
 * Licensed under the GPL-2.
 */

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/cpu.h>
#include <asm/time.h>
#include <linux/err.h>

/*
 * Anomaly notes:
 * 05000120 - we always define corelock as 32-bit integer in L2
 */
struct corelock_slot corelock __attribute__ ((__section__(".l2.bss")));

void __cpuinitdata *init_retx_coreb, *init_saved_retx_coreb,
        *init_saved_seqstat_coreb, *init_saved_icplb_fault_addr_coreb,
        *init_saved_dcplb_fault_addr_coreb;

cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);

cpumask_t cpu_online_map;
EXPORT_SYMBOL(cpu_online_map);

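/* IPI message types understood by ipi_handler() below. */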
#define BFIN_IPI_RESCHEDULE   0
#define BFIN_IPI_CALL_FUNC    1
#define BFIN_IPI_CPU_STOP     2

struct blackfin_flush_data {
        unsigned long start;
        unsigned long end;
};

void *secondary_stack;


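/*
 * Bookkeeping for one cross-CPU function call: the function and its
 * argument, plus the masks of CPUs that still have to pick up the
 * message (pending) and that the caller is still waiting on (waitmask).
 */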
struct smp_call_struct {
        void (*func)(void *info);
        void *info;
        int wait;
        cpumask_t pending;
        cpumask_t waitmask;
};

static struct blackfin_flush_data smp_flush_data;

static DEFINE_SPINLOCK(stop_lock);

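/*
 * Each IPI is carried by a heap-allocated ipi_message queued on the
 * target CPU's per-cpu ipi_msg_queue and drained by ipi_handler().
 */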
struct ipi_message {
        struct list_head list;
        unsigned long type;
        struct smp_call_struct call_struct;
};

struct ipi_message_queue {
        struct list_head head;
        spinlock_t lock;
        unsigned long count;
};

static DEFINE_PER_CPU(struct ipi_message_queue, ipi_msg_queue);

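/*
 * Handle BFIN_IPI_CPU_STOP: report where this CPU was, mark it offline
 * and spin forever with interrupts disabled.
 */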
static void ipi_cpu_stop(unsigned int cpu)
{
        spin_lock(&stop_lock);
        printk(KERN_CRIT "CPU%u: stopping\n", cpu);
        dump_stack();
        spin_unlock(&stop_lock);

        cpu_clear(cpu, cpu_online_map);

        local_irq_disable();

        while (1)
                SSYNC();
}

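/*
 * Cross-call handler for smp_icache_flush_range_others(): invalidate
 * the D-cache lines holding the shared bounds structure, then flush the
 * requested I-cache range on this core.
 */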
static void ipi_flush_icache(void *info)
{
        struct blackfin_flush_data *fdata = info;

        /* Invalidate the memory holding the bounds of the flushed region. */
        blackfin_dcache_invalidate_range((unsigned long)fdata,
                                         (unsigned long)fdata + sizeof(*fdata));

        blackfin_icache_flush_range(fdata->start, fdata->end);
}

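/*
 * Handle BFIN_IPI_CALL_FUNC: run the requested function on this CPU and
 * update the caller's pending/wait masks.  Messages nobody waits on are
 * freed here.
 */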
static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
{
        int wait;
        void (*func)(void *info);
        void *info;
        func = msg->call_struct.func;
        info = msg->call_struct.info;
        wait = msg->call_struct.wait;
        cpu_clear(cpu, msg->call_struct.pending);
        func(info);
        if (wait)
                cpu_clear(cpu, msg->call_struct.waitmask);
        else
                kfree(msg);
}

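/*
 * Supplemental interrupt handler: acknowledge the IPI, then drain this
 * CPU's message queue and dispatch each message by type.
 */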
static irqreturn_t ipi_handler(int irq, void *dev_instance)
{
        struct ipi_message *msg;
        struct ipi_message_queue *msg_queue;
        unsigned int cpu = smp_processor_id();

        platform_clear_ipi(cpu);

        msg_queue = &__get_cpu_var(ipi_msg_queue);
        msg_queue->count++;

        spin_lock(&msg_queue->lock);
        while (!list_empty(&msg_queue->head)) {
                msg = list_entry(msg_queue->head.next, typeof(*msg), list);
                list_del(&msg->list);
                switch (msg->type) {
                case BFIN_IPI_RESCHEDULE:
                        /* That's the easiest one; leave it to
                         * return_from_int. */
                        kfree(msg);
                        break;
                case BFIN_IPI_CALL_FUNC:
                        spin_unlock(&msg_queue->lock);
                        ipi_call_function(cpu, msg);
                        spin_lock(&msg_queue->lock);
                        break;
                case BFIN_IPI_CPU_STOP:
                        spin_unlock(&msg_queue->lock);
                        ipi_cpu_stop(cpu);
                        spin_lock(&msg_queue->lock);
                        kfree(msg);
                        break;
                default:
                        printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n",
                               cpu, msg->type);
                        kfree(msg);
                        break;
                }
        }
        spin_unlock(&msg_queue->lock);
        return IRQ_HANDLED;
}

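/* Initialize the per-cpu IPI message queue of every possible CPU. */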
static void ipi_queue_init(void)
{
        unsigned int cpu;
        struct ipi_message_queue *msg_queue;
        for_each_possible_cpu(cpu) {
                msg_queue = &per_cpu(ipi_msg_queue, cpu);
                INIT_LIST_HEAD(&msg_queue->head);
                spin_lock_init(&msg_queue->lock);
                msg_queue->count = 0;
        }
}

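/*
 * Queue a BFIN_IPI_CALL_FUNC message for every other online CPU and
 * kick them; if wait is set, spin until all of them have run func().
 */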
int smp_call_function(void (*func)(void *info), void *info, int wait)
{
        unsigned int cpu;
        cpumask_t callmap;
        unsigned long flags;
        struct ipi_message_queue *msg_queue;
        struct ipi_message *msg;

        callmap = cpu_online_map;
        cpu_clear(smp_processor_id(), callmap);
        if (cpus_empty(callmap))
                return 0;

        msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
        if (!msg)
                return -ENOMEM;
        INIT_LIST_HEAD(&msg->list);
        msg->call_struct.func = func;
        msg->call_struct.info = info;
        msg->call_struct.wait = wait;
        msg->call_struct.pending = callmap;
        msg->call_struct.waitmask = callmap;
        msg->type = BFIN_IPI_CALL_FUNC;

        for_each_cpu_mask(cpu, callmap) {
                msg_queue = &per_cpu(ipi_msg_queue, cpu);
                spin_lock_irqsave(&msg_queue->lock, flags);
                list_add_tail(&msg->list, &msg_queue->head);
                spin_unlock_irqrestore(&msg_queue->lock, flags);
                platform_send_ipi_cpu(cpu);
        }
        if (wait) {
                while (!cpus_empty(msg->call_struct.waitmask))
                        blackfin_dcache_invalidate_range(
                                (unsigned long)(&msg->call_struct.waitmask),
                                (unsigned long)(&msg->call_struct.waitmask));
                kfree(msg);
        }
        return 0;
}
EXPORT_SYMBOL_GPL(smp_call_function);

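/* Like smp_call_function(), but targeting a single CPU. */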
int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
                                int wait)
{
        unsigned int cpu = cpuid;
        cpumask_t callmap;
        unsigned long flags;
        struct ipi_message_queue *msg_queue;
        struct ipi_message *msg;

        if (cpu_is_offline(cpu))
                return 0;
        cpus_clear(callmap);
        cpu_set(cpu, callmap);

        msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
        if (!msg)
                return -ENOMEM;
        INIT_LIST_HEAD(&msg->list);
        msg->call_struct.func = func;
        msg->call_struct.info = info;
        msg->call_struct.wait = wait;
        msg->call_struct.pending = callmap;
        msg->call_struct.waitmask = callmap;
        msg->type = BFIN_IPI_CALL_FUNC;

        msg_queue = &per_cpu(ipi_msg_queue, cpu);
        spin_lock_irqsave(&msg_queue->lock, flags);
        list_add_tail(&msg->list, &msg_queue->head);
        spin_unlock_irqrestore(&msg_queue->lock, flags);
        platform_send_ipi_cpu(cpu);

        if (wait) {
                while (!cpus_empty(msg->call_struct.waitmask))
                        blackfin_dcache_invalidate_range(
                                (unsigned long)(&msg->call_struct.waitmask),
                                (unsigned long)(&msg->call_struct.waitmask));
                kfree(msg);
        }
        return 0;
}
EXPORT_SYMBOL_GPL(smp_call_function_single);

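/*
 * Ask the given CPU to reschedule.  The message carries no payload; the
 * actual work is done on the interrupt return path (return_from_int).
 */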
void smp_send_reschedule(int cpu)
{
        unsigned long flags;
        struct ipi_message_queue *msg_queue;
        struct ipi_message *msg;

        if (cpu_is_offline(cpu))
                return;

        msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
        if (!msg)
                return;
        INIT_LIST_HEAD(&msg->list);
        msg->type = BFIN_IPI_RESCHEDULE;

        msg_queue = &per_cpu(ipi_msg_queue, cpu);
        spin_lock_irqsave(&msg_queue->lock, flags);
        list_add_tail(&msg->list, &msg_queue->head);
        spin_unlock_irqrestore(&msg_queue->lock, flags);
        platform_send_ipi_cpu(cpu);

        return;
}

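/* Stop every other online CPU via a BFIN_IPI_CPU_STOP message. */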
void smp_send_stop(void)
{
        unsigned int cpu;
        cpumask_t callmap;
        unsigned long flags;
        struct ipi_message_queue *msg_queue;
        struct ipi_message *msg;

        callmap = cpu_online_map;
        cpu_clear(smp_processor_id(), callmap);
        if (cpus_empty(callmap))
                return;

        msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
        if (!msg)
                return;
        INIT_LIST_HEAD(&msg->list);
        msg->type = BFIN_IPI_CPU_STOP;

        for_each_cpu_mask(cpu, callmap) {
                msg_queue = &per_cpu(ipi_msg_queue, cpu);
                spin_lock_irqsave(&msg_queue->lock, flags);
                list_add_tail(&msg->list, &msg_queue->head);
                spin_unlock_irqrestore(&msg_queue->lock, flags);
                platform_send_ipi_cpu(cpu);
        }
        return;
}

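/*
 * Bring a secondary core online: fork its idle task, publish the idle
 * stack in secondary_stack for the startup code, then let the platform
 * release the core.
 */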
int __cpuinit __cpu_up(unsigned int cpu)
{
        struct task_struct *idle;
        int ret;

        idle = fork_idle(cpu);
        if (IS_ERR(idle)) {
                printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
                return PTR_ERR(idle);
        }

        secondary_stack = task_stack_page(idle) + THREAD_SIZE;
        smp_wmb();

        ret = platform_boot_secondary(cpu, idle);

        if (ret) {
                cpu_clear(cpu, cpu_present_map);
                printk(KERN_CRIT "CPU%u: processor failed to boot (%d)\n", cpu, ret);
                free_task(idle);
        } else
                cpu_set(cpu, cpu_online_map);

        secondary_stack = NULL;

        return ret;
}

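/*
 * Per-CPU interrupt setup on the secondary core: clear any latched
 * interrupts, enable interrupt levels IVG7-15 and set up the local
 * tick source.
 */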
static void __cpuinit setup_secondary(unsigned int cpu)
{
#if !defined(CONFIG_TICKSOURCE_GPTMR0)
        struct irq_desc *timer_desc;
#endif
        unsigned long ilat;

        bfin_write_IMASK(0);
        CSYNC();
        ilat = bfin_read_ILAT();
        CSYNC();
        bfin_write_ILAT(ilat);
        CSYNC();

        /* Enable interrupt levels IVG7-15. The IARs have already been
         * programmed by the boot CPU.  */
        bfin_irq_flags |= IMASK_IVG15 |
            IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
            IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;

#if defined(CONFIG_TICKSOURCE_GPTMR0)
        /* Power down the core timer, just to play safe. */
        bfin_write_TCNTL(0);

        /* System timer0 has been set up by CoreA. */
#else
        timer_desc = irq_desc + IRQ_CORETMR;
        setup_core_timer();
        timer_desc->chip->enable(IRQ_CORETMR);
#endif
}

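/*
 * C entry point of the secondary core: report a recorded double fault
 * if there was one, set up exception vectors and caches, attach the
 * idle task to init_mm and enter the idle loop.
 */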
void __cpuinit secondary_start_kernel(void)
{
        unsigned int cpu = smp_processor_id();
        struct mm_struct *mm = &init_mm;

        if (_bfin_swrst & SWRST_DBL_FAULT_B) {
                printk(KERN_EMERG "CoreB Recovering from DOUBLE FAULT event\n");
#ifdef CONFIG_DEBUG_DOUBLEFAULT
                printk(KERN_EMERG " While handling exception (EXCAUSE = 0x%x) at %pF\n",
                        (int)init_saved_seqstat_coreb & SEQSTAT_EXCAUSE, init_saved_retx_coreb);
                printk(KERN_NOTICE "   DCPLB_FAULT_ADDR: %pF\n", init_saved_dcplb_fault_addr_coreb);
                printk(KERN_NOTICE "   ICPLB_FAULT_ADDR: %pF\n", init_saved_icplb_fault_addr_coreb);
#endif
                printk(KERN_NOTICE " The instruction at %pF caused a double exception\n",
                        init_retx_coreb);
        }

        /*
         * We want the D-cache to be enabled early, in case the atomic
         * support code emulates cache coherence (see
         * __ARCH_SYNC_CORE_DCACHE).
         */
        init_exception_vectors();

        bfin_setup_caches(cpu);

        local_irq_disable();

        /* Attach the new idle task to the global mm. */
        atomic_inc(&mm->mm_users);
        atomic_inc(&mm->mm_count);
        current->active_mm = mm;
        BUG_ON(current->mm);    /* Can't be, but better be safe than sorry. */

        preempt_disable();

        setup_secondary(cpu);

        local_irq_enable();

        platform_secondary_init(cpu);

        cpu_idle();
}


void __init smp_prepare_boot_cpu(void)
{
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
        platform_prepare_cpus(max_cpus);
        ipi_queue_init();
        platform_request_ipi(&ipi_handler);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
        unsigned long bogosum = 0;
        unsigned int cpu;

        for_each_online_cpu(cpu)
                bogosum += loops_per_jiffy;

        printk(KERN_INFO "SMP: Total of %d processors activated "
               "(%lu.%02lu BogoMIPS).\n",
               num_online_cpus(),
               bogosum / (500000/HZ),
               (bogosum / (5000/HZ)) % 100);
}

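/*
 * Ask all other online CPUs to flush the given I-cache range.  The
 * bounds are passed through the single shared smp_flush_data structure.
 */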
void smp_icache_flush_range_others(unsigned long start, unsigned long end)
{
        smp_flush_data.start = start;
        smp_flush_data.end = end;

        if (smp_call_function(&ipi_flush_icache, &smp_flush_data, 0))
                printk(KERN_WARNING "SMP: failed to run I-cache flush request on other CPUs\n");
}
EXPORT_SYMBOL_GPL(smp_icache_flush_range_others);

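/*
 * Cache resync helpers, used when the cores do not snoop each other:
 * invalidate this core's entire I-cache/D-cache and count the event in
 * the per-cpu statistics.
 */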
#ifdef __ARCH_SYNC_CORE_ICACHE
void resync_core_icache(void)
{
        unsigned int cpu = get_cpu();
        blackfin_invalidate_entire_icache();
        ++per_cpu(cpu_data, cpu).icache_invld_count;
        put_cpu();
}
EXPORT_SYMBOL(resync_core_icache);
#endif

#ifdef __ARCH_SYNC_CORE_DCACHE
unsigned long barrier_mask __attribute__ ((__section__(".l2.bss")));

void resync_core_dcache(void)
{
        unsigned int cpu = get_cpu();
        blackfin_invalidate_entire_dcache();
        ++per_cpu(cpu_data, cpu).dcache_invld_count;
        put_cpu();
}
EXPORT_SYMBOL(resync_core_dcache);
#endif
