linux/arch/hexagon/kernel/smp.c
/*
 * SMP support for Hexagon
 *
 * Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>

#include <asm/time.h>    /*  timer_interrupt  */
#include <asm/hexagon_vm.h>

#define BASE_IPI_IRQ 26

/*
 * cpu_possible_mask needs to be filled out prior to setup_per_cpu_areas
 * (which runs before any of our smp_prepare_cpus() work), so that the
 * per_cpu areas can be sized and set up.
 */

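/*
 * Rough, illustrative ordering only (not actual platform boot code):
 *
 *	smp_start_cpus();	- fills cpu_possible_mask (see below)
 *	setup_per_cpu_areas();	- sizes and allocates the per_cpu areas
 *	smp_prepare_cpus(n);	- marks CPUs present, registers boot IPI
 */

/*
 * Per-CPU mailbox of pending IPI messages: send_ipi() sets one bit per
 * message type, and the receiving CPU drains the whole word atomically
 * (xchg) in handle_ipi().
 */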
struct ipi_data {
	unsigned long bits;
};

static DEFINE_PER_CPU(struct ipi_data, ipi_data);

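/*
 * Dispatch every message bit that was set in *ops.  The scan starts at
 * bit 1; bit 0 is assumed to be IPI_NOP (or otherwise unused) in the
 * ipi_message_type enum, so it never needs handling here.
 */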
static inline void __handle_ipi(unsigned long *ops, struct ipi_data *ipi,
				int cpu)
{
	unsigned long msg = 0;
	do {
		msg = find_next_bit(ops, BITS_PER_LONG, msg+1);

		switch (msg) {

		case IPI_TIMER:
			ipi_timer();
			break;

		case IPI_CALL_FUNC:
			generic_smp_call_function_interrupt();
			break;

		case IPI_CALL_FUNC_SINGLE:
			generic_smp_call_function_single_interrupt();
			break;

		case IPI_CPU_STOP:
			/*
			 * call vmstop()
			 */
			__vmstop();
			break;

		case IPI_RESCHEDULE:
			scheduler_ipi();
			break;
		}
	} while (msg < BITS_PER_LONG);
}

/*  Used for IPI calls from other CPUs to unmask an interrupt  */
void smp_vm_unmask_irq(void *info)
{
	__vmintop_locen((long) info);
}

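/*
 * Illustrative use only (callers live elsewhere in the arch code):
 * cross-call this so each other CPU unmasks the line in its own virtual
 * interrupt controller, e.g. roughly:
 *
 *	smp_call_function(smp_vm_unmask_irq, (void *)(long)irq, 1);
 */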

/*
 * Based on Alpha's IPI code.  As a standard irqaction handler it takes
 * (int, void *): the first arg is the irq number and the second is the
 * dev_id cookie (unused here).
 */

irqreturn_t handle_ipi(int irq, void *desc)
{
	int cpu = smp_processor_id();
	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
	unsigned long ops;

	while ((ops = xchg(&ipi->bits, 0)) != 0)
		__handle_ipi(&ops, ipi, cpu);
	return IRQ_HANDLED;
}

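/*
 * Post @msg to every CPU in @cpumask: set the message bit in the
 * target's ipi_data and kick its per-CPU IPI line (BASE_IPI_IRQ + cpu)
 * through the hypervisor.
 */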
void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)
{
	unsigned long flags;
	unsigned long cpu;
	unsigned long retval;

	local_irq_save(flags);

	for_each_cpu(cpu, cpumask) {
		struct ipi_data *ipi = &per_cpu(ipi_data, cpu);

		set_bit(msg, &ipi->bits);
		/*  Possible barrier here  */
		retval = __vmintop_post(BASE_IPI_IRQ+cpu);

		if (retval != 0) {
			printk(KERN_ERR "interrupt %ld not configured?\n",
				BASE_IPI_IRQ+cpu);
		}
	}

	local_irq_restore(flags);
}

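/*
 * Shared irqaction used to hook handle_ipi() onto each CPU's IPI line;
 * registered for the boot CPU in smp_prepare_cpus() and for secondaries
 * in start_secondary().
 */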
static struct irqaction ipi_intdesc = {
	.handler = handle_ipi,
	.flags = IRQF_TRIGGER_RISING,
	.name = "ipi_handler"
};

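/*
 * Nothing to do for the boot CPU here; its IPI line is registered later
 * in smp_prepare_cpus().
 */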
void __init smp_prepare_boot_cpu(void)
{
}

/*
 * Interrupts should already be disabled by the VM and SP should already
 * be correct; we still need to point THREADINFO_REG at the current
 * thread_info.
 */

void start_secondary(void)
{
	unsigned int cpu;
	unsigned long thread_ptr;

	/*  Calculate thread_info pointer from stack pointer  */
	__asm__ __volatile__(
		"%0 = SP;\n"
		: "=r" (thread_ptr)
	);

	thread_ptr = thread_ptr & ~(THREAD_SIZE-1);

	__asm__ __volatile__(
		QUOTED_THREADINFO_REG " = %0;\n"
		:
		: "r" (thread_ptr)
	);

	/*  Set the memory struct  */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	cpu = smp_processor_id();

	setup_irq(BASE_IPI_IRQ + cpu, &ipi_intdesc);

	/*  Register the clock_event dummy  */
	setup_percpu_clockdev();

	printk(KERN_INFO "%s cpu %d\n", __func__, current_thread_info()->cpu);

	notify_cpu_starting(cpu);

	set_cpu_online(cpu, true);

	local_irq_enable();

	cpu_startup_entry(CPUHP_ONLINE);
}


/*
 * Called once for each present CPU: starts the CPU running
 * start_secondary() and then busy-waits until the new CPU sets
 * cpu_online(cpu).
 */

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	struct thread_info *thread = (struct thread_info *)idle->stack;
	void *stack_start;

	thread->cpu = cpu;

	/*  Boot to the head.  */
	stack_start = ((void *) thread) + THREAD_SIZE;
	__vmstart(start_secondary, stack_start);

	while (!cpu_online(cpu))
		barrier();

	return 0;
}

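/*
 * Nothing left to do once all secondaries are up; the cpu masks and the
 * per-CPU IPI lines are handled elsewhere in this file.
 */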
void __init smp_cpus_done(unsigned int max_cpus)
{
}

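/*
 * Mark the first max_cpus CPUs present and register the boot CPU's IPI
 * line; secondaries register theirs in start_secondary().
 */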
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

	/*
	 * should eventually have some sort of machine
	 * descriptor that has this stuff
	 */

	/*  Right now, let's just fake it. */
	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);

	/*  Also need to register the interrupts for IPI  */
	if (max_cpus > 1)
		setup_irq(BASE_IPI_IRQ, &ipi_intdesc);
}

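/*
 * Standard arch cross-call hooks: each one just wraps send_ipi() with
 * the appropriate message type and target mask.
 */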
void smp_send_reschedule(int cpu)
{
	send_ipi(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
	struct cpumask targets;
	cpumask_copy(&targets, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &targets);
	send_ipi(&targets, IPI_CPU_STOP);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_ipi(mask, IPI_CALL_FUNC);
}

int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

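/*
 * Fills cpu_possible_mask; per the comment near the top of this file,
 * this has to happen before setup_per_cpu_areas().
 */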
void smp_start_cpus(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		set_cpu_possible(i, true);
}