linux/arch/openrisc/kernel/smp.c
/*
 * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
 * Copyright (C) 2017 Stafford Horne <shorne@gmail.com>
 *
 * Based on arm64 and arc implementations
 * Copyright (C) 2013 ARM Ltd.
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2.  This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/irq.h>
#include <asm/cpuinfo.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/time.h>

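/*
 * Hook used to raise an IPI on a set of CPUs.  The interrupt controller
 * driver is expected to register it via set_smp_cross_call().
 */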
static void (*smp_cross_call)(const struct cpumask *, unsigned int);

unsigned long secondary_release = -1;
struct thread_info *secondary_thread_info;

enum ipi_msg_type {
	IPI_WAKEUP,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
};

static DEFINE_SPINLOCK(boot_lock);

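/*
 * Release one secondary CPU: record which CPU is being brought up in
 * secondary_release and wake it with an IPI_WAKEUP.
 */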
static void boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	/*
	 * set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	secondary_release = cpu;
	smp_cross_call(cpumask_of(cpu), IPI_WAKEUP);

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);
}

void __init smp_prepare_boot_cpu(void)
{
}

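/* Mark every CPU id up to NR_CPUS as possible. */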
void __init smp_init_cpus(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		set_cpu_possible(i, true);
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

static DECLARE_COMPLETION(cpu_running);

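/*
 * Bring one secondary CPU online: hand it the idle thread and the init_mm
 * page tables, kick it with boot_secondary() and wait up to one second for
 * it to signal cpu_running from secondary_start_kernel().
 */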
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	if (smp_cross_call == NULL) {
		pr_warn("CPU%u: failed to start, IPI controller missing\n",
			cpu);
		return -EIO;
	}

	secondary_thread_info = task_thread_info(idle);
	current_pgd[cpu] = init_mm.pgd;

	boot_secondary(cpu, idle);
	if (!wait_for_completion_timeout(&cpu_running,
					msecs_to_jiffies(1000))) {
		pr_crit("CPU%u: failed to start\n", cpu);
		return -EIO;
	}
	synchronise_count_master(cpu);

	return 0;
}

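/*
 * C entry point for a secondary CPU, reached from the early assembly
 * start-up code once the CPU has been released by boot_secondary().
 */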
asmlinkage __init void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();
	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	pr_info("CPU%u: Booted secondary processor\n", cpu);

	setup_cpuinfo();
	openrisc_clockevent_init();

	notify_cpu_starting(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue
	 */
	complete(&cpu_running);

	synchronise_count_slave(cpu);
	set_cpu_online(cpu, true);

	local_irq_enable();

	preempt_disable();
	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

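/*
 * Dispatch an incoming IPI.  Called from the platform IPI interrupt
 * handler with the message type that was raised via smp_cross_call().
 */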
void handle_IPI(unsigned int ipi_msg)
{
	unsigned int cpu = smp_processor_id();

	switch (ipi_msg) {
	case IPI_WAKEUP:
		break;

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		generic_smp_call_function_interrupt();
		break;

	case IPI_CALL_FUNC_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;

	default:
		WARN(1, "CPU%u: Unknown IPI message 0x%x\n", cpu, ipi_msg);
		break;
	}
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

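/*
 * Park the calling CPU: mark it offline, mask interrupts and, if the power
 * management unit is present, drop into doze; otherwise spin forever.
 */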
static void stop_this_cpu(void *dummy)
{
	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

	local_irq_disable();
	/* CPU Doze */
	if (mfspr(SPR_UPR) & SPR_UPR_PMP)
		mtspr(SPR_PMR, mfspr(SPR_PMR) | SPR_PMR_DME);
	/* If that didn't work, infinite loop */
	while (1)
		;
}

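/* Stop all other CPUs, e.g. on shutdown or panic. */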
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

/* not supported, yet */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

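/*
 * Register the function used to raise IPIs.  Called by the interrupt
 * controller driver during its initialisation.
 */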
void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	smp_cross_call = fn;
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

/* TLB flush operations - performed on each CPU */
static inline void ipi_flush_tlb_all(void *ignored)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}

/*
 * FIXME: implement proper functionality instead of flush_tlb_all.
 * *But*, as things currently stand, the local_flush_tlb_* functions will
 * all boil down to local_flush_tlb_all anyway.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}

/* Instruction cache invalidate - performed on each cpu */
static void ipi_icache_page_inv(void *arg)
{
	struct page *page = arg;

	local_icache_page_inv(page);
}

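/* Invalidate the instruction cache lines for @page on all online CPUs. */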
void smp_icache_page_inv(struct page *page)
{
	on_each_cpu(ipi_icache_page_inv, page, 1);
}
EXPORT_SYMBOL(smp_icache_page_inv);