linux/arch/riscv/kernel/smp.c
/*
 * SMP initialisation and IPI support
 * Based on arch/arm64/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2015 Regents of the University of California
 * Copyright (C) 2017 SiFive
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/delay.h>

#include <asm/sbi.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

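/*
 * IPI message types.  Each type occupies a single bit in a per-CPU
 * bitmask (see ipi_data below), so multiple messages can be coalesced
 * into one software interrupt.
 */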
enum ipi_message_type {
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
        IPI_CPU_STOP,
        IPI_MAX
};

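/*
 * Map from logical CPU ids to hart ids.  Hart ids are assigned by the
 * platform and need not be contiguous or start at zero, so the kernel
 * keeps its own dense CPU numbering alongside them.
 */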
unsigned long __cpuid_to_hartid_map[NR_CPUS] = {
        [0 ... NR_CPUS-1] = INVALID_HARTID
};

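/* The boot hart always becomes logical CPU 0. */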
void __init smp_setup_processor_id(void)
{
        cpuid_to_hartid_map(0) = boot_cpu_hartid;
}

/*
 * A collection of single-bit IPI messages, plus per-CPU counters used
 * for interrupt accounting (see show_ipi_stats() below).
 */
static struct {
        unsigned long stats[IPI_MAX] ____cacheline_aligned;
        unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;

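/*
 * Translate a hart id back to a logical CPU id with a linear search of
 * the map.  Returns -1 if no CPU is using the given hart id.
 */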
int riscv_hartid_to_cpuid(int hartid)
{
        int i;

        for (i = 0; i < NR_CPUS; i++)
                if (cpuid_to_hartid_map(i) == hartid)
                        return i;

        pr_err("Couldn't find cpu id for hartid [%d]\n", hartid);
        return -1;
}

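/* Convert a mask of logical CPU ids into a mask of hart ids. */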
void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out)
{
        int cpu;

        for_each_cpu(cpu, in)
                cpumask_set_cpu(cpuid_to_hartid_map(cpu), out);
}

/* Unsupported */
int setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}

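/* Take this CPU offline and park it in a wait-for-interrupt loop. */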
static void ipi_stop(void)
{
        set_cpu_online(smp_processor_id(), false);
        while (1)
                wait_for_interrupt();
}

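/*
 * Handle an incoming software interrupt (IPI).  The pending message
 * bitmask is snapshotted and cleared atomically with xchg(); the loop
 * then dispatches every set bit, and repeats in case further messages
 * arrived while earlier ones were being handled.
 */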
void riscv_software_interrupt(void)
{
        unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
        unsigned long *stats = ipi_data[smp_processor_id()].stats;

        /* Clear pending IPI */
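        /*
         * Note: SIE_SSIE names the soft-interrupt enable bit, but the
         * same bit position is the soft-interrupt pending bit in sip,
         * so the constant doubles as the acknowledge mask here.
         */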
        csr_clear(sip, SIE_SSIE);

        while (true) {
                unsigned long ops;

                /* Order bit clearing and data access. */
                mb();

                ops = xchg(pending_ipis, 0);
                if (ops == 0)
                        return;

                if (ops & (1 << IPI_RESCHEDULE)) {
                        stats[IPI_RESCHEDULE]++;
                        scheduler_ipi();
                }

                if (ops & (1 << IPI_CALL_FUNC)) {
                        stats[IPI_CALL_FUNC]++;
                        generic_smp_call_function_interrupt();
                }

                if (ops & (1 << IPI_CPU_STOP)) {
                        stats[IPI_CPU_STOP]++;
                        ipi_stop();
                }

                BUG_ON((ops >> IPI_MAX) != 0);

                /* Order data access and bit testing. */
                mb();
        }
}

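/*
 * Send an IPI of type @operation to every CPU in @to_whom.  The sender
 * sets the message bit in each target's ipi_data[].bits and then asks
 * the SBI firmware to raise a software interrupt on the corresponding
 * harts.  The barriers pair with those in riscv_software_interrupt(),
 * so a receiver observes the bit once the interrupt arrives.
 */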
static void
send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
{
        int cpuid, hartid;
        struct cpumask hartid_mask;

        cpumask_clear(&hartid_mask);
        mb();
        for_each_cpu(cpuid, to_whom) {
                set_bit(operation, &ipi_data[cpuid].bits);
                hartid = cpuid_to_hartid_map(cpuid);
                cpumask_set_cpu(hartid, &hartid_mask);
        }
        mb();
        sbi_send_ipi(cpumask_bits(&hartid_mask));
}

static const char * const ipi_names[] = {
        [IPI_RESCHEDULE]        = "Rescheduling interrupts",
        [IPI_CALL_FUNC]         = "Function call interrupts",
        [IPI_CPU_STOP]          = "CPU stop interrupts",
};

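/*
 * Dump the per-CPU IPI counters in /proc/interrupts style: one row per
 * message type, one column per online CPU.
 */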
void show_ipi_stats(struct seq_file *p, int prec)
{
        unsigned int cpu, i;

        for (i = 0; i < IPI_MAX; i++) {
                seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
                           prec >= 4 ? " " : "");
                for_each_online_cpu(cpu)
                        seq_printf(p, "%10lu ", ipi_data[cpu].stats[i]);
                seq_printf(p, " %s\n", ipi_names[i]);
        }
}

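/* Entry points used by the generic smp_call_function_*() machinery. */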
void arch_send_call_function_ipi_mask(struct cpumask *mask)
{
        send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
        send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
}

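/*
 * Stop all other CPUs, e.g. on shutdown or panic: send IPI_CPU_STOP to
 * every other online CPU, then poll for up to one second for them to
 * mark themselves offline.
 */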
void smp_send_stop(void)
{
        unsigned long timeout;

        if (num_online_cpus() > 1) {
                cpumask_t mask;

                cpumask_copy(&mask, cpu_online_mask);
                cpumask_clear_cpu(smp_processor_id(), &mask);

                if (system_state <= SYSTEM_RUNNING)
                        pr_crit("SMP: stopping secondary CPUs\n");
                send_ipi_message(&mask, IPI_CPU_STOP);
        }

        /* Wait up to one second for other CPUs to stop */
        timeout = USEC_PER_SEC;
        while (num_online_cpus() > 1 && timeout--)
                udelay(1);

        if (num_online_cpus() > 1)
                pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
                        cpumask_pr_args(cpu_online_mask));
}

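/* Kick @cpu so the scheduler runs there as soon as possible. */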
void smp_send_reschedule(int cpu)
{
        send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

/*
 * Performs an icache flush for the given MM context.  RISC-V has no direct
 * mechanism for instruction cache shootdowns, so instead we send an IPI that
 * informs the remote harts they need to flush their local instruction caches.
 * To avoid pathologically slow behavior in a common case (a bunch of
 * single-hart processes on a many-hart machine, i.e. 'make -j') we avoid the
 * IPIs for harts that are not currently executing an MM context and instead
 * schedule a deferred local instruction cache flush to be performed before
 * execution resumes on each hart.
 */
void flush_icache_mm(struct mm_struct *mm, bool local)
{
        unsigned int cpu;
        cpumask_t others, hmask, *mask;

        preempt_disable();

        /* Mark every hart's icache as needing a flush for this MM. */
        mask = &mm->context.icache_stale_mask;
        cpumask_setall(mask);
        /* Flush this hart's I$ now, and mark it as flushed. */
        cpu = smp_processor_id();
        cpumask_clear_cpu(cpu, mask);
        local_flush_icache_all();

        /*
         * Flush the I$ of other harts concurrently executing, and mark them as
         * flushed.
         */
        cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
        local |= cpumask_empty(&others);
        if (mm != current->active_mm || !local) {
                cpumask_clear(&hmask);
                riscv_cpuid_to_hartid_mask(&others, &hmask);
                sbi_remote_fence_i(hmask.bits);
        } else {
                /*
                 * It's assumed that at least one strongly ordered operation is
                 * performed on this hart between setting a hart's cpumask bit
                 * and scheduling this MM context on that hart.  Sending an SBI
                 * remote message will do this, but in the case where no
                 * messages are sent we still need to order this hart's writes
                 * with flush_icache_deferred().
                 */
                smp_mb();
        }

        preempt_enable();
}

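/*
 * For reference, the deferred path on a receiving hart looks roughly
 * like the sketch below (the real flush_icache_deferred() lives with
 * the MM context-switch code, not in this file):
 *
 *      if (cpumask_test_and_clear_cpu(cpu, &mm->context.icache_stale_mask))
 *              local_flush_icache_all();
 */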