linux/arch/riscv/kernel/smp.c
/*
 * SMP initialisation and IPI support
 * Based on arch/arm64/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2015 Regents of the University of California
 * Copyright (C) 2017 SiFive
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/sched.h>

#include <asm/sbi.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

/*
 * A collection of single-bit IPI messages.  Each hart owns one
 * cacheline-aligned word; senders set bits in it with set_bit(), and the
 * receiving hart atomically drains the whole word with xchg() in
 * riscv_software_interrupt() below.
 */
static struct {
        unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;

enum ipi_message_type {
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
        IPI_MAX
};

/* Unsupported */
int setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}

void riscv_software_interrupt(void)
{
        unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;

        /* Clear pending IPI */
        csr_clear(sip, SIE_SSIE);

        while (true) {
                unsigned long ops;

                /* Order bit clearing and data access. */
                mb();

                ops = xchg(pending_ipis, 0);
                if (ops == 0)
                        return;

                if (ops & (1 << IPI_RESCHEDULE))
                        scheduler_ipi();

                if (ops & (1 << IPI_CALL_FUNC))
                        generic_smp_call_function_interrupt();

                BUG_ON((ops >> IPI_MAX) != 0);

                /* Order data access and bit testing. */
                mb();
        }
}
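
/*
 * For context, an illustrative sketch (not part of this file) of how the
 * supervisor software interrupt raised by sbi_send_ipi() would reach
 * riscv_software_interrupt() from the trap dispatch path.  The names
 * do_IRQ() and INTERRUPT_CAUSE_SOFTWARE are assumptions about the
 * surrounding port's irq code and may not match it exactly.
 */
#if 0	/* sketch only, never compiled */
asmlinkage void __irq_entry do_IRQ(unsigned int cause, struct pt_regs *regs)
{
        irq_enter();
        switch (cause) {
        case INTERRUPT_CAUSE_SOFTWARE:
                /* Drains this hart's ipi_data word; see above. */
                riscv_software_interrupt();
                break;
        }
        irq_exit();
}
#endif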

static void
send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
{
        int i;

        /* Order prior stores (e.g. call-function data) before the bit set. */
        mb();
        for_each_cpu(i, to_whom)
                set_bit(operation, &ipi_data[i].bits);

        /* Order the bit stores before the SBI call that raises the IPI. */
        mb();
        sbi_send_ipi(cpumask_bits(to_whom));
}

void arch_send_call_function_ipi_mask(struct cpumask *mask)
{
        send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
        send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
}

static void ipi_stop(void *unused)
{
        while (1)
                wait_for_interrupt();
}

void smp_send_stop(void)
{
        on_each_cpu(ipi_stop, NULL, 1);
}

void smp_send_reschedule(int cpu)
{
        send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}
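
/*
 * Illustrative sketch (not part of this file): a caller-side example of the
 * generic cross-call API that ultimately lands in
 * arch_send_call_function_single_ipi() above.  The helper names below are
 * hypothetical; only smp_call_function_single() and csr_read() come from
 * the kernel.
 */
#if 0	/* sketch only, never compiled */
static void read_cycle_csr(void *info)
{
        /* Runs on the target hart, in interrupt context. */
        *(unsigned long *)info = csr_read(cycle);
}

unsigned long read_remote_cycles(int cpu)
{
        unsigned long val = 0;

        /* wait=1: block until the remote hart has run the function. */
        smp_call_function_single(cpu, read_cycle_csr, &val, 1);
        return val;
}
#endif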

/*
 * Performs an icache flush for the given MM context.  RISC-V has no direct
 * mechanism for instruction cache shootdowns, so instead we send an IPI that
 * informs the remote harts they need to flush their local instruction caches.
 * To avoid pathologically slow behavior in a common case (a bunch of
 * single-hart processes on a many-hart machine, e.g. 'make -j') we avoid the
 * IPIs for harts that are not currently executing an MM context and instead
 * schedule a deferred local instruction cache flush to be performed before
 * execution resumes on each hart; see the flush_icache_deferred() sketch
 * below.
 */
void flush_icache_mm(struct mm_struct *mm, bool local)
{
        unsigned int cpu;
        cpumask_t others, *mask;

        preempt_disable();

        /* Mark every hart's icache as needing a flush for this MM. */
        mask = &mm->context.icache_stale_mask;
        cpumask_setall(mask);
        /* Flush this hart's I$ now, and mark it as flushed. */
        cpu = smp_processor_id();
        cpumask_clear_cpu(cpu, mask);
        local_flush_icache_all();

        /*
         * Flush the I$ of other harts concurrently executing, and mark them as
         * flushed.
         */
        cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
        local |= cpumask_empty(&others);
        if (mm != current->active_mm || !local) {
                sbi_remote_fence_i(others.bits);
        } else {
                /*
                 * It's assumed that at least one strongly ordered operation is
                 * performed on this hart between setting a hart's cpumask bit
                 * and scheduling this MM context on that hart.  Sending an SBI
                 * remote message will do this, but in the case where no
                 * messages are sent we still need to order this hart's writes
                 * with flush_icache_deferred().
                 */
                smp_mb();
        }

        preempt_enable();
}
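
/*
 * Illustrative sketch (not part of this file): the deferred flush that
 * pairs with flush_icache_mm() above.  In the real port this logic lives
 * in the context-switch path; the exact name and location are assumptions
 * here.  Before a hart resumes executing an MM whose bit is still set in
 * icache_stale_mask, it flushes its local I$.
 */
#if 0	/* sketch only, never compiled */
static inline void flush_icache_deferred(struct mm_struct *mm)
{
        unsigned int cpu = smp_processor_id();
        cpumask_t *mask = &mm->context.icache_stale_mask;

        if (cpumask_test_cpu(cpu, mask)) {
                cpumask_clear_cpu(cpu, mask);
                /*
                 * Pairs with the smp_mb() in flush_icache_mm(): make the
                 * remote hart's stores visible before any code from this
                 * MM runs here.
                 */
                smp_mb();
                local_flush_icache_all();
        }
}
#endif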