linux/arch/tile/kernel/smp.c
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * TILE SMP support routines.
 */

#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irq_work.h>
#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/homecache.h>

/*
 * We write to width and height with a single store in head_NN.S,
 * so make the variable aligned to "long".
 */
HV_Topology smp_topology __ro_after_init __aligned(sizeof(long));
EXPORT_SYMBOL(smp_topology);

#if CHIP_HAS_IPI()
static unsigned long __iomem *ipi_mappings[NR_CPUS];
#endif

/*
 * Does messaging work correctly to the local cpu?  Probed at boot in
 * ipi_init(); hypervisors older than 4.3.4 cannot deliver a message
 * back to the cpu that sent it.
 */
bool self_interrupt_ok;

/*
 * Top-level send_IPI*() functions to send messages to other cpus.
 */

/* Set by smp_send_stop() to avoid recursive panics. */
static int stopping_cpus;

static void __send_IPI_many(HV_Recipient *recip, int nrecip, int tag)
{
        int sent = 0;
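        /*
         * hv_send_message() may accept only a subset of the recipients
         * on each call (rc is the number accepted), so keep retrying
         * until all nrecip recipients have been handed to the
         * hypervisor.
         */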
        while (sent < nrecip) {
                int rc = hv_send_message(recip, nrecip,
                                         (HV_VirtAddr)&tag, sizeof(tag));
                if (rc < 0) {
                        if (!stopping_cpus)  /* avoid recursive panic */
                                panic("hv_send_message returned %d", rc);
                        break;
                }
                WARN_ONCE(rc == 0, "hv_send_message() returned zero\n");
                sent += rc;
        }
}

void send_IPI_single(int cpu, int tag)
{
        HV_Recipient recip = {
                .y = cpu / smp_width,
                .x = cpu % smp_width,
                .state = HV_TO_BE_SENT
        };
        __send_IPI_many(&recip, 1, tag);
}

void send_IPI_many(const struct cpumask *mask, int tag)
{
        HV_Recipient recip[NR_CPUS];
        int cpu;
        int nrecip = 0;
        int my_cpu = smp_processor_id();
        for_each_cpu(cpu, mask) {
                HV_Recipient *r;
                BUG_ON(cpu == my_cpu);
                r = &recip[nrecip++];
                r->y = cpu / smp_width;
                r->x = cpu % smp_width;
                r->state = HV_TO_BE_SENT;
        }
        __send_IPI_many(recip, nrecip, tag);
}

void send_IPI_allbutself(int tag)
{
        struct cpumask mask;
        cpumask_copy(&mask, cpu_online_mask);
        cpumask_clear_cpu(smp_processor_id(), &mask);
        send_IPI_many(&mask, tag);
}

/*
 * Functions related to starting/stopping cpus.
 */

/* Handler to start the current cpu. */
static void smp_start_cpu_interrupt(void)
{
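        /*
         * Redirect the interrupt's return path: when this handler
         * returns, the cpu resumes at start_cpu_function_addr (set up
         * by the boot path) rather than at the interrupted pc.
         */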
        get_irq_regs()->pc = start_cpu_function_addr;
}

/* Handler to stop the current cpu. */
static void smp_stop_cpu_interrupt(void)
{
        arch_local_irq_disable_all();
        set_cpu_online(smp_processor_id(), false);
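        /*
         * "nap" idles the core until the next interrupt; since all
         * interrupts were just disabled, this parks the cpu for good.
         */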
        for (;;)
                asm("nap; nop");
}

/* This function calls the 'stop' function on all other CPUs in the system. */
void smp_send_stop(void)
{
        stopping_cpus = 1;
        send_IPI_allbutself(MSG_TAG_STOP_CPU);
}

/* On panic, just wait; we may get an smp_send_stop() later on. */
void panic_smp_self_stop(void)
{
        while (1)
                asm("nap; nop");
}

/*
 * Dispatch code called from hv_message_intr() for HV_MSG_TILE hv messages.
 */
void evaluate_message(int tag)
{
        switch (tag) {
        case MSG_TAG_START_CPU: /* Start up a cpu */
                smp_start_cpu_interrupt();
                break;

        case MSG_TAG_STOP_CPU: /* Sent to shut down slave CPUs */
                smp_stop_cpu_interrupt();
                break;

        case MSG_TAG_CALL_FUNCTION_MANY: /* Call function on cpumask */
                generic_smp_call_function_interrupt();
                break;

        case MSG_TAG_CALL_FUNCTION_SINGLE: /* Call function on one other CPU */
                generic_smp_call_function_single_interrupt();
                break;

        case MSG_TAG_IRQ_WORK: /* Invoke IRQ work */
                irq_work_run();
                break;

        default:
                panic("Unknown IPI message tag %d", tag);
                break;
        }
}


/*
 * flush_icache_range() code uses smp_call_function().
 */

struct ipi_flush {
        unsigned long start;
        unsigned long end;
};

static void ipi_flush_icache_range(void *info)
{
        struct ipi_flush *flush = (struct ipi_flush *)info;
        __flush_icache_range(flush->start, flush->end);
}

void flush_icache_range(unsigned long start, unsigned long end)
{
        struct ipi_flush flush = { start, end };

        /*
         * If invoked with irqs disabled, we cannot issue IPIs; fall
         * back to a hypervisor-assisted icache flush (flush_remote()),
         * which does not require the remote cpus to take an interrupt.
         */
        if (irqs_disabled()) {
                flush_remote(0, HV_FLUSH_EVICT_L1I, NULL, 0, 0, 0,
                             NULL, NULL, 0);
        } else {
                preempt_disable();
                on_each_cpu(ipi_flush_icache_range, &flush, 1);
                preempt_enable();
        }
}
EXPORT_SYMBOL(flush_icache_range);


#ifdef CONFIG_IRQ_WORK
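/*
 * Raise an irq_work interrupt on the local cpu.  This only works when
 * the hypervisor can deliver self-directed messages (self_interrupt_ok
 * above); otherwise pending irq_work is picked up later, e.g. at the
 * next timer tick.
 */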
void arch_irq_work_raise(void)
{
        if (arch_irq_work_has_interrupt())
                send_IPI_single(smp_processor_id(), MSG_TAG_IRQ_WORK);
}
#endif


/* Called when smp_send_reschedule() triggers IRQ_RESCHEDULE. */
static irqreturn_t handle_reschedule_ipi(int irq, void *token)
{
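        /* Account the IPI per-cpu, then run the scheduler's IPI hook. */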
        __this_cpu_inc(irq_stat.irq_resched_count);
        scheduler_ipi();

        return IRQ_HANDLED;
}

static struct irqaction resched_action = {
        .handler = handle_reschedule_ipi,
        .name = "resched",
        .dev_id = handle_reschedule_ipi /* unique token */,
};

void __init ipi_init(void)
{
        int cpu = smp_processor_id();
        HV_Recipient recip = { .y = cpu_y(cpu), .x = cpu_x(cpu),
                               .state = HV_TO_BE_SENT };
        int tag = MSG_TAG_CALL_FUNCTION_SINGLE;

        /*
         * Test if we can message ourselves for arch_irq_work_raise.
         * This functionality is only available in the Tilera hypervisor
         * in versions 4.3.4 and later.
         */
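        /* A return value of 1 means our single recipient was accepted. */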
        if (hv_send_message(&recip, 1, (HV_VirtAddr)&tag, sizeof(tag)) == 1)
                self_interrupt_ok = true;
        else
                pr_warn("Older hypervisor: disabling fast irq_work_raise\n");

#if CHIP_HAS_IPI()
        /* Map IPI trigger MMIO addresses. */
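        /*
         * Each tile exposes a page of IPI trigger words: a store to
         * word N of the mapping raises IRQ N on that cpu (see
         * smp_send_reschedule() below).
         */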
        for_each_possible_cpu(cpu) {
                HV_Coord tile;
                HV_PTE pte;
                unsigned long offset;

                tile.x = cpu_x(cpu);
                tile.y = cpu_y(cpu);
                if (hv_get_ipi_pte(tile, KERNEL_PL, &pte) != 0)
                        panic("Failed to initialize IPI for cpu %d\n", cpu);

                offset = PFN_PHYS(pte_pfn(pte));
                ipi_mappings[cpu] = ioremap_prot(offset, PAGE_SIZE, pte);
        }
#endif

        /* Bind handle_reschedule_ipi() to IRQ_RESCHEDULE. */
        tile_irq_activate(IRQ_RESCHEDULE, TILE_IRQ_PERCPU);
        BUG_ON(setup_irq(IRQ_RESCHEDULE, &resched_action));
}

#if CHIP_HAS_IPI()

void smp_send_reschedule(int cpu)
{
        WARN_ON(cpu_is_offline(cpu));

        /*
         * We just want to do an MMIO store.  The traditional writeq()
         * functions aren't really correct here, since they're always
         * directed at the PCI shim.  For now, just do a raw store,
         * casting away the __iomem attribute.
         */
        ((unsigned long __force *)ipi_mappings[cpu])[IRQ_RESCHEDULE] = 0;
}

#else

void smp_send_reschedule(int cpu)
{
        HV_Coord coord;

        WARN_ON(cpu_is_offline(cpu));

        coord.y = cpu_y(cpu);
        coord.x = cpu_x(cpu);
        hv_trigger_ipi(coord, IRQ_RESCHEDULE);
}

#endif /* CHIP_HAS_IPI() */