/* linux/arch/mips/mti-malta/malta-smtc.c */
   1/*
   2 * Malta Platform-specific hooks for SMP operation
   3 */
   4#include <linux/irq.h>
   5#include <linux/init.h>
   6
   7#include <asm/mipsregs.h>
   8#include <asm/mipsmtregs.h>
   9#include <asm/smtc.h>
  10#include <asm/smtc_ipi.h>
  11
  12/* VPE/SMP Prototype implements platform interfaces directly */
  13
/*
 * Cause the specified action to be performed on a targeted "CPU"
 *
 * Delivers a Linux SMP IPI carrying @action to logical CPU @cpu via the
 * generic SMTC IPI mechanism.  Under SMTC a logical CPU may be a TC on
 * the same VPE, a different VPE on the same core, or a different CPU;
 * smtc_send_ipi() handles the routing in all cases.
 */

static void msmtc_send_ipi_single(int cpu, unsigned int action)
{
	/* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */
	smtc_send_ipi(cpu, LINUX_SMP_IPI, action);
}
  23
  24static void msmtc_send_ipi_mask(const struct cpumask *mask, unsigned int action)
  25{
  26        unsigned int i;
  27
  28        for_each_cpu(i, mask)
  29                msmtc_send_ipi_single(i, action);
  30}
  31
/*
 * Post-config but pre-boot cleanup entry point
 *
 * On Malta, board I/O interrupts arrive on IP2 and, by convention, are
 * serviced only by VPE0 (see the affinity-hook comment below).  A TC
 * coming up on a non-zero VPE therefore clears all Status.IM bits and
 * re-enables only the timer, IPI and (if present) performance-counter
 * interrupt lines before handing off to the generic SMTC secondary
 * initialization.
 */
static void __cpuinit msmtc_init_secondary(void)
{
	void smtc_init_secondary(void);	/* local prototype; not in a visible header */
	int myvpe;

	/* Don't enable Malta I/O interrupts (IP2) for secondary VPEs */
	myvpe = read_c0_tcbind() & TCBIND_CURVPE;
	if (myvpe != 0) {
		/* Ideally, this should be done only once per VPE, but... */
		clear_c0_status(ST0_IM);
		set_c0_status((0x100 << cp0_compare_irq)
				| (0x100 << MIPS_CPU_IPI_IRQ));
		if (cp0_perfcount_irq >= 0)
			set_c0_status(0x100 << cp0_perfcount_irq);
	}

	smtc_init_secondary();
}
  53
/*
 * Platform "CPU" startup hook
 *
 * Thin pass-through to the generic SMTC secondary-boot routine; the
 * Malta platform needs no additional per-board work here.
 */
static void __cpuinit msmtc_boot_secondary(int cpu, struct task_struct *idle)
{
	smtc_boot_secondary(cpu, idle);
}
  61
/*
 * SMP initialization finalization entry point
 *
 * Delegates entirely to the generic SMTC finalization; no Malta-specific
 * steps are required.
 */
static void __cpuinit msmtc_smp_finish(void)
{
	smtc_smp_finish();
}
  69
/*
 * Hook for after all CPUs are online
 *
 * Intentionally empty: Malta/SMTC has no post-online work to do, but the
 * plat_smp_ops interface requires the hook to exist.
 */

static void msmtc_cpus_done(void)
{
}
  77
/*
 * Platform SMP pre-initialization
 *
 * As noted above, we can assume a single CPU for now
 * but it may be multithreaded.
 */

static void __init msmtc_smp_setup(void)
{
	/*
	 * we won't get the definitive value until
	 * we've run smtc_prepare_cpus later, but
	 * we would appear to need an upper bound now.
	 */
	smp_num_siblings = smtc_build_cpu_map(0);
}
  94
/* Prepare up to @max_cpus logical CPUs; pure pass-through to generic SMTC. */
static void __init msmtc_prepare_cpus(unsigned int max_cpus)
{
	smtc_prepare_cpus(max_cpus);
}
  99
/*
 * Platform SMP operations table registered with the MIPS SMP core;
 * every hook simply forwards to (or lightly wraps) the generic SMTC
 * implementation above.
 */
struct plat_smp_ops msmtc_smp_ops = {
	.send_ipi_single	= msmtc_send_ipi_single,
	.send_ipi_mask		= msmtc_send_ipi_mask,
	.init_secondary		= msmtc_init_secondary,
	.smp_finish		= msmtc_smp_finish,
	.cpus_done		= msmtc_cpus_done,
	.boot_secondary		= msmtc_boot_secondary,
	.smp_setup		= msmtc_smp_setup,
	.prepare_cpus		= msmtc_prepare_cpus,
};
 110
 111#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
 112/*
 113 * IRQ affinity hook
 114 */
 115
 116
 117int plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
 118{
 119        cpumask_t tmask;
 120        int cpu = 0;
 121        void smtc_set_irq_affinity(unsigned int irq, cpumask_t aff);
 122
 123        /*
 124         * On the legacy Malta development board, all I/O interrupts
 125         * are routed through the 8259 and combined in a single signal
 126         * to the CPU daughterboard, and on the CoreFPGA2/3 34K models,
 127         * that signal is brought to IP2 of both VPEs. To avoid racing
 128         * concurrent interrupt service events, IP2 is enabled only on
 129         * one VPE, by convention VPE0.  So long as no bits are ever
 130         * cleared in the affinity mask, there will never be any
 131         * interrupt forwarding.  But as soon as a program or operator
 132         * sets affinity for one of the related IRQs, we need to make
 133         * sure that we don't ever try to forward across the VPE boundry,
 134         * at least not until we engineer a system where the interrupt
 135         * _ack() or _end() function can somehow know that it corresponds
 136         * to an interrupt taken on another VPE, and perform the appropriate
 137         * restoration of Status.IM state using MFTR/MTTR instead of the
 138         * normal local behavior. We also ensure that no attempt will
 139         * be made to forward to an offline "CPU".
 140         */
 141
 142        cpumask_copy(&tmask, affinity);
 143        for_each_cpu(cpu, affinity) {
 144                if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu))
 145                        cpu_clear(cpu, tmask);
 146        }
 147        cpumask_copy(irq_desc[irq].affinity, &tmask);
 148
 149        if (cpus_empty(tmask))
 150                /*
 151                 * We could restore a default mask here, but the
 152                 * runtime code can anyway deal with the null set
 153                 */
 154                printk(KERN_WARNING
 155                        "IRQ affinity leaves no legal CPU for IRQ %d\n", irq);
 156
 157        /* Do any generic SMTC IRQ affinity setup */
 158        smtc_set_irq_affinity(irq, tmask);
 159
 160        return 0;
 161}
 162#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
 163