linux/arch/mips/kernel/smp-mt.c
/*
 *  This program is free software; you can distribute it and/or modify it
 *  under the terms of the GNU General Public License (Version 2) as
 *  published by the Free Software Foundation.
 *
 *  This program is distributed in the hope it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Copyright (C) 2004, 05, 06 MIPS Technologies, Inc.
 *    Elizabeth Clarke (beth@mips.com)
 *    Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/smp.h>

#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/mips_mt.h>

static void __init smvp_copy_vpe_config(void)
{
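        /*
         * Bring the new VPE up in a known state: clearing KSU selects
         * kernel mode, clearing IE/IM leaves interrupts disabled and
         * masked, and CU0 keeps coprocessor 0 usable.
         */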
        write_vpe_c0_status(
                (read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0);

        /* set Config to be the same as VPE0, particularly the KSEG0 coherency alg */
        write_vpe_c0_config(read_c0_config());

        /* make sure there are no software interrupts pending */
        write_vpe_c0_cause(0);

        /* Propagate Config7 */
        write_vpe_c0_config7(read_c0_config7());

        write_vpe_c0_count(read_c0_count());
}

static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0,
        unsigned int ncpu)
{
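        /*
         * MVPConf0.PVPE holds the index of the last VPE, so a TC numbered
         * beyond it has no VPE of its own to set up (e.g. with PVPE = 1,
         * i.e. two VPEs, only TCs 0 and 1 pass this check).
         */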
        if (tc > ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT))
                return ncpu;

        /* Deactivate all but VPE 0 */
        if (tc != 0) {
                unsigned long tmp = read_vpe_c0_vpeconf0();

                tmp &= ~VPECONF0_VPA;

                /* master VPE */
                tmp |= VPECONF0_MVP;
                write_vpe_c0_vpeconf0(tmp);

                /* Record this as an available CPU */
                set_cpu_possible(tc, true);
                __cpu_number_map[tc]    = ++ncpu;
                __cpu_logical_map[ncpu] = tc;
        }

        /* Disable multi-threading with TCs */
        write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);

        if (tc != 0)
                smvp_copy_vpe_config();

        return ncpu;
}

static void __init smvp_tc_init(unsigned int tc, unsigned int mvpconf0)
{
        unsigned long tmp;

        if (!tc)
                return;

        /* Bind a TC to each VPE; may as well put all excess TCs
           on the last VPE */
        if (tc >= (((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1))
                write_tc_c0_tcbind(read_tc_c0_tcbind() | ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT));
        else {
                write_tc_c0_tcbind(read_tc_c0_tcbind() | tc);

                /* and set XTC */
                write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | (tc << VPECONF0_XTC_SHIFT));
        }

        tmp = read_tc_c0_tcstatus();

        /* mark not allocated and not dynamically allocatable */
        tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
        tmp |= TCSTATUS_IXMT;           /* interrupt exempt */
        write_tc_c0_tcstatus(tmp);

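        /* halt the TC; vsmp_boot_secondary() clears this to set it running */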
        write_tc_c0_tchalt(TCHALT_H);
}

static void vsmp_send_ipi_single(int cpu, unsigned int action)
{
        int i;
        unsigned long flags;
        int vpflags;

        local_irq_save(flags);

        vpflags = dvpe();       /* can't access the other CPU's registers while MVPE enabled */

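        /*
         * The target VPE's two software interrupt bits serve as the IPI
         * mechanism: C_SW0 raises Cause.IP0 and C_SW1 raises Cause.IP1,
         * which vsmp_init_secondary() unmasks in the non-GIC case.
         */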
        switch (action) {
        case SMP_CALL_FUNCTION:
                i = C_SW1;
                break;

        case SMP_RESCHEDULE_YOURSELF:
        default:
                i = C_SW0;
                break;
        }

        /* 1:1 mapping of vpe and tc... */
        settc(cpu);
        write_vpe_c0_cause(read_vpe_c0_cause() | i);
        evpe(vpflags);

        local_irq_restore(flags);
}

static void vsmp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
        unsigned int i;

        for_each_cpu(i, mask)
                vsmp_send_ipi_single(i, action);
}

static void __cpuinit vsmp_init_secondary(void)
{
        extern int gic_present;

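        /*
         * Unmask the lines the IPIs from vsmp_send_ipi_single() arrive on:
         * SW0/SW1 land on IP0/IP1 when routed through the CPU, or on
         * IP3/IP4 when an external GIC delivers them.
         */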
        /* This is Malta specific: IPI, performance and timer interrupts */
        if (gic_present)
                change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 |
                                         STATUSF_IP6 | STATUSF_IP7);
        else
                change_c0_status(ST0_IM, STATUSF_IP0 | STATUSF_IP1 |
                                         STATUSF_IP6 | STATUSF_IP7);
}

static void __cpuinit vsmp_smp_finish(void)
{
        /* CDFIXME: remove this? */
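        /* this arms the first local timer interrupt roughly eight jiffies
           out: mips_hpt_frequency counts per second, so freq/HZ is one jiffy */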
        write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));

#ifdef CONFIG_MIPS_MT_FPAFF
        /* If we have an FPU, enroll ourselves in the FPU-full mask */
        if (cpu_has_fpu)
                cpu_set(smp_processor_id(), mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */

        local_irq_enable();
}

static void vsmp_cpus_done(void)
{
}

/*
 * Set up the PC, SP, and GP of a secondary processor and start it
 * running!
 * smp_bootstrap is the place to resume from
 * __KSTK_TOS(idle) is apparently the stack pointer
 * (unsigned long)idle->thread_info is the gp
 * assumes a 1:1 mapping of TC => VPE
 */
static void __cpuinit vsmp_boot_secondary(int cpu, struct task_struct *idle)
{
        struct thread_info *gp = task_thread_info(idle);
        dvpe();
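        /* enter configuration state so that another TC's registers can be
           written through the TC mirror selected by settc() below */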
        set_c0_mvpcontrol(MVPCONTROL_VPC);

        settc(cpu);

        /* restart */
        write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);

        /* enable the TC this VPE/CPU will be running */
        write_tc_c0_tcstatus((read_tc_c0_tcstatus() & ~TCSTATUS_IXMT) | TCSTATUS_A);

        write_tc_c0_tchalt(0);

        /* enable the VPE */
        write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);

        /* stack pointer */
        write_tc_gpr_sp(__KSTK_TOS(idle));

        /* global pointer */
        write_tc_gpr_gp((unsigned long)gp);

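        /* flush_icache_range() also writes the D-cache back over this range,
           so the idle thread's thread_info reaches memory before the new TC
           reads it */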
        flush_icache_range((unsigned long)gp,
                           (unsigned long)(gp + sizeof(struct thread_info)));

        /* finally out of configuration and into chaos */
        clear_c0_mvpcontrol(MVPCONTROL_VPC);

        evpe(EVPE_ENABLE);
}

/*
 * Common setup before any secondaries are started
 * Make sure all CPUs are in a sensible state before we boot any of the
 * secondaries
 */
static void __init vsmp_smp_setup(void)
{
        unsigned int mvpconf0, ntc, tc, ncpu = 0;
        unsigned int nvpe;

#ifdef CONFIG_MIPS_MT_FPAFF
        /* If we have an FPU, enroll ourselves in the FPU-full mask */
        if (cpu_has_fpu)
                cpu_set(0, mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
        if (!cpu_has_mipsmt)
                return;

        /* disable MT so we can configure */
        dvpe();
        dmt();

        /* Put MVPEs into 'configuration state' */
        set_c0_mvpcontrol(MVPCONTROL_VPC);

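        /*
         * MVPConf0.PTC and MVPConf0.PVPE hold the index of the last TC and
         * the last VPE respectively, hence the +1 for nvpe and the inclusive
         * "tc <= ntc" loop bound below.
         */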
        mvpconf0 = read_c0_mvpconf0();
        ntc = (mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT;

        nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
        smp_num_siblings = nvpe;

        /* we'll always have more TCs than VPEs, so loop setting everything
           to a sensible state */
        for (tc = 0; tc <= ntc; tc++) {
                settc(tc);

                smvp_tc_init(tc, mvpconf0);
                ncpu = smvp_vpe_init(tc, mvpconf0, ncpu);
        }

        /* Release config state */
        clear_c0_mvpcontrol(MVPCONTROL_VPC);

        /* We'll wait until starting the secondaries before starting MVPE */

        printk(KERN_INFO "Detected %i available secondary CPU(s)\n", ncpu);
}


static void __init vsmp_prepare_cpus(unsigned int max_cpus)
{
        mips_mt_set_cpuoptions();
}

struct plat_smp_ops vsmp_smp_ops = {
        .send_ipi_single        = vsmp_send_ipi_single,
        .send_ipi_mask          = vsmp_send_ipi_mask,
        .init_secondary         = vsmp_init_secondary,
        .smp_finish             = vsmp_smp_finish,
        .cpus_done              = vsmp_cpus_done,
        .boot_secondary         = vsmp_boot_secondary,
        .smp_setup              = vsmp_smp_setup,
        .prepare_cpus           = vsmp_prepare_cpus,
};
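
/*
 * Note: this ops structure is expected to be handed to register_smp_ops()
 * by platform code (e.g. the Malta board setup) during early boot.
 */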