/*
 * linux/arch/mips/kernel/smp-cps.c
 *
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <paul.burton@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/irqchip/mips-gic.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>

#include <asm/bcache.h>
#include <asm/mips-cm.h>
#include <asm/mips-cpc.h>
#include <asm/mips_mt.h>
#include <asm/mipsregs.h>
#include <asm/pm-cps.h>
#include <asm/r4kcache.h>
#include <asm/smp-cps.h>
#include <asm/time.h>
#include <asm/uasm.h>

static bool threads_disabled;
static DECLARE_BITMAP(core_power, NR_CPUS);

struct core_boot_config *mips_cps_core_bootcfg;

static int __init setup_nothreads(char *s)
{
	threads_disabled = true;
	return 0;
}
early_param("nothreads", setup_nothreads);

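/*
 * Determine the number of VP(E)s to bring up within a core: one if the
 * "nothreads" parameter was given or neither MT nor VP support is
 * available, otherwise one more than the PVPE field of the target core's
 * GCR_Cx_CONFIG register.
 */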
static unsigned core_vpe_count(unsigned core)
{
	unsigned cfg;

	if (threads_disabled)
		return 1;

	if ((!IS_ENABLED(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt)
		&& (!IS_ENABLED(CONFIG_CPU_MIPSR6) || !cpu_has_vp))
		return 1;

	mips_cm_lock_other(core, 0);
	cfg = read_gcr_co_config() & CM_GCR_Cx_CONFIG_PVPE_MSK;
	mips_cm_unlock_other();
	return (cfg >> CM_GCR_Cx_CONFIG_PVPE_SHF) + 1;
}

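/*
 * Detect & record the system topology, mark each VP(E) as a possible &
 * present CPU, then set up core 0 (which we're running on) for coherent
 * multi-core operation.
 */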
static void __init cps_smp_setup(void)
{
	unsigned int ncores, nvpes, core_vpes;
	unsigned long core_entry;
	int c, v;

	/* Detect & record VPE topology */
	ncores = mips_cm_numcores();
	pr_info("%s topology ", cpu_has_mips_r6 ? "VP" : "VPE");
	for (c = nvpes = 0; c < ncores; c++) {
		core_vpes = core_vpe_count(c);
		pr_cont("%c%u", c ? ',' : '{', core_vpes);

		/* Use the number of VPEs in core 0 for smp_num_siblings */
		if (!c)
			smp_num_siblings = core_vpes;

		for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
			cpu_data[nvpes + v].core = c;
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6)
			cpu_data[nvpes + v].vpe_id = v;
#endif
		}

		nvpes += core_vpes;
	}
	pr_cont("} total %u\n", nvpes);

	/* Indicate present CPUs (CPU being synonymous with VPE) */
	for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
		set_cpu_possible(v, true);
		set_cpu_present(v, true);
		__cpu_number_map[v] = v;
		__cpu_logical_map[v] = v;
	}

	/* Set a coherent default CCA (CWB) */
	change_c0_config(CONF_CM_CMASK, 0x5);

	/* Core 0 is powered up (we're running on it) */
	bitmap_set(core_power, 0, 1);

	/* Initialise core 0 */
	mips_cps_core_init();

	/* Make core 0 coherent with everything */
	write_gcr_cl_coherence(0xff);

	if (mips_cm_revision() >= CM_REV_CM3) {
		core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
		write_gcr_bev_base(core_entry);
	}

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpumask_set_cpu(0, &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
}

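/*
 * Prepare for secondary CPU bringup: limit SMP to a single core if the
 * current CCA is not coherent, patch the boot vector to pass that CCA to
 * secondary CPUs, & allocate the per-core & per-VPE boot configuration
 * structures consumed during bringup.
 */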
static void __init cps_prepare_cpus(unsigned int max_cpus)
{
	unsigned ncores, core_vpes, c, cca;
	bool cca_unsuitable;
	u32 *entry_code;

	mips_mt_set_cpuoptions();

	/* Detect whether the CCA is unsuited to multi-core SMP */
	cca = read_c0_config() & CONF_CM_CMASK;
	switch (cca) {
	case 0x4: /* CWBE */
	case 0x5: /* CWB */
		/* The CCA is coherent, multi-core is fine */
		cca_unsuitable = false;
		break;

	default:
		/* CCA is not coherent, multi-core is not usable */
		cca_unsuitable = true;
	}

	/* Warn the user if the CCA prevents multi-core */
	ncores = mips_cm_numcores();
	if (cca_unsuitable && ncores > 1) {
		pr_warn("Using only one core due to unsuitable CCA 0x%x\n",
			cca);

		for_each_present_cpu(c) {
			if (cpu_data[c].core)
				set_cpu_present(c, false);
		}
	}

	/*
	 * Patch the start of mips_cps_core_entry to provide:
	 *
	 * s0 = kseg0 CCA
	 */
	entry_code = (u32 *)&mips_cps_core_entry;
	uasm_i_addiu(&entry_code, 16, 0, cca);	/* addiu $s0, $zero, cca */
	blast_dcache_range((unsigned long)&mips_cps_core_entry,
			   (unsigned long)entry_code);
	bc_wback_inv((unsigned long)&mips_cps_core_entry,
		     (void *)entry_code - (void *)&mips_cps_core_entry);
	__sync();

	/* Allocate core boot configuration structs */
	mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg),
					GFP_KERNEL);
	if (!mips_cps_core_bootcfg) {
		pr_err("Failed to allocate boot config for %u cores\n", ncores);
		goto err_out;
	}

	/* Allocate VPE boot configuration structs */
	for (c = 0; c < ncores; c++) {
		core_vpes = core_vpe_count(c);
		mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes,
				sizeof(*mips_cps_core_bootcfg[c].vpe_config),
				GFP_KERNEL);
		if (!mips_cps_core_bootcfg[c].vpe_config) {
			pr_err("Failed to allocate %u VPE boot configs\n",
			       core_vpes);
			goto err_out;
		}
	}

	/* Mark this CPU as booted */
	atomic_set(&mips_cps_core_bootcfg[current_cpu_data.core].vpe_mask,
		   1 << cpu_vpe_id(&current_cpu_data));

	return;
err_out:
	/* Clean up allocations */
	if (mips_cps_core_bootcfg) {
		for (c = 0; c < ncores; c++)
			kfree(mips_cps_core_bootcfg[c].vpe_config);
		kfree(mips_cps_core_bootcfg);
		mips_cps_core_bootcfg = NULL;
	}

	/* Effectively disable SMP by declaring CPUs not present */
	for_each_possible_cpu(c) {
		if (c == 0)
			continue;
		set_cpu_present(c, false);
	}
}

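/*
 * Power up & reset a currently powered down core such that it begins
 * executing from mips_cps_core_entry. Where a CPC is present it sequences
 * the reset & we wait for the core to reach coherent execution; otherwise
 * the core is simply released from reset via the CM.
 */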
static void boot_core(unsigned int core, unsigned int vpe_id)
{
	u32 access, stat, seq_state;
	unsigned timeout;

	/* Select the appropriate core */
	mips_cm_lock_other(core, 0);

	/* Set its reset vector */
	write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry));

	/* Ensure its coherency is disabled */
	write_gcr_co_coherence(0);

	/* Start it with the legacy memory map and exception base */
	write_gcr_co_reset_ext_base(CM_GCR_RESET_EXT_BASE_UEB);

	/* Ensure the core can access the GCRs */
	access = read_gcr_access();
	access |= 1 << (CM_GCR_ACCESS_ACCESSEN_SHF + core);
	write_gcr_access(access);

	if (mips_cpc_present()) {
		/* Reset the core */
		mips_cpc_lock_other(core);

		if (mips_cm_revision() >= CM_REV_CM3) {
			/* Run only the requested VP following the reset */
			write_cpc_co_vp_stop(0xf);
			write_cpc_co_vp_run(1 << vpe_id);

			/*
			 * Ensure that the VP_RUN register is written before the
			 * core leaves reset.
			 */
			wmb();
		}

		write_cpc_co_cmd(CPC_Cx_CMD_RESET);

		timeout = 100;
		while (true) {
			stat = read_cpc_co_stat_conf();
			seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE_MSK;

			/* U6 == coherent execution, ie. the core is up */
			if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U6)
				break;

			/* Delay a little while before we start warning */
			if (timeout) {
				timeout--;
				mdelay(10);
				continue;
			}

			pr_warn("Waiting for core %u to start... STAT_CONF=0x%x\n",
				core, stat);
			mdelay(1000);
		}

		mips_cpc_unlock_other();
	} else {
		/* Take the core out of reset */
		write_gcr_co_reset_release(0);
	}

	mips_cm_unlock_other();

	/* The core is now powered up */
	bitmap_set(core_power, core, 1);
}

static void remote_vpe_boot(void *dummy)
{
	unsigned core = current_cpu_data.core;
	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];

	mips_cps_boot_vpes(core_cfg, cpu_vpe_id(&current_cpu_data));
}

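/*
 * Bring up the VP(E) backing the given CPU: record the register state it
 * should start with, then either power up its core, ask an online CPU
 * within the target core to start it, or start it directly if it shares
 * a core with the current CPU.
 */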
static void cps_boot_secondary(int cpu, struct task_struct *idle)
{
	unsigned core = cpu_data[cpu].core;
	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
	struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id];
	unsigned long core_entry;
	unsigned int remote;
	int err;

	vpe_cfg->pc = (unsigned long)&smp_bootstrap;
	vpe_cfg->sp = __KSTK_TOS(idle);
	vpe_cfg->gp = (unsigned long)task_thread_info(idle);

	atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask);

	preempt_disable();

	if (!test_bit(core, core_power)) {
		/* Boot a VPE on a powered down core */
		boot_core(core, vpe_id);
		goto out;
	}

	if (cpu_has_vp) {
		mips_cm_lock_other(core, vpe_id);
		core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
		write_gcr_co_reset_base(core_entry);
		mips_cm_unlock_other();
	}

	if (core != current_cpu_data.core) {
		/* Boot a VPE on another powered up core */
		for (remote = 0; remote < NR_CPUS; remote++) {
			if (cpu_data[remote].core != core)
				continue;
			if (cpu_online(remote))
				break;
		}
		BUG_ON(remote >= NR_CPUS);

		err = smp_call_function_single(remote, remote_vpe_boot,
					       NULL, 1);
		if (err)
			panic("Failed to call remote CPU\n");
		goto out;
	}

	BUG_ON(!cpu_has_mipsmt && !cpu_has_vp);

	/* Boot a VPE on this core */
	mips_cps_boot_vpes(core_cfg, vpe_id);
out:
	preempt_enable();
}

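/*
 * Early initialisation running on the secondary CPU itself: run only one
 * TC per VPE, check that the GIC's view of our VP ID matches our own, &
 * unmask the CPU interrupt lines (IP2-7) used by the GIC unless an
 * external interrupt controller is in use.
 */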
static void cps_init_secondary(void)
{
	/* Disable MT - we only want to run 1 TC per VPE */
	if (cpu_has_mipsmt)
		dmt();

	if (mips_cm_revision() >= CM_REV_CM3) {
		unsigned ident = gic_read_local_vp_id();

		/*
		 * Ensure that our calculation of the VP ID matches up with
		 * what the GIC reports, otherwise we'll have configured
		 * interrupts incorrectly.
		 */
		BUG_ON(ident != mips_cm_vp_id(smp_processor_id()));
	}

	if (cpu_has_veic)
		clear_c0_status(ST0_IM);
	else
		change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 |
					 STATUSF_IP4 | STATUSF_IP5 |
					 STATUSF_IP6 | STATUSF_IP7);
}

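/*
 * Final bringup step on the secondary CPU: arm the first count/compare
 * timer interrupt roughly 8 ticks from now, then enable interrupts.
 */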
static void cps_smp_finish(void)
{
	write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */

	local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU

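/*
 * Check whether this CPU may be offlined: CPU0 must remain online, & the
 * offline path requires that the PM code is able to power gate the core.
 */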
static int cps_cpu_disable(void)
{
	unsigned cpu = smp_processor_id();
	struct core_boot_config *core_cfg;

	if (!cpu)
		return -EBUSY;

	if (!cps_pm_support_state(CPS_PM_POWER_GATED))
		return -EINVAL;

	core_cfg = &mips_cps_core_bootcfg[current_cpu_data.core];
	atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
	smp_mb__after_atomic();
	set_cpu_online(cpu, false);
	calculate_cpu_foreign_map();
	cpumask_clear_cpu(cpu, &cpu_callin_map);

	return 0;
}

static DECLARE_COMPLETION(cpu_death_chosen);
static unsigned cpu_death_sibling;
static enum {
	CPU_DEATH_HALT,
	CPU_DEATH_POWER,
} cpu_death;

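/*
 * Runs on the dying CPU with interrupts disabled. If another VP(E) within
 * the core remains online we simply halt this TC/VP, otherwise we gate
 * power to the whole core. Either way this function should never return.
 */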
void play_dead(void)
{
	unsigned int cpu, core, vpe_id;

	local_irq_disable();
	idle_task_exit();
	cpu = smp_processor_id();
	core = cpu_data[cpu].core;
	cpu_death = CPU_DEATH_POWER;

	pr_debug("CPU%d going offline\n", cpu);

	if (cpu_has_mipsmt || cpu_has_vp) {
		/* Look for another online VPE within the core */
		for_each_online_cpu(cpu_death_sibling) {
			if (cpu_data[cpu_death_sibling].core != core)
				continue;

			/*
			 * There is an online VPE within the core. Just halt
			 * this TC and leave the core alone.
			 */
			cpu_death = CPU_DEATH_HALT;
			break;
		}
	}

	/* This CPU has chosen its way out */
	complete(&cpu_death_chosen);

	if (cpu_death == CPU_DEATH_HALT) {
		vpe_id = cpu_vpe_id(&cpu_data[cpu]);

		pr_debug("Halting core %d VP%d\n", core, vpe_id);
		if (cpu_has_mipsmt) {
			/* Halt this TC */
			write_c0_tchalt(TCHALT_H);
			instruction_hazard();
		} else if (cpu_has_vp) {
			write_cpc_cl_vp_stop(1 << vpe_id);

			/* Ensure that the VP_STOP register is written */
			wmb();
		}
	} else {
		pr_debug("Gating power to core %d\n", core);
		/* Power down the core */
		cps_pm_enter_state(CPS_PM_POWER_GATED);
	}

	/* This should never be reached */
	panic("Failed to offline CPU %u", cpu);
}

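/*
 * Runs via smp_call_function_single() on a sibling CPU of the dying one,
 * since only a CPU within the same core can access the dying TC's
 * registers. Spins until the dying TC has set its halt bit.
 */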
static void wait_for_sibling_halt(void *ptr_cpu)
{
	unsigned cpu = (unsigned long)ptr_cpu;
	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	unsigned halted;
	unsigned long flags;

	do {
		local_irq_save(flags);
		settc(vpe_id);
		halted = read_tc_c0_tchalt();
		local_irq_restore(flags);
	} while (!(halted & TCHALT_H));
}

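/*
 * Runs on a remaining online CPU once the dying CPU has signalled its
 * choice of exit path, & waits for that path to complete: the core
 * powering down (or being clock gated when a JTAG probe prevents power
 * down), the TC halting, or the VP stopping.
 */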
static void cps_cpu_die(unsigned int cpu)
{
	unsigned core = cpu_data[cpu].core;
	unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	unsigned stat;
	int err;

	/* Wait for the cpu to choose its way out */
	if (!wait_for_completion_timeout(&cpu_death_chosen,
					 msecs_to_jiffies(5000))) {
		pr_err("CPU%u: didn't offline\n", cpu);
		return;
	}

	/*
	 * Now wait for the CPU to actually offline. Without doing this, the
	 * offlining may race with one or more of:
	 *
	 *   - Onlining the CPU again.
	 *   - Powering down the core if another VPE within it is offlined.
	 *   - A sibling VPE entering a non-coherent state.
	 *
	 * In the non-MT halt case (ie. infinite loop) the CPU is doing nothing
	 * with which we could race, so do nothing.
	 */
	if (cpu_death == CPU_DEATH_POWER) {
		/*
		 * Wait for the core to enter a powered down or clock gated
		 * state, the latter happening when a JTAG probe is connected,
		 * in which case the CPC will refuse to power down the core.
		 */
		do {
			mips_cm_lock_other(core, 0);
			mips_cpc_lock_other(core);
			stat = read_cpc_co_stat_conf();
			stat &= CPC_Cx_STAT_CONF_SEQSTATE_MSK;
			mips_cpc_unlock_other();
			mips_cm_unlock_other();
		} while (stat != CPC_Cx_STAT_CONF_SEQSTATE_D0 &&
			 stat != CPC_Cx_STAT_CONF_SEQSTATE_D2 &&
			 stat != CPC_Cx_STAT_CONF_SEQSTATE_U2);

		/* Indicate the core is powered off */
		bitmap_clear(core_power, core, 1);
	} else if (cpu_has_mipsmt) {
		/*
		 * Have a CPU with access to the offlined CPU's registers wait
		 * for its TC to halt.
		 */
		err = smp_call_function_single(cpu_death_sibling,
					       wait_for_sibling_halt,
					       (void *)(unsigned long)cpu, 1);
		if (err)
			panic("Failed to call remote sibling CPU\n");
	} else if (cpu_has_vp) {
		do {
			mips_cm_lock_other(core, vpe_id);
			stat = read_cpc_co_vp_running();
			mips_cm_unlock_other();
		} while (stat & (1 << vpe_id));
	}
}

#endif /* CONFIG_HOTPLUG_CPU */

static struct plat_smp_ops cps_smp_ops = {
	.smp_setup		= cps_smp_setup,
	.prepare_cpus		= cps_prepare_cpus,
	.boot_secondary		= cps_boot_secondary,
	.init_secondary		= cps_init_secondary,
	.smp_finish		= cps_smp_finish,
	.send_ipi_single	= mips_smp_send_ipi_single,
	.send_ipi_mask		= mips_smp_send_ipi_mask,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable		= cps_cpu_disable,
	.cpu_die		= cps_cpu_die,
#endif
};

bool mips_cps_smp_in_use(void)
{
	extern struct plat_smp_ops *mp_ops;
	return mp_ops == &cps_smp_ops;
}

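/*
 * Register the CPS SMP implementation, failing if the required hardware
 * (a CM, plus a GIC for IPIs) is absent. An illustrative sketch of how
 * platform setup code might use this, trying CPS first before falling
 * back to other SMP implementations (the exact fallbacks are
 * platform-specific):
 *
 *	if (!register_cps_smp_ops())
 *		return;
 *	if (!register_vsmp_smp_ops())
 *		return;
 *	register_up_smp_ops();
 */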
int register_cps_smp_ops(void)
{
	if (!mips_cm_present()) {
		pr_warn("MIPS CPS SMP unable to proceed without a CM\n");
		return -ENODEV;
	}

	/* check we have a GIC - we need one for IPIs */
	if (!(read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX_MSK)) {
		pr_warn("MIPS CPS SMP unable to proceed without a GIC\n");
		return -ENODEV;
	}

	register_smp_ops(&cps_smp_ops);
	return 0;
}