linux/arch/mips/kernel/smp-cps.c
/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <paul.burton@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/irqchip/mips-gic.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/hotplug.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>

#include <asm/bcache.h>
#include <asm/mips-cm.h>
#include <asm/mips-cpc.h>
#include <asm/mips_mt.h>
#include <asm/mipsregs.h>
#include <asm/pm-cps.h>
#include <asm/r4kcache.h>
#include <asm/smp-cps.h>
#include <asm/time.h>
#include <asm/uasm.h>

static bool threads_disabled;
static DECLARE_BITMAP(core_power, NR_CPUS);

struct core_boot_config *mips_cps_core_bootcfg;

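/*
 * Handle the "nothreads" kernel command line parameter: when it is
 * passed (eg. by appending "nothreads" to the boot arguments) each core
 * is brought up with a single VPE, effectively disabling multi-threading.
 */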
static int __init setup_nothreads(char *s)
{
	threads_disabled = true;
	return 0;
}
early_param("nothreads", setup_nothreads);

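/*
 * Count the VPEs (MT) or VPs (R6) within a core by reading the PVPE
 * field of that core's GCR_Cx_CONFIG register, which holds the VPE
 * count minus one. The CM "other" register window is used so that the
 * register block of any core, not just our own, can be read.
 */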
static unsigned core_vpe_count(unsigned core)
{
	unsigned cfg;

	if (threads_disabled)
		return 1;

	if ((!IS_ENABLED(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt)
		&& (!IS_ENABLED(CONFIG_CPU_MIPSR6) || !cpu_has_vp))
		return 1;

	mips_cm_lock_other(core, 0);
	cfg = read_gcr_co_config() & CM_GCR_Cx_CONFIG_PVPE_MSK;
	mips_cm_unlock_other();
	return (cfg >> CM_GCR_Cx_CONFIG_PVPE_SHF) + 1;
}

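/*
 * Early SMP setup: probe how many VPEs each core contains, record each
 * as a possible & present CPU with an identity logical mapping, then
 * initialise core 0 (the boot core) and join it to the coherent domain.
 */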
static void __init cps_smp_setup(void)
{
	unsigned int ncores, nvpes, core_vpes;
	unsigned long core_entry;
	int c, v;

	/* Detect & record VPE topology */
	ncores = mips_cm_numcores();
	pr_info("%s topology ", cpu_has_mips_r6 ? "VP" : "VPE");
	for (c = nvpes = 0; c < ncores; c++) {
		core_vpes = core_vpe_count(c);
		pr_cont("%c%u", c ? ',' : '{', core_vpes);

		/* Use the number of VPEs in core 0 for smp_num_siblings */
		if (!c)
			smp_num_siblings = core_vpes;

		for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
			cpu_data[nvpes + v].core = c;
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6)
			cpu_data[nvpes + v].vpe_id = v;
#endif
		}

		nvpes += core_vpes;
	}
	pr_cont("} total %u\n", nvpes);

	/* Indicate present CPUs (CPU being synonymous with VPE) */
	for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
		set_cpu_possible(v, true);
		set_cpu_present(v, true);
		__cpu_number_map[v] = v;
		__cpu_logical_map[v] = v;
	}

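	/*
	 * CCA 5 (CWB) is a cacheable, coherent, write-back policy; a
	 * coherent CCA is required for multi-core SMP, which
	 * cps_prepare_cpus() below enforces for secondary cores.
	 */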
	/* Set a coherent default CCA (CWB) */
	change_c0_config(CONF_CM_CMASK, 0x5);

	/* Core 0 is powered up (we're running on it) */
	bitmap_set(core_power, 0, 1);

	/* Initialise core 0 */
	mips_cps_core_init();

	/* Make core 0 coherent with everything */
	write_gcr_cl_coherence(0xff);

	if (mips_cm_revision() >= CM_REV_CM3) {
		core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
		write_gcr_bev_base(core_entry);
	}

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpumask_set_cpu(0, &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
}

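/*
 * Prepare for SMP bringup: check that the CCA permits multi-core
 * operation, patch the secondary entry code with the chosen CCA, and
 * allocate the per-core & per-VPE boot configuration consumed by
 * mips_cps_core_entry.
 */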
static void __init cps_prepare_cpus(unsigned int max_cpus)
{
	unsigned ncores, core_vpes, c, cca;
	bool cca_unsuitable;
	u32 *entry_code;

	mips_mt_set_cpuoptions();

	/* Detect whether the CCA is unsuited to multi-core SMP */
	cca = read_c0_config() & CONF_CM_CMASK;
	switch (cca) {
	case 0x4: /* CWBE */
	case 0x5: /* CWB */
		/* The CCA is coherent, multi-core is fine */
		cca_unsuitable = false;
		break;

	default:
		/* CCA is not coherent, multi-core is not usable */
		cca_unsuitable = true;
	}

	/* Warn the user if the CCA prevents multi-core */
	ncores = mips_cm_numcores();
	if (cca_unsuitable && ncores > 1) {
		pr_warn("Using only one core due to unsuitable CCA 0x%x\n",
			cca);

		for_each_present_cpu(c) {
			if (cpu_data[c].core)
				set_cpu_present(c, false);
		}
	}

	/*
	 * Patch the start of mips_cps_core_entry to provide:
	 *
	 * s0 = kseg0 CCA
	 */
	entry_code = (u32 *)&mips_cps_core_entry;
	uasm_i_addiu(&entry_code, 16, 0, cca);
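	/*
	 * The addiu emitted above targets GPR 16 (s0) with GPR 0 ($zero)
	 * as its source, ie. it loads the CCA constant into s0. Write the
	 * patched instruction back past the caches so that secondary
	 * cores, which fetch the entry code uncached from memory, see the
	 * new instruction.
	 */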
	blast_dcache_range((unsigned long)&mips_cps_core_entry,
			   (unsigned long)entry_code);
	bc_wback_inv((unsigned long)&mips_cps_core_entry,
		     (void *)entry_code - (void *)&mips_cps_core_entry);
	__sync();

	/* Allocate core boot configuration structs */
	mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg),
					GFP_KERNEL);
	if (!mips_cps_core_bootcfg) {
		pr_err("Failed to allocate boot config for %u cores\n", ncores);
		goto err_out;
	}

	/* Allocate VPE boot configuration structs */
	for (c = 0; c < ncores; c++) {
		core_vpes = core_vpe_count(c);
		mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes,
				sizeof(*mips_cps_core_bootcfg[c].vpe_config),
				GFP_KERNEL);
		if (!mips_cps_core_bootcfg[c].vpe_config) {
			pr_err("Failed to allocate %u VPE boot configs\n",
			       core_vpes);
			goto err_out;
		}
	}

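	/*
	 * vpe_mask holds one bit per VPE within the core; here the boot
	 * VPE records itself as running in its core's boot configuration.
	 */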
	/* Mark this CPU as booted */
	atomic_set(&mips_cps_core_bootcfg[current_cpu_data.core].vpe_mask,
		   1 << cpu_vpe_id(&current_cpu_data));

	return;
err_out:
	/* Clean up allocations */
	if (mips_cps_core_bootcfg) {
		for (c = 0; c < ncores; c++)
			kfree(mips_cps_core_bootcfg[c].vpe_config);
		kfree(mips_cps_core_bootcfg);
		mips_cps_core_bootcfg = NULL;
	}

	/* Effectively disable SMP by declaring CPUs not present */
	for_each_possible_cpu(c) {
		if (c == 0)
			continue;
		set_cpu_present(c, false);
	}
}

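/*
 * Power up a secondary core and take it out of reset with its reset
 * vector pointing at mips_cps_core_entry. With a CPC present the reset
 * is sequenced through the CPC and polled via STAT_CONF; without one
 * the core is simply released from reset through the GCRs.
 */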
static void boot_core(unsigned int core, unsigned int vpe_id)
{
	u32 access, stat, seq_state;
	unsigned timeout;

	/* Select the appropriate core */
	mips_cm_lock_other(core, 0);

	/* Set its reset vector */
	write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry));

	/* Ensure its coherency is disabled */
	write_gcr_co_coherence(0);

	/* Start it with the legacy memory map and exception base */
	write_gcr_co_reset_ext_base(CM_GCR_RESET_EXT_BASE_UEB);

	/* Ensure the core can access the GCRs */
	access = read_gcr_access();
	access |= 1 << (CM_GCR_ACCESS_ACCESSEN_SHF + core);
	write_gcr_access(access);

	if (mips_cpc_present()) {
		/* Reset the core */
		mips_cpc_lock_other(core);

		if (mips_cm_revision() >= CM_REV_CM3) {
			/* Run only the requested VP following the reset */
			write_cpc_co_vp_stop(0xf);
			write_cpc_co_vp_run(1 << vpe_id);

			/*
			 * Ensure that the VP_RUN register is written before the
			 * core leaves reset.
			 */
			wmb();
		}

		write_cpc_co_cmd(CPC_Cx_CMD_RESET);

		timeout = 100;
		while (true) {
			stat = read_cpc_co_stat_conf();
			seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE_MSK;

			/* U6 == coherent execution, ie. the core is up */
			if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U6)
				break;

			/* Delay a little while before we start warning */
			if (timeout) {
				timeout--;
				mdelay(10);
				continue;
			}

			pr_warn("Waiting for core %u to start... STAT_CONF=0x%x\n",
				core, stat);
			mdelay(1000);
		}

		mips_cpc_unlock_other();
	} else {
		/* Take the core out of reset */
		write_gcr_co_reset_release(0);
	}

	mips_cm_unlock_other();

	/* The core is now powered up */
	bitmap_set(core_power, core, 1);
}

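/*
 * SMP cross-call target: start the requested VPE from a CPU that is
 * already online within the same core, since a VPE can only be
 * configured & started by code executing on its own core.
 */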
static void remote_vpe_boot(void *dummy)
{
	unsigned core = current_cpu_data.core;
	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];

	mips_cps_boot_vpes(core_cfg, cpu_vpe_id(&current_cpu_data));
}

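/*
 * Bring up a secondary CPU: record its entry point, stack & gp in the
 * VPE boot config, then either power up its core, start it via a
 * cross-call to an online sibling, or start it directly if it shares
 * the current core.
 */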
static void cps_boot_secondary(int cpu, struct task_struct *idle)
{
	unsigned core = cpu_data[cpu].core;
	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
	struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id];
	unsigned long core_entry;
	unsigned int remote;
	int err;

	vpe_cfg->pc = (unsigned long)&smp_bootstrap;
	vpe_cfg->sp = __KSTK_TOS(idle);
	vpe_cfg->gp = (unsigned long)task_thread_info(idle);

	atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask);

	preempt_disable();

	if (!test_bit(core, core_power)) {
		/* Boot a VPE on a powered down core */
		boot_core(core, vpe_id);
		goto out;
	}

	if (cpu_has_vp) {
		mips_cm_lock_other(core, vpe_id);
		core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
		write_gcr_co_reset_base(core_entry);
		mips_cm_unlock_other();
	}

	if (core != current_cpu_data.core) {
		/* Boot a VPE on another powered up core */
		for (remote = 0; remote < NR_CPUS; remote++) {
			if (cpu_data[remote].core != core)
				continue;
			if (cpu_online(remote))
				break;
		}
		if (remote >= NR_CPUS) {
			pr_crit("No online CPU in core %u to start CPU%d\n",
				core, cpu);
			goto out;
		}

		err = smp_call_function_single(remote, remote_vpe_boot,
					       NULL, 1);
		if (err)
			panic("Failed to call remote CPU\n");
		goto out;
	}

	BUG_ON(!cpu_has_mipsmt && !cpu_has_vp);

	/* Boot a VPE on this core */
	mips_cps_boot_vpes(core_cfg, vpe_id);
out:
	preempt_enable();
}

static void cps_init_secondary(void)
{
	/* Disable MT - we only want to run 1 TC per VPE */
	if (cpu_has_mipsmt)
		dmt();

	if (mips_cm_revision() >= CM_REV_CM3) {
		unsigned ident = gic_read_local_vp_id();

		/*
		 * Ensure that our calculation of the VP ID matches up with
		 * what the GIC reports, otherwise we'll have configured
		 * interrupts incorrectly.
		 */
		BUG_ON(ident != mips_cm_vp_id(smp_processor_id()));
	}

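	/*
	 * With an external interrupt controller (EIC) the Status.IM bits
	 * act as an interrupt priority level rather than a mask, so clear
	 * them; otherwise unmask hardware interrupt lines IP2-IP7.
	 */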
	if (cpu_has_veic)
		clear_c0_status(ST0_IM);
	else
		change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 |
					 STATUSF_IP4 | STATUSF_IP5 |
					 STATUSF_IP6 | STATUSF_IP7);
}

static void cps_smp_finish(void)
{
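	/* Arm the first count/compare timer interrupt ~8 jiffies out */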
	write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */

	local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU

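/*
 * Runs on the CPU being taken offline. CPU0 may never be offlined, and
 * offlining requires support for the power-gated PM state so that a
 * fully idle core can actually be powered down in play_dead().
 */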
static int cps_cpu_disable(void)
{
	unsigned cpu = smp_processor_id();
	struct core_boot_config *core_cfg;

	if (!cpu)
		return -EBUSY;

	if (!cps_pm_support_state(CPS_PM_POWER_GATED))
		return -EINVAL;

	core_cfg = &mips_cps_core_bootcfg[current_cpu_data.core];
	atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
	smp_mb__after_atomic();
	set_cpu_online(cpu, false);
	calculate_cpu_foreign_map();

	return 0;
}

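/*
 * cpu_death records how the CPU in play_dead() will go offline: halt
 * just its own TC/VP if a sibling VPE keeps the core alive, or power
 * down the whole core if it is the last VPE running there.
 */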
static DECLARE_COMPLETION(cpu_death_chosen);
static unsigned cpu_death_sibling;
static enum {
	CPU_DEATH_HALT,
	CPU_DEATH_POWER,
} cpu_death;

void play_dead(void)
{
	unsigned int cpu, core, vpe_id;

	local_irq_disable();
	idle_task_exit();
	cpu = smp_processor_id();
	core = cpu_data[cpu].core;
	cpu_death = CPU_DEATH_POWER;

	pr_debug("CPU%d going offline\n", cpu);

	if (cpu_has_mipsmt || cpu_has_vp) {
		/* Look for another online VPE within the core */
		for_each_online_cpu(cpu_death_sibling) {
			if (cpu_data[cpu_death_sibling].core != core)
				continue;

			/*
			 * There is an online VPE within the core. Just halt
			 * this TC and leave the core alone.
			 */
			cpu_death = CPU_DEATH_HALT;
			break;
		}
	}

	/* This CPU has chosen its way out */
	complete(&cpu_death_chosen);

	if (cpu_death == CPU_DEATH_HALT) {
		vpe_id = cpu_vpe_id(&cpu_data[cpu]);

		pr_debug("Halting core %d VP%d\n", core, vpe_id);
		if (cpu_has_mipsmt) {
			/* Halt this TC */
			write_c0_tchalt(TCHALT_H);
			instruction_hazard();
		} else if (cpu_has_vp) {
			write_cpc_cl_vp_stop(1 << vpe_id);

			/* Ensure that the VP_STOP register is written */
			wmb();
		}
	} else {
		pr_debug("Gating power to core %d\n", core);
		/* Power down the core */
		cps_pm_enter_state(CPS_PM_POWER_GATED);
	}

	/* This should never be reached */
	panic("Failed to offline CPU %u", cpu);
}

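/*
 * SMP cross-call target: executed on a sibling CPU within the dying
 * CPU's core, since the dead TC's TCHALT register can only be read
 * through the MT register set of the core it belongs to.
 */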
static void wait_for_sibling_halt(void *ptr_cpu)
{
	unsigned cpu = (unsigned long)ptr_cpu;
	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	unsigned halted;
	unsigned long flags;

	do {
		local_irq_save(flags);
		settc(vpe_id);
		halted = read_tc_c0_tchalt();
		local_irq_restore(flags);
	} while (!(halted & TCHALT_H));
}

static void cps_cpu_die(unsigned int cpu)
{
	unsigned core = cpu_data[cpu].core;
	unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	unsigned stat;
	int err;

	/* Wait for the CPU to choose its way out */
	if (!wait_for_completion_timeout(&cpu_death_chosen,
					 msecs_to_jiffies(5000))) {
		pr_err("CPU%u: didn't offline\n", cpu);
		return;
	}

	/*
	 * Now wait for the CPU to actually go offline. Without this wait,
	 * the offlining may race with one or more of:
	 *
	 *   - Onlining the CPU again.
	 *   - Powering down the core if another VPE within it is offlined.
	 *   - A sibling VPE entering a non-coherent state.
	 *
	 * In the non-MT halt case (ie. infinite loop) the CPU is doing nothing
	 * with which we could race, so do nothing.
	 */
	if (cpu_death == CPU_DEATH_POWER) {
		/*
		 * Wait for the core to enter a powered down or clock gated
		 * state, the latter happening when a JTAG probe is connected
		 * in which case the CPC will refuse to power down the core.
		 */
		do {
			mips_cm_lock_other(core, 0);
			mips_cpc_lock_other(core);
			stat = read_cpc_co_stat_conf();
			stat &= CPC_Cx_STAT_CONF_SEQSTATE_MSK;
			mips_cpc_unlock_other();
			mips_cm_unlock_other();
		} while (stat != CPC_Cx_STAT_CONF_SEQSTATE_D0 &&
			 stat != CPC_Cx_STAT_CONF_SEQSTATE_D2 &&
			 stat != CPC_Cx_STAT_CONF_SEQSTATE_U2);

		/* Indicate the core is powered off */
		bitmap_clear(core_power, core, 1);
	} else if (cpu_has_mipsmt) {
		/*
		 * Have a CPU with access to the offlined CPU's registers wait
		 * for its TC to halt.
		 */
		err = smp_call_function_single(cpu_death_sibling,
					       wait_for_sibling_halt,
					       (void *)(unsigned long)cpu, 1);
		if (err)
			panic("Failed to call remote sibling CPU\n");
	} else if (cpu_has_vp) {
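		/* Poll the CPC until the VP reports itself stopped */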
		do {
			mips_cm_lock_other(core, vpe_id);
			stat = read_cpc_co_vp_running();
			mips_cm_unlock_other();
		} while (stat & (1 << vpe_id));
	}
}

#endif /* CONFIG_HOTPLUG_CPU */

static struct plat_smp_ops cps_smp_ops = {
	.smp_setup		= cps_smp_setup,
	.prepare_cpus		= cps_prepare_cpus,
	.boot_secondary		= cps_boot_secondary,
	.init_secondary		= cps_init_secondary,
	.smp_finish		= cps_smp_finish,
	.send_ipi_single	= mips_smp_send_ipi_single,
	.send_ipi_mask		= mips_smp_send_ipi_mask,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable		= cps_cpu_disable,
	.cpu_die		= cps_cpu_die,
#endif
};

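/* Report whether the CPS SMP ops were selected at boot */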
bool mips_cps_smp_in_use(void)
{
	extern struct plat_smp_ops *mp_ops;
	return mp_ops == &cps_smp_ops;
}

int register_cps_smp_ops(void)
{
	if (!mips_cm_present()) {
		pr_warn("MIPS CPS SMP unable to proceed without a CM\n");
		return -ENODEV;
	}

	/* check we have a GIC - we need one for IPIs */
	if (!(read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX_MSK)) {
		pr_warn("MIPS CPS SMP unable to proceed without a GIC\n");
		return -ENODEV;
	}

	register_smp_ops(&cps_smp_ops);
	return 0;
}