linux/arch/arm/mach-omap2/omap-smp.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP4 SMP source file. It contains platform specific functions
 * needed for the Linux SMP kernel.
 *
 * Copyright (C) 2009 Texas Instruments, Inc.
 *
 * Author:
 *      Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * Platform file needed for OMAP4 SMP. This file is based on the ARM
 * RealView SMP platform.
 * Copyright (c) 2002 ARM Limited.
 */
#include <linux/init.h>
#include <linux/delay.h>	/* udelay() */
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/sections.h>
#include <asm/smp_scu.h>
#include <asm/virt.h>

#include "omap-secure.h"
#include "omap-wakeupgen.h"
#include <asm/cputype.h>

#include "soc.h"
#include "iomap.h"
#include "common.h"
#include "clockdomain.h"
#include "pm.h"

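/*
 * ARM MIDR values (variant and revision fields masked out by CPU_MASK),
 * used to tell Cortex-A9 (OMAP4) from Cortex-A15 (OMAP5/DRA7) before SoC
 * detection is available.
 */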
#define CPU_MASK                0xff0ffff0
#define CPU_CORTEX_A9           0x410FC090
#define CPU_CORTEX_A15          0x410FC0F0

#define OMAP5_CORE_COUNT        0x2

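/*
 * Values written to the WakeupGen AUX_CORE_BOOT_0 register to release the
 * secondary core from its hold loop. GP and HS devices use different bit
 * positions: GP devices write the register directly, HS devices go through
 * the secure API.
 */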
#define AUX_CORE_BOOT0_GP_RELEASE       0x020
#define AUX_CORE_BOOT0_HS_RELEASE       0x200

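/*
 * Per-SoC SMP boot configuration: physical address (and later mapping) of
 * the CPU1 reset control register, the SCU and WakeupGen bases, and the
 * secondary startup entry point.
 */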
struct omap_smp_config {
        unsigned long cpu1_rstctrl_pa;
        void __iomem *cpu1_rstctrl_va;
        void __iomem *scu_base;
        void __iomem *wakeupgen_base;
        void *startup_addr;
};

static struct omap_smp_config cfg;

static const struct omap_smp_config omap443x_cfg __initconst = {
        .cpu1_rstctrl_pa = 0x4824380c,
        .startup_addr = omap4_secondary_startup,
};

static const struct omap_smp_config omap446x_cfg __initconst = {
        .cpu1_rstctrl_pa = 0x4824380c,
        .startup_addr = omap4460_secondary_startup,
};

static const struct omap_smp_config omap5_cfg __initconst = {
        .cpu1_rstctrl_pa = 0x48243810,
        .startup_addr = omap5_secondary_startup,
};

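/* Accessor for the SCU base mapped in omap4_smp_init_cpus(); exported to
 * other OMAP4 platform code (CPU hotplug/PM).
 */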
void __iomem *omap4_get_scu_base(void)
{
        return cfg.scu_base;
}

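/*
 * Cortex-A15 erratum 801819: worked around by disabling write streaming in
 * the auxiliary control register. The ACR can only be updated from the
 * secure side, so the write goes through omap_smc1().
 */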
#ifdef CONFIG_OMAP5_ERRATA_801819
static void omap5_erratum_workaround_801819(void)
{
        u32 acr, revidr;
        u32 acr_mask;

        /* REVIDR[3] indicates erratum fix available on silicon */
        asm volatile ("mrc p15, 0, %0, c0, c0, 6" : "=r" (revidr));
        if (revidr & (0x1 << 3))
                return;

        asm volatile ("mrc p15, 0, %0, c1, c0, 1" : "=r" (acr));
        /*
         * BIT(27) - Disables streaming. All write-allocate lines allocate in
         * the L1 or L2 cache.
         * BIT(25) - Disables streaming. All write-allocate lines allocate in
         * the L1 cache.
         */
        acr_mask = (0x3 << 25) | (0x3 << 27);
        /* Skip the expensive SMC call if the bits are already set */
        if ((acr & acr_mask) == acr_mask)
                return;

        acr |= acr_mask;
        omap_smc1(OMAP5_DRA7_MON_SET_ACR_INDEX, acr);

        pr_debug("%s: ARM erratum workaround 801819 applied on CPU%d\n",
                 __func__, smp_processor_id());
}
#else
static inline void omap5_erratum_workaround_801819(void) { }
#endif

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
/*
 * Configure ACR and enable ACTLR[0] (Enable invalidates of BTB with
 * ICIALLU) to activate the workaround for the secondary core.
 * NOTE: it is assumed that the primary core's configuration is done
 * by the boot loader (the kernel will detect a misconfiguration and
 * complain if this is not done).
 *
 * On General Purpose (GP) devices, ACR bit settings can only be done
 * by ROM code in the "secure world" using the smc call, and there is no
 * option to update the "firmware" on such devices. This also works for
 * High Security (HS) devices, as a backup option in case the update is
 * not done in the security firmware.
 */
static void omap5_secondary_harden_predictor(void)
{
        u32 acr, acr_mask;

        asm volatile ("mrc p15, 0, %0, c1, c0, 1" : "=r" (acr));

        /*
         * ACTLR[0] (Enable invalidates of BTB with ICIALLU)
         */
        acr_mask = BIT(0);

        /* Skip the expensive SMC call if the bit is already set */
        if ((acr & acr_mask) == acr_mask)
                return;

        acr |= acr_mask;
        omap_smc1(OMAP5_DRA7_MON_SET_ACR_INDEX, acr);

        pr_debug("%s: ARM ACR setup for CVE_2017_5715 applied on CPU%d\n",
                 __func__, smp_processor_id());
}
#else
static inline void omap5_secondary_harden_predictor(void) { }
#endif

static void omap4_secondary_init(unsigned int cpu)
{
        /*
         * Configure ACTLR and enable NS SMP bit access on CPU1 on HS devices.
         * OMAP44XX EMU/HS devices - CPU0 SMP bit access is enabled in PPA
         * init and for CPU1 a secure PPA API is provided. CPU0 must be ON
         * while executing the NS_SMP API on CPU1, and the PPA version must
         * be 1.4.0+.
         * OMAP443X GP devices - SMP bit isn't accessible.
         * OMAP446X GP devices - SMP bit access is enabled on both CPUs.
         */
        if (soc_is_omap443x() && (omap_type() != OMAP2_DEVICE_TYPE_GP))
                omap_secure_dispatcher(OMAP4_PPA_CPU_ACTRL_SMP_INDEX,
                                                        4, 0, 0, 0, 0, 0);

        if (soc_is_omap54xx() || soc_is_dra7xx()) {
                /*
                 * Configure the CNTFRQ register for the secondary CPUs,
                 * which indicates the frequency of the CPU local timers.
                 */
                set_cntfreq();
                /* Configure ACR to disable streaming WA for 801819 */
                omap5_erratum_workaround_801819();
                /* Enable ACR to allow for the ICIALLU workaround */
                omap5_secondary_harden_predictor();
        }
}

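/*
 * Release and wake a secondary core from CPU0: write the release value to
 * AUX_CORE_BOOT_0 (via the secure API on HS devices), work around the OMAP4
 * SGI wakeup and ROM GICD errata where needed, then send a wakeup IPI.
 */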
static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
        static struct clockdomain *cpu1_clkdm;
        static bool booted;
        static struct powerdomain *cpu1_pwrdm;

        /*
         * Update AuxCoreBoot0 with the boot state for the secondary core.
         * The omap4_secondary_startup() routine holds the secondary core
         * until AuxCoreBoot0 is updated with the CPU's release state.
         * A barrier is added to ensure that the write buffer is drained.
         */
        if (omap_secure_apis_support())
                omap_modify_auxcoreboot0(AUX_CORE_BOOT0_HS_RELEASE,
                                         0xfffffdff);
        else
                writel_relaxed(AUX_CORE_BOOT0_GP_RELEASE,
                               cfg.wakeupgen_base + OMAP_AUX_CORE_BOOT_0);

        if (!cpu1_clkdm && !cpu1_pwrdm) {
                cpu1_clkdm = clkdm_lookup("mpu1_clkdm");
                cpu1_pwrdm = pwrdm_lookup("cpu1_pwrdm");
        }

        /*
         * SGIs (Software Generated Interrupts) are not wakeup capable
         * from low power states. This is a known limitation on OMAP4 and
         * needs to be worked around with a software-forced clockdomain
         * wake-up: to wake up CPU1, CPU0 forces the CPU1 clockdomain into
         * software-forced wakeup and then puts it back into hardware
         * supervised mode.
         * More details can be found in the OMAP4430 TRM, Version J,
         * Section 4.3.4.2 "Power States of CPU0 and CPU1".
         */
        if (booted && cpu1_pwrdm && cpu1_clkdm) {
                /*
                 * The GIC distributor control register has changed between
                 * Cortex-A9 r1pX and r2pX. The secure banked version of the
                 * Control Register is now composed of 2 bits:
                 * bit 0 == Secure Enable
                 * bit 1 == Non-Secure Enable
                 * The Non-Secure banked register has not changed.
                 * Because the ROM code is based on the r1pX GIC, the CPU1
                 * GIC restoration will cause a problem for CPU0 Non-Secure
                 * SW. The workaround must be:
                 * 1) Before doing the CPU1 wakeup, CPU0 must disable
                 * the GIC distributor
                 * 2) CPU1 must re-enable the GIC distributor on
                 * its wakeup path.
                 */
                if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD)) {
                        local_irq_disable();
                        gic_dist_disable();
                }

                /*
                 * Ensure that the CPU power state is set to ON to avoid a
                 * CPU powerdomain transition on wfi.
                 */
                clkdm_deny_idle_nolock(cpu1_clkdm);
                pwrdm_set_next_pwrst(cpu1_pwrdm, PWRDM_POWER_ON);
                clkdm_allow_idle_nolock(cpu1_clkdm);

                if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD)) {
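                        /*
                         * Wait for CPU1 to re-enable the GIC distributor
                         * on its wakeup path.
                         */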
                        while (gic_dist_disabled()) {
                                udelay(1);
                                cpu_relax();
                        }
                        gic_timer_retrigger();
                        local_irq_enable();
                }
        } else {
                dsb_sev();
                booted = true;
        }

        arch_send_wakeup_ipi_mask(cpumask_of(cpu));

        return 0;
}

/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
static void __init omap4_smp_init_cpus(void)
{
        unsigned int i = 0, ncores = 1, cpu_id;

        /* Use ARM cpuid check here, as SoC detection will not work so early */
        cpu_id = read_cpuid_id() & CPU_MASK;
        if (cpu_id == CPU_CORTEX_A9) {
                /*
                 * Currently we can't call ioremap here because
                 * SoC detection won't work until after init_early.
                 */
                cfg.scu_base = OMAP2_L4_IO_ADDRESS(scu_a9_get_base());
                BUG_ON(!cfg.scu_base);
                ncores = scu_get_core_count(cfg.scu_base);
        } else if (cpu_id == CPU_CORTEX_A15) {
                ncores = OMAP5_CORE_COUNT;
        }

        /* sanity check */
        if (ncores > nr_cpu_ids) {
                pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
                        ncores, nr_cpu_ids);
                ncores = nr_cpu_ids;
        }

        for (i = 0; i < ncores; i++)
                set_cpu_possible(i, true);
}

/*
 * For now, just make sure the start-up address is not within the booting
 * kernel space as that means we just overwrote whatever secondary_startup()
 * code there was.
 */
static bool __init omap4_smp_cpu1_startup_valid(unsigned long addr)
{
        if ((addr >= __pa(PAGE_OFFSET)) && (addr <= __pa(__bss_start)))
                return false;

        return true;
}

/*
 * We may need to reset CPU1 before configuring it, otherwise a kexec boot
 * can end up trying to use the old kernel's startup address, or
 * suspend-resume will occasionally fail to bring up CPU1 on 4430 if CPU1
 * fails to enter deeper idle states.
 */
static void __init omap4_smp_maybe_reset_cpu1(struct omap_smp_config *c)
{
        unsigned long cpu1_startup_pa, cpu1_ns_pa_addr;
        bool needs_reset = false;
        u32 released;

        if (omap_secure_apis_support())
                released = omap_read_auxcoreboot0() & AUX_CORE_BOOT0_HS_RELEASE;
        else
                released = readl_relaxed(cfg.wakeupgen_base +
                                         OMAP_AUX_CORE_BOOT_0) &
                                                AUX_CORE_BOOT0_GP_RELEASE;
        if (released) {
                pr_warn("smp: CPU1 not parked?\n");

                return;
        }

        cpu1_startup_pa = readl_relaxed(cfg.wakeupgen_base +
                                        OMAP_AUX_CORE_BOOT_1);

        /* Did the configured secondary_startup() get overwritten? */
        if (!omap4_smp_cpu1_startup_valid(cpu1_startup_pa))
                needs_reset = true;

        /*
         * If OMAP4 or OMAP5 has NS_PA_ADDR configured, CPU1 may be in a
         * deeper idle state in WFI and will wake up to an invalid address.
         */
        if (soc_is_omap44xx() || soc_is_omap54xx()) {
                cpu1_ns_pa_addr = omap4_get_cpu1_ns_pa_addr();
                if (!omap4_smp_cpu1_startup_valid(cpu1_ns_pa_addr))
                        needs_reset = true;
        } else {
                cpu1_ns_pa_addr = 0;
        }

        if (!needs_reset || !c->cpu1_rstctrl_va)
                return;

        pr_info("smp: CPU1 parked within kernel, needs reset (0x%lx 0x%lx)\n",
                cpu1_startup_pa, cpu1_ns_pa_addr);

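        /*
         * Assert and then release CPU1 reset; the intervening read back
         * makes sure the assert has reached the register before the release.
         */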
        writel_relaxed(1, c->cpu1_rstctrl_va);
        readl_relaxed(c->cpu1_rstctrl_va);
        writel_relaxed(0, c->cpu1_rstctrl_va);
}

static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
{
        const struct omap_smp_config *c = NULL;

        if (soc_is_omap443x())
                c = &omap443x_cfg;
        else if (soc_is_omap446x())
                c = &omap446x_cfg;
        else if (soc_is_dra74x() || soc_is_omap54xx() || soc_is_dra76x())
                c = &omap5_cfg;

        if (!c) {
                pr_err("%s: Unknown SMP SoC?\n", __func__);
                return;
        }

        /* Must preserve cfg.scu_base set earlier */
        cfg.cpu1_rstctrl_pa = c->cpu1_rstctrl_pa;
        cfg.startup_addr = c->startup_addr;
        cfg.wakeupgen_base = omap_get_wakeupgen_base();

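        /*
         * If the kernel was entered in HYP mode, the secondary core must
         * also come up through the HYP entry point. The 801819 workaround
         * is applied here for the boot CPU as well.
         */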
        if (soc_is_dra74x() || soc_is_omap54xx() || soc_is_dra76x()) {
                if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
                        cfg.startup_addr = omap5_secondary_hyp_startup;
                omap5_erratum_workaround_801819();
        }

        cfg.cpu1_rstctrl_va = ioremap(cfg.cpu1_rstctrl_pa, 4);
        if (!cfg.cpu1_rstctrl_va)
                return;

        /*
         * Initialise the SCU if present; the secondary core is released
         * later from omap4_boot_secondary().
         */
        if (cfg.scu_base)
                scu_enable(cfg.scu_base);

        omap4_smp_maybe_reset_cpu1(&cfg);

        /*
         * Write the address of the secondary startup routine into
         * AuxCoreBoot1, where the ROM code will jump and start executing
         * on the secondary core once it is out of WFE.
         * A barrier is added to ensure that the write buffer is drained.
         */
        if (omap_secure_apis_support())
                omap_auxcoreboot_addr(__pa_symbol(cfg.startup_addr));
        else
                writel_relaxed(__pa_symbol(cfg.startup_addr),
                               cfg.wakeupgen_base + OMAP_AUX_CORE_BOOT_1);
}

const struct smp_operations omap4_smp_ops __initconst = {
        .smp_init_cpus          = omap4_smp_init_cpus,
        .smp_prepare_cpus       = omap4_smp_prepare_cpus,
        .smp_secondary_init     = omap4_secondary_init,
        .smp_boot_secondary     = omap4_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
        .cpu_die                = omap4_cpu_die,
        .cpu_kill               = omap4_cpu_kill,
#endif
};