/* linux/arch/arm/mach-zynq/platsmp.c */
/*
 * This file contains Xilinx specific SMP code, used to start up
 * the second processor.
 *
 * Copyright (C) 2011-2013 Xilinx
 *
 * based on linux/arch/arm/mach-realview/platsmp.c
 *
 * Copyright (C) 2002 ARM Ltd.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/jiffies.h>
#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/smp_scu.h>
#include "common.h"

  30/*
  31 * Store number of cores in the system
  32 * Because of scu_get_core_count() must be in __init section and can't
  33 * be called from zynq_cpun_start() because it is not in __init section.
  34 */
  35static int ncores;
  36
/**
 * zynq_cpun_start - Start a secondary CPU executing at a given address
 * @address:	Physical address the CPU should begin executing at
 * @cpu:	CPU number to release
 *
 * If @address is non-zero, a small trampoline is copied to the reset
 * vector at physical address 0x0 so the woken CPU loads @address and
 * jumps to it; the CPU is then released through the SLCR. An @address
 * of 0 skips the trampoline and simply restarts the CPU.
 *
 * Return: 0 on success, -1 if the boot vectors cannot be mapped or
 * @address is unaligned / overlaps the trampoline area.
 */
int zynq_cpun_start(u32 address, int cpu)
{
	/* Full trampoline size, delimited by linker-visible symbols */
	u32 trampoline_code_size = &zynq_secondary_trampoline_end -
						&zynq_secondary_trampoline;

	/* MS: Expectation that SLCR are directly map and accessible */
	/* Not possible to jump to non aligned address */
	if (!(address & 3) && (!address || (address >= trampoline_code_size))) {
		/* Store pointer to ioremap area which points to address 0x0 */
		static u8 __iomem *zero;
		/* Offset of the jump-target word within the trampoline */
		u32 trampoline_size = &zynq_secondary_trampoline_jump -
						&zynq_secondary_trampoline;

		/* Hold the CPU in reset while the vectors are patched */
		zynq_slcr_cpu_stop(cpu);
		if (address) {
			/*
			 * If RAM does not start at physical 0x0, the boot
			 * vectors must be reached via ioremap(); otherwise
			 * they are already covered by the linear mapping.
			 */
			if (__pa(PAGE_OFFSET)) {
				zero = ioremap(0, trampoline_code_size);
				if (!zero) {
					pr_warn("BOOTUP jump vectors not accessible\n");
					return -1;
				}
			} else {
				zero = (__force u8 __iomem *)PAGE_OFFSET;
			}

			/*
			* This is elegant way how to jump to any address
			* 0x0: Load address at 0x8 to r0
			* 0x4: Jump by mov instruction
			* 0x8: Jumping address
			*/
			memcpy((__force void *)zero, &zynq_secondary_trampoline,
							trampoline_size);
			writel(address, zero + trampoline_size);

			/* Push the trampoline out to RAM before the CPU runs */
			flush_cache_all();
			outer_flush_range(0, trampoline_code_size);
			smp_wmb();

			if (__pa(PAGE_OFFSET))
				iounmap(zero);
		}
		zynq_slcr_cpu_start(cpu);

		return 0;
	}

	pr_warn("Can't start CPU%d: Wrong starting address %x\n", cpu, address);

	return -1;
}
EXPORT_SYMBOL(zynq_cpun_start);

  90static int zynq_boot_secondary(unsigned int cpu, struct task_struct *idle)
  91{
  92        return zynq_cpun_start(__pa_symbol(secondary_startup), cpu);
  93}
  94
  95/*
  96 * Initialise the CPU possible map early - this describes the CPUs
  97 * which may be present or become present in the system.
  98 */
  99static void __init zynq_smp_init_cpus(void)
 100{
 101        int i;
 102
 103        ncores = scu_get_core_count(zynq_scu_base);
 104
 105        for (i = 0; i < ncores && i < CONFIG_NR_CPUS; i++)
 106                set_cpu_possible(i, true);
 107}
 108
 109static void __init zynq_smp_prepare_cpus(unsigned int max_cpus)
 110{
 111        scu_enable(zynq_scu_base);
 112}
 113
 114/**
 115 * zynq_secondary_init - Initialize secondary CPU cores
 116 * @cpu:        CPU that is initialized
 117 *
 118 * This function is in the hotplug path. Don't move it into the
 119 * init section!!
 120 */
 121static void zynq_secondary_init(unsigned int cpu)
 122{
 123        zynq_core_pm_init();
 124}
 125
#ifdef CONFIG_HOTPLUG_CPU
 127static int zynq_cpu_kill(unsigned cpu)
 128{
 129        unsigned long timeout = jiffies + msecs_to_jiffies(50);
 130
 131        while (zynq_slcr_cpu_state_read(cpu))
 132                if (time_after(jiffies, timeout))
 133                        return 0;
 134
 135        zynq_slcr_cpu_stop(cpu);
 136        return 1;
 137}
 138
 139/**
 140 * zynq_cpu_die - Let a CPU core die
 141 * @cpu:        Dying CPU
 142 *
 143 * Platform-specific code to shutdown a CPU.
 144 * Called with IRQs disabled on the dying CPU.
 145 */
 146static void zynq_cpu_die(unsigned int cpu)
 147{
 148        zynq_slcr_cpu_state_write(cpu, true);
 149
 150        /*
 151         * there is no power-control hardware on this platform, so all
 152         * we can do is put the core into WFI; this is safe as the calling
 153         * code will have already disabled interrupts
 154         */
 155        for (;;)
 156                cpu_do_idle();
 157}
#endif

 160const struct smp_operations zynq_smp_ops __initconst = {
 161        .smp_init_cpus          = zynq_smp_init_cpus,
 162        .smp_prepare_cpus       = zynq_smp_prepare_cpus,
 163        .smp_boot_secondary     = zynq_boot_secondary,
 164        .smp_secondary_init     = zynq_secondary_init,
 165#ifdef CONFIG_HOTPLUG_CPU
 166        .cpu_die                = zynq_cpu_die,
 167        .cpu_kill               = zynq_cpu_kill,
 168#endif
 169};
 170