   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/arch/arm/plat-versatile/platsmp.c
   4 *
   5 *  Copyright (C) 2002 ARM Ltd.
   6 *  All Rights Reserved
   7 *
   8 * This code is specific to the hardware found on ARM Realview and
   9 * Versatile Express platforms where the CPUs are unable to be individually
  10 * woken, and where there is no way to hot-unplug CPUs.  Real platforms
  11 * should not copy this code.
  12 */
  13#include <linux/init.h>
  14#include <linux/errno.h>
  15#include <linux/delay.h>
  16#include <linux/device.h>
  17#include <linux/jiffies.h>
  18#include <linux/smp.h>
  19
  20#include <asm/cacheflush.h>
  21#include <asm/smp_plat.h>
  22
  23#include <plat/platsmp.h>
  24
/*
 * versatile_cpu_release controls the release of CPUs from the holding
 * pen in headsmp.S, which exists because we are not always able to
 * control the release of individual CPUs from the board firmware.
 * Production platforms do not need this.
 *
 * -1 means "no CPU is being released"; otherwise it holds the
 * cpu_logical_map() value of the CPU being let out of the pen (written
 * by versatile_boot_secondary(), cleared again by the secondary in
 * versatile_secondary_init()).  volatile because secondaries poll it
 * while they may not yet be taking part in cache coherency.
 */
volatile int versatile_cpu_release = -1;
  32
  33/*
  34 * Write versatile_cpu_release in a way that is guaranteed to be visible to
  35 * all observers, irrespective of whether they're taking part in coherency
  36 * or not.  This is necessary for the hotplug code to work reliably.
  37 */
/*
 * Write versatile_cpu_release in a way that is guaranteed to be visible to
 * all observers, irrespective of whether they're taking part in coherency
 * or not.  This is necessary for the hotplug code to work reliably.
 */
static void versatile_write_cpu_release(int val)
{
	versatile_cpu_release = val;
	smp_wmb();	/* order the store before the cache maintenance below */
	/* push the new value out past the cache for non-coherent observers */
	sync_cache_w(&versatile_cpu_release);
}
  44
/*
 * versatile_lock exists to avoid running the loops_per_jiffy delay loop
 * calibrations on the secondary CPU while the requesting CPU is using
 * the limited-bandwidth bus - which affects the calibration value.
 * Production platforms do not need this.
 *
 * Held by the boot CPU across the whole release sequence in
 * versatile_boot_secondary(); the secondary briefly acquires/releases it
 * in versatile_secondary_init() to synchronise with that sequence.
 */
static DEFINE_RAW_SPINLOCK(versatile_lock);
  52
/*
 * Runs on the secondary CPU once it has entered the kernel.  Signals the
 * boot CPU (which is polling in versatile_boot_secondary()) that we have
 * left the holding pen, then serialises with it via versatile_lock.
 *
 * @cpu: this CPU's logical number (unused in this implementation)
 */
void versatile_secondary_init(unsigned int cpu)
{
	/*
	 * let the primary processor know we're out of the
	 * pen, then head off into the C entry point
	 */
	versatile_write_cpu_release(-1);

	/*
	 * Synchronise with the boot thread.  The boot CPU holds
	 * versatile_lock for the whole release sequence, so we block here
	 * until it has finished (keeps our delay-loop calibration off the
	 * bus while the boot CPU is still using it - see versatile_lock).
	 */
	raw_spin_lock(&versatile_lock);
	raw_spin_unlock(&versatile_lock);
}
  67
/*
 * Release one secondary CPU from the holding pen.
 *
 * @cpu:  logical number of the CPU to boot
 * @idle: idle task for the new CPU (passed by generic SMP code; unused
 *        in this implementation)
 *
 * Writes the CPU's hardware ID into versatile_cpu_release, kicks it with
 * a wakeup IPI, then polls for up to one second for the secondary to
 * acknowledge by clearing the release variable (done in
 * versatile_secondary_init()).
 *
 * Returns 0 if the secondary acknowledged in time, -ENOSYS otherwise.
 */
int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	raw_spin_lock(&versatile_lock);

	/*
	 * This is really belt and braces; we hold unintended secondary
	 * CPUs in the holding pen until we're ready for them.  However,
	 * since we haven't sent them a soft interrupt, they shouldn't
	 * be there.
	 */
	versatile_write_cpu_release(cpu_logical_map(cpu));

	/*
	 * Send the secondary CPU a soft interrupt, thereby causing
	 * the boot monitor to read the system wide flags register,
	 * and branch to the address found there.
	 */
	arch_send_wakeup_ipi_mask(cpumask_of(cpu));

	/* Poll up to 1s for the secondary to clear the release variable. */
	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		smp_rmb();	/* pairs with smp_wmb() on the write side */
		if (versatile_cpu_release == -1)
			break;

		udelay(10);
	}

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	raw_spin_unlock(&versatile_lock);

	/* Still not cleared after the timeout => the CPU never came up. */
	return versatile_cpu_release != -1 ? -ENOSYS : 0;
}
 110