/* arch/x86/include/asm/mp.h */
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2015 Google, Inc
 *
 * Taken from coreboot file of the same name
 */

#ifndef _X86_MP_H_
#define _X86_MP_H_

#include <asm/atomic.h>
#include <asm/cache.h>

  14enum {
  15        /* Indicates that the function should run on all CPUs */
  16        MP_SELECT_ALL   = -1,
  17
  18        /* Run on boot CPUs */
  19        MP_SELECT_BSP   = -2,
  20
  21        /* Run on non-boot CPUs */
  22        MP_SELECT_APS   = -3,
  23};
  24
  25typedef int (*mp_callback_t)(struct udevice *cpu, void *arg);
  26
/*
 * A mp_flight_record details a sequence of calls for the APs to perform
 * along with the BSP to coordinate sequencing. Each flight record either
 * provides a barrier for each AP before calling the callback or the APs
 * are allowed to perform the callback without waiting. Regardless, each
 * record has the cpus_entered field incremented for each record. When
 * the BSP observes that the cpus_entered matches the number of APs
 * the bsp_call is called with bsp_arg and upon returning releases the
 * barrier allowing the APs to make further progress.
 *
 * Note that ap_call() and bsp_call() can be NULL. In the NULL case the
 * callback will just not be called.
 *
 * @barrier: Ensures that the BSP and AP don't run the flight record at the same
 *	time
 * @cpus_entered: Counts the number of APs that have run this record
 * @ap_call: Function for the APs to call
 * @ap_arg: Argument to pass to @ap_call
 * @bsp_call: Function for the BSP to call
 * @bsp_arg: Argument to pass to @bsp_call
 */
struct mp_flight_record {
	atomic_t barrier;
	atomic_t cpus_entered;
	mp_callback_t ap_call;
	void *ap_arg;
	mp_callback_t bsp_call;
	void *bsp_arg;
	/*
	 * NOTE(review): ARCH_DMA_MINALIGN alignment presumably keeps each
	 * record on its own cache line while BSP and APs poll it
	 * concurrently -- confirm against the mp_init implementation
	 */
} __attribute__((aligned(ARCH_DMA_MINALIGN)));

/**
 * MP_FLIGHT_RECORD() - static initialiser for a struct mp_flight_record
 *
 * @barrier_: initial value for the @barrier atomic (see the BLOCK/NOBLOCK
 *	wrappers below for the two values used)
 * @ap_func_: function for the APs to call (mp_callback_t, may be NULL)
 * @ap_arg_: argument passed to @ap_func_
 * @bsp_func_: function for the BSP to call (mp_callback_t, may be NULL)
 * @bsp_arg_: argument passed to @bsp_func_
 *
 * @cpus_entered always starts at 0: no AP has run the record yet.
 */
#define MP_FLIGHT_RECORD(barrier_, ap_func_, ap_arg_, bsp_func_, bsp_arg_) \
	{							\
		.barrier = ATOMIC_INIT(barrier_),		\
		.cpus_entered = ATOMIC_INIT(0),			\
		.ap_call = ap_func_,				\
		.ap_arg = ap_arg_,				\
		.bsp_call = bsp_func_,				\
		.bsp_arg = bsp_arg_,				\
	}

/*
 * Flight record whose barrier starts at 0: the name indicates the APs
 * block until the BSP releases them (exact barrier protocol lives in the
 * mp_init implementation, not this header)
 */
#define MP_FR_BLOCK_APS(ap_func, ap_arg, bsp_func, bsp_arg) \
	MP_FLIGHT_RECORD(0, ap_func, ap_arg, bsp_func, bsp_arg)

/* Flight record whose barrier starts at 1: APs proceed without waiting */
#define MP_FR_NOBLOCK_APS(ap_func, ap_arg, bsp_func, bsp_arg) \
	MP_FLIGHT_RECORD(1, ap_func, ap_arg, bsp_func, bsp_arg)

/**
 * mp_init() - Set up the SIPI vector and bring up the APs
 *
 * Each flight record will be executed according to the plan. Note
 * that the MP infrastructure uses SMM default area without saving it. It's
 * up to the chipset or mainboard to either e820 reserve this area or save this
 * region prior to calling mp_init() and restoring it after mp_init returns.
 *
 * At the time mp_init() is called the MTRR MSRs are mirrored into APs then
 * caching is enabled before running the flight plan.
 *
 * The MP init has the following properties:
 * 1. APs are brought up in parallel.
 * 2. The ordering of cpu number and APIC ids is not deterministic.
 *    Therefore, one cannot rely on this property or the order of devices in
 *    the device tree unless the chipset or mainboard know the APIC ids
 *    a priori.
 *
 * @return < 0 on error, 0 on success
 */
int mp_init(void);

/**
 * x86_mp_init() - Set up additional CPUs
 *
 * NOTE(review): relationship to mp_init() is not visible in this header --
 * see the implementation for which one wraps the other
 *
 * @return < 0 on error, 0 on success
 */
int x86_mp_init(void);

/**
 * mp_run_func() - Function to call on the AP
 *
 * Unlike mp_callback_t this takes no CPU device and returns nothing; it is
 * the worker type used by mp_run_on_cpus()
 *
 * @arg: Argument to pass
 */
typedef void (*mp_run_func)(void *arg);

/* Real implementations: only when SMP is enabled and not building 64-bit */
#if CONFIG_IS_ENABLED(SMP) && !CONFIG_IS_ENABLED(X86_64)
/**
 * mp_run_on_cpus() - Run a function on one or all CPUs
 *
 * This does not return until all CPUs have completed the work
 *
 * Running on anything other than the boot CPU is only supported if
 * CONFIG_SMP_AP_WORK is enabled
 *
 * @cpu_select: CPU to run on (its dev->req_seq value), or MP_SELECT_ALL for
 *	all, or MP_SELECT_BSP for BSP
 * @func: Function to run
 * @arg: Argument to pass to the function
 * @return 0 on success, -ve on error
 */
int mp_run_on_cpus(int cpu_select, mp_run_func func, void *arg);

/**
 * mp_park_aps() - Park the APs ready for the OS
 *
 * This halts all CPUs except the main one, ready for the OS to use them
 *
 * @return 0 if OK, -ve on error
 */
int mp_park_aps(void);

/**
 * mp_first_cpu() - Get the first CPU to process, from a selection
 *
 * This is used to iterate through selected CPUs. Call this function first, then
 * call mp_next_cpu() repeatedly (with the same @cpu_select) until it returns
 * -EFBIG.
 *
 * @cpu_select: Selected CPUs (either a CPU number or MP_SELECT_...)
 * @return next CPU number to run on (e.g. 0), or -EFBIG when the selection
 *	is empty
 */
int mp_first_cpu(int cpu_select);

/**
 * mp_next_cpu() - Get the next CPU to process, from a selection
 *
 * This is used to iterate through selected CPUs. After first calling
 * mp_first_cpu() once, call this function repeatedly until it returns -EFBIG.
 *
 * The value of @cpu_select must be the same for all calls and must match the
 * value passed to mp_first_cpu(), otherwise the behaviour is undefined.
 *
 * @cpu_select: Selected CPUs (either a CPU number or MP_SELECT_...)
 * @prev_cpu: Previous value returned by mp_first_cpu()/mp_next_cpu()
 * @return next CPU number to run on (e.g. 0), or -EFBIG when iteration is
 *	complete
 */
int mp_next_cpu(int cpu_select, int prev_cpu);
#else
 161static inline int mp_run_on_cpus(int cpu_select, mp_run_func func, void *arg)
 162{
 163        /* There is only one CPU, so just call the function here */
 164        func(arg);
 165
 166        return 0;
 167}
 168
 169static inline int mp_park_aps(void)
 170{
 171        /* No APs to park */
 172
 173        return 0;
 174}
 175
 176static inline int mp_first_cpu(int cpu_select)
 177{
 178        /* We cannot run on any APs, nor a selected CPU */
 179        return cpu_select == MP_SELECT_APS ? -EFBIG : MP_SELECT_BSP;
 180}
 181
 182static inline int mp_next_cpu(int cpu_select, int prev_cpu)
 183{
 184        /*
 185         * When MP is not enabled, there is only one CPU and we did it in
 186         * mp_first_cpu()
 187         */
 188        return -EFBIG;
 189}

#endif

#endif /* _X86_MP_H_ */