linux/include/linux/smp.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SMP_H
#define __LINUX_SMP_H

/*
 *      Generic SMP support
 *              Alan Cox <alan@redhat.com>
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/llist.h>

typedef void (*smp_call_func_t)(void *info);
struct __call_single_data {
        struct llist_node llist;
        smp_call_func_t func;
        void *info;
        unsigned int flags;
};

/* Use __aligned() to avoid using 2 cache lines for 1 csd */
typedef struct __call_single_data call_single_data_t
        __aligned(sizeof(struct __call_single_data));

/* total number of cpus in this system (may exceed NR_CPUS) */
extern unsigned int total_cpus;

int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
                             int wait);
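
/*
 * Example (editor's sketch, not part of the original header): run a
 * hypothetical handler on CPU 1 and wait for it to finish. The handler
 * executes in interrupt context on the target CPU and must not sleep:
 *
 *      static void example_handler(void *info)
 *      {
 *              *(int *)info = raw_smp_processor_id();
 *      }
 *
 *      int cpu = -1;
 *      int err = smp_call_function_single(1, example_handler, &cpu, 1);
 */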

/*
 * Call a function on all processors
 */
void on_each_cpu(smp_call_func_t func, void *info, int wait);
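
/*
 * Example (editor's sketch): run the same hypothetical handler on
 * every online CPU, including the local one, and wait until all of
 * them have completed:
 *
 *      on_each_cpu(example_handler, NULL, 1);
 */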

/*
 * Call a function on processors specified by mask, which might include
 * the local one.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
                void *info, bool wait);
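
/*
 * Example (editor's sketch): restrict the call to an arbitrary mask;
 * passing cpu_online_mask makes it behave like on_each_cpu():
 *
 *      on_each_cpu_mask(cpu_online_mask, example_handler, NULL, true);
 */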

/*
 * Call a function on each processor for which the supplied function
 * cond_func returns true. This may include the local
 * processor.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
                smp_call_func_t func, void *info, bool wait,
                gfp_t gfp_flags);

void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
                smp_call_func_t func, void *info, bool wait,
                gfp_t gfp_flags, const struct cpumask *mask);
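
/*
 * Example (editor's sketch): only interrupt CPUs whose hypothetical
 * per-CPU flag is set; cond_func is evaluated for each CPU with the
 * same info argument as func:
 *
 *      static bool example_needs_flush(int cpu, void *info)
 *      {
 *              return per_cpu(example_flush_pending, cpu);    // hypothetical flag
 *      }
 *
 *      on_each_cpu_cond(example_needs_flush, example_handler, NULL,
 *                       true, GFP_KERNEL);
 */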

int smp_call_function_single_async(int cpu, call_single_data_t *csd);
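
/*
 * Example (editor's sketch): asynchronous variant with a caller-owned
 * csd. The csd must remain valid until the callback has executed, and
 * must not be reused for another async call before then:
 *
 *      static call_single_data_t example_csd = {
 *              .func = example_handler,        // hypothetical callback
 *      };
 *
 *      smp_call_function_single_async(1, &example_csd);
 */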

#ifdef CONFIG_SMP

#include <linux/preempt.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <asm/smp.h>

/*
 * main cross-CPU interfaces, handles INIT, TLB flush, STOP, etc.
 * (defined in asm header):
 */

/*
 * stops all CPUs but the current one:
 */
extern void smp_send_stop(void);

/*
 * sends a 'reschedule' event to another CPU:
 */
extern void smp_send_reschedule(int cpu);


/*
 * Prepare machine for booting other CPUs.
 */
extern void smp_prepare_cpus(unsigned int max_cpus);

/*
 * Bring a CPU up
 */
extern int __cpu_up(unsigned int cpunum, struct task_struct *tidle);

/*
 * Final polishing of CPUs
 */
extern void smp_cpus_done(unsigned int max_cpus);

/*
 * Call a function on all other processors
 */
void smp_call_function(smp_call_func_t func, void *info, int wait);
void smp_call_function_many(const struct cpumask *mask,
                            smp_call_func_t func, void *info, bool wait);

int smp_call_function_any(const struct cpumask *mask,
                          smp_call_func_t func, void *info, int wait);
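
/*
 * Example (editor's sketch): smp_call_function_any() picks one CPU
 * from the mask, preferring the current CPU to avoid an IPI, and here
 * waits for the handler to finish:
 *
 *      int err = smp_call_function_any(cpu_online_mask, example_handler,
 *                                      NULL, 1);
 */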

void kick_all_cpus_sync(void);
void wake_up_all_idle_cpus(void);

/*
 * Generic and arch helpers
 */
void __init call_function_init(void);
void generic_smp_call_function_single_interrupt(void);
#define generic_smp_call_function_interrupt \
        generic_smp_call_function_single_interrupt

/*
 * Mark the boot cpu "online" so that it can call console drivers in
 * printk() and can access its per-cpu storage.
 */
void smp_prepare_boot_cpu(void);

extern unsigned int setup_max_cpus;
extern void __init setup_nr_cpu_ids(void);
extern void __init smp_init(void);

extern int __boot_cpu_id;

static inline int get_boot_cpu_id(void)
{
        return __boot_cpu_id;
}

#else /* !SMP */

static inline void smp_send_stop(void) { }

/*
 *      These macros fold the SMP functionality into a single CPU system
 */
#define raw_smp_processor_id()                  0
static inline void up_smp_call_function(smp_call_func_t func, void *info)
{
}
#define smp_call_function(func, info, wait) \
                        (up_smp_call_function(func, info))

static inline void smp_send_reschedule(int cpu) { }
#define smp_prepare_boot_cpu()                  do {} while (0)
#define smp_call_function_many(mask, func, info, wait) \
                        (up_smp_call_function(func, info))
static inline void call_function_init(void) { }

static inline int
smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
                      void *info, int wait)
{
        return smp_call_function_single(0, func, info, wait);
}

static inline void kick_all_cpus_sync(void) { }
static inline void wake_up_all_idle_cpus(void) { }

#ifdef CONFIG_UP_LATE_INIT
extern void __init up_late_init(void);
static inline void smp_init(void) { up_late_init(); }
#else
static inline void smp_init(void) { }
#endif

static inline int get_boot_cpu_id(void)
{
        return 0;
}

#endif /* !SMP */

/**
 * raw_smp_processor_id() - get the current (unstable) CPU id
 *
 * For when you know what you are doing and need an unstable
 * CPU id.
 */

/**
 * smp_processor_id() - get the current (stable) CPU id
 *
 * This is the normal accessor to the CPU id and should be used
 * whenever possible.
 *
 * The CPU id is stable when:
 *
 *  - IRQs are disabled;
 *  - preemption is disabled;
 *  - the task is CPU affine.
 *
 * With CONFIG_DEBUG_PREEMPT, we verify these assumptions and WARN
 * when smp_processor_id() is used while the CPU id is not stable.
 */

/*
 * Allow the architecture to differentiate between a stable and unstable read.
 * For example, x86 uses an IRQ-safe asm-volatile read for the unstable but a
 * regular asm read for the stable.
 */
#ifndef __smp_processor_id
#define __smp_processor_id(x) raw_smp_processor_id(x)
#endif

#ifdef CONFIG_DEBUG_PREEMPT
  extern unsigned int debug_smp_processor_id(void);
# define smp_processor_id() debug_smp_processor_id()
#else
# define smp_processor_id() __smp_processor_id()
#endif

#define get_cpu()               ({ preempt_disable(); __smp_processor_id(); })
#define put_cpu()               preempt_enable()
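
/*
 * Example (editor's sketch): get_cpu() disables preemption, so the
 * returned id remains the caller's CPU until the matching put_cpu():
 *
 *      int cpu = get_cpu();
 *      pr_info("running on CPU %d\n", cpu);    // id is stable here
 *      put_cpu();
 */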

/*
 * Callback to arch code if there's nosmp or maxcpus=0 on the
 * boot command line:
 */
extern void arch_disable_smp_support(void);

extern void arch_enable_nonboot_cpus_begin(void);
extern void arch_enable_nonboot_cpus_end(void);

void smp_setup_processor_id(void);

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par,
                    bool phys);
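
/*
 * Example (editor's sketch): unlike the IPI-based helpers above,
 * smp_call_on_cpu() runs func in process context on the target CPU,
 * so func may sleep; the call returns func's return value:
 *
 *      static int example_enable_feature(void *par)    // hypothetical
 *      {
 *              return 0;
 *      }
 *
 *      int ret = smp_call_on_cpu(0, example_enable_feature, NULL, false);
 */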

/* SMP core functions */
int smpcfd_prepare_cpu(unsigned int cpu);
int smpcfd_dead_cpu(unsigned int cpu);
int smpcfd_dying_cpu(unsigned int cpu);

#endif /* __LINUX_SMP_H */