/* arch/powerpc/include/asm/smp.h */
   1/* SPDX-License-Identifier: GPL-2.0-or-later */
   2/* 
   3 * smp.h: PowerPC-specific SMP code.
   4 *
   5 * Original was a copy of sparc smp.h.  Now heavily modified
   6 * for PPC.
   7 *
   8 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
   9 * Copyright (C) 1996-2001 Cort Dougan <cort@fsmlabs.com>
  10 */
  11
  12#ifndef _ASM_POWERPC_SMP_H
  13#define _ASM_POWERPC_SMP_H
  14#ifdef __KERNEL__
  15
  16#include <linux/threads.h>
  17#include <linux/cpumask.h>
  18#include <linux/kernel.h>
  19#include <linux/irqreturn.h>
  20
  21#ifndef __ASSEMBLY__
  22
  23#ifdef CONFIG_PPC64
  24#include <asm/paca.h>
  25#endif
  26#include <asm/percpu.h>
  27
  28extern int boot_cpuid;
  29extern int spinning_secondaries;
  30extern u32 *cpu_to_phys_id;
  31extern bool coregroup_enabled;
  32
  33extern int cpu_to_chip_id(int cpu);
  34
  35#ifdef CONFIG_SMP
  36
/*
 * Platform-specific SMP callbacks, installed through the global
 * "smp_ops" pointer declared later in this header.  The hook names
 * follow the generic CPU bring-up/hotplug flow; see the callers in
 * arch/powerpc/kernel/smp.c for the exact contracts — TODO confirm
 * which hooks may be left NULL.
 */
struct smp_ops_t {
	/* Deliver IPI message @msg to @cpu. */
	void  (*message_pass)(int cpu, int msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
	/* Ring the single muxed IPI on @cpu (message demuxed on receipt). */
	void  (*cause_ipi)(int cpu);
#endif
	/* Raise an NMI-level IPI on @cpu. */
	int   (*cause_nmi_ipi)(int cpu);
	/* Probe/initialise the IPI mechanism at boot. */
	void  (*probe)(void);
	/* Bring-up path for secondary CPU @nr. */
	int   (*kick_cpu)(int nr);
	int   (*prepare_cpu)(int nr);
	void  (*setup_cpu)(int nr);
	/* Called once after all CPUs have been brought up. */
	void  (*bringup_done)(void);
	/* Timebase synchronisation pair between a new and a running CPU. */
	void  (*take_timebase)(void);
	void  (*give_timebase)(void);
	/* CPU hotplug offline path. */
	int   (*cpu_disable)(void);
	void  (*cpu_die)(unsigned int nr);
	/* Whether CPU @nr may be brought online — confirm sense in smp.c. */
	int   (*cpu_bootable)(unsigned int nr);
#ifdef CONFIG_HOTPLUG_CPU
	/* Executed on a CPU that is taking itself offline. */
	void  (*cpu_offline_self)(void);
#endif
};
  57
  58extern int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
  59extern int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
  60extern void smp_send_debugger_break(void);
  61extern void start_secondary_resume(void);
  62extern void smp_generic_give_timebase(void);
  63extern void smp_generic_take_timebase(void);
  64
  65DECLARE_PER_CPU(unsigned int, cpu_pvr);
  66
  67#ifdef CONFIG_HOTPLUG_CPU
  68int generic_cpu_disable(void);
  69void generic_cpu_die(unsigned int cpu);
  70void generic_set_cpu_dead(unsigned int cpu);
  71void generic_set_cpu_up(unsigned int cpu);
  72int generic_check_cpu_restart(unsigned int cpu);
  73int is_cpu_dead(unsigned int cpu);
  74#else
  75#define generic_set_cpu_up(i)   do { } while (0)
  76#endif
  77
  78#ifdef CONFIG_PPC64
  79#define raw_smp_processor_id()  (local_paca->paca_index)
  80#define hard_smp_processor_id() (get_paca()->hw_cpu_id)
  81#else
  82/* 32-bit */
  83extern int smp_hw_index[];
  84
  85/*
  86 * This is particularly ugly: it appears we can't actually get the definition
  87 * of task_struct here, but we need access to the CPU this task is running on.
  88 * Instead of using task_struct we're using _TASK_CPU which is extracted from
  89 * asm-offsets.h by kbuild to get the current processor ID.
  90 *
  91 * This also needs to be safeguarded when building asm-offsets.s because at
  92 * that time _TASK_CPU is not defined yet. It could have been guarded by
  93 * _TASK_CPU itself, but we want the build to fail if _TASK_CPU is missing
  94 * when building something else than asm-offsets.s
  95 */
  96#ifdef GENERATING_ASM_OFFSETS
  97#define raw_smp_processor_id()          (0)
  98#else
  99#define raw_smp_processor_id()          (*(unsigned int *)((void *)current + _TASK_CPU))
 100#endif
 101#define hard_smp_processor_id()         (smp_hw_index[smp_processor_id()])
 102
 103static inline int get_hard_smp_processor_id(int cpu)
 104{
 105        return smp_hw_index[cpu];
 106}
 107
 108static inline void set_hard_smp_processor_id(int cpu, int phys)
 109{
 110        smp_hw_index[cpu] = phys;
 111}
 112#endif
 113
 114DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
 115DECLARE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
 116DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
 117DECLARE_PER_CPU(cpumask_var_t, cpu_smallcore_map);
 118
 119static inline struct cpumask *cpu_sibling_mask(int cpu)
 120{
 121        return per_cpu(cpu_sibling_map, cpu);
 122}
 123
 124static inline struct cpumask *cpu_l2_cache_mask(int cpu)
 125{
 126        return per_cpu(cpu_l2_cache_map, cpu);
 127}
 128
 129static inline struct cpumask *cpu_smallcore_mask(int cpu)
 130{
 131        return per_cpu(cpu_smallcore_map, cpu);
 132}
 133
 134extern int cpu_to_core_id(int cpu);
 135
 136extern bool has_big_cores;
 137extern bool thread_group_shares_l2;
 138
 139#define cpu_smt_mask cpu_smt_mask
 140#ifdef CONFIG_SCHED_SMT
 141static inline const struct cpumask *cpu_smt_mask(int cpu)
 142{
 143        if (has_big_cores)
 144                return per_cpu(cpu_smallcore_map, cpu);
 145
 146        return per_cpu(cpu_sibling_map, cpu);
 147}
 148#endif /* CONFIG_SCHED_SMT */
 149
 150/* Since OpenPIC has only 4 IPIs, we use slightly different message numbers.
 151 *
 152 * Make sure this matches openpic_request_IPIs in open_pic.c, or what shows up
 153 * in /proc/interrupts will be wrong!!! --Troy */
 154#define PPC_MSG_CALL_FUNCTION   0
 155#define PPC_MSG_RESCHEDULE      1
 156#define PPC_MSG_TICK_BROADCAST  2
 157#define PPC_MSG_NMI_IPI         3
 158
 159/* This is only used by the powernv kernel */
 160#define PPC_MSG_RM_HOST_ACTION  4
 161
 162#define NMI_IPI_ALL_OTHERS              -2
 163
 164#ifdef CONFIG_NMI_IPI
 165extern int smp_handle_nmi_ipi(struct pt_regs *regs);
 166#else
/* !CONFIG_NMI_IPI stub: nothing to handle, always returns 0. */
static inline int smp_handle_nmi_ipi(struct pt_regs *regs) { return 0; }
 168#endif
 169
 170/* for irq controllers that have dedicated ipis per message (4) */
 171extern int smp_request_message_ipi(int virq, int message);
 172extern const char *smp_ipi_name[];
 173
 174/* for irq controllers with only a single ipi */
 175extern void smp_muxed_ipi_message_pass(int cpu, int msg);
 176extern void smp_muxed_ipi_set_message(int cpu, int msg);
 177extern irqreturn_t smp_ipi_demux(void);
 178extern irqreturn_t smp_ipi_demux_relaxed(void);
 179
 180void smp_init_pSeries(void);
 181void smp_init_cell(void);
 182void smp_setup_cpu_maps(void);
 183
 184extern int __cpu_disable(void);
 185extern void __cpu_die(unsigned int cpu);
 186
 187#else
 188/* for UP */
 189#define hard_smp_processor_id()         get_hard_smp_processor_id(0)
 190#define smp_setup_cpu_maps()
 191#define thread_group_shares_l2  0
 192static inline void inhibit_secondary_onlining(void) {}
 193static inline void uninhibit_secondary_onlining(void) {}
 194static inline const struct cpumask *cpu_sibling_mask(int cpu)
 195{
 196        return cpumask_of(cpu);
 197}
 198
 199static inline const struct cpumask *cpu_smallcore_mask(int cpu)
 200{
 201        return cpumask_of(cpu);
 202}
 203
 204static inline const struct cpumask *cpu_l2_cache_mask(int cpu)
 205{
 206        return cpumask_of(cpu);
 207}
 208#endif /* CONFIG_SMP */
 209
 210#ifdef CONFIG_PPC64
 211static inline int get_hard_smp_processor_id(int cpu)
 212{
 213        return paca_ptrs[cpu]->hw_cpu_id;
 214}
 215
/* Record @phys as CPU @cpu's hardware id in its paca. */
static inline void set_hard_smp_processor_id(int cpu, int phys)
{
	paca_ptrs[cpu]->hw_cpu_id = phys;
}
 220#else
 221/* 32-bit */
 222#ifndef CONFIG_SMP
 223extern int boot_cpuid_phys;
/* UP 32-bit: only the boot CPU exists, so @cpu is ignored. */
static inline int get_hard_smp_processor_id(int cpu)
{
	return boot_cpuid_phys;
}
 228
/* UP 32-bit: store the boot CPU's physical id; @cpu is ignored. */
static inline void set_hard_smp_processor_id(int cpu, int phys)
{
	boot_cpuid_phys = phys;
}
 233#endif /* !CONFIG_SMP */
 234#endif /* !CONFIG_PPC64 */
 235
 236#if defined(CONFIG_PPC64) && (defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE))
 237extern void smp_release_cpus(void);
 238#else
 239static inline void smp_release_cpus(void) { };
 240#endif
 241
extern int smt_enabled_at_boot;

extern void smp_mpic_probe(void);
extern void smp_mpic_setup_cpu(int cpu);
extern int smp_generic_kick_cpu(int nr);
extern int smp_generic_cpu_bootable(unsigned int nr);


/*
 * NOTE(review): also declared inside the CONFIG_SMP block above;
 * repeated here, apparently so !CONFIG_SMP builds see them too —
 * not removable duplicates.
 */
extern void smp_generic_give_timebase(void);
extern void smp_generic_take_timebase(void);

extern struct smp_ops_t *smp_ops;
 254
 255extern void arch_send_call_function_single_ipi(int cpu);
 256extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 257
 258/* Definitions relative to the secondary CPU spin loop
 259 * and entry point. Not all of them exist on both 32 and
 260 * 64-bit but defining them all here doesn't harm
 261 */
 262extern void generic_secondary_smp_init(void);
 263extern unsigned long __secondary_hold_spinloop;
 264extern unsigned long __secondary_hold_acknowledge;
 265extern char __secondary_hold;
 266extern unsigned int booting_thread_hwid;
 267
 268extern void __early_start(void);
 269#endif /* __ASSEMBLY__ */
 270
 271#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_SMP_H */
 273