/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SMP_H
#define _ASM_X86_SMP_H
#ifndef __ASSEMBLY__
#include <linux/cpumask.h>
#include <asm/percpu.h>

/*
 * We need the APIC definitions automatically as part of 'smp.h'
 */
#ifdef CONFIG_X86_LOCAL_APIC
# include <asm/mpspec.h>
# include <asm/apic.h>
# ifdef CONFIG_X86_IO_APIC
#  include <asm/io_apic.h>
# endif
#endif
#include <asm/thread_info.h>
#include <asm/cpumask.h>

extern int smp_num_siblings;
extern unsigned int num_processors;

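/*
 * Per-CPU topology masks maintained by the SMP boot code:
 * sibling = SMT threads within a core, core = threads within a
 * physical package, die = threads within a die.
 */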
DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_die_map);

DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);

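/* CPUs sharing the last-level cache with @cpu */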
static inline struct cpumask *cpu_llc_shared_mask(int cpu)
{
	return per_cpu(cpu_llc_shared_map, cpu);
}

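/*
 * CPU-number -> APIC/ACPI ID mappings; the "early" per-CPU variants
 * are usable before the per-CPU areas are set up.
 */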
DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid);
DECLARE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_acpiid);
DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid);
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
DECLARE_EARLY_PER_CPU_READ_MOSTLY(int, x86_cpu_to_logical_apicid);
#endif

struct task_struct;

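/*
 * The SMP operations table: bare metal uses the native_*
 * implementations below, while paravirtualized guests such as Xen
 * install their own hooks instead.
 */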
struct smp_ops {
	void (*smp_prepare_boot_cpu)(void);
	void (*smp_prepare_cpus)(unsigned max_cpus);
	void (*smp_cpus_done)(unsigned max_cpus);

	void (*stop_other_cpus)(int wait);
	void (*crash_stop_other_cpus)(void);
	void (*smp_send_reschedule)(int cpu);

	int (*cpu_up)(unsigned cpu, struct task_struct *tidle);
	int (*cpu_disable)(void);
	void (*cpu_die)(unsigned int cpu);
	void (*play_dead)(void);

	void (*send_call_func_ipi)(const struct cpumask *mask);
	void (*send_call_func_single_ipi)(int cpu);
};

/* Globals due to paravirt */
extern void set_cpu_sibling_map(int cpu);

#ifdef CONFIG_SMP
extern struct smp_ops smp_ops;

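/*
 * Wrappers called by generic SMP and CPU-hotplug code, all dispatching
 * through smp_ops.  Note that smp_send_stop() does not wait for the
 * other CPUs to stop, while stop_other_cpus() does.
 */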
static inline void smp_send_stop(void)
{
	smp_ops.stop_other_cpus(0);
}

static inline void stop_other_cpus(void)
{
	smp_ops.stop_other_cpus(1);
}

static inline void smp_prepare_boot_cpu(void)
{
	smp_ops.smp_prepare_boot_cpu();
}

static inline void smp_prepare_cpus(unsigned int max_cpus)
{
	smp_ops.smp_prepare_cpus(max_cpus);
}

static inline void smp_cpus_done(unsigned int max_cpus)
{
	smp_ops.smp_cpus_done(max_cpus);
}

static inline int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	return smp_ops.cpu_up(cpu, tidle);
}

static inline int __cpu_disable(void)
{
	return smp_ops.cpu_disable();
}

static inline void __cpu_die(unsigned int cpu)
{
	smp_ops.cpu_die(cpu);
}

static inline void play_dead(void)
{
	smp_ops.play_dead();
}

static inline void smp_send_reschedule(int cpu)
{
	smp_ops.smp_send_reschedule(cpu);
}

static inline void arch_send_call_function_single_ipi(int cpu)
{
	smp_ops.send_call_func_single_ipi(cpu);
}

static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_ops.send_call_func_ipi(mask);
}

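/* Default (bare metal) implementations used to populate smp_ops */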
void cpu_disable_common(void);
void native_smp_prepare_boot_cpu(void);
void native_smp_prepare_cpus(unsigned int max_cpus);
void calculate_max_logical_packages(void);
void native_smp_cpus_done(unsigned int max_cpus);
int common_cpu_up(unsigned int cpunum, struct task_struct *tidle);
int native_cpu_up(unsigned int cpunum, struct task_struct *tidle);
int native_cpu_disable(void);
int common_cpu_die(unsigned int cpu);
void native_cpu_die(unsigned int cpu);
void hlt_play_dead(void);
void native_play_dead(void);
void play_dead_common(void);
void wbinvd_on_cpu(int cpu);
int wbinvd_on_all_cpus(void);
void cond_wakeup_cpu0(void);

void native_send_call_func_ipi(const struct cpumask *mask);
void native_send_call_func_single_ipi(int cpu);
void x86_idle_thread_init(unsigned int cpu, struct task_struct *idle);

void smp_store_boot_cpu_info(void);
void smp_store_cpu_info(int id);

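/* Entry points for the reschedule, call-function and reboot IPI vectors */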
asmlinkage __visible void smp_reboot_interrupt(void);
__visible void smp_reschedule_interrupt(struct pt_regs *regs);
__visible void smp_call_function_interrupt(struct pt_regs *regs);
__visible void smp_call_function_single_interrupt(struct pt_regs *r);

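/* Physical APIC ID and ACPI processor UID of a given CPU */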
#define cpu_physical_id(cpu)	per_cpu(x86_cpu_to_apicid, cpu)
#define cpu_acpi_id(cpu)	per_cpu(x86_cpu_to_acpiid, cpu)

/*
 * This function is needed by all SMP systems. It must _always_ be valid
 * from the initial startup. We map APIC_BASE very early in page_setup(),
 * so this is correct in the x86 case.
 */
#define raw_smp_processor_id()	this_cpu_read(cpu_number)
#define __smp_processor_id()	__this_cpu_read(cpu_number)

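/*
 * The 32-bit variant resolves the CPU number via the APIC ID, so it
 * remains usable in crash/NMI context; on 64-bit smp_processor_id()
 * itself is safe to use there.
 */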
#ifdef CONFIG_X86_32
extern int safe_smp_processor_id(void);
#else
# define safe_smp_processor_id()	smp_processor_id()
#endif

#else /* !CONFIG_SMP */
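/* On UP, cache flushes only ever target the one local CPU */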
#define wbinvd_on_cpu(cpu)	wbinvd()
static inline int wbinvd_on_all_cpus(void)
{
	wbinvd();
	return 0;
}
#endif /* !CONFIG_SMP */

extern unsigned disabled_cpus;

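/* Read the current CPU's APIC ID directly from the local APIC */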
#ifdef CONFIG_X86_LOCAL_APIC
extern int hard_smp_processor_id(void);

#else /* CONFIG_X86_LOCAL_APIC */
#define hard_smp_processor_id()	0
#endif /* CONFIG_X86_LOCAL_APIC */

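/* Boot-time self test of the NMI IPI path */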
#ifdef CONFIG_DEBUG_NMI_SELFTEST
extern void nmi_selftest(void);
#else
#define nmi_selftest() do { } while (0)
#endif

#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_SMP_H */