#ifndef _ASM_X86_SMP_H
#define _ASM_X86_SMP_H
#ifndef __ASSEMBLY__
#include <linux/cpumask.h>
#include <asm/percpu.h>

/*
 * We need the APIC definitions automatically as part of 'smp.h'
 */
#ifdef CONFIG_X86_LOCAL_APIC
# include <asm/mpspec.h>
# include <asm/apic.h>
# ifdef CONFIG_X86_IO_APIC
#  include <asm/io_apic.h>
# endif
#endif
#include <asm/thread_info.h>
#include <asm/cpumask.h>

extern int smp_num_siblings;
extern unsigned int num_processors;

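/*
 * CPU topology masks: cpu_sibling_map holds the SMT threads sharing a
 * core, cpu_core_map the cores sharing a physical package.
 */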
DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
/* cpus sharing the last level cache: */
DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);

static inline struct cpumask *cpu_llc_shared_mask(int cpu)
{
	return per_cpu(cpu_llc_shared_map, cpu);
}

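/*
 * Early per-cpu maps from the Linux CPU number to the local APIC ID and
 * the ACPI processor ID; usable before the per-cpu areas are set up.
 */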
DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid);
DECLARE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_acpiid);
DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid);
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
DECLARE_EARLY_PER_CPU_READ_MOSTLY(int, x86_cpu_to_logical_apicid);
#endif

struct task_struct;

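/*
 * The platform's SMP operations.  Bare metal uses the native_*
 * implementations declared below; paravirtualized guests (e.g. Xen)
 * install their own.
 */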
struct smp_ops {
	void (*smp_prepare_boot_cpu)(void);
	void (*smp_prepare_cpus)(unsigned max_cpus);
	void (*smp_cpus_done)(unsigned max_cpus);

	void (*stop_other_cpus)(int wait);
	void (*crash_stop_other_cpus)(void);
	void (*smp_send_reschedule)(int cpu);

	int (*cpu_up)(unsigned cpu, struct task_struct *tidle);
	int (*cpu_disable)(void);
	void (*cpu_die)(unsigned int cpu);
	void (*play_dead)(void);

	void (*send_call_func_ipi)(const struct cpumask *mask);
	void (*send_call_func_single_ipi)(int cpu);
};

/* Globals due to paravirt */
extern void set_cpu_sibling_map(int cpu);

#ifdef CONFIG_SMP
extern struct smp_ops smp_ops;

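/*
 * Thin wrappers called by the generic SMP and CPU hotplug code; they
 * simply dispatch through whatever smp_ops is currently installed.
 */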
static inline void smp_send_stop(void)
{
	smp_ops.stop_other_cpus(0);	/* don't wait for the other CPUs */
}

static inline void stop_other_cpus(void)
{
	smp_ops.stop_other_cpus(1);	/* wait until the other CPUs have stopped */
}

static inline void smp_prepare_boot_cpu(void)
{
	smp_ops.smp_prepare_boot_cpu();
}

static inline void smp_prepare_cpus(unsigned int max_cpus)
{
	smp_ops.smp_prepare_cpus(max_cpus);
}

static inline void smp_cpus_done(unsigned int max_cpus)
{
	smp_ops.smp_cpus_done(max_cpus);
}

static inline int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	return smp_ops.cpu_up(cpu, tidle);
}

static inline int __cpu_disable(void)
{
	return smp_ops.cpu_disable();
}

static inline void __cpu_die(unsigned int cpu)
{
	smp_ops.cpu_die(cpu);
}

static inline void play_dead(void)
{
	smp_ops.play_dead();
}

static inline void smp_send_reschedule(int cpu)
{
	smp_ops.smp_send_reschedule(cpu);
}

static inline void arch_send_call_function_single_ipi(int cpu)
{
	smp_ops.send_call_func_single_ipi(cpu);
}

static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_ops.send_call_func_ipi(mask);
}

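/*
 * Native (bare metal) implementations and the common helpers they share
 * with the paravirt variants.
 */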
void cpu_disable_common(void);
void native_smp_prepare_boot_cpu(void);
void native_smp_prepare_cpus(unsigned int max_cpus);
void native_smp_cpus_done(unsigned int max_cpus);
void common_cpu_up(unsigned int cpunum, struct task_struct *tidle);
int native_cpu_up(unsigned int cpunum, struct task_struct *tidle);
int native_cpu_disable(void);
int common_cpu_die(unsigned int cpu);
void native_cpu_die(unsigned int cpu);
void hlt_play_dead(void);
void native_play_dead(void);
void play_dead_common(void);
void wbinvd_on_cpu(int cpu);
int wbinvd_on_all_cpus(void);

void native_send_call_func_ipi(const struct cpumask *mask);
void native_send_call_func_single_ipi(int cpu);
void x86_idle_thread_init(unsigned int cpu, struct task_struct *idle);

void smp_store_boot_cpu_info(void);
void smp_store_cpu_info(int id);
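/* Translate a Linux CPU number into its APIC ID / ACPI processor ID. */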
#define cpu_physical_id(cpu)	per_cpu(x86_cpu_to_apicid, cpu)
#define cpu_acpi_id(cpu)	per_cpu(x86_cpu_to_acpiid, cpu)

/*
 * This function is needed by all SMP systems. It must _always_ be valid
 * from the entry in the IRQ and APIC code. (verified on both typical
 * uniprocessor x86 and SMP on 64bit)
 */
#define raw_smp_processor_id() (this_cpu_read(cpu_number))

#ifdef CONFIG_X86_32
extern int safe_smp_processor_id(void);
#else
# define safe_smp_processor_id()	smp_processor_id()
#endif

#else /* !CONFIG_SMP */
#define wbinvd_on_cpu(cpu)	wbinvd()
static inline int wbinvd_on_all_cpus(void)
{
	wbinvd();
	return 0;
}
#define smp_num_siblings	1
#endif /* CONFIG_SMP */

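/* Number of CPUs the firmware tables reported as present but disabled. */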
extern unsigned disabled_cpus;

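/* CPU identifiers read directly from the local APIC. */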
#ifdef CONFIG_X86_LOCAL_APIC

#ifndef CONFIG_X86_64
static inline int logical_smp_processor_id(void)
{
	/* Read the logical APIC ID from the local APIC's LDR register. */
	return GET_APIC_LOGICAL_ID(apic_read(APIC_LDR));
}

#endif

extern int hard_smp_processor_id(void);

#else /* CONFIG_X86_LOCAL_APIC */
#define hard_smp_processor_id()	0
#endif /* CONFIG_X86_LOCAL_APIC */

#ifdef CONFIG_DEBUG_NMI_SELFTEST
extern void nmi_selftest(void);
#else
#define nmi_selftest() do { } while (0)
#endif

#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_SMP_H */