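/*
 * SMP boot support for the tile architecture: set up the secondary
 * cpus ("tiles"), launch them, and bring them online.
 */
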
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
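
/* State of each CPU. */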
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
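
/* Address the messaging code jumps to when a cpu is told to start. */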
unsigned long start_cpu_function_addr;
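
/* Called very early during startup to mark the boot cpu present and online. */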
void __init smp_prepare_boot_cpu(void)
{
        int cpu = smp_processor_id();
        set_cpu_online(cpu, 1);
        set_cpu_present(cpu, 1);
        __get_cpu_var(cpu_state) = CPU_ONLINE;

        init_messaging();
}

static void start_secondary(void);
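
/*
 * Called early in boot to set up boot context for the secondary cpus
 * and launch them.  Each one enters start_secondary(), finishes its
 * early setup, and then naps until __cpu_up() tells it to come online.
 */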
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        long rc;
        int cpu, cpu_count;
        int boot_cpu = smp_processor_id();

        current_thread_info()->cpu = boot_cpu;

        /*
         * Pin this task to the boot cpu while we set up the per-cpu
         * boot variables for the other cpus below.
         */
        rc = sched_setaffinity(current->pid, cpumask_of(boot_cpu));
        if (rc != 0)
                pr_err("Couldn't set init affinity to boot cpu (%ld)\n", rc);

        /* Report any cpus that have been disabled. */
        print_disabled_cpus();

        /*
         * Tell the messaging code where a newly started cpu should
         * jump when it receives the start-cpu message.
         */
        start_cpu_function_addr = (unsigned long) &online_secondary;

        /* Set up a boot context for each secondary processor. */
        cpu_count = 1;
        for (cpu = 0; cpu < NR_CPUS; ++cpu) {
                struct task_struct *idle;

                if (cpu == boot_cpu)
                        continue;

                if (!cpu_possible(cpu)) {
                        /*
                         * Make this processor do nothing on boot.
                         * Note that we pass no stack, so boot_pc must
                         * point at code that never needs one.
                         */
                        per_cpu(boot_sp, cpu) = 0;
                        per_cpu(boot_pc, cpu) = (unsigned long) smp_nap;
                        continue;
                }

                /* Create an idle thread to run start_secondary(). */
                idle = fork_idle(cpu);
                if (IS_ERR(idle))
                        panic("failed fork for CPU %d", cpu);
                idle->thread.pc = (unsigned long) start_secondary;

                /* Make the idle thread the boot thread for this cpu. */
                per_cpu(boot_sp, cpu) = task_ksp0(idle);
                per_cpu(boot_pc, cpu) = idle->thread.pc;

                ++cpu_count;
        }
        BUG_ON(cpu_count > (max_cpus ? max_cpus : 1));

        /* Fire up the other tiles, if any. */
        init_cpu_present(cpu_possible_mask);
        if (cpumask_weight(cpu_present_mask) > 1) {
                mb();   /* make sure the boot context is visible to them */
                hv_start_all_tiles();
        }
}
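
/*
 * Boot temporarily pins init to a single cpu (see smp_prepare_cpus()
 * and smp_cpus_done()); restore the saved affinity once boot is done.
 */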
static __initdata struct cpumask init_affinity;

static __init int reset_init_affinity(void)
{
        long rc = sched_setaffinity(current->pid, &init_affinity);
        if (rc != 0)
                pr_warning("couldn't reset init affinity (%ld)\n", rc);
        return 0;
}
late_initcall(reset_init_affinity);
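
/* Cpus that have reached start_secondary() and are waiting to come online. */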
static struct cpumask cpu_started __cpuinitdata;
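
/*
 * Entry point for a secondary cpu: do the minimal early setup and
 * then nap until the boot cpu tells us to come online.
 */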
static void __cpuinit start_secondary(void)
{
        int cpuid = smp_processor_id();

        /* Set up our per-cpu offset so this cpu's data can be found. */
        set_my_cpu_offset(__per_cpu_offset[cpuid]);

        preempt_disable();

        /* Initialize the current asid for our first page table. */
        __get_cpu_var(current_asid) = min_asid;

        /* Run in init_mm; this thread becomes another user of it. */
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;
        if (current->mm)
                BUG();
        enter_lazy_tlb(&init_mm, current);

        /* Allow messages (e.g. the start-cpu IPI) to be received. */
        init_messaging();
        local_irq_enable();

        /*
         * Mark ourselves started so __cpu_up() can proceed; this must
         * not happen before we are ready to receive messages.
         */
        if (cpumask_test_and_set_cpu(cpuid, &cpu_started)) {
                pr_warning("CPU#%d already started!\n", cpuid);
                for (;;)
                        local_irq_enable();
        }

        smp_nap();
}
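
/*
 * Bring a secondary cpu online; entered via start_cpu_function_addr
 * in response to the MSG_TAG_START_CPU IPI sent by __cpu_up().
 */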
void __cpuinit online_secondary(void)
{
        /* Flush any stale boot-time mappings from this cpu's TLB. */
        local_flush_tlb();

        BUG_ON(in_interrupt());

        /* Make prior initialization visible before we appear online. */
        wmb();

        notify_cpu_starting(smp_processor_id());

        set_cpu_online(smp_processor_id(), 1);
        __get_cpu_var(cpu_state) = CPU_ONLINE;

        /* Set up tile-specific state for this cpu. */
        setup_cpu(0);

        /* Set up the tile-timer clock-event device on this cpu. */
        setup_tile_timer();

        preempt_enable();

        cpu_idle();
}
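
/*
 * Called on the boot cpu to bring one secondary cpu online: wait for
 * it to reach start_secondary(), then send the start-cpu IPI and spin
 * until it marks itself online.
 */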
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
        /* Wait up to ~5 seconds total for cpus to reach start_secondary(). */
        static int timeout;
        for (; !cpumask_test_cpu(cpu, &cpu_started); timeout++) {
                if (timeout >= 50000) {
                        pr_info("skipping unresponsive cpu%d\n", cpu);
                        local_irq_enable();
                        return -EIO;
                }
                udelay(100);
        }

        local_irq_enable();
        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

        /* Unleash the cpu, then wait for it to mark itself online. */
        send_IPI_single(cpu, MSG_TAG_START_CPU);
        while (!cpumask_test_cpu(cpu, cpu_online_mask))
                cpu_relax();
        return 0;
}
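
/* Installed as the start-cpu handler once boot is complete. */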
static void panic_start_cpu(void)
{
        panic("Received a MSG_START_CPU IPI after boot finished.");
}
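
/*
 * Called once all cpus have been brought up: disable further start-cpu
 * IPIs and pin init to a single cpu until reset_init_affinity() runs.
 */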
void __init smp_cpus_done(unsigned int max_cpus)
{
        int cpu, next, rc;

        /* A start-cpu message is no longer legal; panic if one arrives. */
        start_cpu_function_addr = (unsigned long) &panic_start_cpu;

        /* Remember the online cpus so init's affinity can be restored later. */
        cpumask_copy(&init_affinity, cpu_online_mask);

        /*
         * Pin ourselves to the last online cpu for the remainder of boot;
         * reset_init_affinity() undoes this as a late initcall.
         */
        for (cpu = cpumask_first(&init_affinity);
             (next = cpumask_next(cpu, &init_affinity)) < nr_cpu_ids;
             cpu = next)
                ;
        rc = sched_setaffinity(current->pid, cpumask_of(cpu));
        if (rc != 0)
                pr_err("Couldn't set init affinity to cpu %d (%d)\n", cpu, rc);
}