1
2
3
4
5
6
7
8
9
10
11#include <linux/kexec.h>
12#include <linux/smp.h>
13#include <linux/thread_info.h>
14#include <linux/init_task.h>
15#include <linux/errno.h>
16#include <linux/kernel.h>
17#include <linux/cpu.h>
18#include <linux/hardirq.h>
19
20#include <asm/page.h>
21#include <asm/current.h>
22#include <asm/machdep.h>
23#include <asm/cacheflush.h>
24#include <asm/firmware.h>
25#include <asm/paca.h>
26#include <asm/mmu.h>
27#include <asm/sections.h>
28#include <asm/prom.h>
29#include <asm/smp.h>
30#include <asm/hw_breakpoint.h>
31#include <asm/asm-prototypes.h>
32
33int default_machine_kexec_prepare(struct kimage *image)
34{
35 int i;
36 unsigned long begin, end;
37 unsigned long low, high;
38 struct device_node *node;
39 const unsigned long *basep;
40 const unsigned int *sizep;
41
42
43
44
45
46
47 for (i = 0; i < image->nr_segments; i++)
48 if (image->segment[i].mem < __pa(_end))
49 return -ETXTBSY;
50
51
52 for_each_node_by_type(node, "pci") {
53 basep = of_get_property(node, "linux,tce-base", NULL);
54 sizep = of_get_property(node, "linux,tce-size", NULL);
55 if (basep == NULL || sizep == NULL)
56 continue;
57
58 low = *basep;
59 high = low + (*sizep);
60
61 for (i = 0; i < image->nr_segments; i++) {
62 begin = image->segment[i].mem;
63 end = begin + image->segment[i].memsz;
64
65 if ((begin < high) && (end > low))
66 return -ETXTBSY;
67 }
68 }
69
70 return 0;
71}
72
73static void copy_segments(unsigned long ind)
74{
75 unsigned long entry;
76 unsigned long *ptr;
77 void *dest;
78 void *addr;
79
80
81
82
83
84
85
86 ptr = NULL;
87 dest = NULL;
88
89 for (entry = ind; !(entry & IND_DONE); entry = *ptr++) {
90 addr = __va(entry & PAGE_MASK);
91
92 switch (entry & IND_FLAGS) {
93 case IND_DESTINATION:
94 dest = addr;
95 break;
96 case IND_INDIRECTION:
97 ptr = addr;
98 break;
99 case IND_SOURCE:
100 copy_page(dest, addr);
101 dest += PAGE_SIZE;
102 }
103 }
104}
105
106void kexec_copy_flush(struct kimage *image)
107{
108 long i, nr_segments = image->nr_segments;
109 struct kexec_segment ranges[KEXEC_SEGMENT_MAX];
110
111
112 memcpy(ranges, image->segment, sizeof(ranges));
113
114
115
116
117
118
119
120 copy_segments(image->head);
121
122
123
124
125
126 for (i = 0; i < nr_segments; i++)
127 flush_icache_range((unsigned long)__va(ranges[i].mem),
128 (unsigned long)__va(ranges[i].mem + ranges[i].memsz));
129}
130
131#ifdef CONFIG_SMP
132
/* Set by the master CPU once every online CPU has reported IRQS_OFF. */
static int kexec_all_irq_disabled = 0;

/*
 * IPI handler run on each secondary CPU to park it for kexec: disable
 * interrupts, advertise KEXEC_STATE_IRQS_OFF in this CPU's paca, spin
 * until the master releases us, then drop into kexec_smp_wait().
 */
static void kexec_smp_down(void *arg)
{
	local_irq_disable();
	hard_irq_disable();

	mb(); /* order the state store vs. the spin on the flag below */
	get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;
	while(kexec_all_irq_disabled == 0)
		cpu_relax();
	mb(); /* don't start teardown until the master has released us */
	hw_breakpoint_disable();

	/* Platform-specific per-CPU shutdown hook; second arg 1 marks
	 * this as a secondary CPU. */
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 1);

	/* Parks this CPU for the handoff — NOTE(review): presumably does
	 * not return; confirm against the assembly implementation. */
	kexec_smp_wait();

}
156
/*
 * Busy-wait until every other online CPU's paca kexec_state has reached
 * @wait_state, logging once per CPU being waited on.
 *
 * Disables preemption via get_cpu(); the matching put_cpu() is issued by
 * the caller (kexec_prepare_cpus()), not here.
 */
static void kexec_prepare_cpus_wait(int wait_state)
{
	int my_cpu, i, notified=-1;

	hw_breakpoint_disable();
	my_cpu = get_cpu();

	for_each_online_cpu(i) {
		if (i == my_cpu)
			continue;

		while (paca_ptrs[i]->kexec_state < wait_state) {
			barrier(); /* force kexec_state to be re-read */
			if (i != notified) {
				printk(KERN_INFO "kexec: waiting for cpu %d "
				       "(physical %d) to enter %i state\n",
				       i, paca_ptrs[i]->hw_cpu_id, wait_state);
				notified = i;
			}
		}
	}
	mb(); /* all CPUs observed in @wait_state before caller proceeds */
}
194
195
196
197
198
199
200
201
202
203
204
205static void wake_offline_cpus(void)
206{
207 int cpu = 0;
208
209 for_each_present_cpu(cpu) {
210 if (!cpu_online(cpu)) {
211 printk(KERN_INFO "kexec: Waking offline cpu %d.\n",
212 cpu);
213 WARN_ON(cpu_up(cpu));
214 }
215 }
216}
217
/*
 * Quiesce all CPUs for the kexec handoff (SMP variant).
 *
 * Sequence: wake any offline CPUs, IPI everyone into kexec_smp_down(),
 * disable interrupts locally, wait for all CPUs to report IRQS_OFF,
 * release them (kexec_all_irq_disabled), then wait for them to reach
 * real mode. The statement order here is the synchronization protocol —
 * do not reorder.
 */
static void kexec_prepare_cpus(void)
{
	wake_offline_cpus();
	smp_call_function(kexec_smp_down, NULL, 0); /* no wait: they spin */
	local_irq_disable();
	hard_irq_disable();

	mb(); /* order our own state store like the secondaries do */
	get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;

	kexec_prepare_cpus_wait(KEXEC_STATE_IRQS_OFF);

	/* Releases the secondaries spinning in kexec_smp_down(). */
	kexec_all_irq_disabled = 1;

	/* Secondaries now proceed to kexec_smp_wait(); wait for them. */
	kexec_prepare_cpus_wait(KEXEC_STATE_REAL_MODE);

	/* Platform hook for the master CPU (second arg 0). */
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 0);

	/* Matches the get_cpu() inside kexec_prepare_cpus_wait(). */
	put_cpu();
}
244
245#else
246
/*
 * Quiesce CPUs for the kexec handoff (UP build).
 *
 * Even on a CONFIG_SMP=n kernel, secondary CPUs may be spinning in the
 * firmware/boot holding pattern; smp_release_cpus() moves them somewhere
 * safe before low memory is overwritten. Then run the platform hook and
 * shut interrupts off on this CPU.
 */
static void kexec_prepare_cpus(void)
{
	smp_release_cpus();
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 0);
	local_irq_disable();
	hard_irq_disable();
}
264
265#endif
266
267
268
269
270
271
272
273
274
275
276
277
278
/*
 * Stack used during the switchover. It lives in init-task data, i.e.
 * inside the kernel image, which default_machine_kexec_prepare() has
 * guaranteed no image segment overlaps.
 */
static union thread_union kexec_stack __init_task_data =
	{ };

/*
 * Static paca the master CPU switches to before copying, so that the
 * memory holding the original paca may safely be overwritten
 * (see default_machine_kexec()).
 */
struct paca_struct kexec_paca;

/*
 * Switchover entry point implemented outside this file: switches to
 * @newstack, copies the image (with the MMU off if requested) and jumps
 * to the new kernel at @start. Never returns.
 */
extern void kexec_sequence(void *newstack, unsigned long start,
			   void *image, void *control,
			   void (*clear_all)(void),
			   bool copy_with_mmu_off) __noreturn;
293
294
/*
 * Perform the kexec switchover on the master CPU: quiesce the others,
 * detach from the live per-cpu/thread state, and hand off to the
 * assembly sequence that copies and boots the new kernel. Does not
 * return.
 */
void default_machine_kexec(struct kimage *image)
{
	bool copy_with_mmu_off;

	/*
	 * For a crash (kdump) kexec the other CPUs were already stopped
	 * by the crash path; otherwise quiesce them now.
	 */
	if (!kdump_in_progress())
		kexec_prepare_cpus();

	printk("kexec: Starting switchover sequence.\n");

	/*
	 * We no longer need this thread's normal state: clear its flags
	 * and fake a hardirq context so nothing attempts to sleep or
	 * schedule from here on.
	 */
	current_thread_info()->flags = 0;
	current_thread_info()->preempt_count = HARDIRQ_OFFSET;

	/*
	 * Switch to the static kexec_paca so the memory holding the live
	 * paca can be overwritten by the copy. data_offset is set to an
	 * obvious poison value so a stray per-cpu access after this point
	 * faults recognizably.
	 */
	memcpy(&kexec_paca, get_paca(), sizeof(struct paca_struct));
	kexec_paca.data_offset = 0xedeaddeadeeeeeeeUL;
#ifdef CONFIG_PPC_PSERIES
	kexec_paca.lppaca_ptr = NULL;
#endif
	paca_ptrs[kexec_paca.paca_index] = &kexec_paca;

	setup_paca(&kexec_paca);

	/*
	 * Decide whether the image copy must run with the MMU off:
	 * Book3E keeps it on; otherwise it is off for radix, and for
	 * hash when running bare-metal (neither LPAR nor PS3 LV1
	 * firmware features present).
	 */
#ifdef CONFIG_PPC_BOOK3E
	copy_with_mmu_off = false;
#else
	copy_with_mmu_off = radix_enabled() ||
		!(firmware_has_feature(FW_FEATURE_LPAR) ||
		firmware_has_feature(FW_FEATURE_PS3_LV1));
#endif

	/*
	 * Hand off to assembly: switch stacks, copy, and jump into the
	 * new kernel. __noreturn.
	 */
	kexec_sequence(&kexec_stack, image->start, image,
		       page_address(image->control_code_page),
		       mmu_cleanup_all, copy_with_mmu_off);

}
365
366#ifdef CONFIG_PPC_BOOK3S_64
367
/*
 * Hash-table location/size exported through /chosen by
 * export_htab_values() below. Stored big-endian (cpu_to_be64) so the
 * device-tree consumer reads them correctly regardless of our
 * endianness.
 */
static unsigned long htab_base;
static unsigned long htab_size;

static struct property htab_base_prop = {
	.name = "linux,htab-base",
	.length = sizeof(unsigned long),
	.value = &htab_base,
};

static struct property htab_size_prop = {
	.name = "linux,htab-size",
	.length = sizeof(unsigned long),
	.value = &htab_size,
};
382
383static int __init export_htab_values(void)
384{
385 struct device_node *node;
386
387
388 if (!htab_address)
389 return -ENODEV;
390
391 node = of_find_node_by_path("/chosen");
392 if (!node)
393 return -ENODEV;
394
395
396 of_remove_property(node, of_find_property(node, htab_base_prop.name, NULL));
397 of_remove_property(node, of_find_property(node, htab_size_prop.name, NULL));
398
399 htab_base = cpu_to_be64(__pa(htab_address));
400 of_add_property(node, &htab_base_prop);
401 htab_size = cpu_to_be64(htab_size_bytes);
402 of_add_property(node, &htab_size_prop);
403
404 of_node_put(node);
405 return 0;
406}
407late_initcall(export_htab_values);
408#endif
409