/*
 * kexec for arm64
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/page-flags.h>
#include <linux/smp.h>

#include <asm/cacheflush.h>
#include <asm/cpu_ops.h>
#include <asm/daifflags.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/page.h>

#include "cpu-reset.h"

/* Global variables for the arm64_relocate_new_kernel routine. */
extern const unsigned char arm64_relocate_new_kernel[];
extern const unsigned long arm64_relocate_new_kernel_size;

/**
 * kexec_image_info - For debugging output.
 */
#define kexec_image_info(_i) _kexec_image_info(__func__, __LINE__, _i)
static void _kexec_image_info(const char *func, int line,
	const struct kimage *kimage)
{
	unsigned long i;

	pr_debug("%s:%d:\n", func, line);
	pr_debug("  kexec kimage info:\n");
	pr_debug("    type:        %d\n", kimage->type);
	pr_debug("    start:       %lx\n", kimage->start);
	pr_debug("    head:        %lx\n", kimage->head);
	pr_debug("    nr_segments: %lu\n", kimage->nr_segments);

	for (i = 0; i < kimage->nr_segments; i++) {
		pr_debug("      segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n",
			i,
			kimage->segment[i].mem,
			kimage->segment[i].mem + kimage->segment[i].memsz,
			kimage->segment[i].memsz,
			kimage->segment[i].memsz / PAGE_SIZE);
	}
}

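/*
 * machine_kexec_cleanup - Called by the core kexec code when an image is
 * freed; no arch-specific cleanup is needed on arm64.
 */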
void machine_kexec_cleanup(struct kimage *kimage)
{
	/* Empty routine needed to avoid build errors. */
}

/**
 * machine_kexec_prepare - Prepare for a kexec reboot.
 *
 * Called from the core kexec code when a kernel image is loaded.
 * Forbid loading a kexec kernel if cpus are stuck in the kernel, since that
 * would lead to a panic once we hit machine_kexec().
 */
int machine_kexec_prepare(struct kimage *kimage)
{
	kexec_image_info(kimage);

	if (kimage->type != KEXEC_TYPE_CRASH && cpus_are_stuck_in_kernel()) {
		pr_err("Can't kexec: CPUs are stuck in the kernel.\n");
		return -EBUSY;
	}

	return 0;
}

/**
 * kexec_list_flush - Helper to flush the kimage list and source pages to PoC.
 */
static void kexec_list_flush(struct kimage *kimage)
{
	kimage_entry_t *entry;

	for (entry = &kimage->head; ; entry++) {
		unsigned int flag;
		void *addr;

		/* flush the list entries. */
		__flush_dcache_area(entry, sizeof(kimage_entry_t));

		flag = *entry & IND_FLAGS;
		if (flag == IND_DONE)
			break;

		addr = phys_to_virt(*entry & PAGE_MASK);

		switch (flag) {
		case IND_INDIRECTION:
			/* Set entry point just before the new list page. */
			entry = (kimage_entry_t *)addr - 1;
			break;
		case IND_SOURCE:
			/* flush the source pages. */
			__flush_dcache_area(addr, PAGE_SIZE);
			break;
		case IND_DESTINATION:
			break;
		default:
			BUG();
		}
	}
}

/**
 * kexec_segment_flush - Helper to flush the kimage segments to PoC.
 */
static void kexec_segment_flush(const struct kimage *kimage)
{
	unsigned long i;

	pr_debug("%s:\n", __func__);

	for (i = 0; i < kimage->nr_segments; i++) {
		pr_debug("  segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n",
			i,
			kimage->segment[i].mem,
			kimage->segment[i].mem + kimage->segment[i].memsz,
			kimage->segment[i].memsz,
			kimage->segment[i].memsz / PAGE_SIZE);

		__flush_dcache_area(phys_to_virt(kimage->segment[i].mem),
			kimage->segment[i].memsz);
	}
}

/**
 * machine_kexec - Do the kexec reboot.
 *
 * Called from the core kexec code for a sys_reboot with LINUX_REBOOT_CMD_KEXEC.
 */
void machine_kexec(struct kimage *kimage)
{
	phys_addr_t reboot_code_buffer_phys;
	void *reboot_code_buffer;
	bool in_kexec_crash = (kimage == kexec_crash_image);
	bool stuck_cpus = cpus_are_stuck_in_kernel();

	/*
	 * New cpus may have become stuck_in_kernel after we loaded the image.
	 */
	BUG_ON(!in_kexec_crash && (stuck_cpus || (num_online_cpus() > 1)));
	WARN(in_kexec_crash && (stuck_cpus || smp_crash_stop_failed()),
		"Some CPUs may be stale, kdump will be unreliable.\n");

	reboot_code_buffer_phys = page_to_phys(kimage->control_code_page);
	reboot_code_buffer = phys_to_virt(reboot_code_buffer_phys);

	kexec_image_info(kimage);

	/*
	 * Copy arm64_relocate_new_kernel to the reboot_code_buffer for use
	 * after the kernel is shut down.
	 */
	memcpy(reboot_code_buffer, arm64_relocate_new_kernel,
		arm64_relocate_new_kernel_size);

	/* Flush the reboot_code_buffer in preparation for its execution. */
	__flush_dcache_area(reboot_code_buffer, arm64_relocate_new_kernel_size);

	/*
	 * Although we've killed off the secondary CPUs, we don't update
	 * the online mask if we're handling a crash kernel and consequently
	 * need to avoid flush_icache_range(), which will attempt to IPI
	 * the offline CPUs. Therefore, we must use the __* variant here.
	 */
	__flush_icache_range((uintptr_t)reboot_code_buffer,
			     (uintptr_t)reboot_code_buffer +
			     arm64_relocate_new_kernel_size);

	/* Flush the kimage list and its buffers. */
	kexec_list_flush(kimage);

	/* Flush the new image if already in place. */
	if ((kimage != kexec_crash_image) && (kimage->head & IND_DONE))
		kexec_segment_flush(kimage);

	pr_info("Bye!\n");

	local_daif_mask();

	/*
	 * cpu_soft_restart will shutdown the MMU, disable data caches, then
	 * transfer control to the reboot_code_buffer which contains a copy of
	 * the arm64_relocate_new_kernel routine. arm64_relocate_new_kernel
	 * uses physical addressing to relocate the new image to its final
	 * position and transfers control to the image entry point when the
	 * relocation is complete.
	 * In the kexec case, kimage->start points to purgatory, on the
	 * assumption that the kernel entry point and dtb address have been
	 * embedded in purgatory by kexec-tools.
	 * In the kexec_file case, the kernel is entered directly and the dtb
	 * address is passed explicitly.
	 */
	cpu_soft_restart(reboot_code_buffer_phys, kimage->head, kimage->start,
#ifdef CONFIG_KEXEC_FILE
			 kimage->arch.dtb_mem);
#else
			 0);
#endif

	BUG(); /* Should never get here. */
}

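/*
 * machine_kexec_mask_interrupts - Quiesce the irqchips before entering the
 * crash kernel: deactivate (or EOI), mask and disable every interrupt so no
 * stale interrupt state is carried over.
 */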
static void machine_kexec_mask_interrupts(void)
{
	unsigned int i;
	struct irq_desc *desc;

	for_each_irq_desc(i, desc) {
		struct irq_chip *chip;
		int ret;

		chip = irq_desc_get_chip(desc);
		if (!chip)
			continue;

		/*
		 * First try to remove the active state. If this
		 * fails, try to EOI the interrupt.
		 */
		ret = irq_set_irqchip_state(i, IRQCHIP_STATE_ACTIVE, false);

		if (ret && irqd_irq_inprogress(&desc->irq_data) &&
		    chip->irq_eoi)
			chip->irq_eoi(&desc->irq_data);

		if (chip->irq_mask)
			chip->irq_mask(&desc->irq_data);

		if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data))
			chip->irq_disable(&desc->irq_data);
	}
}

/**
 * machine_crash_shutdown - shutdown non-crashing cpus and save registers
 */
void machine_crash_shutdown(struct pt_regs *regs)
{
	local_irq_disable();

	/* shutdown non-crashing cpus */
	crash_smp_send_stop();

	/* for crashing cpu */
	crash_save_cpu(regs, smp_processor_id());
	machine_kexec_mask_interrupts();

	pr_info("Starting crashdump kernel...\n");
}

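/*
 * arch_kexec_protect_crashkres - Mark the linear-map pages of the loaded
 * crash kernel segments invalid, protecting them from stray writes until
 * kdump time.
 */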
void arch_kexec_protect_crashkres(void)
{
	int i;

	kexec_segment_flush(kexec_crash_image);

	for (i = 0; i < kexec_crash_image->nr_segments; i++)
		set_memory_valid(
			__phys_to_virt(kexec_crash_image->segment[i].mem),
			kexec_crash_image->segment[i].memsz >> PAGE_SHIFT, 0);
}

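/*
 * arch_kexec_unprotect_crashkres - Make the crash kernel segments accessible
 * again by marking their linear-map pages valid.
 */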
void arch_kexec_unprotect_crashkres(void)
{
	int i;

	for (i = 0; i < kexec_crash_image->nr_segments; i++)
		set_memory_valid(
			__phys_to_virt(kexec_crash_image->segment[i].mem),
			kexec_crash_image->segment[i].memsz >> PAGE_SHIFT, 1);
}

#ifdef CONFIG_HIBERNATION
/*
 * To preserve the crash dump kernel image over hibernation, the relevant
 * memory segments must be unprotected before suspend and protected again
 * after resume.
 */
void crash_prepare_suspend(void)
{
	if (kexec_crash_image)
		arch_kexec_unprotect_crashkres();
}

void crash_post_resume(void)
{
	if (kexec_crash_image)
		arch_kexec_protect_crashkres();
}

/*
 * crash_is_nosave
 *
 * Return true only if a page is part of the reserved memory for the crash
 * dump kernel but does not hold any data of the loaded kernel image.
 *
 * Note that all the pages in crash dump kernel memory have been initially
 * marked as Reserved as memory was allocated via memblock_reserve().
 *
 * In hibernation, the pages which are Reserved and yet "nosave" are excluded
 * from the hibernation image. crash_is_nosave() does this check for the crash
 * dump kernel and so reduces the total size of the hibernation image.
 */

bool crash_is_nosave(unsigned long pfn)
{
	int i;
	phys_addr_t addr;

	if (!crashk_res.end)
		return false;

	/* in reserved memory? */
	addr = __pfn_to_phys(pfn);
	if ((addr < crashk_res.start) || (crashk_res.end < addr))
		return false;

	if (!kexec_crash_image)
		return true;

	/* not part of loaded kernel image? */
	for (i = 0; i < kexec_crash_image->nr_segments; i++)
		if (addr >= kexec_crash_image->segment[i].mem &&
		    addr < (kexec_crash_image->segment[i].mem +
			    kexec_crash_image->segment[i].memsz))
			return false;

	return true;
}

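/*
 * crash_free_reserved_phys_range - Free each page in the reserved crash
 * kernel range [begin, end) back to the page allocator.
 */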
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
{
	unsigned long addr;
	struct page *page;

	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		page = phys_to_page(addr);
		free_reserved_page(page);
	}
}
#endif /* CONFIG_HIBERNATION */