1
2
3
4
5
6
7
8
9#include <linux/capability.h>
10#include <linux/mm.h>
11#include <linux/file.h>
12#include <linux/kexec.h>
13#include <linux/mutex.h>
14#include <linux/list.h>
15#include <linux/syscalls.h>
16#include <linux/vmalloc.h>
17#include <linux/slab.h>
18#include <linux/security.h>
19
20#include "kexec_internal.h"
21
22static int copy_user_segment_list(struct kimage *image,
23 unsigned long nr_segments,
24 struct kexec_segment __user *segments)
25{
26 int ret;
27 size_t segment_bytes;
28
29
30 image->nr_segments = nr_segments;
31 segment_bytes = nr_segments * sizeof(*segments);
32 ret = copy_from_user(image->segment, segments, segment_bytes);
33 if (ret)
34 ret = -EFAULT;
35
36 return ret;
37}
38
/*
 * Allocate and initialize a kimage for the kexec_load() path.
 *
 * @rimage:      out parameter; receives the new image on success.
 * @entry:       entry point of the image to be loaded.
 * @nr_segments: number of entries in @segments (bounded by the caller).
 * @segments:    user-space array describing the segments to load.
 * @flags:       KEXEC_* flags; KEXEC_ON_CRASH selects the crash image path.
 *
 * Returns 0 on success and stores the image in *rimage; returns a
 * negative errno on failure, in which case all partial allocations
 * are released and *rimage is left untouched.
 */
static int kimage_alloc_init(struct kimage **rimage, unsigned long entry,
			     unsigned long nr_segments,
			     struct kexec_segment __user *segments,
			     unsigned long flags)
{
	int ret;
	struct kimage *image;
	bool kexec_on_panic = flags & KEXEC_ON_CRASH;

	if (kexec_on_panic) {
		/* A crash kernel must start inside the reserved crash region. */
		if ((entry < crashk_res.start) || (entry > crashk_res.end))
			return -EADDRNOTAVAIL;
	}

	/* Allocate and initialize the controlling structure. */
	image = do_kimage_alloc_init();
	if (!image)
		return -ENOMEM;

	image->start = entry;

	ret = copy_user_segment_list(image, nr_segments, segments);
	if (ret)
		goto out_free_image;

	ret = sanity_check_segment_list(image);
	if (ret)
		goto out_free_image;

	/* Crash images allocate their control page from the crash region. */
	if (kexec_on_panic) {
		image->control_page = crashk_res.start;
		image->type = KEXEC_TYPE_CRASH;
	}

	/*
	 * Allocate the control code buffer used while switching kernels.
	 * Both allocations below report failure as -ENOMEM, so set it once.
	 */
	ret = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		pr_err("Could not allocate control_code_buffer\n");
		goto out_free_image;
	}

	/* The swap page is only needed for the non-crash (normal) path. */
	if (!kexec_on_panic) {
		image->swap_page = kimage_alloc_control_pages(image, 0);
		if (!image->swap_page) {
			pr_err("Could not allocate swap buffer\n");
			goto out_free_control_pages;
		}
	}

	*rimage = image;
	return 0;
out_free_control_pages:
	kimage_free_page_list(&image->control_pages);
out_free_image:
	kfree(image);
	return ret;
}
104
/*
 * Core of the kexec_load() syscall: load, replace, or unload the image
 * selected by @flags (crash image for KEXEC_ON_CRASH, otherwise the
 * normal kexec image).
 *
 * Called with kexec_mutex held by the syscall wrapper.
 *
 * Returns 0 on success or a negative errno.
 */
static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
		struct kexec_segment __user *segments, unsigned long flags)
{
	struct kimage **dest_image, *image;
	unsigned long i;
	int ret;

	if (flags & KEXEC_ON_CRASH) {
		dest_image = &kexec_crash_image;
		/* Drop write protection before touching the crash image. */
		if (kexec_crash_image)
			arch_kexec_unprotect_crashkres();
	} else {
		dest_image = &kexec_image;
	}

	if (nr_segments == 0) {
		/* nr_segments == 0 means "unload the currently loaded image". */
		kimage_free(xchg(dest_image, NULL));
		return 0;
	}
	if (flags & KEXEC_ON_CRASH) {
		/*
		 * Free the old crash image before loading the new one:
		 * the new image is placed in the same reserved crash
		 * region the old image still occupies.
		 */
		kimage_free(xchg(&kexec_crash_image, NULL));
	}

	ret = kimage_alloc_init(&image, entry, nr_segments, segments, flags);
	if (ret)
		return ret;

	if (flags & KEXEC_PRESERVE_CONTEXT)
		image->preserve_context = 1;

	ret = machine_kexec_prepare(image);
	if (ret)
		goto out;

	for (i = 0; i < nr_segments; i++) {
		ret = kimage_load_segment(image, &image->segment[i]);
		if (ret)
			goto out;
	}

	kimage_terminate(image);

	/* Install the new image; the xchg hands back the old one to free. */
	image = xchg(dest_image, image);

out:
	/* Re-protect the crash region whether the load succeeded or not. */
	if ((flags & KEXEC_ON_CRASH) && kexec_crash_image)
		arch_kexec_protect_crashkres();

	/* On success this frees the old image; on failure, the new one. */
	kimage_free(image);
	return ret;
}
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
186 struct kexec_segment __user *, segments, unsigned long, flags)
187{
188 int result;
189
190
191 if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
192 return -EPERM;
193
194 if (get_securelevel() > 0)
195 return -EPERM;
196
197
198
199
200
201 if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
202 return -EINVAL;
203
204
205 if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
206 ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
207 return -EINVAL;
208
209
210
211
212 if (nr_segments > KEXEC_SEGMENT_MAX)
213 return -EINVAL;
214
215
216
217
218
219
220
221
222
223 if (!mutex_trylock(&kexec_mutex))
224 return -EBUSY;
225
226 result = do_kexec_load(entry, nr_segments, segments, flags);
227
228 mutex_unlock(&kexec_mutex);
229
230 return result;
231}
232
233#ifdef CONFIG_COMPAT
234COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
235 compat_ulong_t, nr_segments,
236 struct compat_kexec_segment __user *, segments,
237 compat_ulong_t, flags)
238{
239 struct compat_kexec_segment in;
240 struct kexec_segment out, __user *ksegments;
241 unsigned long i, result;
242
243
244
245
246 if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
247 return -EINVAL;
248
249 if (nr_segments > KEXEC_SEGMENT_MAX)
250 return -EINVAL;
251
252 ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
253 for (i = 0; i < nr_segments; i++) {
254 result = copy_from_user(&in, &segments[i], sizeof(in));
255 if (result)
256 return -EFAULT;
257
258 out.buf = compat_ptr(in.buf);
259 out.bufsz = in.bufsz;
260 out.mem = in.mem;
261 out.memsz = in.memsz;
262
263 result = copy_to_user(&ksegments[i], &out, sizeof(out));
264 if (result)
265 return -EFAULT;
266 }
267
268 return sys_kexec_load(entry, nr_segments, ksegments, flags);
269}
270#endif
271