// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDSO implementations.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/cache.h>
#include <linux/clocksource.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/time_namespace.h>
#include <linux/timekeeper_internal.h>
#include <linux/vmalloc.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>

#include <asm/cacheflush.h>
#include <asm/signal32.h>
#include <asm/vdso.h>

extern char vdso_start[], vdso_end[];
extern char vdso32_start[], vdso32_end[];

enum vdso_abi {
	VDSO_ABI_AA64,
	VDSO_ABI_AA32,
};

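/*
 * The vvar pages of a mapping, in pgoff order. The layout must agree with
 * __VVAR_PAGES, which the BUILD_BUG_ON() in __setup_additional_pages()
 * enforces.
 */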
enum vvar_pages {
	VVAR_DATA_PAGE_OFFSET,
	VVAR_TIMENS_PAGE_OFFSET,
	VVAR_NR_PAGES,
};

struct vdso_abi_info {
	const char *name;
	const char *vdso_code_start;
	const char *vdso_code_end;
	unsigned long vdso_pages;
	/* Data Mapping */
	struct vm_special_mapping *dm;
	/* Code Mapping */
	struct vm_special_mapping *cm;
};

static struct vdso_abi_info vdso_info[] __ro_after_init = {
	[VDSO_ABI_AA64] = {
		.name = "vdso",
		.vdso_code_start = vdso_start,
		.vdso_code_end = vdso_end,
	},
#ifdef CONFIG_COMPAT_VDSO
	[VDSO_ABI_AA32] = {
		.name = "vdso32",
		.vdso_code_start = vdso32_start,
		.vdso_code_end = vdso32_end,
	},
#endif /* CONFIG_COMPAT_VDSO */
};

/*
 * The vDSO data page.
 */
static union {
	struct vdso_data	data[CS_BASES];
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = vdso_data_store.data;

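/*
 * Userspace may move the [vdso] VMA (CRIU does this when restoring a
 * checkpoint, for example); track the new base so that signal return
 * trampolines keep working after the move.
 */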
static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	current->mm->context.vdso = (void *)new_vma->vm_start;

	return 0;
}

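/*
 * Validate the vDSO image embedded in the kernel and build the page array
 * that backs its code mapping. Called once per ABI at boot.
 */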
static int __vdso_init(enum vdso_abi abi)
{
	int i;
	struct page **vdso_pagelist;
	unsigned long pfn;

	if (memcmp(vdso_info[abi].vdso_code_start, "\177ELF", 4)) {
		pr_err("vDSO is not a valid ELF object!\n");
		return -EINVAL;
	}

	vdso_info[abi].vdso_pages = (
		vdso_info[abi].vdso_code_end -
		vdso_info[abi].vdso_code_start) >>
		PAGE_SHIFT;

	vdso_pagelist = kcalloc(vdso_info[abi].vdso_pages,
				sizeof(struct page *),
				GFP_KERNEL);
	if (vdso_pagelist == NULL)
		return -ENOMEM;

	/* Grab the vDSO code pages. */
	pfn = sym_to_pfn(vdso_info[abi].vdso_code_start);

	for (i = 0; i < vdso_info[abi].vdso_pages; i++)
		vdso_pagelist[i] = pfn_to_page(pfn + i);

	vdso_info[abi].cm->pages = vdso_pagelist;

	return 0;
}

#ifdef CONFIG_TIME_NS
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page);
}

/*
 * The vvar mapping contains data for a specific time namespace, so when a
 * task changes namespace we must unmap its vvar data for the old namespace.
 * Subsequent faults will map in data for the new namespace.
 *
 * For more details see timens_setup_vdso_data().
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;

	mmap_read_lock(mm);

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long size = vma->vm_end - vma->vm_start;

		if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA64].dm))
			zap_page_range(vma, vma->vm_start, size);
#ifdef CONFIG_COMPAT_VDSO
		if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA32].dm))
			zap_page_range(vma, vma->vm_start, size);
#endif
	}

	mmap_read_unlock(mm);
	return 0;
}

static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	if (likely(vma->vm_mm == current->mm))
		return current->nsproxy->time_ns->vvar_page;

	/*
	 * VM_PFNMAP | VM_IO protect .fault() handler from being called
	 * through interfaces like /proc/$pid/mem or
	 * process_vm_{readv,writev}() as long as there's no .access()
	 * in special_mapping_vmops.
	 * For more details check_vma_flags() and __access_remote_vm().
	 */
	WARN(1, "vvar_page accessed remotely");

	return NULL;
}
#else
static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	return NULL;
}
#endif

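/*
 * Fault handler for the [vvar] mapping: pick the backing page from the
 * fault offset, substituting the time-namespace page where appropriate.
 */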
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *timens_page = find_timens_vvar_page(vma);
	unsigned long pfn;

	switch (vmf->pgoff) {
	case VVAR_DATA_PAGE_OFFSET:
		if (timens_page)
			pfn = page_to_pfn(timens_page);
		else
			pfn = sym_to_pfn(vdso_data);
		break;
#ifdef CONFIG_TIME_NS
	case VVAR_TIMENS_PAGE_OFFSET:
		/*
		 * If a task belongs to a time namespace then a namespace
		 * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
		 * the real VVAR page is mapped with the
		 * VVAR_TIMENS_PAGE_OFFSET offset.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (!timens_page)
			return VM_FAULT_SIGBUS;
		pfn = sym_to_pfn(vdso_data);
		break;
#endif /* CONFIG_TIME_NS */
	default:
		return VM_FAULT_SIGBUS;
	}

	return vmf_insert_pfn(vma, vmf->address, pfn);
}

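/*
 * Map the vvar data pages and then the vDSO code for the given ABI into
 * @mm. On success, mm->context.vdso points at the base of the vDSO code
 * mapping.
 */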
static int __setup_additional_pages(enum vdso_abi abi,
				    struct mm_struct *mm,
				    struct linux_binprm *bprm,
				    int uses_interp)
{
	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
	unsigned long gp_flags = 0;
	void *ret;

	BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);

	vdso_text_len = vdso_info[abi].vdso_pages << PAGE_SHIFT;
	/* Be sure to map the data page. */
	vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE;

	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = ERR_PTR(vdso_base);
		goto up_fail;
	}

	ret = _install_special_mapping(mm, vdso_base, VVAR_NR_PAGES * PAGE_SIZE,
				       VM_READ|VM_MAYREAD|VM_PFNMAP,
				       vdso_info[abi].dm);
	if (IS_ERR(ret))
		goto up_fail;

	if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && system_supports_bti())
		gp_flags = VM_ARM64_BTI;

	vdso_base += VVAR_NR_PAGES * PAGE_SIZE;
	mm->context.vdso = (void *)vdso_base;
	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
				       VM_READ|VM_EXEC|gp_flags|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_info[abi].cm);
	if (IS_ERR(ret))
		goto up_fail;

	return 0;

up_fail:
	mm->context.vdso = NULL;
	return PTR_ERR(ret);
}

#ifdef CONFIG_COMPAT
/*
 * Create and map the vectors page for AArch32 tasks.
 */
enum aarch32_map {
	AA32_MAP_VECTORS,	/* kuser helpers */
	AA32_MAP_SIGPAGE,
	AA32_MAP_VVAR,
	AA32_MAP_VDSO,
};

static struct page *aarch32_vectors_page __ro_after_init;
static struct page *aarch32_sig_page __ro_after_init;

static struct vm_special_mapping aarch32_vdso_maps[] = {
	[AA32_MAP_VECTORS] = {
		.name	= "[vectors]",	/* ABI */
		.pages	= &aarch32_vectors_page,
	},
	[AA32_MAP_SIGPAGE] = {
		.name	= "[sigpage]",	/* ABI */
		.pages	= &aarch32_sig_page,
	},
	[AA32_MAP_VVAR] = {
		.name = "[vvar]",
		.fault = vvar_fault,
	},
	[AA32_MAP_VDSO] = {
		.name = "[vdso]",
		.mremap = vdso_mremap,
	},
};

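/*
 * Copy the kuser helpers into a zeroed page, placed so that they end at
 * offset 0x1000, matching their fixed AArch32 addresses below 0xffff1000.
 */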
static int aarch32_alloc_kuser_vdso_page(void)
{
	extern char __kuser_helper_start[], __kuser_helper_end[];
	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
	unsigned long vdso_page;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	vdso_page = get_zeroed_page(GFP_ATOMIC);
	if (!vdso_page)
		return -ENOMEM;

	memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
	       kuser_sz);
	aarch32_vectors_page = virt_to_page(vdso_page);
	flush_dcache_page(aarch32_vectors_page);
	return 0;
}

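/*
 * Allocate the AArch32 signal return page and copy in the sigreturn
 * trampolines.
 */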
static int aarch32_alloc_sigpage(void)
{
	extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
	int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
	unsigned long sigpage;

	sigpage = get_zeroed_page(GFP_ATOMIC);
	if (!sigpage)
		return -ENOMEM;

	memcpy((void *)sigpage, __aarch32_sigret_code_start, sigret_sz);
	aarch32_sig_page = virt_to_page(sigpage);
	flush_dcache_page(aarch32_sig_page);
	return 0;
}

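/*
 * Wire up the compat vvar/vdso special mappings and initialise the compat
 * vDSO image, if one was built.
 */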
static int __aarch32_alloc_vdso_pages(void)
{
	if (!IS_ENABLED(CONFIG_COMPAT_VDSO))
		return 0;

	vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
	vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];

	return __vdso_init(VDSO_ABI_AA32);
}

static int __init aarch32_alloc_vdso_pages(void)
{
	int ret;

	ret = __aarch32_alloc_vdso_pages();
	if (ret)
		return ret;

	ret = aarch32_alloc_sigpage();
	if (ret)
		return ret;

	return aarch32_alloc_kuser_vdso_page();
}
arch_initcall(aarch32_alloc_vdso_pages);

static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
{
	void *ret;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	/*
	 * Avoid VM_MAYWRITE for compatibility with arch/arm/, where it's
	 * not safe to CoW the page containing the CPU exception vectors.
	 */
	ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
				       VM_READ | VM_EXEC |
				       VM_MAYREAD | VM_MAYEXEC,
				       &aarch32_vdso_maps[AA32_MAP_VECTORS]);

	return PTR_ERR_OR_ZERO(ret);
}

static int aarch32_sigreturn_setup(struct mm_struct *mm)
{
	unsigned long addr;
	void *ret;

	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = ERR_PTR(addr);
		goto out;
	}

	/*
	 * VM_MAYWRITE is required to allow gdb to Copy-on-Write and
	 * set breakpoints.
	 */
	ret = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ | VM_EXEC | VM_MAYREAD |
				       VM_MAYWRITE | VM_MAYEXEC,
				       &aarch32_vdso_maps[AA32_MAP_SIGPAGE]);
	if (IS_ERR(ret))
		goto out;

	mm->context.sigpage = (void *)addr;

out:
	return PTR_ERR_OR_ZERO(ret);
}

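/*
 * Set up the AArch32 user mappings at exec time: the vectors page, the
 * compat vDSO (when enabled) and the sigreturn page.
 */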
int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = aarch32_kuser_helpers_setup(mm);
	if (ret)
		goto out;

	if (IS_ENABLED(CONFIG_COMPAT_VDSO)) {
		ret = __setup_additional_pages(VDSO_ABI_AA32, mm, bprm,
					       uses_interp);
		if (ret)
			goto out;
	}

	ret = aarch32_sigreturn_setup(mm);
out:
	mmap_write_unlock(mm);
	return ret;
}
#endif /* CONFIG_COMPAT */

enum aarch64_map {
	AA64_MAP_VVAR,
	AA64_MAP_VDSO,
};

static struct vm_special_mapping aarch64_vdso_maps[] __ro_after_init = {
	[AA64_MAP_VVAR] = {
		.name = "[vvar]",
		.fault = vvar_fault,
	},
	[AA64_MAP_VDSO] = {
		.name = "[vdso]",
		.mremap = vdso_mremap,
	},
};

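/*
 * Wire up the AArch64 special mappings and initialise the vDSO image at
 * boot.
 */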
static int __init vdso_init(void)
{
	vdso_info[VDSO_ABI_AA64].dm = &aarch64_vdso_maps[AA64_MAP_VVAR];
	vdso_info[VDSO_ABI_AA64].cm = &aarch64_vdso_maps[AA64_MAP_VDSO];

	return __vdso_init(VDSO_ABI_AA64);
}
arch_initcall(vdso_init);

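/*
 * Set up the AArch64 user mappings at exec time: the vvar pages and the
 * vDSO code.
 */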
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = __setup_additional_pages(VDSO_ABI_AA64, mm, bprm, uses_interp);
	mmap_write_unlock(mm);

	return ret;
}