/*
 * vDSO support for PowerPC: map the vDSO image and its data page into
 * user processes.
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/memblock.h>
#include <linux/syscalls.h>
#include <linux/time_namespace.h>
#include <vdso/datapage.h>

#include <asm/syscall.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/vdso.h>
#include <asm/vdso_datapage.h>
#include <asm/setup.h>

/* The alignment enforced on the vDSO mapping's base address. */
#define VDSO_ALIGNMENT	(1 << 16)

extern char vdso32_start, vdso32_end;
extern char vdso64_start, vdso64_end;

/*
 * The vDSO data page. It is mapped read-only into every process via the
 * [vvar] mapping and, on ppc64, also carries the legacy systemcfg fields.
 */
static union {
	struct vdso_arch_data	data;
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_arch_data *vdso_data = &vdso_data_store.data;

enum vvar_pages {
	VVAR_DATA_PAGE_OFFSET,
	VVAR_TIMENS_PAGE_OFFSET,
	VVAR_NR_PAGES,
};

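/*
 * mremap() of the vDSO: the mapping may move but must keep its size; record
 * the new location in the mm context so the kernel (e.g. the signal
 * trampoline code) still knows where the vDSO lives.
 */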
static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma,
		       unsigned long text_size)
{
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;

	if (new_size != text_size)
		return -EINVAL;

	current->mm->context.vdso = (void __user *)new_vma->vm_start;

	return 0;
}

static int vdso32_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
{
	return vdso_mremap(sm, new_vma, &vdso32_end - &vdso32_start);
}

static int vdso64_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
{
	return vdso_mremap(sm, new_vma, &vdso64_end - &vdso64_start);
}

static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf);

static struct vm_special_mapping vvar_spec __ro_after_init = {
	.name = "[vvar]",
	.fault = vvar_fault,
};

static struct vm_special_mapping vdso32_spec __ro_after_init = {
	.name = "[vdso]",
	.mremap = vdso32_mremap,
};

static struct vm_special_mapping vdso64_spec __ro_after_init = {
	.name = "[vdso]",
	.mremap = vdso64_mremap,
};

#ifdef CONFIG_TIME_NS
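/*
 * Give the generic time namespace code the vdso_data embedded in our
 * architecture-specific vvar page.
 */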
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return ((struct vdso_arch_data *)vvar_page)->data;
}

/*
 * The vvar mapping contains data for a specific time namespace, so when a
 * task changes namespace we must unmap its vvar data for the old namespace.
 * Subsequent faults will then map in the data for the new namespace.
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;

	mmap_read_lock(mm);

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long size = vma->vm_end - vma->vm_start;

		if (vma_is_special_mapping(vma, &vvar_spec))
			zap_page_range(vma, vma->vm_start, size);
	}

	mmap_read_unlock(mm);
	return 0;
}

static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	if (likely(vma->vm_mm == current->mm))
		return current->nsproxy->time_ns->vvar_page;

	/*
	 * The vvar mapping is VM_IO | VM_PFNMAP, which keeps remote accesses
	 * (e.g. /proc/<pid>/mem or process_vm_readv()) from faulting pages
	 * in, so we should never get here with somebody else's mm.
	 */
	WARN(1, "vvar_page accessed remotely");

	return NULL;
}
#else
static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	return NULL;
}
#endif

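/*
 * Fault handler for the [vvar] mapping: map in either the real vDSO data
 * page or, for a task inside a time namespace, that namespace's page.
 */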
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *timens_page = find_timens_vvar_page(vma);
	unsigned long pfn;

	switch (vmf->pgoff) {
	case VVAR_DATA_PAGE_OFFSET:
		if (timens_page)
			pfn = page_to_pfn(timens_page);
		else
			pfn = virt_to_pfn(vdso_data);
		break;
#ifdef CONFIG_TIME_NS
	case VVAR_TIMENS_PAGE_OFFSET:
		/*
		 * A task inside a time namespace sees its namespace-specific
		 * page at VVAR_DATA_PAGE_OFFSET, while the real vDSO data
		 * page is exposed at VVAR_TIMENS_PAGE_OFFSET instead. Without
		 * a time namespace there is nothing to map at this offset.
		 */
		if (!timens_page)
			return VM_FAULT_SIGBUS;
		pfn = virt_to_pfn(vdso_data);
		break;
#endif
	default:
		return VM_FAULT_SIGBUS;
	}

	return vmf_insert_pfn(vma, vmf->address, pfn);
}

/*
 * Called from the ELF loader via arch_setup_additional_pages(): create the
 * special VMAs for the vvar pages and the vDSO text and insert them into the
 * new process's address space.
 */
static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	unsigned long vdso_size, vdso_base, mappings_size;
	struct vm_special_mapping *vdso_spec;
	unsigned long vvar_size = VVAR_NR_PAGES * PAGE_SIZE;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (is_32bit_task()) {
		vdso_spec = &vdso32_spec;
		vdso_size = &vdso32_end - &vdso32_start;
		vdso_base = VDSO32_MBASE;
	} else {
		vdso_spec = &vdso64_spec;
		vdso_size = &vdso64_end - &vdso64_start;
		/*
		 * On 64-bit there is no preferred map address, which lets
		 * get_unmapped_area() place the vDSO near other mmaps and
		 * most likely share an SLB entry.
		 */
		vdso_base = 0;
	}

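	/*
	 * The area holds the two vvar pages (vDSO data page, then the time
	 * namespace page) immediately followed by the vDSO text, plus enough
	 * slack to align the base address.
	 */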
	mappings_size = vdso_size + vvar_size;
	mappings_size += (VDSO_ALIGNMENT - 1) & PAGE_MASK;

	/* Pick a base address for the whole mapping in process space. */
	vdso_base = get_unmapped_area(NULL, vdso_base, mappings_size, 0, 0);
	if (IS_ERR_VALUE(vdso_base))
		return vdso_base;

	/* Add the required alignment. */
	vdso_base = ALIGN(vdso_base, VDSO_ALIGNMENT);

	/*
	 * Record the vDSO base in the mm struct before calling
	 * _install_special_mapping(), otherwise the perf mmap tracking code
	 * will not recognise the mapping as the vDSO.
	 */
	mm->context.vdso = (void __user *)vdso_base + vvar_size;

	vma = _install_special_mapping(mm, vdso_base, vvar_size,
				       VM_READ | VM_MAYREAD | VM_IO |
				       VM_DONTDUMP | VM_PFNMAP, &vvar_spec);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/*
	 * The vDSO text is mapped without VM_WRITE, so a process cannot write
	 * to it directly. gdb can still plant breakpoints through ptrace,
	 * which triggers copy-on-write of the affected text page; the flip
	 * side is that hardware watchpoints on the vDSO text are not
	 * possible, which is reasonable for a kernel-provided mapping.
	 */
	vma = _install_special_mapping(mm, vdso_base + vvar_size, vdso_size,
				       VM_READ | VM_EXEC | VM_MAYREAD |
				       VM_MAYWRITE | VM_MAYEXEC, vdso_spec);
	if (IS_ERR(vma))
		do_munmap(mm, vdso_base, vvar_size, NULL);

	return PTR_ERR_OR_ZERO(vma);
}

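/* Map the vDSO into a newly exec'd process, under the mmap write lock. */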
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int rc;

	mm->context.vdso = NULL;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	rc = __arch_setup_additional_pages(bprm, uses_interp);
	if (rc)
		mm->context.vdso = NULL;

	mmap_write_unlock(mm);
	return rc;
}

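/*
 * Apply the kernel's feature and lwsync fixups to the vDSO text, using the
 * fixup section bounds exported by the vDSO linker scripts.
 */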
#define VDSO_DO_FIXUPS(type, value, bits, sec) do {					\
	void *__start = (void *)VDSO##bits##_SYMBOL(&vdso##bits##_start, sec##_start);	\
	void *__end = (void *)VDSO##bits##_SYMBOL(&vdso##bits##_start, sec##_end);	\
											\
	do_##type##_fixups((value), __start, __end);					\
} while (0)

static void __init vdso_fixup_features(void)
{
#ifdef CONFIG_PPC64
	VDSO_DO_FIXUPS(feature, cur_cpu_spec->cpu_features, 64, ftr_fixup);
	VDSO_DO_FIXUPS(feature, cur_cpu_spec->mmu_features, 64, mmu_ftr_fixup);
	VDSO_DO_FIXUPS(feature, powerpc_firmware_features, 64, fw_ftr_fixup);
	VDSO_DO_FIXUPS(lwsync, cur_cpu_spec->cpu_features, 64, lwsync_fixup);
#endif

#ifdef CONFIG_VDSO32
	VDSO_DO_FIXUPS(feature, cur_cpu_spec->cpu_features, 32, ftr_fixup);
	VDSO_DO_FIXUPS(feature, cur_cpu_spec->mmu_features, 32, mmu_ftr_fixup);
#ifdef CONFIG_PPC64
	VDSO_DO_FIXUPS(feature, powerpc_firmware_features, 32, fw_ftr_fixup);
#endif
	VDSO_DO_FIXUPS(lwsync, cur_cpu_spec->cpu_features, 32, lwsync_fixup);
#endif
}

/*
 * Record which system calls this kernel implements so user space can query
 * availability from the vDSO data page without issuing the syscall.
 */
static void __init vdso_setup_syscall_map(void)
{
	unsigned int i;

	for (i = 0; i < NR_syscalls; i++) {
		if (sys_call_table[i] != (unsigned long)&sys_ni_syscall)
			vdso_data->syscall_map[i >> 5] |= 0x80000000UL >> (i & 0x1f);
		if (IS_ENABLED(CONFIG_COMPAT) &&
		    compat_sys_call_table[i] != (unsigned long)&sys_ni_syscall)
			vdso_data->compat_syscall_map[i >> 5] |= 0x80000000UL >> (i & 0x1f);
	}
}

#ifdef CONFIG_PPC64
int vdso_getcpu_init(void)
{
	unsigned long cpu, node, val;

	/*
	 * SPRG_VDSO holds the CPU and NUMA node for fast retrieval in the
	 * vDSO: the CPU number in the bottom 16 bits and the node number in
	 * the top 16 bits.
	 */
	cpu = get_cpu();
	WARN_ON_ONCE(cpu > 0xffff);

	node = cpu_to_node(cpu);
	WARN_ON_ONCE(node > 0xffff);

	val = (cpu & 0xffff) | ((node & 0xffff) << 16);
	mtspr(SPRN_SPRG_VDSO_WRITE, val);
	get_paca()->sprg_vdso = val;

	put_cpu();

	return 0;
}

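/* Set up the boot CPU's SPRG_VDSO early, before SMP bring-up. */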
early_initcall(vdso_getcpu_init);
#endif

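/*
 * Build the NULL-terminated array of struct page pointers backing one vDSO
 * image, for _install_special_mapping() to use.
 */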
static struct page ** __init vdso_setup_pages(void *start, void *end)
{
	int i;
	struct page **pagelist;
	int pages = (end - start) >> PAGE_SHIFT;

	pagelist = kcalloc(pages + 1, sizeof(struct page *), GFP_KERNEL);
	if (!pagelist)
		panic("%s: Cannot allocate page list for VDSO", __func__);

	for (i = 0; i < pages; i++)
		pagelist[i] = virt_to_page(start + i * PAGE_SIZE);

	return pagelist;
}

static int __init vdso_init(void)
{
#ifdef CONFIG_PPC64
	/*
	 * Fill in the legacy "systemcfg" fields of the data page for
	 * backwards compatibility.
	 */
	strcpy((char *)vdso_data->eye_catcher, "SYSTEMCFG:PPC64");
	vdso_data->version.major = SYSTEMCFG_MAJOR;
	vdso_data->version.minor = SYSTEMCFG_MINOR;
	vdso_data->processor = mfspr(SPRN_PVR);
	/*
	 * Fake the old pSeries platform number, and set the LPAR bit when
	 * running under a hypervisor.
	 */
	vdso_data->platform = 0x100;
	if (firmware_has_feature(FW_FEATURE_LPAR))
		vdso_data->platform |= 1;
	vdso_data->physicalMemorySize = memblock_phys_mem_size();
	vdso_data->dcache_size = ppc64_caches.l1d.size;
	vdso_data->dcache_line_size = ppc64_caches.l1d.line_size;
	vdso_data->icache_size = ppc64_caches.l1i.size;
	vdso_data->icache_line_size = ppc64_caches.l1i.line_size;
	vdso_data->dcache_block_size = ppc64_caches.l1d.block_size;
	vdso_data->icache_block_size = ppc64_caches.l1i.block_size;
	vdso_data->dcache_log_block_size = ppc64_caches.l1d.log_block_size;
	vdso_data->icache_log_block_size = ppc64_caches.l1i.log_block_size;
#endif /* CONFIG_PPC64 */

	vdso_setup_syscall_map();

	vdso_fixup_features();

	if (IS_ENABLED(CONFIG_VDSO32))
		vdso32_spec.pages = vdso_setup_pages(&vdso32_start, &vdso32_end);

	if (IS_ENABLED(CONFIG_PPC64))
		vdso64_spec.pages = vdso_setup_pages(&vdso64_start, &vdso64_end);

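	/* Make the data page and page lists set up above visible before use. */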
	smp_wmb();

	return 0;
}
arch_initcall(vdso_init);