/*
 * Set up the VMAs to tell the VM about the vDSO.
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 */

/*
 * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
 */

#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/page.h>

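/* Toggled by the "vdso=" boot parameter; zero disables the vdso entirely. */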
unsigned int __read_mostly vdso_enabled = 1;

static struct vm_special_mapping vvar_mapping = {
	.name = "[vvar]"
};

#ifdef CONFIG_SPARC64
static struct vm_special_mapping vdso_mapping64 = {
	.name = "[vdso]"
};
#endif

#ifdef CONFIG_COMPAT
static struct vm_special_mapping vdso_mapping32 = {
	.name = "[vdso]"
};
#endif

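/*
 * Kernel-side pointer to the shared vvar data page; allocated once in
 * init_vdso_image() and mapped read-only into each process.
 */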
struct vvar_data *vvar_data;

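/*
 * Every SPARC instruction is four bytes; the %tick patch below is copied
 * in starting this many bytes past the start of vread_tick(), presumably
 * to skip the save instruction at the top of that function.
 */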
#define SAVE_INSTR_SIZE 4

/*
 * Allocate pages for the vdso and vvar, and copy in the vdso text from the
 * kernel image.
 */
int __init init_vdso_image(const struct vdso_image *image,
		struct vm_special_mapping *vdso_mapping)
{
	int i;
	struct page *dp, **dpp = NULL;
	int dnpages = 0;
	struct page *cp, **cpp = NULL;
	int cnpages = image->size / PAGE_SIZE;

	/*
	 * First, the vdso text.  This is initialized data, an integral
	 * number of pages long.
	 */
	if (WARN_ON(image->size % PAGE_SIZE != 0))
		goto oom;

	cpp = kcalloc(cnpages, sizeof(struct page *), GFP_KERNEL);
	vdso_mapping->pages = cpp;

	if (!cpp)
		goto oom;

	if (vdso_fix_stick) {
		/*
		 * If the system uses %tick instead of %stick, patch the
		 * vdso to read %tick instead of %stick.
		 */
		unsigned int j, k = SAVE_INSTR_SIZE;
		unsigned char *data = image->data;

		for (j = image->sym_vread_tick_patch_start;
		     j < image->sym_vread_tick_patch_end; j++) {
			data[image->sym_vread_tick + k] = data[j];
			k++;
		}
	}

	for (i = 0; i < cnpages; i++) {
		cp = alloc_page(GFP_KERNEL);
		if (!cp)
			goto oom;
		cpp[i] = cp;
		copy_page(page_address(cp), image->data + i * PAGE_SIZE);
	}

	/*
	 * Now the vvar page.  This is uninitialized data.
	 */

	if (vvar_data == NULL) {
		dnpages = (sizeof(struct vvar_data) / PAGE_SIZE) + 1;
		if (WARN_ON(dnpages != 1))
			goto oom;
		dpp = kcalloc(dnpages, sizeof(struct page *), GFP_KERNEL);
		vvar_mapping.pages = dpp;

		if (!dpp)
			goto oom;

		dp = alloc_page(GFP_KERNEL);
		if (!dp)
			goto oom;

		dpp[0] = dp;
		vvar_data = page_address(dp);
		memset(vvar_data, 0, PAGE_SIZE);

		vvar_data->seq = 0;
	}

	return 0;
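	/*
	 * Error path: free whatever we managed to allocate above and fall
	 * back to running without a vdso.
	 */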
oom:
	if (cpp != NULL) {
		for (i = 0; i < cnpages; i++) {
			if (cpp[i] != NULL)
				__free_page(cpp[i]);
		}
		kfree(cpp);
		vdso_mapping->pages = NULL;
	}

	if (dpp != NULL) {
		for (i = 0; i < dnpages; i++) {
			if (dpp[i] != NULL)
				__free_page(dpp[i]);
		}
		kfree(dpp);
		vvar_mapping.pages = NULL;
	}

	pr_warn("Cannot allocate vdso\n");
	vdso_enabled = 0;
	return -ENOMEM;
}

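/* Build the builtin vdso image(s) once at boot. */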
static int __init init_vdso(void)
{
	int err = 0;

#ifdef CONFIG_SPARC64
	err = init_vdso_image(&vdso_image_64_builtin, &vdso_mapping64);
	if (err)
		return err;
#endif

#ifdef CONFIG_COMPAT
	err = init_vdso_image(&vdso_image_32_builtin, &vdso_mapping32);
#endif

	return err;
}
subsys_initcall(init_vdso);

struct linux_binprm;

/* Shuffle the vdso up a bit, randomly. */
static unsigned long vdso_addr(unsigned long start, unsigned int len)
{
	unsigned int offset;

	/* This loses some bits compared to a modulo, but is cheaper. */
	offset = get_random_int() & (PTRS_PER_PTE - 1);
	return start + (offset << PAGE_SHIFT);
}

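/*
 * Map the vvar pages and the vdso text into the current process.
 * image->sym_vvar_start is the (negative) offset of the vvar area from
 * the start of the vdso text, so the vvar mapping ends up immediately
 * below the text mapping.
 */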
static int map_vdso(const struct vdso_image *image,
		struct vm_special_mapping *vdso_mapping)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long text_start, addr = 0;
	int ret = 0;

	down_write(&mm->mmap_sem);

	/*
	 * First, get an unmapped region, then randomize it, and make sure
	 * that region is free.
	 */
	if (current->flags & PF_RANDOMIZE) {
		addr = get_unmapped_area(NULL, 0,
					 image->size - image->sym_vvar_start,
					 0, 0);
		if (IS_ERR_VALUE(addr)) {
			ret = addr;
			goto up_fail;
		}
		addr = vdso_addr(addr, image->size - image->sym_vvar_start);
	}
	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr - image->sym_vvar_start;
	current->mm->context.vdso = (void __user *)text_start;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints.
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

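	/* Now the vvar area, read-only, just below the vdso text. */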
	vma = _install_special_mapping(mm,
				       addr,
				       -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD,
				       &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		do_munmap(mm, text_start, image->size, NULL);
	}

up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	up_write(&mm->mmap_sem);
	return ret;
}

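/*
 * Called at exec time to add the vdso mappings to the new process's
 * address space.
 */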
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso_enabled)
		return 0;

#ifdef CONFIG_COMPAT
	if (!is_32bit_task())
		return map_vdso(&vdso_image_64_builtin, &vdso_mapping64);
	else
		return map_vdso(&vdso_image_32_builtin, &vdso_mapping32);
#else
	return map_vdso(&vdso_image_64_builtin, &vdso_mapping64);
#endif
}

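/* "vdso=0" on the kernel command line turns the vdso off. */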
static __init int vdso_setup(char *s)
{
	int err;
	unsigned long val;

	err = kstrtoul(s, 10, &val);
	if (!err)
		vdso_enabled = val;
	return err;
}
__setup("vdso=", vdso_setup);