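/*
 * process_vm_access.c - kernel side of process_vm_readv() and
 * process_vm_writev(): copy data between the address spaces of two
 * processes via pinned pages.
 */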
#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/syscalls.h>

#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif
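/**
 * process_vm_rw_pages - copy between a set of pinned pages and an iov_iter
 * @pages: array of pointers to the pinned pages to copy to/from
 * @offset: offset in the first page at which to start copying
 * @len: number of bytes to copy
 * @iter: local iovec iterator to copy to/from
 * @vm_write: 0 means copy the pages into @iter, 1 means copy @iter into the pages
 *
 * Returns 0 on success, -EFAULT if a copy came up short.
 */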
static int process_vm_rw_pages(struct page **pages,
			       unsigned offset,
			       size_t len,
			       struct iov_iter *iter,
			       int vm_write)
{
	/* Do the copy for each page */
	while (len && iov_iter_count(iter)) {
		struct page *page = *pages++;
		size_t copy = PAGE_SIZE - offset;
		size_t copied;

		if (copy > len)
			copy = len;

		if (vm_write) {
			/* copy_page_from_iter() advances the iterator itself */
			copied = copy_page_from_iter(page, offset, copy, iter);
			set_page_dirty_lock(page);
		} else {
			copied = copy_page_to_iter(page, offset, copy, iter);
		}
		len -= copied;
		if (copied < copy && iov_iter_count(iter))
			return -EFAULT;
		offset = 0;
	}
	return 0;
}

/* Maximum size, in bytes, of the kmalloc'd buffer of struct page pointers */
#define PVM_MAX_KMALLOC_PAGES (PAGE_SIZE * 2)
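/**
 * process_vm_rw_single_vec - copy one remote iovec's worth of memory
 * @addr: start address in the target process
 * @len: number of bytes to copy to/from
 * @iter: local iovec iterator to copy to/from
 * @process_pages: scratch array used to hold the pinned struct page pointers
 * @mm: mm of the target task
 * @task: target task to read from / write to
 * @vm_write: 0 means read from the target, 1 means write to it
 *
 * Returns 0 on success or a negative error code.
 */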
static int process_vm_rw_single_vec(unsigned long addr,
				    unsigned long len,
				    struct iov_iter *iter,
				    struct page **process_pages,
				    struct mm_struct *mm,
				    struct task_struct *task,
				    int vm_write)
{
	unsigned long pa = addr & PAGE_MASK;
	unsigned long start_offset = addr - pa;
	unsigned long nr_pages;
	ssize_t rc = 0;
	unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
		/ sizeof(struct page *);

	/* Work out the address and page range required */
	if (len == 0)
		return 0;
	nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;

	while (!rc && nr_pages && iov_iter_count(iter)) {
		int pages = min(nr_pages, max_pages_per_loop);
		size_t bytes;

		/*
		 * Get the pages we're interested in.  FOLL_REMOTE is
		 * needed because task/mm are not necessarily
		 * current/current->mm.
		 */
		pages = __get_user_pages_unlocked(task, mm, pa, pages,
						  vm_write, 0, process_pages,
						  FOLL_REMOTE | FOLL_TOUCH);
		if (pages <= 0)
			return -EFAULT;

		bytes = pages * PAGE_SIZE - start_offset;
		if (bytes > len)
			bytes = len;

		rc = process_vm_rw_pages(process_pages,
					 start_offset, bytes, iter,
					 vm_write);
		len -= bytes;
		start_offset = 0;
		nr_pages -= pages;
		pa += pages * PAGE_SIZE;
		while (pages)
			put_page(process_pages[--pages]);
	}

	return rc;
}

/* Maximum number of entries for the process pages array that lives on the stack */
#define PVM_MAX_PP_ARRAY_COUNT 16
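/**
 * process_vm_rw_core - core of reading/writing pages from the task specified
 * @pid: PID of the process to read from / write to
 * @iter: local iovec iterator to copy to/from
 * @rvec: iovec array specifying where to copy to/from in the other process
 * @riovcnt: size of the rvec array
 * @flags: currently unused
 * @vm_write: 0 if reading from the other process, 1 if writing to it
 *
 * Returns the number of bytes read/written or a negative error code.
 * May return fewer bytes than expected if an error occurs mid-copy.
 */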
static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
				  const struct iovec *rvec,
				  unsigned long riovcnt,
				  unsigned long flags, int vm_write)
{
	struct task_struct *task;
	struct page *pp_stack[PVM_MAX_PP_ARRAY_COUNT];
	struct page **process_pages = pp_stack;
	struct mm_struct *mm;
	unsigned long i;
	ssize_t rc = 0;
	unsigned long nr_pages = 0;
	unsigned long nr_pages_iov;
	ssize_t iov_len;
	size_t total_len = iov_iter_count(iter);

	/*
	 * Work out how many struct page pointers we need to hold, at
	 * most, for any single remote iovec.
	 */
	for (i = 0; i < riovcnt; i++) {
		iov_len = rvec[i].iov_len;
		if (iov_len > 0) {
			nr_pages_iov = ((unsigned long)rvec[i].iov_base
					+ iov_len)
				/ PAGE_SIZE - (unsigned long)rvec[i].iov_base
				/ PAGE_SIZE + 1;
			nr_pages = max(nr_pages, nr_pages_iov);
		}
	}

	if (nr_pages == 0)
		return 0;

	if (nr_pages > PVM_MAX_PP_ARRAY_COUNT) {
		/* For reliability, don't try to kmalloc more than 2 pages worth */
		process_pages = kmalloc(min_t(size_t, PVM_MAX_KMALLOC_PAGES,
					      sizeof(struct page *) * nr_pages),
					GFP_KERNEL);

		if (!process_pages)
			return -ENOMEM;
	}

	/* Get process information */
	rcu_read_lock();
	task = find_task_by_vpid(pid);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();
	if (!task) {
		rc = -ESRCH;
		goto free_proc_pages;
	}

	mm = mm_access(task, PTRACE_MODE_ATTACH_REALCREDS);
	if (!mm || IS_ERR(mm)) {
		rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
		/*
		 * Explicitly map EACCES to EPERM as EPERM is a more
		 * appropriate error code for process_vm_readv/writev.
		 */
		if (rc == -EACCES)
			rc = -EPERM;
		goto put_task_struct;
	}

	for (i = 0; i < riovcnt && iov_iter_count(iter) && !rc; i++)
		rc = process_vm_rw_single_vec(
			(unsigned long)rvec[i].iov_base, rvec[i].iov_len,
			iter, process_pages, mm, task, vm_write);

	/* copied = space before - space after */
	total_len -= iov_iter_count(iter);

	/*
	 * If we have managed to copy any data at all then we return
	 * the number of bytes copied. Otherwise we return the error
	 * code.
	 */
	if (total_len)
		rc = total_len;

	mmput(mm);

put_task_struct:
	put_task_struct(task);

free_proc_pages:
	if (process_pages != pp_stack)
		kfree(process_pages);
	return rc;
}
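/**
 * process_vm_rw - check iovecs before calling the core routine
 * @pid: PID of the process to read from / write to
 * @lvec: iovec array specifying where to copy to/from locally
 * @liovcnt: size of the lvec array
 * @rvec: iovec array specifying where to copy to/from in the other process
 * @riovcnt: size of the rvec array
 * @flags: currently unused
 * @vm_write: 0 if reading from the other process, 1 if writing to it
 *
 * Returns the number of bytes read/written or a negative error code.
 */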
static ssize_t process_vm_rw(pid_t pid,
			     const struct iovec __user *lvec,
			     unsigned long liovcnt,
			     const struct iovec __user *rvec,
			     unsigned long riovcnt,
			     unsigned long flags, int vm_write)
{
	struct iovec iovstack_l[UIO_FASTIOV];
	struct iovec iovstack_r[UIO_FASTIOV];
	struct iovec *iov_l = iovstack_l;
	struct iovec *iov_r = iovstack_r;
	struct iov_iter iter;
	ssize_t rc;

	if (flags != 0)
		return -EINVAL;

	/* Check and copy in the iovecs */
	if (vm_write)
		rc = rw_copy_check_uvector(WRITE, lvec, liovcnt, UIO_FASTIOV,
					   iovstack_l, &iov_l);
	else
		rc = rw_copy_check_uvector(READ, lvec, liovcnt, UIO_FASTIOV,
					   iovstack_l, &iov_l);
	if (rc <= 0)
		goto free_iovecs;

	iov_iter_init(&iter, iov_l, liovcnt, rc, 0);

	rc = rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt, UIO_FASTIOV,
				   iovstack_r, &iov_r);
	if (rc <= 0)
		goto free_iovecs;

	rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);

free_iovecs:
	if (iov_r != iovstack_r)
		kfree(iov_r);
	if (iov_l != iovstack_l)
		kfree(iov_l);

	return rc;
}

SYSCALL_DEFINE6(process_vm_readv, pid_t, pid, const struct iovec __user *, lvec,
		unsigned long, liovcnt, const struct iovec __user *, rvec,
		unsigned long, riovcnt, unsigned long, flags)
{
	return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 0);
}

SYSCALL_DEFINE6(process_vm_writev, pid_t, pid,
		const struct iovec __user *, lvec,
		unsigned long, liovcnt, const struct iovec __user *, rvec,
		unsigned long, riovcnt, unsigned long, flags)
{
	return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 1);
}

#ifdef CONFIG_COMPAT

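/*
 * Compat entry points: convert the 32-bit compat_iovec arrays and then
 * call the same core routine as the native syscalls.
 */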
asmlinkage ssize_t
compat_process_vm_rw(compat_pid_t pid,
		     const struct compat_iovec __user *lvec,
		     unsigned long liovcnt,
		     const struct compat_iovec __user *rvec,
		     unsigned long riovcnt,
		     unsigned long flags, int vm_write)
{
	struct iovec iovstack_l[UIO_FASTIOV];
	struct iovec iovstack_r[UIO_FASTIOV];
	struct iovec *iov_l = iovstack_l;
	struct iovec *iov_r = iovstack_r;
	struct iov_iter iter;
	ssize_t rc = -EFAULT;

	if (flags != 0)
		return -EINVAL;

	if (vm_write)
		rc = compat_rw_copy_check_uvector(WRITE, lvec, liovcnt,
						  UIO_FASTIOV, iovstack_l,
						  &iov_l);
	else
		rc = compat_rw_copy_check_uvector(READ, lvec, liovcnt,
						  UIO_FASTIOV, iovstack_l,
						  &iov_l);
	if (rc <= 0)
		goto free_iovecs;
	iov_iter_init(&iter, iov_l, liovcnt, rc, 0);
	rc = compat_rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt,
					  UIO_FASTIOV, iovstack_r,
					  &iov_r);
	if (rc <= 0)
		goto free_iovecs;

	rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);

free_iovecs:
	if (iov_r != iovstack_r)
		kfree(iov_r);
	if (iov_l != iovstack_l)
		kfree(iov_l);
	return rc;
}

asmlinkage ssize_t
compat_sys_process_vm_readv(compat_pid_t pid,
			    const struct compat_iovec __user *lvec,
			    unsigned long liovcnt,
			    const struct compat_iovec __user *rvec,
			    unsigned long riovcnt,
			    unsigned long flags)
{
	return compat_process_vm_rw(pid, lvec, liovcnt, rvec,
				    riovcnt, flags, 0);
}

asmlinkage ssize_t
compat_sys_process_vm_writev(compat_pid_t pid,
			     const struct compat_iovec __user *lvec,
			     unsigned long liovcnt,
			     const struct compat_iovec __user *rvec,
			     unsigned long riovcnt,
			     unsigned long flags)
{
	return compat_process_vm_rw(pid, lvec, liovcnt, rvec,
				    riovcnt, flags, 1);
}

#endif