/*
 *	linux/mm/mlock.c
 *
 *	mlock(), munlock(), mlockall() and munlockall() system calls,
 *	plus the helpers that keep VM_LOCKED pages on the unevictable LRU.
 */
#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/rmap.h>
#include <linux/mmzone.h>
#include <linux/hugetlb.h>

#include "internal.h"

int can_do_mlock(void)
{
	if (capable(CAP_IPC_LOCK))
		return 1;
	if (rlimit(RLIMIT_MEMLOCK) != 0)
		return 1;
	return 0;
}
EXPORT_SYMBOL(can_do_mlock);

/*
 * Mlocked pages are marked with the PageMlocked() flag for efficient
 * testing in vmscan and to support the NR_MLOCK statistics.
 *
 * An mlocked page [PageMlocked(page)] is unevictable: it lives on the
 * LRU "unevictable" list rather than the [in]active lists, and
 * PageUnevictable marks that state.
 *
 * When mlocking lazily (e.g. from vmscan), the vma's VM_LOCKED status
 * must not change under us, so lazy mlock takes mmap_sem for read and
 * re-checks that the vma really is locked (see mm/rmap.c).
 */

/*
 * LRU accounting for clear_page_mlock(): PG_mlocked has already been
 * cleared by the caller; fix up the NR_MLOCK statistics and move the
 * page back to an evictable LRU list.
 */
void __clear_page_mlock(struct page *page)
{
	VM_BUG_ON(!PageLocked(page));

	if (!page->mapping) {	/* truncated ? */
		return;
	}

	dec_zone_page_state(page, NR_MLOCK);
	count_vm_event(UNEVICTABLE_PGCLEARED);
	if (!isolate_lru_page(page)) {
		putback_lru_page(page);
	} else {
		/*
		 * We lost the race: the page has already been moved to
		 * an evictable list by someone else.
		 */
		if (PageUnevictable(page))
			count_vm_event(UNEVICTABLE_PGSTRANDED);
	}
}

/*
 * Mark page as mlocked if not already.  If the page is on an LRU list,
 * isolate it and put it back so that it lands on the unevictable list.
 */
void mlock_vma_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

	if (!TestSetPageMlocked(page)) {
		inc_zone_page_state(page, NR_MLOCK);
		count_vm_event(UNEVICTABLE_PGMLOCKED);
		if (!isolate_lru_page(page))
			putback_lru_page(page);
	}
}

/**
 * munlock_vma_page - munlock a vma page
 * @page: page to be unlocked
 *
 * Called from the munlock()/munmap() path with the page supposedly on the
 * LRU.  Because the vma we found the page in is being unlocked or unmapped,
 * other VM_LOCKED vmas may still map the page; if so it must stay mlocked.
 * try_to_munlock() does that reverse-map scan: it returns SWAP_MLOCK when it
 * re-mlocked the page, and another value when the page can become evictable
 * again.
 */
void munlock_vma_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

	if (TestClearPageMlocked(page)) {
		dec_zone_page_state(page, NR_MLOCK);
		if (!isolate_lru_page(page)) {
			int ret = try_to_munlock(page);
			/*
			 * Did try_to_munlock() succeed or punt?
			 */
			if (ret != SWAP_MLOCK)
				count_vm_event(UNEVICTABLE_PGMUNLOCKED);

			putback_lru_page(page);
		} else {
			/*
			 * Some other task has already removed the page from
			 * the LRU.  putback_lru_page() will take care of
			 * removing it from the unevictable list, if needed;
			 * vmscan will move it back to the unevictable list
			 * if some other vma still has it mlocked.
			 */
			if (PageUnevictable(page))
				count_vm_event(UNEVICTABLE_PGSTRANDED);
			else
				count_vm_event(UNEVICTABLE_PGMUNLOCKED);
		}
	}
}

/**
 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
 * @vma:	target vma
 * @start:	start address
 * @end:	end address
 * @nonblocking: if non-NULL, *nonblocking is cleared when get_user_pages()
 *		drops mmap_sem while faulting
 *
 * This takes care of making the pages present too.
 *
 * Returns the number of pages faulted in, or a negative error code.
 *
 * vma->vm_mm->mmap_sem must be held for at least read.
 */
static long __mlock_vma_pages_range(struct vm_area_struct *vma,
				    unsigned long start, unsigned long end,
				    int *nonblocking)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr = start;
	int nr_pages = (end - start) / PAGE_SIZE;
	int gup_flags;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(end & ~PAGE_MASK);
	VM_BUG_ON(start < vma->vm_start);
	VM_BUG_ON(end > vma->vm_end);
	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));

	gup_flags = FOLL_TOUCH | FOLL_MLOCK;
	/*
	 * We want to touch writable mappings with a write fault in order
	 * to break COW, except for shared mappings because these don't COW
	 * and we would not want to dirty them for nothing.
	 */
	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
		gup_flags |= FOLL_WRITE;

	/*
	 * We want mlock to succeed for regions that have any permissions
	 * other than PROT_NONE.
	 */
	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
		gup_flags |= FOLL_FORCE;

	return __get_user_pages(current, mm, addr, nr_pages, gup_flags,
				NULL, NULL, nonblocking);
}

/*
 * Convert a get_user_pages() error to the errno that POSIX mlock() expects.
 */
static int __mlock_posix_error_return(long retval)
{
	if (retval == -EFAULT)
		retval = -ENOMEM;
	else if (retval == -ENOMEM)
		retval = -EAGAIN;
	return retval;
}

/*
 * mlock_vma_pages_range() - mlock pages in the specified vma range.
 * @vma:	the vma containing the specified address range
 * @start:	starting address in @vma of the range
 * @end:	end address [+1] in @vma
 *
 * For mmap()/mremap()/expansion of an mlocked vma.
 *
 * Returns 0 on success for "normal" vmas.
 *
 * Returns the number of pages [> 0] to be removed from locked_vm when a
 * "special" vma (VM_IO, VM_PFNMAP, hugetlb, gate vma, ...) cannot be
 * mlocked; VM_LOCKED is cleared on the vma in that case.
 */
long mlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
{
	int nr_pages = (end - start) / PAGE_SIZE;
	BUG_ON(!(vma->vm_flags & VM_LOCKED));

	/*
	 * filter unlockable vmas
	 */
	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
		goto no_mlock;

	if (!((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
			is_vm_hugetlb_page(vma) ||
			vma == get_gate_vma(current->mm))) {

		__mlock_vma_pages_range(vma, start, end, NULL);

		/* Hide errors from mmap() and other callers */
		return 0;
	}

	/*
	 * User mapped kernel pages or huge pages: make these pages present
	 * to populate the ptes, but fall through to reset VM_LOCKED--no need
	 * to unlock--and return nr_pages so they don't get counted against
	 * the task's locked limit.  Huge pages are already counted against
	 * the locked vm limit.
	 */
	make_pages_present(start, end);

no_mlock:
	vma->vm_flags &= ~VM_LOCKED;
	return nr_pages;		/* error or pages NOT mlocked */
}

/*
 * munlock_vma_pages_range() - munlock all pages in the vma range.
 * @vma:	vma containing range to be munlock()ed.
 * @start:	start address in @vma of the range
 * @end:	end of range in @vma.
 *
 * For mremap(), munmap() and exit().
 *
 * Called with @vma VM_LOCKED.
 *
 * Returns with VM_LOCKED cleared.  Callers must be prepared to
 * deal with this.
 *
 * We don't save and restore VM_LOCKED here because pages are
 * still on lru.  In unmap path, pages might be scanned by reclaim
 * and re-mlocked by try_to_{munlock|unmap} before we unmap and
 * free them.  This will result in freeing mlocked pages.
 */
void munlock_vma_pages_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	unsigned long addr;

	lru_add_drain();
	vma->vm_flags &= ~VM_LOCKED;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		struct page *page;
		/*
		 * Although FOLL_DUMP is intended for get_dump_page(),
		 * it just so happens that its special treatment of the
		 * ZERO_PAGE (returning an error instead of doing get_page)
		 * suits munlock very well (and if somehow an abnormal page
		 * has sneaked into the range, we won't oops here: great).
		 */
		page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
		if (page && !IS_ERR(page)) {
			lock_page(page);
			/*
			 * Like in __mlock_vma_pages_range(), because we lock
			 * the page here and migration is blocked by the
			 * elevated reference count, we need only check for
			 * file-cache page truncation.
			 */
			if (page->mapping)
				munlock_vma_page(page);
			unlock_page(page);
			put_page(page);
		}
		cond_resched();
	}
}

/*
 * mlock_fixup - handle mlock[all]/munlock[all] requests.
 *
 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
 * munlock is a no-op.  However, for some special vmas, we still need to
 * munlock the pages too.
 *
 * For vmas that pass the filters, merge/split as appropriate.
 */
static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
	unsigned long start, unsigned long end, vm_flags_t newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	pgoff_t pgoff;
	int nr_pages;
	int ret = 0;
	int lock = !!(newflags & VM_LOCKED);

	if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) ||
	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm))
		goto out;

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	if (start != vma->vm_start) {
		ret = split_vma(mm, vma, start, 1);
		if (ret)
			goto out;
	}

	if (end != vma->vm_end) {
		ret = split_vma(mm, vma, end, 0);
		if (ret)
			goto out;
	}

success:
	/*
	 * Keep track of amount of locked VM.
	 */
	nr_pages = (end - start) >> PAGE_SHIFT;
	if (!lock)
		nr_pages = -nr_pages;
	mm->locked_vm += nr_pages;

	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 * It's okay if try_to_unmap_one unmaps a page just after we
	 * set VM_LOCKED, __mlock_vma_pages_range will bring it back.
	 */
	if (lock)
		vma->vm_flags = newflags;
	else
		munlock_vma_pages_range(vma, start, end);

out:
	*prev = vma;
	return ret;
}
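
/*
 * do_mlock - set or clear VM_LOCKED on every vma covering [start, start+len),
 * merging or splitting vmas as needed via mlock_fixup().  Page tables are
 * populated separately (see do_mlock_pages()).
 *
 * Called with mmap_sem held for write.
 */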
static int do_mlock(unsigned long start, size_t len, int on)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct * vma, * prev;
	int error;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(len != PAGE_ALIGN(len));
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma_prev(current->mm, start, &prev);
	if (!vma || vma->vm_start > start)
		return -ENOMEM;

	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		vm_flags_t newflags;

		/* Here we know that  vma->vm_start <= nstart < vma->vm_end. */

		newflags = vma->vm_flags | VM_LOCKED;
		if (!on)
			newflags &= ~VM_LOCKED;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			break;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
	}
	return error;
}
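
/*
 * do_mlock_pages - fault in the pages of [start, start+len) once the vmas
 * have been marked VM_LOCKED.  mmap_sem is taken for read here and may be
 * dropped and re-taken by __get_user_pages() (tracked via the local "locked"
 * flag), so the vma list is looked up again whenever that happens.
 */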
static int do_mlock_pages(unsigned long start, size_t len, int ignore_errors)
{
	struct mm_struct *mm = current->mm;
	unsigned long end, nstart, nend;
	struct vm_area_struct *vma = NULL;
	int locked = 0;
	int ret = 0;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(len != PAGE_ALIGN(len));
	end = start + len;

	for (nstart = start; nstart < end; nstart = nend) {
		/*
		 * We want to fault in pages for [nstart; end) address range.
		 * Find first corresponding VMA.
		 */
		if (!locked) {
			locked = 1;
			down_read(&mm->mmap_sem);
			vma = find_vma(mm, nstart);
		} else if (nstart >= vma->vm_end)
			vma = vma->vm_next;
		if (!vma || vma->vm_start >= end)
			break;
		/*
		 * Set [nstart; nend) to intersection of desired address
		 * range with the first VMA. Also, skip undesirable VMA types.
		 */
		nend = min(end, vma->vm_end);
		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
			continue;
		if (nstart < vma->vm_start)
			nstart = vma->vm_start;
		/*
		 * Now fault in a range of pages. __mlock_vma_pages_range()
		 * double checks the vma flags, so that it won't mlock pages
		 * if the vma was already munlocked.
		 */
		ret = __mlock_vma_pages_range(vma, nstart, nend, &locked);
		if (ret < 0) {
			if (ignore_errors) {
				ret = 0;
				continue;	/* continue at next VMA */
			}
			ret = __mlock_posix_error_return(ret);
			break;
		}
		nend = nstart + ret * PAGE_SIZE;
		ret = 0;
	}
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;
}
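
/*
 * mlock(2): lock the given address range into memory, charging it against
 * RLIMIT_MEMLOCK unless the caller has CAP_IPC_LOCK, then fault the pages in.
 */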
SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	if (!can_do_mlock())
		return -EPERM;

	lru_add_drain_all();	/* flush pagevec */

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;

	locked = len >> PAGE_SHIFT;
	locked += current->mm->locked_vm;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;

	/* check against resource limits */
	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
		error = do_mlock(start, len, 1);
	up_write(&current->mm->mmap_sem);
	if (!error)
		error = do_mlock_pages(start, len, 0);
	return error;
}

SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;
	ret = do_mlock(start, len, 0);
	up_write(&current->mm->mmap_sem);
	return ret;
}

static int do_mlockall(int flags)
{
	struct vm_area_struct * vma, * prev = NULL;
	unsigned int def_flags = 0;

	if (flags & MCL_FUTURE)
		def_flags = VM_LOCKED;
	current->mm->def_flags = def_flags;
	if (flags == MCL_FUTURE)
		goto out;

	for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
		vm_flags_t newflags;

		newflags = vma->vm_flags | VM_LOCKED;
		if (!(flags & MCL_CURRENT))
			newflags &= ~VM_LOCKED;

		/* Ignore errors */
		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
	}
out:
	return 0;
}

SYSCALL_DEFINE1(mlockall, int, flags)
{
	unsigned long lock_limit;
	int ret = -EINVAL;

	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
		goto out;

	ret = -EPERM;
	if (!can_do_mlock())
		goto out;

	lru_add_drain_all();	/* flush pagevec */

	down_write(&current->mm->mmap_sem);

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;

	ret = -ENOMEM;
	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
	    capable(CAP_IPC_LOCK))
		ret = do_mlockall(flags);
	up_write(&current->mm->mmap_sem);
	if (!ret && (flags & MCL_CURRENT)) {
		/* Ignore errors */
		do_mlock_pages(0, TASK_SIZE, 1);
	}
out:
	return ret;
}

SYSCALL_DEFINE0(munlockall)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mlockall(0);
	up_write(&current->mm->mmap_sem);
	return ret;
}

/*
 * Objects with a different lifetime than processes (SHM_LOCK and SHM_HUGETLB
 * shm segments) get accounted against the user_struct instead.
 */
static DEFINE_SPINLOCK(shmlock_user_lock);

int user_shm_lock(size_t size, struct user_struct *user)
{
	unsigned long lock_limit, locked;
	int allowed = 0;

	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	lock_limit = rlimit(RLIMIT_MEMLOCK);
	if (lock_limit == RLIM_INFINITY)
		allowed = 1;
	lock_limit >>= PAGE_SHIFT;
	spin_lock(&shmlock_user_lock);
	if (!allowed &&
	    locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
		goto out;
	get_uid(user);
	user->locked_shm += locked;
	allowed = 1;
out:
	spin_unlock(&shmlock_user_lock);
	return allowed;
}

void user_shm_unlock(size_t size, struct user_struct *user)
{
	spin_lock(&shmlock_user_lock);
	user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	spin_unlock(&shmlock_user_lock);
	free_uid(user);
}