/*
 *	linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 */

#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/rmap.h>
#include <linux/mmzone.h>
#include <linux/hugetlb.h>

#include "internal.h"

int can_do_mlock(void)
{
	if (capable(CAP_IPC_LOCK))
		return 1;
	if (current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur != 0)
		return 1;
	return 0;
}
EXPORT_SYMBOL(can_do_mlock);
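/*
 * Mlocked pages are marked with the PageMlocked() flag for efficient testing
 * in vmscan and, possibly, the fault path; and to support semi-accurate
 * statistics.
 *
 * An mlocked page [PageMlocked(page)] is unevictable.  As such, it will
 * be placed on the LRU "unevictable" list, rather than the [in]active lists.
 * The unevictable list is an LRU sibling list to the [in]active lists.
 * PageUnevictable is set to indicate the unevictable state.
 *
 * When lazy mlocking via vmscan, it is important to ensure that the
 * vma's VM_LOCKED status is not concurrently being modified, otherwise we
 * may have mlocked a page that is being munlocked.  So lazy mlock must take
 * the mmap_sem for read, and verify that the vma really is locked
 * (see mm/rmap.c).
 */

/*
 *  LRU accounting for clear_page_mlock()
 */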
void __clear_page_mlock(struct page *page)
{
	VM_BUG_ON(!PageLocked(page));

	if (!page->mapping) {	/* truncated ? */
		return;
	}

	dec_zone_page_state(page, NR_MLOCK);
	count_vm_event(UNEVICTABLE_PGCLEARED);
	if (!isolate_lru_page(page)) {
		putback_lru_page(page);
	} else {
		/*
		 * We lost the race. the page already moved to evictable list.
		 */
		if (PageUnevictable(page))
			count_vm_event(UNEVICTABLE_PGSTRANDED);
	}
}
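/*
 * Mark page as mlocked if not already.
 * If page on LRU, isolate and putback to move to unevictable list.
 */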
void mlock_vma_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

	if (!TestSetPageMlocked(page)) {
		inc_zone_page_state(page, NR_MLOCK);
		count_vm_event(UNEVICTABLE_PGMLOCKED);
		if (!isolate_lru_page(page))
			putback_lru_page(page);
	}
}
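/**
 * munlock_vma_page - munlock a vma page
 * @page - page to be unlocked
 *
 * called from munlock()/munmap() path with page supposedly on the LRU.
 * When we munlock a page, because the vma where we found the page is being
 * munlock()ed or munmap()ed, we want to check whether other vmas hold the
 * page locked so that we can leave it on the unevictable lru list and not
 * bother vmscan with it.  However, to walk the page's rmap list in
 * try_to_munlock() we must isolate the page from the LRU.  If some other
 * task has removed the page from the LRU, we won't be able to do that.
 * So we clear PageMlocked as we might not get another chance.  If we
 * can't isolate the page, we leave it for putback_lru_page() and vmscan
 * [page_referenced()/try_to_unmap()] to deal with.
 */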
static void munlock_vma_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

	if (TestClearPageMlocked(page)) {
		dec_zone_page_state(page, NR_MLOCK);
		if (!isolate_lru_page(page)) {
			int ret = try_to_munlock(page);
			/*
			 * did try_to_munlock() succeed or punt?
			 */
			if (ret == SWAP_SUCCESS || ret == SWAP_AGAIN)
				count_vm_event(UNEVICTABLE_PGMUNLOCKED);

			putback_lru_page(page);
		} else {
			/*
			 * We lost the race: the page already moved to the
			 * evictable list.  Let try_to_unmap() deal with it.
			 * At least we get the page state and mlock stats
			 * right.  However, the page is still on the
			 * unevictable list; we'll fix that up when the page
			 * is eventually freed or we scan that list.
			 */
			if (PageUnevictable(page))
				count_vm_event(UNEVICTABLE_PGSTRANDED);
			else
				count_vm_event(UNEVICTABLE_PGMUNLOCKED);
		}
	}
}
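/**
 * __mlock_vma_pages_range() -  mlock a range of pages in the vma.
 * @vma:   target vma
 * @start: start address
 * @end:   end address
 *
 * This takes care of making the pages present too.
 *
 * return 0 on success, negative error code on error.
 *
 * vma->vm_mm->mmap_sem must be held for at least read.
 */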
static long __mlock_vma_pages_range(struct vm_area_struct *vma,
				    unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr = start;
	struct page *pages[16]; /* 16 gives a reasonable batch */
	int nr_pages = (end - start) / PAGE_SIZE;
	int ret = 0;
	int gup_flags;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(end   & ~PAGE_MASK);
	VM_BUG_ON(start < vma->vm_start);
	VM_BUG_ON(end   > vma->vm_end);
	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));

	gup_flags = FOLL_TOUCH | FOLL_GET;
	if (vma->vm_flags & VM_WRITE)
		gup_flags |= FOLL_WRITE;

	while (nr_pages > 0) {
		int i;

		cond_resched();

		/*
		 * get_user_pages makes pages present if we are
		 * setting mlock, and this extra reference count will
		 * disable migration of this page.  However, the page
		 * may still be truncated out from under us.
		 */
		ret = __get_user_pages(current, mm, addr,
				min_t(int, nr_pages, ARRAY_SIZE(pages)),
				gup_flags, pages, NULL);
		/*
		 * This can happen for, e.g., VM_NONLINEAR regions before
		 * a page has been allocated and mapped at a given offset,
		 * or for addresses that map beyond end of a file.
		 * We'll mlock the pages if/when they get faulted in.
		 */
		if (ret < 0)
			break;

		lru_add_drain();	/* push cached pages to LRU */

		for (i = 0; i < ret; i++) {
			struct page *page = pages[i];

			if (page->mapping) {
				/*
				 * That preliminary check is mainly to avoid
				 * the pointless overhead of lock_page on the
				 * ZERO_PAGE: which might bounce very badly if
				 * there is contention.  However, we're still
				 * dirtying its cacheline with get/put_page.
				 */
				lock_page(page);
				/*
				 * Because we lock page here and migration is
				 * blocked by the elevated reference, we need
				 * only check for file-cache page truncation.
				 */
				if (page->mapping)
					mlock_vma_page(page);
				unlock_page(page);
			}
			put_page(page);	/* ref from get_user_pages() */
		}

		addr += ret * PAGE_SIZE;
		nr_pages -= ret;
		ret = 0;
	}

	return ret;	/* 0 or negative error code */
}
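/*
 * convert get_user_pages() return value to posix mlock() error
 */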
static int __mlock_posix_error_return(long retval)
{
	if (retval == -EFAULT)
		retval = -ENOMEM;
	else if (retval == -ENOMEM)
		retval = -EAGAIN;
	return retval;
}
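/**
 * mlock_vma_pages_range() - mlock pages in specified vma range.
 * @vma - the vma containing the specified address range
 * @start - starting address in @vma to mlock
 * @end   - end address [+1] in @vma to mlock
 *
 * For mmap()/mremap()/expansion of mlocked vma.
 *
 * return 0 on success for "normal" vmas.
 *
 * return number of pages [> 0] to be removed from locked_vm on success
 * of "special" vmas.
 */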
long mlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
{
	int nr_pages = (end - start) / PAGE_SIZE;
	BUG_ON(!(vma->vm_flags & VM_LOCKED));

	/*
	 * filter unlockable vmas
	 */
	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
		goto no_mlock;

	if (!((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
			is_vm_hugetlb_page(vma) ||
			vma == get_gate_vma(current))) {

		__mlock_vma_pages_range(vma, start, end);

		/* Hide errors from mmap() and other callers */
		return 0;
	}

	/*
	 * User mapped kernel pages or huge pages:
	 * make these pages present to populate the ptes, but
	 * fall thru' to reset VM_LOCKED--no need to unlock, and
	 * return nr_pages so these don't get counted against task's
	 * locked limit.  huge pages are already counted against
	 * locked vm limit.
	 */
	make_pages_present(start, end);

no_mlock:
	vma->vm_flags &= ~VM_LOCKED;	/* and don't come back! */
	return nr_pages;		/* error or pages NOT mlocked */
}
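/*
 * munlock_vma_pages_range() - munlock all pages in the vma range.
 * @vma - vma containing range to be munlock()ed.
 * @start - start address in @vma of the range
 * @end - end of range in @vma.
 *
 * For mremap(), munmap() and exit().
 *
 * Called with @vma VM_LOCKED.
 *
 * Returns with VM_LOCKED cleared.  Callers must be prepared to
 * deal with this.
 *
 * We don't save and restore VM_LOCKED here because pages are
 * still on lru.  In unmap path, pages might be scanned by reclaim
 * and re-mlocked by try_to_{munlock|unmap} before we unmap and
 * free them.  This will result in freeing mlocked pages.
 */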
void munlock_vma_pages_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	unsigned long addr;

	lru_add_drain();
	vma->vm_flags &= ~VM_LOCKED;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		struct page *page;
		/*
		 * Although FOLL_DUMP is intended for get_dump_page(),
		 * it just so happens that its special treatment of the
		 * ZERO_PAGE (returning an error instead of doing get_page)
		 * suits munlock very well (and if somehow an abnormal page
		 * has sneaked into the range, we won't oops here: great).
		 */
		page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
		if (page && !IS_ERR(page)) {
			lock_page(page);
			/*
			 * Like in __mlock_vma_pages_range(),
			 * because we lock page here and migration is
			 * blocked by the elevated reference, we need
			 * only check for file-cache page truncation.
			 */
			if (page->mapping)
				munlock_vma_page(page);
			unlock_page(page);
			put_page(page);
		}
		cond_resched();
	}
}
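/*
 * mlock_fixup  - handle mlock[all]/munlock[all] requests.
 *
 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
 * munlock is a no-op.  However, for some special vmas, we go ahead and
 * populate the ptes via make_pages_present().
 *
 * For vmas that pass the filters, merge/split as appropriate.
 */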
static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
	unsigned long start, unsigned long end, unsigned int newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	pgoff_t pgoff;
	int nr_pages;
	int ret = 0;
	int lock = newflags & VM_LOCKED;

	if (newflags == vma->vm_flags ||
			(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		goto out;	/* don't set VM_LOCKED,  don't count */

	if ((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
			is_vm_hugetlb_page(vma) ||
			vma == get_gate_vma(current)) {
		if (lock)
			make_pages_present(start, end);
		goto out;	/* don't set VM_LOCKED,  don't count */
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	if (start != vma->vm_start) {
		ret = split_vma(mm, vma, start, 1);
		if (ret)
			goto out;
	}

	if (end != vma->vm_end) {
		ret = split_vma(mm, vma, end, 0);
		if (ret)
			goto out;
	}

success:
	/*
	 * Keep track of amount of locked VM.
	 */
	nr_pages = (end - start) >> PAGE_SHIFT;
	if (!lock)
		nr_pages = -nr_pages;
	mm->locked_vm += nr_pages;

	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 * It's okay if try_to_unmap_one unmaps a page just after we
	 * set VM_LOCKED, __mlock_vma_pages_range will bring it back.
	 */
	if (lock) {
		vma->vm_flags = newflags;
		ret = __mlock_vma_pages_range(vma, start, end);
		if (ret < 0)
			ret = __mlock_posix_error_return(ret);
	} else {
		munlock_vma_pages_range(vma, start, end);
	}

out:
	*prev = vma;
	return ret;
}

static int do_mlock(unsigned long start, size_t len, int on)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct * vma, * prev;
	int error;

	len = PAGE_ALIGN(len);
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma_prev(current->mm, start, &prev);
	if (!vma || vma->vm_start > start)
		return -ENOMEM;

	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned int newflags;

		/* Here we know that  vma->vm_start <= nstart < vma->vm_end. */

		newflags = vma->vm_flags | VM_LOCKED;
		if (!on)
			newflags &= ~VM_LOCKED;

		/* fix up the whole vma, or just the part inside [start, end) */
		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			break;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
	}
	return error;
}

SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	if (!can_do_mlock())
		return -EPERM;

	lru_add_drain_all();	/* flush pagevec */

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;

	locked = len >> PAGE_SHIFT;
	locked += current->mm->locked_vm;

	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;

	/* check against resource limits */
	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
		error = do_mlock(start, len, 1);
	up_write(&current->mm->mmap_sem);
	return error;
}
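
/*
 * Usage sketch (userspace, not part of this file): a process that must
 * keep a buffer resident, e.g. one holding key material, would do:
 *
 *	buf = malloc(len);
 *	if (mlock(buf, len) != 0)
 *		handle EPERM / ENOMEM / EAGAIN per the checks above;
 *	...
 *	munlock(buf, len);
 *
 * Both calls round [start, start+len) out to page boundaries, so a
 * partially covered page is locked or unlocked in its entirety.
 */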

SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;
	ret = do_mlock(start, len, 0);
	up_write(&current->mm->mmap_sem);
	return ret;
}

static int do_mlockall(int flags)
{
	struct vm_area_struct * vma, * prev = NULL;
	unsigned int def_flags = 0;

	if (flags & MCL_FUTURE)
		def_flags = VM_LOCKED;
	current->mm->def_flags = def_flags;
	if (flags == MCL_FUTURE)
		goto out;

	for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
		unsigned int newflags;

		newflags = vma->vm_flags | VM_LOCKED;
		if (!(flags & MCL_CURRENT))
			newflags &= ~VM_LOCKED;

		/* Ignore errors */
		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
	}
out:
	return 0;
}

SYSCALL_DEFINE1(mlockall, int, flags)
{
	unsigned long lock_limit;
	int ret = -EINVAL;

	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
		goto out;

	ret = -EPERM;
	if (!can_do_mlock())
		goto out;

	lru_add_drain_all();	/* flush pagevec */

	down_write(&current->mm->mmap_sem);

	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;

	ret = -ENOMEM;
	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
	    capable(CAP_IPC_LOCK))
		ret = do_mlockall(flags);
	up_write(&current->mm->mmap_sem);
out:
	return ret;
}

SYSCALL_DEFINE0(munlockall)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mlockall(0);
	up_write(&current->mm->mmap_sem);
	return ret;
}
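/*
 * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB
 * shm segments) get accounted against the user_struct instead.
 */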
static DEFINE_SPINLOCK(shmlock_user_lock);

int user_shm_lock(size_t size, struct user_struct *user)
{
	unsigned long lock_limit, locked;
	int allowed = 0;

	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	if (lock_limit == RLIM_INFINITY)
		allowed = 1;
	lock_limit >>= PAGE_SHIFT;
	spin_lock(&shmlock_user_lock);
	if (!allowed &&
	    locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
		goto out;
	get_uid(user);
	user->locked_shm += locked;
	allowed = 1;
out:
	spin_unlock(&shmlock_user_lock);
	return allowed;
}

void user_shm_unlock(size_t size, struct user_struct *user)
{
	spin_lock(&shmlock_user_lock);
	user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	spin_unlock(&shmlock_user_lock);
	free_uid(user);
}

int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
			  size_t size)
{
	unsigned long lim, vm, pgsz;
	int error = -ENOMEM;

	pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;

	down_write(&mm->mmap_sem);

	lim = rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
	vm = mm->total_vm + pgsz;
	if (lim < vm)
		goto out;

	lim = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
	vm = mm->locked_vm + pgsz;
	if (lim < vm)
		goto out;

	mm->total_vm += pgsz;
	mm->locked_vm += pgsz;

	error = 0;
out:
	up_write(&mm->mmap_sem);
	return error;
}

void refund_locked_memory(struct mm_struct *mm, size_t size)
{
	unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;

	down_write(&mm->mmap_sem);

	mm->total_vm -= pgsz;
	mm->locked_vm -= pgsz;

	up_write(&mm->mmap_sem);
}