/*
 *	linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 */

#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/sched/user.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/rmap.h>
#include <linux/mmzone.h>
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>

#include "internal.h"
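
/*
 * Return true if the calling task may mlock memory: either its
 * RLIMIT_MEMLOCK limit is non-zero, or it has CAP_IPC_LOCK.
 */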
bool can_do_mlock(void)
{
	if (rlimit(RLIMIT_MEMLOCK) != 0)
		return true;
	if (capable(CAP_IPC_LOCK))
		return true;
	return false;
}
EXPORT_SYMBOL(can_do_mlock);
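
/*
 * Mlocked pages are marked with the PageMlocked() flag for efficient testing
 * in vmscan and, possibly, the fault path; and to support semi-accurate
 * statistics.
 *
 * An mlocked page [PageMlocked(page)] is unevictable.  As such, it will be
 * placed on the LRU "unevictable" list, rather than the [in]active lists.
 * PageUnevictable is set to indicate the unevictable state.
 *
 * When lazy mlocking via vmscan, it is important to ensure that the vma's
 * VM_LOCKED status is not concurrently being modified, otherwise we may have
 * mlocked a page that is being munlocked.  So lazy mlock must take the
 * mmap_sem for read, and verify that the vma really is locked (see mm/rmap.c).
 */

/*
 * LRU accounting for clear_page_mlock()
 */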
void clear_page_mlock(struct page *page)
{
	if (!TestClearPageMlocked(page))
		return;

	mod_zone_page_state(page_zone(page), NR_MLOCK,
			    -hpage_nr_pages(page));
	count_vm_event(UNEVICTABLE_PGCLEARED);
	if (!isolate_lru_page(page)) {
		putback_lru_page(page);
	} else {
		/*
		 * We lost the isolation race: the page was not on the LRU.
		 * If it is still marked unevictable, account it as stranded.
		 */
		if (PageUnevictable(page))
			count_vm_event(UNEVICTABLE_PGSTRANDED);
	}
}
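
/*
 * Mark page as mlocked if not already.
 * If page on LRU, isolate and putback to move to unevictable list.
 */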
void mlock_vma_page(struct page *page)
{
	/* Serialize with page migration */
	BUG_ON(!PageLocked(page));

	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);

	if (!TestSetPageMlocked(page)) {
		mod_zone_page_state(page_zone(page), NR_MLOCK,
				    hpage_nr_pages(page));
		count_vm_event(UNEVICTABLE_PGMLOCKED);
		if (!isolate_lru_page(page))
			putback_lru_page(page);
	}
}
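
/*
 * Isolate a page from the LRU with an optional get_page() pin.
 *
 * Assumes the zone lru_lock is already held and the page is already pinned
 * by the caller.  Returns true if isolation succeeded.
 */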
static bool __munlock_isolate_lru_page(struct page *page, bool getpage)
{
	if (PageLRU(page)) {
		struct lruvec *lruvec;

		lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
		if (getpage)
			get_page(page);
		ClearPageLRU(page);
		del_page_from_lru_list(page, lruvec, page_lru(page));
		return true;
	}

	return false;
}
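
/*
 * Finish munlock after successful page isolation
 *
 * Page must be locked. This is a wrapper for try_to_munlock()
 * and putback_lru_page() with munlock accounting.
 */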
static void __munlock_isolated_page(struct page *page)
{
	int ret = SWAP_AGAIN;

	/*
	 * Optimization: if the page was mapped just once, that's our mapping
	 * and we don't need to check all the other vmas.
	 */
	if (page_mapcount(page) > 1)
		ret = try_to_munlock(page);

	/* Did try_to_munlock() succeed or punt? */
	if (ret != SWAP_MLOCK)
		count_vm_event(UNEVICTABLE_PGMUNLOCKED);

	putback_lru_page(page);
}
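
/*
 * Accounting for page isolation fail during munlock
 *
 * Performs accounting when page isolation fails in munlock. There is nothing
 * else to do because it means some other task has already removed the page
 * from the LRU. putback_lru_page() will take care of removing the page from
 * the unevictable list, if necessary. vmscan [page_referenced()] will move
 * the page back to the unevictable list if some other vma has it mlocked.
 */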
static void __munlock_isolation_failed(struct page *page)
{
	if (PageUnevictable(page))
		__count_vm_event(UNEVICTABLE_PGSTRANDED);
	else
		__count_vm_event(UNEVICTABLE_PGMUNLOCKED);
}
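
/**
 * munlock_vma_page - munlock a vma page
 * @page - page to be unlocked, either a normal page or THP page head
 *
 * returns the size of the page as a page mask (0 for normal page,
 *         HPAGE_PMD_NR - 1 for THP head page)
 *
 * called from munlock()/munmap() path with page supposedly on the LRU.
 * When we munlock a page, because the vma where we found the page is being
 * munlock()ed or munmap()ed, we want to check whether other vmas hold the
 * page mlocked.
 */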
unsigned int munlock_vma_page(struct page *page)
{
	int nr_pages;
	struct zone *zone = page_zone(page);

	/* For try_to_munlock() and to serialize with page migration */
	BUG_ON(!PageLocked(page));

	VM_BUG_ON_PAGE(PageTail(page), page);

	/*
	 * Taking the zone lru_lock before clearing PageMlocked serializes
	 * against a parallel THP split, so hpage_nr_pages() stays stable
	 * for the statistics below.
	 */
	spin_lock_irq(zone_lru_lock(zone));

	if (!TestClearPageMlocked(page)) {
		/* Potentially, PTE-mapped THP: do not skip the rest PTEs */
		nr_pages = 1;
		goto unlock_out;
	}

	nr_pages = hpage_nr_pages(page);
	__mod_zone_page_state(zone, NR_MLOCK, -nr_pages);

	if (__munlock_isolate_lru_page(page, true)) {
		spin_unlock_irq(zone_lru_lock(zone));
		__munlock_isolated_page(page);
		goto out;
	}
	__munlock_isolation_failed(page);

unlock_out:
	spin_unlock_irq(zone_lru_lock(zone));

out:
	return nr_pages - 1;
}
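
/*
 * convert get_user_pages() return value to posix mlock() error
 */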
static int __mlock_posix_error_return(long retval)
{
	if (retval == -EFAULT)
		retval = -ENOMEM;
	else if (retval == -ENOMEM)
		retval = -EAGAIN;
	return retval;
}
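
/*
 * Prepare page for fast batched LRU putback via putback_lru_page()
 *
 * Performs some of the work of putback_lru_page() taking advantage of the
 * fact that pages can be batched via pagevec: the page is only added to the
 * pagevec (and unlocked) if it is still evictable and mapped at most once;
 * otherwise the caller must fall back to the slow path.  Returns true when
 * the page was queued for the fast putback.
 */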
static bool __putback_lru_fast_prepare(struct page *page, struct pagevec *pvec,
		int *pgrescued)
{
	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (page_mapcount(page) <= 1 && page_evictable(page)) {
		pagevec_add(pvec, page);
		if (TestClearPageUnevictable(page))
			(*pgrescued)++;
		unlock_page(page);
		return true;
	}

	return false;
}
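
/*
 * Putback multiple evictable pages to the LRU
 *
 * Batched putback of evictable pages that avoids the per-page overhead of
 * putback_lru_page().  Some of the pages might have become unevictable
 * meanwhile, but that is OK: vmscan will sort them out.
 */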
static void __putback_lru_fast(struct pagevec *pvec, int pgrescued)
{
	count_vm_events(UNEVICTABLE_PGMUNLOCKED, pagevec_count(pvec));
	/*
	 * __pagevec_lru_add() calls release_pages() so we don't call
	 * put_page() explicitly
	 */
	__pagevec_lru_add(pvec);
	count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
}
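
/*
 * Munlock a batch of pages from the same zone
 *
 * The work is split to two main phases. First phase clears the Mlocked flag
 * and attempts to isolate the pages, all under a single zone lru lock.
 * The second phase finishes the munlock only for pages where isolation
 * succeeded.
 *
 * Note that the pagevec may be modified during the process.
 */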
static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
{
	int i;
	int nr = pagevec_count(pvec);
	int delta_munlocked;
	struct pagevec pvec_putback;
	int pgrescued = 0;

	pagevec_init(&pvec_putback, 0);

	/* Phase 1: page isolation */
	spin_lock_irq(zone_lru_lock(zone));
	for (i = 0; i < nr; i++) {
		struct page *page = pvec->pages[i];

		if (TestClearPageMlocked(page)) {
			/*
			 * We already have a pin from follow_page_mask()
			 * so we can spare the get_page() here.
			 */
			if (__munlock_isolate_lru_page(page, false))
				continue;
			else
				__munlock_isolation_failed(page);
		}

		/*
		 * We won't be munlocking this page in the next phase
		 * but we still need to release the follow_page_mask()
		 * pin. We cannot do it under lru_lock however. If it's
		 * the last pin, __page_cache_release() would deadlock.
		 */
		pagevec_add(&pvec_putback, pvec->pages[i]);
		pvec->pages[i] = NULL;
	}
	delta_munlocked = -nr + pagevec_count(&pvec_putback);
	__mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
	spin_unlock_irq(zone_lru_lock(zone));

	/* Now we can release pins of pages that we are not munlocking */
	pagevec_release(&pvec_putback);

	/* Phase 2: page munlock */
	for (i = 0; i < nr; i++) {
		struct page *page = pvec->pages[i];

		if (page) {
			lock_page(page);
			if (!__putback_lru_fast_prepare(page, &pvec_putback,
					&pgrescued)) {
				/*
				 * Slow path. We don't want to lose the last
				 * pin before unlock_page()
				 */
				get_page(page); /* for putback_lru_page() */
				__munlock_isolated_page(page);
				unlock_page(page);
				put_page(page); /* from follow_page_mask() */
			}
		}
	}

	/*
	 * Phase 3: page putback for pages that qualified for the fast path
	 * This will also call put_page() to return pin from follow_page_mask()
	 */
	if (pagevec_count(&pvec_putback))
		__putback_lru_fast(&pvec_putback, pgrescued);
}
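
/*
 * Fill up pagevec for __munlock_pagevec using pte walk
 *
 * The function expects that the struct page corresponding to @start address is
 * a non-THP page already pinned and in the @pvec, and that it belongs to the
 * zone with given @zoneid.
 *
 * The rest of @pvec is filled by subsequent pages within the same pmd and same
 * zone, as long as the pte's are present and vm_normal_page() succeeds. These
 * pages also get pinned.
 *
 * Returns the address of the next page that should be scanned. This equals
 * @start + PAGE_SIZE when no page could be added by the pte walk.
 */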
static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
		struct vm_area_struct *vma, int zoneid, unsigned long start,
		unsigned long end)
{
	pte_t *pte;
	spinlock_t *ptl;

	/*
	 * Initialize pte walk starting at the already pinned page where we
	 * are sure that there is a pte, as it was pinned under the same
	 * mmap_sem write op.
	 */
	pte = get_locked_pte(vma->vm_mm, start, &ptl);
	/* Make sure we do not cross the page table boundary */
	end = pgd_addr_end(start, end);
	end = p4d_addr_end(start, end);
	end = pud_addr_end(start, end);
	end = pmd_addr_end(start, end);

	/* The page next to the pinned page is the first we will try to get */
	start += PAGE_SIZE;
	while (start < end) {
		struct page *page = NULL;
		pte++;
		if (pte_present(*pte))
			page = vm_normal_page(vma, start, *pte);
		/*
		 * Break if page could not be obtained or the page's node+zone
		 * does not match
		 */
		if (!page || page_zone_id(page) != zoneid)
			break;

		/*
		 * Do not use pagevec for PTE-mapped THP,
		 * munlock_vma_pages_range() will handle them.
		 */
		if (PageTransCompound(page))
			break;

		get_page(page);
		/*
		 * Increase the address that will be returned *before* the
		 * eventual break due to pvec becoming full by adding the page
		 */
		start += PAGE_SIZE;
		if (pagevec_add(pvec, page) == 0)
			break;
	}
	pte_unmap_unlock(pte, ptl);
	return start;
}
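
/*
 * munlock_vma_pages_range() - munlock all pages in the vma range.
 * @vma - vma containing range to be munlock()ed.
 * @start - start address in @vma of the range
 * @end - end of range in @vma.
 *
 *  For mremap(), munmap() and exit().
 *
 * Called with @vma VM_LOCKED.
 *
 * Returns with VM_LOCKED cleared.  Callers must be prepared to
 * deal with this.
 */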
void munlock_vma_pages_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	vma->vm_flags &= VM_LOCKED_CLEAR_MASK;

	while (start < end) {
		struct page *page;
		unsigned int page_mask = 0;
		unsigned long page_increm;
		struct pagevec pvec;
		struct zone *zone;
		int zoneid;

		pagevec_init(&pvec, 0);
		/*
		 * Although FOLL_DUMP is intended for get_dump_page(),
		 * it just so happens that its special treatment of the
		 * ZERO_PAGE (returning an error instead of doing get_page)
		 * suits munlock very well (and if somehow an abnormal page
		 * has sneaked into the range, we won't oops here: great).
		 */
		page = follow_page(vma, start, FOLL_GET | FOLL_DUMP);

		if (page && !IS_ERR(page)) {
			if (PageTransTail(page)) {
				VM_BUG_ON_PAGE(PageMlocked(page), page);
				put_page(page); /* follow_page_mask() */
			} else if (PageTransHuge(page)) {
				lock_page(page);
				/*
				 * Any THP page found by follow_page_mask() may
				 * have gotten split before reaching
				 * munlock_vma_page(), so we need to compute
				 * the page_mask here instead.
				 */
				page_mask = munlock_vma_page(page);
				unlock_page(page);
				put_page(page); /* follow_page_mask() */
			} else {
				/*
				 * Non-huge pages are handled in batches via
				 * pagevec. The pin from follow_page_mask()
				 * prevents them from collapsing by THP.
				 */
				pagevec_add(&pvec, page);
				zone = page_zone(page);
				zoneid = page_zone_id(page);

				/*
				 * Try to fill the rest of pagevec using fast
				 * pte walk. This will also update start to
				 * the next page to process. Then munlock the
				 * pagevec.
				 */
				start = __munlock_pagevec_fill(&pvec, vma,
						zoneid, start, end);
				__munlock_pagevec(&pvec, zone);
				goto next;
			}
		}
		page_increm = 1 + page_mask;
		start += page_increm * PAGE_SIZE;
next:
		cond_resched();
	}
}
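
/*
 * mlock_fixup  - handle mlock[all]/munlock[all] requests.
 *
 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
 * munlock is a no-op.  For vmas that pass the filters, merge/split as
 * appropriate, update the locked_vm accounting and set or clear VM_LOCKED.
 */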
static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
	unsigned long start, unsigned long end, vm_flags_t newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	pgoff_t pgoff;
	int nr_pages;
	int ret = 0;
	int lock = !!(newflags & VM_LOCKED);
	vm_flags_t old_flags = vma->vm_flags;

	if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) ||
	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm))
		/* don't set VM_LOCKED or VM_LOCKONFAULT and don't count */
		goto out;

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma),
			  vma->vm_userfaultfd_ctx);
	if (*prev) {
		vma = *prev;
		goto success;
	}

	if (start != vma->vm_start) {
		ret = split_vma(mm, vma, start, 1);
		if (ret)
			goto out;
	}

	if (end != vma->vm_end) {
		ret = split_vma(mm, vma, end, 0);
		if (ret)
			goto out;
	}

success:
	/*
	 * Keep track of amount of locked VM.
	 */
	nr_pages = (end - start) >> PAGE_SHIFT;
	if (!lock)
		nr_pages = -nr_pages;
	else if (old_flags & VM_LOCKED)
		nr_pages = 0;
	mm->locked_vm += nr_pages;

	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 * It's okay if try_to_unmap_one unmaps a page just after we
	 * set VM_LOCKED, populate_vma_page_range will bring it back.
	 */
	if (lock)
		vma->vm_flags = newflags;
	else
		munlock_vma_pages_range(vma, start, end);

out:
	*prev = vma;
	return ret;
}

static int apply_vma_lock_flags(unsigned long start, size_t len,
				vm_flags_t flags)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct * vma, * prev;
	int error;

	VM_BUG_ON(offset_in_page(start));
	VM_BUG_ON(len != PAGE_ALIGN(len));
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma(current->mm, start);
	if (!vma || vma->vm_start > start)
		return -ENOMEM;

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		vm_flags_t newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;

		newflags |= flags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */
		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			break;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
	}
	return error;
}
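
/*
 * Go through vma areas and sum size of mlocked
 * vma pages, as return value.
 * Note deferred memory locking case(mlock2(,,MLOCK_ONFAULT)
 * is also counted.
 * Return value: previously mlocked page counts
 */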
static int count_mm_mlocked_page_nr(struct mm_struct *mm,
		unsigned long start, size_t len)
{
	struct vm_area_struct *vma;
	int count = 0;

	if (mm == NULL)
		mm = current->mm;

	vma = find_vma(mm, start);
	if (vma == NULL)
		vma = mm->mmap;

	for (; vma ; vma = vma->vm_next) {
		if (start >= vma->vm_end)
			continue;
		if (start + len <= vma->vm_start)
			break;
		if (vma->vm_flags & VM_LOCKED) {
			if (start > vma->vm_start)
				count -= (start - vma->vm_start);
			if (start + len < vma->vm_end) {
				count += start + len - vma->vm_start;
				break;
			}
			count += vma->vm_end - vma->vm_start;
		}
	}

	return count >> PAGE_SHIFT;
}

static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t flags)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	if (!can_do_mlock())
		return -EPERM;

	lru_add_drain_all();	/* flush pagevec */

	len = PAGE_ALIGN(len + (offset_in_page(start)));
	start &= PAGE_MASK;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;
	locked = len >> PAGE_SHIFT;

	if (down_write_killable(&current->mm->mmap_sem))
		return -EINTR;

	locked += current->mm->locked_vm;
	if ((locked > lock_limit) && (!capable(CAP_IPC_LOCK))) {
		/*
		 * It is possible that the regions requested intersect with
		 * previously mlocked areas, that part area in "mm->locked_vm"
		 * should not be counted to new mlock increment count. So check
		 * and adjust locked count if necessary.
		 */
		locked -= count_mm_mlocked_page_nr(current->mm,
				start, len);
	}

	/* check against resource limits */
	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
		error = apply_vma_lock_flags(start, len, flags);

	up_write(&current->mm->mmap_sem);
	if (error)
		return error;

	error = __mm_populate(start, len, 0);
	if (error)
		return __mlock_posix_error_return(error);
	return 0;
}

SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
{
	return do_mlock(start, len, VM_LOCKED);
}

SYSCALL_DEFINE3(mlock2, unsigned long, start, size_t, len, int, flags)
{
	vm_flags_t vm_flags = VM_LOCKED;

	if (flags & ~MLOCK_ONFAULT)
		return -EINVAL;

	if (flags & MLOCK_ONFAULT)
		vm_flags |= VM_LOCKONFAULT;

	return do_mlock(start, len, vm_flags);
}

SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
{
	int ret;

	len = PAGE_ALIGN(len + (offset_in_page(start)));
	start &= PAGE_MASK;

	if (down_write_killable(&current->mm->mmap_sem))
		return -EINTR;
	ret = apply_vma_lock_flags(start, len, 0);
	up_write(&current->mm->mmap_sem);

	return ret;
}
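
/*
 * Take the MCL_* flags passed into mlockall (or 0 if called from munlockall)
 * and translate into the appropriate modifications to mm->def_flags and/or the
 * flags for all current VMAs.
 *
 * If MCL_FUTURE is set, mm->def_flags gets VM_LOCKED (and VM_LOCKONFAULT for
 * MCL_ONFAULT); without MCL_CURRENT nothing more needs to be done.  If
 * MCL_CURRENT is set, every existing VMA is updated via mlock_fixup().
 */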
static int apply_mlockall_flags(int flags)
{
	struct vm_area_struct * vma, * prev = NULL;
	vm_flags_t to_add = 0;

	current->mm->def_flags &= VM_LOCKED_CLEAR_MASK;
	if (flags & MCL_FUTURE) {
		current->mm->def_flags |= VM_LOCKED;

		if (flags & MCL_ONFAULT)
			current->mm->def_flags |= VM_LOCKONFAULT;

		if (!(flags & MCL_CURRENT))
			goto out;
	}

	if (flags & MCL_CURRENT) {
		to_add |= VM_LOCKED;
		if (flags & MCL_ONFAULT)
			to_add |= VM_LOCKONFAULT;
	}

	for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
		vm_flags_t newflags;

		newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
		newflags |= to_add;

		/* Ignore errors */
		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
		cond_resched_rcu_qs();
	}
out:
	return 0;
}

SYSCALL_DEFINE1(mlockall, int, flags)
{
	unsigned long lock_limit;
	int ret;

	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT)))
		return -EINVAL;

	if (!can_do_mlock())
		return -EPERM;

	if (flags & MCL_CURRENT)
		lru_add_drain_all();	/* flush pagevec */

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;

	if (down_write_killable(&current->mm->mmap_sem))
		return -EINTR;

	ret = -ENOMEM;
	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
	    capable(CAP_IPC_LOCK))
		ret = apply_mlockall_flags(flags);
	up_write(&current->mm->mmap_sem);
	if (!ret && (flags & MCL_CURRENT))
		mm_populate(0, TASK_SIZE);

	return ret;
}

SYSCALL_DEFINE0(munlockall)
{
	int ret;

	if (down_write_killable(&current->mm->mmap_sem))
		return -EINTR;
	ret = apply_mlockall_flags(0);
	up_write(&current->mm->mmap_sem);
	return ret;
}
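
/*
 * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB
 * shm segments) get accounted against the user_struct instead.
 */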
static DEFINE_SPINLOCK(shmlock_user_lock);

int user_shm_lock(size_t size, struct user_struct *user)
{
	unsigned long lock_limit, locked;
	int allowed = 0;

	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	lock_limit = rlimit(RLIMIT_MEMLOCK);
	if (lock_limit == RLIM_INFINITY)
		allowed = 1;
	lock_limit >>= PAGE_SHIFT;
	spin_lock(&shmlock_user_lock);
	if (!allowed &&
	    locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
		goto out;
	get_uid(user);
	user->locked_shm += locked;
	allowed = 1;
out:
	spin_unlock(&shmlock_user_lock);
	return allowed;
}

void user_shm_unlock(size_t size, struct user_struct *user)
{
	spin_lock(&shmlock_user_lock);
	user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	spin_unlock(&shmlock_user_lock);
	free_uid(user);
}