// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 */

#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/sched/user.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/rmap.h>
#include <linux/mmzone.h>
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>

#include "internal.h"

bool can_do_mlock(void)
{
	if (rlimit(RLIMIT_MEMLOCK) != 0)
		return true;
	if (capable(CAP_IPC_LOCK))
		return true;
	return false;
}
EXPORT_SYMBOL(can_do_mlock);

/*
 * Mlocked pages are marked with PageMlocked() flag for efficient testing
 * in vmscan and, possibly, the fault path; and to support semi-accurate
 * statistics.
 *
 * An mlocked page [PageMlocked(page)] is unevictable.  As such, it will
 * be placed on the LRU "unevictable" list, rather than the [in]active lists.
 * The unevictable list is an LRU sibling list to the [in]active lists.
 * PageUnevictable is set to indicate the unevictable state.
 *
 * When lazy mlocking via vmscan, it is important to ensure that the
 * vma's VM_LOCKED status is not concurrently being modified, otherwise we
 * may have mlocked a page that is being munlocked. So lazy mlock must take
 * the mmap_lock for read, and verify that the vma really is locked
 * (see mm/rmap.c).
 */

/*
 * LRU accounting for clear_page_mlock()
 */
void clear_page_mlock(struct page *page)
{
	int nr_pages;

	if (!TestClearPageMlocked(page))
		return;

	nr_pages = thp_nr_pages(page);
	mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
	count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages);
	/*
	 * The previous TestClearPageMlocked() corresponds to the smp_mb()
	 * in __pagevec_lru_add_fn().
	 *
	 * See __pagevec_lru_add_fn for more explanation.
	 */
	if (!isolate_lru_page(page)) {
		putback_lru_page(page);
	} else {
		/*
		 * We lost the race: the page already moved to the evictable
		 * list.
		 */
		if (PageUnevictable(page))
			count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);
	}
}

/*
 * Mark page as mlocked if not already.
 * If page on LRU, isolate and putback to correct unevictable list.
 */
void mlock_vma_page(struct page *page)
{
	/* Serialize with page migration */
	BUG_ON(!PageLocked(page));

	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);

	if (!TestSetPageMlocked(page)) {
		int nr_pages = thp_nr_pages(page);

		mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
		count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
		if (!isolate_lru_page(page))
			putback_lru_page(page);
	}
}

/*
 * Isolate a page from the LRU, with an optional get_page() pin.
 * Assumes lru_lock is already held and the page is already pinned.
 */
static bool __munlock_isolate_lru_page(struct page *page, bool getpage)
{
	if (PageLRU(page)) {
		struct lruvec *lruvec;

		lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
		if (getpage)
			get_page(page);
		ClearPageLRU(page);
		del_page_from_lru_list(page, lruvec, page_lru(page));
		return true;
	}

	return false;
}

/*
 * Finish munlock after successful page isolation
 *
 * Page must be locked. This is a wrapper for try_to_munlock()
 * and putback_lru_page() with munlock accounting.
 */
static void __munlock_isolated_page(struct page *page)
{
	/*
	 * Optimization: if the page was mapped just once, that's our mapping
	 * and we don't need to check all the other vmas.
	 */
	if (page_mapcount(page) > 1)
		try_to_munlock(page);

	/* Did try_to_munlock() succeed or punt? */
	if (!PageMlocked(page))
		count_vm_events(UNEVICTABLE_PGMUNLOCKED, thp_nr_pages(page));

	putback_lru_page(page);
}

/*
 * Accounting for page isolation fail during munlock
 *
 * Performs accounting when page isolation fails in munlock. There is nothing
 * else to do because it means some other task has already removed the page
 * from the LRU. putback_lru_page() will take care of removing the page from
 * the unevictable list, if necessary. vmscan [page_referenced()] will move
 * stranded pages to the unevictable list if some other vma has it mlocked.
 */
static void __munlock_isolation_failed(struct page *page)
{
	int nr_pages = thp_nr_pages(page);

	if (PageUnevictable(page))
		__count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);
	else
		__count_vm_events(UNEVICTABLE_PGMUNLOCKED, nr_pages);
}

/**
 * munlock_vma_page - munlock a vma page
 * @page: page to be unlocked, either a normal page or a THP head page
 *
 * returns the size of the page as a page mask (0 for normal page,
 *	   HPAGE_PMD_NR - 1 for THP head page)
 *
 * called from munlock()/munmap() path with page supposedly on the LRU.
 * When we munlock a page, because the vma where we found the page is being
 * munlock()ed or munmap()ed, we want to check whether other vmas hold the
 * page locked so that we can leave it on the unevictable lru list and not
 * bother vmscan with it.  However, to walk the page's rmap list in
 * try_to_munlock() we must isolate the page from the LRU.  If some other
 * task has removed the page from the LRU, we won't be able to do that.
 * So we clear the PageMlocked as we might not get another chance.  If we
 * can't isolate the page, we leave it for putback_lru_page() and vmscan
 * [page_referenced()/try_to_unmap()] to deal with.
 */
unsigned int munlock_vma_page(struct page *page)
{
	int nr_pages;
	pg_data_t *pgdat = page_pgdat(page);

	/* For try_to_munlock() and to serialize with page migration */
	BUG_ON(!PageLocked(page));

	VM_BUG_ON_PAGE(PageTail(page), page);

	/*
	 * Serialize THP splits against munlock: holding the lru_lock keeps
	 * PageMlocked and thp_nr_pages() stable while we clear the flag and
	 * update the NR_MLOCK counter.
	 */
	spin_lock_irq(&pgdat->lru_lock);

	if (!TestClearPageMlocked(page)) {
		/* Potentially, PTE-mapped THP: do not skip the rest PTEs */
		nr_pages = 1;
		goto unlock_out;
	}

	nr_pages = thp_nr_pages(page);
	__mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);

	if (__munlock_isolate_lru_page(page, true)) {
		spin_unlock_irq(&pgdat->lru_lock);
		__munlock_isolated_page(page);
		goto out;
	}
	__munlock_isolation_failed(page);

unlock_out:
	spin_unlock_irq(&pgdat->lru_lock);

out:
	return nr_pages - 1;
}

/*
 * convert get_user_pages() return value to posix mlock() error
 *
 * POSIX has mlock() report ENOMEM when part of the range is not mapped, so
 * -EFAULT from the fault path becomes -ENOMEM, while -ENOMEM (could not lock
 * the pages right now) becomes -EAGAIN.
 */
static int __mlock_posix_error_return(long retval)
{
	if (retval == -EFAULT)
		retval = -ENOMEM;
	else if (retval == -ENOMEM)
		retval = -EAGAIN;
	return retval;
}

/*
 * Prepare page for fast batched LRU putback via __putback_lru_fast()
 *
 * The fast path is available only for evictable pages with a single mapping;
 * then we can bypass the per-cpu pvec and get better performance.
 * When mapcount > 1 we need try_to_munlock(), which can fail.
 * When !page_evictable(), we need the full redo logic of putback_lru_page()
 * to avoid leaving an evictable page on the unevictable list.
 *
 * In case of success, @page is added to @pvec and @pgrescued is incremented
 * if the page was previously unevictable. @page is also unlocked.
 */
static bool __putback_lru_fast_prepare(struct page *page, struct pagevec *pvec,
				       int *pgrescued)
{
	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (page_mapcount(page) <= 1 && page_evictable(page)) {
		pagevec_add(pvec, page);
		if (TestClearPageUnevictable(page))
			(*pgrescued)++;
		unlock_page(page);
		return true;
	}

	return false;
}

/*
 * Putback multiple evictable pages to the LRU
 *
 * Batched putback of evictable pages that bypasses the per-cpu pvec. Some of
 * the pages might have meanwhile become unevictable but that is OK.
 */
static void __putback_lru_fast(struct pagevec *pvec, int pgrescued)
{
	count_vm_events(UNEVICTABLE_PGMUNLOCKED, pagevec_count(pvec));
	/*
	 * __pagevec_lru_add() calls release_pages() so we don't call
	 * put_page() explicitly.
	 */
	__pagevec_lru_add(pvec);
	count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
}

/*
 * Munlock a batch of pages from the same zone
 *
 * The work is split into two main phases. The first phase clears the Mlocked
 * flag and attempts to isolate the pages, all under a single zone lru lock.
 * The second phase finishes the munlock only for pages where isolation
 * succeeded.
 *
 * Note that the pagevec may be modified during the process.
 */
static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
{
	int i;
	int nr = pagevec_count(pvec);
	int delta_munlocked = -nr;
	struct pagevec pvec_putback;
	int pgrescued = 0;

	pagevec_init(&pvec_putback);

	/* Phase 1: page isolation */
	spin_lock_irq(&zone->zone_pgdat->lru_lock);
	for (i = 0; i < nr; i++) {
		struct page *page = pvec->pages[i];

		if (TestClearPageMlocked(page)) {
			/*
			 * We already have a pin from follow_page_mask(),
			 * so we can spare the get_page() here.
			 */
			if (__munlock_isolate_lru_page(page, false))
				continue;
			else
				__munlock_isolation_failed(page);
		} else {
			delta_munlocked++;
		}

		/*
		 * We won't be munlocking this page in the next phase
		 * but we still need to release the follow_page_mask()
		 * pin. We cannot do it under lru_lock however. If it's
		 * the last pin, __page_cache_release() would deadlock.
		 */
		pagevec_add(&pvec_putback, pvec->pages[i]);
		pvec->pages[i] = NULL;
	}
	__mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
	spin_unlock_irq(&zone->zone_pgdat->lru_lock);

	/* Now we can release pins of pages that we are not munlocking */
	pagevec_release(&pvec_putback);

	/* Phase 2: page munlock */
	for (i = 0; i < nr; i++) {
		struct page *page = pvec->pages[i];

		if (page) {
			lock_page(page);
			if (!__putback_lru_fast_prepare(page, &pvec_putback,
					&pgrescued)) {
				/*
				 * Slow path. We don't want to lose the last
				 * pin before unlock_page().
				 */
				get_page(page); /* for putback_lru_page() */
				__munlock_isolated_page(page);
				unlock_page(page);
				put_page(page); /* from follow_page_mask() */
			}
		}
	}

	/*
	 * Phase 3: page putback for pages that qualified for the fast path.
	 * This will also call put_page() to return the pin taken by
	 * follow_page_mask().
	 */
	if (pagevec_count(&pvec_putback))
		__putback_lru_fast(&pvec_putback, pgrescued);
}

/*
 * Fill up pagevec for __munlock_pagevec using pte walk
 *
 * The function expects that the struct page corresponding to @start address is
 * a non-THP page already pinned and in the @pvec, and that it belongs to @zone.
 *
 * The rest of @pvec is filled by subsequent pages within the same pmd and same
 * zone, as long as the pte's are present and vm_normal_page() succeeds. These
 * pages also get pinned.
 *
 * Returns the address of the next page that should be scanned. This equals
 * @start + PAGE_SIZE when no page could be added by the pte walk.
 */
static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
		struct vm_area_struct *vma, struct zone *zone,
		unsigned long start, unsigned long end)
{
	pte_t *pte;
	spinlock_t *ptl;

	/*
	 * Initialize the pte walk starting at the already pinned page where
	 * we are sure that there is a pte, as it was pinned under the same
	 * mmap_lock write op.
	 */
	pte = get_locked_pte(vma->vm_mm, start, &ptl);
	/* Make sure we do not cross the page table boundary */
	end = pgd_addr_end(start, end);
	end = p4d_addr_end(start, end);
	end = pud_addr_end(start, end);
	end = pmd_addr_end(start, end);

	/* The page next to the pinned page is the first we will try to get */
	start += PAGE_SIZE;
	while (start < end) {
		struct page *page = NULL;
		pte++;
		if (pte_present(*pte))
			page = vm_normal_page(vma, start, *pte);
		/*
		 * Break if the page could not be obtained or the page's
		 * node+zone does not match.
		 */
		if (!page || page_zone(page) != zone)
			break;

		/*
		 * Do not use pagevec for PTE-mapped THP;
		 * munlock_vma_pages_range() will handle them.
		 */
		if (PageTransCompound(page))
			break;

		get_page(page);
		/*
		 * Increase the address that will be returned *before* the
		 * eventual break due to pvec becoming full by adding the page.
		 */
		start += PAGE_SIZE;
		if (pagevec_add(pvec, page) == 0)
			break;
	}
	pte_unmap_unlock(pte, ptl);
	return start;
}

/*
 * munlock_vma_pages_range() - munlock all pages in the vma range.
 * @vma - vma containing range to be munlock()ed.
 * @start - start address in @vma of the range
 * @end - end of range in @vma.
 *
 * For mremap(), munmap() and exit().
 *
 * Called with @vma VM_LOCKED.
 *
 * Returns with VM_LOCKED cleared.  Callers must be prepared to
 * deal with this.
 *
 * We don't save and restore VM_LOCKED here because pages are
 * still on lru.  In unmap path, pages might be scanned by reclaim
 * and re-mlocked by try_to_{munlock|unmap} before we unmap and
 * free them.  This will result in freeing mlocked pages.
 */
void munlock_vma_pages_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	vma->vm_flags &= VM_LOCKED_CLEAR_MASK;

	while (start < end) {
		struct page *page;
		unsigned int page_mask = 0;
		unsigned long page_increm;
		struct pagevec pvec;
		struct zone *zone;

		pagevec_init(&pvec);
		/*
		 * Although FOLL_DUMP is intended for get_dump_page(),
		 * it just works like FOLL_GET except that it doesn't
		 * return the shared zero page for a hole: a hole can
		 * never be mlocked, so there is nothing to munlock
		 * there, while FOLL_GET pins any page actually found.
		 */
		page = follow_page(vma, start, FOLL_GET | FOLL_DUMP);

		if (page && !IS_ERR(page)) {
			if (PageTransTail(page)) {
				VM_BUG_ON_PAGE(PageMlocked(page), page);
				put_page(page);	/* follow_page_mask() */
			} else if (PageTransHuge(page)) {
				lock_page(page);
				/*
				 * Any THP page found by follow_page() may
				 * have gotten split before reaching
				 * munlock_vma_page(), so we need to compute
				 * the page_mask here instead.
				 */
				page_mask = munlock_vma_page(page);
				unlock_page(page);
				put_page(page);	/* follow_page_mask() */
			} else {
				/*
				 * Non-huge pages are handled in batches via
				 * pagevec. The pin from follow_page()
				 * prevents them from being freed.
				 */
				pagevec_add(&pvec, page);
				zone = page_zone(page);

				/*
				 * Try to fill the rest of the pagevec using
				 * the fast pte walk. This will also update
				 * start to the next page to process. Then
				 * munlock the pagevec.
				 */
				start = __munlock_pagevec_fill(&pvec, vma,
						zone, start, end);
				__munlock_pagevec(&pvec, zone);
				goto next;
			}
		}
		page_increm = 1 + page_mask;
		start += page_increm * PAGE_SIZE;
next:
		cond_resched();
	}
}

/*
 * mlock_fixup  - handle mlock[all]/munlock[all] requests.
 *
 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
 * munlock is a no-op: such vmas (VM_SPECIAL, hugetlb, DAX, the gate vma)
 * are left untouched, with no change to flags or to mm->locked_vm.
 * For all other vmas the range is merged or split as needed, the flags
 * are updated, and the mm->locked_vm accounting is adjusted.
 */
static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
	unsigned long start, unsigned long end, vm_flags_t newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	pgoff_t pgoff;
	int nr_pages;
	int ret = 0;
	int lock = !!(newflags & VM_LOCKED);
	vm_flags_t old_flags = vma->vm_flags;

	if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) ||
	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm) ||
	    vma_is_dax(vma))
		/* don't set VM_LOCKED or VM_LOCKONFAULT and don't count */
		goto out;

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma),
			  vma->vm_userfaultfd_ctx);
	if (*prev) {
		vma = *prev;
		goto success;
	}

	if (start != vma->vm_start) {
		ret = split_vma(mm, vma, start, 1);
		if (ret)
			goto out;
	}

	if (end != vma->vm_end) {
		ret = split_vma(mm, vma, end, 0);
		if (ret)
			goto out;
	}

success:
	/*
	 * Keep track of amount of locked VM.
	 */
	nr_pages = (end - start) >> PAGE_SHIFT;
	if (!lock)
		nr_pages = -nr_pages;
	else if (old_flags & VM_LOCKED)
		nr_pages = 0;
	mm->locked_vm += nr_pages;

	/*
	 * vm_flags is protected by the mmap_lock held in write mode.
	 * It's okay if try_to_unmap_one unmaps a page just after we
	 * set VM_LOCKED, populate_vma_page_range will bring it back.
	 */

	if (lock)
		vma->vm_flags = newflags;
	else
		munlock_vma_pages_range(vma, start, end);

out:
	*prev = vma;
	return ret;
}

static int apply_vma_lock_flags(unsigned long start, size_t len,
				vm_flags_t flags)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct *vma, *prev;
	int error;

	VM_BUG_ON(offset_in_page(start));
	VM_BUG_ON(len != PAGE_ALIGN(len));
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma(current->mm, start);
	if (!vma || vma->vm_start > start)
		return -ENOMEM;

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start; ; ) {
		vm_flags_t newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;

		newflags |= flags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */
		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			break;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
	}
	return error;
}

/*
 * Go through the vmas and sum up the size of the mlocked pages they
 * contain within the requested range.
 * Note that a range locked only on fault (mlock2 with MLOCK_ONFAULT)
 * is also counted.
 * Return value: count of previously mlocked pages in the range
 */
static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
		unsigned long start, size_t len)
{
	struct vm_area_struct *vma;
	unsigned long count = 0;

	if (mm == NULL)
		mm = current->mm;

	vma = find_vma(mm, start);
	if (vma == NULL)
		vma = mm->mmap;

	for (; vma ; vma = vma->vm_next) {
		if (start >= vma->vm_end)
			continue;
		if (start + len <= vma->vm_start)
			break;
		if (vma->vm_flags & VM_LOCKED) {
			if (start > vma->vm_start)
				count -= (start - vma->vm_start);
			if (start + len < vma->vm_end) {
				count += start + len - vma->vm_start;
				break;
			}
			count += vma->vm_end - vma->vm_start;
		}
	}

	return count >> PAGE_SHIFT;
}
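
/*
 * Worked example for the helper above (illustrative, not from the original
 * source): with a single VM_LOCKED vma covering [A, A + 8 * PAGE_SIZE) and a
 * request for [A + 2 * PAGE_SIZE, A + 6 * PAGE_SIZE), the loop first
 * subtracts the 2 pages preceding start, then the "start + len < vma->vm_end"
 * branch adds start + len - vma->vm_start = 6 pages, so the final shift
 * yields 4 pages: exactly the overlap between the request and the locked vma.
 */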

static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t flags)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	start = untagged_addr(start);

	if (!can_do_mlock())
		return -EPERM;

	len = PAGE_ALIGN(len + (offset_in_page(start)));
	start &= PAGE_MASK;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;
	locked = len >> PAGE_SHIFT;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	locked += current->mm->locked_vm;
	if ((locked > lock_limit) && (!capable(CAP_IPC_LOCK))) {
		/*
		 * The requested region may intersect ranges that are already
		 * mlocked; those pages are already accounted in
		 * mm->locked_vm and must not be counted again toward the
		 * new increment, so adjust the count accordingly.
		 */
		locked -= count_mm_mlocked_page_nr(current->mm,
				start, len);
	}

	/* check against resource limits */
	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
		error = apply_vma_lock_flags(start, len, flags);

	mmap_write_unlock(current->mm);
	if (error)
		return error;

	error = __mm_populate(start, len, 0);
	if (error)
		return __mlock_posix_error_return(error);
	return 0;
}

SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
{
	return do_mlock(start, len, VM_LOCKED);
}
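
/*
 * Illustrative userspace sketch (not part of this file): a typical caller
 * locks a buffer so it cannot be paged out, and sees the POSIX error
 * convention produced by __mlock_posix_error_return() above.
 *
 *	void *buf = malloc(len);
 *	if (buf && mlock(buf, len) != 0)
 *		perror("mlock");	// EPERM, EAGAIN or ENOMEM
 *	...
 *	munlock(buf, len);
 */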

SYSCALL_DEFINE3(mlock2, unsigned long, start, size_t, len, int, flags)
{
	vm_flags_t vm_flags = VM_LOCKED;

	if (flags & ~MLOCK_ONFAULT)
		return -EINVAL;

	if (flags & MLOCK_ONFAULT)
		vm_flags |= VM_LOCKONFAULT;

	return do_mlock(start, len, vm_flags);
}
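
/*
 * Illustrative userspace sketch (not part of this file): MLOCK_ONFAULT
 * requests lock-on-fault semantics, so pages are locked as they are
 * faulted in rather than being populated up front by __mm_populate().
 *
 *	if (mlock2(buf, len, MLOCK_ONFAULT) != 0)
 *		perror("mlock2");	// wrapper in glibc 2.27+, else syscall(2)
 */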

SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
{
	int ret;

	start = untagged_addr(start);

	len = PAGE_ALIGN(len + (offset_in_page(start)));
	start &= PAGE_MASK;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;
	ret = apply_vma_lock_flags(start, len, 0);
	mmap_write_unlock(current->mm);

	return ret;
}

/*
 * Take the MCL_* flags passed into mlockall (or 0 if called from munlockall)
 * and translate into the appropriate modifications to mm->def_flags and/or the
 * flags for all current VMAs.
 *
 * There are a couple of subtleties with this.  If mlockall() is called
 * multiple times with different flags, the values do not necessarily stack.
 * If mlockall() is called once with MCL_CURRENT and MCL_FUTURE and then
 * again with only MCL_FUTURE, the current vmas are left untouched (the loop
 * below is skipped) while mm->def_flags is rewritten from scratch.
 */
static int apply_mlockall_flags(int flags)
{
	struct vm_area_struct *vma, *prev = NULL;
	vm_flags_t to_add = 0;

	current->mm->def_flags &= VM_LOCKED_CLEAR_MASK;
	if (flags & MCL_FUTURE) {
		current->mm->def_flags |= VM_LOCKED;

		if (flags & MCL_ONFAULT)
			current->mm->def_flags |= VM_LOCKONFAULT;

		if (!(flags & MCL_CURRENT))
			goto out;
	}

	if (flags & MCL_CURRENT) {
		to_add |= VM_LOCKED;
		if (flags & MCL_ONFAULT)
			to_add |= VM_LOCKONFAULT;
	}

	for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
		vm_flags_t newflags;

		newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
		newflags |= to_add;

		/* Ignore errors */
		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
		cond_resched();
	}
out:
	return 0;
}

SYSCALL_DEFINE1(mlockall, int, flags)
{
	unsigned long lock_limit;
	int ret;

	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT)) ||
	    flags == MCL_ONFAULT)
		return -EINVAL;

	if (!can_do_mlock())
		return -EPERM;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	ret = -ENOMEM;
	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
	    capable(CAP_IPC_LOCK))
		ret = apply_mlockall_flags(flags);
	mmap_write_unlock(current->mm);
	if (!ret && (flags & MCL_CURRENT))
		mm_populate(0, TASK_SIZE);

	return ret;
}
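
/*
 * Illustrative userspace sketch (not part of this file): a real-time
 * process commonly locks everything it has mapped plus all future
 * mappings in a single call.
 *
 *	if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0)
 *		perror("mlockall");	// typically EPERM or ENOMEM
 */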

SYSCALL_DEFINE0(munlockall)
{
	int ret;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;
	ret = apply_mlockall_flags(0);
	mmap_write_unlock(current->mm);
	return ret;
}

/*
 * Objects with different lifetime than processes (SHM_LOCK and SHM_UNLOCK
 * shm segments) get accounted against the user_struct instead.
 */
static DEFINE_SPINLOCK(shmlock_user_lock);

int user_shm_lock(size_t size, struct user_struct *user)
{
	unsigned long lock_limit, locked;
	int allowed = 0;

	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	lock_limit = rlimit(RLIMIT_MEMLOCK);
	if (lock_limit == RLIM_INFINITY)
		allowed = 1;
	lock_limit >>= PAGE_SHIFT;
	spin_lock(&shmlock_user_lock);
	if (!allowed &&
	    locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
		goto out;
	get_uid(user);
	user->locked_shm += locked;
	allowed = 1;
out:
	spin_unlock(&shmlock_user_lock);
	return allowed;
}

void user_shm_unlock(size_t size, struct user_struct *user)
{
	spin_lock(&shmlock_user_lock);
	user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	spin_unlock(&shmlock_user_lock);
	free_uid(user);
}