#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/processor.h>
#include <linux/sizes.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

#include "internal.h"
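
/**
 * kfree_const - conditionally free objects obtained from kstrdup_const()
 * @x: pointer to free, possibly pointing into kernel rodata
 *
 * Only frees @x if it does not lie in the kernel's rodata section, so it is
 * safe to call on pointers returned by kstrdup_const().
 */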
void kfree_const(const void *x)
{
        if (!is_kernel_rodata((unsigned long)x))
                kfree(x);
}
EXPORT_SYMBOL(kfree_const);
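
/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */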
char *kstrdup(const char *s, gfp_t gfp)
{
        size_t len;
        char *buf;

        if (!s)
                return NULL;

        len = strlen(s) + 1;
        buf = kmalloc_track_caller(len, gfp);
        if (buf)
                memcpy(buf, s, len);
        return buf;
}
EXPORT_SYMBOL(kstrdup);
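
/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Strings located in the kernel's rodata section are not duplicated; the
 * original pointer is returned instead. Free the result with kfree_const(),
 * which only frees memory that was actually allocated here.
 *
 * Return: @s itself if it is in rodata, otherwise a newly allocated copy of
 * @s, or %NULL in case of error
 */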
const char *kstrdup_const(const char *s, gfp_t gfp)
{
        if (is_kernel_rodata((unsigned long)s))
                return s;

        return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
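
/**
 * kstrndup - allocate space for and copy an existing string, length-limited
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */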
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
        size_t len;
        char *buf;

        if (!s)
                return NULL;

        len = strnlen(s, max);
        buf = kmalloc_track_caller(len + 1, gfp);
        if (buf) {
                memcpy(buf, s, len);
                buf[len] = '\0';
        }
        return buf;
}
EXPORT_SYMBOL(kstrndup);
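
/**
 * kmemdup - duplicate a region of memory
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error
 */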
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
        void *p;

        p = kmalloc_track_caller(len, gfp);
        if (p)
                memcpy(p, src, len);
        return p;
}
EXPORT_SYMBOL(kmemdup);
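
/**
 * kmemdup_nul - create a NUL-terminated string from unterminated data
 * @s: the data to stringify
 * @len: the size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */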
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
        char *buf;

        if (!s)
                return NULL;

        buf = kmalloc_track_caller(len + 1, gfp);
        if (buf) {
                memcpy(buf, s, len);
                buf[len] = '\0';
        }
        return buf;
}
EXPORT_SYMBOL(kmemdup_nul);
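
/**
 * memdup_user - duplicate memory region from user space
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. The result is physically contiguous,
 * so the caller can use kfree() to release it.
 */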
void *memdup_user(const void __user *src, size_t len)
{
        void *p;

        p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
        if (!p)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(p, src, len)) {
                kfree(p);
                return ERR_PTR(-EFAULT);
        }

        return p;
}
EXPORT_SYMBOL(memdup_user);
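
/**
 * vmemdup_user - duplicate memory region from user space
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. The result may not be physically
 * contiguous, so the caller must use kvfree() to release it.
 */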
void *vmemdup_user(const void __user *src, size_t len)
{
        void *p;

        p = kvmalloc(len, GFP_USER);
        if (!p)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(p, src, len)) {
                kvfree(p);
                return ERR_PTR(-EFAULT);
        }

        return p;
}
EXPORT_SYMBOL(vmemdup_user);
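
/**
 * strndup_user - duplicate an existing string from user space
 * @s: the string to duplicate
 * @n: maximum number of bytes to copy, including the trailing NUL
 *
 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 */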
char *strndup_user(const char __user *s, long n)
{
        char *p;
        long length;

        length = strnlen_user(s, n);

        if (!length)
                return ERR_PTR(-EFAULT);

        if (length > n)
                return ERR_PTR(-EINVAL);

        p = memdup_user(s, length);

        if (IS_ERR(p))
                return p;

        p[length - 1] = '\0';

        return p;
}
EXPORT_SYMBOL(strndup_user);
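
/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */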
void *memdup_user_nul(const void __user *src, size_t len)
{
        char *p;

        /*
         * copy_from_user() below may fault and sleep, so there is no point
         * in using anything more restrictive than GFP_KERNEL here.
         */
        p = kmalloc_track_caller(len + 1, GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(p, src, len)) {
                kfree(p);
                return ERR_PTR(-EFAULT);
        }
        p[len] = '\0';

        return p;
}
EXPORT_SYMBOL(memdup_user_nul);

void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev)
{
        struct vm_area_struct *next;

        vma->vm_prev = prev;
        if (prev) {
                next = prev->vm_next;
                prev->vm_next = vma;
        } else {
                next = mm->mmap;
                mm->mmap = vma;
        }
        vma->vm_next = next;
        if (next)
                next->vm_prev = vma;
}

void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma)
{
        struct vm_area_struct *prev, *next;

        next = vma->vm_next;
        prev = vma->vm_prev;
        if (prev)
                prev->vm_next = next;
        else
                mm->mmap = next;
        if (next)
                next->vm_prev = prev;
}

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
        struct task_struct * __maybe_unused t = current;

        return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}
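
/*
 * Change the backing file of a VMA: take a reference on the new file and
 * drop the reference on the old one. Intended for use while the VMA is
 * being set up.
 */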
void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
        /* Changing an anonymous vma with this is illegal */
        get_file(file);
        swap(vma->vm_file, file);
        fput(file);
}
EXPORT_SYMBOL(vma_set_file);

#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))     /* 8MB of VA */
#endif
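
/**
 * randomize_stack_top - randomize the top of the stack for a new process
 * @stack_top: the architecture-chosen stack top
 *
 * Applies a page-aligned random offset below @stack_top (or above it, for
 * upward-growing stacks) when PF_RANDOMIZE is set for the current task.
 */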
unsigned long randomize_stack_top(unsigned long stack_top)
{
        unsigned long random_variable = 0;

        if (current->flags & PF_RANDOMIZE) {
                random_variable = get_random_long();
                random_variable &= STACK_RND_MASK;
                random_variable <<= PAGE_SHIFT;
        }
#ifdef CONFIG_STACK_GROWSUP
        return PAGE_ALIGN(stack_top) + random_variable;
#else
        return PAGE_ALIGN(stack_top) - random_variable;
#endif
}

#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
        /* Is the current task 32bit ? */
        if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
                return randomize_page(mm->brk, SZ_32M);

        return randomize_page(mm->brk, SZ_1G);
}

unsigned long arch_mmap_rnd(void)
{
        unsigned long rnd;

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
        if (is_compat_task())
                rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
        else
#endif
        rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

        return rnd << PAGE_SHIFT;
}

static int mmap_is_legacy(struct rlimit *rlim_stack)
{
        if (current->personality & ADDR_COMPAT_LAYOUT)
                return 1;

        if (rlim_stack->rlim_cur == RLIM_INFINITY)
                return 1;

        return sysctl_legacy_va_layout;
}
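
/*
 * Leave enough space between the mmap area and the stack to honour ulimit in
 * the face of randomisation.
 */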
#define MIN_GAP (SZ_128M)
#define MAX_GAP (STACK_TOP / 6 * 5)

static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
        unsigned long gap = rlim_stack->rlim_cur;
        unsigned long pad = stack_guard_gap;

        /* Account for stack randomization if necessary */
        if (current->flags & PF_RANDOMIZE)
                pad += (STACK_RND_MASK << PAGE_SHIFT);

        /* Values close to RLIM_INFINITY can overflow. */
        if (gap + pad > gap)
                gap += pad;

        if (gap < MIN_GAP)
                gap = MIN_GAP;
        else if (gap > MAX_GAP)
                gap = MAX_GAP;

        return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
        unsigned long random_factor = 0UL;

        if (current->flags & PF_RANDOMIZE)
                random_factor = arch_mmap_rnd();

        if (mmap_is_legacy(rlim_stack)) {
                mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
                mm->get_unmapped_area = arch_get_unmapped_area;
        } else {
                mm->mmap_base = mmap_base(random_factor, rlim_stack);
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
        }
}
#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
        mm->mmap_base = TASK_UNMAPPED_BASE;
        mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif
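
/**
 * __account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 * @task:        task used to check RLIMIT_MEMLOCK
 * @bypass_rlim: %true if the RLIMIT_MEMLOCK check should be skipped
 *
 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
 * that mmap_lock is held in write mode.
 *
 * Return: 0 on success, -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */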
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
                        struct task_struct *task, bool bypass_rlim)
{
        unsigned long locked_vm, limit;
        int ret = 0;

        mmap_assert_write_locked(mm);

        locked_vm = mm->locked_vm;
        if (inc) {
                if (!bypass_rlim) {
                        limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
                        if (locked_vm + pages > limit)
                                ret = -ENOMEM;
                }
                if (!ret)
                        mm->locked_vm = locked_vm + pages;
        } else {
                WARN_ON_ONCE(pages > locked_vm);
                mm->locked_vm = locked_vm - pages;
        }

        pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
                 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
                 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
                 ret ? " - exceeded" : "");

        return ret;
}
EXPORT_SYMBOL_GPL(__account_locked_vm);
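
/**
 * account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:    mm to account against, may be NULL
 * @pages: number of pages to account
 * @inc:   %true if @pages should be considered positive, %false if not
 *
 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
 *
 * Return: 0 on success, or also 0 if @pages is 0 or @mm is NULL. -ENOMEM if
 * RLIMIT_MEMLOCK would be exceeded.
 */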
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
{
        int ret;

        if (pages == 0 || !mm)
                return 0;

        mmap_write_lock(mm);
        ret = __account_locked_vm(mm, pages, inc, current,
                                  capable(CAP_IPC_LOCK));
        mmap_write_unlock(mm);

        return ret;
}
EXPORT_SYMBOL_GPL(account_locked_vm);

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
        unsigned long flag, unsigned long pgoff)
{
        unsigned long ret;
        struct mm_struct *mm = current->mm;
        unsigned long populate;
        LIST_HEAD(uf);

        ret = security_mmap_file(file, prot, flag);
        if (!ret) {
                if (mmap_write_lock_killable(mm))
                        return -EINTR;
                ret = do_mmap(file, addr, len, prot, flag, pgoff, &populate,
                              &uf);
                mmap_write_unlock(mm);
                userfaultfd_unmap_complete(mm, &uf);
                if (populate)
                        mm_populate(ret, populate);
        }
        return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
        unsigned long flag, unsigned long offset)
{
        if (unlikely(offset + PAGE_ALIGN(len) < offset))
                return -EINVAL;
        if (unlikely(offset_in_page(offset)))
                return -EINVAL;

        return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);
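
/**
 * kvmalloc_node - attempt a physically contiguous allocation, with a
 * vmalloc fallback on failure
 * @size: size of the request
 * @flags: gfp mask for the allocation - must be compatible (a superset) with
 *         GFP_KERNEL for the vmalloc fallback to be usable
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory, and if that fails falls back to the
 * vmalloc allocator. Requests that are not GFP_KERNEL-compatible are passed
 * straight to kmalloc_node() with no fallback. Free the result with
 * kvfree().
 *
 * Return: pointer to the allocated memory or %NULL on failure.
 */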
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
        gfp_t kmalloc_flags = flags;
        void *ret;

        /*
         * vmalloc uses GFP_KERNEL for some internal allocations (e.g. page
         * tables), so only GFP_KERNEL-compatible requests can use the
         * vmalloc fallback; anything else goes straight to kmalloc.
         */
        if ((flags & GFP_KERNEL) != GFP_KERNEL)
                return kmalloc_node(size, flags, node);

        /*
         * Try a physically contiguous allocation first, but make larger
         * requests non-disruptive: no allocation failure warnings and no
         * retries/OOM killer, since we have the vmalloc fallback.
         */
        if (size > PAGE_SIZE) {
                kmalloc_flags |= __GFP_NOWARN;

                if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
                        kmalloc_flags |= __GFP_NORETRY;
        }

        ret = kmalloc_node(size, kmalloc_flags, node);

        /*
         * It doesn't really make sense to fall back to vmalloc for sub-page
         * requests.
         */
        if (ret || size <= PAGE_SIZE)
                return ret;

        return __vmalloc_node(size, 1, flags, node,
                        __builtin_return_address(0));
}
EXPORT_SYMBOL(kvmalloc_node);
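
/**
 * kvfree() - free memory
 * @addr: pointer to allocated memory
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or
 * kvmalloc(). It is slightly more efficient to use kfree() or vfree() if you
 * are certain which one you need.
 */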
void kvfree(const void *addr)
{
        if (is_vmalloc_addr(addr))
                vfree(addr);
        else
                kfree(addr);
}
EXPORT_SYMBOL(kvfree);
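
/**
 * kvfree_sensitive - free a data object containing sensitive information
 * @addr: address of the data object to be freed
 * @len: length of the data object
 *
 * Uses memzero_explicit() to clear the content of a kmalloc'ed or
 * kvmalloc'ed object holding sensitive data, so the compiler cannot
 * optimize the clearing away, before freeing it with kvfree().
 */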
void kvfree_sensitive(const void *addr, size_t len)
{
        if (likely(!ZERO_OR_NULL_PTR(addr))) {
                memzero_explicit((void *)addr, len);
                kvfree(addr);
        }
}
EXPORT_SYMBOL(kvfree_sensitive);

static inline void *__page_rmapping(struct page *page)
{
        unsigned long mapping;

        mapping = (unsigned long)page->mapping;
        mapping &= ~PAGE_MAPPING_FLAGS;

        return (void *)mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
        page = compound_head(page);
        return __page_rmapping(page);
}
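
/*
 * Return true if this page is mapped into pagetables.
 * For compound page it returns true if any subpage of compound page is mapped.
 */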
bool page_mapped(struct page *page)
{
        int i;

        if (likely(!PageCompound(page)))
                return atomic_read(&page->_mapcount) >= 0;
        page = compound_head(page);
        if (atomic_read(compound_mapcount_ptr(page)) >= 0)
                return true;
        if (PageHuge(page))
                return false;
        for (i = 0; i < compound_nr(page); i++) {
                if (atomic_read(&page[i]._mapcount) >= 0)
                        return true;
        }
        return false;
}
EXPORT_SYMBOL(page_mapped);

struct anon_vma *page_anon_vma(struct page *page)
{
        unsigned long mapping;

        page = compound_head(page);
        mapping = (unsigned long)page->mapping;
        if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
                return NULL;
        return __page_rmapping(page);
}

struct address_space *page_mapping(struct page *page)
{
        struct address_space *mapping;

        page = compound_head(page);

        /* This happens if someone calls flush_dcache_page on slab page */
        if (unlikely(PageSlab(page)))
                return NULL;

        if (unlikely(PageSwapCache(page))) {
                swp_entry_t entry;

                entry.val = page_private(page);
                return swap_address_space(entry);
        }

        mapping = page->mapping;
        if ((unsigned long)mapping & PAGE_MAPPING_ANON)
                return NULL;

        return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(page_mapping);

/*
 * For file cache pages, return the address_space, otherwise return NULL
 */
struct address_space *page_mapping_file(struct page *page)
{
        if (unlikely(PageSwapCache(page)))
                return NULL;
        return page_mapping(page);
}

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
        int ret;

        ret = atomic_read(&page->_mapcount) + 1;
        /*
         * For file THP page->_mapcount contains total number of mapping
         * of the page: no need to look into compound_mapcount.
         */
        if (!PageAnon(page) && !PageHuge(page))
                return ret;
        page = compound_head(page);
        ret += atomic_read(compound_mapcount_ptr(page)) + 1;
        if (PageDoubleMap(page))
                ret--;
        return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write, void *buffer,
                size_t *lenp, loff_t *ppos)
{
        int ret;

        ret = proc_dointvec(table, write, buffer, lenp, ppos);
        if (ret == 0 && write)
                sysctl_overcommit_kbytes = 0;
        return ret;
}

static void sync_overcommit_as(struct work_struct *dummy)
{
        percpu_counter_sync(&vm_committed_as);
}

int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
                size_t *lenp, loff_t *ppos)
{
        struct ctl_table t;
        int new_policy;
        int ret;

        /*
         * When switching to OVERCOMMIT_NEVER, first recompute the per-cpu
         * batch size and sync the per-cpu counters, so that vm_committed_as
         * is accurate before the strict policy takes effect.
         */
        if (write) {
                t = *table;
                t.data = &new_policy;
                ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
                if (ret)
                        return ret;

                mm_compute_batch(new_policy);
                if (new_policy == OVERCOMMIT_NEVER)
                        schedule_on_each_cpu(sync_overcommit_as);
                sysctl_overcommit_memory = new_policy;
        } else {
                ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        }

        return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer,
                size_t *lenp, loff_t *ppos)
{
        int ret;

        ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
        if (ret == 0 && write)
                sysctl_overcommit_ratio = 0;
        return ret;
}
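
/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */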
unsigned long vm_commit_limit(void)
{
        unsigned long allowed;

        if (sysctl_overcommit_kbytes)
                allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
        else
                allowed = ((totalram_pages() - hugetlb_total_pages())
                           * sysctl_overcommit_ratio / 100);
        allowed += total_swap_pages;

        return allowed;
}

/*
 * Make sure vm_committed_as in one cacheline and not cacheline shared with
 * other variables. It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;
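
/*
 * Return the (approximate) amount of memory currently committed in the
 * system: the sum of the vm_committed_as per-cpu counters. Under heavy
 * concurrent updates the value is only an estimate, which is sufficient
 * for reporting and overcommit policy decisions.
 */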
unsigned long vm_memory_committed(void)
{
        return percpu_counter_sum_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);
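
/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */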
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
        long allowed;

        vm_acct_memory(pages);

        /*
         * Sometimes we want to use more memory than we have
         */
        if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
                return 0;

        if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
                if (pages > totalram_pages() + total_swap_pages)
                        goto error;
                return 0;
        }

        allowed = vm_commit_limit();
        /*
         * Reserve some for root
         */
        if (!cap_sys_admin)
                allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

        /*
         * Don't let a single process grow so big a user can't recover
         */
        if (mm) {
                long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);

                allowed -= min_t(long, mm->total_vm / 32, reserve);
        }

        if (percpu_counter_read_positive(&vm_committed_as) < allowed)
                return 0;
error:
        vm_unacct_memory(pages);

        return -ENOMEM;
}
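
/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:   the task whose cmdline value to copy.
 * @buffer: the buffer to copy to.
 * @buflen: the length of the buffer. Larger cmdline values are truncated
 *          to this length.
 *
 * Return: the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NUL byte.
 */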
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
        int res = 0;
        unsigned int len;
        struct mm_struct *mm = get_task_mm(task);
        unsigned long arg_start, arg_end, env_start, env_end;

        if (!mm)
                goto out;
        if (!mm->arg_end)
                goto out_mm;

        spin_lock(&mm->arg_lock);
        arg_start = mm->arg_start;
        arg_end = mm->arg_end;
        env_start = mm->env_start;
        env_end = mm->env_end;
        spin_unlock(&mm->arg_lock);

        len = arg_end - arg_start;

        if (len > buflen)
                len = buflen;

        res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

        /*
         * If the NUL at the end of args has been overwritten, then
         * assume the application is using setproctitle(3) and read on
         * into the environment area.
         */
        if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
                len = strnlen(buffer, res);
                if (len < res) {
                        res = len;
                } else {
                        len = env_end - env_start;
                        if (len > buflen - res)
                                len = buflen - res;
                        res += access_process_vm(task, env_start,
                                                 buffer+res, len,
                                                 FOLL_FORCE);
                        res = strnlen(buffer, res);
                }
        }
out_mm:
        mmput(mm);
out:
        return res;
}

int __weak memcmp_pages(struct page *page1, struct page *page2)
{
        char *addr1, *addr2;
        int ret;

        addr1 = kmap_atomic(page1);
        addr2 = kmap_atomic(page2);
        ret = memcmp(addr1, addr2, PAGE_SIZE);
        kunmap_atomic(addr2);
        kunmap_atomic(addr1);
        return ret;
}