// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/processor.h>
#include <linux/sizes.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

#include "internal.h"

/**
 * kfree_const - conditionally free memory, only for objects compared against
 * kernel rodata
 * @x: pointer to the memory
 *
 * Function calls kfree only if @x is not in .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Strings allocated by kstrdup_const should be freed by kfree_const.
 *
 * Return: source string if it is in .rodata section, otherwise it falls
 * back to kstrdup.
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	char *buf;

	if (!s)
		return NULL;

	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kmemdup_nul);

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.  Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);

/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.  Result may be not
 * physically contiguous.  Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kvmalloc(len, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kvfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(vmemdup_user);

/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);

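/* Insert @vma into @mm's VMA list immediately after @prev (NULL for head). */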
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		next = mm->mmap;
		mm->mmap = vma;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

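/* Unlink @vma from @mm's VMA list, fixing up its neighbours and the head. */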
void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *prev, *next;

	next = vma->vm_next;
	prev = vma->vm_prev;
	if (prev)
		prev->vm_next = next;
	else
		mm->mmap = next;
	if (next)
		next->vm_prev = prev;
}

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))	/* 8MB of VA */
#endif

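/*
 * Pick a randomized stack top: shift @stack_top down (or up, when the
 * stack grows upwards per CONFIG_STACK_GROWSUP) by a page-aligned random
 * offset bounded by STACK_RND_MASK, but only if the task has PF_RANDOMIZE.
 */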
unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned long random_variable = 0;

	if (current->flags & PF_RANDOMIZE) {
		random_variable = get_random_long();
		random_variable &= STACK_RND_MASK;
		random_variable <<= PAGE_SHIFT;
	}
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top) + random_variable;
#else
	return PAGE_ALIGN(stack_top) - random_variable;
#endif
}

#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	/* Is the current task 32bit ? */
	if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
		return randomize_page(mm->brk, SZ_32M);

	return randomize_page(mm->brk, SZ_1G);
}

unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
	if (is_compat_task())
		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
	else
#endif
		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}

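/*
 * The legacy (bottom-up) mmap layout is used when the personality or the
 * legacy_va_layout sysctl asks for it, or when the stack rlimit is
 * unlimited, since then no sane gap below a top-down base can be computed.
 */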
static int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}

/*
 * Leave enough space between the mmap area and the stack to honour ulimit in
 * the face of randomisation.
 */
#define MIN_GAP (SZ_128M)
#define MAX_GAP (STACK_TOP / 6 * 5)

static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_guard_gap;

	/* Account for stack randomization if necessary */
	if (current->flags & PF_RANDOMIZE)
		pad += (STACK_RND_MASK << PAGE_SHIFT);

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/**
 * __account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 * @task:        task used to check RLIMIT_MEMLOCK
 * @bypass_rlim: %true if RLIMIT_MEMLOCK should not be checked
 *
 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
 * that mmap_sem is held as writer.
 *
 * Return:
 * * 0       on success
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
			struct task_struct *task, bool bypass_rlim)
{
	unsigned long locked_vm, limit;
	int ret = 0;

	lockdep_assert_held_write(&mm->mmap_sem);

	locked_vm = mm->locked_vm;
	if (inc) {
		if (!bypass_rlim) {
			limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
			if (locked_vm + pages > limit)
				ret = -ENOMEM;
		}
		if (!ret)
			mm->locked_vm = locked_vm + pages;
	} else {
		WARN_ON_ONCE(pages > locked_vm);
		mm->locked_vm = locked_vm - pages;
	}

	pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
		 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
		 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
		 ret ? " - exceeded" : "");

	return ret;
}
EXPORT_SYMBOL_GPL(__account_locked_vm);

/**
 * account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against, may be NULL
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 *
 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
 *
 * Return:
 * * 0       on success, or if mm is NULL
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
{
	int ret;

	if (pages == 0 || !mm)
		return 0;

	down_write(&mm->mmap_sem);
	ret = __account_locked_vm(mm, pages, inc, current,
				  capable(CAP_IPC_LOCK));
	up_write(&mm->mmap_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(account_locked_vm);

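/*
 * Core of vm_mmap(): check the mapping with the LSM, take mmap_sem as
 * writer around do_mmap_pgoff(), then fault the region in via mm_populate()
 * if the mapping asked to be populated (MAP_POPULATE/MAP_LOCKED).
 */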
unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (down_write_killable(&mm->mmap_sem))
			return -EINTR;
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
				    &populate, &uf);
		up_write(&mm->mmap_sem);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Please note that any use of gfp flags outside of GFP_KERNEL is careful to not
 * fall back to vmalloc.
 *
 * Return: pointer to the allocated memory or %NULL in case of failure
 */
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
	gfp_t kmalloc_flags = flags;
	void *ret;

	/*
	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
	 * so the given set of flags has to be compatible.
	 */
	if ((flags & GFP_KERNEL) != GFP_KERNEL)
		return kmalloc_node(size, flags, node);

	/*
	 * We want to attempt a large physically contiguous block first because
	 * it is less likely to fragment multiple larger blocks and therefore
	 * contribute to a long term fragmentation less than vmalloc fallback.
	 * However make sure that larger requests are not too disruptive - no
	 * OOM killer and no allocation failure warnings as we have a fallback.
	 */
	if (size > PAGE_SIZE) {
		kmalloc_flags |= __GFP_NOWARN;

		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
			kmalloc_flags |= __GFP_NORETRY;
	}

	ret = kmalloc_node(size, kmalloc_flags, node);

	/*
	 * It doesn't really make sense to fallback to vmalloc for sub page
	 * requests
	 */
	if (ret || size <= PAGE_SIZE)
		return ret;

	return __vmalloc_node_flags_caller(size, node, flags,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(kvmalloc_node);

/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Either preemptible task context or not-NMI interrupt.
 */
void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);

static inline void *__page_rmapping(struct page *page)
{
	unsigned long mapping;

	mapping = (unsigned long)page->mapping;
	mapping &= ~PAGE_MAPPING_FLAGS;

	return (void *)mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
	page = compound_head(page);
	return __page_rmapping(page);
}

/*
 * Return true if this page is mapped into pagetables.
 * For compound page it returns true if any subpage of compound page is mapped.
 */
bool page_mapped(struct page *page)
{
	int i;

	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) >= 0;
	page = compound_head(page);
	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
		return true;
	if (PageHuge(page))
		return false;
	for (i = 0; i < compound_nr(page); i++) {
		if (atomic_read(&page[i]._mapcount) >= 0)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(page_mapped);

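/* Return the anon_vma of an anonymous page, or NULL for file and KSM pages. */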
struct anon_vma *page_anon_vma(struct page *page)
{
	unsigned long mapping;

	page = compound_head(page);
	mapping = (unsigned long)page->mapping;
	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return __page_rmapping(page);
}

struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping;

	page = compound_head(page);

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(PageSlab(page)))
		return NULL;

	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		return swap_address_space(entry);
	}

	mapping = page->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		return NULL;

	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(page_mapping);

/*
 * For file cache pages, return the address_space, otherwise return NULL
 */
struct address_space *page_mapping_file(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return NULL;
	return page_mapping(page);
}

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
	int ret;

	ret = atomic_read(&page->_mapcount) + 1;
	/*
	 * For file THP page->_mapcount contains total number of mapping
	 * of the page: no need to look into compound_mapcount.
	 */
	if (!PageAnon(page) && !PageHuge(page))
		return ret;
	page = compound_head(page);
	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
	if (PageDoubleMap(page))
		ret--;
	return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

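/*
 * overcommit_ratio and overcommit_kbytes are mutually exclusive: a
 * successful write to one of them through its sysctl handler resets the
 * other to zero.
 */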
int overcommit_ratio_handler(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write,
			      void __user *buffer, size_t *lenp,
			      loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages() - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}

/*
 * Make sure vm_committed_as in one cacheline and not cacheline shared with
 * other variables. It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux deals
 * with hypervisors that emulate the Linux overcommit semantics in
 * a native fashion.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_read_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See
 * Documentation/admin-guide/mm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long allowed;

	VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
			-(s64)vm_committed_as_batch * num_online_cpus(),
			"memory commitment underflow");

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		if (pages > totalram_pages() + total_swap_pages)
			goto error;
		return 0;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);

		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:     the task whose cmdline value to copy.
 * @buffer:   the buffer to copy to.
 * @buflen:   the length of the buffer. Larger cmdline values are truncated
 *            to this length.
 *
 * Return: the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NULL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	spin_lock(&mm->arg_lock);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	spin_unlock(&mm->arg_lock);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}

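/*
 * Compare the contents of two pages. Both are mapped with kmap_atomic(),
 * so this must be called from a context where atomic kmaps are allowed.
 */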
int memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = kmap_atomic(page1);
	addr2 = kmap_atomic(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);
	kunmap_atomic(addr2);
	kunmap_atomic(addr1);
	return ret;
}