#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>

#include <linux/uaccess.h>

#include "internal.h"

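/**
 * kfree_const - conditionally free memory, only if it was not allocated
 * from the kernel's .rodata section
 * @x: pointer to the memory
 *
 * Counterpart to kstrdup_const(): pointers returned by kstrdup_const() may
 * refer to .rodata and must not be passed to plain kfree().
 */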
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

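/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */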
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

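/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: strings allocated by kstrdup_const() should be freed by kfree_const().
 *
 * Return: @s itself if it lives in .rodata, otherwise a kstrdup() copy
 */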
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);

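/**
 * kstrndup - allocate space for and copy an existing string, bounded by @max
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */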
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

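/**
 * kmemdup - duplicate a region of memory
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error
 */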
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);

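/**
 * kmemdup_nul - create a NUL-terminated string from unterminated data
 * @s: the data to stringify
 * @len: the size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */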
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	char *buf;

	if (!s)
		return NULL;

	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kmemdup_nul);

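/**
 * memdup_user - duplicate memory region from user space
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. The result is physically contiguous
 * and should be freed with kfree().
 */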
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);

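/**
 * vmemdup_user - duplicate memory region from user space
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. The result may not be physically
 * contiguous; use kvfree() to free it.
 */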
void *vmemdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kvmalloc(len, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kvfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(vmemdup_user);

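/**
 * strndup_user - duplicate an existing string from user space
 * @s: the string to duplicate
 * @n: maximum number of bytes to copy, including the trailing NUL
 *
 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 */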
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);

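/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */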
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);

void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		mm->mmap = vma;
		if (rb_parent)
			next = rb_entry(rb_parent,
					struct vm_area_struct, vm_rb);
		else
			next = NULL;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

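/* Check if the vma is being used as a stack by this task */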
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

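/**
 * __account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 * @task:        task used to check RLIMIT_MEMLOCK
 * @bypass_rlim: %true if the RLIMIT_MEMLOCK check should be bypassed
 *
 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
 * that mmap_sem is held as writer.
 *
 * Return:
 * * 0       on success
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */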
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
			struct task_struct *task, bool bypass_rlim)
{
	unsigned long locked_vm, limit;
	int ret = 0;

	lockdep_assert_held_write(&mm->mmap_sem);

	locked_vm = mm->locked_vm;
	if (inc) {
		if (!bypass_rlim) {
			limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
			if (locked_vm + pages > limit)
				ret = -ENOMEM;
		}
		if (!ret)
			mm->locked_vm = locked_vm + pages;
	} else {
		WARN_ON_ONCE(pages > locked_vm);
		mm->locked_vm = locked_vm - pages;
	}

	pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
		 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
		 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
		 ret ? " - exceeded" : "");

	return ret;
}
EXPORT_SYMBOL_GPL(__account_locked_vm);

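/**
 * account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against, may be NULL
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 *
 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
 *
 * Return:
 * * 0       on success, or if @mm is NULL
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */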
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
{
	int ret;

	if (pages == 0 || !mm)
		return 0;

	down_write(&mm->mmap_sem);
	ret = __account_locked_vm(mm, pages, inc, current,
				  capable(CAP_IPC_LOCK));
	up_write(&mm->mmap_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(account_locked_vm);

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (down_write_killable(&mm->mmap_sem))
			return -EINTR;
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
				    &populate, &uf);
		up_write(&mm->mmap_sem);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

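/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree() for freeing the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Please note that any use of gfp flags outside of GFP_KERNEL is careful to
 * not fall back to vmalloc.
 */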
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
	gfp_t kmalloc_flags = flags;
	void *ret;

	/*
	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
	 * so the given set of flags has to be compatible.
	 */
	if ((flags & GFP_KERNEL) != GFP_KERNEL)
		return kmalloc_node(size, flags, node);

	/*
	 * We want to attempt a large physically contiguous block first because
	 * it is less likely to fragment multiple larger blocks and therefore
	 * contribute to a long term fragmentation less than vmalloc fallback.
	 * However make sure that larger requests are not too disruptive - no
	 * OOM killer and no allocation failure warnings as we have a fallback.
	 */
	if (size > PAGE_SIZE) {
		kmalloc_flags |= __GFP_NOWARN;

		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
			kmalloc_flags |= __GFP_NORETRY;
	}

	ret = kmalloc_node(size, kmalloc_flags, node);

	/*
	 * It doesn't really make sense to fallback to vmalloc for sub page
	 * requests
	 */
	if (ret || size <= PAGE_SIZE)
		return ret;

	return __vmalloc_node_flags_caller(size, node, flags,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(kvmalloc_node);

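/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Either preemptible task context or not-NMI interrupt.
 */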
void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);

static inline void *__page_rmapping(struct page *page)
{
	unsigned long mapping;

	mapping = (unsigned long)page->mapping;
	mapping &= ~PAGE_MAPPING_FLAGS;

	return (void *)mapping;
}

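/* Neutral page->mapping pointer to address_space or anon_vma or other */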
void *page_rmapping(struct page *page)
{
	page = compound_head(page);
	return __page_rmapping(page);
}

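/*
 * Return true if this page is mapped into pagetables.
 * For compound page it returns true if any subpage of compound page is mapped.
 */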
bool page_mapped(struct page *page)
{
	int i;

	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) >= 0;
	page = compound_head(page);
	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
		return true;
	if (PageHuge(page))
		return false;
	for (i = 0; i < (1 << compound_order(page)); i++) {
		if (atomic_read(&page[i]._mapcount) >= 0)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(page_mapped);

struct anon_vma *page_anon_vma(struct page *page)
{
	unsigned long mapping;

	page = compound_head(page);
	mapping = (unsigned long)page->mapping;
	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return __page_rmapping(page);
}

struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping;

	page = compound_head(page);

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(PageSlab(page)))
		return NULL;

	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		return swap_address_space(entry);
	}

	mapping = page->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		return NULL;

	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(page_mapping);

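/*
 * For file cache pages, return the address_space, otherwise NULL
 */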
struct address_space *page_mapping_file(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return NULL;
	return page_mapping(page);
}

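/* Slow path of page_mapcount() for compound pages */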
int __page_mapcount(struct page *page)
{
	int ret;

	ret = atomic_read(&page->_mapcount) + 1;
	/*
	 * For file THP page->_mapcount contains total number of mapping
	 * of the page: no need to look into compound_mapcount.
	 */
	if (!PageAnon(page) && !PageHuge(page))
		return ret;
	page = compound_head(page);
	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
	if (PageDoubleMap(page))
		ret--;
	return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write,
			      void __user *buffer, size_t *lenp,
			      loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

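/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */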
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages() - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}

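/*
 * Make sure vm_committed_as is in its own cacheline, not shared with other
 * variables: it can be updated by several CPUs frequently.
 */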
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

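/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux deals
 * with hypervisors. The balloon driver will use it to decide on the
 * amount of memory to deflate/inflate the balloon.
 */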
unsigned long vm_memory_committed(void)
{
	return percpu_counter_read_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

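/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting.rst
 *
 * cap_sys_admin is 1 if the process has admin capability or is a root owner
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */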
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long allowed;

	VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
			-(s64)vm_committed_as_batch * num_online_cpus(),
			"memory commitment underflow");

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		if (pages > totalram_pages() + total_swap_pages)
			goto error;
		return 0;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);

		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}

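/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:     the task whose cmdline value to copy.
 * @buffer:   the buffer to copy to.
 * @buflen:   the length of the buffer. Larger cmdline values are truncated
 *            to this length.
 *
 * Return: the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NUL byte.
 */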
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;

	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;

	spin_lock(&mm->arg_lock);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	spin_unlock(&mm->arg_lock);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}