#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>

#include <asm/sections.h>
#include <linux/uaccess.h>

#include "internal.h"
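/* Check whether an address lies within the kernel's .rodata section. */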
static inline int is_kernel_rodata(unsigned long addr)
{
	return addr >= (unsigned long)__start_rodata &&
		addr < (unsigned long)__end_rodata;
}
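/**
 * kfree_const - conditionally free memory, only if it is not in .rodata
 * @x: pointer to the memory
 *
 * Calls kfree() only if @x does not point into the kernel's .rodata section.
 */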
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);
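/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */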
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);
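/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Returns the source string itself if it lives in .rodata, otherwise falls
 * back to kstrdup(). Strings returned by kstrdup_const() must be freed with
 * kfree_const().
 */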
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
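/**
 * kstrndup - allocate space for and copy an existing string, up to @max chars
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 */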
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len+1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);
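/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */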
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);
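/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */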
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	char *buf;

	if (!s)
		return NULL;

	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kmemdup_nul);
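/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure. The result is physically contiguous
 * and should be freed with kfree().
 */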
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmalloc_track_caller(len, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);
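/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure. The result may not be physically
 * contiguous and must be freed with kvfree().
 */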
void *vmemdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kvmalloc(len, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kvfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(vmemdup_user);
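/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL
 */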
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);
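/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */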
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;
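	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */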
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);
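/*
 * Insert @vma into @mm's doubly-linked VMA list after @prev; when there is
 * no previous VMA, @rb_parent is used to locate the successor in the rbtree.
 */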
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		mm->mmap = vma;
		if (rb_parent)
			next = rb_entry(rb_parent,
					struct vm_area_struct, vm_rb);
		else
			next = NULL;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}
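/* Check if the vma is being used as a stack by this task */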
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif
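/*
 * Weak fallback for architectures that do not provide a fast GUP
 * implementation: report zero pages pinned, so callers fall back to the
 * regular get_user_pages() path.
 */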
int __weak __get_user_pages_fast(unsigned long start,
				 int nr_pages, int write, struct page **pages)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);
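/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * This weak default simply forwards to get_user_pages_unlocked();
 * architectures may override it with a lockless page-table walk.
 */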
int __weak get_user_pages_fast(unsigned long start,
				int nr_pages, int write, struct page **pages)
{
	return get_user_pages_unlocked(start, nr_pages, pages,
				       write ? FOLL_WRITE : 0);
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
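/*
 * Run the security_mmap_file() check, then perform the mapping via
 * do_mmap_pgoff() under mmap_sem, populating the range afterwards when
 * do_mmap_pgoff() requests it.
 */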
unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (down_write_killable(&mm->mmap_sem))
			return -EINTR;
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
				    &populate, &uf);
		up_write(&mm->mmap_sem);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);
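/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 */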
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
	gfp_t kmalloc_flags = flags;
	void *ret;
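	/*
	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g page
	 * tables), so the given set of flags has to be compatible.
	 */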
	WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL);
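	/*
	 * We want to attempt a large physically contiguous block first because
	 * it is less likely to fragment multiple larger blocks and therefore
	 * contribute to a long term fragmentation less than vmalloc fallback.
	 * However make sure that larger requests are not too disruptive - no
	 * OOM killer and no allocation failure warnings as we have a fallback.
	 */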
	if (size > PAGE_SIZE) {
		kmalloc_flags |= __GFP_NOWARN;

		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
			kmalloc_flags |= __GFP_NORETRY;
	}

	ret = kmalloc_node(size, kmalloc_flags, node);
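	/*
	 * It doesn't really make sense to fallback to vmalloc for sub page
	 * requests
	 */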
	if (ret || size <= PAGE_SIZE)
		return ret;

	return __vmalloc_node_flags_caller(size, node, flags,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(kvmalloc_node);
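/*
 * Free memory allocated by kvmalloc() and friends, routing vmalloc
 * addresses to vfree() and everything else to kfree().
 */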
void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);

static inline void *__page_rmapping(struct page *page)
{
	unsigned long mapping;

	mapping = (unsigned long)page->mapping;
	mapping &= ~PAGE_MAPPING_FLAGS;

	return (void *)mapping;
}
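/* Neutral page->mapping pointer to address_space or anon_vma or other */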
void *page_rmapping(struct page *page)
{
	page = compound_head(page);
	return __page_rmapping(page);
}
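/*
 * Return true if this page is mapped into pagetables.
 * For compound page it returns true if any subpage of compound page is mapped.
 */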
bool page_mapped(struct page *page)
{
	int i;

	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) >= 0;
	page = compound_head(page);
	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
		return true;
	if (PageHuge(page))
		return false;
	for (i = 0; i < hpage_nr_pages(page); i++) {
		if (atomic_read(&page[i]._mapcount) >= 0)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(page_mapped);

struct anon_vma *page_anon_vma(struct page *page)
{
	unsigned long mapping;

	page = compound_head(page);
	mapping = (unsigned long)page->mapping;
	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return __page_rmapping(page);
}

struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping;

	page = compound_head(page);
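	/* This should never happen, but handle it if it does */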
	if (unlikely(PageSlab(page)))
		return NULL;

	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		return swap_address_space(entry);
	}

	mapping = page->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		return NULL;

	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(page_mapping);
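/*
 * For file cache pages, return the address_space, otherwise return NULL
 */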
struct address_space *page_mapping_file(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return NULL;
	return page_mapping(page);
}
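/* Slow path of page_mapcount() for compound pages */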
int __page_mapcount(struct page *page)
{
	int ret;

	ret = atomic_read(&page->_mapcount) + 1;
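	/*
	 * For file THP page->_mapcount contains total number of mapping
	 * of the page: no need to look into compound_mapcount.
	 */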
	if (!PageAnon(page) && !PageHuge(page))
		return ret;
	page = compound_head(page);
	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
	if (PageDoubleMap(page))
		ret--;
	return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17;
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13;

int overcommit_ratio_handler(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write,
			      void __user *buffer, size_t *lenp,
			      loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}
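/*
 * Committed memory limit: the amount of memory that may be allocated under
 * the current overcommit policy - sysctl_overcommit_kbytes if set, otherwise
 * a percentage of non-hugetlb RAM, plus swap in either case.
 */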
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}
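/*
 * Make sure vm_committed_as in one cacheline and not cacheline shared with
 * other variables. It can be updated by several CPUs frequently.
 */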
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;
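/*
 * Return the amount of memory currently committed in the system, i.e. the
 * positive value of the vm_committed_as counter. One use of this metric is
 * to drive ballooning decisions when Linux runs as a guest.
 */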
unsigned long vm_memory_committed(void)
{
	return percpu_counter_read_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);
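/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting.rst
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */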
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long free, allowed, reserve;

	VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
			-(s64)vm_committed_as_batch * num_online_cpus(),
			"memory commitment underflow");

	vm_acct_memory(pages);
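	/*
	 * Sometimes we want to use more memory than we have
	 */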
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		free = global_zone_page_state(NR_FREE_PAGES);
		free += global_node_page_state(NR_FILE_PAGES);
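		/*
		 * shmem pages shouldn't be counted as free in this
		 * case, they can't be purged, only swapped out, and
		 * that won't affect the overall amount of available
		 * memory in the system.
		 */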
		free -= global_node_page_state(NR_SHMEM);

		free += get_nr_swap_pages();
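		/*
		 * Any slabs which are created with the
		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
		 * which are reclaimable, under pressure.  The dentry
		 * and inode caches are somewhat reclaimable too.
		 */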
		free += global_node_page_state(NR_SLAB_RECLAIMABLE);
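		/*
		 * Part of the kernel memory, which can be released
		 * under memory pressure.
		 */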
		free += global_node_page_state(
				NR_INDIRECTLY_RECLAIMABLE_BYTES) >> PAGE_SHIFT;
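		/*
		 * Leave reserved pages. The pages are not for anonymous pages.
		 */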
		if (free <= totalreserve_pages)
			goto error;
		else
			free -= totalreserve_pages;
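		/*
		 * Reserve some for root
		 */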
		if (!cap_sys_admin)
			free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

		if (free > pages)
			return 0;

		goto error;
	}

	allowed = vm_commit_limit();
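	/*
	 * Reserve some for root
	 */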
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);
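	/*
	 * Don't let a single process grow so big a user can't recover
	 */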
	if (mm) {
		reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}
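/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:     the task whose cmdline value to copy.
 * @buffer:   the buffer to copy to.
 * @buflen:   the length of the buffer. Larger cmdline values are truncated
 *            to this length.
 * Returns the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NUL byte.
 */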
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;

	down_read(&mm->mmap_sem);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	up_read(&mm->mmap_sem);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);
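	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */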
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}