#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>

#include <asm/sections.h>
#include <asm/uaccess.h>

#include "internal.h"

static inline int is_kernel_rodata(unsigned long addr)
{
	return addr >= (unsigned long)__start_rodata &&
		addr < (unsigned long)__end_rodata;
}

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Calls kfree() only if @x is not in the .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);
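
/*
 * Usage sketch (hypothetical caller, not part of this file): duplicate a
 * caller-supplied string so it outlives the caller's buffer.  "label" and
 * "template" are made-up identifiers for illustration.
 *
 *	char *label = kstrdup(template, GFP_KERNEL);
 *
 *	if (!label)
 *		return -ENOMEM;
 *	...
 *	kfree(label);
 */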

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Returns the source string if it is in the .rodata section, otherwise
 * falls back to kstrdup().
 * Strings allocated by kstrdup_const should be freed by kfree_const.
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
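
/*
 * Usage sketch (hypothetical caller): kstrdup_const() and kfree_const()
 * must be used as a pair, since the "copy" may simply be the original
 * .rodata pointer.
 *
 *	const char *name = kstrdup_const(default_name, GFP_KERNEL);
 *
 *	if (!name)
 *		return -ENOMEM;
 *	...
 *	kfree_const(name);
 */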

/**
 * kstrndup - allocate space for and copy an existing string, limited by @max
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len+1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);
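
/*
 * Usage sketch (hypothetical caller): snapshot a fixed-size structure,
 * e.g. a "struct fw_header" read out of a firmware image, so it can be
 * kept after the source buffer is released.  The type name is made up.
 *
 *	struct fw_header *copy = kmemdup(hdr, sizeof(*hdr), GFP_KERNEL);
 *
 *	if (!copy)
 *		return -ENOMEM;
 */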

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);
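
/*
 * Usage sketch (hypothetical ioctl handler): pull a fixed-size argument
 * structure out of user space.  Note the result must be checked with
 * IS_ERR()/PTR_ERR(), not against NULL.
 *
 *	struct my_ioctl_args *args = memdup_user(uarg, sizeof(*args));
 *
 *	if (IS_ERR(args))
 *		return PTR_ERR(args);
 *	...
 *	kfree(args);
 */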

/*
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL
 *
 * Returns an ERR_PTR() on failure.
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);
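
/*
 * Usage sketch (hypothetical caller): copy in a user-supplied path with
 * an upper bound, so a missing NUL in user memory cannot run away.
 *
 *	char *path = strndup_user(upath, PATH_MAX);
 *
 *	if (IS_ERR(path))
 *		return PTR_ERR(path);
 *	...
 *	kfree(path);
 */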

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);
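
/*
 * Usage sketch (hypothetical procfs write handler): user data arriving
 * through a ->write() callback carries a count but no terminator, so
 * memdup_user_nul() yields a proper C string for parsing.
 *
 *	char *kbuf = memdup_user_nul(ubuf, count);
 *
 *	if (IS_ERR(kbuf))
 *		return PTR_ERR(kbuf);
 *	ret = my_parse(kbuf);
 *	kfree(kbuf);
 */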

void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		mm->mmap = vma;
		if (rb_parent)
			next = rb_entry(rb_parent,
					struct vm_area_struct, vm_rb);
		else
			next = NULL;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/*
 * Like get_user_pages_fast(), except it is IRQ-safe in that it won't
 * fall back to the regular GUP path.  If the architecture does not
 * provide its own implementation, this weak stub simply returns with
 * no pages pinned.
 */
int __weak __get_user_pages_fast(unsigned long start,
				 int nr_pages, int write, struct page **pages)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Returns the number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages were
 * pinned, returns -errno.
 *
 * get_user_pages_fast() provides equivalent functionality to
 * get_user_pages(), operating on current and current->mm, but unlike
 * get_user_pages() it must be called without mmap_sem held.
 *
 * It may still take mmap_sem and page table locks, so no assumptions can
 * be made about the absence of locking.  Architectures implement it to be
 * faster than get_user_pages() when the user memory area is already
 * faulted in and present in RAM, e.g. for frequent O_DIRECT reads against
 * a file; this generic weak version just falls back to the slow path.
 */
int __weak get_user_pages_fast(unsigned long start,
				int nr_pages, int write, struct page **pages)
{
	return get_user_pages_unlocked(start, nr_pages, pages,
				       write ? FOLL_WRITE : 0);
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
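
/*
 * Usage sketch (hypothetical caller): pin a single page of a user buffer
 * for a short-lived direct access.  Every successfully pinned page must
 * later be released with put_page().
 *
 *	struct page *page;
 *	int pinned = get_user_pages_fast(uaddr, 1, 1, &page);
 *
 *	if (pinned != 1)
 *		return pinned < 0 ? pinned : -EFAULT;
 *	...
 *	put_page(page);
 */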

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (down_write_killable(&mm->mmap_sem))
			return -EINTR;
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
				    &populate);
		up_write(&mm->mmap_sem);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);
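
/*
 * Usage sketch (hypothetical driver): map a file the kernel has opened
 * into the current task's address space, much like an mmap(2) call made
 * on the task's behalf.  Errors come back as negative errno values
 * encoded in the returned address.
 *
 *	unsigned long addr = vm_mmap(filp, 0, size, PROT_READ,
 *				     MAP_SHARED, 0);
 *
 *	if (IS_ERR_VALUE(addr))
 *		return (int)addr;
 */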

/**
 * kvfree() - free memory allocated with kmalloc() or vmalloc()
 * @addr: pointer returned by kmalloc() or vmalloc()
 *
 * Works for both kinds of allocation, so callers that fall back from
 * kmalloc() to vmalloc() for large buffers need only one free path.
 */
void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);
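
/*
 * Usage sketch (hypothetical helper): the classic kmalloc-with-vmalloc
 * fallback pattern that kvfree() exists to pair with.
 *
 *	void *buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
 *
 *	if (!buf)
 *		buf = vmalloc(size);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	kvfree(buf);
 */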

/* Neutral page->mapping pointer to address_space or anon_vma or KSM */
static inline void *__page_rmapping(struct page *page)
{
	unsigned long mapping;

	mapping = (unsigned long)page->mapping;
	mapping &= ~PAGE_MAPPING_FLAGS;

	return (void *)mapping;
}

void *page_rmapping(struct page *page)
{
	page = compound_head(page);
	return __page_rmapping(page);
}

/*
 * Return true if this page is mapped into pagetables.  For compound
 * pages, return true if any subpage of the compound page is mapped.
 */
bool page_mapped(struct page *page)
{
	int i;

	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) >= 0;
	page = compound_head(page);
	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
		return true;
	if (PageHuge(page))
		return false;
	for (i = 0; i < hpage_nr_pages(page); i++) {
		if (atomic_read(&page[i]._mapcount) >= 0)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(page_mapped);

struct anon_vma *page_anon_vma(struct page *page)
{
	unsigned long mapping;

	page = compound_head(page);
	mapping = (unsigned long)page->mapping;
	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return __page_rmapping(page);
}

struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping;

	page = compound_head(page);

	/* This happens if someone calls flush_dcache_page on a slab page */
	if (unlikely(PageSlab(page)))
		return NULL;

	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		return swap_address_space(entry);
	}

	mapping = page->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		return NULL;

	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(page_mapping);

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
	int ret;

	ret = atomic_read(&page->_mapcount) + 1;
	/*
	 * For file THP page->_mapcount will always be zero, so there's
	 * no need to look at compound_mapcount().
	 */
	if (!PageAnon(page) && !PageHuge(page))
		return ret;
	page = compound_head(page);
	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
	if (PageDoubleMap(page))
		ret--;
	return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write,
			      void __user *buffer, size_t *lenp,
			      loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}
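
/*
 * Worked example (illustrative numbers only, assuming 4K pages): with
 * 4GB of RAM (1048576 pages), no hugetlb pages, the default
 * sysctl_overcommit_ratio of 50 and 2GB of swap (524288 pages), the
 * commit limit is
 *
 *	1048576 * 50 / 100 + 524288 = 1048576 pages, i.e. 4GB.
 */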

/*
 * Make sure vm_committed_as sits in its own cacheline and does not share
 * one with other variables; it can be updated frequently by several CPUs.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux deals
 * with hypervisors. The balloon driver could fill the ballooned pages
 * based on this metric of commitment for memory overcommit without
 * hurting the guest operating system.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_read_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting.
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin rights, -1 if not and 0 if
 * checks needn't be done.
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long free, allowed, reserve;

	VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
			-(s64)vm_committed_as_batch * num_online_cpus(),
			"memory commitment underflow");

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		free = global_page_state(NR_FREE_PAGES);
		free += global_node_page_state(NR_FILE_PAGES);

		/*
		 * shmem pages shouldn't be counted as free in this
		 * case, they can't be purged, only swapped out, and
		 * that won't affect the overall amount of available
		 * memory in the system.
		 */
		free -= global_node_page_state(NR_SHMEM);

		free += get_nr_swap_pages();

		/*
		 * Any slabs which are created with the
		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
		 * which are reclaimable, under pressure.  The dentry
		 * cache and most inode caches should fall into this.
		 */
		free += global_page_state(NR_SLAB_RECLAIMABLE);

		/*
		 * Leave reserved pages. The pages are not for anonymous pages.
		 */
		if (free <= totalreserve_pages)
			goto error;
		else
			free -= totalreserve_pages;

		/*
		 * Reserve some for root
		 */
		if (!cap_sys_admin)
			free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

		if (free > pages)
			return 0;

		goto error;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:     the task whose cmdline value to copy.
 * @buffer:   the buffer to copy to.
 * @buflen:   the length of the buffer. Larger cmdline values are truncated
 *            to this length.
 * Returns the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;

	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	down_read(&mm->mmap_sem);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	up_read(&mm->mmap_sem);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the NUL at the end of args has been overwritten, then
	 * assume the application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}
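
/*
 * Usage sketch (hypothetical caller, in the spirit of fs/proc): fetch a
 * task's command line for reporting.  The copy is not guaranteed to be
 * NUL-terminated, so terminate it explicitly.
 *
 *	char buf[256];
 *	int n = get_cmdline(task, buf, sizeof(buf) - 1);
 *
 *	buf[n] = '\0';
 */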