#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <asm/uaccess.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

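/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */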
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

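/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */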
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

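/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */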
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);

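/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */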
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);

static __always_inline void *__do_krealloc(const void *p, size_t new_size,
					   gfp_t flags)
{
	void *ret;
	size_t ks = 0;

	if (p)
		ks = ksize(p);

	if (ks >= new_size)
		return (void *)p;

	ret = kmalloc_track_caller(new_size, flags);
	if (ret && p)
		memcpy(ret, p, ks);

	return ret;
}

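/**
 * __krealloc - like krealloc() but don't free @p.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * This function is like krealloc() except it never frees the originally
 * allocated buffer. Use this if you don't want to free the buffer
 * immediately like, for example, with RCU.
 */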
void *__krealloc(const void *p, size_t new_size, gfp_t flags)
{
	if (unlikely(!new_size))
		return ZERO_SIZE_PTR;

	return __do_krealloc(p, new_size, flags);
}
EXPORT_SYMBOL(__krealloc);

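/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes. If @p is %NULL, krealloc()
 * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */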
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!new_size)) {
		kfree(p);
		return ZERO_SIZE_PTR;
	}

	ret = __do_krealloc(p, new_size, flags);
	if (ret && p != ret)
		kfree(p);

	return ret;
}
EXPORT_SYMBOL(krealloc);

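/**
 * kzfree - like kfree but zero memory
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before freed.
 * If @p is %NULL, kzfree() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer, which can be
 * bigger than the requested size, so be careful when using it in
 * performance-sensitive code.
 */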
void kzfree(const void *p)
{
	size_t ks;
	void *mem = (void *)p;

	if (unlikely(ZERO_OR_NULL_PTR(mem)))
		return;
	ks = ksize(mem);
	memset(mem, 0, ks);
	kfree(mem);
}
EXPORT_SYMBOL(kzfree);

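/*
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Returns an ERR_PTR() on failure.
 */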
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);

void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		mm->mmap = vma;
		if (rb_parent)
			next = rb_entry(rb_parent,
					struct vm_area_struct, vm_rb);
		else
			next = NULL;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

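/* Check if the vma is being used as a stack by this task */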
static int vm_is_stack_for_task(struct task_struct *t,
				struct vm_area_struct *vma)
{
	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

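/*
 * Check if the vma is being used as a stack.
 * If in_group is non-zero, check in the entire thread group or else
 * just check in the current task. Returns the pid of the task that
 * the vma is stack for.
 */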
pid_t vm_is_stack(struct task_struct *task,
		  struct vm_area_struct *vma, int in_group)
{
	pid_t ret = 0;

	if (vm_is_stack_for_task(task, vma))
		return task->pid;

	if (in_group) {
		struct task_struct *t;
		rcu_read_lock();
		if (!pid_alive(task))
			goto done;

		t = task;
		do {
			if (vm_is_stack_for_task(t, vma)) {
				ret = t->pid;
				goto done;
			}
		} while_each_thread(task, t);
done:
		rcu_read_unlock();
	}

	return ret;
}

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

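/*
 * Like get_user_pages_fast() except it is IRQ-safe in that it won't
 * fall back to the regular GUP. If the architecture does not provide
 * its own implementation, this weak stub simply returns with no pages
 * pinned.
 */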
int __attribute__((weak)) __get_user_pages_fast(unsigned long start,
				int nr_pages, int write, struct page **pages)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);

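/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to
 * get_user_pages, operating on current and current->mm, with force=0
 * and vma=NULL as though by get_user_pages().
 *
 * Architectures that can walk the page tables locklessly should
 * override this weak default, which falls back to the regular
 * get_user_pages() path under mmap_sem.
 */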
int __attribute__((weak)) get_user_pages_fast(unsigned long start,
				int nr_pages, int write, struct page **pages)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(current, mm, start, nr_pages,
			     write, 0, pages, NULL);
	up_read(&mm->mmap_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		down_write(&mm->mmap_sem);
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
				    &populate);
		up_write(&mm->mmap_sem);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset & ~PAGE_MASK))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping = page->mapping;

	VM_BUG_ON(PageSlab(page));
	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		mapping = swap_address_space(entry);
	} else if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		mapping = NULL;
	return mapping;
}

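/* Tracepoints definitions. */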
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);