1#include <linux/errno.h>
2#include <linux/sched.h>
3#include <linux/syscalls.h>
4#include <linux/mm.h>
5#include <linux/fs.h>
6#include <linux/smp.h>
7#include <linux/sem.h>
8#include <linux/msg.h>
9#include <linux/shm.h>
10#include <linux/stat.h>
11#include <linux/mman.h>
12#include <linux/file.h>
13#include <linux/utsname.h>
14#include <linux/personality.h>
15#include <linux/random.h>
16#include <linux/uaccess.h>
17
18#include <asm/ia32.h>
19#include <asm/syscalls.h>
20
21SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
22 unsigned long, prot, unsigned long, flags,
23 unsigned long, fd, unsigned long, off)
24{
25 long error;
26 error = -EINVAL;
27 if (off & ~PAGE_MASK)
28 goto out;
29
30 error = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
31out:
32 return error;
33}
34
35static void find_start_end(unsigned long flags, unsigned long *begin,
36 unsigned long *end)
37{
38 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
39 unsigned long new_begin;
40
41
42
43
44
45
46
47 *begin = 0x40000000;
48 *end = 0x80000000;
49 if (current->flags & PF_RANDOMIZE) {
50 new_begin = randomize_range(*begin, *begin + 0x02000000, 0);
51 if (new_begin)
52 *begin = new_begin;
53 }
54 } else {
55 *begin = TASK_UNMAPPED_BASE;
56 *end = TASK_SIZE;
57 }
58}
59
/*
 * Bottom-up unmapped-area search for x86-64.
 *
 * Scans upward from the free-area cache (or from 'begin') for the first
 * gap of at least 'len' bytes between existing VMAs, inside the
 * [begin, end) window chosen by find_start_end().  Returns the start
 * address of the gap, or -ENOMEM when no gap exists.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
	unsigned long begin, end;

	/* MAP_FIXED: caller demands exactly this address, no searching. */
	if (flags & MAP_FIXED)
		return addr;

	find_start_end(flags, &begin, &end);

	if (len > end)
		return -ENOMEM;

	/* Requesting a specific address: honor it if it falls in a free gap. */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (end - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	/*
	 * For 32-bit-style mappings the cached position may lie outside
	 * the [begin, end) window, so restart the cache from 'begin'
	 * whenever the request would fit in the remembered hole.
	 */
	if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
	    && len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = begin;
	}
	addr = mm->free_area_cache;
	if (addr < begin)
		addr = begin;
	start_addr = addr;

full_search:
	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (end - len < addr) {
			/*
			 * Ran past the window.  If this scan did not
			 * start at 'begin', restart once from there in
			 * case a hole below start_addr was skipped.
			 */
			if (start_addr != begin) {
				start_addr = addr = begin;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember where the next search should resume.
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		/* Remember the largest hole seen so far. */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		addr = vma->vm_end;
	}
}
122
123
/*
 * Top-down unmapped-area search for x86-64: places mappings just below
 * mm->mmap_base and walks downward, falling back to the bottom-up
 * allocator for MAP_32BIT requests or when no gap is found.
 */
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	/* MAP_FIXED: no searching, the caller's address is final. */
	if (flags & MAP_FIXED)
		return addr;

	/* MAP_32BIT on a 64-bit task: handled by the bottom-up path. */
	if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
		goto bottomup;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
				(!vma || addr + len <= vma->vm_start))
			return addr;
	}

	/* check if free_area_cache is useful for us */
	if (len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = mm->mmap_base;
	}

	/* either no address requested or can't fit in requested address hole */
	addr = mm->free_area_cache;

	/* make sure it can fit in the remaining address space */
	if (addr > len) {
		vma = find_vma(mm, addr-len);
		if (!vma || addr <= vma->vm_start)
			/* remember the address as a hint for next time */
			return mm->free_area_cache = addr-len;
	}

	if (mm->mmap_base < len)
		goto bottomup;

	addr = mm->mmap_base-len;

	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * else if new region fits below vma->vm_start,
		 * return with success:
		 */
		vma = find_vma(mm, addr);
		if (!vma || addr+len <= vma->vm_start)
			/* remember the address as a hint for next time */
			return mm->free_area_cache = addr;

		/* remember the largest hole we saw so far */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = vma->vm_start-len;
	} while (len < vma->vm_start);

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here.  This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->cached_hole_size = ~0UL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = mm->mmap_base;
	mm->cached_hole_size = ~0UL;

	return addr;
}
212