#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/compat.h>
#include <linux/security.h>
#include <asm/pgalloc.h>

unsigned long mmap_rnd_mask;
unsigned long mmap_align_mask;

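/*
 * Maximum random offset that may be applied to the stack top;
 * zero if randomization is disabled for this task.
 */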
static unsigned long stack_maxrandom_size(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	if (current->personality & ADDR_NO_RANDOMIZE)
		return 0;
	return STACK_RND_MASK << PAGE_SHIFT;
}

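/*
 * Top of mmap area (just below the process stack).
 * Leave at least a ~32 MB hole.
 */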
#define MIN_GAP (32*1024*1024)
#define MAX_GAP (STACK_TOP/6*5)

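/*
 * Use the legacy bottom-up layout if the personality or the
 * legacy_va_layout sysctl asks for it, or if the stack may grow
 * without limit.
 */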
static inline int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;
	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
		return 1;
	return sysctl_legacy_va_layout;
}

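/*
 * Page-aligned random offset applied to the mmap base address.
 */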
unsigned long arch_mmap_rnd(void)
{
	if (is_32bit_task())
		return (get_random_int() & 0x7ff) << PAGE_SHIFT;
	else
		return (get_random_int() & mmap_rnd_mask) << PAGE_SHIFT;
}

static unsigned long mmap_base_legacy(unsigned long rnd)
{
	return TASK_UNMAPPED_BASE + rnd;
}

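/*
 * Base of the mmap area for the top-down layout: leave room below
 * STACK_TOP for the stack (clamped RLIMIT_STACK), the maximum stack
 * randomization, and the mmap randomization.
 */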
static inline unsigned long mmap_base(unsigned long rnd)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;
	gap &= PAGE_MASK;
	return STACK_TOP - stack_maxrandom_size() - rnd - gap;
}

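/*
 * Bottom-up search for a free area, starting at mm->mmap_base.  If the
 * resulting range crosses the current ASCE limit, upgrade the page
 * table hierarchy via crst_table_upgrade().
 */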
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		       unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;
	int do_color_align;
	int rc;

	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		goto check_asce_limit;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = !is_32bit_task();

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);
	if (addr & ~PAGE_MASK)
		return addr;

check_asce_limit:
	if (addr + len > current->mm->context.asce_limit &&
	    addr + len <= TASK_SIZE) {
		rc = crst_table_upgrade(mm);
		if (rc)
			return (unsigned long) rc;
	}

	return addr;
}

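/*
 * Top-down search for a free area, working downwards from mm->mmap_base.
 * Falls back to a bottom-up search if the top-down search fails.
 */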
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;
	int do_color_align;
	int rc;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		goto check_asce_limit;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = !is_32bit_task();

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = mm->mmap_base;
	info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

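	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here.  This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */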
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
		if (addr & ~PAGE_MASK)
			return addr;
	}

check_asce_limit:
	if (addr + len > current->mm->context.asce_limit &&
	    addr + len <= TASK_SIZE) {
		rc = crst_table_upgrade(mm);
		if (rc)
			return (unsigned long) rc;
	}

	return addr;
}

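/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */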
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

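	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */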
	if (mmap_is_legacy()) {
		mm->mmap_base = mmap_base_legacy(random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}

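/*
 * Set up the mmap randomization and alignment masks according to
 * the machine type reported by the CPU.
 */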
static int __init setup_mmap_rnd(void)
{
	struct cpuid cpu_id;

	get_cpu_id(&cpu_id);
	switch (cpu_id.machine) {
	case 0x9672:
	case 0x2064:
	case 0x2066:
	case 0x2084:
	case 0x2086:
	case 0x2094:
	case 0x2096:
	case 0x2097:
	case 0x2098:
	case 0x2817:
	case 0x2818:
	case 0x2827:
	case 0x2828:
		/* older machines: 11 bits of randomization, no extra alignment */
		mmap_rnd_mask = 0x7ffUL;
		mmap_align_mask = 0UL;
		break;
	case 0x2964:	/* z13 */
	default:
		/* 512 KB steps of randomization, 512 KB mmap alignment */
		mmap_rnd_mask = 0x3ff80UL;
		mmap_align_mask = 0x7fUL;
		break;
	}
	return 0;
}
early_initcall(setup_mmap_rnd);