/*
 *  linux/arch/arm/mm/nommu.c
 *
 * ARM uCLinux supporting functions.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/kernel.h>

#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/sections.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/mach/arch.h>
#include <asm/cputype.h>
#include <asm/mpu.h>
#include <asm/procinfo.h>

#include "mm.h"

unsigned long vectors_base;

#ifdef CONFIG_ARM_MPU
struct mpu_rgn_info mpu_rgn_info;

/* Region number register: selects which region the other accessors address */
static void rgnr_write(u32 v)
{
	asm("mcr p15, 0, %0, c6, c2, 0" : : "r" (v));
}

/* Data-side / unified region attributes */

/* Region access control register */
static void dracr_write(u32 v)
{
	asm("mcr p15, 0, %0, c6, c1, 4" : : "r" (v));
}

/* Region size register */
static void drsr_write(u32 v)
{
	asm("mcr p15, 0, %0, c6, c1, 2" : : "r" (v));
}

/* Region base address register */
static void drbar_write(u32 v)
{
	asm("mcr p15, 0, %0, c6, c1, 0" : : "r" (v));
}

static u32 drbar_read(void)
{
	u32 v;
	asm("mrc p15, 0, %0, c6, c1, 0" : "=r" (v));
	return v;
}

/* Optional instruction-side region attributes */

/* I-side region access control register */
static void iracr_write(u32 v)
{
	asm("mcr p15, 0, %0, c6, c1, 5" : : "r" (v));
}

/* I-side region size register */
static void irsr_write(u32 v)
{
	asm("mcr p15, 0, %0, c6, c1, 3" : : "r" (v));
}

/* I-side region base address register */
static void irbar_write(u32 v)
{
	asm("mcr p15, 0, %0, c6, c1, 1" : : "r" (v));
}

static unsigned long irbar_read(void)
{
	unsigned long v;
	asm("mrc p15, 0, %0, c6, c1, 1" : "=r" (v));
	return v;
}

/* MPU initialisation functions */
void __init adjust_lowmem_bounds_mpu(void)
{
	phys_addr_t phys_offset = PHYS_OFFSET;
	phys_addr_t aligned_region_size, specified_mem_size, rounded_mem_size;
	struct memblock_region *reg;
	bool first = true;
	phys_addr_t mem_start;
	phys_addr_t mem_end;

	for_each_memblock(memory, reg) {
		if (first) {
			/*
			 * Initially only use memory that is contiguous from
			 * PHYS_OFFSET.
			 */
			if (reg->base != phys_offset)
				panic("First memory bank must be contiguous from PHYS_OFFSET");

			mem_start = reg->base;
			mem_end = reg->base + reg->size;
			specified_mem_size = reg->size;
			first = false;
		} else {
			/*
			 * memblock auto merges contiguous blocks, so remove
			 * all blocks after this one in one go (blocks cannot
			 * be removed separately while iterating).
			 */
			pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n",
				  &mem_end, &reg->base);
			memblock_remove(reg->base, 0 - reg->base);
			break;
		}
	}

	/*
	 * The MPU has strict alignment requirements: the region size must be
	 * a power of 2, and the region start must be aligned to that size.
	 */
	if (phys_offset != 0)
		pr_info("PHYS_OFFSET != 0 => MPU Region size constrained by alignment requirements\n");

	/*
	 * The maximum aligned region might overflow phys_addr_t if
	 * phys_offset is 0, so keep everything below 4G until we take the
	 * smaller of aligned_region_size and rounded_mem_size, one of which
	 * is guaranteed to be smaller than the maximum physical address.
	 */
	aligned_region_size = (phys_offset - 1) ^ (phys_offset);
	/* Find the max power-of-two sized region that fits inside our bank */
	rounded_mem_size = (1 << __fls(specified_mem_size)) - 1;

	/* The actual region size is the smaller of the two */
	aligned_region_size = aligned_region_size < rounded_mem_size
				? aligned_region_size + 1
				: rounded_mem_size + 1;

	if (aligned_region_size != specified_mem_size) {
		pr_warn("Truncating memory from %pa to %pa (MPU region constraints)\n",
			&specified_mem_size, &aligned_region_size);
		memblock_remove(mem_start + aligned_region_size,
				specified_mem_size - aligned_region_size);

		mem_end = mem_start + aligned_region_size;
	}

	pr_debug("MPU Region from %pa size %pa (end %pa)\n",
		 &phys_offset, &aligned_region_size, &mem_end);
}

static int mpu_present(void)
{
	return ((read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA) == MMFR0_PMSAv7);
}

static int mpu_max_regions(void)
{
	/*
	 * We don't support a different number of I/D side regions so if we
	 * have separate instruction and data memory maps then return
	 * whichever side has a smaller number of supported regions.
	 */
	u32 dregions, iregions, mpuir;
	mpuir = read_cpuid(CPUID_MPUIR);

	dregions = iregions = (mpuir & MPUIR_DREGION_SZMASK) >> MPUIR_DREGION;

	/* Check for separate d-side and i-side memory maps */
	if (mpuir & MPUIR_nU)
		iregions = (mpuir & MPUIR_IREGION_SZMASK) >> MPUIR_IREGION;

	/* Use the smallest of the two maxima */
	return min(dregions, iregions);
}

static int mpu_iside_independent(void)
{
	/* MPUIR.nU specifies whether there is *not* a unified memory map */
	return read_cpuid(CPUID_MPUIR) & MPUIR_nU;
}

static int mpu_min_region_order(void)
{
	u32 drbar_result, irbar_result;
	/* We've kept a region free for this probing */
	rgnr_write(MPU_PROBE_REGION);
	isb();
	/*
	 * As per the ARM ARM, write 0xFFFFFFFC to DRBAR to find the minimum
	 * supported region order.
	 */
	drbar_write(0xFFFFFFFC);
	drbar_result = irbar_result = drbar_read();
	drbar_write(0x0);
	/* If the MPU is non-unified, probe the I-side as well */
	if (mpu_iside_independent()) {
		irbar_write(0xFFFFFFFC);
		irbar_result = irbar_read();
		irbar_write(0x0);
	}
	isb();	/* Ensure the MPU register writes have completed */

	/* Return whichever minimum is larger */
	return __ffs(max(drbar_result, irbar_result));
}

static int mpu_setup_region(unsigned int number, phys_addr_t start,
			unsigned int size_order, unsigned int properties)
{
	u32 size_data;

	/* We kept a region free for the probing */
	if (number > mpu_max_regions() || number == MPU_PROBE_REGION)
		return -ENOENT;

	if (size_order > 32)
		return -ENOMEM;

	if (size_order < mpu_min_region_order())
		return -ENOMEM;

	/* Writing N to bits 5:1 (RSR_SZ) specifies a region size of 2^(N+1) */
	size_data = ((size_order - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN;

	dsb(); /* Ensure all previous data accesses occur with old mappings */
	rgnr_write(number);
	isb();
	drbar_write(start);
	dracr_write(properties);
	isb();
	drsr_write(size_data);

	/* Check for independent I-side registers */
	if (mpu_iside_independent()) {
		irbar_write(start);
		iracr_write(properties);
		isb();
		irsr_write(size_data);
	}
	isb();

	/* Store region info (we treat i/d side the same, so only store d) */
	mpu_rgn_info.rgns[number].dracr = properties;
	mpu_rgn_info.rgns[number].drbar = start;
	mpu_rgn_info.rgns[number].drsr = size_data;
	return 0;
}

/*
 * Set up default MPU regions, doing nothing if there is no MPU.
 */
void __init mpu_setup(void)
{
	int region_err;

	if (!mpu_present())
		return;

	region_err = mpu_setup_region(MPU_RAM_REGION, PHYS_OFFSET,
				      ilog2(memblock.memory.regions[0].size),
				      MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL);
	if (region_err) {
		panic("MPU region initialization failure! %d", region_err);
	} else {
		pr_info("Using ARMv7 PMSA Compliant MPU. "
			"Region independence: %s, Max regions: %d\n",
			mpu_iside_independent() ? "Yes" : "No",
			mpu_max_regions());
	}
}
#else
static void adjust_lowmem_bounds_mpu(void) {}
static void __init mpu_setup(void) {}
#endif /* CONFIG_ARM_MPU */

#ifdef CONFIG_CPU_CP15
#ifdef CONFIG_CPU_HIGH_VECTOR
static unsigned long __init setup_vectors_base(void)
{
	unsigned long reg = get_cr();

	set_cr(reg | CR_V);
	return 0xffff0000;
}
#else /* CONFIG_CPU_HIGH_VECTOR */
/* Write exception base address to VBAR */
static inline void set_vbar(unsigned long val)
{
	asm("mcr p15, 0, %0, c12, c0, 0" : : "r" (val) : "cc");
}

/*
 * Security extensions, bits[7:4], permitted values:
 * 0b0000 - not implemented, 0b0001/0b0010 - implemented
 */
static inline bool security_extensions_enabled(void)
{
	/* Check CPUID Identification Scheme before ID_PFR1 read */
	if ((read_cpuid_id() & 0x000f0000) == 0x000f0000)
		return !!cpuid_feature_extract(CPUID_EXT_PFR1, 4);
	return 0;
}

static unsigned long __init setup_vectors_base(void)
{
	unsigned long base = 0, reg = get_cr();

	set_cr(reg & ~CR_V);
	if (security_extensions_enabled()) {
		if (IS_ENABLED(CONFIG_REMAP_VECTORS_TO_RAM))
			base = CONFIG_DRAM_BASE;
		set_vbar(base);
	} else if (IS_ENABLED(CONFIG_REMAP_VECTORS_TO_RAM)) {
		if (CONFIG_DRAM_BASE != 0)
			pr_err("Security extensions not enabled, vectors cannot be remapped to RAM, vectors base will be 0x00000000\n");
	}

	return base;
}
#endif /* CONFIG_CPU_HIGH_VECTOR */
#endif /* CONFIG_CPU_CP15 */

void __init arm_mm_memblock_reserve(void)
{
#ifndef CONFIG_CPU_V7M
	vectors_base = IS_ENABLED(CONFIG_CPU_CP15) ? setup_vectors_base() : 0;

	/*
	 * Register the exception vector page: reserve two pages at
	 * vectors_base so the allocator never hands them out.
	 */
	memblock_reserve(vectors_base, 2 * PAGE_SIZE);
#else /* ifndef CONFIG_CPU_V7M */
	/*
	 * There is no dedicated vector page on V7-M, so nothing needs to be
	 * reserved here.
	 */
#endif
}

void __init adjust_lowmem_bounds(void)
{
	phys_addr_t end;
	adjust_lowmem_bounds_mpu();
	end = memblock_end_of_DRAM();
	high_memory = __va(end - 1) + 1;
	memblock_set_current_limit(end);
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(const struct machine_desc *mdesc)
{
	early_trap_init((void *)vectors_base);
	mpu_setup();
	bootmem_init();
}

/*
 * We don't need to do anything here for nommu machines.
 */
void setup_mm_for_reboot(void)
{
}

void flush_dcache_page(struct page *page)
{
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_kernel_dcache_page(struct page *page)
{
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
EXPORT_SYMBOL(flush_kernel_dcache_page);

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
	memcpy(dst, src, len);
	if (vma->vm_flags & VM_EXEC)
		__cpuc_coherent_user_range(uaddr, uaddr + len);
}

void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset,
				size_t size, unsigned int mtype)
{
	/*
	 * Without an MMU, "remapping" is an identity mapping; physical
	 * addresses above 4GiB cannot be represented in a 32-bit pointer.
	 */
	if (pfn >= (0x100000000ULL >> PAGE_SHIFT))
		return NULL;
	return (void __iomem *)(offset + (pfn << PAGE_SHIFT));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);

void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
				   unsigned int mtype, void *caller)
{
	return (void __iomem *)phys_addr;
}

void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *);

void __iomem *ioremap(resource_size_t res_cookie, size_t size)
{
	return __arm_ioremap_caller(res_cookie, size, MT_DEVICE,
				    __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap);

void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
	__alias(ioremap_cached);

void __iomem *ioremap_cached(resource_size_t res_cookie, size_t size)
{
	return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
				    __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
EXPORT_SYMBOL(ioremap_cached);

void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
{
	return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
				    __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);

void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
{
	return (void *)phys_addr;
}

void __iounmap(volatile void __iomem *addr)
{
}
EXPORT_SYMBOL(__iounmap);

void (*arch_iounmap)(volatile void __iomem *);

void iounmap(volatile void __iomem *addr)
{
}
EXPORT_SYMBOL(iounmap);