/*
 * Based on linux/arch/arm/mm/nommu.c
 *
 * ARM PMSAv7 supporting functions.
 */

#include <linux/bitops.h>
#include <linux/memblock.h>
#include <linux/string.h>

#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/mpu.h>
#include <asm/sections.h>

#include "mm.h"

struct region {
        phys_addr_t base;
        phys_addr_t size;
        unsigned long subreg;
};

static struct region __initdata mem[MPU_MAX_REGIONS];
#ifdef CONFIG_XIP_KERNEL
static struct region __initdata xip[MPU_MAX_REGIONS];
#endif

static unsigned int __initdata mpu_min_region_order;
static unsigned int __initdata mpu_max_regions;

static int __init __mpu_min_region_order(void);
static int __init __mpu_max_regions(void);

#ifndef CONFIG_CPU_V7M

#define DRBAR   __ACCESS_CP15(c6, 0, c1, 0)
#define IRBAR   __ACCESS_CP15(c6, 0, c1, 1)
#define DRSR    __ACCESS_CP15(c6, 0, c1, 2)
#define IRSR    __ACCESS_CP15(c6, 0, c1, 3)
#define DRACR   __ACCESS_CP15(c6, 0, c1, 4)
#define IRACR   __ACCESS_CP15(c6, 0, c1, 5)
#define RNGNR   __ACCESS_CP15(c6, 0, c2, 0)

/* Region number register */
static inline void rgnr_write(u32 v)
{
        write_sysreg(v, RNGNR);
}

/* Data-side / unified region attributes */

/* Region access control register */
static inline void dracr_write(u32 v)
{
        write_sysreg(v, DRACR);
}

/* Region size register */
static inline void drsr_write(u32 v)
{
        write_sysreg(v, DRSR);
}

/* Region base address register */
static inline void drbar_write(u32 v)
{
        write_sysreg(v, DRBAR);
}

static inline u32 drbar_read(void)
{
        return read_sysreg(DRBAR);
}

/* Optional instruction-side region attributes */

/* I-side region access control register */
static inline void iracr_write(u32 v)
{
        write_sysreg(v, IRACR);
}

/* I-side region size register */
static inline void irsr_write(u32 v)
{
        write_sysreg(v, IRSR);
}

/* I-side region base address register */
static inline void irbar_write(u32 v)
{
        write_sysreg(v, IRBAR);
}

static inline u32 irbar_read(void)
{
        return read_sysreg(IRBAR);
}

#else

/* Region number register */
static inline void rgnr_write(u32 v)
{
        writel_relaxed(v, BASEADDR_V7M_SCB + MPU_RNR);
}

/* Data-side / unified region attributes */

/* Region access control register (upper half of RASR) */
static inline void dracr_write(u32 v)
{
        u32 rsr = readl_relaxed(BASEADDR_V7M_SCB + MPU_RASR) & GENMASK(15, 0);

        writel_relaxed((v << 16) | rsr, BASEADDR_V7M_SCB + MPU_RASR);
}

/* Region size register (lower half of RASR) */
static inline void drsr_write(u32 v)
{
        u32 racr = readl_relaxed(BASEADDR_V7M_SCB + MPU_RASR) & GENMASK(31, 16);

        writel_relaxed(v | racr, BASEADDR_V7M_SCB + MPU_RASR);
}

/* Region base address register */
static inline void drbar_write(u32 v)
{
        writel_relaxed(v, BASEADDR_V7M_SCB + MPU_RBAR);
}

static inline u32 drbar_read(void)
{
        return readl_relaxed(BASEADDR_V7M_SCB + MPU_RBAR);
}

/* ARMv7-M only supports a unified MPU, so the I-side accessors are no-ops */

static inline void iracr_write(u32 v) {}
static inline void irsr_write(u32 v) {}
static inline void irbar_write(u32 v) {}
static inline u32 irbar_read(void) {return 0;}

#endif

static int __init mpu_present(void)
{
        return ((read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA) == MMFR0_PMSAv7);
}

/*
 * Try to cover the given range with a single MPU region, using subregion
 * disables to trim an unaligned start and/or a non-power-of-two size.
 */
static bool __init try_split_region(phys_addr_t base, phys_addr_t size, struct region *region)
{
        unsigned long subreg, bslots, sslots;
        phys_addr_t abase = base & ~(size - 1);
        phys_addr_t asize = base + size - abase;
        phys_addr_t p2size = 1 << __fls(asize);
        phys_addr_t bdiff, sdiff;

        if (p2size != asize)
                p2size *= 2;

        bdiff = base - abase;
        sdiff = p2size - asize;
        subreg = p2size / MPU_NR_SUBREGS;

        if ((bdiff % subreg) || (sdiff % subreg))
                return false;

        bslots = bdiff / subreg;
        sslots = sdiff / subreg;

        if (bslots || sslots) {
                int i;

                if (subreg < MPU_MIN_SUBREG_SIZE)
                        return false;

                if (bslots + sslots > MPU_NR_SUBREGS)
                        return false;

                for (i = 0; i < bslots; i++)
                        _set_bit(i, &region->subreg);

                for (i = 1; i <= sslots; i++)
                        _set_bit(MPU_NR_SUBREGS - i, &region->subreg);
        }

        region->base = abase;
        region->size = p2size;

        return true;
}
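
/*
 * Worked example (illustrative values, not from the original source):
 * covering base = 0x80000000, size = 0xc00000 (12 MiB).  The aligned
 * base stays at 0x80000000, asize is 12 MiB and is rounded up to a
 * p2size of 16 MiB, so each of the 8 subregions spans 2 MiB.  With
 * bdiff = 0 and sdiff = 4 MiB we get bslots = 0 and sslots = 2: the
 * top two subregions (6 and 7) are disabled and the 16 MiB region
 * maps exactly the requested 12 MiB.
 */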

static int __init allocate_region(phys_addr_t base, phys_addr_t size,
                                  unsigned int limit, struct region *regions)
{
        int count = 0;
        phys_addr_t diff = size;
        int attempts = MPU_MAX_REGIONS;

        while (diff) {
                /* Try to cover the region as is (maybe with help of subregions) */
                if (try_split_region(base, size, &regions[count])) {
                        count++;
                        base += size;
                        diff -= size;
                        size = diff;
                } else {
                        /*
                         * Maximum aligned region might overflow phys_addr_t
                         * if "base" is 0.  Hence we keep everything below 4G
                         * until we take the smaller of the aligned region
                         * size ("asize") and rounded region size ("p2size"),
                         * one of which is guaranteed to be smaller than the
                         * maximum physical address.
                         */
                        phys_addr_t asize = (base - 1) ^ base;
                        phys_addr_t p2size = (1 << __fls(diff)) - 1;

                        size = asize < p2size ? asize + 1 : p2size + 1;
                }

                if (count > limit)
                        break;

                if (!attempts)
                        break;

                attempts--;
        }

        return count;
}
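
/*
 * Worked example (illustrative values, not from the original source):
 * covering base = 0x80000000, size = 0x900000 (9 MiB).  The first
 * try_split_region() fails: the 7 MiB of slack in a 16 MiB region is
 * not a multiple of its 2 MiB subregion size.  The fallback shrinks
 * the attempt to the largest power of two not exceeding the remainder,
 * 8 MiB, which fits.  The remaining 1 MiB at 0x80800000 is aligned and
 * power-of-two sized, so the second pass fits too: two MPU regions
 * cover the 9 MiB exactly.
 */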

void __init adjust_lowmem_bounds_mpu(void)
{
        phys_addr_t specified_mem_size = 0, total_mem_size = 0;
        struct memblock_region *reg;
        bool first = true;
        phys_addr_t mem_start;
        phys_addr_t mem_end;
        unsigned int mem_max_regions;
        int num, i;

        if (!mpu_present())
                return;

        /* Free-up MPU_PROBE_REGION */
        mpu_min_region_order = __mpu_min_region_order();

        /* How many regions are supported */
        mpu_max_regions = __mpu_max_regions();

        mem_max_regions = min((unsigned int)MPU_MAX_REGIONS, mpu_max_regions);

        /* We need to keep one slot for background region */
        mem_max_regions--;

#ifndef CONFIG_CPU_V7M
        /* ... and one for vectors */
        mem_max_regions--;
#endif

#ifdef CONFIG_XIP_KERNEL
        /* plus some regions to cover XIP ROM */
        num = allocate_region(CONFIG_XIP_PHYS_ADDR, __pa(_exiprom) - CONFIG_XIP_PHYS_ADDR,
                              mem_max_regions, xip);

        mem_max_regions -= num;
#endif

        for_each_memblock(memory, reg) {
                if (first) {
                        phys_addr_t phys_offset = PHYS_OFFSET;

                        /*
                         * Initially only use memory contiguous from
                         * PHYS_OFFSET.
                         */
                        if (reg->base != phys_offset)
                                panic("First memory bank must be contiguous from PHYS_OFFSET");

                        mem_start = reg->base;
                        mem_end = reg->base + reg->size;
                        specified_mem_size = reg->size;
                        first = false;
                } else {
                        /*
                         * memblock auto merges contiguous blocks, remove
                         * all blocks afterwards in one go (we can't remove
                         * blocks separately while iterating)
                         */
                        pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n",
                                  &mem_end, &reg->base);
                        memblock_remove(reg->base, 0 - reg->base);
                        break;
                }
        }

        memset(mem, 0, sizeof(mem));
        num = allocate_region(mem_start, specified_mem_size, mem_max_regions, mem);

        for (i = 0; i < num; i++) {
                unsigned long subreg = mem[i].size / MPU_NR_SUBREGS;

                total_mem_size += mem[i].size - subreg * hweight_long(mem[i].subreg);

                pr_debug("MPU: base %pa size %pa disable subregions: %*pbl\n",
                         &mem[i].base, &mem[i].size, MPU_NR_SUBREGS, &mem[i].subreg);
        }

        if (total_mem_size != specified_mem_size) {
                pr_warn("Truncating memory from %pa to %pa (MPU region constraints)\n",
                        &specified_mem_size, &total_mem_size);
                memblock_remove(mem_start + total_mem_size,
                                specified_mem_size - total_mem_size);
        }
}

static int __init __mpu_max_regions(void)
{
        /*
         * We don't support a different number of I/D side regions so
         * if we have separate instruction and data memory maps then
         * return whichever side has a smaller number of supported
         * regions.
         */
        u32 dregions, iregions, mpuir;

        mpuir = read_cpuid_mputype();

        dregions = iregions = (mpuir & MPUIR_DREGION_SZMASK) >> MPUIR_DREGION;

        /* Check for separate d-side and i-side memory maps */
        if (mpuir & MPUIR_nU)
                iregions = (mpuir & MPUIR_IREGION_SZMASK) >> MPUIR_IREGION;

        /* Use the smallest of the two maxima */
        return min(dregions, iregions);
}
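
/*
 * For example (illustrative value, not from the original source), a
 * unified PMSAv7 MPU with 16 regions reports MPUIR = 0x00001000: the
 * nU bit (bit 0) is clear and DREGION (bits 15:8) is 16, so both
 * dregions and iregions end up as 16.
 */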

static int __init mpu_iside_independent(void)
{
        /* MPUIR.nU specifies whether there is *not* a unified memory map */
        return read_cpuid_mputype() & MPUIR_nU;
}

static int __init __mpu_min_region_order(void)
{
        u32 drbar_result, irbar_result;

        /* We've kept a region free for this probing */
        rgnr_write(MPU_PROBE_REGION);
        isb();
        /*
         * As per ARM ARM, write 0xFFFFFFFC to DRBAR to find the minimum
         * region order
         */
        drbar_write(0xFFFFFFFC);
        drbar_result = irbar_result = drbar_read();
        drbar_write(0x0);
        /* If the MPU is non-unified, we use the larger of the two minima */
        if (mpu_iside_independent()) {
                irbar_write(0xFFFFFFFC);
                irbar_result = irbar_read();
                irbar_write(0x0);
        }
        isb(); /* Ensure that MPU region operations have completed */

        /* Return whichever result is larger */
        return __ffs(max(drbar_result, irbar_result));
}
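
/*
 * For example (illustrative value, not from the original source), if
 * the MPU supports 32-byte regions then DRBAR bits [4:0] are RAZ/WI:
 * the 0xFFFFFFFC write reads back as 0xFFFFFFE0, and
 * __ffs(0xFFFFFFE0) = 5 gives a minimum region size of 2^5 = 32 bytes.
 */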

static int __init mpu_setup_region(unsigned int number, phys_addr_t start,
                                   unsigned int size_order, unsigned int properties,
                                   unsigned int subregions, bool need_flush)
{
        u32 size_data;

        /* We kept a region free for the probing */
        if (number > mpu_max_regions
            || number >= MPU_MAX_REGIONS)
                return -ENOENT;

        if (size_order > 32)
                return -ENOMEM;

        if (size_order < mpu_min_region_order)
                return -ENOMEM;

        /* Writing N to bits 5:1 (RSR_SZ) specifies region size 2^(N+1) */
        size_data = ((size_order - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN;
        size_data |= subregions << MPU_RSR_SD;

        if (need_flush)
                flush_cache_all();

        dsb(); /* Ensure all previous data accesses occur with old mappings */
        rgnr_write(number);
        isb();
        drbar_write(start);
        dracr_write(properties);
        isb();
        drsr_write(size_data);

        /* Check for independent I-side registers */
        if (mpu_iside_independent()) {
                irbar_write(start);
                iracr_write(properties);
                isb();
                irsr_write(size_data);
        }
        isb();

        /* Store region info (we treat i/d side the same, so only store d) */
        mpu_rgn_info.rgns[number].dracr = properties;
        mpu_rgn_info.rgns[number].drbar = start;
        mpu_rgn_info.rgns[number].drsr = size_data;

        mpu_rgn_info.used++;

        return 0;
}
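
/*
 * For example (illustrative values, not from the original source),
 * a 1 MiB region (size_order = 20) with subregions 6 and 7 disabled
 * encodes as, assuming the usual MPU_RSR_EN = 0, MPU_RSR_SZ = 1 and
 * MPU_RSR_SD = 8 field offsets:
 *
 *   size_data = ((20 - 1) << 1) | (1 << 0) | (0xc0 << 8) = 0xc027
 */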

/*
 * Set up default MPU regions, doing nothing if there is no MPU
 */
void __init mpu_setup(void)
{
        int i, region = 0, err = 0;

        if (!mpu_present())
                return;

        /* Setup MPU (order is important) */

        /* Background */
        err |= mpu_setup_region(region++, 0, 32,
                                MPU_ACR_XN | MPU_RGN_STRONGLY_ORDERED | MPU_AP_PL1RW_PL0RW,
                                0, false);

#ifdef CONFIG_XIP_KERNEL
        /* ROM */
        for (i = 0; i < ARRAY_SIZE(xip); i++) {
                /*
                 * In case we overwrite the RAM region we set up earlier in
                 * head-nommu.S (which is cacheable), all subsequent data
                 * accesses until we set up RAM below would be done with the
                 * background region (which is uncacheable), so we need to
                 * clean and invalidate the cache.
                 */
                bool need_flush = region == MPU_RAM_REGION;

                if (!xip[i].size)
                        continue;

                err |= mpu_setup_region(region++, xip[i].base, ilog2(xip[i].size),
                                        MPU_AP_PL1RO_PL0NA | MPU_RGN_NORMAL,
                                        xip[i].subreg, need_flush);
        }
#endif

        /* RAM */
        for (i = 0; i < ARRAY_SIZE(mem); i++) {
                if (!mem[i].size)
                        continue;

                err |= mpu_setup_region(region++, mem[i].base, ilog2(mem[i].size),
                                        MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL,
                                        mem[i].subreg, false);
        }

        /* Vectors */
#ifndef CONFIG_CPU_V7M
        err |= mpu_setup_region(region++, vectors_base, ilog2(2 * PAGE_SIZE),
                                MPU_AP_PL1RW_PL0NA | MPU_RGN_NORMAL,
                                0, false);
#endif

        if (err) {
                panic("MPU region initialization failure! %d", err);
        } else {
                pr_info("Using ARMv7 PMSA Compliant MPU. Region independence: %s, Used %d of %d regions\n",
                        mpu_iside_independent() ? "Yes" : "No",
                        mpu_rgn_info.used, mpu_max_regions);
        }
}
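
/*
 * On a minimal ARMv7-R setup (background + one RAM region + vectors),
 * the resulting boot line would read, with illustrative counts:
 *
 *   Using ARMv7 PMSA Compliant MPU. Region independence: No, Used 3 of 16 regions
 */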