1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include <linux/io.h>
20#include "MMURegAcM.h"
21#include <hw_defs.h>
22#include <hw_mmu.h>
23#include <linux/types.h>
24#include <linux/err.h>
25
/* Masks and constants for the DSP MMU register interface. */
#define MMU_BASE_VAL_MASK 0xFC00	/* NOTE(review): unused in this file — confirm external users */
#define MMU_PAGE_MAX 3			/* NOTE(review): unused here — presumably max page-size index */
#define MMU_ELEMENTSIZE_MAX 3		/* NOTE(review): unused here — presumably max element-size code */
#define MMU_ADDR_MASK 0xFFFFF000	/* 4 KB-aligned address bits (CAM tag / RAM PA) */
#define MMU_TTB_MASK 0xFFFFC000		/* NOTE(review): unused here; hw_mmu_ttb_set masks ~0x7F instead */
#define MMU_SECTION_ADDR_MASK 0xFFF00000	/* 1 MB section base */
#define MMU_SSECTION_ADDR_MASK 0xFF000000	/* 16 MB supersection base */
#define MMU_PAGE_TABLE_MASK 0xFFFFFC00	/* L2 page-table base (1 KB aligned) */
#define MMU_LARGE_PAGE_MASK 0xFFFF0000	/* 64 KB large-page base */
#define MMU_SMALL_PAGE_MASK 0xFFFFF000	/* 4 KB small-page base */

#define MMU_LOAD_TLB 0x00000001	/* LD_TLB register: load CAM/RAM into current victim */
#define MMU_GFLUSH 0x60		/* register offset: global TLB flush */
39
40
41
42
/*
 * Page-size field written into the low bits of the MMU CAM register
 * (see mmu_set_cam_entry(), which ORs the value in directly), so the
 * enumerator order/values must match the hardware encoding:
 * 0 = section (1 MB), 1 = large page (64 KB), 2 = small page (4 KB),
 * 3 = supersection (16 MB).
 */
enum hw_mmu_page_size_t {
	HW_MMU_SECTION,
	HW_MMU_LARGE_PAGE,
	HW_MMU_SMALL_PAGE,
	HW_MMU_SUPERSECTION
};
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
/*
 * Program the MMU CAM register with a virtual-address tag, page size and
 * the preserved/valid control bits. Definition below. Always returns 0.
 */
static hw_status mmu_set_cam_entry(void __iomem *base_address,
				   const u32 page_sz,
				   const u32 preserved_bit,
				   const u32 valid_bit,
				   const u32 virtual_addr_tag);
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
/*
 * Program the MMU RAM register with the physical address and the
 * endianness / element-size / mixed-size attributes. Definition below.
 * Always returns 0.
 */
static hw_status mmu_set_ram_entry(void __iomem *base_address,
				   const u32 physical_addr,
				   enum hw_endianism_t endianism,
				   enum hw_element_size_t element_size,
				   enum hw_mmu_mixed_size_t mixed_size);
140
141
142
143hw_status hw_mmu_enable(void __iomem *base_address)
144{
145 hw_status status = 0;
146
147 MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, HW_SET);
148
149 return status;
150}
151
152hw_status hw_mmu_disable(void __iomem *base_address)
153{
154 hw_status status = 0;
155
156 MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, HW_CLEAR);
157
158 return status;
159}
160
161hw_status hw_mmu_num_locked_set(void __iomem *base_address,
162 u32 num_locked_entries)
163{
164 hw_status status = 0;
165
166 MMUMMU_LOCK_BASE_VALUE_WRITE32(base_address, num_locked_entries);
167
168 return status;
169}
170
171hw_status hw_mmu_victim_num_set(void __iomem *base_address,
172 u32 victim_entry_num)
173{
174 hw_status status = 0;
175
176 MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, victim_entry_num);
177
178 return status;
179}
180
181hw_status hw_mmu_event_ack(void __iomem *base_address, u32 irq_mask)
182{
183 hw_status status = 0;
184
185 MMUMMU_IRQSTATUS_WRITE_REGISTER32(base_address, irq_mask);
186
187 return status;
188}
189
190hw_status hw_mmu_event_disable(void __iomem *base_address, u32 irq_mask)
191{
192 hw_status status = 0;
193 u32 irq_reg;
194
195 irq_reg = MMUMMU_IRQENABLE_READ_REGISTER32(base_address);
196
197 MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, irq_reg & ~irq_mask);
198
199 return status;
200}
201
202hw_status hw_mmu_event_enable(void __iomem *base_address, u32 irq_mask)
203{
204 hw_status status = 0;
205 u32 irq_reg;
206
207 irq_reg = MMUMMU_IRQENABLE_READ_REGISTER32(base_address);
208
209 MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, irq_reg | irq_mask);
210
211 return status;
212}
213
214hw_status hw_mmu_event_status(void __iomem *base_address, u32 *irq_mask)
215{
216 hw_status status = 0;
217
218 *irq_mask = MMUMMU_IRQSTATUS_READ_REGISTER32(base_address);
219
220 return status;
221}
222
223hw_status hw_mmu_fault_addr_read(void __iomem *base_address, u32 *addr)
224{
225 hw_status status = 0;
226
227
228 *addr = MMUMMU_FAULT_AD_READ_REGISTER32(base_address);
229
230 return status;
231}
232
233hw_status hw_mmu_ttb_set(void __iomem *base_address, u32 ttb_phys_addr)
234{
235 hw_status status = 0;
236 u32 load_ttb;
237
238 load_ttb = ttb_phys_addr & ~0x7FUL;
239
240 MMUMMU_TTB_WRITE_REGISTER32(base_address, load_ttb);
241
242 return status;
243}
244
245hw_status hw_mmu_twl_enable(void __iomem *base_address)
246{
247 hw_status status = 0;
248
249 MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, HW_SET);
250
251 return status;
252}
253
254hw_status hw_mmu_twl_disable(void __iomem *base_address)
255{
256 hw_status status = 0;
257
258 MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, HW_CLEAR);
259
260 return status;
261}
262
/*
 * hw_mmu_tlb_add() - program one TLB entry (CAM + RAM sides) and load it
 * into the selected victim slot.
 * @base_address:  ioremapped base of the MMU register block
 * @physical_addr: physical address for the RAM (translation) side
 * @virtual_addr:  virtual address for the CAM (match) side
 * @page_sz:       HW_PAGE_SIZE4KB/64KB/1MB/16MB
 * @entry_num:     TLB slot to load the entry into
 * @map_attrs:     endianness / element-size / mixed-size attributes
 * @preserved_bit: CAM preserved (P) bit value
 * @valid_bit:     CAM valid (V) bit value
 *
 * The register-write sequence below (CAM, RAM, victim select, LD_TLB,
 * LOCK restore) is ordered for the hardware — do not reorder.
 *
 * Returns 0 on success, -EINVAL for an unsupported @page_sz.
 */
hw_status hw_mmu_tlb_add(void __iomem *base_address,
			 u32 physical_addr,
			 u32 virtual_addr,
			 u32 page_sz,
			 u32 entry_num,
			 struct hw_mmu_map_attrs_t *map_attrs,
			 s8 preserved_bit, s8 valid_bit)
{
	hw_status status = 0;
	u32 lock_reg;
	u32 virtual_addr_tag;
	enum hw_mmu_page_size_t mmu_pg_size;

	/* Map the generic page size onto the CAM page-size encoding. */
	switch (page_sz) {
	case HW_PAGE_SIZE4KB:
		mmu_pg_size = HW_MMU_SMALL_PAGE;
		break;

	case HW_PAGE_SIZE64KB:
		mmu_pg_size = HW_MMU_LARGE_PAGE;
		break;

	case HW_PAGE_SIZE1MB:
		mmu_pg_size = HW_MMU_SECTION;
		break;

	case HW_PAGE_SIZE16MB:
		mmu_pg_size = HW_MMU_SUPERSECTION;
		break;

	default:
		return -EINVAL;
	}

	/* Snapshot LOCK so the victim selection below can be undone. */
	lock_reg = MMUMMU_LOCK_READ_REGISTER32(base_address);

	/* CAM tag is the VA with the page-offset bits (11:0) dropped. */
	virtual_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12);

	/* Program the match (CAM) side of the entry. */
	mmu_set_cam_entry(base_address, mmu_pg_size, preserved_bit, valid_bit,
			  virtual_addr_tag);

	/* Program the translation (RAM) side of the entry. */
	mmu_set_ram_entry(base_address, physical_addr, map_attrs->endianism,
			  map_attrs->element_size, map_attrs->mixed_size);

	/* Point the victim at the requested slot ... */
	MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, entry_num);

	/* ... and latch CAM/RAM into that slot. */
	MMUMMU_LD_TLB_WRITE_REGISTER32(base_address, MMU_LOAD_TLB);

	/* Restore the original LOCK register contents. */
	MMUMMU_LOCK_WRITE_REGISTER32(base_address, lock_reg);

	return status;
}
324
/*
 * hw_mmu_pte_set() - write page-table entry/entries for one mapping.
 * @pg_tbl_va:     virtual address of the page table to write into
 *                 (L1 table for 1MB/16MB/coarse, L2 table for 4KB/64KB)
 * @physical_addr: physical address to map (or L2 table base for
 *                 HW_MMU_COARSE_PAGE_SIZE)
 * @virtual_addr:  virtual address being mapped; used to index the table
 * @page_sz:       HW_PAGE_SIZE4KB/64KB/1MB/16MB or HW_MMU_COARSE_PAGE_SIZE
 * @map_attrs:     endianness / element-size / mixed-size attributes
 *
 * The descriptor layouts follow the ARM short-descriptor format with
 * TI-specific attribute bits (endianism/element/mixed) — NOTE(review):
 * bit positions taken from this code; confirm against the OMAP TRM.
 * 64KB and 16MB entries are replicated 16 times, as those formats
 * require identical entries across the covered index range.
 *
 * Returns 0 on success, -EINVAL for an unsupported @page_sz.
 */
hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
			 u32 physical_addr,
			 u32 virtual_addr,
			 u32 page_sz, struct hw_mmu_map_attrs_t *map_attrs)
{
	hw_status status = 0;
	u32 pte_addr, pte_val;
	s32 num_entries = 1;

	switch (page_sz) {
	case HW_PAGE_SIZE4KB:
		/* L2 small page: type bits = 2, attrs in bits 4/9/11 */
		pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
					      virtual_addr &
					      MMU_SMALL_PAGE_MASK);
		pte_val =
		    ((physical_addr & MMU_SMALL_PAGE_MASK) |
		     (map_attrs->endianism << 9) | (map_attrs->
						    element_size << 4) |
		     (map_attrs->mixed_size << 11) | 2);
		break;

	case HW_PAGE_SIZE64KB:
		/* L2 large page: type bits = 1; 16 replicated entries */
		num_entries = 16;
		pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
					      virtual_addr &
					      MMU_LARGE_PAGE_MASK);
		pte_val =
		    ((physical_addr & MMU_LARGE_PAGE_MASK) |
		     (map_attrs->endianism << 9) | (map_attrs->
						    element_size << 4) |
		     (map_attrs->mixed_size << 11) | 1);
		break;

	case HW_PAGE_SIZE1MB:
		/* L1 section: type bits = 2, bit 18 cleared (not a
		 * supersection), attrs in bits 10/15/17 */
		pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
					      virtual_addr &
					      MMU_SECTION_ADDR_MASK);
		pte_val =
		    ((((physical_addr & MMU_SECTION_ADDR_MASK) |
		       (map_attrs->endianism << 15) | (map_attrs->
						       element_size << 10) |
		       (map_attrs->mixed_size << 17)) & ~0x40000) | 0x2);
		break;

	case HW_PAGE_SIZE16MB:
		/* L1 supersection: type bits = 2 plus bit 18 set;
		 * 16 replicated entries */
		num_entries = 16;
		pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
					      virtual_addr &
					      MMU_SSECTION_ADDR_MASK);
		pte_val =
		    (((physical_addr & MMU_SSECTION_ADDR_MASK) |
		      (map_attrs->endianism << 15) | (map_attrs->
						      element_size << 10) |
		      (map_attrs->mixed_size << 17)
		     ) | 0x40000 | 0x2);
		break;

	case HW_MMU_COARSE_PAGE_SIZE:
		/* L1 pointer to an L2 (coarse) table: type bits = 1 */
		pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
					      virtual_addr &
					      MMU_SECTION_ADDR_MASK);
		pte_val = (physical_addr & MMU_PAGE_TABLE_MASK) | 1;
		break;

	default:
		return -EINVAL;
	}

	/* Write the descriptor(s); pte_addr holds a CPU-usable VA. */
	while (--num_entries >= 0)
		((u32 *) pte_addr)[num_entries] = pte_val;

	return status;
}
398
399hw_status hw_mmu_pte_clear(const u32 pg_tbl_va, u32 virtual_addr, u32 page_size)
400{
401 hw_status status = 0;
402 u32 pte_addr;
403 s32 num_entries = 1;
404
405 switch (page_size) {
406 case HW_PAGE_SIZE4KB:
407 pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
408 virtual_addr &
409 MMU_SMALL_PAGE_MASK);
410 break;
411
412 case HW_PAGE_SIZE64KB:
413 num_entries = 16;
414 pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
415 virtual_addr &
416 MMU_LARGE_PAGE_MASK);
417 break;
418
419 case HW_PAGE_SIZE1MB:
420 case HW_MMU_COARSE_PAGE_SIZE:
421 pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
422 virtual_addr &
423 MMU_SECTION_ADDR_MASK);
424 break;
425
426 case HW_PAGE_SIZE16MB:
427 num_entries = 16;
428 pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
429 virtual_addr &
430 MMU_SSECTION_ADDR_MASK);
431 break;
432
433 default:
434 return -EINVAL;
435 }
436
437 while (--num_entries >= 0)
438 ((u32 *) pte_addr)[num_entries] = 0;
439
440 return status;
441}
442
443
444static hw_status mmu_set_cam_entry(void __iomem *base_address,
445 const u32 page_sz,
446 const u32 preserved_bit,
447 const u32 valid_bit,
448 const u32 virtual_addr_tag)
449{
450 hw_status status = 0;
451 u32 mmu_cam_reg;
452
453 mmu_cam_reg = (virtual_addr_tag << 12);
454 mmu_cam_reg = (mmu_cam_reg) | (page_sz) | (valid_bit << 2) |
455 (preserved_bit << 3);
456
457
458 MMUMMU_CAM_WRITE_REGISTER32(base_address, mmu_cam_reg);
459
460 return status;
461}
462
463
464static hw_status mmu_set_ram_entry(void __iomem *base_address,
465 const u32 physical_addr,
466 enum hw_endianism_t endianism,
467 enum hw_element_size_t element_size,
468 enum hw_mmu_mixed_size_t mixed_size)
469{
470 hw_status status = 0;
471 u32 mmu_ram_reg;
472
473 mmu_ram_reg = (physical_addr & MMU_ADDR_MASK);
474 mmu_ram_reg = (mmu_ram_reg) | ((endianism << 9) | (element_size << 7) |
475 (mixed_size << 6));
476
477
478 MMUMMU_RAM_WRITE_REGISTER32(base_address, mmu_ram_reg);
479
480 return status;
481
482}
483
/*
 * hw_mmu_tlb_flush_all() - flush the entire TLB by writing 1 to the
 * global-flush register (offset MMU_GFLUSH).
 * @base: ioremapped base of the MMU register block
 */
void hw_mmu_tlb_flush_all(void __iomem *base)
{
	__raw_writel(1, base + MMU_GFLUSH);
}
488