/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"

#include "exec/cputlb.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

/* statistics */
int tlb_flush_count;

/* NOTE:
 * If flush_global is true (the usual case), flush all tlb entries.
 * If flush_global is false, flush (at least) all tlb entries not
 * tagged global.
 *
 * Since QEMU doesn't currently implement a global/not-global flag
 * for tlb entries, tlb_flush() will also flush all tlb entries in
 * the flush_global == false case.  This is OK because CPU
 * architectures generally permit an implementation to drop entries
 * from the TLB at any time, so flushing more entries than required
 * is only an efficiency issue, not a correctness issue.
 */
void tlb_flush(CPUState *cpu, int flush_global)
{
    CPUArchState *env = cpu->env_ptr;

    tlb_debug("(%d)\n", flush_global);

    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    cpu->current_tb = NULL;

    memset(env->tlb_table, -1, sizeof(env->tlb_table));
    memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));

    env->vtlb_index = 0;
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}

static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, va_list argp)
{
    CPUArchState *env = cpu->env_ptr;

    tlb_debug("start\n");

    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    cpu->current_tb = NULL;

    for (;;) {
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

        tlb_debug("%d\n", mmu_idx);

        memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
        memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
    }

    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
}

void tlb_flush_by_mmuidx(CPUState *cpu, ...)
{
    va_list argp;
    va_start(argp, cpu);
    v_tlb_flush_by_mmuidx(cpu, argp);
    va_end(argp);
}
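
/* Usage sketch (the MMU index values below are illustrative, not taken
 * from any particular target): the variadic list is consumed by
 * v_tlb_flush_by_mmuidx() until the first negative value, so calls must
 * end with a negative sentinel, e.g.
 *
 *     tlb_flush_by_mmuidx(cpu, 0, 1, -1);
 *
 * flushes all entries for MMU indexes 0 and 1 and clears the TB jump
 * cache, while leaving the other MMU modes untouched.
 */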

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
    }
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forcing full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        tlb_flush(cpu, 1);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    /* check whether there are entries that need to be flushed in the vtlb */
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...)
{
    CPUArchState *env = cpu->env_ptr;
    int i, k;
    va_list argp;

    va_start(argp, addr);

    tlb_debug("addr " TARGET_FMT_lx "\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forced full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        v_tlb_flush_by_mmuidx(cpu, argp);
        va_end(argp);
        return;
    }

    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    cpu->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);

    for (;;) {
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

        tlb_debug("idx %d\n", mmu_idx);

        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

        /* check whether there are vtlb entries that need to be flushed */
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }
    va_end(argp);

    tb_flush_jmp_cache(cpu, addr);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in the physical page 'ram_addr' are no
   longer tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}

static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
{
    return (tlbe->addr_write & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0;
}

void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                           uintptr_t length)
{
    uintptr_t addr;

    if (tlb_is_dirty_ram(tlb_entry)) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        /* Unsigned arithmetic: the test also rejects addr < start,
           since the subtraction then wraps to a huge value.  */
        if ((addr - start) < length) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr) == NULL) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;

        for (i = 0; i < CPU_TLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                  start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
                                  start1, length);
        }
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
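
/* Worked example (illustrative 32-bit values, not taken from a real
 * target): inserting a 2 MB page at 0x40000000 records
 * tlb_flush_addr = 0x40000000 and tlb_flush_mask = 0xffe00000.
 * Inserting another 2 MB page at 0x40a00000 then widens the mask until
 * both addresses share a prefix, giving tlb_flush_mask = 0xff000000 and
 * tlb_flush_addr = 0x40000000, so tlb_flush_page() falls back to a full
 * flush for any address in that 16 MB window.
 */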

/* Add a new TLB entry.  At most one entry for a given virtual address
 * is permitted.  Only a single TARGET_PAGE_SIZE region is mapped; the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te;
    hwaddr iotlb, xlat, sz;
    unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    CPUIOTLBEntry *attr = &env->memattr[attrs.secure];

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }

    sz = size;
    section = address_space_translate_for_iotlb(cpu, asidx, paddr, &xlat, &sz,
                                                &prot, &attr->attrs);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d sec=%d.%d\n",
              vaddr, paddr, prot, mmu_idx, attr->attrs.secure, attrs.secure);

    address = vaddr;
    if (!memory_region_is_ram(section->mr)
        && !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat,
                                            prot, &address);

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    te = &env->tlb_table[mmu_idx][index];

    /* do not discard the translation in te, evict it into a victim tlb */
    env->tlb_v_table[mmu_idx][vidx] = *te;
    env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];

    /* refill the tlb */
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr;
    env->iotlb[mmu_idx][index].attrs = attr->attrs;
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                          memory_region_get_ram_addr(section->mr) + xlat)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}

/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}
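
/* Illustrative caller (hypothetical names, not defined in this file):
 * a target's tlb_fill() typically translates the faulting address and
 * then installs the mapping with something along the lines of
 *
 *     tlb_set_page(cs, vaddr & TARGET_PAGE_MASK, paddr & TARGET_PAGE_MASK,
 *                  prot, mmu_idx, TARGET_PAGE_SIZE);
 *
 * before returning to the generated code that faulted.
 */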

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;
    MemoryRegion *mr;
    CPUState *cpu = ENV_GET_CPU(env1);
    CPUIOTLBEntry *iotlbentry;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1, true);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        /* TLB entry is missing or stale: refill it by performing a
           code load, which goes through tlb_fill() on a miss.  */
        cpu_ldub_code(env1, addr);
    }
    iotlbentry = &env1->iotlb[mmu_idx][page_index];
    pd = iotlbentry->addr & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(cpu, pd, iotlbentry->attrs);
    if (memory_region_is_unassigned(mr)) {
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->do_unassigned_access) {
            cc->do_unassigned_access(cpu, addr, false, true, 0, 4);
        } else {
            cpu_abort(cpu, "Trying to execute code outside RAM or ROM at 0x"
                      TARGET_FMT_lx "\n", addr);
        }
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

/* Instantiate the softmmu load/store helpers for 1, 2, 4 and 8 byte
   accesses: first the data-access variants (_mmu suffix), then the
   code-access variants (_cmmu suffix) used by the translator.  */
#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"
#undef MMUSUFFIX

#define MMUSUFFIX _cmmu
#undef GETPC_ADJ
#define GETPC_ADJ 0
#undef GETRA
#define GETRA() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"