1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20#include "qemu/osdep.h"
21#include "exec/cputlb.h"
22#include "exec/log.h"
23#include "exec/exec-all.h"
24#include "exec/translate-all.h"
25#include "sysemu/tcg.h"
26#include "tcg/tcg.h"
27#include "tb-hash.h"
28#include "tb-context.h"
29#include "internal.h"
30
31
32static bool tb_cmp(const void *ap, const void *bp)
33{
34 const TranslationBlock *a = ap;
35 const TranslationBlock *b = bp;
36
37 return ((TARGET_TB_PCREL || tb_pc(a) == tb_pc(b)) &&
38 a->cs_base == b->cs_base &&
39 a->flags == b->flags &&
40 (tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) &&
41 a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
42 tb_page_addr0(a) == tb_page_addr0(b) &&
43 tb_page_addr1(a) == tb_page_addr1(b));
44}
45
46void tb_htable_init(void)
47{
48 unsigned int mode = QHT_MODE_AUTO_RESIZE;
49
50 qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode);
51}
52
53
54static void page_flush_tb_1(int level, void **lp)
55{
56 int i;
57
58 if (*lp == NULL) {
59 return;
60 }
61 if (level == 0) {
62 PageDesc *pd = *lp;
63
64 for (i = 0; i < V_L2_SIZE; ++i) {
65 page_lock(&pd[i]);
66 pd[i].first_tb = (uintptr_t)NULL;
67 page_unlock(&pd[i]);
68 }
69 } else {
70 void **pp = *lp;
71
72 for (i = 0; i < V_L2_SIZE; ++i) {
73 page_flush_tb_1(level - 1, pp + i);
74 }
75 }
76}
77
78static void page_flush_tb(void)
79{
80 int i, l1_sz = v_l1_size;
81
82 for (i = 0; i < l1_sz; i++) {
83 page_flush_tb_1(v_l2_levels, l1_map + i);
84 }
85}
86
87
/*
 * Flush all translated code: every vCPU's jump cache, the TB hash
 * table, the per-page TB lists and the TCG code-generation regions.
 * Runs with exclusive access to the vCPUs (dispatched via run_on_cpu).
 * @tb_flush_count carries the flush generation seen by the requester;
 * if it no longer matches, another flush already serviced the request
 * and this one is a no-op.
 */
static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
{
    bool did_flush = false;

    mmap_lock();
    /* If it was already done on request of another CPU, just bail out. */
    if (tb_ctx.tb_flush_count != tb_flush_count.host_int) {
        goto done;
    }
    did_flush = true;

    CPU_FOREACH(cpu) {
        tcg_flush_jmp_cache(cpu);
    }

    qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
    page_flush_tb();

    tcg_region_reset_all();

    /* Bump the generation with a barrier so tb_flush() readers see it. */
    qatomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);

done:
    mmap_unlock();
    if (did_flush) {
        /* Notify plugins only after the flush actually happened. */
        qemu_plugin_flush_cb();
    }
}
116
117void tb_flush(CPUState *cpu)
118{
119 if (tcg_enabled()) {
120 unsigned tb_flush_count = qatomic_mb_read(&tb_ctx.tb_flush_count);
121
122 if (cpu_in_exclusive_context(cpu)) {
123 do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count));
124 } else {
125 async_safe_run_on_cpu(cpu, do_tb_flush,
126 RUN_ON_CPU_HOST_INT(tb_flush_count));
127 }
128 }
129}
130
131
132
133
134
/*
 * Unlink @tb from the intrusive singly-linked TB list of page @pd.
 * Call with the page lock held; @tb must be present in the list
 * (asserted).  n1 selects which of the TB's two page_next links
 * corresponds to this page.
 */
static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    uintptr_t *pprev;
    unsigned int n1;

    assert_page_locked(pd);
    pprev = &pd->first_tb;
    PAGE_FOR_EACH_TB(pd, tb1, n1) {
        if (tb1 == tb) {
            /* Splice the entry out of the list. */
            *pprev = tb1->page_next[n1];
            return;
        }
        pprev = &tb1->page_next[n1];
    }
    g_assert_not_reached();
}
152
153
/*
 * Remove @orig's outgoing jump @n_orig from the incoming-jump list of
 * its destination TB, if there is one.  Must race safely against a
 * concurrent invalidation of the destination: the low bit of
 * jmp_dest[] acts as a "no more jumps may be chained" mark.
 */
static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig)
{
    uintptr_t ptr, ptr_locked;
    TranslationBlock *dest;
    TranslationBlock *tb;
    uintptr_t *pprev;
    int n;

    /* Mark the LSB so that no further jumps can be inserted via this slot. */
    ptr = qatomic_or_fetch(&orig->jmp_dest[n_orig], 1);
    dest = (TranslationBlock *)(ptr & ~1);
    if (dest == NULL) {
        /* No destination was ever chained; nothing to unlink. */
        return;
    }

    qemu_spin_lock(&dest->jmp_lock);

    /*
     * Re-read under the lock: while we were acquiring it, the
     * destination may have been invalidated and the jump cleared.
     */
    ptr_locked = qatomic_read(&orig->jmp_dest[n_orig]);
    if (ptr_locked != ptr) {
        qemu_spin_unlock(&dest->jmp_lock);

        /*
         * The only way the value can have changed is the destination's
         * invalidation path clearing it, which leaves just the "dead"
         * mark (1) behind and sets CF_INVALID on dest (asserted here).
         */
        g_assert(ptr_locked == 1 && dest->cflags & CF_INVALID);
        return;
    }

    /*
     * Still chained: walk dest's incoming-jump list and splice out the
     * (orig, n_orig) entry.  It must be present, or the lists are
     * corrupt.
     */
    pprev = &dest->jmp_list_head;
    TB_FOR_EACH_JMP(dest, tb, n) {
        if (tb == orig && n == n_orig) {
            *pprev = tb->jmp_list_next[n];
            /* The dead mark set above is enough; jmp_dest stays as-is. */
            qemu_spin_unlock(&dest->jmp_lock);
            return;
        }
        pprev = &tb->jmp_list_next[n];
    }
    g_assert_not_reached();
}
201
202
203
204
205void tb_reset_jump(TranslationBlock *tb, int n)
206{
207 uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]);
208 tb_set_jmp_target(tb, n, addr);
209}
210
211
/*
 * Sever every incoming jump chained to @dest: repoint each caller's
 * jump back at its own reset stub and mark its jmp_dest slot dead.
 * Called when @dest is being invalidated.
 */
static inline void tb_jmp_unlink(TranslationBlock *dest)
{
    TranslationBlock *tb;
    int n;

    qemu_spin_lock(&dest->jmp_lock);

    TB_FOR_EACH_JMP(dest, tb, n) {
        tb_reset_jump(tb, n);
        /* Clear the pointer but keep the low "dead" bit set. */
        qatomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1);
        /* No need to clear the list entry; setting the dead bit is enough. */
    }
    dest->jmp_list_head = (uintptr_t)NULL;

    qemu_spin_unlock(&dest->jmp_lock);
}
228
229static void tb_jmp_cache_inval_tb(TranslationBlock *tb)
230{
231 CPUState *cpu;
232
233 if (TARGET_TB_PCREL) {
234
235 CPU_FOREACH(cpu) {
236 tcg_flush_jmp_cache(cpu);
237 }
238 } else {
239 uint32_t h = tb_jmp_cache_hash_func(tb_pc(tb));
240
241 CPU_FOREACH(cpu) {
242 CPUJumpCache *jc = cpu->tb_jmp_cache;
243
244 if (qatomic_read(&jc->array[h].tb) == tb) {
245 qatomic_set(&jc->array[h].tb, NULL);
246 }
247 }
248 }
249}
250
251
252
253
254
255
/*
 * Invalidate one TB.  The ordering matters:
 *   1. set CF_INVALID under jmp_lock so no new incoming jumps chain;
 *   2. remove from the QHT (using the cflags captured *before* the
 *      CF_INVALID bit was set, since cflags feed the hash);
 *   3. optionally unlink from the page list(s);
 *   4. purge jump caches and both directions of the jump lists.
 *
 * Call with the memory lock held; if @rm_from_page_list is set, also
 * with the TB's page lock(s) held.
 */
static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
{
    PageDesc *p;
    uint32_t h;
    tb_page_addr_t phys_pc;
    uint32_t orig_cflags = tb_cflags(tb);

    assert_memory_lock();

    /* Make sure no further incoming jumps will be chained to this TB. */
    qemu_spin_lock(&tb->jmp_lock);
    qatomic_set(&tb->cflags, tb->cflags | CF_INVALID);
    qemu_spin_unlock(&tb->jmp_lock);

    /* Remove the TB from the hash list. */
    phys_pc = tb_page_addr0(tb);
    h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : tb_pc(tb)),
                     tb->flags, orig_cflags, tb->trace_vcpu_dstate);
    if (!qht_remove(&tb_ctx.htable, tb, h)) {
        /* Not found: a concurrent invalidation already removed it. */
        return;
    }

    /* Remove the TB from the page list(s). */
    if (rm_from_page_list) {
        p = page_find(phys_pc >> TARGET_PAGE_BITS);
        tb_page_remove(p, tb);
        /* A TB may span two pages; unlink from the second one too. */
        phys_pc = tb_page_addr1(tb);
        if (phys_pc != -1) {
            p = page_find(phys_pc >> TARGET_PAGE_BITS);
            tb_page_remove(p, tb);
        }
    }

    /* Remove the TB from every vCPU's jump cache. */
    tb_jmp_cache_inval_tb(tb);

    /* Suppress this TB from the two outgoing jump lists. */
    tb_remove_from_jmp_list(tb, 0);
    tb_remove_from_jmp_list(tb, 1);

    /* Suppress any remaining jumps into this TB. */
    tb_jmp_unlink(tb);

    qatomic_set(&tb_ctx.tb_phys_invalidate_count,
                tb_ctx.tb_phys_invalidate_count + 1);
}
302
/*
 * Invalidate @tb with its page lock(s) already held, bracketing the
 * operation with the host JIT write/execute permission toggles
 * (presumably for W^X hosts — confirm against qemu_thread_jit_write()).
 */
static void tb_phys_invalidate__locked(TranslationBlock *tb)
{
    qemu_thread_jit_write();
    do_tb_phys_invalidate(tb, true);
    qemu_thread_jit_execute();
}
309
/*
 * Find (allocating if @alloc) and lock the PageDescs for @phys1 and
 * @phys2.  @phys1 must be valid; @phys2 may be -1 when only one page
 * is involved.  Two distinct pages are always locked in ascending
 * page-index order, giving a global lock hierarchy that prevents
 * deadlock between concurrent callers.
 */
static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
                           PageDesc **ret_p2, tb_page_addr_t phys2, bool alloc)
{
    PageDesc *p1, *p2;
    tb_page_addr_t page1;
    tb_page_addr_t page2;

    assert_memory_lock();
    g_assert(phys1 != -1);

    page1 = phys1 >> TARGET_PAGE_BITS;
    page2 = phys2 >> TARGET_PAGE_BITS;

    p1 = page_find_alloc(page1, alloc);
    if (ret_p1) {
        *ret_p1 = p1;
    }
    if (likely(phys2 == -1)) {
        /* Single page: just lock it. */
        page_lock(p1);
        return;
    } else if (page1 == page2) {
        /* Both addresses fall on the same page: lock once, report twice. */
        page_lock(p1);
        if (ret_p2) {
            *ret_p2 = p1;
        }
        return;
    }
    p2 = page_find_alloc(page2, alloc);
    if (ret_p2) {
        *ret_p2 = p2;
    }
    /* Two distinct pages: honor the ascending lock order. */
    if (page1 < page2) {
        page_lock(p1);
        page_lock(p2);
    } else {
        page_lock(p2);
        page_lock(p1);
    }
}
349
#ifdef CONFIG_USER_ONLY
/* In user-mode, page locking is subsumed by mmap_lock: these are no-ops. */
static inline void page_lock_tb(const TranslationBlock *tb) { }
static inline void page_unlock_tb(const TranslationBlock *tb) { }
#else
/* Lock the page(s) of a TB, honoring the page_lock_pair() lock order. */
static void page_lock_tb(const TranslationBlock *tb)
{
    page_lock_pair(NULL, tb_page_addr0(tb), NULL, tb_page_addr1(tb), false);
}

/* Unlock the page(s) of a TB, skipping the second if it is the same page. */
static void page_unlock_tb(const TranslationBlock *tb)
{
    PageDesc *p1 = page_find(tb_page_addr0(tb) >> TARGET_PAGE_BITS);

    page_unlock(p1);
    if (unlikely(tb_page_addr1(tb) != -1)) {
        PageDesc *p2 = page_find(tb_page_addr1(tb) >> TARGET_PAGE_BITS);

        if (p2 != p1) {
            page_unlock(p2);
        }
    }
}
#endif
374
375
376
377
378
379void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
380{
381 if (page_addr == -1 && tb_page_addr0(tb) != -1) {
382 page_lock_tb(tb);
383 do_tb_phys_invalidate(tb, true);
384 page_unlock_tb(tb);
385 } else {
386 do_tb_phys_invalidate(tb, false);
387 }
388}
389
390
391
392
393
394
/*
 * Link @tb into page @p's TB list as entry @n (0 = first page,
 * 1 = second page of a spanning TB) and, in system mode, write-protect
 * the page if this is the first TB on it.  Call with the page lock held.
 */
static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
                               unsigned int n, tb_page_addr_t page_addr)
{
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    assert_page_locked(p);

    /* Push onto the list head; the low bits of the link encode @n. */
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != (uintptr_t)NULL;
#endif
    p->first_tb = (uintptr_t)tb | n;

#if defined(CONFIG_USER_ONLY)
    /* The page must already have been made non-writable by the caller. */
    assert(!(p->flags & PAGE_WRITE));
#else
    /*
     * If code was already present, the page is already protected;
     * only the first TB added to a page needs tlb_protect_code().
     */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif
}
424
425
426
427
428
429
430
431
432
433
434
435
/*
 * Add a new TB and link it to the physical page tables.  @phys_page2
 * is -1 when the TB occupies a single page.
 *
 * Called with mmap_lock held for user-mode emulation.
 *
 * Returns @tb, or an existing equivalent TB if another thread raced us
 * inserting one for the same guest code; in that case the caller must
 * discard @tb and use the returned TB instead.
 */
TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                               tb_page_addr_t phys_page2)
{
    PageDesc *p;
    PageDesc *p2 = NULL;
    void *existing_tb = NULL;
    uint32_t h;

    assert_memory_lock();
    tcg_debug_assert(!(tb->cflags & CF_INVALID));

    /*
     * Add the TB to the page list(s), acquiring the page locks first.
     * The locks are held across the hash-table insertion so that, if
     * the insertion loses a race, we know the TB is still on the page
     * lists and can safely remove it again below.
     */
    page_lock_pair(&p, phys_pc, &p2, phys_page2, true);
    tb_page_add(p, tb, 0, phys_pc);
    if (p2) {
        tb_page_add(p2, tb, 1, phys_page2);
    }

    /* Insert into the hash table; existing_tb reports a duplicate. */
    h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : tb_pc(tb)),
                     tb->flags, tb->cflags, tb->trace_vcpu_dstate);
    qht_insert(&tb_ctx.htable, tb, h, &existing_tb);

    /* Lost the race: back out the page links and adopt the winner. */
    if (unlikely(existing_tb)) {
        tb_page_remove(p, tb);
        if (p2) {
            tb_page_remove(p2, tb);
        }
        tb = existing_tb;
    }

    if (p2 && p2 != p) {
        page_unlock(p2);
    }
    page_unlock(p);
    return tb;
}
481
482
483
484
485
486
/*
 * Invalidate all TBs which intersect the physical address range
 * [@start, @end), which must lie within the single page described by
 * @p.  @retaddr, if nonzero, is the host return address of the write
 * that triggered the invalidation, used under TARGET_HAS_PRECISE_SMC
 * to detect self-modification of the currently-executing TB.
 *
 * Call with @p's page lock held (and mmap_lock in user mode).  May not
 * return: on precise-SMC detection it unwinds via cpu_loop_exit_noexc().
 */
static void
tb_invalidate_phys_page_range__locked(struct page_collection *pages,
                                      PageDesc *p, tb_page_addr_t start,
                                      tb_page_addr_t end,
                                      uintptr_t retaddr)
{
    TranslationBlock *tb;
    tb_page_addr_t tb_start, tb_end;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *cpu = current_cpu;
    bool current_tb_not_found = retaddr != 0;
    bool current_tb_modified = false;
    TranslationBlock *current_tb = NULL;
#endif

    assert_page_locked(p);

    /*
     * Walk every TB linked to this page and invalidate those whose
     * guest-code bytes overlap [start, end).
     */
    PAGE_FOR_EACH_TB(p, tb, n) {
        assert_page_locked(p);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /*
             * First page: the TB starts here.  tb_end may run past the
             * end of the page, which is harmless for the overlap test.
             */
            tb_start = tb_page_addr0(tb);
            tb_end = tb_start + tb->size;
        } else {
            /* Second page: only the spilled-over tail lives here. */
            tb_start = tb_page_addr1(tb);
            tb_end = tb_start + ((tb_page_addr0(tb) + tb->size)
                                 & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = false;
                /* Lazily resolve the TB we are executing from retaddr. */
                current_tb = tcg_tb_lookup(retaddr);
            }
            if (current_tb == tb &&
                (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
                /*
                 * We are modifying the TB we are executing: stop it.
                 * We could be more precise by checking whether the
                 * modification is after the current PC, but that would
                 * require partially restoring the CPU state.
                 */
                current_tb_modified = true;
                cpu_restore_state_from_tb(cpu, current_tb, retaddr);
            }
#endif
            tb_phys_invalidate__locked(tb);
        }
    }
#if !defined(CONFIG_USER_ONLY)
    /* If no code remains on the page, stop trapping writes to it. */
    if (!p->first_tb) {
        tlb_unprotect_code(start);
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        page_collection_unlock(pages);
        /* Force execution of exactly one insn on the next retranslation. */
        cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
        mmap_unlock();
        cpu_loop_exit_noexc(cpu);
    }
#endif
}
561
562
563
564
565
566
567
568void tb_invalidate_phys_page(tb_page_addr_t addr)
569{
570 struct page_collection *pages;
571 tb_page_addr_t start, end;
572 PageDesc *p;
573
574 assert_memory_lock();
575
576 p = page_find(addr >> TARGET_PAGE_BITS);
577 if (p == NULL) {
578 return;
579 }
580
581 start = addr & TARGET_PAGE_MASK;
582 end = start + TARGET_PAGE_SIZE;
583 pages = page_collection_lock(start, end);
584 tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
585 page_collection_unlock(pages);
586}
587
588
589
590
591
592
593
594
595
596
/*
 * Invalidate all TBs which intersect the physical address range
 * [@start, @end).  Unlike the single-page variant, @start and @end may
 * span multiple pages; each page's portion of the range is handed to
 * tb_invalidate_phys_page_range__locked() separately.
 *
 * Call with the memory lock held.
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    struct page_collection *pages;
    tb_page_addr_t next;

    assert_memory_lock();

    pages = page_collection_lock(start, end);
    for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
         start < end;
         start = next, next += TARGET_PAGE_SIZE) {
        PageDesc *pd = page_find(start >> TARGET_PAGE_BITS);
        /* Clamp this iteration's sub-range to the page boundary. */
        tb_page_addr_t bound = MIN(next, end);

        /* Pages with no PageDesc carry no TBs and can be skipped. */
        if (pd == NULL) {
            continue;
        }
        tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
    }
    page_collection_unlock(pages);
}
618
619#ifdef CONFIG_SOFTMMU
620
621
622
623
624
625
626
627void tb_invalidate_phys_page_fast(struct page_collection *pages,
628 tb_page_addr_t start, int len,
629 uintptr_t retaddr)
630{
631 PageDesc *p;
632
633 assert_memory_lock();
634
635 p = page_find(start >> TARGET_PAGE_BITS);
636 if (!p) {
637 return;
638 }
639
640 assert_page_locked(p);
641 tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
642 retaddr);
643}
644#else
645
646
647
648
649
650
651
/*
 * Invalidate every TB on the page containing @addr, in response to a
 * detected guest store into that page (user-mode path).  @pc, if
 * nonzero, is the host PC of the faulting store, used under
 * TARGET_HAS_PRECISE_SMC to recognize the currently-executing TB.
 *
 * Returns true if the current TB was among those invalidated, in which
 * case cpu->cflags_next_tb has been set up to retranslate and execute
 * exactly one insn, and the caller must abort the current TB.
 *
 * Call with the memory lock held.
 */
bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    bool current_tb_modified = false;
#endif

    assert_memory_lock();

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        /* No PageDesc: no TBs, nothing to do. */
        return false;
    }

#ifdef TARGET_HAS_PRECISE_SMC
    /* Only bother resolving the current TB if the page has any TBs. */
    if (p->first_tb && pc != 0) {
        current_tb = tcg_tb_lookup(pc);
    }
#endif
    assert_page_locked(p);
    PAGE_FOR_EACH_TB(p, tb, n) {
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
            /*
             * We are modifying the TB we are executing: stop it.  We
             * could be more precise by checking whether the store is
             * after the current PC, but that would require partially
             * restoring the CPU state.
             */
            current_tb_modified = true;
            cpu_restore_state_from_tb(cpu, current_tb, pc);
        }
#endif
        tb_phys_invalidate(tb, addr);
    }
    /* All TBs gone; reset the page's list head. */
    p->first_tb = (uintptr_t)NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* Force execution of exactly one insn on the next translation. */
        cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
        return true;
    }
#endif

    return false;
}
704#endif
705