1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26#include "qemu/osdep.h"
27#include "qemu-common.h"
28#include "sysemu/tcg.h"
29#include "sysemu/replay.h"
30#include "qemu/main-loop.h"
31#include "qemu/guest-random.h"
32#include "exec/exec-all.h"
33#include "hw/boards.h"
34
35#include "tcg-cpus.h"
36
37
38static void qemu_cpu_kick_rr_cpus(void)
39{
40 CPUState *cpu;
41
42 CPU_FOREACH(cpu) {
43 cpu_exit(cpu);
44 };
45}
46
47static void tcg_kick_vcpu_thread(CPUState *cpu)
48{
49 if (qemu_tcg_mttcg_enabled()) {
50 cpu_exit(cpu);
51 } else {
52 qemu_cpu_kick_rr_cpus();
53 }
54}
55
56
57
58
59
60
61
62
63
64
65
66
67
/* Timer used to periodically kick the single-threaded RR vCPU thread. */
static QEMUTimer *tcg_kick_vcpu_timer;
/* vCPU currently scheduled by the round-robin loop (NULL when none). */
static CPUState *tcg_current_rr_cpu;

/* Kick every 100 ms of virtual time so no vCPU monopolises the RR thread. */
#define TCG_KICK_PERIOD (NANOSECONDS_PER_SECOND / 10)
72
73static inline int64_t qemu_tcg_next_kick(void)
74{
75 return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD;
76}
77
78
/*
 * Kick the currently scheduled round-robin vCPU.  Re-read
 * tcg_current_rr_cpu after the kick and retry if it changed, so that
 * a concurrent switch to another vCPU cannot cause the kick to be
 * lost.
 */
static void qemu_cpu_kick_rr_next_cpu(void)
{
    CPUState *cpu;
    do {
        cpu = qatomic_mb_read(&tcg_current_rr_cpu);
        if (cpu) {
            cpu_exit(cpu);
        }
    } while (cpu != qatomic_mb_read(&tcg_current_rr_cpu));
}
89
/*
 * Timer callback: re-arm the kick timer for the next period, then
 * kick the vCPU currently running in the round-robin loop so the
 * scheduler can move on to the next one.
 */
static void kick_tcg_thread(void *opaque)
{
    timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
    qemu_cpu_kick_rr_next_cpu();
}
95
/*
 * (Re)arm the periodic RR kick timer.  Only meaningful in
 * single-threaded mode, and only when there is more than one vCPU
 * to rotate between (CPU_NEXT(first_cpu) non-NULL).
 */
static void start_tcg_kick_timer(void)
{
    assert(!mttcg_enabled);
    /* Lazily create the timer the first time it is needed. */
    if (!tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
        tcg_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                           kick_tcg_thread, NULL);
    }
    if (tcg_kick_vcpu_timer && !timer_pending(tcg_kick_vcpu_timer)) {
        timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
    }
}
107
108static void stop_tcg_kick_timer(void)
109{
110 assert(!mttcg_enabled);
111 if (tcg_kick_vcpu_timer && timer_pending(tcg_kick_vcpu_timer)) {
112 timer_del(tcg_kick_vcpu_timer);
113 }
114}
115
/* Per-vCPU TCG teardown hook; nothing to release at present. */
static void qemu_tcg_destroy_vcpu(CPUState *cpu)
{
}
119
/*
 * Put the single RR thread to sleep while every vCPU is idle, then
 * restart the kick timer and drain pending per-vCPU work.
 */
static void qemu_tcg_rr_wait_io_event(void)
{
    CPUState *cpu;

    while (all_cpu_threads_idle()) {
        /* No point keeping the kick timer armed while everyone sleeps. */
        stop_tcg_kick_timer();
        qemu_cond_wait_iothread(first_cpu->halt_cond);
    }

    start_tcg_kick_timer();

    CPU_FOREACH(cpu) {
        qemu_wait_io_event_common(cpu);
    }
}
135
/*
 * Compute the instruction budget for the next execution slice: the
 * number of instructions that may run before the soonest timer
 * deadline.  In replay (PLAY) mode the count comes from the recorded
 * log instead.
 */
static int64_t tcg_get_icount_limit(void)
{
    int64_t deadline;

    if (replay_mode != REPLAY_MODE_PLAY) {
        /*
         * Include all the timers, because they may need an attention.
         * Too long CPU execution may create unnecessary delay in UI.
         */
        deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
                                              QEMU_TIMER_ATTR_ALL);
        /* Check realtime timers, because they help with input processing. */
        deadline = qemu_soonest_timeout(deadline,
                qemu_clock_deadline_ns_all(QEMU_CLOCK_REALTIME,
                                           QEMU_TIMER_ATTR_ALL));

        /*
         * Clamp: if no deadline is set (deadline < 0, i.e. no pending
         * timer) or it is more than INT32_MAX ns ahead, cap the slice
         * at INT32_MAX nanoseconds.
         */
        if ((deadline < 0) || (deadline > INT32_MAX)) {
            deadline = INT32_MAX;
        }

        return icount_round(deadline);
    } else {
        return replay_get_instructions();
    }
}
167
/* Wake up waiters on the virtual clock and run its expired timers. */
static void notify_aio_contexts(void)
{
    qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
}
174
/*
 * With icount enabled, check whether the virtual-clock deadline has
 * been reached and, if so, run the timers directly from this vCPU
 * thread instead of waking the I/O thread.
 */
static void handle_icount_deadline(void)
{
    assert(qemu_in_vcpu_thread());
    if (icount_enabled()) {
        int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
                                                      QEMU_TIMER_ATTR_ALL);

        if (deadline == 0) {
            notify_aio_contexts();
        }
    }
}
187
/*
 * Set up the instruction-count budget for @cpu's next execution slice
 * and take the replay mutex.  The mutex is released by the matching
 * process_icount_data() call after the slice finishes.
 */
static void prepare_icount_for_run(CPUState *cpu)
{
    if (icount_enabled()) {
        int insns_left;

        /*
         * These should always have been cleared by process_icount_data
         * after the previous slice.  (u16.high, by contrast, can be
         * raised asynchronously — see tcg_handle_interrupt below.)
         */
        g_assert(cpu_neg(cpu)->icount_decr.u16.low == 0);
        g_assert(cpu->icount_extra == 0);

        /* Low 16 bits go in the decrementer; the remainder is "extra". */
        cpu->icount_budget = tcg_get_icount_limit();
        insns_left = MIN(0xffff, cpu->icount_budget);
        cpu_neg(cpu)->icount_decr.u16.low = insns_left;
        cpu->icount_extra = cpu->icount_budget - insns_left;

        replay_mutex_lock();

        /* A zero budget with a pending checkpoint: run timers now. */
        if (cpu->icount_budget == 0 && replay_has_checkpoint()) {
            notify_aio_contexts();
        }
    }
}
213
/*
 * Account the instructions executed in the last slice, reset the
 * budget counters, and drop the replay mutex taken in
 * prepare_icount_for_run().
 */
static void process_icount_data(CPUState *cpu)
{
    if (icount_enabled()) {
        /* Account for executed instructions. */
        icount_update(cpu);

        /* Reset the counters for the next slice. */
        cpu_neg(cpu)->icount_decr.u16.low = 0;
        cpu->icount_extra = 0;
        cpu->icount_budget = 0;

        replay_account_executed_instructions();

        replay_mutex_unlock();
    }
}
230
/*
 * Execute guest code on @cpu via cpu_exec() inside an exclusive-work
 * region, returning cpu_exec()'s exit reason (an EXCP_* value).
 * When built with CONFIG_PROFILER, accumulates wall time spent in
 * cpu_exec into the TCG profiling counters.
 */
static int tcg_cpu_exec(CPUState *cpu)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

    assert(tcg_enabled());
#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    cpu_exec_start(cpu);
    ret = cpu_exec(cpu);
    cpu_exec_end(cpu);
#ifdef CONFIG_PROFILER
    qatomic_set(&tcg_ctx->prof.cpu_exec_time,
                tcg_ctx->prof.cpu_exec_time + profile_getclock() - ti);
#endif
    return ret;
}
251
252
253
254
255
256static void deal_with_unplugged_cpus(void)
257{
258 CPUState *cpu;
259
260 CPU_FOREACH(cpu) {
261 if (cpu->unplug && !cpu_can_run(cpu)) {
262 qemu_tcg_destroy_vcpu(cpu);
263 cpu_thread_signal_destroyed(cpu);
264 break;
265 }
266 }
267}
268
269
270
271
272
273
274
275
276
277
278
/*
 * Single-threaded (round-robin) TCG vCPU thread function: one host
 * thread services every guest vCPU in turn.  Runs with the iothread
 * lock (BQL) held except while actually executing guest code.
 */
static void *tcg_rr_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    assert(tcg_enabled());
    rcu_register_thread();
    tcg_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);

    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;
    cpu_thread_signal_created(cpu);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);

    /* Wait for initial kickoff before entering the main loop. */
    while (first_cpu->stopped) {
        qemu_cond_wait_iothread(first_cpu->halt_cond);

        /* Process any pending work while we wait. */
        CPU_FOREACH(cpu) {
            current_cpu = cpu;
            qemu_wait_io_event_common(cpu);
        }
    }

    start_tcg_kick_timer();

    cpu = first_cpu;

    /* Process any pending work on the first pass. */
    cpu->exit_request = 1;

    while (1) {
        /* Only take the replay mutex with the BQL dropped (lock order). */
        qemu_mutex_unlock_iothread();
        replay_mutex_lock();
        qemu_mutex_lock_iothread();

        icount_account_warp_timer();

        /*
         * Run expired virtual-clock timers here rather than waking the
         * I/O thread for them.
         */
        handle_icount_deadline();

        replay_mutex_unlock();

        if (!cpu) {
            cpu = first_cpu;
        }

        /* Round-robin: run each vCPU until it exits or queues work. */
        while (cpu && cpu_work_list_empty(cpu) && !cpu->exit_request) {
            /* Store rr_cpu (with barrier) before touching exit_request. */
            qatomic_mb_set(&tcg_current_rr_cpu, cpu);
            current_cpu = cpu;

            qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
                              (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);

            if (cpu_can_run(cpu)) {
                int r;

                qemu_mutex_unlock_iothread();
                prepare_icount_for_run(cpu);

                r = tcg_cpu_exec(cpu);

                process_icount_data(cpu);
                qemu_mutex_lock_iothread();

                if (r == EXCP_DEBUG) {
                    cpu_handle_guest_debug(cpu);
                    break;
                } else if (r == EXCP_ATOMIC) {
                    /* Atomic step needs exclusive execution, BQL dropped. */
                    qemu_mutex_unlock_iothread();
                    cpu_exec_step_atomic(cpu);
                    qemu_mutex_lock_iothread();
                    break;
                }
            } else if (cpu->stop) {
                if (cpu->unplug) {
                    cpu = CPU_NEXT(cpu);
                }
                break;
            }

            cpu = CPU_NEXT(cpu);
        } /* while (cpu && !cpu->exit_request).. */

        /* No memory barrier needed: a spurious wakeup is harmless. */
        qatomic_set(&tcg_current_rr_cpu, NULL);

        if (cpu && cpu->exit_request) {
            qatomic_mb_set(&cpu->exit_request, 0);
        }

        if (icount_enabled() && all_cpu_threads_idle()) {
            /*
             * When all cpus are sleeping (e.g. in WFI), avoid deadlock
             * by waking the main loop so it can start the warp timer.
             */
            qemu_notify_event();
        }

        qemu_tcg_rr_wait_io_event();
        deal_with_unplugged_cpus();
    }

    rcu_unregister_thread();
    return NULL;
}
392
393
394
395
396
397
398
399
400
/*
 * Multi-threaded TCG (MTTCG) vCPU thread function: one host thread
 * per guest vCPU.  There is no round-robin scheduling, so the thread
 * can sleep independently when its vCPU has nothing to do.  icount is
 * incompatible with MTTCG (asserted below).
 */
static void *tcg_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    assert(tcg_enabled());
    g_assert(!icount_enabled());

    rcu_register_thread();
    tcg_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);

    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;
    current_cpu = cpu;
    cpu_thread_signal_created(cpu);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);

    /* Process any pending work on the first pass. */
    cpu->exit_request = 1;

    do {
        if (cpu_can_run(cpu)) {
            int r;
            qemu_mutex_unlock_iothread();
            r = tcg_cpu_exec(cpu);
            qemu_mutex_lock_iothread();
            switch (r) {
            case EXCP_DEBUG:
                cpu_handle_guest_debug(cpu);
                break;
            case EXCP_HALTED:
                /*
                 * During start-up the vCPU is reset and the thread is
                 * kicked several times.  If we don't go back to sleep
                 * in the halted state we won't cleanly start up once
                 * the vCPU is enabled; cpu->halted ensures we sleep in
                 * qemu_wait_io_event below.
                 */
                g_assert(cpu->halted);
                break;
            case EXCP_ATOMIC:
                qemu_mutex_unlock_iothread();
                cpu_exec_step_atomic(cpu);
                qemu_mutex_lock_iothread();
                /* fallthrough */
            default:
                /* Ignore everything else. */
                break;
            }
        }

        qatomic_mb_set(&cpu->exit_request, 0);
        qemu_wait_io_event(cpu);
    } while (!cpu->unplug || cpu_can_run(cpu));

    qemu_tcg_destroy_vcpu(cpu);
    cpu_thread_signal_destroyed(cpu);
    qemu_mutex_unlock_iothread();
    rcu_unregister_thread();
    return NULL;
}
464
/*
 * Create (or attach to) the host thread that will run @cpu.  In MTTCG
 * mode every vCPU gets its own thread; in round-robin mode one thread
 * is created for the first vCPU and all later vCPUs share it.
 */
static void tcg_start_vcpu_thread(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];
    static QemuCond *single_tcg_halt_cond;
    static QemuThread *single_tcg_cpu_thread;
    static int tcg_region_inited;

    assert(tcg_enabled());
    /*
     * Initialize TCG regions once, on the first vCPU started.
     * parallel_cpus is only turned on for MTTCG with more than one
     * possible CPU.
     */
    if (!tcg_region_inited) {
        tcg_region_inited = 1;
        tcg_region_init();
        parallel_cpus = qemu_tcg_mttcg_enabled() && current_machine->smp.max_cpus > 1;
    }

    if (qemu_tcg_mttcg_enabled() || !single_tcg_cpu_thread) {
        cpu->thread = g_malloc0(sizeof(QemuThread));
        cpu->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(cpu->halt_cond);

        if (qemu_tcg_mttcg_enabled()) {
            /* Create a thread per vCPU with TCG (MTTCG). */
            snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
                     cpu->cpu_index);

            qemu_thread_create(cpu->thread, thread_name, tcg_cpu_thread_fn,
                               cpu, QEMU_THREAD_JOINABLE);

        } else {
            /* Share a single thread for all cpus with TCG. */
            snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "ALL CPUs/TCG");
            qemu_thread_create(cpu->thread, thread_name,
                               tcg_rr_cpu_thread_fn,
                               cpu, QEMU_THREAD_JOINABLE);

            single_tcg_halt_cond = cpu->halt_cond;
            single_tcg_cpu_thread = cpu->thread;
        }
#ifdef _WIN32
        cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
    } else {
        /* Non-MTTCG: reuse the single thread created above. */
        cpu->thread = single_tcg_cpu_thread;
        cpu->halt_cond = single_tcg_halt_cond;
        cpu->thread_id = first_cpu->thread_id;
        cpu->can_do_io = 1;
        cpu->created = true;
    }
}
520
521static int64_t tcg_get_virtual_clock(void)
522{
523 if (icount_enabled()) {
524 return icount_get();
525 }
526 return cpu_get_clock();
527}
528
529static int64_t tcg_get_elapsed_ticks(void)
530{
531 if (icount_enabled()) {
532 return icount_get();
533 }
534 return cpu_get_ticks();
535}
536
537
/*
 * Raise the interrupt bits in @mask on @cpu.  Must be called with the
 * iothread lock (BQL) held, as asserted below.
 */
static void tcg_handle_interrupt(CPUState *cpu, int mask)
{
    int old_mask;
    g_assert(qemu_mutex_iothread_locked());

    old_mask = cpu->interrupt_request;
    cpu->interrupt_request |= mask;

    /*
     * If called from another thread, kick the target vCPU in case its
     * halt condition is no longer satisfied; if called on the vCPU's
     * own thread, force the execution loop to notice via icount_decr.
     */
    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    } else {
        qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
        /* With icount, new interrupts may only arrive inside I/O code. */
        if (icount_enabled() &&
            !cpu->can_do_io
            && (mask & ~old_mask) != 0) {
            cpu_abort(cpu, "Raised interrupt while not in I/O function");
        }
    }
}
561
/* Dispatch table of TCG vCPU operations for the generic cpus layer. */
const CpusAccel tcg_cpus = {
    .create_vcpu_thread = tcg_start_vcpu_thread,
    .kick_vcpu_thread = tcg_kick_vcpu_thread,

    .handle_interrupt = tcg_handle_interrupt,

    .get_virtual_clock = tcg_get_virtual_clock,
    .get_elapsed_ticks = tcg_get_elapsed_ticks,
};
571