qemu/accel/tcg/tcg-cpus.c
/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2014 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "sysemu/tcg.h"
#include "sysemu/replay.h"
#include "qemu/main-loop.h"
#include "qemu/guest-random.h"
#include "exec/exec-all.h"
#include "hw/boards.h"

#include "tcg-cpus.h"

/* Kick all RR vCPUs */
static void qemu_cpu_kick_rr_cpus(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_exit(cpu);
    }
}

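/*
 * MTTCG gives each vCPU its own thread, so kicking only the target is
 * enough; the shared round-robin thread cannot know which vCPU it is
 * currently executing, so all vCPUs are kicked.
 */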
static void tcg_kick_vcpu_thread(CPUState *cpu)
{
    if (qemu_tcg_mttcg_enabled()) {
        cpu_exit(cpu);
    } else {
        qemu_cpu_kick_rr_cpus();
    }
}

/*
 * TCG vCPU kick timer
 *
 * The kick timer is responsible for moving single threaded vCPU
 * emulation on to the next vCPU. If more than one vCPU is running, a
 * timer event will force a cpu_exit() so the next vCPU can get
 * scheduled.
 *
 * The timer is removed while all vCPUs are idle and restarted once a
 * vCPU becomes runnable again.
 */

static QEMUTimer *tcg_kick_vcpu_timer;
static CPUState *tcg_current_rr_cpu;

#define TCG_KICK_PERIOD (NANOSECONDS_PER_SECOND / 10)
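/* i.e. the RR thread is kicked every 100 ms of QEMU_CLOCK_VIRTUAL time */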

static inline int64_t qemu_tcg_next_kick(void)
{
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD;
}

/*
 * Kick the currently round-robin scheduled vCPU. The loop re-reads
 * tcg_current_rr_cpu because the RR thread may have moved on to the
 * next vCPU between the read and the cpu_exit(); repeat until the
 * value is stable.
 */
static void qemu_cpu_kick_rr_next_cpu(void)
{
    CPUState *cpu;
    do {
        cpu = qatomic_mb_read(&tcg_current_rr_cpu);
        if (cpu) {
            cpu_exit(cpu);
        }
    } while (cpu != qatomic_mb_read(&tcg_current_rr_cpu));
}

static void kick_tcg_thread(void *opaque)
{
    timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
    qemu_cpu_kick_rr_next_cpu();
}

static void start_tcg_kick_timer(void)
{
    assert(!mttcg_enabled);
    /* the timer is only needed when more than one vCPU shares the thread */
    if (!tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
        tcg_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                           kick_tcg_thread, NULL);
    }
    if (tcg_kick_vcpu_timer && !timer_pending(tcg_kick_vcpu_timer)) {
        timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
    }
}

static void stop_tcg_kick_timer(void)
{
    assert(!mttcg_enabled);
    if (tcg_kick_vcpu_timer && timer_pending(tcg_kick_vcpu_timer)) {
        timer_del(tcg_kick_vcpu_timer);
    }
}

static void qemu_tcg_destroy_vcpu(CPUState *cpu)
{
    /* currently a no-op: TCG has no per-vCPU state to tear down here */
}

static void qemu_tcg_rr_wait_io_event(void)
{
    CPUState *cpu;

    while (all_cpu_threads_idle()) {
        stop_tcg_kick_timer();
        qemu_cond_wait_iothread(first_cpu->halt_cond);
    }

    start_tcg_kick_timer();

    CPU_FOREACH(cpu) {
        qemu_wait_io_event_common(cpu);
    }
}

static int64_t tcg_get_icount_limit(void)
{
    int64_t deadline;

    if (replay_mode != REPLAY_MODE_PLAY) {
        /*
         * Include all the timers, because they may need attention.
         * Overly long CPU execution may create unnecessary delay in
         * the UI.
         */
        deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
                                              QEMU_TIMER_ATTR_ALL);
        /* Check realtime timers, because they help with input processing */
        deadline = qemu_soonest_timeout(deadline,
                qemu_clock_deadline_ns_all(QEMU_CLOCK_REALTIME,
                                           QEMU_TIMER_ATTR_ALL));

        /*
         * Maintain prior (possibly buggy) behaviour where if no deadline
         * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more
         * than INT32_MAX nanoseconds ahead, we still use INT32_MAX
         * nanoseconds.
         */
        if ((deadline < 0) || (deadline > INT32_MAX)) {
            deadline = INT32_MAX;
        }

        return icount_round(deadline);
    } else {
        return replay_get_instructions();
    }
}

static void notify_aio_contexts(void)
{
    /* Wake up other AioContexts.  */
    qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
}

static void handle_icount_deadline(void)
{
    assert(qemu_in_vcpu_thread());
    if (icount_enabled()) {
        int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
                                                      QEMU_TIMER_ATTR_ALL);

        if (deadline == 0) {
            notify_aio_contexts();
        }
    }
}

static void prepare_icount_for_run(CPUState *cpu)
{
    if (icount_enabled()) {
        int insns_left;

        /*
         * These should always be cleared by process_icount_data after
         * each vCPU execution. However u16.high can be raised
         * asynchronously by cpu_exit/cpu_interrupt/tcg_handle_interrupt.
         */
        g_assert(cpu_neg(cpu)->icount_decr.u16.low == 0);
        g_assert(cpu->icount_extra == 0);

        cpu->icount_budget = tcg_get_icount_limit();
        insns_left = MIN(0xffff, cpu->icount_budget);
        cpu_neg(cpu)->icount_decr.u16.low = insns_left;
        cpu->icount_extra = cpu->icount_budget - insns_left;
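        /*
         * e.g. a budget of 0x12345 instructions is split into
         * u16.low = 0xffff and icount_extra = 0x2346; the low 16 bits
         * are consumed first and then topped up from icount_extra by
         * the cpu-exec loop.
         */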

        replay_mutex_lock();

        if (cpu->icount_budget == 0 && replay_has_checkpoint()) {
            notify_aio_contexts();
        }
    }
}

static void process_icount_data(CPUState *cpu)
{
    if (icount_enabled()) {
        /* Account for executed instructions */
        icount_update(cpu);

        /* Reset the counters */
        cpu_neg(cpu)->icount_decr.u16.low = 0;
        cpu->icount_extra = 0;
        cpu->icount_budget = 0;

        replay_account_executed_instructions();

        replay_mutex_unlock();
    }
}

static int tcg_cpu_exec(CPUState *cpu)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

    assert(tcg_enabled());
#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    cpu_exec_start(cpu);
    ret = cpu_exec(cpu);
    cpu_exec_end(cpu);
#ifdef CONFIG_PROFILER
    qatomic_set(&tcg_ctx->prof.cpu_exec_time,
                tcg_ctx->prof.cpu_exec_time + profile_getclock() - ti);
#endif
    return ret;
}

/*
 * Destroy any remaining vCPUs which have been unplugged and have
 * finished running
 */
static void deal_with_unplugged_cpus(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->unplug && !cpu_can_run(cpu)) {
            qemu_tcg_destroy_vcpu(cpu);
            cpu_thread_signal_destroyed(cpu);
            break;
        }
    }
}

/*
 * Single-threaded TCG
 *
 * In the single-threaded case each vCPU is simulated in turn. If
 * there is more than a single vCPU we create a simple timer to kick
 * the vCPU and ensure we don't get stuck in a tight loop in one vCPU.
 * This is done explicitly rather than relying on side-effects
 * elsewhere.
 */

static void *tcg_rr_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    assert(tcg_enabled());
    rcu_register_thread();
    tcg_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);

    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;
    cpu_thread_signal_created(cpu);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);

    /* wait for initial kick-off after machine start */
    while (first_cpu->stopped) {
        qemu_cond_wait_iothread(first_cpu->halt_cond);

        /* process any pending work */
        CPU_FOREACH(cpu) {
            current_cpu = cpu;
            qemu_wait_io_event_common(cpu);
        }
    }

    start_tcg_kick_timer();

    cpu = first_cpu;

    /* process any pending work */
    cpu->exit_request = 1;

    while (1) {
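        /*
         * Lock ordering: the replay mutex must be taken outside the
         * BQL (iothread mutex), hence the unlock/relock dance below.
         */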
        qemu_mutex_unlock_iothread();
        replay_mutex_lock();
        qemu_mutex_lock_iothread();
        /* Account partial waits to QEMU_CLOCK_VIRTUAL.  */
        icount_account_warp_timer();

        /*
         * Run the timers here.  This is much more efficient than
         * waking up the I/O thread and waiting for completion.
         */
        handle_icount_deadline();

        replay_mutex_unlock();

        if (!cpu) {
            cpu = first_cpu;
        }

        while (cpu && cpu_work_list_empty(cpu) && !cpu->exit_request) {

            qatomic_mb_set(&tcg_current_rr_cpu, cpu);
            current_cpu = cpu;

            qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
                              (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);

            if (cpu_can_run(cpu)) {
                int r;

                qemu_mutex_unlock_iothread();
                prepare_icount_for_run(cpu);

                r = tcg_cpu_exec(cpu);

                process_icount_data(cpu);
                qemu_mutex_lock_iothread();

                if (r == EXCP_DEBUG) {
                    cpu_handle_guest_debug(cpu);
                    break;
                } else if (r == EXCP_ATOMIC) {
                    qemu_mutex_unlock_iothread();
                    cpu_exec_step_atomic(cpu);
                    qemu_mutex_lock_iothread();
                    break;
                }
            } else if (cpu->stop) {
                if (cpu->unplug) {
                    cpu = CPU_NEXT(cpu);
                }
                break;
            }

            cpu = CPU_NEXT(cpu);
        } /* while (cpu && cpu_work_list_empty(cpu) && !cpu->exit_request) */

        /* Does not need qatomic_mb_set because a spurious wakeup is okay.  */
        qatomic_set(&tcg_current_rr_cpu, NULL);

        if (cpu && cpu->exit_request) {
            qatomic_mb_set(&cpu->exit_request, 0);
        }

        if (icount_enabled() && all_cpu_threads_idle()) {
            /*
             * When all cpus are sleeping (e.g. in WFI), to avoid a deadlock
             * in the main_loop, wake it up in order to start the warp timer.
             */
            qemu_notify_event();
        }

        qemu_tcg_rr_wait_io_event();
        deal_with_unplugged_cpus();
    }

    rcu_unregister_thread();
    return NULL;
}

/*
 * Multi-threaded TCG
 *
 * In the multi-threaded case each vCPU has its own thread. The TLS
 * variable current_cpu can be used deep in the code to find the
 * current CPUState for a given thread.
 */
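
/* Note: icount is not supported in MTTCG mode (asserted below). */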

static void *tcg_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    assert(tcg_enabled());
    g_assert(!icount_enabled());

    rcu_register_thread();
    tcg_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);

    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;
    current_cpu = cpu;
    cpu_thread_signal_created(cpu);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);

    /* process any pending work */
    cpu->exit_request = 1;

    do {
        if (cpu_can_run(cpu)) {
            int r;
            qemu_mutex_unlock_iothread();
            r = tcg_cpu_exec(cpu);
            qemu_mutex_lock_iothread();
            switch (r) {
            case EXCP_DEBUG:
                cpu_handle_guest_debug(cpu);
                break;
            case EXCP_HALTED:
                /*
                 * during start-up the vCPU is reset and the thread is
                 * kicked several times. If we don't ensure we go back
                 * to sleep in the halted state we won't cleanly
                 * start-up when the vCPU is enabled.
                 *
                 * cpu->halted should ensure we sleep in wait_io_event
                 */
                g_assert(cpu->halted);
                break;
            case EXCP_ATOMIC:
                qemu_mutex_unlock_iothread();
                cpu_exec_step_atomic(cpu);
                qemu_mutex_lock_iothread();
                /* fall through */
            default:
                /* Ignore everything else? */
                break;
            }
        }

        qatomic_mb_set(&cpu->exit_request, 0);
        qemu_wait_io_event(cpu);
    } while (!cpu->unplug || cpu_can_run(cpu));

    qemu_tcg_destroy_vcpu(cpu);
    cpu_thread_signal_destroyed(cpu);
    qemu_mutex_unlock_iothread();
    rcu_unregister_thread();
    return NULL;
}

static void tcg_start_vcpu_thread(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];
    static QemuCond *single_tcg_halt_cond;
    static QemuThread *single_tcg_cpu_thread;
    static int tcg_region_inited;

    assert(tcg_enabled());
    /*
     * Initialize TCG regions--once. Now is a good time, because:
     * (1) TCG's init context, prologue and target globals have been set up.
     * (2) qemu_tcg_mttcg_enabled() works now (TCG init code runs before the
     *     -accel flag is processed, so the check doesn't work then).
     */
    if (!tcg_region_inited) {
        tcg_region_inited = 1;
        tcg_region_init();
        parallel_cpus = qemu_tcg_mttcg_enabled() &&
                        current_machine->smp.max_cpus > 1;
    }

    if (qemu_tcg_mttcg_enabled() || !single_tcg_cpu_thread) {
        cpu->thread = g_malloc0(sizeof(QemuThread));
        cpu->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(cpu->halt_cond);

        if (qemu_tcg_mttcg_enabled()) {
            /* create a thread per vCPU with TCG (MTTCG) */
            snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
                     cpu->cpu_index);

            qemu_thread_create(cpu->thread, thread_name, tcg_cpu_thread_fn,
                               cpu, QEMU_THREAD_JOINABLE);

        } else {
            /* share a single thread for all cpus with TCG */
            snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "ALL CPUs/TCG");
            qemu_thread_create(cpu->thread, thread_name,
                               tcg_rr_cpu_thread_fn,
                               cpu, QEMU_THREAD_JOINABLE);

            single_tcg_halt_cond = cpu->halt_cond;
            single_tcg_cpu_thread = cpu->thread;
        }
#ifdef _WIN32
        cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
    } else {
        /* For non-MTTCG cases we share the thread */
        cpu->thread = single_tcg_cpu_thread;
        cpu->halt_cond = single_tcg_halt_cond;
        cpu->thread_id = first_cpu->thread_id;
        cpu->can_do_io = 1;
        cpu->created = true;
    }
}

static int64_t tcg_get_virtual_clock(void)
{
    if (icount_enabled()) {
        return icount_get();
    }
    return cpu_get_clock();
}

static int64_t tcg_get_elapsed_ticks(void)
{
    if (icount_enabled()) {
        return icount_get();
    }
    return cpu_get_ticks();
}

/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *cpu, int mask)
{
    int old_mask;
    g_assert(qemu_mutex_iothread_locked());

    old_mask = cpu->interrupt_request;
    cpu->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    } else {
        qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
        if (icount_enabled() &&
            !cpu->can_do_io
            && (mask & ~old_mask) != 0) {
            cpu_abort(cpu, "Raised interrupt while not in I/O function");
        }
    }
}

const CpusAccel tcg_cpus = {
    .create_vcpu_thread = tcg_start_vcpu_thread,
    .kick_vcpu_thread = tcg_kick_vcpu_thread,

    .handle_interrupt = tcg_handle_interrupt,

    .get_virtual_clock = tcg_get_virtual_clock,
    .get_elapsed_ticks = tcg_get_elapsed_ticks,
};
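
/*
 * A sketch of how this table is consumed (assuming the QEMU 5.2-era
 * CpusAccel interface): the accelerator's init code registers it with
 * the core, e.g.
 *
 *     cpus_register_accel(&tcg_cpus);
 *
 * after which the core invokes create_vcpu_thread() for each vCPU and
 * routes kicks and interrupts through the hooks above.
 */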