/* qemu/cpu-common.c */
/*
 * CPU thread main loop - common bits for user and system mode emulation
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "exec/cpu-common.h"
#include "hw/core/cpu.h"
#include "qemu/lockable.h"
#include "trace/trace-root.h"

QemuMutex qemu_cpu_list_lock;
static QemuCond exclusive_cond;
static QemuCond exclusive_resume;
static QemuCond qemu_work_cond;

/* >= 1 if a thread is inside start_exclusive/end_exclusive.  Written
 * under qemu_cpu_list_lock, read with atomic operations.
 */
static int pending_cpus;

void qemu_init_cpu_list(void)
{
    /* This is needed because qemu_init_cpu_list is also called by the
     * child process in a fork.  */
    pending_cpus = 0;

    qemu_mutex_init(&qemu_cpu_list_lock);
    qemu_cond_init(&exclusive_cond);
    qemu_cond_init(&exclusive_resume);
    qemu_cond_init(&qemu_work_cond);
}

void cpu_list_lock(void)
{
    qemu_mutex_lock(&qemu_cpu_list_lock);
}

void cpu_list_unlock(void)
{
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}

int cpu_get_free_index(void)
{
    CPUState *some_cpu;
    int max_cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        if (some_cpu->cpu_index >= max_cpu_index) {
            max_cpu_index = some_cpu->cpu_index + 1;
        }
    }
    return max_cpu_index;
}

CPUTailQ cpus_queue = QTAILQ_HEAD_INITIALIZER(cpus_queue);
static unsigned int cpu_list_generation_id;

unsigned int cpu_list_generation_id_get(void)
{
    return cpu_list_generation_id;
}

void cpu_list_add(CPUState *cpu)
{
    static bool cpu_index_auto_assigned;

    QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
    if (cpu->cpu_index == UNASSIGNED_CPU_INDEX) {
        cpu_index_auto_assigned = true;
        cpu->cpu_index = cpu_get_free_index();
        assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX);
    } else {
        assert(!cpu_index_auto_assigned);
    }
    QTAILQ_INSERT_TAIL_RCU(&cpus_queue, cpu, node);
    cpu_list_generation_id++;
}

void cpu_list_remove(CPUState *cpu)
{
    QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
    if (!QTAILQ_IN_USE(cpu, node)) {
        /* there is nothing to undo since cpu_exec_init() hasn't been called */
        return;
    }

    QTAILQ_REMOVE_RCU(&cpus_queue, cpu, node);
    cpu->cpu_index = UNASSIGNED_CPU_INDEX;
    cpu_list_generation_id++;
}

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

/* current CPU in the current thread. It is only valid inside cpu_exec() */
__thread CPUState *current_cpu;

struct qemu_work_item {
    QSIMPLEQ_ENTRY(qemu_work_item) node;
    run_on_cpu_func func;
    run_on_cpu_data data;
    bool free, exclusive, done;
};

static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
{
    qemu_mutex_lock(&cpu->work_mutex);
    QSIMPLEQ_INSERT_TAIL(&cpu->work_list, wi, node);
    wi->done = false;
    qemu_mutex_unlock(&cpu->work_mutex);

    qemu_cpu_kick(cpu);
}

void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,
                   QemuMutex *mutex)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(cpu, data);
        return;
    }

    wi.func = func;
    wi.data = data;
    wi.done = false;
    wi.free = false;
    wi.exclusive = false;

    queue_work_on_cpu(cpu, &wi);
    while (!qatomic_load_acquire(&wi.done)) {
        CPUState *self_cpu = current_cpu;

        qemu_cond_wait(&qemu_work_cond, mutex);
        current_cpu = self_cpu;
    }
}
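
/*
 * Usage sketch (illustrative, not part of this file): callers normally go
 * through the run_on_cpu() wrapper, which supplies the lock to drop while
 * waiting (the BQL in system mode).  The callback below is hypothetical.
 *
 *     static void bump_counter(CPUState *cpu, run_on_cpu_data data)
 *     {
 *         int *counter = data.host_ptr;   // runs in @cpu's own thread
 *         (*counter)++;
 *     }
 *
 *     int n = 0;
 *     run_on_cpu(cpu, bump_counter, RUN_ON_CPU_HOST_PTR(&n));
 *     // run_on_cpu returns only after bump_counter has run; n == 1 here.
 */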

void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
{
    struct qemu_work_item *wi;

    wi = g_new0(struct qemu_work_item, 1);
    wi->func = func;
    wi->data = data;
    wi->free = true;

    queue_work_on_cpu(cpu, wi);
}
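
/*
 * Asynchronous variant, sketched under the same assumptions: the caller
 * does not wait, so any payload must outlive the call, e.g. by
 * heap-allocating it and releasing it in the (hypothetical) callback.
 *
 *     static void greet_cpu(CPUState *cpu, run_on_cpu_data data)
 *     {
 *         g_autofree char *msg = data.host_ptr;   // callback owns msg
 *         printf("cpu %d says %s\n", cpu->cpu_index, msg);
 *     }
 *
 *     async_run_on_cpu(cpu, greet_cpu, RUN_ON_CPU_HOST_PTR(g_strdup("hi")));
 */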

/* Wait for pending exclusive operations to complete.  The CPU list lock
   must be held.  */
static inline void exclusive_idle(void)
{
    while (pending_cpus) {
        qemu_cond_wait(&exclusive_resume, &qemu_cpu_list_lock);
    }
}

/* Start an exclusive operation.
   Must only be called from outside cpu_exec.  */
void start_exclusive(void)
{
    CPUState *other_cpu;
    int running_cpus;

    /* Ensure we are not running, or start_exclusive will be blocked. */
    g_assert(!current_cpu->running);

    if (current_cpu->exclusive_context_count) {
        current_cpu->exclusive_context_count++;
        return;
    }

    qemu_mutex_lock(&qemu_cpu_list_lock);
    exclusive_idle();

    /* Make all other cpus stop executing.  */
    qatomic_set(&pending_cpus, 1);

    /* Write pending_cpus before reading other_cpu->running.  */
    smp_mb();
    running_cpus = 0;
    CPU_FOREACH(other_cpu) {
        if (qatomic_read(&other_cpu->running)) {
            other_cpu->has_waiter = true;
            running_cpus++;
            qemu_cpu_kick(other_cpu);
        }
    }

    qatomic_set(&pending_cpus, running_cpus + 1);
    while (pending_cpus > 1) {
        qemu_cond_wait(&exclusive_cond, &qemu_cpu_list_lock);
    }

    /* Can release mutex, no one will enter another exclusive
     * section until end_exclusive resets pending_cpus to 0.
     */
    qemu_mutex_unlock(&qemu_cpu_list_lock);

    current_cpu->exclusive_context_count = 1;
}

/* Finish an exclusive operation.  */
void end_exclusive(void)
{
    current_cpu->exclusive_context_count--;
    if (current_cpu->exclusive_context_count) {
        return;
    }

    qemu_mutex_lock(&qemu_cpu_list_lock);
    qatomic_set(&pending_cpus, 0);
    qemu_cond_broadcast(&exclusive_resume);
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}
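
/*
 * Illustrative pattern for the pair above: a thread that is not inside
 * cpu_exec() and must observe all vCPUs stopped brackets its work with
 * start/end_exclusive.  exclusive_context_count makes the section
 * recursive:
 *
 *     start_exclusive();
 *     // no vCPU is inside cpu_exec() here; mutate shared state safely
 *     start_exclusive();   // nested call only bumps the count
 *     end_exclusive();
 *     end_exclusive();     // outermost call wakes the waiting vCPUs
 */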

/* Wait for exclusive ops to finish, and begin cpu execution.  */
void cpu_exec_start(CPUState *cpu)
{
    qatomic_set(&cpu->running, true);

    /* Write cpu->running before reading pending_cpus.  */
    smp_mb();

    /* 1. start_exclusive saw cpu->running == true and pending_cpus >= 1.
     * After taking the lock we'll see cpu->has_waiter == true and run---not
     * for long because start_exclusive kicked us.  cpu_exec_end will
     * decrement pending_cpus and signal the waiter.
     *
     * 2. start_exclusive saw cpu->running == false but pending_cpus >= 1.
     * This includes the case when an exclusive item is running now.
     * Then we'll see cpu->has_waiter == false and wait for the item to
     * complete.
     *
     * 3. pending_cpus == 0.  Then start_exclusive is definitely going to
     * see cpu->running == true, and it will kick the CPU.
     */
    if (unlikely(qatomic_read(&pending_cpus))) {
        QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
        if (!cpu->has_waiter) {
            /* Not counted in pending_cpus, let the exclusive item
             * run.  Since we have the lock, just set cpu->running to true
             * while holding it; no need to check pending_cpus again.
             */
            qatomic_set(&cpu->running, false);
            exclusive_idle();
            /* Now pending_cpus is zero.  */
            qatomic_set(&cpu->running, true);
        } else {
            /* Counted in pending_cpus, go ahead and release the
             * waiter at cpu_exec_end.
             */
        }
    }
}

/* Mark cpu as not executing, and release pending exclusive ops.  */
void cpu_exec_end(CPUState *cpu)
{
    qatomic_set(&cpu->running, false);

    /* Write cpu->running before reading pending_cpus.  */
    smp_mb();

    /* 1. start_exclusive saw cpu->running == true.  Then it will increment
     * pending_cpus and wait for exclusive_cond.  After taking the lock
     * we'll see cpu->has_waiter == true.
     *
     * 2. start_exclusive saw cpu->running == false but here pending_cpus >= 1.
     * This includes the case when an exclusive item started after setting
     * cpu->running to false and before we read pending_cpus.  Then we'll see
     * cpu->has_waiter == false and not touch pending_cpus.  The next call to
     * cpu_exec_start will run exclusive_idle if still necessary, thus waiting
     * for the item to complete.
     *
     * 3. pending_cpus == 0.  Then start_exclusive is definitely going to
     * see cpu->running == false, and it can ignore this CPU until the
     * next cpu_exec_start.
     */
    if (unlikely(qatomic_read(&pending_cpus))) {
        QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
        if (cpu->has_waiter) {
            cpu->has_waiter = false;
            qatomic_set(&pending_cpus, pending_cpus - 1);
            if (pending_cpus == 1) {
                qemu_cond_signal(&exclusive_cond);
            }
        }
    }
}
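
/*
 * How the two hooks above are meant to be used, sketched from the
 * user-mode main loops (simplified; trap handling omitted):
 *
 *     for (;;) {
 *         cpu_exec_start(cpu);        // waits out any exclusive section
 *         trapnr = cpu_exec(cpu);
 *         cpu_exec_end(cpu);          // releases a pending start_exclusive()
 *         process_queued_cpu_work(cpu);
 *     }
 */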

void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func,
                           run_on_cpu_data data)
{
    struct qemu_work_item *wi;

    wi = g_new0(struct qemu_work_item, 1);
    wi->func = func;
    wi->data = data;
    wi->free = true;
    wi->exclusive = true;

    queue_work_on_cpu(cpu, wi);
}
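
/*
 * Illustrative use: "safe" work runs between start_exclusive() and
 * end_exclusive(), so every other vCPU is stopped while it executes;
 * TCG's tb_flush(), for instance, is queued this way.  The callback
 * below is hypothetical.
 *
 *     static void drop_caches(CPUState *cpu, run_on_cpu_data data)
 *     {
 *         // no other vCPU can race with us here
 *     }
 *
 *     async_safe_run_on_cpu(first_cpu, drop_caches, RUN_ON_CPU_NULL);
 */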

void free_queued_cpu_work(CPUState *cpu)
{
    while (!QSIMPLEQ_EMPTY(&cpu->work_list)) {
        struct qemu_work_item *wi = QSIMPLEQ_FIRST(&cpu->work_list);
        QSIMPLEQ_REMOVE_HEAD(&cpu->work_list, node);
        if (wi->free) {
            g_free(wi);
        }
    }
}

void process_queued_cpu_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    qemu_mutex_lock(&cpu->work_mutex);
    if (QSIMPLEQ_EMPTY(&cpu->work_list)) {
        qemu_mutex_unlock(&cpu->work_mutex);
        return;
    }
    while (!QSIMPLEQ_EMPTY(&cpu->work_list)) {
        wi = QSIMPLEQ_FIRST(&cpu->work_list);
        QSIMPLEQ_REMOVE_HEAD(&cpu->work_list, node);
        qemu_mutex_unlock(&cpu->work_mutex);
        if (wi->exclusive) {
            /* Running work items outside the BQL avoids the following deadlock:
             * 1) start_exclusive() is called with the BQL taken while another
             * CPU is running; 2) cpu_exec in the other CPU tries to take the
             * BQL, so it goes to sleep; start_exclusive() is sleeping too, so
             * neither CPU can proceed.
             */
            bql_unlock();
            start_exclusive();
            wi->func(cpu, wi->data);
            end_exclusive();
            bql_lock();
        } else {
            wi->func(cpu, wi->data);
        }
        qemu_mutex_lock(&cpu->work_mutex);
        if (wi->free) {
            g_free(wi);
        } else {
            qatomic_store_release(&wi->done, true);
        }
    }
    qemu_mutex_unlock(&cpu->work_mutex);
    qemu_cond_broadcast(&qemu_work_cond);
}

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    if (cpu->cc->gdb_adjust_breakpoint) {
        pc = cpu->cc->gdb_adjust_breakpoint(cpu, pc);
    }

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    if (breakpoint) {
        *breakpoint = bp;
    }

    trace_breakpoint_insert(cpu->cpu_index, pc, flags);
    return 0;
}
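
/*
 * Caller sketch (illustrative): the gdbstub inserts breakpoints with
 * BP_GDB and no back-reference, while a caller that wants to remove its
 * breakpoint later keeps the pointer returned via the last argument.
 *
 *     cpu_breakpoint_insert(cpu, pc, BP_GDB, NULL);
 *
 *     CPUBreakpoint *bp;
 *     cpu_breakpoint_insert(cpu, pc, BP_CPU, &bp);
 *     // ...
 *     cpu_breakpoint_remove_by_ref(cpu, bp);
 */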

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    if (cpu->cc->gdb_adjust_breakpoint) {
        pc = cpu->cc->gdb_adjust_breakpoint(cpu, pc);
    }

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *bp)
{
    QTAILQ_REMOVE(&cpu->breakpoints, bp, entry);

    trace_breakpoint_remove(cpu->cpu_index, bp->pc, bp->flags);
    g_free(bp);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}
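
/*
 * Usage sketch: on gdb detach, all GDB-injected breakpoints can be
 * dropped in one call while target-defined ones (BP_CPU) survive:
 *
 *     cpu_breakpoint_remove_all(cpu, BP_GDB);
 */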