/*
 * CPU thread main loop - common bits for user and system mode emulation
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "exec/cpu-common.h"
#include "hw/core/cpu.h"
#include "sysemu/cpus.h"

static QemuMutex qemu_cpu_list_lock;
static QemuCond exclusive_cond;
static QemuCond exclusive_resume;
static QemuCond qemu_work_cond;

/* >= 1 if a thread is inside start_exclusive/end_exclusive.  Written
 * under qemu_cpu_list_lock, read with atomic operations.
 */
static int pending_cpus;

void qemu_init_cpu_list(void)
{
    /* This is needed because qemu_init_cpu_list is also called by the
     * child process after a fork.  */
    pending_cpus = 0;

    qemu_mutex_init(&qemu_cpu_list_lock);
    qemu_cond_init(&exclusive_cond);
    qemu_cond_init(&exclusive_resume);
    qemu_cond_init(&qemu_work_cond);
}

void cpu_list_lock(void)
{
    qemu_mutex_lock(&qemu_cpu_list_lock);
}

void cpu_list_unlock(void)
{
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}

static bool cpu_index_auto_assigned;

/* Auto-assigned indexes simply count the CPUs already on the list;
 * cpu_list_remove() asserts that auto-assigned CPUs are removed in LIFO
 * order, so the indexes stay dense.
 */
static int cpu_get_free_index(void)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    cpu_index_auto_assigned = true;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

/* Insert @cpu into the global CPU list, auto-assigning an index unless
 * the caller already picked one.
 */
void cpu_list_add(CPUState *cpu)
{
    qemu_mutex_lock(&qemu_cpu_list_lock);
    if (cpu->cpu_index == UNASSIGNED_CPU_INDEX) {
        cpu->cpu_index = cpu_get_free_index();
        assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX);
    } else {
        assert(!cpu_index_auto_assigned);
    }
    QTAILQ_INSERT_TAIL_RCU(&cpus, cpu, node);
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}

void cpu_list_remove(CPUState *cpu)
{
    qemu_mutex_lock(&qemu_cpu_list_lock);
    if (!QTAILQ_IN_USE(cpu, node)) {
        /* there is nothing to undo since cpu_exec_init() hasn't been called */
        qemu_mutex_unlock(&qemu_cpu_list_lock);
        return;
    }

    assert(!(cpu_index_auto_assigned && cpu != QTAILQ_LAST(&cpus)));

    QTAILQ_REMOVE_RCU(&cpus, cpu, node);
    cpu->cpu_index = UNASSIGNED_CPU_INDEX;
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}

struct qemu_work_item {
    struct qemu_work_item *next;
    run_on_cpu_func func;
    run_on_cpu_data data;
    bool free, exclusive, done;
};

/* Append @wi to @cpu's work list and kick the vCPU so that it runs
 * process_queued_cpu_work() soon.
 */
static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
{
    qemu_mutex_lock(&cpu->work_mutex);
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = wi;
    } else {
        cpu->queued_work_last->next = wi;
    }
    cpu->queued_work_last = wi;
    wi->next = NULL;
    wi->done = false;
    qemu_mutex_unlock(&cpu->work_mutex);

    qemu_cpu_kick(cpu);
}

/* Run @func on @cpu synchronously: if @cpu is not the current CPU, queue the
 * work item and sleep on @mutex (which the caller must hold) until it is done.
 */
void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,
                   QemuMutex *mutex)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(cpu, data);
        return;
    }

    wi.func = func;
    wi.data = data;
    wi.done = false;
    wi.free = false;
    wi.exclusive = false;

    queue_work_on_cpu(cpu, &wi);
    while (!atomic_mb_read(&wi.done)) {
        CPUState *self_cpu = current_cpu;

        qemu_cond_wait(&qemu_work_cond, mutex);
        current_cpu = self_cpu;
    }
}
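/* Editor's illustration (not part of the original file): a caller that wants a
 * function to run synchronously in another vCPU's thread passes the mutex it
 * currently holds, so the wait above can drop it.  The names set_halted_work
 * and some_mutex below are hypothetical; in system-mode QEMU the run_on_cpu()
 * wrapper plays this role, passing the BQL.
 *
 *     static void set_halted_work(CPUState *cpu, run_on_cpu_data data)
 *     {
 *         cpu->halted = data.host_int;        // runs in @cpu's own thread
 *     }
 *
 *     // caller side, with some_mutex held:
 *     do_run_on_cpu(cpu, set_halted_work, RUN_ON_CPU_HOST_INT(1), &some_mutex);
 */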
void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
{
    struct qemu_work_item *wi;

    wi = g_malloc0(sizeof(struct qemu_work_item));
    wi->func = func;
    wi->data = data;
    wi->free = true;

    queue_work_on_cpu(cpu, wi);
}

/* Wait for pending exclusive operations to complete.  The CPU list lock
   must be held.  */
static inline void exclusive_idle(void)
{
    while (pending_cpus) {
        qemu_cond_wait(&exclusive_resume, &qemu_cpu_list_lock);
    }
}

/* Start an exclusive operation.
   Must only be called from outside cpu_exec.  */
void start_exclusive(void)
{
    CPUState *other_cpu;
    int running_cpus;

    qemu_mutex_lock(&qemu_cpu_list_lock);
    exclusive_idle();

    /* Make all other cpus stop executing.  */
    atomic_set(&pending_cpus, 1);

    /* Write pending_cpus before reading other_cpu->running.  */
    smp_mb();
    running_cpus = 0;
    CPU_FOREACH(other_cpu) {
        if (atomic_read(&other_cpu->running)) {
            other_cpu->has_waiter = true;
            running_cpus++;
            qemu_cpu_kick(other_cpu);
        }
    }

    atomic_set(&pending_cpus, running_cpus + 1);
    while (pending_cpus > 1) {
        qemu_cond_wait(&exclusive_cond, &qemu_cpu_list_lock);
    }

    /* Can release mutex, no one will enter another exclusive
     * section until end_exclusive resets pending_cpus to 0.
     */
    qemu_mutex_unlock(&qemu_cpu_list_lock);

    current_cpu->in_exclusive_context = true;
}

/* Finish an exclusive operation.  */
void end_exclusive(void)
{
    current_cpu->in_exclusive_context = false;

    qemu_mutex_lock(&qemu_cpu_list_lock);
    atomic_set(&pending_cpus, 0);
    qemu_cond_broadcast(&exclusive_resume);
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}
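/* Editor's illustration (not part of the original file): code that must see
 * every vCPU stopped outside cpu_exec (for instance cpu_exec_step_atomic() in
 * accel/tcg) brackets the critical work with this pair.  do_global_update()
 * below is a hypothetical placeholder.
 *
 *     start_exclusive();      // returns once no vCPU is inside cpu_exec
 *     do_global_update();     // safe to touch state shared by all vCPUs
 *     end_exclusive();        // wake CPUs blocked in exclusive_idle()
 */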
/* Wait for exclusive ops to finish, and begin cpu execution.  */
void cpu_exec_start(CPUState *cpu)
{
    atomic_set(&cpu->running, true);

    /* Write cpu->running before reading pending_cpus.  */
    smp_mb();

    /* 1. start_exclusive saw cpu->running == true and pending_cpus >= 1.
     * After taking the lock we'll see cpu->has_waiter == true and run---not
     * for long because start_exclusive kicked us.  cpu_exec_end will
     * decrement pending_cpus and signal the waiter.
     *
     * 2. start_exclusive saw cpu->running == false but pending_cpus >= 1.
     * This includes the case when an exclusive item is running now.
     * Then we'll see cpu->has_waiter == false and wait for the item to
     * complete.
     *
     * 3. pending_cpus == 0.  Then start_exclusive is definitely going to
     * see cpu->running == true, and it will kick the CPU.
     */
    if (unlikely(atomic_read(&pending_cpus))) {
        qemu_mutex_lock(&qemu_cpu_list_lock);
        if (!cpu->has_waiter) {
            /* Not counted in pending_cpus, let the exclusive item
             * run.  Since we have the lock, just set cpu->running to true
             * while holding it; no need to check pending_cpus again.
             */
            atomic_set(&cpu->running, false);
            exclusive_idle();
            /* Now pending_cpus is zero.  */
            atomic_set(&cpu->running, true);
        } else {
            /* Counted in pending_cpus, go ahead and release the
             * waiter at cpu_exec_end.
             */
        }
        qemu_mutex_unlock(&qemu_cpu_list_lock);
    }
}

/* Mark cpu as not executing, and release pending exclusive ops.  */
void cpu_exec_end(CPUState *cpu)
{
    atomic_set(&cpu->running, false);

    /* Write cpu->running before reading pending_cpus.  */
    smp_mb();

    /* 1. start_exclusive saw cpu->running == true.  Then it will increment
     * pending_cpus and wait for exclusive_cond.  After taking the lock
     * we'll see cpu->has_waiter == true.
     *
     * 2. start_exclusive saw cpu->running == false but here pending_cpus >= 1.
     * This includes the case when an exclusive item started after setting
     * cpu->running to false and before we read pending_cpus.  Then we'll see
     * cpu->has_waiter == false and not touch pending_cpus.  The next call to
     * cpu_exec_start will run exclusive_idle if still necessary, thus waiting
     * for the item to complete.
     *
     * 3. pending_cpus == 0.  Then start_exclusive is definitely going to
     * see cpu->running == false, and it can ignore this CPU until the
     * next cpu_exec_start.
     */
    if (unlikely(atomic_read(&pending_cpus))) {
        qemu_mutex_lock(&qemu_cpu_list_lock);
        if (cpu->has_waiter) {
            cpu->has_waiter = false;
            atomic_set(&pending_cpus, pending_cpus - 1);
            if (pending_cpus == 1) {
                qemu_cond_signal(&exclusive_cond);
            }
        }
        qemu_mutex_unlock(&qemu_cpu_list_lock);
    }
}
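/* Editor's illustration (not part of the original file): callers bracket each
 * stretch of guest execution with this pair so that start_exclusive() can
 * account for them:
 *
 *     cpu_exec_start(cpu);
 *     trapnr = cpu_exec(cpu);     // may be kicked out by start_exclusive()
 *     cpu_exec_end(cpu);
 *
 * This is the shape used by the TCG vCPU threads and by the linux-user
 * cpu_loop() implementations.
 */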
void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func,
                           run_on_cpu_data data)
{
    struct qemu_work_item *wi;

    wi = g_malloc0(sizeof(struct qemu_work_item));
    wi->func = func;
    wi->data = data;
    wi->free = true;
    wi->exclusive = true;

    queue_work_on_cpu(cpu, wi);
}
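/* Editor's illustration (not part of the original file): the "safe" variant is
 * for work that must run while no other vCPU is executing guest code; the item
 * is dispatched inside start_exclusive()/end_exclusive() by
 * process_queued_cpu_work() below.  tb_flush() in accel/tcg is one such user,
 * conceptually:
 *
 *     async_safe_run_on_cpu(cpu, do_tb_flush,
 *                           RUN_ON_CPU_HOST_INT(tb_flush_count));
 *
 * where do_tb_flush() and tb_flush_count belong to that caller's own code.
 */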
void process_queued_cpu_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    if (cpu->queued_work_first == NULL) {
        return;
    }

    qemu_mutex_lock(&cpu->work_mutex);
    while (cpu->queued_work_first != NULL) {
        wi = cpu->queued_work_first;
        cpu->queued_work_first = wi->next;
        if (!cpu->queued_work_first) {
            cpu->queued_work_last = NULL;
        }
        qemu_mutex_unlock(&cpu->work_mutex);
        if (wi->exclusive) {
            /* Running work items outside the BQL avoids the following deadlock:
             * 1) start_exclusive() is called with the BQL taken while another
             * CPU is running; 2) cpu_exec in the other CPU tries to take the
             * BQL, so it goes to sleep; start_exclusive() is sleeping too, so
             * neither CPU can proceed.
             */
            qemu_mutex_unlock_iothread();
            start_exclusive();
            wi->func(cpu, wi->data);
            end_exclusive();
            qemu_mutex_lock_iothread();
        } else {
            wi->func(cpu, wi->data);
        }
        qemu_mutex_lock(&cpu->work_mutex);
        if (wi->free) {
            g_free(wi);
        } else {
            atomic_mb_set(&wi->done, true);
        }
    }
    qemu_mutex_unlock(&cpu->work_mutex);
    qemu_cond_broadcast(&qemu_work_cond);
}
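/* Editor's illustration (not part of the original file): a vCPU thread is
 * expected to drain its work queue every time it is kicked or goes idle.
 * A simplified, hypothetical thread body (vcpu_should_run() is a placeholder;
 * in system emulation the real call sits in the wait-for-I/O-event path):
 *
 *     while (vcpu_should_run(cpu)) {
 *         cpu_exec_start(cpu);
 *         cpu_exec(cpu);
 *         cpu_exec_end(cpu);
 *         process_queued_cpu_work(cpu);   // run work queued by other threads
 *     }
 *
 * Synchronous callers of do_run_on_cpu() sleep on qemu_work_cond until the
 * broadcast above, with wi.done guarding against spurious wakeups.
 */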