qemu/cpus-common.c
/*
 * CPU thread main loop - common bits for user and system mode emulation
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "exec/cpu-common.h"
#include "hw/core/cpu.h"
#include "sysemu/cpus.h"
#include "qemu/lockable.h"

static QemuMutex qemu_cpu_list_lock;
static QemuCond exclusive_cond;
static QemuCond exclusive_resume;
static QemuCond qemu_work_cond;

/* >= 1 if a thread is inside start_exclusive/end_exclusive.  Written
 * under qemu_cpu_list_lock, read with atomic operations.
 */
static int pending_cpus;

void qemu_init_cpu_list(void)
{
    /* This is needed because qemu_init_cpu_list is also called by the
     * child process in a fork.  */
    pending_cpus = 0;

    qemu_mutex_init(&qemu_cpu_list_lock);
    qemu_cond_init(&exclusive_cond);
    qemu_cond_init(&exclusive_resume);
    qemu_cond_init(&qemu_work_cond);
}

void cpu_list_lock(void)
{
    qemu_mutex_lock(&qemu_cpu_list_lock);
}

void cpu_list_unlock(void)
{
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}

static bool cpu_index_auto_assigned;

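/* Return the first index past the highest cpu_index currently on the
 * list, e.g. 3 when CPUs 0, 1 and 2 are registered.  Called with
 * qemu_cpu_list_lock held.
 */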
static int cpu_get_free_index(void)
{
    CPUState *some_cpu;
    int max_cpu_index = 0;

    cpu_index_auto_assigned = true;
    CPU_FOREACH(some_cpu) {
        if (some_cpu->cpu_index >= max_cpu_index) {
            max_cpu_index = some_cpu->cpu_index + 1;
        }
    }
    return max_cpu_index;
}

CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
static unsigned int cpu_list_generation_id;

unsigned int cpu_list_generation_id_get(void)
{
    return cpu_list_generation_id;
}

void cpu_list_add(CPUState *cpu)
{
    QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
    if (cpu->cpu_index == UNASSIGNED_CPU_INDEX) {
        cpu->cpu_index = cpu_get_free_index();
        assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX);
    } else {
        assert(!cpu_index_auto_assigned);
    }
    QTAILQ_INSERT_TAIL_RCU(&cpus, cpu, node);
    cpu_list_generation_id++;
}

void cpu_list_remove(CPUState *cpu)
{
    QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
    if (!QTAILQ_IN_USE(cpu, node)) {
        /* there is nothing to undo since cpu_exec_init() hasn't been called */
        return;
    }

    QTAILQ_REMOVE_RCU(&cpus, cpu, node);
    cpu->cpu_index = UNASSIGNED_CPU_INDEX;
    cpu_list_generation_id++;
}

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

/* current CPU in the current thread. It is only valid inside cpu_exec() */
__thread CPUState *current_cpu;

struct qemu_work_item {
    QSIMPLEQ_ENTRY(qemu_work_item) node;
    run_on_cpu_func func;
    run_on_cpu_data data;
    bool free, exclusive, done;
};

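/* Append WI to CPU's work list and kick the vCPU thread so that it
 * notices the work and calls process_queued_cpu_work() soon.
 */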
static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
{
    qemu_mutex_lock(&cpu->work_mutex);
    QSIMPLEQ_INSERT_TAIL(&cpu->work_list, wi, node);
    wi->done = false;
    qemu_mutex_unlock(&cpu->work_mutex);

    qemu_cpu_kick(cpu);
}

void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,
                   QemuMutex *mutex)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(cpu, data);
        return;
    }

    wi.func = func;
    wi.data = data;
    wi.done = false;
    wi.free = false;
    wi.exclusive = false;

    queue_work_on_cpu(cpu, &wi);
    while (!qatomic_mb_read(&wi.done)) {
        CPUState *self_cpu = current_cpu;

        qemu_cond_wait(&qemu_work_cond, mutex);
        current_cpu = self_cpu;
    }
}
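
/* Usage sketch (illustrative, not part of this file): the run_on_cpu()
 * wrapper, defined elsewhere, calls do_run_on_cpu() with the BQL as
 * MUTEX, so a caller holding the BQL can run a function synchronously
 * on another vCPU.  "do_something" is a hypothetical callback:
 *
 *     static void do_something(CPUState *cpu, run_on_cpu_data data)
 *     {
 *         ...                  executes on the target vCPU thread
 *     }
 *
 *     run_on_cpu(cpu, do_something, RUN_ON_CPU_HOST_INT(42));
 *
 * The calling thread blocks on qemu_work_cond until wi.done is set by
 * process_queued_cpu_work().
 */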

void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
{
    struct qemu_work_item *wi;

    wi = g_new0(struct qemu_work_item, 1);
    wi->func = func;
    wi->data = data;
    wi->free = true;

    queue_work_on_cpu(cpu, wi);
}
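
/* Usage sketch: fire-and-forget.  The work item is heap-allocated and
 * freed by process_queued_cpu_work() once it has run (wi->free is
 * true), so the callback must not rely on the caller's stack:
 *
 *     async_run_on_cpu(cpu, do_something, RUN_ON_CPU_HOST_PTR(obj));
 *
 * where "do_something" and "obj" are hypothetical.
 */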

/* Wait for pending exclusive operations to complete.  The CPU list lock
   must be held.  */
static inline void exclusive_idle(void)
{
    while (pending_cpus) {
        qemu_cond_wait(&exclusive_resume, &qemu_cpu_list_lock);
    }
}

/* Start an exclusive operation.
   Must only be called from outside cpu_exec.  */
void start_exclusive(void)
{
    CPUState *other_cpu;
    int running_cpus;

    qemu_mutex_lock(&qemu_cpu_list_lock);
    exclusive_idle();

    /* Make all other cpus stop executing.  */
    qatomic_set(&pending_cpus, 1);

    /* Write pending_cpus before reading other_cpu->running.  */
    smp_mb();
    running_cpus = 0;
    CPU_FOREACH(other_cpu) {
        if (qatomic_read(&other_cpu->running)) {
            other_cpu->has_waiter = true;
            running_cpus++;
            qemu_cpu_kick(other_cpu);
        }
    }

    qatomic_set(&pending_cpus, running_cpus + 1);
    while (pending_cpus > 1) {
        qemu_cond_wait(&exclusive_cond, &qemu_cpu_list_lock);
    }

    /* Can release mutex, no one will enter another exclusive
     * section until end_exclusive resets pending_cpus to 0.
     */
    qemu_mutex_unlock(&qemu_cpu_list_lock);

    current_cpu->in_exclusive_context = true;
}

/* Finish an exclusive operation.  */
void end_exclusive(void)
{
    current_cpu->in_exclusive_context = false;

    qemu_mutex_lock(&qemu_cpu_list_lock);
    qatomic_set(&pending_cpus, 0);
    qemu_cond_broadcast(&exclusive_resume);
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}
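
/* Usage sketch: everything between start_exclusive() and
 * end_exclusive() runs with every other vCPU stopped outside
 * cpu_exec_start/cpu_exec_end, which makes it safe to mutate state
 * that guest code could otherwise race on:
 *
 *     start_exclusive();
 *     ...                  no other vCPU is executing guest code here
 *     end_exclusive();
 *
 * async_safe_run_on_cpu() below wraps its callback in exactly this
 * pair.
 */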

/* Wait for exclusive ops to finish, and begin cpu execution.  */
void cpu_exec_start(CPUState *cpu)
{
    qatomic_set(&cpu->running, true);

    /* Write cpu->running before reading pending_cpus.  */
    smp_mb();

    /* 1. start_exclusive saw cpu->running == true and pending_cpus >= 1.
     * After taking the lock we'll see cpu->has_waiter == true and run; not
     * for long, though, because start_exclusive kicked us.  cpu_exec_end
     * will decrement pending_cpus and signal the waiter.
     *
     * 2. start_exclusive saw cpu->running == false but pending_cpus >= 1.
     * This includes the case when an exclusive item is running now.
     * Then we'll see cpu->has_waiter == false and wait for the item to
     * complete.
     *
     * 3. pending_cpus == 0.  Then start_exclusive is definitely going to
     * see cpu->running == true, and it will kick the CPU.
     */
    if (unlikely(qatomic_read(&pending_cpus))) {
        QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
        if (!cpu->has_waiter) {
            /* Not counted in pending_cpus, let the exclusive item
             * run.  Since we have the lock, just set cpu->running to true
             * while holding it; no need to check pending_cpus again.
             */
            qatomic_set(&cpu->running, false);
            exclusive_idle();
            /* Now pending_cpus is zero.  */
            qatomic_set(&cpu->running, true);
        } else {
            /* Counted in pending_cpus, go ahead and release the
             * waiter at cpu_exec_end.
             */
        }
    }
}

/* Mark cpu as not executing, and release pending exclusive ops.  */
void cpu_exec_end(CPUState *cpu)
{
    qatomic_set(&cpu->running, false);

    /* Write cpu->running before reading pending_cpus.  */
    smp_mb();

    /* 1. start_exclusive saw cpu->running == true.  Then it will increment
     * pending_cpus and wait for exclusive_cond.  After taking the lock
     * we'll see cpu->has_waiter == true.
     *
     * 2. start_exclusive saw cpu->running == false but here pending_cpus >= 1.
     * This includes the case when an exclusive item started after setting
     * cpu->running to false and before we read pending_cpus.  Then we'll see
     * cpu->has_waiter == false and not touch pending_cpus.  The next call to
     * cpu_exec_start will run exclusive_idle if still necessary, thus waiting
     * for the item to complete.
     *
     * 3. pending_cpus == 0.  Then start_exclusive is definitely going to
     * see cpu->running == false, and it can ignore this CPU until the
     * next cpu_exec_start.
     */
    if (unlikely(qatomic_read(&pending_cpus))) {
        QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
        if (cpu->has_waiter) {
            cpu->has_waiter = false;
            qatomic_set(&pending_cpus, pending_cpus - 1);
            if (pending_cpus == 1) {
                qemu_cond_signal(&exclusive_cond);
            }
        }
    }
}
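
/* Usage sketch: the per-vCPU loop brackets guest execution with this
 * pair so that start_exclusive() can stop it (roughly what the TCG
 * vCPU threads and the linux-user main loop do):
 *
 *     cpu_exec_start(cpu);
 *     ret = cpu_exec(cpu);
 *     cpu_exec_end(cpu);
 */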

void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func,
                           run_on_cpu_data data)
{
    struct qemu_work_item *wi;

    wi = g_new0(struct qemu_work_item, 1);
    wi->func = func;
    wi->data = data;
    wi->free = true;
    wi->exclusive = true;

    queue_work_on_cpu(cpu, wi);
}
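
/* Usage sketch: like async_run_on_cpu(), but the callback runs inside
 * start_exclusive()/end_exclusive(), i.e. with all vCPUs stopped and
 * without the BQL.  A typical use in QEMU is flushing the TCG
 * translation cache safely from any thread:
 *
 *     async_safe_run_on_cpu(cpu, do_something_exclusive, RUN_ON_CPU_NULL);
 *
 * where "do_something_exclusive" is a hypothetical callback.
 */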

void process_queued_cpu_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    qemu_mutex_lock(&cpu->work_mutex);
    if (QSIMPLEQ_EMPTY(&cpu->work_list)) {
        qemu_mutex_unlock(&cpu->work_mutex);
        return;
    }
    while (!QSIMPLEQ_EMPTY(&cpu->work_list)) {
        wi = QSIMPLEQ_FIRST(&cpu->work_list);
        QSIMPLEQ_REMOVE_HEAD(&cpu->work_list, node);
        qemu_mutex_unlock(&cpu->work_mutex);
        if (wi->exclusive) {
            /* Running exclusive work items outside the BQL avoids the
             * following deadlock: 1) start_exclusive() is called with the
             * BQL taken while another CPU is running; 2) cpu_exec in the
             * other CPU tries to take the BQL, so it goes to sleep;
             * start_exclusive() is sleeping too, so neither CPU can proceed.
             */
            qemu_mutex_unlock_iothread();
            start_exclusive();
            wi->func(cpu, wi->data);
            end_exclusive();
            qemu_mutex_lock_iothread();
        } else {
            wi->func(cpu, wi->data);
        }
        qemu_mutex_lock(&cpu->work_mutex);
        if (wi->free) {
            g_free(wi);
        } else {
            qatomic_mb_set(&wi->done, true);
        }
    }
    qemu_mutex_unlock(&cpu->work_mutex);
    qemu_cond_broadcast(&qemu_work_cond);
}