qemu/cpu.c
/*
 * Target-specific parts of the CPU object
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qapi/error.h"

#include "exec/target_page.h"
#include "hw/qdev-core.h"
#include "hw/qdev-properties.h"
#include "qemu/error-report.h"
#include "migration/vmstate.h"
#ifdef CONFIG_USER_ONLY
#include "qemu.h"
#else
#include "hw/core/sysemu-cpu-ops.h"
#include "exec/address-spaces.h"
#endif
#include "sysemu/tcg.h"
#include "sysemu/kvm.h"
#include "sysemu/replay.h"
#include "exec/translate-all.h"
#include "exec/log.h"
#include "hw/core/accel-cpu.h"
#include "trace/trace-root.h"

uintptr_t qemu_host_page_size;
intptr_t qemu_host_page_mask;

#ifndef CONFIG_USER_ONLY
static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /*
     * 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
     * version_id is increased.
     */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu);

    /*
     * loadvm has just updated the content of RAM, bypassing the
     * usual mechanisms that ensure we flush TBs for writes to
     * memory we've translated code from. So we must flush all TBs,
     * which will now be stale.
     */
    tb_flush(cpu);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};
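
/*
 * A note on the subsection pattern above: a subsection is only put on
 * the wire when its .needed callback returns true, so destinations that
 * predate the subsection keep migrating successfully as long as the
 * state still holds its default value. A further subsection would
 * follow the same shape (names below are an illustrative sketch, not
 * part of this file) and then be added to the .subsections list of
 * vmstate_cpu_common:
 *
 *     static const VMStateDescription vmstate_cpu_common_example = {
 *         .name = "cpu_common/example",
 *         .version_id = 1,
 *         .minimum_version_id = 1,
 *         .needed = cpu_common_example_needed,
 *         .fields = (VMStateField[]) {
 *             VMSTATE_UINT32(halted, CPUState),
 *             VMSTATE_END_OF_LIST()
 *         }
 *     };
 */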
#endif

void cpu_exec_realizefn(CPUState *cpu, Error **errp)
{
#ifndef CONFIG_USER_ONLY
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif

    cpu_list_add(cpu);
    if (!accel_cpu_realizefn(cpu, errp)) {
        return;
    }
#ifdef CONFIG_TCG
    /* NB: errp parameter is unused currently */
    if (tcg_enabled()) {
        tcg_exec_realizefn(cpu, errp);
    }
#endif /* CONFIG_TCG */

#ifdef CONFIG_USER_ONLY
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL ||
           qdev_get_vmsd(DEVICE(cpu))->unmigratable);
#else
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->sysemu_ops->legacy_vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cc->sysemu_ops->legacy_vmsd, cpu);
    }
#endif /* CONFIG_USER_ONLY */
}

void cpu_exec_unrealizefn(CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->sysemu_ops->legacy_vmsd != NULL) {
        vmstate_unregister(NULL, cc->sysemu_ops->legacy_vmsd, cpu);
    }
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
    }
#endif
#ifdef CONFIG_TCG
    if (tcg_enabled()) {
        tcg_exec_unrealizefn(cpu);
    }
#endif /* CONFIG_TCG */

    cpu_list_remove(cpu);
}

void cpu_exec_initfn(CPUState *cpu)
{
    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
    cpu->memory = get_system_memory();
    object_ref(OBJECT(cpu->memory));
#endif
}
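
/*
 * cpu->as and cpu->num_ases start out empty here; on sysemu targets
 * the address spaces are attached later (e.g. via
 * cpu_address_space_init()) once the target CPU model knows how many
 * it needs.
 */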

const char *parse_cpu_option(const char *cpu_option)
{
    ObjectClass *oc;
    CPUClass *cc;
    gchar **model_pieces;
    const char *cpu_type;

    model_pieces = g_strsplit(cpu_option, ",", 2);
    if (!model_pieces[0]) {
        error_report("-cpu option cannot be empty");
        exit(EXIT_FAILURE);
    }

    oc = cpu_class_by_name(CPU_RESOLVING_TYPE, model_pieces[0]);
    if (oc == NULL) {
        error_report("unable to find CPU model '%s'", model_pieces[0]);
        g_strfreev(model_pieces);
        exit(EXIT_FAILURE);
    }

    cpu_type = object_class_get_name(oc);
    cc = CPU_CLASS(oc);
    cc->parse_features(cpu_type, model_pieces[1], &error_fatal);
    g_strfreev(model_pieces);
    return cpu_type;
}
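
/*
 * Illustrative example (the concrete values are assumptions, not taken
 * from this file): on an Arm target, parse_cpu_option("cortex-a57,pmu=off")
 * resolves "cortex-a57" via cpu_class_by_name(), hands the remaining
 * feature string "pmu=off" to cc->parse_features(), and returns the QOM
 * type name (e.g. "cortex-a57-arm-cpu"), suitable for cpu_create().
 */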

#if defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(target_ulong addr)
{
    mmap_lock();
    tb_invalidate_phys_page_range(addr, addr + 1);
    mmap_unlock();
}
#else
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs)
{
    ram_addr_t ram_addr;
    MemoryRegion *mr;
    hwaddr l = 1;

    if (!tcg_enabled()) {
        return;
    }

    /*
     * Hold the RCU read lock so the memory map (and thus 'mr') stays
     * valid across the translate and invalidate calls below.
     */
    RCU_READ_LOCK_GUARD();
    mr = address_space_translate(as, addr, &addr, &l, false, attrs);
    if (!(memory_region_is_ram(mr)
          || memory_region_is_romd(mr))) {
        return;
    }
    ram_addr = memory_region_get_ram_addr(mr) + addr;
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1);
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUBreakpoint *bp;

    if (cc->gdb_adjust_breakpoint) {
        pc = cc->gdb_adjust_breakpoint(cpu, pc);
    }

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    if (breakpoint) {
        *breakpoint = bp;
    }

    trace_breakpoint_insert(cpu->cpu_index, pc, flags);
    return 0;
}
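
/*
 * Illustrative usage (a sketch, not code from this file): insert a
 * debugger breakpoint and later drop it by reference:
 *
 *     CPUBreakpoint *bp;
 *     if (cpu_breakpoint_insert(cpu, pc, BP_GDB, &bp) == 0) {
 *         ...
 *         cpu_breakpoint_remove_by_ref(cpu, bp);
 *     }
 */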

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUBreakpoint *bp;

    if (cc->gdb_adjust_breakpoint) {
        pc = cc->gdb_adjust_breakpoint(cpu, pc);
    }

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *bp)
{
    QTAILQ_REMOVE(&cpu->breakpoints, bp, entry);

    trace_breakpoint_remove(cpu->cpu_index, bp->pc, bp->flags);
    g_free(bp);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}
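
/*
 * The mask selects by flag bits, so for example (illustrative only)
 * cpu_breakpoint_remove_all(cpu, BP_GDB) drops only GDB-injected
 * breakpoints, while BP_GDB | BP_CPU clears guest-set ones as well.
 */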

/*
 * Enable or disable single step mode. EXCP_DEBUG is returned by the
 * CPU loop after each instruction.
 */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        }
        trace_breakpoint_singlestep(cpu->cpu_index, enabled);
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    /* vfprintf() below consumes 'ap'; keep a copy for the log file. */
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        FILE *logfile = qemu_log_lock();
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_unlock(logfile);
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        /*
         * Restore the default SIGABRT handler so abort() terminates
         * the process instead of invoking a guest-installed handler.
         */
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        act.sa_flags = 0;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        void *ptr, target_ulong len, bool is_write)
{
    int flags;
    target_ulong l, page;
    void *p;
    uint8_t *buf = ptr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* Transfer at most up to the end of the current guest page. */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID)) {
            return -1;
        }
        if (is_write) {
            if (!(flags & PAGE_WRITE)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            p = lock_user(VERIFY_WRITE, addr, l, 0);
            if (!p) {
                return -1;
            }
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            p = lock_user(VERIFY_READ, addr, l, 1);
            if (!p) {
                return -1;
            }
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
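
/*
 * Illustrative usage (a sketch, not code from this file): a debugger
 * stub reading guest memory into a host buffer, failing if any of the
 * touched pages is unmapped or not readable:
 *
 *     uint8_t buf[16];
 *     if (cpu_memory_rw_debug(cpu, addr, buf, sizeof(buf), false) < 0) {
 *         ... address not accessible ...
 *     }
 */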
#endif

bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

void page_size_init(void)
{
    /*
     * NOTE: we can always suppose that qemu_host_page_size >=
     * TARGET_PAGE_SIZE.
     */
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
}
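
/*
 * Worked example (illustrative): with a 4 KiB host page size,
 * qemu_host_page_mask = -(intptr_t)0x1000 = ~(intptr_t)0xfff, so
 * 'addr & qemu_host_page_mask' rounds addr down to a host page
 * boundary and 'addr & ~qemu_host_page_mask' yields the offset
 * within the page.
 */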