qemu/cpu.c
/*
 * Target-specific parts of the CPU object
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qapi/error.h"

#include "exec/target_page.h"
#include "hw/qdev-core.h"
#include "hw/qdev-properties.h"
#include "qemu/error-report.h"
#include "migration/vmstate.h"
#ifdef CONFIG_USER_ONLY
#include "qemu.h"
#else
#include "exec/address-spaces.h"
#endif
#include "sysemu/tcg.h"
#include "sysemu/kvm.h"
#include "sysemu/replay.h"
#include "translate-all.h"
#include "exec/log.h"

uintptr_t qemu_host_page_size;
intptr_t qemu_host_page_mask;

#ifndef CONFIG_USER_ONLY
static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu);

    /* loadvm has just updated the content of RAM, bypassing the
     * usual mechanisms that ensure we flush TBs for writes to
     * memory we've translated code from. So we must flush all TBs,
     * which will now be stale.
     */
    tb_flush(cpu);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

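/*
 * The two subsections above ride on vmstate_cpu_common below. A
 * subsection is only put on the wire when its .needed callback returns
 * true, so a destination that predates exception_index or
 * crash_occurred can still accept the stream as long as that state is
 * not in use.
 */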
const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};
#endif

void cpu_exec_unrealizefn(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    tlb_destroy(cpu);
    cpu_list_remove(cpu);

#ifdef CONFIG_USER_ONLY
    assert(cc->vmsd == NULL);
#else
    if (cc->vmsd != NULL) {
        vmstate_unregister(NULL, cc->vmsd, cpu);
    }
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
    }
    tcg_iommu_free_notifier_list(cpu);
#endif
}

Property cpu_common_props[] = {
#ifndef CONFIG_USER_ONLY
    /* Create a memory property for softmmu CPU object,
     * so users can wire up its memory. (This can't go in hw/core/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION,
                     MemoryRegion *),
#endif
    DEFINE_PROP_BOOL("start-powered-off", CPUState, start_powered_off, false),
    DEFINE_PROP_END_OF_LIST(),
};
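
/*
 * Illustration (hypothetical caller, not exercised in this file): a
 * board model can point a CPU at a private view of memory by setting
 * the link property before the CPU is realized, along the lines of
 *
 *     object_property_set_link(OBJECT(cpu), "memory", OBJECT(mr),
 *                              &error_abort);
 *
 * (the exact argument order of object_property_set_link() has varied
 * across QEMU versions). If the link is never set, cpu_exec_initfn()
 * below defaults it to get_system_memory().
 */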

void cpu_exec_initfn(CPUState *cpu)
{
    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
    cpu->memory = get_system_memory();
    object_ref(OBJECT(cpu->memory));
#endif
}

void cpu_exec_realizefn(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    static bool tcg_target_initialized;

    cpu_list_add(cpu);

    if (tcg_enabled() && !tcg_target_initialized) {
        tcg_target_initialized = true;
        cc->tcg_initialize();
    }
    tlb_init(cpu);

    qemu_plugin_vcpu_init_hook(cpu);

#ifdef CONFIG_USER_ONLY
    assert(cc->vmsd == NULL);
#else /* !CONFIG_USER_ONLY */
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
    }

    tcg_iommu_init_notifier_list(cpu);
#endif
}

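/*
 * For example (illustrative values), "-cpu cortex-a57,pmu=off" is split
 * into model_pieces[0] == "cortex-a57" and model_pieces[1] == "pmu=off";
 * the resolved QOM type name is returned and the feature string is
 * handed to the class's parse_features() hook.
 */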
const char *parse_cpu_option(const char *cpu_option)
{
    ObjectClass *oc;
    CPUClass *cc;
    gchar **model_pieces;
    const char *cpu_type;

    model_pieces = g_strsplit(cpu_option, ",", 2);
    if (!model_pieces[0]) {
        error_report("-cpu option cannot be empty");
        exit(1);
    }

    oc = cpu_class_by_name(CPU_RESOLVING_TYPE, model_pieces[0]);
    if (oc == NULL) {
        error_report("unable to find CPU model '%s'", model_pieces[0]);
        g_strfreev(model_pieces);
        exit(EXIT_FAILURE);
    }

    cpu_type = object_class_get_name(oc);
    cc = CPU_CLASS(oc);
    cc->parse_features(cpu_type, model_pieces[1], &error_fatal);
    g_strfreev(model_pieces);
    return cpu_type;
}

#if defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(target_ulong addr)
{
    mmap_lock();
    tb_invalidate_phys_page_range(addr, addr + 1);
    mmap_unlock();
}

static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_addr(pc);
}
#else
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs)
{
    ram_addr_t ram_addr;
    MemoryRegion *mr;
    hwaddr l = 1;

    if (!tcg_enabled()) {
        return;
    }

    RCU_READ_LOCK_GUARD();
    mr = address_space_translate(as, addr, &addr, &l, false, attrs);
    if (!(memory_region_is_ram(mr)
          || memory_region_is_romd(mr))) {
        return;
    }
    ram_addr = memory_region_get_ram_addr(mr) + addr;
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1);
}

static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    /*
     * There may not be a virtual-to-physical translation for the pc
     * right now, but cached TBs may still exist for this pc.
     * Flush the whole TB cache to force re-translation of such TBs.
     * This is heavyweight, but we're debugging anyway.
     */
    tb_flush(cpu);
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}
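
/*
 * Illustration (hypothetical caller): the gdbstub drives this API
 * roughly as
 *
 *     cpu_breakpoint_insert(cpu, pc, BP_GDB, NULL);
 *     ...
 *     cpu_breakpoint_remove(cpu, pc, BP_GDB);
 *
 * Keeping BP_GDB entries at the head means a debugger-injected
 * breakpoint is found before a guest-defined BP_CPU one at the same pc.
 */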

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* Enable or disable single-step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction. */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* Must flush all the translated code to avoid inconsistencies. */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}
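
/*
 * Illustration (hypothetical caller): the gdbstub turns single-stepping
 * on with something like
 *
 *     cpu_single_step(cpu, SSTEP_ENABLE);
 *
 * and reports the resulting EXCP_DEBUG exits back to the debugger.
 */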

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        FILE *logfile = qemu_log_lock();
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_unlock(logfile);
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        act.sa_flags = 0;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        void *ptr, target_ulong len, bool is_write)
{
    int flags;
    target_ulong l, page;
    void *p;
    uint8_t *buf = ptr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID)) {
            return -1;
        }
        if (is_write) {
            if (!(flags & PAGE_WRITE)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            p = lock_user(VERIFY_WRITE, addr, l, 0);
            if (!p) {
                return -1;
            }
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            p = lock_user(VERIFY_READ, addr, l, 1);
            if (!p) {
                return -1;
            }
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif
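
/*
 * Illustration (hypothetical caller): a debugger memory read through
 * this helper boils down to
 *
 *     uint8_t buf[16];
 *     if (cpu_memory_rw_debug(cpu, addr, buf, sizeof(buf), false) < 0) {
 *         // report an unmapped or unreadable address
 *     }
 */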

bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

void page_size_init(void)
{
    /* NOTE: we can always assume that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
}
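
/*
 * Worked example (illustrative): with a 4 KiB host page,
 * qemu_host_page_mask == -(intptr_t)4096, i.e. 0xfffffffffffff000 on a
 * 64-bit host, so `addr & qemu_host_page_mask` rounds an address down
 * to its host page boundary.
 */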