qemu/include/qom/cpu.h
/*
 * QEMU CPU model
 *
 * Copyright (c) 2012 SUSE LINUX Products GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 */
#ifndef QEMU_CPU_H
#define QEMU_CPU_H

#include "hw/qdev-core.h"
#include "disas/bfd.h"
#include "exec/hwaddr.h"
#include "exec/memattrs.h"
#include "qemu/bitmap.h"
#include "qemu/queue.h"
#include "qemu/thread.h"
#include "trace/generated-events.h"

typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size,
                                     void *opaque);

/**
 * vaddr:
 * Type wide enough to contain any #target_ulong virtual address.
 */
typedef uint64_t vaddr;
#define VADDR_PRId PRId64
#define VADDR_PRIu PRIu64
#define VADDR_PRIo PRIo64
#define VADDR_PRIx PRIx64
#define VADDR_PRIX PRIX64
#define VADDR_MAX UINT64_MAX
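
/*
 * Illustrative sketch (not part of the original header): printing a vaddr
 * with the format macros above; "addr" is a hypothetical variable.
 *
 *     vaddr addr = 0x1000;
 *     printf("page at 0x%" VADDR_PRIx "\n", addr);
 */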

/**
 * SECTION:cpu
 * @section_id: QEMU-cpu
 * @title: CPU Class
 * @short_description: Base class for all CPUs
 */

#define TYPE_CPU "cpu"

/* Since this macro is used a lot in hot code paths and in conjunction with
 * FooCPU *foo_env_get_cpu(), we deviate from usual QOM practice by using
 * an unchecked cast.
 */
#define CPU(obj) ((CPUState *)(obj))
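
/*
 * Illustrative sketch (not part of the original header): CPU() is an
 * unchecked cast and therefore cheap enough for hot paths, while the class
 * macros below still go through the checked QOM casts. "FooCPU", "FOO_CPU"
 * and "foo_cpu" are hypothetical names.
 *
 *     FooCPU *foo_cpu = FOO_CPU(dev);
 *     CPUState *cs = CPU(foo_cpu);
 *     CPUClass *cc = CPU_GET_CLASS(cs);
 */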

#define CPU_CLASS(class) OBJECT_CLASS_CHECK(CPUClass, (class), TYPE_CPU)
#define CPU_GET_CLASS(obj) OBJECT_GET_CLASS(CPUClass, (obj), TYPE_CPU)

typedef enum MMUAccessType {
    MMU_DATA_LOAD  = 0,
    MMU_DATA_STORE = 1,
    MMU_INST_FETCH = 2
} MMUAccessType;

typedef struct CPUWatchpoint CPUWatchpoint;

typedef void (*CPUUnassignedAccess)(CPUState *cpu, hwaddr addr,
                                    bool is_write, bool is_exec, int opaque,
                                    unsigned size);

struct TranslationBlock;

/**
 * CPUClass:
 * @class_by_name: Callback to map -cpu command line model name to an
 * instantiatable CPU type.
 * @parse_features: Callback to parse command line arguments.
 * @reset: Callback to reset the #CPUState to its initial state.
 * @reset_dump_flags: #CPUDumpFlags to use for reset logging.
 * @has_work: Callback for checking if there is work to do.
 * @do_interrupt: Callback for interrupt handling.
 * @do_unassigned_access: Callback for unassigned access handling.
 * @do_unaligned_access: Callback for unaligned access handling, if
 * the target defines #ALIGNED_ONLY.
 * @virtio_is_big_endian: Callback to return %true if a CPU which supports
 * runtime configurable endianness is currently big-endian. Non-configurable
 * CPUs can use the default implementation of this method. This method should
 * not be used by any callers other than the pre-1.0 virtio devices.
 * @memory_rw_debug: Callback for GDB memory access.
 * @dump_state: Callback for dumping state.
 * @dump_statistics: Callback for dumping statistics.
 * @get_arch_id: Callback for getting architecture-dependent CPU ID.
 * @get_paging_enabled: Callback for inquiring whether paging is enabled.
 * @get_memory_mapping: Callback for obtaining the memory mappings.
 * @set_pc: Callback for setting the Program Counter register.
 * @synchronize_from_tb: Callback for synchronizing state from a TCG
 * #TranslationBlock.
 * @handle_mmu_fault: Callback for handling an MMU fault.
 * @get_phys_page_debug: Callback for obtaining a physical address.
 * @get_phys_page_attrs_debug: Callback for obtaining a physical address and the
 *       associated memory transaction attributes to use for the access.
 *       CPUs which use memory transaction attributes should implement this
 *       instead of get_phys_page_debug.
 * @asidx_from_attrs: Callback to return the CPU AddressSpace to use for
 *       a memory access with the specified memory transaction attributes.
 * @gdb_read_register: Callback for letting GDB read a register.
 * @gdb_write_register: Callback for letting GDB write a register.
 * @debug_check_watchpoint: Callback: return true if the architectural
 *       watchpoint whose address has matched should really fire.
 * @debug_excp_handler: Callback for handling debug exceptions.
 * @write_elf64_note: Callback for writing a CPU-specific ELF note to a
 * 64-bit VM coredump.
 * @write_elf64_qemunote: Callback for writing a CPU- and QEMU-specific ELF
 * note to a 64-bit VM coredump.
 * @write_elf32_note: Callback for writing a CPU-specific ELF note to a
 * 32-bit VM coredump.
 * @write_elf32_qemunote: Callback for writing a CPU- and QEMU-specific ELF
 * note to a 32-bit VM coredump.
 * @vmsd: State description for migration.
 * @gdb_num_core_regs: Number of core registers accessible to GDB.
 * @gdb_core_xml_file: File name for core registers GDB XML description.
 * @gdb_stop_before_watchpoint: Indicates whether GDB expects the CPU to stop
 *           before the insn which triggers a watchpoint rather than after it.
 * @gdb_arch_name: Optional callback that returns the architecture name known
 * to GDB. The caller must free the returned string with g_free.
 * @cpu_exec_enter: Callback for cpu_exec preparation.
 * @cpu_exec_exit: Callback for cpu_exec cleanup.
 * @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec.
 * @disas_set_info: Setup architecture specific components of disassembly info
 *
 * Represents a CPU family or model.
 */
typedef struct CPUClass {
    /*< private >*/
    DeviceClass parent_class;
    /*< public >*/

    ObjectClass *(*class_by_name)(const char *cpu_model);
    void (*parse_features)(const char *typename, char *str, Error **errp);

    void (*reset)(CPUState *cpu);
    int reset_dump_flags;
    bool (*has_work)(CPUState *cpu);
    void (*do_interrupt)(CPUState *cpu);
    CPUUnassignedAccess do_unassigned_access;
    void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
                                MMUAccessType access_type,
                                int mmu_idx, uintptr_t retaddr);
    bool (*virtio_is_big_endian)(CPUState *cpu);
    int (*memory_rw_debug)(CPUState *cpu, vaddr addr,
                           uint8_t *buf, int len, bool is_write);
    void (*dump_state)(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
                       int flags);
    void (*dump_statistics)(CPUState *cpu, FILE *f,
                            fprintf_function cpu_fprintf, int flags);
    int64_t (*get_arch_id)(CPUState *cpu);
    bool (*get_paging_enabled)(const CPUState *cpu);
    void (*get_memory_mapping)(CPUState *cpu, MemoryMappingList *list,
                               Error **errp);
    void (*set_pc)(CPUState *cpu, vaddr value);
    void (*synchronize_from_tb)(CPUState *cpu, struct TranslationBlock *tb);
    int (*handle_mmu_fault)(CPUState *cpu, vaddr address, int rw,
                            int mmu_index);
    hwaddr (*get_phys_page_debug)(CPUState *cpu, vaddr addr);
    hwaddr (*get_phys_page_attrs_debug)(CPUState *cpu, vaddr addr,
                                        MemTxAttrs *attrs);
    int (*asidx_from_attrs)(CPUState *cpu, MemTxAttrs attrs);
    int (*gdb_read_register)(CPUState *cpu, uint8_t *buf, int reg);
    int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg);
    bool (*debug_check_watchpoint)(CPUState *cpu, CPUWatchpoint *wp);
    void (*debug_excp_handler)(CPUState *cpu);

    int (*write_elf64_note)(WriteCoreDumpFunction f, CPUState *cpu,
                            int cpuid, void *opaque);
    int (*write_elf64_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
                                void *opaque);
    int (*write_elf32_note)(WriteCoreDumpFunction f, CPUState *cpu,
                            int cpuid, void *opaque);
    int (*write_elf32_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
                                void *opaque);

    const struct VMStateDescription *vmsd;
    int gdb_num_core_regs;
    const char *gdb_core_xml_file;
    gchar * (*gdb_arch_name)(CPUState *cpu);
    bool gdb_stop_before_watchpoint;

    void (*cpu_exec_enter)(CPUState *cpu);
    void (*cpu_exec_exit)(CPUState *cpu);
    bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);

    void (*disas_set_info)(CPUState *cpu, disassemble_info *info);
} CPUClass;
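
/*
 * Illustrative sketch (not part of the original header): a target normally
 * fills in these hooks from its class_init function. The "foo" names are
 * hypothetical.
 *
 *     static void foo_cpu_class_init(ObjectClass *oc, void *data)
 *     {
 *         CPUClass *cc = CPU_CLASS(oc);
 *
 *         cc->class_by_name = foo_cpu_class_by_name;
 *         cc->has_work = foo_cpu_has_work;
 *         cc->do_interrupt = foo_cpu_do_interrupt;
 *         cc->set_pc = foo_cpu_set_pc;
 *         cc->gdb_num_core_regs = 32;
 *     }
 */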

#ifdef HOST_WORDS_BIGENDIAN
typedef struct icount_decr_u16 {
    uint16_t high;
    uint16_t low;
} icount_decr_u16;
#else
typedef struct icount_decr_u16 {
    uint16_t low;
    uint16_t high;
} icount_decr_u16;
#endif
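
/*
 * Illustrative note (not part of the original header): the field order is
 * swapped with host endianness so that u16.low always aliases the low half
 * of the 32-bit counter and u16.high the high half. As described for
 * CPUState::icount_decr below, this lets a single signed 32-bit test catch
 * both decrementer underflow and a pending interrupt flag, roughly:
 *
 *     if ((int32_t)cpu->icount_decr.u32 < 0) {
 *         // budget exhausted or interrupt flagged in the high half
 *     }
 */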

typedef struct CPUBreakpoint {
    vaddr pc;
    int flags; /* BP_* */
    QTAILQ_ENTRY(CPUBreakpoint) entry;
} CPUBreakpoint;

struct CPUWatchpoint {
    vaddr vaddr;
    vaddr len;
    vaddr hitaddr;
    MemTxAttrs hitattrs;
    int flags; /* BP_* */
    QTAILQ_ENTRY(CPUWatchpoint) entry;
};

struct KVMState;
struct kvm_run;

#define TB_JMP_CACHE_BITS 12
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)

/* work queue */
struct qemu_work_item {
    struct qemu_work_item *next;
    void (*func)(void *data);
    void *data;
    int done;
    bool free;
};

/**
 * CPUState:
 * @cpu_index: CPU index (informative).
 * @nr_cores: Number of cores within this CPU package.
 * @nr_threads: Number of threads within this CPU.
 * @numa_node: NUMA node this CPU belongs to.
 * @host_tid: Host thread ID.
 * @running: %true if CPU is currently running (usermode).
 * @created: Indicates whether the CPU thread has been successfully created.
 * @interrupt_request: Indicates a pending interrupt request.
 * @halted: Nonzero if the CPU is in suspended state.
 * @stop: Indicates a pending stop request.
 * @stopped: Indicates the CPU has been artificially stopped.
 * @unplug: Indicates a pending CPU unplug request.
 * @crash_occurred: Indicates the OS reported a crash (panic) for this CPU.
 * @tcg_exit_req: Set to force TCG to stop executing linked TBs for this
 *           CPU and return to its top level loop.
 * @tb_flushed: Indicates the translation buffer has been flushed.
 * @singlestep_enabled: Flags for single-stepping.
 * @icount_extra: Instructions until next timer event.
 * @icount_decr: Number of cycles left, with interrupt flag in high bit.
 * This allows a single read-compare-cbranch-write sequence to test
 * for both decrementer underflow and exceptions.
 * @can_do_io: Nonzero if memory-mapped IO is safe. Deterministic execution
 * requires that IO only be performed on the last instruction of a TB
 * so that interrupts take effect immediately.
 * @cpu_ases: Pointer to array of CPUAddressSpaces (which define the
 *            AddressSpaces this CPU has)
 * @num_ases: number of CPUAddressSpaces in @cpu_ases
 * @as: Pointer to the first AddressSpace, for the convenience of targets which
 *      only have a single AddressSpace
 * @env_ptr: Pointer to subclass-specific CPUArchState field.
 * @gdb_regs: Additional GDB registers.
 * @gdb_num_regs: Number of total registers accessible to GDB.
 * @gdb_num_g_regs: Number of registers in GDB 'g' packets.
 * @node: QTAILQ entry linking CPUs which share the TB cache.
 * @opaque: User data.
 * @mem_io_pc: Host Program Counter at which the memory was accessed.
 * @mem_io_vaddr: Target virtual address at which the memory was accessed.
 * @kvm_fd: vCPU file descriptor for KVM.
 * @work_mutex: Lock to prevent multiple access to queued_work_*.
 * @queued_work_first: First asynchronous work pending.
 * @trace_dstate: Dynamic tracing state of events for this vCPU (bitmask).
 *
 * State of one CPU core or thread.
 */
struct CPUState {
    /*< private >*/
    DeviceState parent_obj;
    /*< public >*/

    int nr_cores;
    int nr_threads;
    int numa_node;

    struct QemuThread *thread;
#ifdef _WIN32
    HANDLE hThread;
#endif
    int thread_id;
    uint32_t host_tid;
    bool running;
    struct QemuCond *halt_cond;
    bool thread_kicked;
    bool created;
    bool stop;
    bool stopped;
    bool unplug;
    bool crash_occurred;
    bool exit_request;
    bool tb_flushed;
    uint32_t interrupt_request;
    int singlestep_enabled;
    int64_t icount_extra;
    sigjmp_buf jmp_env;

    QemuMutex work_mutex;
    struct qemu_work_item *queued_work_first, *queued_work_last;

    CPUAddressSpace *cpu_ases;
    int num_ases;
    AddressSpace *as;
    MemoryRegion *memory;

    void *env_ptr; /* CPUArchState */
    struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];
    struct GDBRegisterState *gdb_regs;
    int gdb_num_regs;
    int gdb_num_g_regs;
    QTAILQ_ENTRY(CPUState) node;

    /* ice debug support */
    QTAILQ_HEAD(breakpoints_head, CPUBreakpoint) breakpoints;

    QTAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints;
    CPUWatchpoint *watchpoint_hit;

    void *opaque;

    /* In order to avoid passing too many arguments to the MMIO helpers,
     * we store some rarely used information in the CPU context.
     */
    uintptr_t mem_io_pc;
    vaddr mem_io_vaddr;

    int kvm_fd;
    bool kvm_vcpu_dirty;
    struct KVMState *kvm_state;
    struct kvm_run *kvm_run;

    /* Used for events with 'vcpu' and *without* the 'disabled' properties */
    DECLARE_BITMAP(trace_dstate, TRACE_VCPU_EVENT_COUNT);

    /* TODO Move common fields from CPUArchState here. */
    int cpu_index; /* used by alpha TCG */
    uint32_t halted; /* used by alpha, cris, ppc TCG */
    union {
        uint32_t u32;
        icount_decr_u16 u16;
    } icount_decr;
    uint32_t can_do_io;
    int32_t exception_index; /* used by m68k TCG */

    /* Used to keep track of an outstanding cpu throttle thread for migration
     * autoconverge
     */
    bool throttle_thread_scheduled;

    /* Note that this is accessed at the start of every TB via a negative
       offset from AREG0.  Leave this field at the end so as to make the
       (absolute value) offset as small as possible.  This reduces code
       size, especially for hosts without large memory offsets.  */
    uint32_t tcg_exit_req;
};

QTAILQ_HEAD(CPUTailQ, CPUState);
extern struct CPUTailQ cpus;
#define CPU_NEXT(cpu) QTAILQ_NEXT(cpu, node)
#define CPU_FOREACH(cpu) QTAILQ_FOREACH(cpu, &cpus, node)
#define CPU_FOREACH_SAFE(cpu, next_cpu) \
    QTAILQ_FOREACH_SAFE(cpu, &cpus, node, next_cpu)
#define CPU_FOREACH_REVERSE(cpu) \
    QTAILQ_FOREACH_REVERSE(cpu, &cpus, CPUTailQ, node)
#define first_cpu QTAILQ_FIRST(&cpus)

extern __thread CPUState *current_cpu;
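
/*
 * Illustrative sketch (not part of the original header): iterating over all
 * vCPUs with the list macros above.
 *
 *     CPUState *cpu;
 *
 *     CPU_FOREACH(cpu) {
 *         qemu_cpu_kick(cpu);
 *     }
 */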

/**
 * cpu_paging_enabled:
 * @cpu: The CPU whose state is to be inspected.
 *
 * Returns: %true if paging is enabled, %false otherwise.
 */
bool cpu_paging_enabled(const CPUState *cpu);

/**
 * cpu_get_memory_mapping:
 * @cpu: The CPU whose memory mappings are to be obtained.
 * @list: Where to write the memory mappings to.
 * @errp: Pointer for reporting an #Error.
 */
void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
                            Error **errp);

/**
 * cpu_write_elf64_note:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @cpuid: ID number of the CPU
 * @opaque: pointer to the CPUState struct
 */
int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
                         int cpuid, void *opaque);

/**
 * cpu_write_elf64_qemunote:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @opaque: pointer to the CPUState struct
 */
int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                             void *opaque);

/**
 * cpu_write_elf32_note:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @cpuid: ID number of the CPU
 * @opaque: pointer to the CPUState struct
 */
int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
                         int cpuid, void *opaque);

/**
 * cpu_write_elf32_qemunote:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @opaque: pointer to the CPUState struct
 */
int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                             void *opaque);

/**
 * CPUDumpFlags:
 * @CPU_DUMP_CODE: dump the guest code around the current program counter
 * @CPU_DUMP_FPU: dump FPU register state, not just integer
 * @CPU_DUMP_CCOP: dump info about TCG QEMU's condition code optimization state
 */
enum CPUDumpFlags {
    CPU_DUMP_CODE = 0x00010000,
    CPU_DUMP_FPU  = 0x00020000,
    CPU_DUMP_CCOP = 0x00040000,
};

/**
 * cpu_dump_state:
 * @cpu: The CPU whose state is to be dumped.
 * @f: File to dump to.
 * @cpu_fprintf: Function to dump with.
 * @flags: Flags specifying what to dump.
 *
 * Dumps CPU state.
 */
void cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
                    int flags);

/**
 * cpu_dump_statistics:
 * @cpu: The CPU whose statistics are to be dumped.
 * @f: File to dump to.
 * @cpu_fprintf: Function to dump with.
 * @flags: Flags specifying what to dump.
 *
 * Dumps CPU statistics.
 */
void cpu_dump_statistics(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
                         int flags);

#ifndef CONFIG_USER_ONLY
/**
 * cpu_get_phys_page_attrs_debug:
 * @cpu: The CPU to obtain the physical page address for.
 * @addr: The virtual address.
 * @attrs: Updated on return with the memory transaction attributes to use
 *         for this access.
 *
 * Obtains the physical page corresponding to a virtual one, together
 * with the corresponding memory transaction attributes to use for the access.
 * Use it only for debugging because no protection checks are done.
 *
 * Returns: Corresponding physical page address or -1 if no page found.
 */
static inline hwaddr cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
                                                   MemTxAttrs *attrs)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->get_phys_page_attrs_debug) {
        return cc->get_phys_page_attrs_debug(cpu, addr, attrs);
    }
    /* Fallback for CPUs which don't implement the _attrs_ hook */
    *attrs = MEMTXATTRS_UNSPECIFIED;
    return cc->get_phys_page_debug(cpu, addr);
}

/**
 * cpu_get_phys_page_debug:
 * @cpu: The CPU to obtain the physical page address for.
 * @addr: The virtual address.
 *
 * Obtains the physical page corresponding to a virtual one.
 * Use it only for debugging because no protection checks are done.
 *
 * Returns: Corresponding physical page address or -1 if no page found.
 */
static inline hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr)
{
    MemTxAttrs attrs = {};

    return cpu_get_phys_page_attrs_debug(cpu, addr, &attrs);
}
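
/*
 * Illustrative sketch (not part of the original header): a debug-only
 * virtual-to-physical lookup, e.g. from a monitor command; "cs" and "va"
 * are hypothetical.
 *
 *     hwaddr phys = cpu_get_phys_page_debug(cs, va);
 *     if (phys == -1) {
 *         // no page is mapped at this virtual address
 *     }
 */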

/** cpu_asidx_from_attrs:
 * @cpu: CPU
 * @attrs: memory transaction attributes
 *
 * Returns the address space index specifying the CPU AddressSpace
 * to use for a memory access with the given transaction attributes.
 */
static inline int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->asidx_from_attrs) {
        return cc->asidx_from_attrs(cpu, attrs);
    }
    return 0;
}
#endif

/**
 * cpu_reset:
 * @cpu: The CPU whose state is to be reset.
 */
void cpu_reset(CPUState *cpu);

/**
 * cpu_class_by_name:
 * @typename: The CPU base type.
 * @cpu_model: The model string without any parameters.
 *
 * Looks up a CPU #ObjectClass matching name @cpu_model.
 *
 * Returns: A #CPUClass or %NULL if no matching class is found.
 */
ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model);

/**
 * cpu_generic_init:
 * @typename: The CPU base type.
 * @cpu_model: The model string including optional parameters.
 *
 * Instantiates a CPU, processes optional parameters and realizes the CPU.
 *
 * Returns: A #CPUState or %NULL if an error occurred.
 */
CPUState *cpu_generic_init(const char *typename, const char *cpu_model);

/**
 * cpu_has_work:
 * @cpu: The vCPU to check.
 *
 * Checks whether the CPU has work to do.
 *
 * Returns: %true if the CPU has work, %false otherwise.
 */
static inline bool cpu_has_work(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    g_assert(cc->has_work);
    return cc->has_work(cpu);
}

/**
 * qemu_cpu_is_self:
 * @cpu: The vCPU to check against.
 *
 * Checks whether the caller is executing on the vCPU thread.
 *
 * Returns: %true if called from @cpu's thread, %false otherwise.
 */
bool qemu_cpu_is_self(CPUState *cpu);

/**
 * qemu_cpu_kick:
 * @cpu: The vCPU to kick.
 *
 * Kicks @cpu's thread.
 */
void qemu_cpu_kick(CPUState *cpu);

/**
 * cpu_is_stopped:
 * @cpu: The CPU to check.
 *
 * Checks whether the CPU is stopped.
 *
 * Returns: %true if run state is not running or if artificially stopped;
 * %false otherwise.
 */
bool cpu_is_stopped(CPUState *cpu);

/**
 * run_on_cpu:
 * @cpu: The vCPU to run on.
 * @func: The function to be executed.
 * @data: Data to pass to the function.
 *
 * Schedules the function @func for execution on the vCPU @cpu.
 */
void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);

/**
 * async_run_on_cpu:
 * @cpu: The vCPU to run on.
 * @func: The function to be executed.
 * @data: Data to pass to the function.
 *
 * Schedules the function @func for execution on the vCPU @cpu asynchronously.
 */
void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);
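
/*
 * Illustrative sketch (not part of the original header): queueing work on a
 * specific vCPU; "do_nothing_work" is a hypothetical callback. run_on_cpu()
 * waits for the function to complete, async_run_on_cpu() returns at once.
 *
 *     static void do_nothing_work(void *data)
 *     {
 *         CPUState *cpu = data;
 *         (void)cpu;
 *     }
 *
 *     run_on_cpu(cpu, do_nothing_work, cpu);
 *     async_run_on_cpu(cpu, do_nothing_work, cpu);
 */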

/**
 * qemu_get_cpu:
 * @index: The CPUState@cpu_index value of the CPU to obtain.
 *
 * Gets a CPU matching @index.
 *
 * Returns: The CPU or %NULL if there is no matching CPU.
 */
CPUState *qemu_get_cpu(int index);

/**
 * cpu_exists:
 * @id: Guest-exposed CPU ID to lookup.
 *
 * Searches for a CPU with the specified ID.
 *
 * Returns: %true if the CPU is found, %false otherwise.
 */
bool cpu_exists(int64_t id);

/**
 * cpu_throttle_set:
 * @new_throttle_pct: Percent of sleep time. Valid range is 1 to 99.
 *
 * Throttles all vcpus by forcing them to sleep for the given percentage of
 * time. A throttle_percentage of 25 roughly corresponds to a 75% duty cycle
 * (example: 10ms of sleep for every 30ms awake).
 *
 * cpu_throttle_set can be called as needed to adjust new_throttle_pct.
 * Once throttling starts, it remains in effect until cpu_throttle_stop
 * is called.
 */
void cpu_throttle_set(int new_throttle_pct);
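
/*
 * Illustrative sketch (not part of the original header): throttling vCPUs
 * and later releasing them. A value of 25 means roughly 25% of wall-clock
 * time is spent sleeping (about 10ms asleep for every 30ms awake).
 *
 *     cpu_throttle_set(25);
 *     ...
 *     if (cpu_throttle_active()) {
 *         cpu_throttle_stop();
 *     }
 */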

/**
 * cpu_throttle_stop:
 *
 * Stops the vcpu throttling started by cpu_throttle_set.
 */
void cpu_throttle_stop(void);

/**
 * cpu_throttle_active:
 *
 * Returns: %true if the vcpus are currently being throttled, %false otherwise.
 */
bool cpu_throttle_active(void);

/**
 * cpu_throttle_get_percentage:
 *
 * Returns the vcpu throttle percentage. See cpu_throttle_set for details.
 *
 * Returns: The throttle percentage in range 1 to 99.
 */
int cpu_throttle_get_percentage(void);

#ifndef CONFIG_USER_ONLY

typedef void (*CPUInterruptHandler)(CPUState *, int);

extern CPUInterruptHandler cpu_interrupt_handler;

/**
 * cpu_interrupt:
 * @cpu: The CPU to set an interrupt on.
 * @mask: The interrupts to set.
 *
 * Invokes the interrupt handler.
 */
static inline void cpu_interrupt(CPUState *cpu, int mask)
{
    cpu_interrupt_handler(cpu, mask);
}

#else /* USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask);

#endif /* USER_ONLY */

#ifdef CONFIG_SOFTMMU
static inline void cpu_unassigned_access(CPUState *cpu, hwaddr addr,
                                         bool is_write, bool is_exec,
                                         int opaque, unsigned size)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->do_unassigned_access) {
        cc->do_unassigned_access(cpu, addr, is_write, is_exec, opaque, size);
    }
}

static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
                                        MMUAccessType access_type,
                                        int mmu_idx, uintptr_t retaddr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cc->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr);
}
#endif

/**
 * cpu_set_pc:
 * @cpu: The CPU to set the program counter for.
 * @addr: Program counter value.
 *
 * Sets the program counter for a CPU.
 */
static inline void cpu_set_pc(CPUState *cpu, vaddr addr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cc->set_pc(cpu, addr);
}
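
/*
 * Illustrative sketch (not part of the original header): a loader or board
 * typically points a freshly reset vCPU at its entry point this way; "cs"
 * and "entry" are hypothetical.
 *
 *     cpu_reset(cs);
 *     cpu_set_pc(cs, entry);
 */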

/**
 * cpu_reset_interrupt:
 * @cpu: The CPU to clear the interrupt on.
 * @mask: The interrupt mask to clear.
 *
 * Resets interrupts on the vCPU @cpu.
 */
void cpu_reset_interrupt(CPUState *cpu, int mask);

/**
 * cpu_exit:
 * @cpu: The CPU to exit.
 *
 * Requests the CPU @cpu to exit execution.
 */
void cpu_exit(CPUState *cpu);

/**
 * cpu_resume:
 * @cpu: The CPU to resume.
 *
 * Resumes CPU, i.e. puts CPU into runnable state.
 */
void cpu_resume(CPUState *cpu);

/**
 * cpu_remove:
 * @cpu: The CPU to remove.
 *
 * Requests the CPU to be removed.
 */
void cpu_remove(CPUState *cpu);

/**
 * cpu_remove_sync:
 * @cpu: The CPU to remove.
 *
 * Requests the CPU to be removed and waits till it is removed.
 */
void cpu_remove_sync(CPUState *cpu);

/**
 * qemu_init_vcpu:
 * @cpu: The vCPU to initialize.
 *
 * Initializes a vCPU.
 */
void qemu_init_vcpu(CPUState *cpu);

#define SSTEP_ENABLE  0x1  /* Enable simulated HW single stepping */
#define SSTEP_NOIRQ   0x2  /* Do not use IRQ while single stepping */
#define SSTEP_NOTIMER 0x4  /* Do not use timers while single stepping */

/**
 * cpu_single_step:
 * @cpu: CPU to set the flags for.
 * @enabled: Flags to enable.
 *
 * Enables or disables single-stepping for @cpu.
 */
void cpu_single_step(CPUState *cpu, int enabled);

/* Breakpoint/watchpoint flags */
#define BP_MEM_READ           0x01
#define BP_MEM_WRITE          0x02
#define BP_MEM_ACCESS         (BP_MEM_READ | BP_MEM_WRITE)
#define BP_STOP_BEFORE_ACCESS 0x04
/* 0x08 currently unused */
#define BP_GDB                0x10
#define BP_CPU                0x20
#define BP_ANY                (BP_GDB | BP_CPU)
#define BP_WATCHPOINT_HIT_READ 0x40
#define BP_WATCHPOINT_HIT_WRITE 0x80
#define BP_WATCHPOINT_HIT (BP_WATCHPOINT_HIT_READ | BP_WATCHPOINT_HIT_WRITE)

int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint);
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags);
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint);
void cpu_breakpoint_remove_all(CPUState *cpu, int mask);

/* Return true if PC matches an installed breakpoint.  */
static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask)
{
    CPUBreakpoint *bp;

    if (unlikely(!QTAILQ_EMPTY(&cpu->breakpoints))) {
        QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
            if (bp->pc == pc && (bp->flags & mask)) {
                return true;
            }
        }
    }
    return false;
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint);
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
                          vaddr len, int flags);
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint);
void cpu_watchpoint_remove_all(CPUState *cpu, int mask);
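
/*
 * Illustrative sketch (not part of the original header): inserting and
 * removing a GDB breakpoint and a 4-byte write watchpoint; "cs", "pc" and
 * "addr" are hypothetical.
 *
 *     cpu_breakpoint_insert(cs, pc, BP_GDB, NULL);
 *     cpu_watchpoint_insert(cs, addr, 4, BP_MEM_WRITE | BP_GDB, NULL);
 *     ...
 *     cpu_breakpoint_remove(cs, pc, BP_GDB);
 *     cpu_watchpoint_remove(cs, addr, 4, BP_MEM_WRITE | BP_GDB);
 */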

/**
 * cpu_get_address_space:
 * @cpu: CPU to get address space from
 * @asidx: index identifying which address space to get
 *
 * Return the requested address space of this CPU. @asidx
 * specifies which address space to read.
 */
AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx);

void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...)
    GCC_FMT_ATTR(2, 3);
void cpu_exec_exit(CPUState *cpu);

#ifdef CONFIG_SOFTMMU
extern const struct VMStateDescription vmstate_cpu_common;
#else
#define vmstate_cpu_common vmstate_dummy
#endif

#define VMSTATE_CPU() {                                                     \
    .name = "parent_obj",                                                   \
    .size = sizeof(CPUState),                                               \
    .vmsd = &vmstate_cpu_common,                                            \
    .flags = VMS_STRUCT,                                                    \
    .offset = 0,                                                            \
}
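
/*
 * Illustrative sketch (not part of the original header): a target's vmstate
 * description typically embeds the common CPU state via VMSTATE_CPU() as its
 * first field; "vmstate_foo_cpu" is a hypothetical name.
 *
 *     static const VMStateDescription vmstate_foo_cpu = {
 *         .name = "cpu",
 *         .version_id = 1,
 *         .minimum_version_id = 1,
 *         .fields = (VMStateField[]) {
 *             VMSTATE_CPU(),
 *             VMSTATE_END_OF_LIST()
 *         }
 *     };
 */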

#define UNASSIGNED_CPU_INDEX -1

#endif