qemu/include/qom/cpu.h
/*
 * QEMU CPU model
 *
 * Copyright (c) 2012 SUSE LINUX Products GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 */
#ifndef QEMU_CPU_H
#define QEMU_CPU_H

#include "hw/qdev-core.h"
#include "disas/bfd.h"
#include "exec/hwaddr.h"
#include "exec/memattrs.h"
#include "qemu/queue.h"
#include "qemu/thread.h"

typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size,
                                     void *opaque);

/**
 * vaddr:
 * Type wide enough to contain any #target_ulong virtual address.
 */
typedef uint64_t vaddr;
#define VADDR_PRId PRId64
#define VADDR_PRIu PRIu64
#define VADDR_PRIo PRIo64
#define VADDR_PRIx PRIx64
#define VADDR_PRIX PRIX64
#define VADDR_MAX UINT64_MAX

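/*
 * Example (illustrative only): logging a virtual address using the format
 * macros above, so the format string stays correct even if the underlying
 * type of vaddr changes.
 *
 *     vaddr pc = 0x400000;
 *     printf("pc = 0x%" VADDR_PRIx "\n", pc);
 */
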
/**
 * SECTION:cpu
 * @section_id: QEMU-cpu
 * @title: CPU Class
 * @short_description: Base class for all CPUs
 */

#define TYPE_CPU "cpu"

/* Since this macro is used a lot in hot code paths and in conjunction with
 * FooCPU *foo_env_get_cpu(), we deviate from usual QOM practice by using
 * an unchecked cast.
 */
#define CPU(obj) ((CPUState *)(obj))

#define CPU_CLASS(class) OBJECT_CLASS_CHECK(CPUClass, (class), TYPE_CPU)
#define CPU_GET_CLASS(obj) OBJECT_GET_CLASS(CPUClass, (obj), TYPE_CPU)

typedef struct CPUWatchpoint CPUWatchpoint;

typedef void (*CPUUnassignedAccess)(CPUState *cpu, hwaddr addr,
                                    bool is_write, bool is_exec, int opaque,
                                    unsigned size);

struct TranslationBlock;

/**
 * CPUClass:
 * @class_by_name: Callback to map -cpu command line model name to an
 * instantiatable CPU type.
 * @parse_features: Callback to parse command line arguments.
 * @reset: Callback to reset the #CPUState to its initial state.
 * @reset_dump_flags: #CPUDumpFlags to use for reset logging.
 * @has_work: Callback for checking if there is work to do.
 * @do_interrupt: Callback for interrupt handling.
 * @do_unassigned_access: Callback for unassigned access handling.
 * @do_unaligned_access: Callback for unaligned access handling, if
 * the target defines #ALIGNED_ONLY.
 * @virtio_is_big_endian: Callback to return %true if a CPU which supports
 * runtime configurable endianness is currently big-endian. Non-configurable
 * CPUs can use the default implementation of this method. This method should
 * not be used by any callers other than the pre-1.0 virtio devices.
 * @memory_rw_debug: Callback for GDB memory access.
 * @dump_state: Callback for dumping state.
 * @dump_statistics: Callback for dumping statistics.
 * @get_arch_id: Callback for getting architecture-dependent CPU ID.
 * @get_paging_enabled: Callback for inquiring whether paging is enabled.
 * @get_memory_mapping: Callback for obtaining the memory mappings.
 * @set_pc: Callback for setting the Program Counter register.
 * @synchronize_from_tb: Callback for synchronizing state from a TCG
 * #TranslationBlock.
 * @handle_mmu_fault: Callback for handling an MMU fault.
 * @get_phys_page_debug: Callback for obtaining a physical address.
 * @get_phys_page_attrs_debug: Callback for obtaining a physical address and the
 *       associated memory transaction attributes to use for the access.
 *       CPUs which use memory transaction attributes should implement this
 *       instead of get_phys_page_debug.
 * @asidx_from_attrs: Callback to return the CPU AddressSpace to use for
 *       a memory access with the specified memory transaction attributes.
 * @gdb_read_register: Callback for letting GDB read a register.
 * @gdb_write_register: Callback for letting GDB write a register.
 * @debug_check_watchpoint: Callback: return true if the architectural
 *       watchpoint whose address has matched should really fire.
 * @debug_excp_handler: Callback for handling debug exceptions.
 * @write_elf64_note: Callback for writing a CPU-specific ELF note to a
 * 64-bit VM coredump.
 * @write_elf64_qemunote: Callback for writing a CPU- and QEMU-specific ELF
 * note to a 64-bit VM coredump.
 * @write_elf32_note: Callback for writing a CPU-specific ELF note to a
 * 32-bit VM coredump.
 * @write_elf32_qemunote: Callback for writing a CPU- and QEMU-specific ELF
 * note to a 32-bit VM coredump.
 * @vmsd: State description for migration.
 * @gdb_num_core_regs: Number of core registers accessible to GDB.
 * @gdb_core_xml_file: File name for core registers GDB XML description.
 * @gdb_stop_before_watchpoint: Indicates whether GDB expects the CPU to stop
 *           before the insn which triggers a watchpoint rather than after it.
 * @gdb_arch_name: Optional callback that returns the architecture name known
 * to GDB. The caller must free the returned string with g_free.
 * @cpu_exec_enter: Callback for cpu_exec preparation.
 * @cpu_exec_exit: Callback for cpu_exec cleanup.
 * @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec.
 * @disas_set_info: Callback to set up architecture-specific components of the
 * disassembly info.
 *
 * Represents a CPU family or model.
 */
typedef struct CPUClass {
    /*< private >*/
    DeviceClass parent_class;
    /*< public >*/

    ObjectClass *(*class_by_name)(const char *cpu_model);
    void (*parse_features)(CPUState *cpu, char *str, Error **errp);

    void (*reset)(CPUState *cpu);
    int reset_dump_flags;
    bool (*has_work)(CPUState *cpu);
    void (*do_interrupt)(CPUState *cpu);
    CPUUnassignedAccess do_unassigned_access;
    void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
                                int is_write, int is_user, uintptr_t retaddr);
    bool (*virtio_is_big_endian)(CPUState *cpu);
    int (*memory_rw_debug)(CPUState *cpu, vaddr addr,
                           uint8_t *buf, int len, bool is_write);
    void (*dump_state)(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
                       int flags);
    void (*dump_statistics)(CPUState *cpu, FILE *f,
                            fprintf_function cpu_fprintf, int flags);
    int64_t (*get_arch_id)(CPUState *cpu);
    bool (*get_paging_enabled)(const CPUState *cpu);
    void (*get_memory_mapping)(CPUState *cpu, MemoryMappingList *list,
                               Error **errp);
    void (*set_pc)(CPUState *cpu, vaddr value);
    void (*synchronize_from_tb)(CPUState *cpu, struct TranslationBlock *tb);
    int (*handle_mmu_fault)(CPUState *cpu, vaddr address, int rw,
                            int mmu_index);
    hwaddr (*get_phys_page_debug)(CPUState *cpu, vaddr addr);
    hwaddr (*get_phys_page_attrs_debug)(CPUState *cpu, vaddr addr,
                                        MemTxAttrs *attrs);
    int (*asidx_from_attrs)(CPUState *cpu, MemTxAttrs attrs);
    int (*gdb_read_register)(CPUState *cpu, uint8_t *buf, int reg);
    int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg);
    bool (*debug_check_watchpoint)(CPUState *cpu, CPUWatchpoint *wp);
    void (*debug_excp_handler)(CPUState *cpu);

    int (*write_elf64_note)(WriteCoreDumpFunction f, CPUState *cpu,
                            int cpuid, void *opaque);
    int (*write_elf64_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
                                void *opaque);
    int (*write_elf32_note)(WriteCoreDumpFunction f, CPUState *cpu,
                            int cpuid, void *opaque);
    int (*write_elf32_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
                                void *opaque);

    const struct VMStateDescription *vmsd;
    int gdb_num_core_regs;
    const char *gdb_core_xml_file;
    gchar * (*gdb_arch_name)(CPUState *cpu);
    bool gdb_stop_before_watchpoint;

    void (*cpu_exec_enter)(CPUState *cpu);
    void (*cpu_exec_exit)(CPUState *cpu);
    bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);

    void (*disas_set_info)(CPUState *cpu, disassemble_info *info);
} CPUClass;

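/*
 * Example (hedged sketch): how a hypothetical target might hook up a few of
 * the callbacks above in its class_init. FooCPU, foo_cpu_reset,
 * foo_cpu_has_work and foo_cpu_set_pc are illustrative names only, not part
 * of this header.
 *
 *     static void foo_cpu_class_init(ObjectClass *oc, void *data)
 *     {
 *         CPUClass *cc = CPU_CLASS(oc);
 *
 *         cc->reset = foo_cpu_reset;
 *         cc->has_work = foo_cpu_has_work;
 *         cc->set_pc = foo_cpu_set_pc;
 *         cc->gdb_num_core_regs = 32;
 *     }
 */
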
#ifdef HOST_WORDS_BIGENDIAN
typedef struct icount_decr_u16 {
    uint16_t high;
    uint16_t low;
} icount_decr_u16;
#else
typedef struct icount_decr_u16 {
    uint16_t low;
    uint16_t high;
} icount_decr_u16;
#endif

typedef struct CPUBreakpoint {
    vaddr pc;
    int flags; /* BP_* */
    QTAILQ_ENTRY(CPUBreakpoint) entry;
} CPUBreakpoint;

struct CPUWatchpoint {
    vaddr vaddr;
    vaddr len;
    vaddr hitaddr;
    MemTxAttrs hitattrs;
    int flags; /* BP_* */
    QTAILQ_ENTRY(CPUWatchpoint) entry;
};

struct KVMState;
struct kvm_run;

#define TB_JMP_CACHE_BITS 12
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)

/**
 * CPUState:
 * @cpu_index: CPU index (informative).
 * @nr_cores: Number of cores within this CPU package.
 * @nr_threads: Number of threads within this CPU.
 * @numa_node: NUMA node this CPU belongs to.
 * @host_tid: Host thread ID.
 * @running: %true if CPU is currently running (usermode).
 * @created: Indicates whether the CPU thread has been successfully created.
 * @interrupt_request: Indicates a pending interrupt request.
 * @halted: Nonzero if the CPU is in suspended state.
 * @stop: Indicates a pending stop request.
 * @stopped: Indicates the CPU has been artificially stopped.
 * @crash_occurred: Indicates the OS reported a crash (panic) for this CPU.
 * @tcg_exit_req: Set to force TCG to stop executing linked TBs for this
 *           CPU and return to its top level loop.
 * @singlestep_enabled: Flags for single-stepping.
 * @icount_extra: Instructions until next timer event.
 * @icount_decr: Number of cycles left, with interrupt flag in high bit.
 * This allows a single read-compare-cbranch-write sequence to test
 * for both decrementer underflow and exceptions.
 * @can_do_io: Nonzero if memory-mapped IO is safe. Deterministic execution
 * requires that IO only be performed on the last instruction of a TB
 * so that interrupts take effect immediately.
 * @cpu_ases: Pointer to array of CPUAddressSpaces (which define the
 *            AddressSpaces this CPU has)
 * @num_ases: number of CPUAddressSpaces in @cpu_ases
 * @as: Pointer to the first AddressSpace, for the convenience of targets which
 *      only have a single AddressSpace
 * @env_ptr: Pointer to subclass-specific CPUArchState field.
 * @current_tb: Currently executing TB.
 * @gdb_regs: Additional GDB registers.
 * @gdb_num_regs: Number of total registers accessible to GDB.
 * @gdb_num_g_regs: Number of registers in GDB 'g' packets.
 * @next_cpu: Next CPU sharing TB cache.
 * @opaque: User data.
 * @mem_io_pc: Host Program Counter at which the memory was accessed.
 * @mem_io_vaddr: Target virtual address at which the memory was accessed.
 * @kvm_fd: vCPU file descriptor for KVM.
 * @work_mutex: Lock to prevent multiple access to queued_work_*.
 * @queued_work_first: First asynchronous work pending.
 *
 * State of one CPU core or thread.
 */
struct CPUState {
    /*< private >*/
    DeviceState parent_obj;
    /*< public >*/

    int nr_cores;
    int nr_threads;
    int numa_node;

    struct QemuThread *thread;
#ifdef _WIN32
    HANDLE hThread;
#endif
    int thread_id;
    uint32_t host_tid;
    bool running;
    struct QemuCond *halt_cond;
    bool thread_kicked;
    bool created;
    bool stop;
    bool stopped;
    bool crash_occurred;
    bool exit_request;
    uint32_t interrupt_request;
    int singlestep_enabled;
    int64_t icount_extra;
    sigjmp_buf jmp_env;

    QemuMutex work_mutex;
    struct qemu_work_item *queued_work_first, *queued_work_last;

    CPUAddressSpace *cpu_ases;
    int num_ases;
    AddressSpace *as;
    MemoryRegion *memory;

    void *env_ptr; /* CPUArchState */
    struct TranslationBlock *current_tb;
    struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];
    struct GDBRegisterState *gdb_regs;
    int gdb_num_regs;
    int gdb_num_g_regs;
    QTAILQ_ENTRY(CPUState) node;

    /* ice debug support */
    QTAILQ_HEAD(breakpoints_head, CPUBreakpoint) breakpoints;

    QTAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints;
    CPUWatchpoint *watchpoint_hit;

    void *opaque;

    /* In order to avoid passing too many arguments to the MMIO helpers,
     * we store some rarely used information in the CPU context.
     */
    uintptr_t mem_io_pc;
    vaddr mem_io_vaddr;

    int kvm_fd;
    bool kvm_vcpu_dirty;
    struct KVMState *kvm_state;
    struct kvm_run *kvm_run;

    /* TODO Move common fields from CPUArchState here. */
    int cpu_index; /* used by alpha TCG */
    uint32_t halted; /* used by alpha, cris, ppc TCG */
    union {
        uint32_t u32;
        icount_decr_u16 u16;
    } icount_decr;
    uint32_t can_do_io;
    int32_t exception_index; /* used by m68k TCG */

    /* Used to keep track of an outstanding cpu throttle thread for migration
     * autoconverge
     */
    bool throttle_thread_scheduled;

    /* Note that this is accessed at the start of every TB via a negative
       offset from AREG0.  Leave this field at the end so as to make the
       (absolute value) offset as small as possible.  This reduces code
       size, especially for hosts without large memory offsets.  */
    uint32_t tcg_exit_req;
};

QTAILQ_HEAD(CPUTailQ, CPUState);
extern struct CPUTailQ cpus;
#define CPU_NEXT(cpu) QTAILQ_NEXT(cpu, node)
#define CPU_FOREACH(cpu) QTAILQ_FOREACH(cpu, &cpus, node)
#define CPU_FOREACH_SAFE(cpu, next_cpu) \
    QTAILQ_FOREACH_SAFE(cpu, &cpus, node, next_cpu)
#define CPU_FOREACH_REVERSE(cpu) \
    QTAILQ_FOREACH_REVERSE(cpu, &cpus, CPUTailQ, node)
#define first_cpu QTAILQ_FIRST(&cpus)

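/*
 * Example (illustrative only): iterating over all CPUs with the macros above,
 * e.g. to kick every vCPU out of its execution loop.
 *
 *     CPUState *cpu;
 *
 *     CPU_FOREACH(cpu) {
 *         qemu_cpu_kick(cpu);
 *     }
 */
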
extern __thread CPUState *current_cpu;

/**
 * cpu_paging_enabled:
 * @cpu: The CPU whose state is to be inspected.
 *
 * Returns: %true if paging is enabled, %false otherwise.
 */
bool cpu_paging_enabled(const CPUState *cpu);

/**
 * cpu_get_memory_mapping:
 * @cpu: The CPU whose memory mappings are to be obtained.
 * @list: Where to write the memory mappings to.
 * @errp: Pointer for reporting an #Error.
 */
void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
                            Error **errp);

/**
 * cpu_write_elf64_note:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @cpuid: ID number of the CPU
 * @opaque: pointer to the CPUState struct
 */
int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
                         int cpuid, void *opaque);

/**
 * cpu_write_elf64_qemunote:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @opaque: pointer to the CPUState struct
 */
int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                             void *opaque);

/**
 * cpu_write_elf32_note:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @cpuid: ID number of the CPU
 * @opaque: pointer to the CPUState struct
 */
int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
                         int cpuid, void *opaque);

/**
 * cpu_write_elf32_qemunote:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @opaque: pointer to the CPUState struct
 */
int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                             void *opaque);

/**
 * CPUDumpFlags:
 * @CPU_DUMP_CODE: also dump the guest code around the current program counter
 * @CPU_DUMP_FPU: dump FPU register state, not just integer
 * @CPU_DUMP_CCOP: dump info about TCG QEMU's condition code optimization state
 */
enum CPUDumpFlags {
    CPU_DUMP_CODE = 0x00010000,
    CPU_DUMP_FPU  = 0x00020000,
    CPU_DUMP_CCOP = 0x00040000,
};

/**
 * cpu_dump_state:
 * @cpu: The CPU whose state is to be dumped.
 * @f: File to dump to.
 * @cpu_fprintf: Function to dump with.
 * @flags: Flags specifying what to dump.
 *
 * Dumps CPU state.
 */
void cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
                    int flags);

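/*
 * Example (illustrative only): dumping integer and FPU register state of a
 * CPU to stderr, passing fprintf as the fprintf_function.
 *
 *     cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
 */
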
/**
 * cpu_dump_statistics:
 * @cpu: The CPU whose state is to be dumped.
 * @f: File to dump to.
 * @cpu_fprintf: Function to dump with.
 * @flags: Flags specifying what to dump.
 *
 * Dumps CPU statistics.
 */
void cpu_dump_statistics(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
                         int flags);

#ifndef CONFIG_USER_ONLY
/**
 * cpu_get_phys_page_attrs_debug:
 * @cpu: The CPU to obtain the physical page address for.
 * @addr: The virtual address.
 * @attrs: Updated on return with the memory transaction attributes to use
 *         for this access.
 *
 * Obtains the physical page corresponding to a virtual one, together
 * with the corresponding memory transaction attributes to use for the access.
 * Use it only for debugging because no protection checks are done.
 *
 * Returns: Corresponding physical page address or -1 if no page found.
 */
static inline hwaddr cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
                                                   MemTxAttrs *attrs)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->get_phys_page_attrs_debug) {
        return cc->get_phys_page_attrs_debug(cpu, addr, attrs);
    }
    /* Fallback for CPUs which don't implement the _attrs_ hook */
    *attrs = MEMTXATTRS_UNSPECIFIED;
    return cc->get_phys_page_debug(cpu, addr);
}

/**
 * cpu_get_phys_page_debug:
 * @cpu: The CPU to obtain the physical page address for.
 * @addr: The virtual address.
 *
 * Obtains the physical page corresponding to a virtual one.
 * Use it only for debugging because no protection checks are done.
 *
 * Returns: Corresponding physical page address or -1 if no page found.
 */
static inline hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr)
{
    MemTxAttrs attrs = {};

    return cpu_get_phys_page_attrs_debug(cpu, addr, &attrs);
}

/** cpu_asidx_from_attrs:
 * @cpu: CPU
 * @attrs: memory transaction attributes
 *
 * Returns the address space index specifying the CPU AddressSpace
 * to use for a memory access with the given transaction attributes.
 */
static inline int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->asidx_from_attrs) {
        return cc->asidx_from_attrs(cpu, attrs);
    }
    return 0;
}
#endif

/**
 * cpu_reset:
 * @cpu: The CPU whose state is to be reset.
 */
void cpu_reset(CPUState *cpu);

/**
 * cpu_class_by_name:
 * @typename: The CPU base type.
 * @cpu_model: The model string without any parameters.
 *
 * Looks up a CPU #ObjectClass matching name @cpu_model.
 *
 * Returns: A #CPUClass or %NULL if no matching class is found.
 */
ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model);

/**
 * cpu_generic_init:
 * @typename: The CPU base type.
 * @cpu_model: The model string including optional parameters.
 *
 * Instantiates a CPU, processes optional parameters and realizes the CPU.
 *
 * Returns: A #CPUState or %NULL if an error occurred.
 */
CPUState *cpu_generic_init(const char *typename, const char *cpu_model);

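/*
 * Example (hedged sketch): typical board code creating a CPU from the -cpu
 * model string. "my-base-cpu" is a placeholder for the target's base CPU
 * type name; error_report() is assumed from qemu/error-report.h.
 *
 *     CPUState *cs = cpu_generic_init("my-base-cpu", cpu_model);
 *     if (cs == NULL) {
 *         error_report("unable to find CPU definition '%s'", cpu_model);
 *         exit(1);
 *     }
 */
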
/**
 * cpu_has_work:
 * @cpu: The vCPU to check.
 *
 * Checks whether the CPU has work to do.
 *
 * Returns: %true if the CPU has work, %false otherwise.
 */
static inline bool cpu_has_work(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    g_assert(cc->has_work);
    return cc->has_work(cpu);
}

/**
 * qemu_cpu_is_self:
 * @cpu: The vCPU to check against.
 *
 * Checks whether the caller is executing on the vCPU thread.
 *
 * Returns: %true if called from @cpu's thread, %false otherwise.
 */
bool qemu_cpu_is_self(CPUState *cpu);

/**
 * qemu_cpu_kick:
 * @cpu: The vCPU to kick.
 *
 * Kicks @cpu's thread.
 */
void qemu_cpu_kick(CPUState *cpu);

/**
 * cpu_is_stopped:
 * @cpu: The CPU to check.
 *
 * Checks whether the CPU is stopped.
 *
 * Returns: %true if run state is not running or if artificially stopped;
 * %false otherwise.
 */
bool cpu_is_stopped(CPUState *cpu);

/**
 * run_on_cpu:
 * @cpu: The vCPU to run on.
 * @func: The function to be executed.
 * @data: Data to pass to the function.
 *
 * Schedules the function @func for execution on the vCPU @cpu.
 */
void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);

/**
 * async_run_on_cpu:
 * @cpu: The vCPU to run on.
 * @func: The function to be executed.
 * @data: Data to pass to the function.
 *
 * Schedules the function @func for execution on the vCPU @cpu asynchronously.
 */
void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);

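/*
 * Example (illustrative only): scheduling work on a vCPU thread. In this
 * version of the API the callback receives only the opaque @data pointer,
 * so pass the CPUState through @data if the callback needs it; do_work is
 * an illustrative name.
 *
 *     static void do_work(void *data)
 *     {
 *         CPUState *cpu = data;
 *         ...
 *     }
 *
 *     run_on_cpu(cpu, do_work, cpu);
 *     async_run_on_cpu(cpu, do_work, cpu);
 */
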
/**
 * qemu_get_cpu:
 * @index: The CPUState::cpu_index value of the CPU to obtain.
 *
 * Gets a CPU matching @index.
 *
 * Returns: The CPU or %NULL if there is no matching CPU.
 */
CPUState *qemu_get_cpu(int index);

/**
 * cpu_exists:
 * @id: Guest-exposed CPU ID to look up.
 *
 * Searches for a CPU with the specified ID.
 *
 * Returns: %true if the CPU is found, %false otherwise.
 */
bool cpu_exists(int64_t id);

/**
 * cpu_throttle_set:
 * @new_throttle_pct: Percent of sleep time. Valid range is 1 to 99.
 *
 * Throttles all vcpus by forcing them to sleep for the given percentage of
 * time. A throttle_percentage of 25 roughly corresponds to a 75% duty cycle
 * (for example, 10ms of sleep for every 30ms awake).
 *
 * cpu_throttle_set can be called as needed to adjust new_throttle_pct.
 * Once throttling starts, it remains in effect until cpu_throttle_stop
 * is called.
 */
void cpu_throttle_set(int new_throttle_pct);

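/*
 * Example (illustrative only): migration auto-converge style throttling.
 * A value of 25 makes each vCPU sleep for roughly a quarter of its time
 * (about 10ms asleep for every 30ms awake) until throttling is stopped.
 *
 *     cpu_throttle_set(25);
 *     ...
 *     if (cpu_throttle_active()) {
 *         cpu_throttle_stop();
 *     }
 */
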
/**
 * cpu_throttle_stop:
 *
 * Stops the vcpu throttling started by cpu_throttle_set.
 */
void cpu_throttle_stop(void);

/**
 * cpu_throttle_active:
 *
 * Returns: %true if the vcpus are currently being throttled, %false otherwise.
 */
bool cpu_throttle_active(void);

/**
 * cpu_throttle_get_percentage:
 *
 * Returns the vcpu throttle percentage. See cpu_throttle_set for details.
 *
 * Returns: The throttle percentage in range 1 to 99.
 */
int cpu_throttle_get_percentage(void);

#ifndef CONFIG_USER_ONLY

typedef void (*CPUInterruptHandler)(CPUState *, int);

extern CPUInterruptHandler cpu_interrupt_handler;

/**
 * cpu_interrupt:
 * @cpu: The CPU to set an interrupt on.
 * @mask: The interrupts to set.
 *
 * Invokes the interrupt handler.
 */
static inline void cpu_interrupt(CPUState *cpu, int mask)
{
    cpu_interrupt_handler(cpu, mask);
}

#else /* USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask);

#endif /* USER_ONLY */

#ifdef CONFIG_SOFTMMU
static inline void cpu_unassigned_access(CPUState *cpu, hwaddr addr,
                                         bool is_write, bool is_exec,
                                         int opaque, unsigned size)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->do_unassigned_access) {
        cc->do_unassigned_access(cpu, addr, is_write, is_exec, opaque, size);
    }
}

static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
                                        int is_write, int is_user,
                                        uintptr_t retaddr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cc->do_unaligned_access(cpu, addr, is_write, is_user, retaddr);
}
#endif

/**
 * cpu_set_pc:
 * @cpu: The CPU to set the program counter for.
 * @addr: Program counter value.
 *
 * Sets the program counter for a CPU.
 */
static inline void cpu_set_pc(CPUState *cpu, vaddr addr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cc->set_pc(cpu, addr);
}

/**
 * cpu_reset_interrupt:
 * @cpu: The CPU to clear the interrupt on.
 * @mask: The interrupt mask to clear.
 *
 * Resets interrupts on the vCPU @cpu.
 */
void cpu_reset_interrupt(CPUState *cpu, int mask);

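/*
 * Example (illustrative only): raising and later clearing a hardware
 * interrupt line on a vCPU. CPU_INTERRUPT_HARD is one of the
 * interrupt_request bits defined outside this header.
 *
 *     cpu_interrupt(cpu, CPU_INTERRUPT_HARD);
 *     ...
 *     cpu_reset_interrupt(cpu, CPU_INTERRUPT_HARD);
 */
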
/**
 * cpu_exit:
 * @cpu: The CPU to exit.
 *
 * Requests the CPU @cpu to exit execution.
 */
void cpu_exit(CPUState *cpu);

/**
 * cpu_resume:
 * @cpu: The CPU to resume.
 *
 * Resumes CPU, i.e. puts CPU into runnable state.
 */
void cpu_resume(CPUState *cpu);

/**
 * qemu_init_vcpu:
 * @cpu: The vCPU to initialize.
 *
 * Initializes a vCPU.
 */
void qemu_init_vcpu(CPUState *cpu);

#define SSTEP_ENABLE  0x1  /* Enable simulated HW single stepping */
#define SSTEP_NOIRQ   0x2  /* Do not use IRQ while single stepping */
#define SSTEP_NOTIMER 0x4  /* Do not run timers while single stepping */

/**
 * cpu_single_step:
 * @cpu: The CPU to set the single-step flags for.
 * @enabled: Flags to enable.
 *
 * Enables or disables single-stepping for @cpu.
 */
void cpu_single_step(CPUState *cpu, int enabled);

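/*
 * Example (illustrative only): enabling simulated single-stepping without
 * interrupt delivery, then switching it off again.
 *
 *     cpu_single_step(cpu, SSTEP_ENABLE | SSTEP_NOIRQ);
 *     ...
 *     cpu_single_step(cpu, 0);
 */
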
/* Breakpoint/watchpoint flags */
#define BP_MEM_READ           0x01
#define BP_MEM_WRITE          0x02
#define BP_MEM_ACCESS         (BP_MEM_READ | BP_MEM_WRITE)
#define BP_STOP_BEFORE_ACCESS 0x04
/* 0x08 currently unused */
#define BP_GDB                0x10
#define BP_CPU                0x20
#define BP_ANY                (BP_GDB | BP_CPU)
#define BP_WATCHPOINT_HIT_READ 0x40
#define BP_WATCHPOINT_HIT_WRITE 0x80
#define BP_WATCHPOINT_HIT (BP_WATCHPOINT_HIT_READ | BP_WATCHPOINT_HIT_WRITE)

int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint);
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags);
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint);
void cpu_breakpoint_remove_all(CPUState *cpu, int mask);

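/*
 * Example (illustrative only): planting a GDB breakpoint at a guest virtual
 * address and removing it again by reference.
 *
 *     CPUBreakpoint *bp;
 *
 *     if (cpu_breakpoint_insert(cpu, pc, BP_GDB, &bp) == 0) {
 *         ...
 *         cpu_breakpoint_remove_by_ref(cpu, bp);
 *     }
 */
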
/* Return true if PC matches an installed breakpoint.  */
static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask)
{
    CPUBreakpoint *bp;

    if (unlikely(!QTAILQ_EMPTY(&cpu->breakpoints))) {
        QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
            if (bp->pc == pc && (bp->flags & mask)) {
                return true;
            }
        }
    }
    return false;
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint);
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
                          vaddr len, int flags);
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint);
void cpu_watchpoint_remove_all(CPUState *cpu, int mask);

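/*
 * Example (illustrative only): watching a 4-byte guest location for writes
 * on behalf of the gdbstub, then removing the watchpoint with the same
 * address, length and flags it was inserted with.
 *
 *     if (cpu_watchpoint_insert(cpu, addr, 4, BP_MEM_WRITE | BP_GDB, NULL) < 0) {
 *         return -EINVAL;
 *     }
 *     ...
 *     cpu_watchpoint_remove(cpu, addr, 4, BP_MEM_WRITE | BP_GDB);
 */
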
void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...)
    GCC_FMT_ATTR(2, 3);
void cpu_exec_exit(CPUState *cpu);

#ifdef CONFIG_SOFTMMU
extern const struct VMStateDescription vmstate_cpu_common;
#else
#define vmstate_cpu_common vmstate_dummy
#endif

#define VMSTATE_CPU() {                                                     \
    .name = "parent_obj",                                                   \
    .size = sizeof(CPUState),                                               \
    .vmsd = &vmstate_cpu_common,                                            \
    .flags = VMS_STRUCT,                                                    \
    .offset = 0,                                                            \
}

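/*
 * Example (hedged sketch): embedding the common CPU state as the first field
 * of a target CPU's migration description. FooCPU and env.foo_reg are
 * illustrative names only.
 *
 *     static const VMStateDescription vmstate_foo_cpu = {
 *         .name = "cpu",
 *         .version_id = 1,
 *         .minimum_version_id = 1,
 *         .fields = (VMStateField[]) {
 *             VMSTATE_CPU(),
 *             VMSTATE_UINT32(env.foo_reg, FooCPU),
 *             VMSTATE_END_OF_LIST()
 *         }
 *     };
 */
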
#endif