/* qemu/include/hw/core/cpu.h */
/*
 * QEMU CPU model
 *
 * Copyright (c) 2012 SUSE LINUX Products GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 */
  20#ifndef QEMU_CPU_H
  21#define QEMU_CPU_H
  22
  23#include "hw/qdev-core.h"
  24#include "disas/dis-asm.h"
  25#include "exec/cpu-common.h"
  26#include "exec/hwaddr.h"
  27#include "exec/memattrs.h"
  28#include "qapi/qapi-types-run-state.h"
  29#include "qemu/bitmap.h"
  30#include "qemu/rcu_queue.h"
  31#include "qemu/queue.h"
  32#include "qemu/thread.h"
  33#include "qemu/plugin.h"
  34#include "qom/object.h"
  35
/*
 * Writer callback used by the core-dump code: emits @size bytes from @buf,
 * with @opaque carrying caller-supplied state (see cpu_write_elf*_note()).
 * NOTE(review): return-value convention (0 on success?) is not visible in
 * this header -- confirm against the dump implementation.
 */
typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size,
                                     void *opaque);
  38
  39/**
  40 * SECTION:cpu
  41 * @section_id: QEMU-cpu
  42 * @title: CPU Class
  43 * @short_description: Base class for all CPUs
  44 */
  45
  46#define TYPE_CPU "cpu"
  47
  48/* Since this macro is used a lot in hot code paths and in conjunction with
  49 * FooCPU *foo_env_get_cpu(), we deviate from usual QOM practice by using
  50 * an unchecked cast.
  51 */
  52#define CPU(obj) ((CPUState *)(obj))
  53
  54/*
  55 * The class checkers bring in CPU_GET_CLASS() which is potentially
  56 * expensive given the eventual call to
  57 * object_class_dynamic_cast_assert(). Because of this the CPUState
  58 * has a cached value for the class in cs->cc which is set up in
  59 * cpu_exec_realizefn() for use in hot code paths.
  60 */
  61typedef struct CPUClass CPUClass;
  62DECLARE_CLASS_CHECKERS(CPUClass, CPU,
  63                       TYPE_CPU)
  64
  65/**
  66 * OBJECT_DECLARE_CPU_TYPE:
  67 * @CpuInstanceType: instance struct name
  68 * @CpuClassType: class struct name
  69 * @CPU_MODULE_OBJ_NAME: the CPU name in uppercase with underscore separators
  70 *
  71 * This macro is typically used in "cpu-qom.h" header file, and will:
  72 *
  73 *   - create the typedefs for the CPU object and class structs
  74 *   - register the type for use with g_autoptr
  75 *   - provide three standard type cast functions
  76 *
  77 * The object struct and class struct need to be declared manually.
  78 */
  79#define OBJECT_DECLARE_CPU_TYPE(CpuInstanceType, CpuClassType, CPU_MODULE_OBJ_NAME) \
  80    typedef struct ArchCPU CpuInstanceType; \
  81    OBJECT_DECLARE_TYPE(ArchCPU, CpuClassType, CPU_MODULE_OBJ_NAME);
  82
/* Kind of guest memory access, distinguished by the MMU/TLB code. */
typedef enum MMUAccessType {
    MMU_DATA_LOAD  = 0,
    MMU_DATA_STORE = 1,
    MMU_INST_FETCH = 2
} MMUAccessType;
  88
  89typedef struct CPUWatchpoint CPUWatchpoint;
  90
  91/* see tcg-cpu-ops.h */
  92struct TCGCPUOps;
  93
  94/* see accel-cpu.h */
  95struct AccelCPUClass;
  96
  97/* see sysemu-cpu-ops.h */
  98struct SysemuCPUOps;
  99
 100/**
 101 * CPUClass:
 102 * @class_by_name: Callback to map -cpu command line model name to an
 103 * instantiatable CPU type.
 104 * @parse_features: Callback to parse command line arguments.
 105 * @reset_dump_flags: #CPUDumpFlags to use for reset logging.
 106 * @has_work: Callback for checking if there is work to do.
 107 * @memory_rw_debug: Callback for GDB memory access.
 108 * @dump_state: Callback for dumping state.
 109 * @get_arch_id: Callback for getting architecture-dependent CPU ID.
 110 * @set_pc: Callback for setting the Program Counter register. This
 111 *       should have the semantics used by the target architecture when
 112 *       setting the PC from a source such as an ELF file entry point;
 113 *       for example on Arm it will also set the Thumb mode bit based
 114 *       on the least significant bit of the new PC value.
 115 *       If the target behaviour here is anything other than "set
 116 *       the PC register to the value passed in" then the target must
 117 *       also implement the synchronize_from_tb hook.
 118 * @get_pc: Callback for getting the Program Counter register.
 119 *       As above, with the semantics of the target architecture.
 120 * @gdb_read_register: Callback for letting GDB read a register.
 121 * @gdb_write_register: Callback for letting GDB write a register.
 122 * @gdb_adjust_breakpoint: Callback for adjusting the address of a
 123 *       breakpoint.  Used by AVR to handle a gdb mis-feature with
 124 *       its Harvard architecture split code and data.
 125 * @gdb_num_core_regs: Number of core registers accessible to GDB.
 126 * @gdb_core_xml_file: File name for core registers GDB XML description.
 127 * @gdb_stop_before_watchpoint: Indicates whether GDB expects the CPU to stop
 128 *           before the insn which triggers a watchpoint rather than after it.
 129 * @gdb_arch_name: Optional callback that returns the architecture name known
 130 * to GDB. The caller must free the returned string with g_free.
 131 * @gdb_get_dynamic_xml: Callback to return dynamically generated XML for the
 132 *   gdb stub. Returns a pointer to the XML contents for the specified XML file
 133 *   or NULL if the CPU doesn't have a dynamically generated content for it.
 134 * @disas_set_info: Setup architecture specific components of disassembly info
 135 * @adjust_watchpoint_address: Perform a target-specific adjustment to an
 136 * address before attempting to match it against watchpoints.
 137 * @deprecation_note: If this CPUClass is deprecated, this field provides
 138 *                    related information.
 139 *
 140 * Represents a CPU family or model.
 141 */
struct CPUClass {
    /*< private >*/
    DeviceClass parent_class;
    /*< public >*/

    /* Maps a -cpu command line model name to an instantiatable CPU type. */
    ObjectClass *(*class_by_name)(const char *cpu_model);
    /* Parses command line arguments for @typename. */
    void (*parse_features)(const char *typename, char *str, Error **errp);

    /* Checks if there is work to do for this CPU. */
    bool (*has_work)(CPUState *cpu);
    /* GDB memory access. */
    int (*memory_rw_debug)(CPUState *cpu, vaddr addr,
                           uint8_t *buf, int len, bool is_write);
    /* Dumps CPU state. */
    void (*dump_state)(CPUState *cpu, FILE *, int flags);
    /* Returns the architecture-dependent CPU ID. */
    int64_t (*get_arch_id)(CPUState *cpu);
    /*
     * Sets the PC with ELF-entry-point semantics; targets with extra
     * semantics (e.g. Arm Thumb bit) must also provide synchronize_from_tb
     * (see the kernel-doc comment above).
     */
    void (*set_pc)(CPUState *cpu, vaddr value);
    /* Gets the PC, with the semantics of the target architecture. */
    vaddr (*get_pc)(CPUState *cpu);
    /* Lets GDB read/write a single register. */
    int (*gdb_read_register)(CPUState *cpu, GByteArray *buf, int reg);
    int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg);
    /* Adjusts a gdb breakpoint address (AVR Harvard-architecture quirk). */
    vaddr (*gdb_adjust_breakpoint)(CPUState *cpu, vaddr addr);

    /* File name for core registers GDB XML description. */
    const char *gdb_core_xml_file;
    /* Architecture name known to GDB; caller must g_free() the result. */
    gchar * (*gdb_arch_name)(CPUState *cpu);
    /* Dynamically generated XML for the gdb stub, or NULL if none. */
    const char * (*gdb_get_dynamic_xml)(CPUState *cpu, const char *xmlname);

    /* Sets up architecture-specific components of disassembly info. */
    void (*disas_set_info)(CPUState *cpu, disassemble_info *info);

    /* Deprecation information, if this CPUClass is deprecated. */
    const char *deprecation_note;
    /* See accel-cpu.h. */
    struct AccelCPUClass *accel_cpu;

    /* when system emulation is not available, this pointer is NULL */
    const struct SysemuCPUOps *sysemu_ops;

    /* when TCG is not available, this pointer is NULL */
    const struct TCGCPUOps *tcg_ops;

    /*
     * if not NULL, this is called in order for the CPUClass to initialize
     * class data that depends on the accelerator, see accel/accel-common.c.
     */
    void (*init_accel_cpu)(struct AccelCPUClass *accel_cpu, CPUClass *cc);

    /*
     * Keep non-pointer data at the end to minimize holes.
     */
    int reset_dump_flags;           /* #CPUDumpFlags to use for reset logging */
    int gdb_num_core_regs;          /* core registers accessible to GDB */
    bool gdb_stop_before_watchpoint; /* GDB expects stop before the insn */
};
 189
 190/*
 191 * Low 16 bits: number of cycles left, used only in icount mode.
 192 * High 16 bits: Set to -1 to force TCG to stop executing linked TBs
 193 * for this CPU and return to its top level loop (even in non-icount mode).
 194 * This allows a single read-compare-cbranch-write sequence to test
 195 * for both decrementer underflow and exceptions.
 196 */
typedef union IcountDecr {
    uint32_t u32;           /* combined view of both halves */
    struct {
#if HOST_BIG_ENDIAN
        uint16_t high;      /* set to -1 to force exit from linked TBs */
        uint16_t low;       /* cycles left; used only in icount mode */
#else
        uint16_t low;       /* cycles left; used only in icount mode */
        uint16_t high;      /* set to -1 to force exit from linked TBs */
#endif
    } u16;
} IcountDecr;
 209
/* One installed guest breakpoint; linked into CPUState::breakpoints. */
typedef struct CPUBreakpoint {
    vaddr pc;       /* guest virtual address the breakpoint is set at */
    int flags;      /* BP_* */
    QTAILQ_ENTRY(CPUBreakpoint) entry;
} CPUBreakpoint;
 215
/* One installed guest watchpoint; linked into CPUState::watchpoints. */
struct CPUWatchpoint {
    vaddr vaddr;            /* start of the watched guest virtual range */
    vaddr len;              /* length of the watched range */
    vaddr hitaddr;          /* address that triggered the hit */
    MemTxAttrs hitattrs;    /* attributes of the triggering access */
    int flags;              /* BP_* */
    QTAILQ_ENTRY(CPUWatchpoint) entry;
};
 224
#ifdef CONFIG_PLUGIN
/*
 * For plugins we sometimes need to save the resolved iotlb data before
 * the memory regions get moved around by io_writex.
 */
typedef struct SavedIOTLB {
    MemoryRegionSection *section;   /* resolved section for the access */
    hwaddr mr_offset;               /* offset of the access within @section */
} SavedIOTLB;
#endif
 235
 236struct KVMState;
 237struct kvm_run;
 238
 239struct hax_vcpu_state;
 240struct hvf_vcpu_state;
 241
 242/* work queue */
 243
/* The union type allows passing of 64 bit target pointers on 32 bit
 * hosts in a single parameter.
 */
typedef union {
    int           host_int;
    unsigned long host_ulong;
    void         *host_ptr;
    vaddr         target_ptr;
} run_on_cpu_data;

/* Convenience constructors, one per union member. */
#define RUN_ON_CPU_HOST_PTR(p)    ((run_on_cpu_data){.host_ptr = (p)})
#define RUN_ON_CPU_HOST_INT(i)    ((run_on_cpu_data){.host_int = (i)})
#define RUN_ON_CPU_HOST_ULONG(ul) ((run_on_cpu_data){.host_ulong = (ul)})
#define RUN_ON_CPU_TARGET_PTR(v)  ((run_on_cpu_data){.target_ptr = (v)})
#define RUN_ON_CPU_NULL           RUN_ON_CPU_HOST_PTR(NULL)
 259
 260typedef void (*run_on_cpu_func)(CPUState *cpu, run_on_cpu_data data);
 261
 262struct qemu_work_item;
 263
 264#define CPU_UNSET_NUMA_NODE_ID -1
 265#define CPU_TRACE_DSTATE_MAX_EVENTS 32
 266
 267/**
 268 * CPUState:
 269 * @cpu_index: CPU index (informative).
 270 * @cluster_index: Identifies which cluster this CPU is in.
 271 *   For boards which don't define clusters or for "loose" CPUs not assigned
 272 *   to a cluster this will be UNASSIGNED_CLUSTER_INDEX; otherwise it will
 273 *   be the same as the cluster-id property of the CPU object's TYPE_CPU_CLUSTER
 274 *   QOM parent.
 275 * @tcg_cflags: Pre-computed cflags for this cpu.
 276 * @nr_cores: Number of cores within this CPU package.
 277 * @nr_threads: Number of threads within this CPU.
 278 * @running: #true if CPU is currently running (lockless).
 279 * @has_waiter: #true if a CPU is currently waiting for the cpu_exec_end;
 280 * valid under cpu_list_lock.
 281 * @created: Indicates whether the CPU thread has been successfully created.
 282 * @interrupt_request: Indicates a pending interrupt request.
 283 * @halted: Nonzero if the CPU is in suspended state.
 284 * @stop: Indicates a pending stop request.
 285 * @stopped: Indicates the CPU has been artificially stopped.
 286 * @unplug: Indicates a pending CPU unplug request.
 287 * @crash_occurred: Indicates the OS reported a crash (panic) for this CPU
 288 * @singlestep_enabled: Flags for single-stepping.
 289 * @icount_extra: Instructions until next timer event.
 290 * @can_do_io: Nonzero if memory-mapped IO is safe. Deterministic execution
 291 * requires that IO only be performed on the last instruction of a TB
 292 * so that interrupts take effect immediately.
 293 * @cpu_ases: Pointer to array of CPUAddressSpaces (which define the
 294 *            AddressSpaces this CPU has)
 295 * @num_ases: number of CPUAddressSpaces in @cpu_ases
 296 * @as: Pointer to the first AddressSpace, for the convenience of targets which
 297 *      only have a single AddressSpace
 298 * @env_ptr: Pointer to subclass-specific CPUArchState field.
 299 * @icount_decr_ptr: Pointer to IcountDecr field within subclass.
 300 * @gdb_regs: Additional GDB registers.
 301 * @gdb_num_regs: Number of total registers accessible to GDB.
 302 * @gdb_num_g_regs: Number of registers in GDB 'g' packets.
 303 * @next_cpu: Next CPU sharing TB cache.
 304 * @opaque: User data.
 305 * @mem_io_pc: Host Program Counter at which the memory was accessed.
 306 * @kvm_fd: vCPU file descriptor for KVM.
 307 * @work_mutex: Lock to prevent multiple access to @work_list.
 308 * @work_list: List of pending asynchronous work.
 309 * @trace_dstate_delayed: Delayed changes to trace_dstate (includes all changes
 310 *                        to @trace_dstate).
 311 * @trace_dstate: Dynamic tracing state of events for this vCPU (bitmask).
 312 * @plugin_mask: Plugin event bitmap. Modified only via async work.
 313 * @ignore_memory_transaction_failures: Cached copy of the MachineState
 314 *    flag of the same name: allows the board to suppress calling of the
 315 *    CPU do_transaction_failed hook function.
 316 * @kvm_dirty_gfns: Points to the KVM dirty ring for this CPU when KVM dirty
 317 *    ring is enabled.
 318 * @kvm_fetch_index: Keeps the index that we last fetched from the per-vCPU
 319 *    dirty ring structure.
 320 *
 321 * State of one CPU core or thread.
 322 */
struct CPUState {
    /*< private >*/
    DeviceState parent_obj;
    /* cache to avoid expensive CPU_GET_CLASS */
    CPUClass *cc;
    /*< public >*/

    int nr_cores;               /* number of cores within this CPU package */
    int nr_threads;             /* number of threads within this CPU */

    struct QemuThread *thread;  /* host thread backing this vCPU */
#ifdef _WIN32
    HANDLE hThread;
    QemuSemaphore sem;
#endif
    int thread_id;
    /* @running is lockless; @has_waiter is valid under cpu_list_lock. */
    bool running, has_waiter;
    struct QemuCond *halt_cond;
    bool thread_kicked;
    bool created;               /* vCPU thread successfully created */
    bool stop;                  /* pending stop request */
    bool stopped;               /* CPU has been artificially stopped */

    /* Should CPU start in powered-off state? */
    bool start_powered_off;

    bool unplug;                /* pending CPU unplug request */
    bool crash_occurred;        /* OS reported a crash (panic) for this CPU */
    bool exit_request;
    bool in_exclusive_context;  /* see cpu_in_exclusive_context() */
    uint32_t cflags_next_tb;
    /* updates protected by BQL */
    uint32_t interrupt_request; /* pending interrupt request */
    int singlestep_enabled;     /* flags for single-stepping */
    int64_t icount_budget;
    int64_t icount_extra;       /* instructions until next timer event */
    uint64_t random_seed;
    sigjmp_buf jmp_env;

    QemuMutex work_mutex;       /* protects @work_list */
    QSIMPLEQ_HEAD(, qemu_work_item) work_list;  /* pending async work */

    CPUAddressSpace *cpu_ases;  /* array of this CPU's AddressSpaces */
    int num_ases;               /* number of entries in @cpu_ases */
    AddressSpace *as;           /* first AddressSpace, for single-AS targets */
    MemoryRegion *memory;

    CPUArchState *env_ptr;      /* subclass-specific CPUArchState field */
    IcountDecr *icount_decr_ptr; /* IcountDecr field within subclass */

    CPUJumpCache *tb_jmp_cache;

    struct GDBRegisterState *gdb_regs;  /* additional GDB registers */
    int gdb_num_regs;           /* total registers accessible to GDB */
    int gdb_num_g_regs;         /* registers in GDB 'g' packets */
    QTAILQ_ENTRY(CPUState) node;

    /* ice debug support */
    QTAILQ_HEAD(, CPUBreakpoint) breakpoints;

    QTAILQ_HEAD(, CPUWatchpoint) watchpoints;
    CPUWatchpoint *watchpoint_hit;

    void *opaque;               /* user data */

    /* In order to avoid passing too many arguments to the MMIO helpers,
     * we store some rarely used information in the CPU context.
     */
    uintptr_t mem_io_pc;        /* host PC at which the memory was accessed */

    /* Only used in KVM */
    int kvm_fd;                 /* vCPU file descriptor for KVM */
    struct KVMState *kvm_state;
    struct kvm_run *kvm_run;
    struct kvm_dirty_gfn *kvm_dirty_gfns;   /* KVM dirty ring, if enabled */
    uint32_t kvm_fetch_index;   /* last index fetched from the dirty ring */
    uint64_t dirty_pages;

    /* Used for events with 'vcpu' and *without* the 'disabled' properties */
    DECLARE_BITMAP(trace_dstate_delayed, CPU_TRACE_DSTATE_MAX_EVENTS);
    DECLARE_BITMAP(trace_dstate, CPU_TRACE_DSTATE_MAX_EVENTS);

    /* Plugin event bitmap; modified only via async work. */
    DECLARE_BITMAP(plugin_mask, QEMU_PLUGIN_EV_MAX);

#ifdef CONFIG_PLUGIN
    GArray *plugin_mem_cbs;
    /* saved iotlb data from io_writex */
    SavedIOTLB saved_iotlb;
#endif

    /* TODO Move common fields from CPUArchState here. */
    int cpu_index;              /* CPU index (informative) */
    int cluster_index;          /* cluster id, or UNASSIGNED_CLUSTER_INDEX */
    uint32_t tcg_cflags;        /* pre-computed cflags for this cpu */
    uint32_t halted;            /* nonzero if the CPU is in suspended state */
    uint32_t can_do_io;         /* nonzero if memory-mapped IO is safe */
    int32_t exception_index;

    /* shared by kvm, hax and hvf */
    bool vcpu_dirty;

    /* Used to keep track of an outstanding cpu throttle thread for migration
     * autoconverge
     */
    bool throttle_thread_scheduled;

    /*
     * Sleep throttle_us_per_full microseconds once dirty ring is full
     * if dirty page rate limit is enabled.
     */
    int64_t throttle_us_per_full;

    /* cached copy of the MachineState flag of the same name */
    bool ignore_memory_transaction_failures;

    /* Used for user-only emulation of prctl(PR_SET_UNALIGN). */
    bool prctl_unalign_sigbus;

    struct hax_vcpu_state *hax_vcpu;

    struct hvf_vcpu_state *hvf;

    /* track IOMMUs whose translations we've cached in the TCG TLB */
    GArray *iommu_notifiers;
};
 447
 448typedef QTAILQ_HEAD(CPUTailQ, CPUState) CPUTailQ;
 449extern CPUTailQ cpus;
 450
 451#define first_cpu        QTAILQ_FIRST_RCU(&cpus)
 452#define CPU_NEXT(cpu)    QTAILQ_NEXT_RCU(cpu, node)
 453#define CPU_FOREACH(cpu) QTAILQ_FOREACH_RCU(cpu, &cpus, node)
 454#define CPU_FOREACH_SAFE(cpu, next_cpu) \
 455    QTAILQ_FOREACH_SAFE_RCU(cpu, &cpus, node, next_cpu)
 456
 457extern __thread CPUState *current_cpu;
 458
 459/**
 460 * qemu_tcg_mttcg_enabled:
 461 * Check whether we are running MultiThread TCG or not.
 462 *
 463 * Returns: %true if we are in MTTCG mode %false otherwise.
 464 */
 465extern bool mttcg_enabled;
 466#define qemu_tcg_mttcg_enabled() (mttcg_enabled)
 467
 468/**
 469 * cpu_paging_enabled:
 470 * @cpu: The CPU whose state is to be inspected.
 471 *
 472 * Returns: %true if paging is enabled, %false otherwise.
 473 */
 474bool cpu_paging_enabled(const CPUState *cpu);
 475
 476/**
 477 * cpu_get_memory_mapping:
 478 * @cpu: The CPU whose memory mappings are to be obtained.
 479 * @list: Where to write the memory mappings to.
 480 * @errp: Pointer for reporting an #Error.
 481 */
 482void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
 483                            Error **errp);
 484
 485#if !defined(CONFIG_USER_ONLY)
 486
 487/**
 488 * cpu_write_elf64_note:
 489 * @f: pointer to a function that writes memory to a file
 490 * @cpu: The CPU whose memory is to be dumped
 491 * @cpuid: ID number of the CPU
 492 * @opaque: pointer to the CPUState struct
 493 */
 494int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
 495                         int cpuid, void *opaque);
 496
 497/**
 498 * cpu_write_elf64_qemunote:
 499 * @f: pointer to a function that writes memory to a file
 500 * @cpu: The CPU whose memory is to be dumped
 501 * @cpuid: ID number of the CPU
 502 * @opaque: pointer to the CPUState struct
 503 */
 504int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
 505                             void *opaque);
 506
 507/**
 508 * cpu_write_elf32_note:
 509 * @f: pointer to a function that writes memory to a file
 510 * @cpu: The CPU whose memory is to be dumped
 511 * @cpuid: ID number of the CPU
 512 * @opaque: pointer to the CPUState struct
 513 */
 514int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
 515                         int cpuid, void *opaque);
 516
 517/**
 518 * cpu_write_elf32_qemunote:
 519 * @f: pointer to a function that writes memory to a file
 520 * @cpu: The CPU whose memory is to be dumped
 521 * @cpuid: ID number of the CPU
 522 * @opaque: pointer to the CPUState struct
 523 */
 524int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
 525                             void *opaque);
 526
 527/**
 528 * cpu_get_crash_info:
 529 * @cpu: The CPU to get crash information for
 530 *
 531 * Gets the previously saved crash information.
 532 * Caller is responsible for freeing the data.
 533 */
 534GuestPanicInformation *cpu_get_crash_info(CPUState *cpu);
 535
 536#endif /* !CONFIG_USER_ONLY */
 537
 538/**
 539 * CPUDumpFlags:
 540 * @CPU_DUMP_CODE:
 541 * @CPU_DUMP_FPU: dump FPU register state, not just integer
 542 * @CPU_DUMP_CCOP: dump info about TCG QEMU's condition code optimization state
 543 */
 544enum CPUDumpFlags {
 545    CPU_DUMP_CODE = 0x00010000,
 546    CPU_DUMP_FPU  = 0x00020000,
 547    CPU_DUMP_CCOP = 0x00040000,
 548};
 549
 550/**
 551 * cpu_dump_state:
 552 * @cpu: The CPU whose state is to be dumped.
 553 * @f: If non-null, dump to this stream, else to current print sink.
 554 *
 555 * Dumps CPU state.
 556 */
 557void cpu_dump_state(CPUState *cpu, FILE *f, int flags);
 558
 559#ifndef CONFIG_USER_ONLY
 560/**
 561 * cpu_get_phys_page_attrs_debug:
 562 * @cpu: The CPU to obtain the physical page address for.
 563 * @addr: The virtual address.
 564 * @attrs: Updated on return with the memory transaction attributes to use
 565 *         for this access.
 566 *
 567 * Obtains the physical page corresponding to a virtual one, together
 568 * with the corresponding memory transaction attributes to use for the access.
 569 * Use it only for debugging because no protection checks are done.
 570 *
 571 * Returns: Corresponding physical page address or -1 if no page found.
 572 */
 573hwaddr cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
 574                                     MemTxAttrs *attrs);
 575
 576/**
 577 * cpu_get_phys_page_debug:
 578 * @cpu: The CPU to obtain the physical page address for.
 579 * @addr: The virtual address.
 580 *
 581 * Obtains the physical page corresponding to a virtual one.
 582 * Use it only for debugging because no protection checks are done.
 583 *
 584 * Returns: Corresponding physical page address or -1 if no page found.
 585 */
 586hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
 587
 588/** cpu_asidx_from_attrs:
 589 * @cpu: CPU
 590 * @attrs: memory transaction attributes
 591 *
 592 * Returns the address space index specifying the CPU AddressSpace
 593 * to use for a memory access with the given transaction attributes.
 594 */
 595int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs);
 596
 597/**
 598 * cpu_virtio_is_big_endian:
 599 * @cpu: CPU
 600
 601 * Returns %true if a CPU which supports runtime configurable endianness
 602 * is currently big-endian.
 603 */
 604bool cpu_virtio_is_big_endian(CPUState *cpu);
 605
 606#endif /* CONFIG_USER_ONLY */
 607
 608/**
 609 * cpu_list_add:
 610 * @cpu: The CPU to be added to the list of CPUs.
 611 */
 612void cpu_list_add(CPUState *cpu);
 613
 614/**
 615 * cpu_list_remove:
 616 * @cpu: The CPU to be removed from the list of CPUs.
 617 */
 618void cpu_list_remove(CPUState *cpu);
 619
 620/**
 621 * cpu_reset:
 622 * @cpu: The CPU whose state is to be reset.
 623 */
 624void cpu_reset(CPUState *cpu);
 625
 626/**
 627 * cpu_class_by_name:
 628 * @typename: The CPU base type.
 629 * @cpu_model: The model string without any parameters.
 630 *
 631 * Looks up a CPU #ObjectClass matching name @cpu_model.
 632 *
 633 * Returns: A #CPUClass or %NULL if not matching class is found.
 634 */
 635ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model);
 636
 637/**
 638 * cpu_create:
 639 * @typename: The CPU type.
 640 *
 641 * Instantiates a CPU and realizes the CPU.
 642 *
 643 * Returns: A #CPUState or %NULL if an error occurred.
 644 */
 645CPUState *cpu_create(const char *typename);
 646
 647/**
 648 * parse_cpu_option:
 649 * @cpu_option: The -cpu option including optional parameters.
 650 *
 651 * processes optional parameters and registers them as global properties
 652 *
 653 * Returns: type of CPU to create or prints error and terminates process
 654 *          if an error occurred.
 655 */
 656const char *parse_cpu_option(const char *cpu_option);
 657
 658/**
 659 * cpu_has_work:
 660 * @cpu: The vCPU to check.
 661 *
 662 * Checks whether the CPU has work to do.
 663 *
 664 * Returns: %true if the CPU has work, %false otherwise.
 665 */
 666static inline bool cpu_has_work(CPUState *cpu)
 667{
 668    CPUClass *cc = CPU_GET_CLASS(cpu);
 669
 670    g_assert(cc->has_work);
 671    return cc->has_work(cpu);
 672}
 673
 674/**
 675 * qemu_cpu_is_self:
 676 * @cpu: The vCPU to check against.
 677 *
 678 * Checks whether the caller is executing on the vCPU thread.
 679 *
 680 * Returns: %true if called from @cpu's thread, %false otherwise.
 681 */
 682bool qemu_cpu_is_self(CPUState *cpu);
 683
 684/**
 685 * qemu_cpu_kick:
 686 * @cpu: The vCPU to kick.
 687 *
 688 * Kicks @cpu's thread.
 689 */
 690void qemu_cpu_kick(CPUState *cpu);
 691
 692/**
 693 * cpu_is_stopped:
 694 * @cpu: The CPU to check.
 695 *
 696 * Checks whether the CPU is stopped.
 697 *
 698 * Returns: %true if run state is not running or if artificially stopped;
 699 * %false otherwise.
 700 */
 701bool cpu_is_stopped(CPUState *cpu);
 702
 703/**
 704 * do_run_on_cpu:
 705 * @cpu: The vCPU to run on.
 706 * @func: The function to be executed.
 707 * @data: Data to pass to the function.
 708 * @mutex: Mutex to release while waiting for @func to run.
 709 *
 710 * Used internally in the implementation of run_on_cpu.
 711 */
 712void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,
 713                   QemuMutex *mutex);
 714
 715/**
 716 * run_on_cpu:
 717 * @cpu: The vCPU to run on.
 718 * @func: The function to be executed.
 719 * @data: Data to pass to the function.
 720 *
 721 * Schedules the function @func for execution on the vCPU @cpu.
 722 */
 723void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
 724
 725/**
 726 * async_run_on_cpu:
 727 * @cpu: The vCPU to run on.
 728 * @func: The function to be executed.
 729 * @data: Data to pass to the function.
 730 *
 731 * Schedules the function @func for execution on the vCPU @cpu asynchronously.
 732 */
 733void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
 734
 735/**
 736 * async_safe_run_on_cpu:
 737 * @cpu: The vCPU to run on.
 738 * @func: The function to be executed.
 739 * @data: Data to pass to the function.
 740 *
 741 * Schedules the function @func for execution on the vCPU @cpu asynchronously,
 742 * while all other vCPUs are sleeping.
 743 *
 744 * Unlike run_on_cpu and async_run_on_cpu, the function is run outside the
 745 * BQL.
 746 */
 747void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
 748
 749/**
 750 * cpu_in_exclusive_context()
 751 * @cpu: The vCPU to check
 752 *
 753 * Returns true if @cpu is an exclusive context, for example running
 754 * something which has previously been queued via async_safe_run_on_cpu().
 755 */
 756static inline bool cpu_in_exclusive_context(const CPUState *cpu)
 757{
 758    return cpu->in_exclusive_context;
 759}
 760
 761/**
 762 * qemu_get_cpu:
 763 * @index: The CPUState@cpu_index value of the CPU to obtain.
 764 *
 765 * Gets a CPU matching @index.
 766 *
 767 * Returns: The CPU or %NULL if there is no matching CPU.
 768 */
 769CPUState *qemu_get_cpu(int index);
 770
 771/**
 772 * cpu_exists:
 773 * @id: Guest-exposed CPU ID to lookup.
 774 *
 775 * Search for CPU with specified ID.
 776 *
 777 * Returns: %true - CPU is found, %false - CPU isn't found.
 778 */
 779bool cpu_exists(int64_t id);
 780
 781/**
 782 * cpu_by_arch_id:
 783 * @id: Guest-exposed CPU ID of the CPU to obtain.
 784 *
 785 * Get a CPU with matching @id.
 786 *
 787 * Returns: The CPU or %NULL if there is no matching CPU.
 788 */
 789CPUState *cpu_by_arch_id(int64_t id);
 790
 791/**
 792 * cpu_interrupt:
 793 * @cpu: The CPU to set an interrupt on.
 794 * @mask: The interrupts to set.
 795 *
 796 * Invokes the interrupt handler.
 797 */
 798
 799void cpu_interrupt(CPUState *cpu, int mask);
 800
 801/**
 802 * cpu_set_pc:
 803 * @cpu: The CPU to set the program counter for.
 804 * @addr: Program counter value.
 805 *
 806 * Sets the program counter for a CPU.
 807 */
 808static inline void cpu_set_pc(CPUState *cpu, vaddr addr)
 809{
 810    CPUClass *cc = CPU_GET_CLASS(cpu);
 811
 812    cc->set_pc(cpu, addr);
 813}
 814
 815/**
 816 * cpu_reset_interrupt:
 817 * @cpu: The CPU to clear the interrupt on.
 818 * @mask: The interrupt mask to clear.
 819 *
 820 * Resets interrupts on the vCPU @cpu.
 821 */
 822void cpu_reset_interrupt(CPUState *cpu, int mask);
 823
 824/**
 825 * cpu_exit:
 826 * @cpu: The CPU to exit.
 827 *
 828 * Requests the CPU @cpu to exit execution.
 829 */
 830void cpu_exit(CPUState *cpu);
 831
 832/**
 833 * cpu_resume:
 834 * @cpu: The CPU to resume.
 835 *
 836 * Resumes CPU, i.e. puts CPU into runnable state.
 837 */
 838void cpu_resume(CPUState *cpu);
 839
 840/**
 841 * cpu_remove_sync:
 842 * @cpu: The CPU to remove.
 843 *
 844 * Requests the CPU to be removed and waits till it is removed.
 845 */
 846void cpu_remove_sync(CPUState *cpu);
 847
 848/**
 849 * process_queued_cpu_work() - process all items on CPU work queue
 850 * @cpu: The CPU which work queue to process.
 851 */
 852void process_queued_cpu_work(CPUState *cpu);
 853
 854/**
 855 * cpu_exec_start:
 856 * @cpu: The CPU for the current thread.
 857 *
 858 * Record that a CPU has started execution and can be interrupted with
 859 * cpu_exit.
 860 */
 861void cpu_exec_start(CPUState *cpu);
 862
 863/**
 864 * cpu_exec_end:
 865 * @cpu: The CPU for the current thread.
 866 *
 867 * Record that a CPU has stopped execution and exclusive sections
 868 * can be executed without interrupting it.
 869 */
 870void cpu_exec_end(CPUState *cpu);
 871
 872/**
 873 * start_exclusive:
 874 *
 875 * Wait for a concurrent exclusive section to end, and then start
 876 * a section of work that is run while other CPUs are not running
 877 * between cpu_exec_start and cpu_exec_end.  CPUs that are running
 878 * cpu_exec are exited immediately.  CPUs that call cpu_exec_start
 879 * during the exclusive section go to sleep until this CPU calls
 880 * end_exclusive.
 881 */
 882void start_exclusive(void);
 883
 884/**
 885 * end_exclusive:
 886 *
 887 * Concludes an exclusive execution section started by start_exclusive.
 888 */
 889void end_exclusive(void);
 890
 891/**
 892 * qemu_init_vcpu:
 893 * @cpu: The vCPU to initialize.
 894 *
 895 * Initializes a vCPU.
 896 */
 897void qemu_init_vcpu(CPUState *cpu);
 898
 899#define SSTEP_ENABLE  0x1  /* Enable simulated HW single stepping */
 900#define SSTEP_NOIRQ   0x2  /* Do not use IRQ while single stepping */
 901#define SSTEP_NOTIMER 0x4  /* Do not Timers while single stepping */
 902
 903/**
 904 * cpu_single_step:
 905 * @cpu: CPU to the flags for.
 906 * @enabled: Flags to enable.
 907 *
 908 * Enables or disables single-stepping for @cpu.
 909 */
 910void cpu_single_step(CPUState *cpu, int enabled);
 911
 912/* Breakpoint/watchpoint flags */
 913#define BP_MEM_READ           0x01
 914#define BP_MEM_WRITE          0x02
 915#define BP_MEM_ACCESS         (BP_MEM_READ | BP_MEM_WRITE)
 916#define BP_STOP_BEFORE_ACCESS 0x04
 917/* 0x08 currently unused */
 918#define BP_GDB                0x10
 919#define BP_CPU                0x20
 920#define BP_ANY                (BP_GDB | BP_CPU)
 921#define BP_WATCHPOINT_HIT_READ 0x40
 922#define BP_WATCHPOINT_HIT_WRITE 0x80
 923#define BP_WATCHPOINT_HIT (BP_WATCHPOINT_HIT_READ | BP_WATCHPOINT_HIT_WRITE)
 924
 925int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
 926                          CPUBreakpoint **breakpoint);
 927int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags);
 928void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint);
 929void cpu_breakpoint_remove_all(CPUState *cpu, int mask);
 930
 931/* Return true if PC matches an installed breakpoint.  */
 932static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask)
 933{
 934    CPUBreakpoint *bp;
 935
 936    if (unlikely(!QTAILQ_EMPTY(&cpu->breakpoints))) {
 937        QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
 938            if (bp->pc == pc && (bp->flags & mask)) {
 939                return true;
 940            }
 941        }
 942    }
 943    return false;
 944}
 945
 946#ifdef CONFIG_USER_ONLY
/* User-only stub: watchpoints are unsupported, so insertion fails. */
static inline int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                                        int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
 952
/* User-only stub: no watchpoint can exist, so removal fails. */
static inline int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
                                        vaddr len, int flags)
{
    return -ENOSYS;
}
 958
/* User-only stub: nothing to remove. */
static inline void cpu_watchpoint_remove_by_ref(CPUState *cpu,
                                                CPUWatchpoint *wp)
{
}
 963
/* User-only stub: nothing to remove. */
static inline void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}
 967
/* User-only stub: no watchpoints, so a check can never trigger. */
static inline void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
                                        MemTxAttrs atr, int fl, uintptr_t ra)
{
}
 972
/* User-only stub: no watchpoint can match, so no flags apply. */
static inline int cpu_watchpoint_address_matches(CPUState *cpu,
                                                 vaddr addr, vaddr len)
{
    return 0;
}
 978#else
/*
 * Watchpoint management (system emulation only; the user-only stubs
 * above fail with -ENOSYS).  @flags and @mask take the BP_* values
 * defined above.
 */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint);
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
                          vaddr len, int flags);
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint);
void cpu_watchpoint_remove_all(CPUState *cpu, int mask);
 985
 986/**
 987 * cpu_check_watchpoint:
 988 * @cpu: cpu context
 989 * @addr: guest virtual address
 990 * @len: access length
 991 * @attrs: memory access attributes
 992 * @flags: watchpoint access type
 993 * @ra: unwind return address
 994 *
 995 * Check for a watchpoint hit in [addr, addr+len) of the type
 996 * specified by @flags.  Exit via exception with a hit.
 997 */
 998void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
 999                          MemTxAttrs attrs, int flags, uintptr_t ra);
1000
1001/**
1002 * cpu_watchpoint_address_matches:
1003 * @cpu: cpu context
1004 * @addr: guest virtual address
1005 * @len: access length
1006 *
1007 * Return the watchpoint flags that apply to [addr, addr+len).
1008 * If no watchpoint is registered for the range, the result is 0.
1009 */
1010int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len);
1011#endif
1012
1013/**
1014 * cpu_get_address_space:
1015 * @cpu: CPU to get address space from
1016 * @asidx: index identifying which address space to get
1017 *
1018 * Return the requested address space of this CPU. @asidx
1019 * specifies which address space to read.
1020 */
1021AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx);
1022
/**
 * cpu_abort:
 * @cpu: CPU hitting the fatal condition.
 * @fmt: printf-style format string for the error message
 *       (checked via G_GNUC_PRINTF(2, 3)).
 *
 * Report a fatal error for @cpu and terminate; never returns.
 */
G_NORETURN void cpu_abort(CPUState *cpu, const char *fmt, ...)
    G_GNUC_PRINTF(2, 3);
1025
/* $(top_srcdir)/cpu.c -- common CPU qdev lifecycle hooks. */
void cpu_class_init_props(DeviceClass *dc);   /* register common properties on @dc */
void cpu_exec_initfn(CPUState *cpu);          /* instance-init-time setup */
void cpu_exec_realizefn(CPUState *cpu, Error **errp);  /* errors via @errp */
void cpu_exec_unrealizefn(CPUState *cpu);     /* undo cpu_exec_realizefn() */
1031
1032/**
1033 * target_words_bigendian:
1034 * Returns true if the (default) endianness of the target is big endian,
1035 * false otherwise. Note that in target-specific code, you can use
1036 * TARGET_BIG_ENDIAN directly instead. On the other hand, common
1037 * code should normally never need to know about the endianness of the
1038 * target, so please do *not* use this function unless you know very well
1039 * what you are doing!
1040 */
1041bool target_words_bigendian(void);
1042
1043void page_size_init(void);
1044
1045#ifdef NEED_CPU_H
1046
1047#ifdef CONFIG_SOFTMMU
1048
extern const VMStateDescription vmstate_cpu_common;

/*
 * Embed the common CPUState migration state (vmstate_cpu_common) as the
 * "parent_obj" struct field of a target CPU's VMStateDescription.
 */
#define VMSTATE_CPU() {                                                     \
    .name = "parent_obj",                                                   \
    .size = sizeof(CPUState),                                               \
    .vmsd = &vmstate_cpu_common,                                            \
    .flags = VMS_STRUCT,                                                    \
    .offset = 0,                                                            \
}
1058#endif /* CONFIG_SOFTMMU */
1059
1060#endif /* NEED_CPU_H */
1061
/* Sentinel values meaning "index not assigned yet". */
#define UNASSIGNED_CPU_INDEX -1
#define UNASSIGNED_CLUSTER_INDEX -1
1064
1065#endif
1066