qemu/include/qom/cpu.h
<<
>>
Prefs
   1/*
   2 * QEMU CPU model
   3 *
   4 * Copyright (c) 2012 SUSE LINUX Products GmbH
   5 *
   6 * This program is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU General Public License
   8 * as published by the Free Software Foundation; either version 2
   9 * of the License, or (at your option) any later version.
  10 *
  11 * This program is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  14 * GNU General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * along with this program; if not, see
  18 * <http://www.gnu.org/licenses/gpl-2.0.html>
  19 */
  20#ifndef QEMU_CPU_H
  21#define QEMU_CPU_H
  22
  23#include "hw/qdev-core.h"
  24#include "disas/bfd.h"
  25#include "exec/hwaddr.h"
  26#include "exec/memattrs.h"
  27#include "qapi/qapi-types-run-state.h"
  28#include "qemu/bitmap.h"
  29#include "qemu/fprintf-fn.h"
  30#include "qemu/rcu_queue.h"
  31#include "qemu/queue.h"
  32#include "qemu/thread.h"
  33
/* Writer callback used when producing VM coredumps; returns 0 on success
 * (see the write_elf*_note hooks in CPUClass below). */
typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size,
                                     void *opaque);

/**
 * vaddr:
 * Type wide enough to contain any #target_ulong virtual address.
 */
typedef uint64_t vaddr;
/* printf conversion specifiers and maximum value for the vaddr type */
#define VADDR_PRId PRId64
#define VADDR_PRIu PRIu64
#define VADDR_PRIo PRIo64
#define VADDR_PRIx PRIx64
#define VADDR_PRIX PRIX64
#define VADDR_MAX UINT64_MAX
  48
  49/**
  50 * SECTION:cpu
  51 * @section_id: QEMU-cpu
  52 * @title: CPU Class
  53 * @short_description: Base class for all CPUs
  54 */
  55
#define TYPE_CPU "cpu"

/* Since this macro is used a lot in hot code paths and in conjunction with
 * FooCPU *foo_env_get_cpu(), we deviate from usual QOM practice by using
 * an unchecked cast.
 */
#define CPU(obj) ((CPUState *)(obj))

#define CPU_CLASS(class) OBJECT_CLASS_CHECK(CPUClass, (class), TYPE_CPU)
#define CPU_GET_CLASS(obj) OBJECT_GET_CLASS(CPUClass, (obj), TYPE_CPU)

/* Kind of memory access; passed to the MMU fault / unaligned-access /
 * transaction-failed hooks declared in CPUClass below. */
typedef enum MMUAccessType {
    MMU_DATA_LOAD  = 0,
    MMU_DATA_STORE = 1,
    MMU_INST_FETCH = 2
} MMUAccessType;
  72
typedef struct CPUWatchpoint CPUWatchpoint;

/* Legacy hook type for unassigned-access handling; used for the deprecated
 * CPUClass::do_unassigned_access (new targets should implement
 * do_transaction_failed instead). */
typedef void (*CPUUnassignedAccess)(CPUState *cpu, hwaddr addr,
                                    bool is_write, bool is_exec, int opaque,
                                    unsigned size);

struct TranslationBlock;
  80
  81/**
  82 * CPUClass:
  83 * @class_by_name: Callback to map -cpu command line model name to an
  84 * instantiatable CPU type.
  85 * @parse_features: Callback to parse command line arguments.
  86 * @reset: Callback to reset the #CPUState to its initial state.
  87 * @reset_dump_flags: #CPUDumpFlags to use for reset logging.
  88 * @has_work: Callback for checking if there is work to do.
  89 * @do_interrupt: Callback for interrupt handling.
  90 * @do_unassigned_access: Callback for unassigned access handling.
  91 * (this is deprecated: new targets should use do_transaction_failed instead)
  92 * @do_unaligned_access: Callback for unaligned access handling, if
  93 * the target defines #ALIGNED_ONLY.
  94 * @do_transaction_failed: Callback for handling failed memory transactions
  95 * (ie bus faults or external aborts; not MMU faults)
  96 * @virtio_is_big_endian: Callback to return %true if a CPU which supports
  97 * runtime configurable endianness is currently big-endian. Non-configurable
  98 * CPUs can use the default implementation of this method. This method should
  99 * not be used by any callers other than the pre-1.0 virtio devices.
 100 * @memory_rw_debug: Callback for GDB memory access.
 101 * @dump_state: Callback for dumping state.
 102 * @dump_statistics: Callback for dumping statistics.
 103 * @get_arch_id: Callback for getting architecture-dependent CPU ID.
 104 * @get_paging_enabled: Callback for inquiring whether paging is enabled.
 105 * @get_memory_mapping: Callback for obtaining the memory mappings.
 106 * @set_pc: Callback for setting the Program Counter register. This
 107 *       should have the semantics used by the target architecture when
 108 *       setting the PC from a source such as an ELF file entry point;
 109 *       for example on Arm it will also set the Thumb mode bit based
 110 *       on the least significant bit of the new PC value.
 111 *       If the target behaviour here is anything other than "set
 112 *       the PC register to the value passed in" then the target must
 113 *       also implement the synchronize_from_tb hook.
 114 * @synchronize_from_tb: Callback for synchronizing state from a TCG
 115 *       #TranslationBlock. This is called when we abandon execution
 116 *       of a TB before starting it, and must set all parts of the CPU
 117 *       state which the previous TB in the chain may not have updated.
 118 *       This always includes at least the program counter; some targets
 119 *       will need to do more. If this hook is not implemented then the
 120 *       default is to call @set_pc(tb->pc).
 121 * @handle_mmu_fault: Callback for handling an MMU fault.
 122 * @get_phys_page_debug: Callback for obtaining a physical address.
 123 * @get_phys_page_attrs_debug: Callback for obtaining a physical address and the
 124 *       associated memory transaction attributes to use for the access.
 125 *       CPUs which use memory transaction attributes should implement this
 126 *       instead of get_phys_page_debug.
 127 * @asidx_from_attrs: Callback to return the CPU AddressSpace to use for
 128 *       a memory access with the specified memory transaction attributes.
 129 * @gdb_read_register: Callback for letting GDB read a register.
 130 * @gdb_write_register: Callback for letting GDB write a register.
 131 * @debug_check_watchpoint: Callback: return true if the architectural
 132 *       watchpoint whose address has matched should really fire.
 133 * @debug_excp_handler: Callback for handling debug exceptions.
 134 * @write_elf64_note: Callback for writing a CPU-specific ELF note to a
 135 * 64-bit VM coredump.
 * @write_elf64_qemunote: Callback for writing a CPU- and QEMU-specific ELF
 * note to a 64-bit VM coredump.
 138 * @write_elf32_note: Callback for writing a CPU-specific ELF note to a
 139 * 32-bit VM coredump.
 140 * @write_elf32_qemunote: Callback for writing a CPU- and QEMU-specific ELF
 141 * note to a 32-bit VM coredump.
 142 * @vmsd: State description for migration.
 143 * @gdb_num_core_regs: Number of core registers accessible to GDB.
 144 * @gdb_core_xml_file: File name for core registers GDB XML description.
 145 * @gdb_stop_before_watchpoint: Indicates whether GDB expects the CPU to stop
 146 *           before the insn which triggers a watchpoint rather than after it.
 147 * @gdb_arch_name: Optional callback that returns the architecture name known
 148 * to GDB. The caller must free the returned string with g_free.
 149 * @gdb_get_dynamic_xml: Callback to return dynamically generated XML for the
 150 *   gdb stub. Returns a pointer to the XML contents for the specified XML file
 151 *   or NULL if the CPU doesn't have a dynamically generated content for it.
 152 * @cpu_exec_enter: Callback for cpu_exec preparation.
 153 * @cpu_exec_exit: Callback for cpu_exec cleanup.
 154 * @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec.
 155 * @disas_set_info: Setup architecture specific components of disassembly info
 156 * @adjust_watchpoint_address: Perform a target-specific adjustment to an
 157 * address before attempting to match it against watchpoints.
 158 *
 159 * Represents a CPU family or model.
 160 */
typedef struct CPUClass {
    /*< private >*/
    DeviceClass parent_class;
    /*< public >*/

    ObjectClass *(*class_by_name)(const char *cpu_model);
    void (*parse_features)(const char *typename, char *str, Error **errp);

    void (*reset)(CPUState *cpu);
    int reset_dump_flags;
    bool (*has_work)(CPUState *cpu);
    void (*do_interrupt)(CPUState *cpu);
    CPUUnassignedAccess do_unassigned_access;
    void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
                                MMUAccessType access_type,
                                int mmu_idx, uintptr_t retaddr);
    void (*do_transaction_failed)(CPUState *cpu, hwaddr physaddr, vaddr addr,
                                  unsigned size, MMUAccessType access_type,
                                  int mmu_idx, MemTxAttrs attrs,
                                  MemTxResult response, uintptr_t retaddr);
    bool (*virtio_is_big_endian)(CPUState *cpu);
    int (*memory_rw_debug)(CPUState *cpu, vaddr addr,
                           uint8_t *buf, int len, bool is_write);
    void (*dump_state)(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
                       int flags);
    /* Retrieve a guest panic/crash description; the caller is responsible
     * for freeing the returned data (see cpu_get_crash_info() below). */
    GuestPanicInformation* (*get_crash_info)(CPUState *cpu);
    void (*dump_statistics)(CPUState *cpu, FILE *f,
                            fprintf_function cpu_fprintf, int flags);
    int64_t (*get_arch_id)(CPUState *cpu);
    bool (*get_paging_enabled)(const CPUState *cpu);
    void (*get_memory_mapping)(CPUState *cpu, MemoryMappingList *list,
                               Error **errp);
    void (*set_pc)(CPUState *cpu, vaddr value);
    void (*synchronize_from_tb)(CPUState *cpu, struct TranslationBlock *tb);
    int (*handle_mmu_fault)(CPUState *cpu, vaddr address, int size, int rw,
                            int mmu_index);
    hwaddr (*get_phys_page_debug)(CPUState *cpu, vaddr addr);
    hwaddr (*get_phys_page_attrs_debug)(CPUState *cpu, vaddr addr,
                                        MemTxAttrs *attrs);
    int (*asidx_from_attrs)(CPUState *cpu, MemTxAttrs attrs);
    int (*gdb_read_register)(CPUState *cpu, uint8_t *buf, int reg);
    int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg);
    bool (*debug_check_watchpoint)(CPUState *cpu, CPUWatchpoint *wp);
    void (*debug_excp_handler)(CPUState *cpu);

    int (*write_elf64_note)(WriteCoreDumpFunction f, CPUState *cpu,
                            int cpuid, void *opaque);
    int (*write_elf64_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
                                void *opaque);
    int (*write_elf32_note)(WriteCoreDumpFunction f, CPUState *cpu,
                            int cpuid, void *opaque);
    int (*write_elf32_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
                                void *opaque);

    const struct VMStateDescription *vmsd;
    const char *gdb_core_xml_file;
    gchar * (*gdb_arch_name)(CPUState *cpu);
    const char * (*gdb_get_dynamic_xml)(CPUState *cpu, const char *xmlname);
    void (*cpu_exec_enter)(CPUState *cpu);
    void (*cpu_exec_exit)(CPUState *cpu);
    bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);

    void (*disas_set_info)(CPUState *cpu, disassemble_info *info);
    vaddr (*adjust_watchpoint_address)(CPUState *cpu, vaddr addr, int len);
    /* One-time TCG frontend initialization hook for this CPU class
     * — presumably invoked before the first translation; confirm in
     * the accel/tcg code. */
    void (*tcg_initialize)(void);

    /* Keep non-pointer data at the end to minimize holes.  */
    int gdb_num_core_regs;
    bool gdb_stop_before_watchpoint;
} CPUClass;
 231
/* View of the two 16-bit halves of CPUState::icount_decr.u32, laid out so
 * that "low" always aliases the low-order half (cycles left in icount mode)
 * and "high" the half that is set to -1 to request a TCG exit, regardless
 * of host byte order (see the @icount_decr documentation on CPUState). */
#ifdef HOST_WORDS_BIGENDIAN
typedef struct icount_decr_u16 {
    uint16_t high;
    uint16_t low;
} icount_decr_u16;
#else
typedef struct icount_decr_u16 {
    uint16_t low;
    uint16_t high;
} icount_decr_u16;
#endif
 243
/* A guest breakpoint: the PC it is set at plus its BP_* flags. */
typedef struct CPUBreakpoint {
    vaddr pc;                            /* guest virtual address */
    int flags; /* BP_* */
    QTAILQ_ENTRY(CPUBreakpoint) entry;   /* link in CPUState::breakpoints */
} CPUBreakpoint;
 249
/* A guest watchpoint covering the range [vaddr, vaddr + len). */
struct CPUWatchpoint {
    vaddr vaddr;                         /* start of the watched range */
    vaddr len;                           /* length of the range in bytes */
    vaddr hitaddr;                       /* address that triggered the hit */
    MemTxAttrs hitattrs;                 /* attributes of the hitting access */
    int flags; /* BP_* */
    QTAILQ_ENTRY(CPUWatchpoint) entry;   /* link in CPUState::watchpoints */
};
 258
struct KVMState;
struct kvm_run;

struct hax_vcpu_state;

/* Size of the per-CPU direct-mapped cache of recently used TBs
 * (CPUState::tb_jmp_cache). */
#define TB_JMP_CACHE_BITS 12
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)

/* work queue */

/* The union type allows passing of 64 bit target pointers on 32 bit
 * hosts in a single parameter
 */
typedef union {
    int           host_int;
    unsigned long host_ulong;
    void         *host_ptr;
    vaddr         target_ptr;
} run_on_cpu_data;

/* Convenience constructors for run_on_cpu_data values. */
#define RUN_ON_CPU_HOST_PTR(p)    ((run_on_cpu_data){.host_ptr = (p)})
#define RUN_ON_CPU_HOST_INT(i)    ((run_on_cpu_data){.host_int = (i)})
#define RUN_ON_CPU_HOST_ULONG(ul) ((run_on_cpu_data){.host_ulong = (ul)})
#define RUN_ON_CPU_TARGET_PTR(v)  ((run_on_cpu_data){.target_ptr = (v)})
#define RUN_ON_CPU_NULL           RUN_ON_CPU_HOST_PTR(NULL)

/* Worker function run on a vCPU thread by the run_on_cpu() family below. */
typedef void (*run_on_cpu_func)(CPUState *cpu, run_on_cpu_data data);

struct qemu_work_item;

/* Sentinel meaning "no NUMA node assigned" — presumably checked by the
 * board/NUMA setup code; confirm against the numa implementation. */
#define CPU_UNSET_NUMA_NODE_ID -1
/* Capacity of the trace_dstate bitmaps in CPUState. */
#define CPU_TRACE_DSTATE_MAX_EVENTS 32
 291
 292/**
 293 * CPUState:
 294 * @cpu_index: CPU index (informative).
 295 * @cluster_index: Identifies which cluster this CPU is in.
 296 *   For boards which don't define clusters or for "loose" CPUs not assigned
 297 *   to a cluster this will be UNASSIGNED_CLUSTER_INDEX; otherwise it will
 298 *   be the same as the cluster-id property of the CPU object's TYPE_CPU_CLUSTER
 299 *   QOM parent.
 300 * @nr_cores: Number of cores within this CPU package.
 301 * @nr_threads: Number of threads within this CPU.
 302 * @running: #true if CPU is currently running (lockless).
 303 * @has_waiter: #true if a CPU is currently waiting for the cpu_exec_end;
 304 * valid under cpu_list_lock.
 305 * @created: Indicates whether the CPU thread has been successfully created.
 306 * @interrupt_request: Indicates a pending interrupt request.
 307 * @halted: Nonzero if the CPU is in suspended state.
 308 * @stop: Indicates a pending stop request.
 309 * @stopped: Indicates the CPU has been artificially stopped.
 310 * @unplug: Indicates a pending CPU unplug request.
 311 * @crash_occurred: Indicates the OS reported a crash (panic) for this CPU
 312 * @singlestep_enabled: Flags for single-stepping.
 313 * @icount_extra: Instructions until next timer event.
 314 * @icount_decr: Low 16 bits: number of cycles left, only used in icount mode.
 315 * High 16 bits: Set to -1 to force TCG to stop executing linked TBs for this
 316 * CPU and return to its top level loop (even in non-icount mode).
 317 * This allows a single read-compare-cbranch-write sequence to test
 318 * for both decrementer underflow and exceptions.
 319 * @can_do_io: Nonzero if memory-mapped IO is safe. Deterministic execution
 320 * requires that IO only be performed on the last instruction of a TB
 321 * so that interrupts take effect immediately.
 322 * @cpu_ases: Pointer to array of CPUAddressSpaces (which define the
 323 *            AddressSpaces this CPU has)
 324 * @num_ases: number of CPUAddressSpaces in @cpu_ases
 325 * @as: Pointer to the first AddressSpace, for the convenience of targets which
 326 *      only have a single AddressSpace
 327 * @env_ptr: Pointer to subclass-specific CPUArchState field.
 328 * @gdb_regs: Additional GDB registers.
 329 * @gdb_num_regs: Number of total registers accessible to GDB.
 330 * @gdb_num_g_regs: Number of registers in GDB 'g' packets.
 331 * @next_cpu: Next CPU sharing TB cache.
 332 * @opaque: User data.
 333 * @mem_io_pc: Host Program Counter at which the memory was accessed.
 334 * @mem_io_vaddr: Target virtual address at which the memory was accessed.
 335 * @kvm_fd: vCPU file descriptor for KVM.
 336 * @work_mutex: Lock to prevent multiple access to queued_work_*.
 337 * @queued_work_first: First asynchronous work pending.
 338 * @trace_dstate_delayed: Delayed changes to trace_dstate (includes all changes
 339 *                        to @trace_dstate).
 340 * @trace_dstate: Dynamic tracing state of events for this vCPU (bitmask).
 341 * @ignore_memory_transaction_failures: Cached copy of the MachineState
 342 *    flag of the same name: allows the board to suppress calling of the
 343 *    CPU do_transaction_failed hook function.
 344 *
 345 * State of one CPU core or thread.
 346 */
struct CPUState {
    /*< private >*/
    DeviceState parent_obj;
    /*< public >*/

    int nr_cores;
    int nr_threads;

    struct QemuThread *thread;
#ifdef _WIN32
    HANDLE hThread;
#endif
    int thread_id;          /* host-side identifier of the vCPU thread */
    bool running, has_waiter;
    struct QemuCond *halt_cond;
    bool thread_kicked;
    bool created;
    bool stop;
    bool stopped;
    bool unplug;
    bool crash_occurred;
    bool exit_request;      /* request to leave the execution loop */
    /* cflags to use for the next TB — TODO confirm semantics in cpu-exec.c */
    uint32_t cflags_next_tb;
    /* updates protected by BQL */
    uint32_t interrupt_request;
    int singlestep_enabled;
    /* icount accounting; @icount_extra is documented on the struct above,
     * @icount_budget is presumably the instruction budget for the current
     * timeslice — confirm in the icount code. */
    int64_t icount_budget;
    int64_t icount_extra;
    /* sigsetjmp() buffer — presumably the longjmp target used to bail out
     * of the exec loop; confirm in cpu-exec.c */
    sigjmp_buf jmp_env;

    QemuMutex work_mutex;
    struct qemu_work_item *queued_work_first, *queued_work_last;

    CPUAddressSpace *cpu_ases;
    int num_ases;
    AddressSpace *as;
    MemoryRegion *memory;

    void *env_ptr; /* CPUArchState */

    /* Accessed in parallel; all accesses must be atomic */
    struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];

    struct GDBRegisterState *gdb_regs;
    int gdb_num_regs;
    int gdb_num_g_regs;
    QTAILQ_ENTRY(CPUState) node;  /* link in the global &cpus list */

    /* ice debug support */
    QTAILQ_HEAD(, CPUBreakpoint) breakpoints;

    QTAILQ_HEAD(, CPUWatchpoint) watchpoints;
    CPUWatchpoint *watchpoint_hit;

    void *opaque;

    /* In order to avoid passing too many arguments to the MMIO helpers,
     * we store some rarely used information in the CPU context.
     */
    uintptr_t mem_io_pc;
    vaddr mem_io_vaddr;
    /*
     * This is only needed for the legacy cpu_unassigned_access() hook;
     * when all targets using it have been converted to use
     * cpu_transaction_failed() instead it can be removed.
     */
    MMUAccessType mem_io_access_type;

    int kvm_fd;
    struct KVMState *kvm_state;
    struct kvm_run *kvm_run;

    /* Used for events with 'vcpu' and *without* the 'disabled' properties */
    DECLARE_BITMAP(trace_dstate_delayed, CPU_TRACE_DSTATE_MAX_EVENTS);
    DECLARE_BITMAP(trace_dstate, CPU_TRACE_DSTATE_MAX_EVENTS);

    /* TODO Move common fields from CPUArchState here. */
    int cpu_index;
    int cluster_index;
    uint32_t halted;
    uint32_t can_do_io;
    int32_t exception_index;  /* pending exception, or a negative sentinel */

    /* shared by kvm, hax and hvf */
    bool vcpu_dirty;

    /* Used to keep track of an outstanding cpu throttle thread for migration
     * autoconverge
     */
    bool throttle_thread_scheduled;

    bool ignore_memory_transaction_failures;

    /* Note that this is accessed at the start of every TB via a negative
       offset from AREG0.  Leave this field at the end so as to make the
       (absolute value) offset as small as possible.  This reduces code
       size, especially for hosts without large memory offsets.  */
    union {
        uint32_t u32;
        icount_decr_u16 u16;
    } icount_decr;

    struct hax_vcpu_state *hax_vcpu;

    int hvf_fd;  /* HVF accelerator vCPU handle — TODO confirm semantics */

    /* track IOMMUs whose translations we've cached in the TCG TLB */
    GArray *iommu_notifiers;
};
 456
typedef QTAILQ_HEAD(CPUTailQ, CPUState) CPUTailQ;
/* Global list of all CPUs; traverse under RCU using the macros below. */
extern CPUTailQ cpus;

#define first_cpu        QTAILQ_FIRST_RCU(&cpus)
#define CPU_NEXT(cpu)    QTAILQ_NEXT_RCU(cpu, node)
#define CPU_FOREACH(cpu) QTAILQ_FOREACH_RCU(cpu, &cpus, node)
#define CPU_FOREACH_SAFE(cpu, next_cpu) \
    QTAILQ_FOREACH_SAFE_RCU(cpu, &cpus, node, next_cpu)

/* The CPU this thread is executing — presumably NULL outside vCPU
 * context; confirm against the cpus.c setup code. */
extern __thread CPUState *current_cpu;
 467
 468static inline void cpu_tb_jmp_cache_clear(CPUState *cpu)
 469{
 470    unsigned int i;
 471
 472    for (i = 0; i < TB_JMP_CACHE_SIZE; i++) {
 473        atomic_set(&cpu->tb_jmp_cache[i], NULL);
 474    }
 475}
 476
 477/**
 478 * qemu_tcg_mttcg_enabled:
 479 * Check whether we are running MultiThread TCG or not.
 480 *
 481 * Returns: %true if we are in MTTCG mode %false otherwise.
 482 */
 483extern bool mttcg_enabled;
 484#define qemu_tcg_mttcg_enabled() (mttcg_enabled)
 485
 486/**
 487 * cpu_paging_enabled:
 488 * @cpu: The CPU whose state is to be inspected.
 489 *
 490 * Returns: %true if paging is enabled, %false otherwise.
 491 */
 492bool cpu_paging_enabled(const CPUState *cpu);
 493
 494/**
 495 * cpu_get_memory_mapping:
 496 * @cpu: The CPU whose memory mappings are to be obtained.
 497 * @list: Where to write the memory mappings to.
 498 * @errp: Pointer for reporting an #Error.
 499 */
 500void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
 501                            Error **errp);
 502
 503/**
 504 * cpu_write_elf64_note:
 505 * @f: pointer to a function that writes memory to a file
 506 * @cpu: The CPU whose memory is to be dumped
 507 * @cpuid: ID number of the CPU
 508 * @opaque: pointer to the CPUState struct
 509 */
 510int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
 511                         int cpuid, void *opaque);
 512
/**
 * cpu_write_elf64_qemunote:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @opaque: pointer to the CPUState struct
 */
 520int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
 521                             void *opaque);
 522
 523/**
 524 * cpu_write_elf32_note:
 525 * @f: pointer to a function that writes memory to a file
 526 * @cpu: The CPU whose memory is to be dumped
 527 * @cpuid: ID number of the CPU
 528 * @opaque: pointer to the CPUState struct
 529 */
 530int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
 531                         int cpuid, void *opaque);
 532
/**
 * cpu_write_elf32_qemunote:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @opaque: pointer to the CPUState struct
 */
 540int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
 541                             void *opaque);
 542
 543/**
 544 * cpu_get_crash_info:
 545 * @cpu: The CPU to get crash information for
 546 *
 547 * Gets the previously saved crash information.
 548 * Caller is responsible for freeing the data.
 549 */
 550GuestPanicInformation *cpu_get_crash_info(CPUState *cpu);
 551
/**
 * CPUDumpFlags:
 * @CPU_DUMP_CODE: also dump the guest code around the program counter
 * @CPU_DUMP_FPU: dump FPU register state, not just integer
 * @CPU_DUMP_CCOP: dump info about TCG QEMU's condition code optimization state
 */
/* Flag bits accepted in the @flags argument of cpu_dump_state(). */
enum CPUDumpFlags {
    CPU_DUMP_CODE = 0x00010000,
    CPU_DUMP_FPU  = 0x00020000,
    CPU_DUMP_CCOP = 0x00040000,
};
 563
 564/**
 565 * cpu_dump_state:
 566 * @cpu: The CPU whose state is to be dumped.
 567 * @f: File to dump to.
 568 * @cpu_fprintf: Function to dump with.
 569 * @flags: Flags what to dump.
 570 *
 571 * Dumps CPU state.
 572 */
 573void cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
 574                    int flags);
 575
 576/**
 577 * cpu_dump_statistics:
 578 * @cpu: The CPU whose state is to be dumped.
 579 * @f: File to dump to.
 580 * @cpu_fprintf: Function to dump with.
 581 * @flags: Flags what to dump.
 582 *
 583 * Dumps CPU statistics.
 584 */
 585void cpu_dump_statistics(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
 586                         int flags);
 587
 588#ifndef CONFIG_USER_ONLY
 589/**
 590 * cpu_get_phys_page_attrs_debug:
 591 * @cpu: The CPU to obtain the physical page address for.
 592 * @addr: The virtual address.
 593 * @attrs: Updated on return with the memory transaction attributes to use
 594 *         for this access.
 595 *
 596 * Obtains the physical page corresponding to a virtual one, together
 597 * with the corresponding memory transaction attributes to use for the access.
 598 * Use it only for debugging because no protection checks are done.
 599 *
 600 * Returns: Corresponding physical page address or -1 if no page found.
 601 */
 602static inline hwaddr cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
 603                                                   MemTxAttrs *attrs)
 604{
 605    CPUClass *cc = CPU_GET_CLASS(cpu);
 606
 607    if (cc->get_phys_page_attrs_debug) {
 608        return cc->get_phys_page_attrs_debug(cpu, addr, attrs);
 609    }
 610    /* Fallback for CPUs which don't implement the _attrs_ hook */
 611    *attrs = MEMTXATTRS_UNSPECIFIED;
 612    return cc->get_phys_page_debug(cpu, addr);
 613}
 614
 615/**
 616 * cpu_get_phys_page_debug:
 617 * @cpu: The CPU to obtain the physical page address for.
 618 * @addr: The virtual address.
 619 *
 620 * Obtains the physical page corresponding to a virtual one.
 621 * Use it only for debugging because no protection checks are done.
 622 *
 623 * Returns: Corresponding physical page address or -1 if no page found.
 624 */
 625static inline hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr)
 626{
 627    MemTxAttrs attrs = {};
 628
 629    return cpu_get_phys_page_attrs_debug(cpu, addr, &attrs);
 630}
 631
 632/** cpu_asidx_from_attrs:
 633 * @cpu: CPU
 634 * @attrs: memory transaction attributes
 635 *
 636 * Returns the address space index specifying the CPU AddressSpace
 637 * to use for a memory access with the given transaction attributes.
 638 */
 639static inline int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs)
 640{
 641    CPUClass *cc = CPU_GET_CLASS(cpu);
 642    int ret = 0;
 643
 644    if (cc->asidx_from_attrs) {
 645        ret = cc->asidx_from_attrs(cpu, attrs);
 646        assert(ret < cpu->num_ases && ret >= 0);
 647    }
 648    return ret;
 649}
 650#endif
 651
 652/**
 653 * cpu_list_add:
 654 * @cpu: The CPU to be added to the list of CPUs.
 655 */
 656void cpu_list_add(CPUState *cpu);
 657
 658/**
 659 * cpu_list_remove:
 660 * @cpu: The CPU to be removed from the list of CPUs.
 661 */
 662void cpu_list_remove(CPUState *cpu);
 663
 664/**
 665 * cpu_reset:
 666 * @cpu: The CPU whose state is to be reset.
 667 */
 668void cpu_reset(CPUState *cpu);
 669
 670/**
 671 * cpu_class_by_name:
 672 * @typename: The CPU base type.
 673 * @cpu_model: The model string without any parameters.
 674 *
 675 * Looks up a CPU #ObjectClass matching name @cpu_model.
 676 *
 677 * Returns: A #CPUClass or %NULL if not matching class is found.
 678 */
 679ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model);
 680
 681/**
 682 * cpu_create:
 683 * @typename: The CPU type.
 684 *
 685 * Instantiates a CPU and realizes the CPU.
 686 *
 687 * Returns: A #CPUState or %NULL if an error occurred.
 688 */
 689CPUState *cpu_create(const char *typename);
 690
 691/**
 692 * parse_cpu_model:
 693 * @cpu_model: The model string including optional parameters.
 694 *
 695 * processes optional parameters and registers them as global properties
 696 *
 697 * Returns: type of CPU to create or prints error and terminates process
 698 *          if an error occurred.
 699 */
 700const char *parse_cpu_model(const char *cpu_model);
 701
 702/**
 703 * cpu_has_work:
 704 * @cpu: The vCPU to check.
 705 *
 706 * Checks whether the CPU has work to do.
 707 *
 708 * Returns: %true if the CPU has work, %false otherwise.
 709 */
 710static inline bool cpu_has_work(CPUState *cpu)
 711{
 712    CPUClass *cc = CPU_GET_CLASS(cpu);
 713
 714    g_assert(cc->has_work);
 715    return cc->has_work(cpu);
 716}
 717
 718/**
 719 * qemu_cpu_is_self:
 720 * @cpu: The vCPU to check against.
 721 *
 722 * Checks whether the caller is executing on the vCPU thread.
 723 *
 724 * Returns: %true if called from @cpu's thread, %false otherwise.
 725 */
 726bool qemu_cpu_is_self(CPUState *cpu);
 727
 728/**
 729 * qemu_cpu_kick:
 730 * @cpu: The vCPU to kick.
 731 *
 732 * Kicks @cpu's thread.
 733 */
 734void qemu_cpu_kick(CPUState *cpu);
 735
 736/**
 737 * cpu_is_stopped:
 738 * @cpu: The CPU to check.
 739 *
 740 * Checks whether the CPU is stopped.
 741 *
 742 * Returns: %true if run state is not running or if artificially stopped;
 743 * %false otherwise.
 744 */
 745bool cpu_is_stopped(CPUState *cpu);
 746
 747/**
 748 * do_run_on_cpu:
 749 * @cpu: The vCPU to run on.
 750 * @func: The function to be executed.
 751 * @data: Data to pass to the function.
 752 * @mutex: Mutex to release while waiting for @func to run.
 753 *
 754 * Used internally in the implementation of run_on_cpu.
 755 */
 756void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,
 757                   QemuMutex *mutex);
 758
 759/**
 760 * run_on_cpu:
 761 * @cpu: The vCPU to run on.
 762 * @func: The function to be executed.
 763 * @data: Data to pass to the function.
 764 *
 765 * Schedules the function @func for execution on the vCPU @cpu.
 766 */
 767void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
 768
 769/**
 770 * async_run_on_cpu:
 771 * @cpu: The vCPU to run on.
 772 * @func: The function to be executed.
 773 * @data: Data to pass to the function.
 774 *
 775 * Schedules the function @func for execution on the vCPU @cpu asynchronously.
 776 */
 777void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
 778
 779/**
 780 * async_safe_run_on_cpu:
 781 * @cpu: The vCPU to run on.
 782 * @func: The function to be executed.
 783 * @data: Data to pass to the function.
 784 *
 785 * Schedules the function @func for execution on the vCPU @cpu asynchronously,
 786 * while all other vCPUs are sleeping.
 787 *
 788 * Unlike run_on_cpu and async_run_on_cpu, the function is run outside the
 789 * BQL.
 790 */
 791void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
 792
 793/**
 794 * qemu_get_cpu:
 795 * @index: The CPUState@cpu_index value of the CPU to obtain.
 796 *
 797 * Gets a CPU matching @index.
 798 *
 799 * Returns: The CPU or %NULL if there is no matching CPU.
 800 */
 801CPUState *qemu_get_cpu(int index);
 802
 803/**
 804 * cpu_exists:
 805 * @id: Guest-exposed CPU ID to lookup.
 806 *
 807 * Search for CPU with specified ID.
 808 *
 809 * Returns: %true - CPU is found, %false - CPU isn't found.
 810 */
 811bool cpu_exists(int64_t id);
 812
 813/**
 814 * cpu_by_arch_id:
 815 * @id: Guest-exposed CPU ID of the CPU to obtain.
 816 *
 817 * Get a CPU with matching @id.
 818 *
 819 * Returns: The CPU or %NULL if there is no matching CPU.
 820 */
 821CPUState *cpu_by_arch_id(int64_t id);
 822
 823/**
 824 * cpu_throttle_set:
 825 * @new_throttle_pct: Percent of sleep time. Valid range is 1 to 99.
 826 *
 827 * Throttles all vcpus by forcing them to sleep for the given percentage of
 828 * time. A throttle_percentage of 25 corresponds to a 75% duty cycle roughly.
 829 * (example: 10ms sleep for every 30ms awake).
 830 *
 831 * cpu_throttle_set can be called as needed to adjust new_throttle_pct.
 832 * Once the throttling starts, it will remain in effect until cpu_throttle_stop
 833 * is called.
 834 */
 835void cpu_throttle_set(int new_throttle_pct);
 836
 837/**
 838 * cpu_throttle_stop:
 839 *
 840 * Stops the vcpu throttling started by cpu_throttle_set.
 841 */
 842void cpu_throttle_stop(void);
 843
 844/**
 845 * cpu_throttle_active:
 846 *
 847 * Returns: %true if the vcpus are currently being throttled, %false otherwise.
 848 */
 849bool cpu_throttle_active(void);
 850
 851/**
 852 * cpu_throttle_get_percentage:
 853 *
 854 * Returns the vcpu throttle percentage. See cpu_throttle_set for details.
 855 *
 856 * Returns: The throttle percentage in range 1 to 99.
 857 */
 858int cpu_throttle_get_percentage(void);
 859
 860#ifndef CONFIG_USER_ONLY
 861
 862typedef void (*CPUInterruptHandler)(CPUState *, int);
 863
 864extern CPUInterruptHandler cpu_interrupt_handler;
 865
 866/**
 867 * cpu_interrupt:
 868 * @cpu: The CPU to set an interrupt on.
 869 * @mask: The interrupts to set.
 870 *
 871 * Invokes the interrupt handler.
 872 */
 873static inline void cpu_interrupt(CPUState *cpu, int mask)
 874{
 875    cpu_interrupt_handler(cpu, mask);
 876}
 877
 878#else /* USER_ONLY */
 879
 880void cpu_interrupt(CPUState *cpu, int mask);
 881
 882#endif /* USER_ONLY */
 883
 884#ifdef NEED_CPU_H
 885
 886#ifdef CONFIG_SOFTMMU
 887static inline void cpu_unassigned_access(CPUState *cpu, hwaddr addr,
 888                                         bool is_write, bool is_exec,
 889                                         int opaque, unsigned size)
 890{
 891    CPUClass *cc = CPU_GET_CLASS(cpu);
 892
 893    if (cc->do_unassigned_access) {
 894        cc->do_unassigned_access(cpu, addr, is_write, is_exec, opaque, size);
 895    }
 896}
 897
 898static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
 899                                        MMUAccessType access_type,
 900                                        int mmu_idx, uintptr_t retaddr)
 901{
 902    CPUClass *cc = CPU_GET_CLASS(cpu);
 903
 904    cc->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr);
 905}
 906
 907static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
 908                                          vaddr addr, unsigned size,
 909                                          MMUAccessType access_type,
 910                                          int mmu_idx, MemTxAttrs attrs,
 911                                          MemTxResult response,
 912                                          uintptr_t retaddr)
 913{
 914    CPUClass *cc = CPU_GET_CLASS(cpu);
 915
 916    if (!cpu->ignore_memory_transaction_failures && cc->do_transaction_failed) {
 917        cc->do_transaction_failed(cpu, physaddr, addr, size, access_type,
 918                                  mmu_idx, attrs, response, retaddr);
 919    }
 920}
 921#endif
 922
 923#endif /* NEED_CPU_H */
 924
 925/**
 926 * cpu_set_pc:
 927 * @cpu: The CPU to set the program counter for.
 928 * @addr: Program counter value.
 929 *
 930 * Sets the program counter for a CPU.
 931 */
 932static inline void cpu_set_pc(CPUState *cpu, vaddr addr)
 933{
 934    CPUClass *cc = CPU_GET_CLASS(cpu);
 935
 936    cc->set_pc(cpu, addr);
 937}
 938
 939/**
 940 * cpu_reset_interrupt:
 941 * @cpu: The CPU to clear the interrupt on.
 942 * @mask: The interrupt mask to clear.
 943 *
 944 * Resets interrupts on the vCPU @cpu.
 945 */
 946void cpu_reset_interrupt(CPUState *cpu, int mask);
 947
 948/**
 949 * cpu_exit:
 950 * @cpu: The CPU to exit.
 951 *
 952 * Requests the CPU @cpu to exit execution.
 953 */
 954void cpu_exit(CPUState *cpu);
 955
 956/**
 957 * cpu_resume:
 958 * @cpu: The CPU to resume.
 959 *
 960 * Resumes CPU, i.e. puts CPU into runnable state.
 961 */
 962void cpu_resume(CPUState *cpu);
 963
 964/**
 965 * cpu_remove:
 966 * @cpu: The CPU to remove.
 967 *
 968 * Requests the CPU to be removed.
 969 */
 970void cpu_remove(CPUState *cpu);
 971
 972 /**
 973 * cpu_remove_sync:
 974 * @cpu: The CPU to remove.
 975 *
 976 * Requests the CPU to be removed and waits till it is removed.
 977 */
 978void cpu_remove_sync(CPUState *cpu);
 979
 980/**
 981 * process_queued_cpu_work() - process all items on CPU work queue
 982 * @cpu: The CPU which work queue to process.
 983 */
 984void process_queued_cpu_work(CPUState *cpu);
 985
 986/**
 987 * cpu_exec_start:
 988 * @cpu: The CPU for the current thread.
 989 *
 990 * Record that a CPU has started execution and can be interrupted with
 991 * cpu_exit.
 992 */
 993void cpu_exec_start(CPUState *cpu);
 994
 995/**
 996 * cpu_exec_end:
 997 * @cpu: The CPU for the current thread.
 998 *
 999 * Record that a CPU has stopped execution and exclusive sections
1000 * can be executed without interrupting it.
1001 */
1002void cpu_exec_end(CPUState *cpu);
1003
1004/**
1005 * start_exclusive:
1006 *
1007 * Wait for a concurrent exclusive section to end, and then start
1008 * a section of work that is run while other CPUs are not running
1009 * between cpu_exec_start and cpu_exec_end.  CPUs that are running
1010 * cpu_exec are exited immediately.  CPUs that call cpu_exec_start
1011 * during the exclusive section go to sleep until this CPU calls
1012 * end_exclusive.
1013 */
1014void start_exclusive(void);
1015
1016/**
1017 * end_exclusive:
1018 *
1019 * Concludes an exclusive execution section started by start_exclusive.
1020 */
1021void end_exclusive(void);
1022
1023/**
1024 * qemu_init_vcpu:
1025 * @cpu: The vCPU to initialize.
1026 *
1027 * Initializes a vCPU.
1028 */
1029void qemu_init_vcpu(CPUState *cpu);
1030
1031#define SSTEP_ENABLE  0x1  /* Enable simulated HW single stepping */
1032#define SSTEP_NOIRQ   0x2  /* Do not use IRQ while single stepping */
#define SSTEP_NOTIMER 0x4  /* Do not use timers while single stepping */
1034
1035/**
1036 * cpu_single_step:
 * @cpu: CPU to set the single-step flags for.
 * @enabled: Flags to enable.
1039 *
1040 * Enables or disables single-stepping for @cpu.
1041 */
1042void cpu_single_step(CPUState *cpu, int enabled);
1043
1044/* Breakpoint/watchpoint flags */
1045#define BP_MEM_READ           0x01
1046#define BP_MEM_WRITE          0x02
1047#define BP_MEM_ACCESS         (BP_MEM_READ | BP_MEM_WRITE)
1048#define BP_STOP_BEFORE_ACCESS 0x04
1049/* 0x08 currently unused */
1050#define BP_GDB                0x10
1051#define BP_CPU                0x20
1052#define BP_ANY                (BP_GDB | BP_CPU)
1053#define BP_WATCHPOINT_HIT_READ 0x40
1054#define BP_WATCHPOINT_HIT_WRITE 0x80
1055#define BP_WATCHPOINT_HIT (BP_WATCHPOINT_HIT_READ | BP_WATCHPOINT_HIT_WRITE)
1056
1057int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
1058                          CPUBreakpoint **breakpoint);
1059int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags);
1060void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint);
1061void cpu_breakpoint_remove_all(CPUState *cpu, int mask);
1062
1063/* Return true if PC matches an installed breakpoint.  */
1064static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask)
1065{
1066    CPUBreakpoint *bp;
1067
1068    if (unlikely(!QTAILQ_EMPTY(&cpu->breakpoints))) {
1069        QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
1070            if (bp->pc == pc && (bp->flags & mask)) {
1071                return true;
1072            }
1073        }
1074    }
1075    return false;
1076}
1077
1078int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
1079                          int flags, CPUWatchpoint **watchpoint);
1080int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
1081                          vaddr len, int flags);
1082void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint);
1083void cpu_watchpoint_remove_all(CPUState *cpu, int mask);
1084
1085/**
1086 * cpu_get_address_space:
1087 * @cpu: CPU to get address space from
1088 * @asidx: index identifying which address space to get
1089 *
1090 * Return the requested address space of this CPU. @asidx
1091 * specifies which address space to read.
1092 */
1093AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx);
1094
1095void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...)
1096    GCC_FMT_ATTR(2, 3);
1097extern Property cpu_common_props[];
1098void cpu_exec_initfn(CPUState *cpu);
1099void cpu_exec_realizefn(CPUState *cpu, Error **errp);
1100void cpu_exec_unrealizefn(CPUState *cpu);
1101
1102/**
1103 * target_words_bigendian:
1104 * Returns true if the (default) endianness of the target is big endian,
1105 * false otherwise. Note that in target-specific code, you can use
1106 * TARGET_WORDS_BIGENDIAN directly instead. On the other hand, common
1107 * code should normally never need to know about the endianness of the
1108 * target, so please do *not* use this function unless you know very well
1109 * what you are doing!
1110 */
1111bool target_words_bigendian(void);
1112
1113#ifdef NEED_CPU_H
1114
1115#ifdef CONFIG_SOFTMMU
1116extern const struct VMStateDescription vmstate_cpu_common;
1117#else
1118#define vmstate_cpu_common vmstate_dummy
1119#endif
1120
1121#define VMSTATE_CPU() {                                                     \
1122    .name = "parent_obj",                                                   \
1123    .size = sizeof(CPUState),                                               \
1124    .vmsd = &vmstate_cpu_common,                                            \
1125    .flags = VMS_STRUCT,                                                    \
1126    .offset = 0,                                                            \
1127}
1128
1129#endif /* NEED_CPU_H */
1130
1131#define UNASSIGNED_CPU_INDEX -1
1132#define UNASSIGNED_CLUSTER_INDEX -1
1133
1134#endif
1135