/* qemu/include/qom/cpu.h */
   1/*
   2 * QEMU CPU model
   3 *
   4 * Copyright (c) 2012 SUSE LINUX Products GmbH
   5 *
   6 * This program is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU General Public License
   8 * as published by the Free Software Foundation; either version 2
   9 * of the License, or (at your option) any later version.
  10 *
  11 * This program is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  14 * GNU General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * along with this program; if not, see
  18 * <http://www.gnu.org/licenses/gpl-2.0.html>
  19 */
  20#ifndef QEMU_CPU_H
  21#define QEMU_CPU_H
  22
  23#include "hw/qdev-core.h"
  24#include "disas/bfd.h"
  25#include "exec/hwaddr.h"
  26#include "exec/memattrs.h"
  27#include "qemu/bitmap.h"
  28#include "qemu/queue.h"
  29#include "qemu/thread.h"
  30
  31typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size,
  32                                     void *opaque);
  33
  34/**
  35 * vaddr:
  36 * Type wide enough to contain any #target_ulong virtual address.
  37 */
  38typedef uint64_t vaddr;
  39#define VADDR_PRId PRId64
  40#define VADDR_PRIu PRIu64
  41#define VADDR_PRIo PRIo64
  42#define VADDR_PRIx PRIx64
  43#define VADDR_PRIX PRIX64
  44#define VADDR_MAX UINT64_MAX
  45
  46/**
  47 * SECTION:cpu
  48 * @section_id: QEMU-cpu
  49 * @title: CPU Class
  50 * @short_description: Base class for all CPUs
  51 */
  52
  53#define TYPE_CPU "cpu"
  54
  55/* Since this macro is used a lot in hot code paths and in conjunction with
  56 * FooCPU *foo_env_get_cpu(), we deviate from usual QOM practice by using
  57 * an unchecked cast.
  58 */
  59#define CPU(obj) ((CPUState *)(obj))
  60
  61#define CPU_CLASS(class) OBJECT_CLASS_CHECK(CPUClass, (class), TYPE_CPU)
  62#define CPU_GET_CLASS(obj) OBJECT_GET_CLASS(CPUClass, (obj), TYPE_CPU)
  63
  64typedef enum MMUAccessType {
  65    MMU_DATA_LOAD  = 0,
  66    MMU_DATA_STORE = 1,
  67    MMU_INST_FETCH = 2
  68} MMUAccessType;
  69
  70typedef struct CPUWatchpoint CPUWatchpoint;
  71
  72typedef void (*CPUUnassignedAccess)(CPUState *cpu, hwaddr addr,
  73                                    bool is_write, bool is_exec, int opaque,
  74                                    unsigned size);
  75
  76struct TranslationBlock;
  77
  78/**
  79 * CPUClass:
  80 * @class_by_name: Callback to map -cpu command line model name to an
  81 * instantiatable CPU type.
  82 * @parse_features: Callback to parse command line arguments.
  83 * @reset: Callback to reset the #CPUState to its initial state.
  84 * @reset_dump_flags: #CPUDumpFlags to use for reset logging.
  85 * @has_work: Callback for checking if there is work to do.
  86 * @do_interrupt: Callback for interrupt handling.
  87 * @do_unassigned_access: Callback for unassigned access handling.
  88 * @do_unaligned_access: Callback for unaligned access handling, if
  89 * the target defines #ALIGNED_ONLY.
  90 * @virtio_is_big_endian: Callback to return %true if a CPU which supports
  91 * runtime configurable endianness is currently big-endian. Non-configurable
  92 * CPUs can use the default implementation of this method. This method should
  93 * not be used by any callers other than the pre-1.0 virtio devices.
  94 * @memory_rw_debug: Callback for GDB memory access.
  95 * @dump_state: Callback for dumping state.
  96 * @dump_statistics: Callback for dumping statistics.
  97 * @get_arch_id: Callback for getting architecture-dependent CPU ID.
  98 * @get_paging_enabled: Callback for inquiring whether paging is enabled.
  99 * @get_memory_mapping: Callback for obtaining the memory mappings.
 100 * @set_pc: Callback for setting the Program Counter register.
 101 * @synchronize_from_tb: Callback for synchronizing state from a TCG
 102 * #TranslationBlock.
 103 * @handle_mmu_fault: Callback for handling an MMU fault.
 104 * @get_phys_page_debug: Callback for obtaining a physical address.
 105 * @get_phys_page_attrs_debug: Callback for obtaining a physical address and the
 106 *       associated memory transaction attributes to use for the access.
 107 *       CPUs which use memory transaction attributes should implement this
 108 *       instead of get_phys_page_debug.
 109 * @asidx_from_attrs: Callback to return the CPU AddressSpace to use for
 110 *       a memory access with the specified memory transaction attributes.
 111 * @gdb_read_register: Callback for letting GDB read a register.
 112 * @gdb_write_register: Callback for letting GDB write a register.
 113 * @debug_check_watchpoint: Callback: return true if the architectural
 114 *       watchpoint whose address has matched should really fire.
 115 * @debug_excp_handler: Callback for handling debug exceptions.
 116 * @write_elf64_note: Callback for writing a CPU-specific ELF note to a
 117 * 64-bit VM coredump.
 * @write_elf64_qemunote: Callback for writing a CPU- and QEMU-specific ELF
 * note to a 64-bit VM coredump.
 120 * @write_elf32_note: Callback for writing a CPU-specific ELF note to a
 121 * 32-bit VM coredump.
 122 * @write_elf32_qemunote: Callback for writing a CPU- and QEMU-specific ELF
 123 * note to a 32-bit VM coredump.
 124 * @vmsd: State description for migration.
 125 * @gdb_num_core_regs: Number of core registers accessible to GDB.
 126 * @gdb_core_xml_file: File name for core registers GDB XML description.
 127 * @gdb_stop_before_watchpoint: Indicates whether GDB expects the CPU to stop
 128 *           before the insn which triggers a watchpoint rather than after it.
 129 * @gdb_arch_name: Optional callback that returns the architecture name known
 130 * to GDB. The caller must free the returned string with g_free.
 131 * @cpu_exec_enter: Callback for cpu_exec preparation.
 132 * @cpu_exec_exit: Callback for cpu_exec cleanup.
 133 * @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec.
 134 * @disas_set_info: Setup architecture specific components of disassembly info
 135 * @adjust_watchpoint_address: Perform a target-specific adjustment to an
 136 * address before attempting to match it against watchpoints.
 137 *
 138 * Represents a CPU family or model.
 139 */
typedef struct CPUClass {
    /*< private >*/
    DeviceClass parent_class;
    /*< public >*/

    ObjectClass *(*class_by_name)(const char *cpu_model);
    void (*parse_features)(const char *typename, char *str, Error **errp);

    void (*reset)(CPUState *cpu);
    int reset_dump_flags;
    bool (*has_work)(CPUState *cpu);
    void (*do_interrupt)(CPUState *cpu);
    CPUUnassignedAccess do_unassigned_access;
    void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
                                MMUAccessType access_type,
                                int mmu_idx, uintptr_t retaddr);
    bool (*virtio_is_big_endian)(CPUState *cpu);
    int (*memory_rw_debug)(CPUState *cpu, vaddr addr,
                           uint8_t *buf, int len, bool is_write);
    void (*dump_state)(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
                       int flags);
    /* Returns previously saved guest crash information; the caller is
     * responsible for freeing the returned structure.  (Not documented
     * in the kernel-doc block above.)
     */
    GuestPanicInformation* (*get_crash_info)(CPUState *cpu);
    void (*dump_statistics)(CPUState *cpu, FILE *f,
                            fprintf_function cpu_fprintf, int flags);
    int64_t (*get_arch_id)(CPUState *cpu);
    bool (*get_paging_enabled)(const CPUState *cpu);
    void (*get_memory_mapping)(CPUState *cpu, MemoryMappingList *list,
                               Error **errp);
    void (*set_pc)(CPUState *cpu, vaddr value);
    void (*synchronize_from_tb)(CPUState *cpu, struct TranslationBlock *tb);
    int (*handle_mmu_fault)(CPUState *cpu, vaddr address, int rw,
                            int mmu_index);
    hwaddr (*get_phys_page_debug)(CPUState *cpu, vaddr addr);
    hwaddr (*get_phys_page_attrs_debug)(CPUState *cpu, vaddr addr,
                                        MemTxAttrs *attrs);
    int (*asidx_from_attrs)(CPUState *cpu, MemTxAttrs attrs);
    int (*gdb_read_register)(CPUState *cpu, uint8_t *buf, int reg);
    int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg);
    bool (*debug_check_watchpoint)(CPUState *cpu, CPUWatchpoint *wp);
    void (*debug_excp_handler)(CPUState *cpu);

    int (*write_elf64_note)(WriteCoreDumpFunction f, CPUState *cpu,
                            int cpuid, void *opaque);
    int (*write_elf64_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
                                void *opaque);
    int (*write_elf32_note)(WriteCoreDumpFunction f, CPUState *cpu,
                            int cpuid, void *opaque);
    int (*write_elf32_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
                                void *opaque);

    const struct VMStateDescription *vmsd;
    int gdb_num_core_regs;
    const char *gdb_core_xml_file;
    gchar * (*gdb_arch_name)(CPUState *cpu);
    bool gdb_stop_before_watchpoint;

    void (*cpu_exec_enter)(CPUState *cpu);
    void (*cpu_exec_exit)(CPUState *cpu);
    bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);

    void (*disas_set_info)(CPUState *cpu, disassemble_info *info);
    vaddr (*adjust_watchpoint_address)(CPUState *cpu, vaddr addr, int len);
} CPUClass;
 203
#ifdef HOST_WORDS_BIGENDIAN
/* Overlay for the two 16-bit halves of CPUState::icount_decr.u32.  The
 * field order depends on host endianness so that "low" always aliases
 * the least-significant half and "high" the most-significant half.
 */
typedef struct icount_decr_u16 {
    uint16_t high;
    uint16_t low;
} icount_decr_u16;
#else
typedef struct icount_decr_u16 {
    uint16_t low;
    uint16_t high;
} icount_decr_u16;
#endif
 215
/* A guest breakpoint, kept on the CPUState::breakpoints tail queue. */
typedef struct CPUBreakpoint {
    vaddr pc;       /* guest virtual address the breakpoint is set on */
    int flags; /* BP_* */
    QTAILQ_ENTRY(CPUBreakpoint) entry;
} CPUBreakpoint;
 221
/* A guest watchpoint, kept on the CPUState::watchpoints tail queue. */
struct CPUWatchpoint {
    vaddr vaddr;        /* start of the watched virtual address range */
    vaddr len;          /* length of the watched range in bytes */
    vaddr hitaddr;      /* address of the access that triggered the hit */
    MemTxAttrs hitattrs; /* transaction attributes of the triggering access */
    int flags; /* BP_* */
    QTAILQ_ENTRY(CPUWatchpoint) entry;
};
 230
 231struct KVMState;
 232struct kvm_run;
 233
 234struct hax_vcpu_state;
 235
 236#define TB_JMP_CACHE_BITS 12
 237#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
 238
 239/* work queue */
 240
 241/* The union type allows passing of 64 bit target pointers on 32 bit
 242 * hosts in a single parameter
 243 */
typedef union {
    int           host_int;    /* plain host integer payload */
    unsigned long host_ulong;  /* host unsigned long payload */
    void         *host_ptr;    /* host pointer payload */
    vaddr         target_ptr;  /* 64-bit guest virtual address payload */
} run_on_cpu_data;
 250
 251#define RUN_ON_CPU_HOST_PTR(p)    ((run_on_cpu_data){.host_ptr = (p)})
 252#define RUN_ON_CPU_HOST_INT(i)    ((run_on_cpu_data){.host_int = (i)})
 253#define RUN_ON_CPU_HOST_ULONG(ul) ((run_on_cpu_data){.host_ulong = (ul)})
 254#define RUN_ON_CPU_TARGET_PTR(v)  ((run_on_cpu_data){.target_ptr = (v)})
 255#define RUN_ON_CPU_NULL           RUN_ON_CPU_HOST_PTR(NULL)
 256
 257typedef void (*run_on_cpu_func)(CPUState *cpu, run_on_cpu_data data);
 258
 259struct qemu_work_item;
 260
 261#define CPU_UNSET_NUMA_NODE_ID -1
 262#define CPU_TRACE_DSTATE_MAX_EVENTS 32
 263
 264/**
 265 * CPUState:
 266 * @cpu_index: CPU index (informative).
 267 * @nr_cores: Number of cores within this CPU package.
 268 * @nr_threads: Number of threads within this CPU.
 269 * @running: #true if CPU is currently running (lockless).
 270 * @has_waiter: #true if a CPU is currently waiting for the cpu_exec_end;
 271 * valid under cpu_list_lock.
 272 * @created: Indicates whether the CPU thread has been successfully created.
 273 * @interrupt_request: Indicates a pending interrupt request.
 274 * @halted: Nonzero if the CPU is in suspended state.
 275 * @stop: Indicates a pending stop request.
 276 * @stopped: Indicates the CPU has been artificially stopped.
 277 * @unplug: Indicates a pending CPU unplug request.
 278 * @crash_occurred: Indicates the OS reported a crash (panic) for this CPU
 279 * @singlestep_enabled: Flags for single-stepping.
 280 * @icount_extra: Instructions until next timer event.
 281 * @icount_decr: Low 16 bits: number of cycles left, only used in icount mode.
 282 * High 16 bits: Set to -1 to force TCG to stop executing linked TBs for this
 283 * CPU and return to its top level loop (even in non-icount mode).
 284 * This allows a single read-compare-cbranch-write sequence to test
 285 * for both decrementer underflow and exceptions.
 286 * @can_do_io: Nonzero if memory-mapped IO is safe. Deterministic execution
 287 * requires that IO only be performed on the last instruction of a TB
 288 * so that interrupts take effect immediately.
 289 * @cpu_ases: Pointer to array of CPUAddressSpaces (which define the
 290 *            AddressSpaces this CPU has)
 291 * @num_ases: number of CPUAddressSpaces in @cpu_ases
 292 * @as: Pointer to the first AddressSpace, for the convenience of targets which
 293 *      only have a single AddressSpace
 294 * @env_ptr: Pointer to subclass-specific CPUArchState field.
 295 * @gdb_regs: Additional GDB registers.
 296 * @gdb_num_regs: Number of total registers accessible to GDB.
 297 * @gdb_num_g_regs: Number of registers in GDB 'g' packets.
 298 * @next_cpu: Next CPU sharing TB cache.
 299 * @opaque: User data.
 300 * @mem_io_pc: Host Program Counter at which the memory was accessed.
 301 * @mem_io_vaddr: Target virtual address at which the memory was accessed.
 302 * @kvm_fd: vCPU file descriptor for KVM.
 303 * @work_mutex: Lock to prevent multiple access to queued_work_*.
 304 * @queued_work_first: First asynchronous work pending.
 305 * @trace_dstate_delayed: Delayed changes to trace_dstate (includes all changes
 306 *                        to @trace_dstate).
 307 * @trace_dstate: Dynamic tracing state of events for this vCPU (bitmask).
 308 *
 309 * State of one CPU core or thread.
 310 */
struct CPUState {
    /*< private >*/
    DeviceState parent_obj;
    /*< public >*/

    int nr_cores;
    int nr_threads;

    struct QemuThread *thread;
#ifdef _WIN32
    HANDLE hThread;
#endif
    int thread_id;
    bool running, has_waiter;
    struct QemuCond *halt_cond;
    /* set once the vCPU thread has already been kicked; presumably used
     * to suppress redundant kicks -- TODO confirm against cpus.c */
    bool thread_kicked;
    bool created;
    bool stop;
    bool stopped;
    bool unplug;
    bool crash_occurred;
    bool exit_request;
    /* updates protected by BQL */
    uint32_t interrupt_request;
    int singlestep_enabled;
    /* NOTE(review): icount_budget is not described in the kernel-doc
     * block above; appears to be the icount allotment companion of
     * @icount_extra -- confirm against the icount accounting code. */
    int64_t icount_budget;
    int64_t icount_extra;
    sigjmp_buf jmp_env;   /* siglongjmp target for leaving the exec loop */

    QemuMutex work_mutex;
    struct qemu_work_item *queued_work_first, *queued_work_last;

    CPUAddressSpace *cpu_ases;
    int num_ases;
    AddressSpace *as;
    MemoryRegion *memory;

    void *env_ptr; /* CPUArchState */

    /* Accessed in parallel; all accesses must be atomic */
    struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];

    struct GDBRegisterState *gdb_regs;
    int gdb_num_regs;
    int gdb_num_g_regs;
    QTAILQ_ENTRY(CPUState) node;

    /* ice debug support */
    QTAILQ_HEAD(breakpoints_head, CPUBreakpoint) breakpoints;

    QTAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints;
    CPUWatchpoint *watchpoint_hit;

    void *opaque;

    /* In order to avoid passing too many arguments to the MMIO helpers,
     * we store some rarely used information in the CPU context.
     */
    uintptr_t mem_io_pc;
    vaddr mem_io_vaddr;

    int kvm_fd;
    struct KVMState *kvm_state;
    struct kvm_run *kvm_run;

    /* Used for events with 'vcpu' and *without* the 'disabled' properties */
    DECLARE_BITMAP(trace_dstate_delayed, CPU_TRACE_DSTATE_MAX_EVENTS);
    DECLARE_BITMAP(trace_dstate, CPU_TRACE_DSTATE_MAX_EVENTS);

    /* TODO Move common fields from CPUArchState here. */
    int cpu_index; /* used by alpha TCG */
    uint32_t halted; /* used by alpha, cris, ppc TCG */
    uint32_t can_do_io;
    int32_t exception_index; /* used by m68k TCG */

    /* shared by kvm, hax and hvf */
    bool vcpu_dirty;

    /* Used to keep track of an outstanding cpu throttle thread for migration
     * autoconverge
     */
    bool throttle_thread_scheduled;

    /* Note that this is accessed at the start of every TB via a negative
       offset from AREG0.  Leave this field at the end so as to make the
       (absolute value) offset as small as possible.  This reduces code
       size, especially for hosts without large memory offsets.  */
    union {
        uint32_t u32;
        icount_decr_u16 u16;
    } icount_decr;

    /* NOTE(review): the two fields below sit after icount_decr despite
     * the "leave this field at the end" comment above. */
    struct hax_vcpu_state *hax_vcpu;

    /* The pending_tlb_flush flag is set and cleared atomically to
     * avoid potential races. The aim of the flag is to avoid
     * unnecessary flushes.
     */
    uint16_t pending_tlb_flush;
};
 411
 412QTAILQ_HEAD(CPUTailQ, CPUState);
 413extern struct CPUTailQ cpus;
 414#define CPU_NEXT(cpu) QTAILQ_NEXT(cpu, node)
 415#define CPU_FOREACH(cpu) QTAILQ_FOREACH(cpu, &cpus, node)
 416#define CPU_FOREACH_SAFE(cpu, next_cpu) \
 417    QTAILQ_FOREACH_SAFE(cpu, &cpus, node, next_cpu)
 418#define CPU_FOREACH_REVERSE(cpu) \
 419    QTAILQ_FOREACH_REVERSE(cpu, &cpus, CPUTailQ, node)
 420#define first_cpu QTAILQ_FIRST(&cpus)
 421
 422extern __thread CPUState *current_cpu;
 423
 424static inline void cpu_tb_jmp_cache_clear(CPUState *cpu)
 425{
 426    unsigned int i;
 427
 428    for (i = 0; i < TB_JMP_CACHE_SIZE; i++) {
 429        atomic_set(&cpu->tb_jmp_cache[i], NULL);
 430    }
 431}
 432
 433/**
 434 * qemu_tcg_mttcg_enabled:
 435 * Check whether we are running MultiThread TCG or not.
 436 *
 437 * Returns: %true if we are in MTTCG mode %false otherwise.
 438 */
 439extern bool mttcg_enabled;
 440#define qemu_tcg_mttcg_enabled() (mttcg_enabled)
 441
 442/**
 443 * cpu_paging_enabled:
 444 * @cpu: The CPU whose state is to be inspected.
 445 *
 446 * Returns: %true if paging is enabled, %false otherwise.
 447 */
 448bool cpu_paging_enabled(const CPUState *cpu);
 449
 450/**
 451 * cpu_get_memory_mapping:
 452 * @cpu: The CPU whose memory mappings are to be obtained.
 453 * @list: Where to write the memory mappings to.
 454 * @errp: Pointer for reporting an #Error.
 455 */
 456void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
 457                            Error **errp);
 458
 459/**
 460 * cpu_write_elf64_note:
 461 * @f: pointer to a function that writes memory to a file
 462 * @cpu: The CPU whose memory is to be dumped
 463 * @cpuid: ID number of the CPU
 464 * @opaque: pointer to the CPUState struct
 465 */
 466int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
 467                         int cpuid, void *opaque);
 468
 469/**
 470 * cpu_write_elf64_qemunote:
 471 * @f: pointer to a function that writes memory to a file
 472 * @cpu: The CPU whose memory is to be dumped
 473 * @cpuid: ID number of the CPU
 474 * @opaque: pointer to the CPUState struct
 475 */
 476int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
 477                             void *opaque);
 478
 479/**
 480 * cpu_write_elf32_note:
 481 * @f: pointer to a function that writes memory to a file
 482 * @cpu: The CPU whose memory is to be dumped
 483 * @cpuid: ID number of the CPU
 484 * @opaque: pointer to the CPUState struct
 485 */
 486int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
 487                         int cpuid, void *opaque);
 488
 489/**
 490 * cpu_write_elf32_qemunote:
 491 * @f: pointer to a function that writes memory to a file
 492 * @cpu: The CPU whose memory is to be dumped
 493 * @cpuid: ID number of the CPU
 494 * @opaque: pointer to the CPUState struct
 495 */
 496int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
 497                             void *opaque);
 498
 499/**
 500 * cpu_get_crash_info:
 501 * @cpu: The CPU to get crash information for
 502 *
 503 * Gets the previously saved crash information.
 504 * Caller is responsible for freeing the data.
 505 */
 506GuestPanicInformation *cpu_get_crash_info(CPUState *cpu);
 507
 508/**
 509 * CPUDumpFlags:
 510 * @CPU_DUMP_CODE:
 511 * @CPU_DUMP_FPU: dump FPU register state, not just integer
 512 * @CPU_DUMP_CCOP: dump info about TCG QEMU's condition code optimization state
 513 */
 514enum CPUDumpFlags {
 515    CPU_DUMP_CODE = 0x00010000,
 516    CPU_DUMP_FPU  = 0x00020000,
 517    CPU_DUMP_CCOP = 0x00040000,
 518};
 519
 520/**
 521 * cpu_dump_state:
 522 * @cpu: The CPU whose state is to be dumped.
 523 * @f: File to dump to.
 524 * @cpu_fprintf: Function to dump with.
 525 * @flags: Flags what to dump.
 526 *
 527 * Dumps CPU state.
 528 */
 529void cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
 530                    int flags);
 531
 532/**
 533 * cpu_dump_statistics:
 534 * @cpu: The CPU whose state is to be dumped.
 535 * @f: File to dump to.
 536 * @cpu_fprintf: Function to dump with.
 537 * @flags: Flags what to dump.
 538 *
 539 * Dumps CPU statistics.
 540 */
 541void cpu_dump_statistics(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
 542                         int flags);
 543
 544#ifndef CONFIG_USER_ONLY
 545/**
 546 * cpu_get_phys_page_attrs_debug:
 547 * @cpu: The CPU to obtain the physical page address for.
 548 * @addr: The virtual address.
 549 * @attrs: Updated on return with the memory transaction attributes to use
 550 *         for this access.
 551 *
 552 * Obtains the physical page corresponding to a virtual one, together
 553 * with the corresponding memory transaction attributes to use for the access.
 554 * Use it only for debugging because no protection checks are done.
 555 *
 556 * Returns: Corresponding physical page address or -1 if no page found.
 557 */
 558static inline hwaddr cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
 559                                                   MemTxAttrs *attrs)
 560{
 561    CPUClass *cc = CPU_GET_CLASS(cpu);
 562
 563    if (cc->get_phys_page_attrs_debug) {
 564        return cc->get_phys_page_attrs_debug(cpu, addr, attrs);
 565    }
 566    /* Fallback for CPUs which don't implement the _attrs_ hook */
 567    *attrs = MEMTXATTRS_UNSPECIFIED;
 568    return cc->get_phys_page_debug(cpu, addr);
 569}
 570
 571/**
 572 * cpu_get_phys_page_debug:
 573 * @cpu: The CPU to obtain the physical page address for.
 574 * @addr: The virtual address.
 575 *
 576 * Obtains the physical page corresponding to a virtual one.
 577 * Use it only for debugging because no protection checks are done.
 578 *
 579 * Returns: Corresponding physical page address or -1 if no page found.
 580 */
 581static inline hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr)
 582{
 583    MemTxAttrs attrs = {};
 584
 585    return cpu_get_phys_page_attrs_debug(cpu, addr, &attrs);
 586}
 587
 588/** cpu_asidx_from_attrs:
 589 * @cpu: CPU
 590 * @attrs: memory transaction attributes
 591 *
 592 * Returns the address space index specifying the CPU AddressSpace
 593 * to use for a memory access with the given transaction attributes.
 594 */
 595static inline int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs)
 596{
 597    CPUClass *cc = CPU_GET_CLASS(cpu);
 598
 599    if (cc->asidx_from_attrs) {
 600        return cc->asidx_from_attrs(cpu, attrs);
 601    }
 602    return 0;
 603}
 604#endif
 605
 606/**
 607 * cpu_list_add:
 608 * @cpu: The CPU to be added to the list of CPUs.
 609 */
 610void cpu_list_add(CPUState *cpu);
 611
 612/**
 613 * cpu_list_remove:
 614 * @cpu: The CPU to be removed from the list of CPUs.
 615 */
 616void cpu_list_remove(CPUState *cpu);
 617
 618/**
 619 * cpu_reset:
 620 * @cpu: The CPU whose state is to be reset.
 621 */
 622void cpu_reset(CPUState *cpu);
 623
 624/**
 625 * cpu_class_by_name:
 626 * @typename: The CPU base type.
 627 * @cpu_model: The model string without any parameters.
 628 *
 629 * Looks up a CPU #ObjectClass matching name @cpu_model.
 630 *
 * Returns: A #CPUClass or %NULL if no matching class is found.
 632 */
 633ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model);
 634
 635/**
 636 * cpu_generic_init:
 637 * @typename: The CPU base type.
 638 * @cpu_model: The model string including optional parameters.
 639 *
 640 * Instantiates a CPU, processes optional parameters and realizes the CPU.
 641 *
 642 * Returns: A #CPUState or %NULL if an error occurred.
 643 */
 644CPUState *cpu_generic_init(const char *typename, const char *cpu_model);
 645
 646/**
 647 * cpu_has_work:
 648 * @cpu: The vCPU to check.
 649 *
 650 * Checks whether the CPU has work to do.
 651 *
 652 * Returns: %true if the CPU has work, %false otherwise.
 653 */
 654static inline bool cpu_has_work(CPUState *cpu)
 655{
 656    CPUClass *cc = CPU_GET_CLASS(cpu);
 657
 658    g_assert(cc->has_work);
 659    return cc->has_work(cpu);
 660}
 661
 662/**
 663 * qemu_cpu_is_self:
 664 * @cpu: The vCPU to check against.
 665 *
 666 * Checks whether the caller is executing on the vCPU thread.
 667 *
 668 * Returns: %true if called from @cpu's thread, %false otherwise.
 669 */
 670bool qemu_cpu_is_self(CPUState *cpu);
 671
 672/**
 673 * qemu_cpu_kick:
 674 * @cpu: The vCPU to kick.
 675 *
 676 * Kicks @cpu's thread.
 677 */
 678void qemu_cpu_kick(CPUState *cpu);
 679
 680/**
 681 * cpu_is_stopped:
 682 * @cpu: The CPU to check.
 683 *
 684 * Checks whether the CPU is stopped.
 685 *
 686 * Returns: %true if run state is not running or if artificially stopped;
 687 * %false otherwise.
 688 */
 689bool cpu_is_stopped(CPUState *cpu);
 690
 691/**
 692 * do_run_on_cpu:
 693 * @cpu: The vCPU to run on.
 694 * @func: The function to be executed.
 695 * @data: Data to pass to the function.
 696 * @mutex: Mutex to release while waiting for @func to run.
 697 *
 698 * Used internally in the implementation of run_on_cpu.
 699 */
 700void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,
 701                   QemuMutex *mutex);
 702
 703/**
 704 * run_on_cpu:
 705 * @cpu: The vCPU to run on.
 706 * @func: The function to be executed.
 707 * @data: Data to pass to the function.
 708 *
 709 * Schedules the function @func for execution on the vCPU @cpu.
 710 */
 711void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
 712
 713/**
 714 * async_run_on_cpu:
 715 * @cpu: The vCPU to run on.
 716 * @func: The function to be executed.
 717 * @data: Data to pass to the function.
 718 *
 719 * Schedules the function @func for execution on the vCPU @cpu asynchronously.
 720 */
 721void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
 722
 723/**
 724 * async_safe_run_on_cpu:
 725 * @cpu: The vCPU to run on.
 726 * @func: The function to be executed.
 727 * @data: Data to pass to the function.
 728 *
 729 * Schedules the function @func for execution on the vCPU @cpu asynchronously,
 730 * while all other vCPUs are sleeping.
 731 *
 732 * Unlike run_on_cpu and async_run_on_cpu, the function is run outside the
 733 * BQL.
 734 */
 735void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
 736
 737/**
 738 * qemu_get_cpu:
 739 * @index: The CPUState@cpu_index value of the CPU to obtain.
 740 *
 741 * Gets a CPU matching @index.
 742 *
 743 * Returns: The CPU or %NULL if there is no matching CPU.
 744 */
 745CPUState *qemu_get_cpu(int index);
 746
 747/**
 748 * cpu_exists:
 749 * @id: Guest-exposed CPU ID to lookup.
 750 *
 751 * Search for CPU with specified ID.
 752 *
 753 * Returns: %true - CPU is found, %false - CPU isn't found.
 754 */
 755bool cpu_exists(int64_t id);
 756
 757/**
 758 * cpu_throttle_set:
 759 * @new_throttle_pct: Percent of sleep time. Valid range is 1 to 99.
 760 *
 761 * Throttles all vcpus by forcing them to sleep for the given percentage of
 762 * time. A throttle_percentage of 25 corresponds to a 75% duty cycle roughly.
 763 * (example: 10ms sleep for every 30ms awake).
 764 *
 765 * cpu_throttle_set can be called as needed to adjust new_throttle_pct.
 766 * Once the throttling starts, it will remain in effect until cpu_throttle_stop
 767 * is called.
 768 */
 769void cpu_throttle_set(int new_throttle_pct);
 770
 771/**
 772 * cpu_throttle_stop:
 773 *
 774 * Stops the vcpu throttling started by cpu_throttle_set.
 775 */
 776void cpu_throttle_stop(void);
 777
 778/**
 779 * cpu_throttle_active:
 780 *
 781 * Returns: %true if the vcpus are currently being throttled, %false otherwise.
 782 */
 783bool cpu_throttle_active(void);
 784
 785/**
 786 * cpu_throttle_get_percentage:
 787 *
 788 * Returns the vcpu throttle percentage. See cpu_throttle_set for details.
 789 *
 790 * Returns: The throttle percentage in range 1 to 99.
 791 */
 792int cpu_throttle_get_percentage(void);
 793
 794#ifndef CONFIG_USER_ONLY
 795
 796typedef void (*CPUInterruptHandler)(CPUState *, int);
 797
 798extern CPUInterruptHandler cpu_interrupt_handler;
 799
 800/**
 801 * cpu_interrupt:
 802 * @cpu: The CPU to set an interrupt on.
 * @mask: The interrupts to set.
 804 *
 805 * Invokes the interrupt handler.
 806 */
static inline void cpu_interrupt(CPUState *cpu, int mask)
{
    /* Dispatch through the global handler pointer so the active
     * accelerator can supply its own delivery routine. */
    cpu_interrupt_handler(cpu, mask);
}
 811
 812#else /* USER_ONLY */
 813
 814void cpu_interrupt(CPUState *cpu, int mask);
 815
 816#endif /* USER_ONLY */
 817
 818#ifdef NEED_CPU_H
 819
 820#ifdef CONFIG_SOFTMMU
 821static inline void cpu_unassigned_access(CPUState *cpu, hwaddr addr,
 822                                         bool is_write, bool is_exec,
 823                                         int opaque, unsigned size)
 824{
 825    CPUClass *cc = CPU_GET_CLASS(cpu);
 826
 827    if (cc->do_unassigned_access) {
 828        cc->do_unassigned_access(cpu, addr, is_write, is_exec, opaque, size);
 829    }
 830}
 831
 832static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
 833                                        MMUAccessType access_type,
 834                                        int mmu_idx, uintptr_t retaddr)
 835{
 836    CPUClass *cc = CPU_GET_CLASS(cpu);
 837
 838    cc->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr);
 839}
 840#endif
 841
 842#endif /* NEED_CPU_H */
 843
 844/**
 845 * cpu_set_pc:
 846 * @cpu: The CPU to set the program counter for.
 847 * @addr: Program counter value.
 848 *
 849 * Sets the program counter for a CPU.
 850 */
 851static inline void cpu_set_pc(CPUState *cpu, vaddr addr)
 852{
 853    CPUClass *cc = CPU_GET_CLASS(cpu);
 854
 855    cc->set_pc(cpu, addr);
 856}
 857
 858/**
 859 * cpu_reset_interrupt:
 860 * @cpu: The CPU to clear the interrupt on.
 861 * @mask: The interrupt mask to clear.
 862 *
 863 * Resets interrupts on the vCPU @cpu.
 864 */
 865void cpu_reset_interrupt(CPUState *cpu, int mask);
 866
 867/**
 868 * cpu_exit:
 869 * @cpu: The CPU to exit.
 870 *
 871 * Requests the CPU @cpu to exit execution.
 872 */
 873void cpu_exit(CPUState *cpu);
 874
 875/**
 876 * cpu_resume:
 877 * @cpu: The CPU to resume.
 878 *
 879 * Resumes CPU, i.e. puts CPU into runnable state.
 880 */
 881void cpu_resume(CPUState *cpu);
 882
 883/**
 884 * cpu_remove:
 885 * @cpu: The CPU to remove.
 886 *
 887 * Requests the CPU to be removed.
 888 */
 889void cpu_remove(CPUState *cpu);
 890
 891 /**
 892 * cpu_remove_sync:
 893 * @cpu: The CPU to remove.
 894 *
 895 * Requests the CPU to be removed and waits till it is removed.
 896 */
 897void cpu_remove_sync(CPUState *cpu);
 898
 899/**
 900 * process_queued_cpu_work() - process all items on CPU work queue
 901 * @cpu: The CPU which work queue to process.
 902 */
 903void process_queued_cpu_work(CPUState *cpu);
 904
 905/**
 906 * cpu_exec_start:
 907 * @cpu: The CPU for the current thread.
 908 *
 909 * Record that a CPU has started execution and can be interrupted with
 910 * cpu_exit.
 911 */
 912void cpu_exec_start(CPUState *cpu);
 913
 914/**
 915 * cpu_exec_end:
 916 * @cpu: The CPU for the current thread.
 917 *
 918 * Record that a CPU has stopped execution and exclusive sections
 919 * can be executed without interrupting it.
 920 */
 921void cpu_exec_end(CPUState *cpu);
 922
 923/**
 924 * start_exclusive:
 925 *
 926 * Wait for a concurrent exclusive section to end, and then start
 927 * a section of work that is run while other CPUs are not running
 928 * between cpu_exec_start and cpu_exec_end.  CPUs that are running
 929 * cpu_exec are exited immediately.  CPUs that call cpu_exec_start
 930 * during the exclusive section go to sleep until this CPU calls
 931 * end_exclusive.
 932 */
 933void start_exclusive(void);
 934
 935/**
 936 * end_exclusive:
 937 *
 938 * Concludes an exclusive execution section started by start_exclusive.
 939 */
 940void end_exclusive(void);
 941
 942/**
 943 * qemu_init_vcpu:
 944 * @cpu: The vCPU to initialize.
 945 *
 946 * Initializes a vCPU.
 947 */
 948void qemu_init_vcpu(CPUState *cpu);
 949
 950#define SSTEP_ENABLE  0x1  /* Enable simulated HW single stepping */
 951#define SSTEP_NOIRQ   0x2  /* Do not use IRQ while single stepping */
#define SSTEP_NOTIMER 0x4  /* Do not use Timers while single stepping */
 953
 954/**
 955 * cpu_single_step:
 * @cpu: CPU to set the flags for.
 957 * @enabled: Flags to enable.
 958 *
 959 * Enables or disables single-stepping for @cpu.
 960 */
 961void cpu_single_step(CPUState *cpu, int enabled);
 962
 963/* Breakpoint/watchpoint flags */
 964#define BP_MEM_READ           0x01
 965#define BP_MEM_WRITE          0x02
 966#define BP_MEM_ACCESS         (BP_MEM_READ | BP_MEM_WRITE)
 967#define BP_STOP_BEFORE_ACCESS 0x04
 968/* 0x08 currently unused */
 969#define BP_GDB                0x10
 970#define BP_CPU                0x20
 971#define BP_ANY                (BP_GDB | BP_CPU)
 972#define BP_WATCHPOINT_HIT_READ 0x40
 973#define BP_WATCHPOINT_HIT_WRITE 0x80
 974#define BP_WATCHPOINT_HIT (BP_WATCHPOINT_HIT_READ | BP_WATCHPOINT_HIT_WRITE)
 975
 976int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
 977                          CPUBreakpoint **breakpoint);
 978int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags);
 979void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint);
 980void cpu_breakpoint_remove_all(CPUState *cpu, int mask);
 981
 982/* Return true if PC matches an installed breakpoint.  */
 983static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask)
 984{
 985    CPUBreakpoint *bp;
 986
 987    if (unlikely(!QTAILQ_EMPTY(&cpu->breakpoints))) {
 988        QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
 989            if (bp->pc == pc && (bp->flags & mask)) {
 990                return true;
 991            }
 992        }
 993    }
 994    return false;
 995}
 996
 997int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
 998                          int flags, CPUWatchpoint **watchpoint);
 999int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
1000                          vaddr len, int flags);
1001void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint);
1002void cpu_watchpoint_remove_all(CPUState *cpu, int mask);
1003
1004/**
1005 * cpu_get_address_space:
1006 * @cpu: CPU to get address space from
1007 * @asidx: index identifying which address space to get
1008 *
1009 * Return the requested address space of this CPU. @asidx
1010 * specifies which address space to read.
1011 */
1012AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx);
1013
1014void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...)
1015    GCC_FMT_ATTR(2, 3);
1016extern Property cpu_common_props[];
1017void cpu_exec_initfn(CPUState *cpu);
1018void cpu_exec_realizefn(CPUState *cpu, Error **errp);
1019void cpu_exec_unrealizefn(CPUState *cpu);
1020
1021#ifdef NEED_CPU_H
1022
1023#ifdef CONFIG_SOFTMMU
1024extern const struct VMStateDescription vmstate_cpu_common;
1025#else
1026#define vmstate_cpu_common vmstate_dummy
1027#endif
1028
/* VMStateField entry for the CPUState embedded at offset 0 of a target
 * CPU struct (named "parent_obj"); it is (de)serialised as a VMS_STRUCT
 * via vmstate_cpu_common — a dummy when CONFIG_SOFTMMU is not set, per
 * the #ifdef above.
 */
#define VMSTATE_CPU() {                                                     \
    .name = "parent_obj",                                                   \
    .size = sizeof(CPUState),                                               \
    .vmsd = &vmstate_cpu_common,                                            \
    .flags = VMS_STRUCT,                                                    \
    .offset = 0,                                                            \
}
1036
1037#endif /* NEED_CPU_H */
1038
1039#define UNASSIGNED_CPU_INDEX -1
1040
1041#endif
1042