1/* 2 * QEMU CPU model 3 * 4 * Copyright (c) 2012 SUSE LINUX Products GmbH 5 * 6 * This program is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU General Public License 8 * as published by the Free Software Foundation; either version 2 9 * of the License, or (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, see 18 * <http://www.gnu.org/licenses/gpl-2.0.html> 19 */ 20#ifndef QEMU_CPU_H 21#define QEMU_CPU_H 22 23#include "hw/qdev-core.h" 24#include "disas/bfd.h" 25#include "exec/hwaddr.h" 26#include "exec/memattrs.h" 27#include "qemu/bitmap.h" 28#include "qemu/queue.h" 29#include "qemu/thread.h" 30 31typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size, 32 void *opaque); 33 34/** 35 * vaddr: 36 * Type wide enough to contain any #target_ulong virtual address. 37 */ 38typedef uint64_t vaddr; 39#define VADDR_PRId PRId64 40#define VADDR_PRIu PRIu64 41#define VADDR_PRIo PRIo64 42#define VADDR_PRIx PRIx64 43#define VADDR_PRIX PRIX64 44#define VADDR_MAX UINT64_MAX 45 46/** 47 * SECTION:cpu 48 * @section_id: QEMU-cpu 49 * @title: CPU Class 50 * @short_description: Base class for all CPUs 51 */ 52 53#define TYPE_CPU "cpu" 54 55/* Since this macro is used a lot in hot code paths and in conjunction with 56 * FooCPU *foo_env_get_cpu(), we deviate from usual QOM practice by using 57 * an unchecked cast. 
58 */ 59#define CPU(obj) ((CPUState *)(obj)) 60 61#define CPU_CLASS(class) OBJECT_CLASS_CHECK(CPUClass, (class), TYPE_CPU) 62#define CPU_GET_CLASS(obj) OBJECT_GET_CLASS(CPUClass, (obj), TYPE_CPU) 63 64typedef enum MMUAccessType { 65 MMU_DATA_LOAD = 0, 66 MMU_DATA_STORE = 1, 67 MMU_INST_FETCH = 2 68} MMUAccessType; 69 70typedef struct CPUWatchpoint CPUWatchpoint; 71 72typedef void (*CPUUnassignedAccess)(CPUState *cpu, hwaddr addr, 73 bool is_write, bool is_exec, int opaque, 74 unsigned size); 75 76struct TranslationBlock; 77 78/** 79 * CPUClass: 80 * @class_by_name: Callback to map -cpu command line model name to an 81 * instantiatable CPU type. 82 * @parse_features: Callback to parse command line arguments. 83 * @reset: Callback to reset the #CPUState to its initial state. 84 * @reset_dump_flags: #CPUDumpFlags to use for reset logging. 85 * @has_work: Callback for checking if there is work to do. 86 * @do_interrupt: Callback for interrupt handling. 87 * @do_unassigned_access: Callback for unassigned access handling. 88 * @do_unaligned_access: Callback for unaligned access handling, if 89 * the target defines #ALIGNED_ONLY. 90 * @virtio_is_big_endian: Callback to return %true if a CPU which supports 91 * runtime configurable endianness is currently big-endian. Non-configurable 92 * CPUs can use the default implementation of this method. This method should 93 * not be used by any callers other than the pre-1.0 virtio devices. 94 * @memory_rw_debug: Callback for GDB memory access. 95 * @dump_state: Callback for dumping state. 96 * @dump_statistics: Callback for dumping statistics. 97 * @get_arch_id: Callback for getting architecture-dependent CPU ID. 98 * @get_paging_enabled: Callback for inquiring whether paging is enabled. 99 * @get_memory_mapping: Callback for obtaining the memory mappings. 100 * @set_pc: Callback for setting the Program Counter register. 101 * @synchronize_from_tb: Callback for synchronizing state from a TCG 102 * #TranslationBlock. 
103 * @handle_mmu_fault: Callback for handling an MMU fault. 104 * @get_phys_page_debug: Callback for obtaining a physical address. 105 * @get_phys_page_attrs_debug: Callback for obtaining a physical address and the 106 * associated memory transaction attributes to use for the access. 107 * CPUs which use memory transaction attributes should implement this 108 * instead of get_phys_page_debug. 109 * @asidx_from_attrs: Callback to return the CPU AddressSpace to use for 110 * a memory access with the specified memory transaction attributes. 111 * @gdb_read_register: Callback for letting GDB read a register. 112 * @gdb_write_register: Callback for letting GDB write a register. 113 * @debug_check_watchpoint: Callback: return true if the architectural 114 * watchpoint whose address has matched should really fire. 115 * @debug_excp_handler: Callback for handling debug exceptions. 116 * @write_elf64_note: Callback for writing a CPU-specific ELF note to a 117 * 64-bit VM coredump. 118 * @write_elf32_qemunote: Callback for writing a CPU- and QEMU-specific ELF 119 * note to a 32-bit VM coredump. 120 * @write_elf32_note: Callback for writing a CPU-specific ELF note to a 121 * 32-bit VM coredump. 122 * @write_elf32_qemunote: Callback for writing a CPU- and QEMU-specific ELF 123 * note to a 32-bit VM coredump. 124 * @vmsd: State description for migration. 125 * @gdb_num_core_regs: Number of core registers accessible to GDB. 126 * @gdb_core_xml_file: File name for core registers GDB XML description. 127 * @gdb_stop_before_watchpoint: Indicates whether GDB expects the CPU to stop 128 * before the insn which triggers a watchpoint rather than after it. 129 * @gdb_arch_name: Optional callback that returns the architecture name known 130 * to GDB. The caller must free the returned string with g_free. 131 * @cpu_exec_enter: Callback for cpu_exec preparation. 132 * @cpu_exec_exit: Callback for cpu_exec cleanup. 133 * @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec. 
134 * @disas_set_info: Setup architecture specific components of disassembly info 135 * 136 * Represents a CPU family or model. 137 */ 138typedef struct CPUClass { 139 /*< private >*/ 140 DeviceClass parent_class; 141 /*< public >*/ 142 143 ObjectClass *(*class_by_name)(const char *cpu_model); 144 void (*parse_features)(const char *typename, char *str, Error **errp); 145 146 void (*reset)(CPUState *cpu); 147 int reset_dump_flags; 148 bool (*has_work)(CPUState *cpu); 149 void (*do_interrupt)(CPUState *cpu); 150 CPUUnassignedAccess do_unassigned_access; 151 void (*do_unaligned_access)(CPUState *cpu, vaddr addr, 152 MMUAccessType access_type, 153 int mmu_idx, uintptr_t retaddr); 154 bool (*virtio_is_big_endian)(CPUState *cpu); 155 int (*memory_rw_debug)(CPUState *cpu, vaddr addr, 156 uint8_t *buf, int len, bool is_write); 157 void (*dump_state)(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf, 158 int flags); 159 void (*dump_statistics)(CPUState *cpu, FILE *f, 160 fprintf_function cpu_fprintf, int flags); 161 int64_t (*get_arch_id)(CPUState *cpu); 162 bool (*get_paging_enabled)(const CPUState *cpu); 163 void (*get_memory_mapping)(CPUState *cpu, MemoryMappingList *list, 164 Error **errp); 165 void (*set_pc)(CPUState *cpu, vaddr value); 166 vaddr (*get_pc)(CPUState *cpu); 167 void (*synchronize_from_tb)(CPUState *cpu, struct TranslationBlock *tb); 168 int (*handle_mmu_fault)(CPUState *cpu, vaddr address, int rw, 169 int mmu_index); 170 hwaddr (*get_phys_page_debug)(CPUState *cpu, vaddr addr); 171 hwaddr (*get_phys_page_attrs_debug)(CPUState *cpu, vaddr addr, 172 MemTxAttrs *attrs); 173 int (*asidx_from_attrs)(CPUState *cpu, MemTxAttrs attrs); 174 int (*gdb_read_register)(CPUState *cpu, uint8_t *buf, int reg); 175 int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg); 176 bool (*debug_check_watchpoint)(CPUState *cpu, CPUWatchpoint *wp); 177 void (*debug_excp_handler)(CPUState *cpu); 178 179 int (*write_elf64_note)(WriteCoreDumpFunction f, CPUState *cpu, 180 
int cpuid, void *opaque); 181 int (*write_elf64_qemunote)(WriteCoreDumpFunction f, CPUState *cpu, 182 void *opaque); 183 int (*write_elf32_note)(WriteCoreDumpFunction f, CPUState *cpu, 184 int cpuid, void *opaque); 185 int (*write_elf32_qemunote)(WriteCoreDumpFunction f, CPUState *cpu, 186 void *opaque); 187 188 void (*set_debug_context)(CPUState *cpu, unsigned int ctx); 189 const char **debug_contexts; 190 const struct VMStateDescription *vmsd; 191 int gdb_num_core_regs; 192 const char *gdb_core_xml_file; 193 gchar * (*gdb_arch_name)(CPUState *cpu); 194 bool gdb_stop_before_watchpoint; 195 196 void (*cpu_exec_enter)(CPUState *cpu); 197 void (*cpu_exec_exit)(CPUState *cpu); 198 bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request); 199 200 void (*disas_set_info)(CPUState *cpu, disassemble_info *info); 201} CPUClass; 202 203#ifdef HOST_WORDS_BIGENDIAN 204typedef struct icount_decr_u16 { 205 uint16_t high; 206 uint16_t low; 207} icount_decr_u16; 208#else 209typedef struct icount_decr_u16 { 210 uint16_t low; 211 uint16_t high; 212} icount_decr_u16; 213#endif 214 215typedef struct CPUBreakpoint { 216 vaddr pc; 217 int flags; /* BP_* */ 218 QTAILQ_ENTRY(CPUBreakpoint) entry; 219} CPUBreakpoint; 220 221struct CPUWatchpoint { 222 vaddr vaddr; 223 vaddr len; 224 vaddr hitaddr; 225 MemTxAttrs hitattrs; 226 int flags; /* BP_* */ 227 QTAILQ_ENTRY(CPUWatchpoint) entry; 228}; 229 230struct KVMState; 231struct kvm_run; 232 233#define TB_JMP_CACHE_BITS 12 234#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS) 235 236/* work queue */ 237 238/* The union type allows passing of 64 bit target pointers on 32 bit 239 * hosts in a single parameter 240 */ 241typedef union { 242 int host_int; 243 unsigned long host_ulong; 244 void *host_ptr; 245 vaddr target_ptr; 246} run_on_cpu_data; 247 248#define RUN_ON_CPU_HOST_PTR(p) ((run_on_cpu_data){.host_ptr = (p)}) 249#define RUN_ON_CPU_HOST_INT(i) ((run_on_cpu_data){.host_int = (i)}) 250#define RUN_ON_CPU_HOST_ULONG(ul) 
((run_on_cpu_data){.host_ulong = (ul)}) 251#define RUN_ON_CPU_TARGET_PTR(v) ((run_on_cpu_data){.target_ptr = (v)}) 252#define RUN_ON_CPU_NULL RUN_ON_CPU_HOST_PTR(NULL) 253 254typedef void (*run_on_cpu_func)(CPUState *cpu, run_on_cpu_data data); 255 256struct qemu_work_item; 257 258/** 259 * CPUState: 260 * @cpu_index: CPU index (informative). 261 * @nr_cores: Number of cores within this CPU package. 262 * @nr_threads: Number of threads within this CPU. 263 * @numa_node: NUMA node this CPU is belonging to. 264 * @host_tid: Host thread ID. 265 * @running: #true if CPU is currently running (lockless). 266 * @has_waiter: #true if a CPU is currently waiting for the cpu_exec_end; 267 * valid under cpu_list_lock. 268 * @created: Indicates whether the CPU thread has been successfully created. 269 * @interrupt_request: Indicates a pending interrupt request. 270 * @halted: Nonzero if the CPU is in suspended state. 271 * @stop: Indicates a pending stop request. 272 * @stopped: Indicates the CPU has been artificially stopped. 273 * @unplug: Indicates a pending CPU unplug request. 274 * @crash_occurred: Indicates the OS reported a crash (panic) for this CPU 275 * @tcg_exit_req: Set to force TCG to stop executing linked TBs for this 276 * CPU and return to its top level loop. 277 * @singlestep_enabled: Flags for single-stepping. 278 * @icount_extra: Instructions until next timer event. 279 * @icount_decr: Number of cycles left, with interrupt flag in high bit. 280 * This allows a single read-compare-cbranch-write sequence to test 281 * for both decrementer underflow and exceptions. 282 * @can_do_io: Nonzero if memory-mapped IO is safe. Deterministic execution 283 * requires that IO only be performed on the last instruction of a TB 284 * so that interrupts take effect immediately. 
285 * @cpu_ases: Pointer to array of CPUAddressSpaces (which define the 286 * AddressSpaces this CPU has) 287 * @num_ases: number of CPUAddressSpaces in @cpu_ases 288 * @as: Pointer to the first AddressSpace, for the convenience of targets which 289 * only have a single AddressSpace 290 * @env_ptr: Pointer to subclass-specific CPUArchState field. 291 * @gdb_regs: Additional GDB registers. 292 * @gdb_num_regs: Number of total registers accessible to GDB. 293 * @gdb_num_g_regs: Number of registers in GDB 'g' packets. 294 * @next_cpu: Next CPU sharing TB cache. 295 * @opaque: User data. 296 * @mem_io_pc: Host Program Counter at which the memory was accessed. 297 * @mem_io_vaddr: Target virtual address at which the memory was accessed. 298 * @kvm_fd: vCPU file descriptor for KVM. 299 * @work_mutex: Lock to prevent multiple access to queued_work_*. 300 * @queued_work_first: First asynchronous work pending. 301 * @trace_dstate: Dynamic tracing state of events for this vCPU (bitmask). 302 * 303 * State of one CPU core or thread. 
304 */ 305struct CPUState { 306 /*< private >*/ 307 DeviceState parent_obj; 308 /*< public >*/ 309 310 int nr_cores; 311 int nr_threads; 312 int numa_node; 313 314 struct QemuThread *thread; 315#ifdef _WIN32 316 HANDLE hThread; 317#endif 318 int thread_id; 319 uint32_t host_tid; 320 bool running, has_waiter; 321 struct QemuCond *halt_cond; 322 bool thread_kicked; 323 bool created; 324 bool stop; 325 bool stopped; 326 bool unplug; 327 bool crash_occurred; 328 bool exit_request; 329 uint32_t interrupt_request; 330 int singlestep_enabled; 331 int64_t icount_extra; 332 sigjmp_buf jmp_env; 333 334 QemuMutex work_mutex; 335 struct qemu_work_item *queued_work_first, *queued_work_last; 336 337 CPUAddressSpace *cpu_ases; 338 int num_ases; 339 AddressSpace *as; 340 MemoryRegion *memory; 341 342 void *env_ptr; /* CPUArchState */ 343 344 /* Writes protected by tb_lock, reads not thread-safe */ 345 struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE]; 346 347 struct GDBRegisterState *gdb_regs; 348 int gdb_num_regs; 349 int gdb_num_g_regs; 350 QTAILQ_ENTRY(CPUState) node; 351 352 /* ice debug support */ 353 QTAILQ_HEAD(breakpoints_head, CPUBreakpoint) breakpoints; 354 355 QTAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints; 356 CPUWatchpoint *watchpoint_hit; 357 358 void *opaque; 359 360 /* In order to avoid passing too many arguments to the MMIO helpers, 361 * we store some rarely used information in the CPU context. 362 */ 363 uintptr_t mem_io_pc; 364 vaddr mem_io_vaddr; 365 366 int kvm_fd; 367 bool kvm_vcpu_dirty; 368 struct KVMState *kvm_state; 369 struct kvm_run *kvm_run; 370 371 /* 372 * Used for events with 'vcpu' and *without* the 'disabled' properties. 373 * Dynamically allocated based on bitmap requried to hold up to 374 * trace_get_vcpu_event_count() entries. 375 */ 376 unsigned long *trace_dstate; 377 378 /* TODO Move common fields from CPUArchState here. 
*/ 379 int cpu_index; /* used by alpha TCG */ 380 uint32_t halted; /* used by alpha, cris, ppc TCG */ 381 union { 382 uint32_t u32; 383 icount_decr_u16 u16; 384 } icount_decr; 385 uint32_t can_do_io; 386 int32_t exception_index; /* used by m68k TCG */ 387 388 /* Used to keep track of an outstanding cpu throttle thread for migration 389 * autoconverge 390 */ 391 bool throttle_thread_scheduled; 392 393 /* Note that this is accessed at the start of every TB via a negative 394 offset from AREG0. Leave this field at the end so as to make the 395 (absolute value) offset as small as possible. This reduces code 396 size, especially for hosts without large memory offsets. */ 397 uint32_t tcg_exit_req; 398 399 bool reset_pin; /* state of reset pin */ 400 bool halt_pin; /* state of halt pin */ 401 bool arch_halt_pin; 402 403 char *gdb_id; 404}; 405 406QTAILQ_HEAD(CPUTailQ, CPUState); 407extern struct CPUTailQ cpus; 408#define CPU_NEXT(cpu) QTAILQ_NEXT(cpu, node) 409#define CPU_FOREACH(cpu) QTAILQ_FOREACH(cpu, &cpus, node) 410#define CPU_FOREACH_SAFE(cpu, next_cpu) \ 411 QTAILQ_FOREACH_SAFE(cpu, &cpus, node, next_cpu) 412#define CPU_FOREACH_REVERSE(cpu) \ 413 QTAILQ_FOREACH_REVERSE(cpu, &cpus, CPUTailQ, node) 414#define first_cpu QTAILQ_FIRST(&cpus) 415 416extern __thread CPUState *current_cpu; 417 418/** 419 * cpu_paging_enabled: 420 * @cpu: The CPU whose state is to be inspected. 421 * 422 * Returns: %true if paging is enabled, %false otherwise. 423 */ 424bool cpu_paging_enabled(const CPUState *cpu); 425 426/** 427 * cpu_get_memory_mapping: 428 * @cpu: The CPU whose memory mappings are to be obtained. 429 * @list: Where to write the memory mappings to. 430 * @errp: Pointer for reporting an #Error. 
431 */ 432void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list, 433 Error **errp); 434 435/** 436 * cpu_write_elf64_note: 437 * @f: pointer to a function that writes memory to a file 438 * @cpu: The CPU whose memory is to be dumped 439 * @cpuid: ID number of the CPU 440 * @opaque: pointer to the CPUState struct 441 */ 442int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu, 443 int cpuid, void *opaque); 444 445/** 446 * cpu_write_elf64_qemunote: 447 * @f: pointer to a function that writes memory to a file 448 * @cpu: The CPU whose memory is to be dumped 449 * @cpuid: ID number of the CPU 450 * @opaque: pointer to the CPUState struct 451 */ 452int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu, 453 void *opaque); 454 455/** 456 * cpu_write_elf32_note: 457 * @f: pointer to a function that writes memory to a file 458 * @cpu: The CPU whose memory is to be dumped 459 * @cpuid: ID number of the CPU 460 * @opaque: pointer to the CPUState struct 461 */ 462int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu, 463 int cpuid, void *opaque); 464 465/** 466 * cpu_write_elf32_qemunote: 467 * @f: pointer to a function that writes memory to a file 468 * @cpu: The CPU whose memory is to be dumped 469 * @cpuid: ID number of the CPU 470 * @opaque: pointer to the CPUState struct 471 */ 472int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu, 473 void *opaque); 474 475/** 476 * CPUDumpFlags: 477 * @CPU_DUMP_CODE: 478 * @CPU_DUMP_FPU: dump FPU register state, not just integer 479 * @CPU_DUMP_CCOP: dump info about TCG QEMU's condition code optimization state 480 */ 481enum CPUDumpFlags { 482 CPU_DUMP_CODE = 0x00010000, 483 CPU_DUMP_FPU = 0x00020000, 484 CPU_DUMP_CCOP = 0x00040000, 485}; 486 487/** 488 * cpu_dump_state: 489 * @cpu: The CPU whose state is to be dumped. 490 * @f: File to dump to. 491 * @cpu_fprintf: Function to dump with. 492 * @flags: Flags what to dump. 493 * 494 * Dumps CPU state. 
 */
void cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
                    int flags);

/**
 * cpu_dump_statistics:
 * @cpu: The CPU whose state is to be dumped.
 * @f: File to dump to.
 * @cpu_fprintf: Function to dump with.
 * @flags: Flags specifying what to dump.
 *
 * Dumps CPU statistics.
 */
void cpu_dump_statistics(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
                         int flags);

#ifndef CONFIG_USER_ONLY
/**
 * cpu_get_phys_page_attrs_debug:
 * @cpu: The CPU to obtain the physical page address for.
 * @addr: The virtual address.
 * @attrs: Updated on return with the memory transaction attributes to use
 *         for this access.
 *
 * Obtains the physical page corresponding to a virtual one, together
 * with the corresponding memory transaction attributes to use for the access.
 * Use it only for debugging because no protection checks are done.
 *
 * Returns: Corresponding physical page address or -1 if no page found.
 */
static inline hwaddr cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
                                                   MemTxAttrs *attrs)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->get_phys_page_attrs_debug) {
        return cc->get_phys_page_attrs_debug(cpu, addr, attrs);
    }
    /* Fallback for CPUs which don't implement the _attrs_ hook */
    *attrs = MEMTXATTRS_UNSPECIFIED;
    return cc->get_phys_page_debug(cpu, addr);
}

/**
 * cpu_get_phys_page_debug:
 * @cpu: The CPU to obtain the physical page address for.
 * @addr: The virtual address.
 *
 * Obtains the physical page corresponding to a virtual one.
 * Use it only for debugging because no protection checks are done.
 *
 * Returns: Corresponding physical page address or -1 if no page found.
 */
static inline hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr)
{
    /* Unspecified attributes let the _attrs_ variant pick its default */
    MemTxAttrs attrs = {};

    return cpu_get_phys_page_attrs_debug(cpu, addr, &attrs);
}

/**
 * cpu_asidx_from_attrs:
 * @cpu: CPU
 * @attrs: memory transaction attributes
 *
 * Returns the address space index specifying the CPU AddressSpace
 * to use for a memory access with the given transaction attributes.
 */
static inline int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->asidx_from_attrs) {
        return cc->asidx_from_attrs(cpu, attrs);
    }
    /* CPUs without the hook have a single address space, index 0 */
    return 0;
}
#endif

/**
 * cpu_list_add:
 * @cpu: The CPU to be added to the list of CPUs.
 */
void cpu_list_add(CPUState *cpu);

/**
 * cpu_list_remove:
 * @cpu: The CPU to be removed from the list of CPUs.
 */
void cpu_list_remove(CPUState *cpu);

/**
 * cpu_reset:
 * @cpu: The CPU whose state is to be reset.
 */
void cpu_reset(CPUState *cpu);

/**
 * cpu_class_by_name:
 * @typename: The CPU base type.
 * @cpu_model: The model string without any parameters.
 *
 * Looks up a CPU #ObjectClass matching name @cpu_model.
 *
 * Returns: A #CPUClass or %NULL if no matching class is found.
 */
ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model);

/**
 * cpu_generic_init:
 * @typename: The CPU base type.
 * @cpu_model: The model string including optional parameters.
 *
 * Instantiates a CPU, processes optional parameters and realizes the CPU.
 *
 * Returns: A #CPUState or %NULL if an error occurred.
 */
CPUState *cpu_generic_init(const char *typename, const char *cpu_model);

/**
 * cpu_has_work:
 * @cpu: The vCPU to check.
 *
 * Checks whether the CPU has work to do.
 *
 * Returns: %true if the CPU has work, %false otherwise.
 */
static inline bool cpu_has_work(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    /* has_work is mandatory: every CPU class must provide it */
    g_assert(cc->has_work);
    return cc->has_work(cpu);
}

/**
 * qemu_cpu_is_self:
 * @cpu: The vCPU to check against.
 *
 * Checks whether the caller is executing on the vCPU thread.
 *
 * Returns: %true if called from @cpu's thread, %false otherwise.
 */
bool qemu_cpu_is_self(CPUState *cpu);

/**
 * qemu_cpu_kick:
 * @cpu: The vCPU to kick.
 *
 * Kicks @cpu's thread.
 */
void qemu_cpu_kick(CPUState *cpu);

/**
 * cpu_is_stopped:
 * @cpu: The CPU to check.
 *
 * Checks whether the CPU is stopped.
 *
 * Returns: %true if run state is not running or if artificially stopped;
 * %false otherwise.
 */
bool cpu_is_stopped(CPUState *cpu);

/**
 * do_run_on_cpu:
 * @cpu: The vCPU to run on.
 * @func: The function to be executed.
 * @data: Data to pass to the function.
 * @mutex: Mutex to release while waiting for @func to run.
 *
 * Used internally in the implementation of run_on_cpu.
 */
void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,
                   QemuMutex *mutex);

/**
 * run_on_cpu:
 * @cpu: The vCPU to run on.
 * @func: The function to be executed.
 * @data: Data to pass to the function.
 *
 * Schedules the function @func for execution on the vCPU @cpu.
 */
void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);

/**
 * async_run_on_cpu:
 * @cpu: The vCPU to run on.
 * @func: The function to be executed.
 * @data: Data to pass to the function.
 *
 * Schedules the function @func for execution on the vCPU @cpu asynchronously.
 */
void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);

/**
 * async_safe_run_on_cpu:
 * @cpu: The vCPU to run on.
 * @func: The function to be executed.
694 * @data: Data to pass to the function. 695 * 696 * Schedules the function @func for execution on the vCPU @cpu asynchronously, 697 * while all other vCPUs are sleeping. 698 * 699 * Unlike run_on_cpu and async_run_on_cpu, the function is run outside the 700 * BQL. 701 */ 702void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data); 703 704/** 705 * qemu_get_cpu: 706 * @index: The CPUState@cpu_index value of the CPU to obtain. 707 * 708 * Gets a CPU matching @index. 709 * 710 * Returns: The CPU or %NULL if there is no matching CPU. 711 */ 712CPUState *qemu_get_cpu(int index); 713 714/** 715 * cpu_exists: 716 * @id: Guest-exposed CPU ID to lookup. 717 * 718 * Search for CPU with specified ID. 719 * 720 * Returns: %true - CPU is found, %false - CPU isn't found. 721 */ 722bool cpu_exists(int64_t id); 723 724/** 725 * cpu_throttle_set: 726 * @new_throttle_pct: Percent of sleep time. Valid range is 1 to 99. 727 * 728 * Throttles all vcpus by forcing them to sleep for the given percentage of 729 * time. A throttle_percentage of 25 corresponds to a 75% duty cycle roughly. 730 * (example: 10ms sleep for every 30ms awake). 731 * 732 * cpu_throttle_set can be called as needed to adjust new_throttle_pct. 733 * Once the throttling starts, it will remain in effect until cpu_throttle_stop 734 * is called. 735 */ 736void cpu_throttle_set(int new_throttle_pct); 737 738/** 739 * cpu_throttle_stop: 740 * 741 * Stops the vcpu throttling started by cpu_throttle_set. 742 */ 743void cpu_throttle_stop(void); 744 745/** 746 * cpu_throttle_active: 747 * 748 * Returns: %true if the vcpus are currently being throttled, %false otherwise. 749 */ 750bool cpu_throttle_active(void); 751 752/** 753 * cpu_throttle_get_percentage: 754 * 755 * Returns the vcpu throttle percentage. See cpu_throttle_set for details. 756 * 757 * Returns: The throttle percentage in range 1 to 99. 
 */
int cpu_throttle_get_percentage(void);

#ifndef CONFIG_USER_ONLY

typedef void (*CPUInterruptHandler)(CPUState *, int);

extern CPUInterruptHandler cpu_interrupt_handler;

/**
 * cpu_interrupt:
 * @cpu: The CPU to set an interrupt on.
 * @mask: The interrupts to set.
 *
 * Invokes the interrupt handler.
 */
static inline void cpu_interrupt(CPUState *cpu, int mask)
{
    cpu_interrupt_handler(cpu, mask);
}

#else /* USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask);

#endif /* USER_ONLY */

#ifdef CONFIG_SOFTMMU
/* Report an access to an unassigned physical address; the per-CPU-class
 * do_unassigned_access hook is optional and skipped when absent.
 */
static inline void cpu_unassigned_access(CPUState *cpu, hwaddr addr,
                                         bool is_write, bool is_exec,
                                         int opaque, unsigned size)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->do_unassigned_access) {
        cc->do_unassigned_access(cpu, addr, is_write, is_exec, opaque, size);
    }
}

/* Report an unaligned access; the hook is invoked unconditionally, so any
 * target reaching this path must implement do_unaligned_access.
 */
static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
                                        MMUAccessType access_type,
                                        int mmu_idx, uintptr_t retaddr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cc->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr);
}
#endif

/**
 * cpu_set_pc:
 * @cpu: The CPU to set the program counter for.
 * @addr: Program counter value.
 *
 * Sets the program counter for a CPU.
 */
static inline void cpu_set_pc(CPUState *cpu, vaddr addr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cc->set_pc(cpu, addr);
}

/**
 * cpu_reset_interrupt:
 * @cpu: The CPU to clear the interrupt on.
 * @mask: The interrupt mask to clear.
 *
 * Resets interrupts on the vCPU @cpu.
 */
void cpu_reset_interrupt(CPUState *cpu, int mask);

/**
 * cpu_exit:
 * @cpu: The CPU to exit.
 *
 * Requests the CPU @cpu to exit execution.
 */
void cpu_exit(CPUState *cpu);

/**
 * cpu_resume:
 * @cpu: The CPU to resume.
 *
 * Resumes CPU, i.e.
puts CPU into runnable state. 843 */ 844void cpu_resume(CPUState *cpu); 845 846/** 847 * cpu_remove: 848 * @cpu: The CPU to remove. 849 * 850 * Requests the CPU to be removed. 851 */ 852void cpu_remove(CPUState *cpu); 853 854 /** 855 * cpu_remove_sync: 856 * @cpu: The CPU to remove. 857 * 858 * Requests the CPU to be removed and waits till it is removed. 859 */ 860void cpu_remove_sync(CPUState *cpu); 861 862/** 863 * process_queued_cpu_work() - process all items on CPU work queue 864 * @cpu: The CPU which work queue to process. 865 */ 866void process_queued_cpu_work(CPUState *cpu); 867 868/** 869 * cpu_exec_start: 870 * @cpu: The CPU for the current thread. 871 * 872 * Record that a CPU has started execution and can be interrupted with 873 * cpu_exit. 874 */ 875void cpu_exec_start(CPUState *cpu); 876 877/** 878 * cpu_exec_end: 879 * @cpu: The CPU for the current thread. 880 * 881 * Record that a CPU has stopped execution and exclusive sections 882 * can be executed without interrupting it. 883 */ 884void cpu_exec_end(CPUState *cpu); 885 886/** 887 * start_exclusive: 888 * 889 * Wait for a concurrent exclusive section to end, and then start 890 * a section of work that is run while other CPUs are not running 891 * between cpu_exec_start and cpu_exec_end. CPUs that are running 892 * cpu_exec are exited immediately. CPUs that call cpu_exec_start 893 * during the exclusive section go to sleep until this CPU calls 894 * end_exclusive. 895 */ 896void start_exclusive(void); 897 898/** 899 * end_exclusive: 900 * 901 * Concludes an exclusive execution section started by start_exclusive. 902 */ 903void end_exclusive(void); 904 905/** 906 * qemu_init_vcpu: 907 * @cpu: The vCPU to initialize. 908 * 909 * Initializes a vCPU. 
910 */ 911void qemu_init_vcpu(CPUState *cpu); 912 913#define SSTEP_ENABLE 0x1 /* Enable simulated HW single stepping */ 914#define SSTEP_NOIRQ 0x2 /* Do not use IRQ while single stepping */ 915#define SSTEP_NOTIMER 0x4 /* Do not Timers while single stepping */ 916 917/** 918 * cpu_single_step: 919 * @cpu: CPU to the flags for. 920 * @enabled: Flags to enable. 921 * 922 * Enables or disables single-stepping for @cpu. 923 */ 924void cpu_single_step(CPUState *cpu, int enabled); 925 926/* Breakpoint/watchpoint flags */ 927#define BP_MEM_READ 0x01 928#define BP_MEM_WRITE 0x02 929#define BP_MEM_ACCESS (BP_MEM_READ | BP_MEM_WRITE) 930#define BP_STOP_BEFORE_ACCESS 0x04 931/* 0x08 currently unused */ 932#define BP_GDB 0x10 933#define BP_CPU 0x20 934#define BP_ANY (BP_GDB | BP_CPU) 935#define BP_WATCHPOINT_HIT_READ 0x40 936#define BP_WATCHPOINT_HIT_WRITE 0x80 937#define BP_WATCHPOINT_HIT (BP_WATCHPOINT_HIT_READ | BP_WATCHPOINT_HIT_WRITE) 938 939int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags, 940 CPUBreakpoint **breakpoint); 941int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags); 942void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint); 943void cpu_breakpoint_remove_all(CPUState *cpu, int mask); 944 945/* Return true if PC matches an installed breakpoint. 
static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask)
{
    CPUBreakpoint *bp;

    /* Fast path: most CPUs have no breakpoints installed at all */
    if (unlikely(!QTAILQ_EMPTY(&cpu->breakpoints))) {
        QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
            if (bp->pc == pc && (bp->flags & mask)) {
                return true;
            }
        }
    }
    return false;
}

/* Watchpoint management; flags take the BP_* values defined above */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint);
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
                          vaddr len, int flags);
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint);
void cpu_watchpoint_remove_all(CPUState *cpu, int mask);

/**
 * cpu_get_address_space:
 * @cpu: CPU to get address space from
 * @asidx: index identifying which address space to get
 *
 * Return the requested address space of this CPU. @asidx
 * specifies which address space to read.
 */
AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx);

void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...)
    GCC_FMT_ATTR(2, 3);
void cpu_exec_initfn(CPUState *cpu);
void cpu_exec_realizefn(CPUState *cpu, Error **errp);
void cpu_exec_unrealizefn(CPUState *cpu);

#ifdef CONFIG_SOFTMMU
extern const struct VMStateDescription vmstate_cpu_common;
#else
/* User-mode emulation has no migration state for the common CPU fields */
#define vmstate_cpu_common vmstate_dummy
#endif

/* Embeds the common CPU migration state as the "parent_obj" substruct */
#define VMSTATE_CPU() {                                                     \
    .name = "parent_obj",                                                   \
    .size = sizeof(CPUState),                                               \
    .vmsd = &vmstate_cpu_common,                                            \
    .flags = VMS_STRUCT,                                                    \
    .offset = 0,                                                            \
}

void cpu_halt_gpio(void *opaque, int irq, int level);
void cpu_reset_gpio(void *opaque, int irq, int level);
void cpu_halt_update(CPUState *cpu);

1001#define UNASSIGNED_CPU_INDEX -1 1002 1003#endif 1004