/*
 * Defines common to all CPU targets.
 *
 * NOTE(review): the original file header (purpose/license text) was
 * garbled in extraction — restore it from the upstream source.
 */
19#ifndef CPU_ALL_H
20#define CPU_ALL_H
21
22#include "exec/cpu-common.h"
23#include "exec/memory.h"
24#include "qemu/thread.h"
25#include "hw/core/cpu.h"
26#include "qemu/rcu.h"
27
28#define EXCP_INTERRUPT 0x10000
29#define EXCP_HLT 0x10001
30#define EXCP_DEBUG 0x10002
31#define EXCP_HALTED 0x10003
32#define EXCP_YIELD 0x10004
33#define EXCP_ATOMIC 0x10005
34
35
36
37
38
39
40
41
42
43#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
44#define BSWAP_NEEDED
45#endif
46
47#ifdef BSWAP_NEEDED
48
/* Convert a 16-bit value between target and host byte order.
 * Host and target endianness differ here, so this is a byte swap. */
static inline uint16_t tswap16(uint16_t x)
{
    return bswap16(x);
}
53
/* Convert a 32-bit value between target and host byte order.
 * Host and target endianness differ here, so this is a byte swap. */
static inline uint32_t tswap32(uint32_t x)
{
    return bswap32(x);
}
58
/* Convert a 64-bit value between target and host byte order.
 * Host and target endianness differ here, so this is a byte swap. */
static inline uint64_t tswap64(uint64_t x)
{
    return bswap64(x);
}
63
/* In-place variant of tswap16(): byte-swap the 16-bit value at *p. */
static inline void tswap16s(uint16_t *p)
{
    *p = bswap16(*p);
}
68
/* In-place variant of tswap32(): byte-swap the 32-bit value at *p. */
static inline void tswap32s(uint32_t *p)
{
    *p = bswap32(*p);
}
73
/* In-place variant of tswap64(): byte-swap the 64-bit value at *p. */
static inline void tswap64s(uint64_t *p)
{
    *p = bswap64(*p);
}
78
79#else
80
/* Host and target endianness match: target "swap" is the identity. */
static inline uint16_t tswap16(uint16_t v)
{
    return v;
}
85
/* Host and target endianness match: target "swap" is the identity. */
static inline uint32_t tswap32(uint32_t v)
{
    return v;
}
90
/* Host and target endianness match: target "swap" is the identity. */
static inline uint64_t tswap64(uint64_t v)
{
    return v;
}
95
/* In-place variant: endianness matches, so there is nothing to swap. */
static inline void tswap16s(uint16_t *s)
{
}
99
/* In-place variant: endianness matches, so there is nothing to swap. */
static inline void tswap32s(uint32_t *s)
{
}
103
/* In-place variant: endianness matches, so there is nothing to swap. */
static inline void tswap64s(uint64_t *s)
{
}
107
108#endif
109
110#if TARGET_LONG_SIZE == 4
111#define tswapl(s) tswap32(s)
112#define tswapls(s) tswap32s((uint32_t *)(s))
113#define bswaptls(s) bswap32s(s)
114#else
115#define tswapl(s) tswap64(s)
116#define tswapls(s) tswap64s((uint64_t *)(s))
117#define bswaptls(s) bswap64s(s)
118#endif
119
120
121
122
123#if defined(TARGET_WORDS_BIGENDIAN)
124#define lduw_p(p) lduw_be_p(p)
125#define ldsw_p(p) ldsw_be_p(p)
126#define ldl_p(p) ldl_be_p(p)
127#define ldq_p(p) ldq_be_p(p)
128#define stw_p(p, v) stw_be_p(p, v)
129#define stl_p(p, v) stl_be_p(p, v)
130#define stq_p(p, v) stq_be_p(p, v)
131#define ldn_p(p, sz) ldn_be_p(p, sz)
132#define stn_p(p, sz, v) stn_be_p(p, sz, v)
133#else
134#define lduw_p(p) lduw_le_p(p)
135#define ldsw_p(p) ldsw_le_p(p)
136#define ldl_p(p) ldl_le_p(p)
137#define ldq_p(p) ldq_le_p(p)
138#define stw_p(p, v) stw_le_p(p, v)
139#define stl_p(p, v) stl_le_p(p, v)
140#define stq_p(p, v) stq_le_p(p, v)
141#define ldn_p(p, sz) ldn_le_p(p, sz)
142#define stn_p(p, sz, v) stn_le_p(p, sz, v)
143#endif
144
145
146
147#if defined(CONFIG_USER_ONLY)
148#include "exec/user/abitypes.h"
149
150
151
152
153extern uintptr_t guest_base;
154extern bool have_guest_base;
155extern unsigned long reserved_va;
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171#define GUEST_ADDR_MAX_ \
172 ((MIN_CONST(TARGET_VIRT_ADDR_SPACE_BITS, TARGET_ABI_BITS) <= 32) ? \
173 UINT32_MAX : ~0ul)
174#define GUEST_ADDR_MAX (reserved_va ? reserved_va - 1 : GUEST_ADDR_MAX_)
175
176#else
177
178#include "exec/hwaddr.h"
179
180#define SUFFIX
181#define ARG1 as
182#define ARG1_DECL AddressSpace *as
183#define TARGET_ENDIANNESS
184#include "exec/memory_ldst.h.inc"
185
186#define SUFFIX _cached_slow
187#define ARG1 cache
188#define ARG1_DECL MemoryRegionCache *cache
189#define TARGET_ENDIANNESS
190#include "exec/memory_ldst.h.inc"
191
/*
 * Store a 32-bit value @val at guest physical address @addr in address
 * space @as, via the "notdirty" store path (address_space_stl_notdirty).
 * The memory-transaction result is discarded (NULL result pointer) and
 * unspecified transaction attributes are used.
 */
static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val,
                               MEMTXATTRS_UNSPECIFIED, NULL);
}
197
198#define SUFFIX
199#define ARG1 as
200#define ARG1_DECL AddressSpace *as
201#define TARGET_ENDIANNESS
202#include "exec/memory_ldst_phys.h.inc"
203
204
205#define ENDIANNESS
206#include "exec/memory_ldst_cached.h.inc"
207
208#define SUFFIX _cached
209#define ARG1 cache
210#define ARG1_DECL MemoryRegionCache *cache
211#define TARGET_ENDIANNESS
212#include "exec/memory_ldst_phys.h.inc"
213#endif
214
215
216
217#ifdef TARGET_PAGE_BITS_VARY
218# include "exec/page-vary.h"
219extern const TargetPageBits target_page;
220#ifdef CONFIG_DEBUG_TCG
221#define TARGET_PAGE_BITS ({ assert(target_page.decided); target_page.bits; })
222#define TARGET_PAGE_MASK ({ assert(target_page.decided); \
223 (target_long)target_page.mask; })
224#else
225#define TARGET_PAGE_BITS target_page.bits
226#define TARGET_PAGE_MASK ((target_long)target_page.mask)
227#endif
228#define TARGET_PAGE_SIZE (-(int)TARGET_PAGE_MASK)
229#else
230#define TARGET_PAGE_BITS_MIN TARGET_PAGE_BITS
231#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
232#define TARGET_PAGE_MASK ((target_long)-1 << TARGET_PAGE_BITS)
233#endif
234
235#define TARGET_PAGE_ALIGN(addr) ROUND_UP((addr), TARGET_PAGE_SIZE)
236
237
238#define PAGE_READ 0x0001
239#define PAGE_WRITE 0x0002
240#define PAGE_EXEC 0x0004
241#define PAGE_BITS (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
242#define PAGE_VALID 0x0008
243
244
245
246#define PAGE_WRITE_ORG 0x0010
247
248
249
250
251#define PAGE_WRITE_INV 0x0020
252
253#define PAGE_RESET 0x0040
254
255#define PAGE_ANON 0x0080
256
257#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
258
259#define PAGE_RESERVED 0x0100
260#endif
261
262#define PAGE_TARGET_1 0x0200
263#define PAGE_TARGET_2 0x0400
264
265#if defined(CONFIG_USER_ONLY)
266void page_dump(FILE *f);
267
268typedef int (*walk_memory_regions_fn)(void *, target_ulong,
269 target_ulong, unsigned long);
270int walk_memory_regions(void *, walk_memory_regions_fn);
271
272int page_get_flags(target_ulong address);
273void page_set_flags(target_ulong start, target_ulong end, int flags);
274int page_check_range(target_ulong start, target_ulong len, int flags);
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289void *page_alloc_target_data(target_ulong address, size_t size);
290
291
292
293
294
295
296
297
298void *page_get_target_data(target_ulong address);
299#endif
300
301CPUArchState *cpu_copy(CPUArchState *env);
302
303
304
305
306
307
308
309
310
311
312#define CPU_INTERRUPT_HARD 0x0002
313
314
315
316#define CPU_INTERRUPT_EXITTB 0x0004
317
318
319#define CPU_INTERRUPT_HALT 0x0020
320
321
322#define CPU_INTERRUPT_DEBUG 0x0080
323
324
325#define CPU_INTERRUPT_RESET 0x0400
326
327
328
329#define CPU_INTERRUPT_TGT_EXT_0 0x0008
330#define CPU_INTERRUPT_TGT_EXT_1 0x0010
331#define CPU_INTERRUPT_TGT_EXT_2 0x0040
332#define CPU_INTERRUPT_TGT_EXT_3 0x0200
333#define CPU_INTERRUPT_TGT_EXT_4 0x1000
334
335
336
337
338
339
340#define CPU_INTERRUPT_TGT_INT_0 0x0100
341#define CPU_INTERRUPT_TGT_INT_1 0x0800
342#define CPU_INTERRUPT_TGT_INT_2 0x2000
343
344
345
346
347#define CPU_INTERRUPT_SSTEP_MASK \
348 (CPU_INTERRUPT_HARD \
349 | CPU_INTERRUPT_TGT_EXT_0 \
350 | CPU_INTERRUPT_TGT_EXT_1 \
351 | CPU_INTERRUPT_TGT_EXT_2 \
352 | CPU_INTERRUPT_TGT_EXT_3 \
353 | CPU_INTERRUPT_TGT_EXT_4)
354
355#ifdef CONFIG_USER_ONLY
356
357
358
359
360
361
362#define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS_MIN - 1))
363#define TLB_MMIO 0
364#define TLB_WATCHPOINT 0
365
366#else
367
368
369
370
371
372
373
374
375
376
377
378#define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS_MIN - 1))
379
380
381#define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS_MIN - 2))
382
383#define TLB_MMIO (1 << (TARGET_PAGE_BITS_MIN - 3))
384
385#define TLB_WATCHPOINT (1 << (TARGET_PAGE_BITS_MIN - 4))
386
387#define TLB_BSWAP (1 << (TARGET_PAGE_BITS_MIN - 5))
388
389#define TLB_DISCARD_WRITE (1 << (TARGET_PAGE_BITS_MIN - 6))
390
391
392
393
394#define TLB_FLAGS_MASK \
395 (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
396 | TLB_WATCHPOINT | TLB_BSWAP | TLB_DISCARD_WRITE)
397
398
399
400
401
402
403
404
405static inline bool tlb_hit_page(target_ulong tlb_addr, target_ulong addr)
406{
407 return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
408}
409
410
411
412
413
414
415
416static inline bool tlb_hit(target_ulong tlb_addr, target_ulong addr)
417{
418 return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK);
419}
420
421#ifdef CONFIG_TCG
422
423void dump_drift_info(GString *buf);
424
425void dump_exec_info(GString *buf);
426void dump_opcount_info(GString *buf);
427#endif
428
429#endif
430
431
432int cpu_exec(CPUState *cpu);
433void tcg_exec_realizefn(CPUState *cpu, Error **errp);
434void tcg_exec_unrealizefn(CPUState *cpu);
435
436
437
438
439
440
441
/**
 * cpu_set_cpustate_pointers(cpu)
 * @cpu: The cpu object
 *
 * Set the generic pointers in the embedded CPUState (parent_obj) to
 * point at the architecture-specific @env and negative-offset state
 * held in the outer ArchCPU object.
 */
static inline void cpu_set_cpustate_pointers(ArchCPU *cpu)
{
    cpu->parent_obj.env_ptr = &cpu->env;
    cpu->parent_obj.icount_decr_ptr = &cpu->neg.icount_decr;
}
447
448
449
450
451
452
453
/**
 * env_archcpu(env)
 * @env: The architecture environment
 *
 * Return the ArchCPU that embeds @env, by subtracting the offset of
 * the 'env' member (container_of).
 */
static inline ArchCPU *env_archcpu(CPUArchState *env)
{
    return container_of(env, ArchCPU, env);
}
458
459
460
461
462
463
464
465static inline CPUState *env_cpu(CPUArchState *env)
466{
467 return &env_archcpu(env)->parent_obj;
468}
469
470
471
472
473
474
475
476static inline CPUNegativeOffsetState *env_neg(CPUArchState *env)
477{
478 ArchCPU *arch_cpu = container_of(env, ArchCPU, env);
479 return &arch_cpu->neg;
480}
481
482
483
484
485
486
487
488static inline CPUNegativeOffsetState *cpu_neg(CPUState *cpu)
489{
490 ArchCPU *arch_cpu = container_of(cpu, ArchCPU, parent_obj);
491 return &arch_cpu->neg;
492}
493
494
495
496
497
498
499
500static inline CPUTLB *env_tlb(CPUArchState *env)
501{
502 return &env_neg(env)->tlb;
503}
504
505#endif
506