1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#ifndef CPU_ALL_H
20#define CPU_ALL_H
21
22#include "exec/cpu-common.h"
23#include "exec/memory.h"
24#include "qemu/thread.h"
25#include "hw/core/cpu.h"
26#include "qemu/rcu.h"
27
/*
 * Exception codes returned by the cpu execution loop.  They sit above
 * 0xffff, presumably to stay clear of target-defined exception numbers
 * -- TODO confirm against target exception definitions.
 */
#define EXCP_INTERRUPT 0x10000 /* async interruption */
#define EXCP_HLT 0x10001 /* hlt instruction reached */
#define EXCP_DEBUG 0x10002 /* cpu stopped after a breakpoint or singlestep */
#define EXCP_HALTED 0x10003 /* cpu is halted (waiting for external event) */
#define EXCP_YIELD 0x10004 /* cpu wants to yield timeslice to another */
#define EXCP_ATOMIC 0x10005 /* stop-the-world and emulate atomic */
34
35
36
37
38
39
40
41
42
/*
 * Target/host endianness conversion helpers.
 *
 * When the host and target differ in endianness, values stored in
 * target memory layout must be byte-swapped before use on the host
 * (and vice versa).  BSWAP_NEEDED selects the swapping implementations
 * below; otherwise all the tswap helpers collapse to no-ops.
 */
#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
#define BSWAP_NEEDED
#endif

#ifdef BSWAP_NEEDED

/* tswapN(): return @s converted between host and target byte order. */
static inline uint16_t tswap16(uint16_t s)
{
    return bswap16(s);
}

static inline uint32_t tswap32(uint32_t s)
{
    return bswap32(s);
}

static inline uint64_t tswap64(uint64_t s)
{
    return bswap64(s);
}

/* tswapNs(): in-place variants of the above. */
static inline void tswap16s(uint16_t *s)
{
    *s = bswap16(*s);
}

static inline void tswap32s(uint32_t *s)
{
    *s = bswap32(*s);
}

static inline void tswap64s(uint64_t *s)
{
    *s = bswap64(*s);
}

#else

/* Host and target endianness match: conversions are identity no-ops. */
static inline uint16_t tswap16(uint16_t s)
{
    return s;
}

static inline uint32_t tswap32(uint32_t s)
{
    return s;
}

static inline uint64_t tswap64(uint64_t s)
{
    return s;
}

static inline void tswap16s(uint16_t *s)
{
}

static inline void tswap32s(uint32_t *s)
{
}

static inline void tswap64s(uint64_t *s)
{
}

#endif
109
/*
 * target_long-sized variants: pick the 32- or 64-bit swap helpers
 * according to TARGET_LONG_SIZE.
 */
#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#define tswapls(s) tswap32s((uint32_t *)(s))
#define bswaptls(s) bswap32s(s)
#else
#define tswapl(s) tswap64(s)
#define tswapls(s) tswap64s((uint64_t *)(s))
#define bswaptls(s) bswap64s(s)
#endif
119
120
121
122
/*
 * Target-endian memory access helpers: map the generic ld*_p/st*_p
 * accessors onto the explicit big-endian (_be_) or little-endian
 * (_le_) implementations according to the target's endianness.
 */
#if TARGET_BIG_ENDIAN
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
#define ldq_p(p) ldq_be_p(p)
#define stw_p(p, v) stw_be_p(p, v)
#define stl_p(p, v) stl_be_p(p, v)
#define stq_p(p, v) stq_be_p(p, v)
#define ldn_p(p, sz) ldn_be_p(p, sz)
#define stn_p(p, sz, v) stn_be_p(p, sz, v)
#else
#define lduw_p(p) lduw_le_p(p)
#define ldsw_p(p) ldsw_le_p(p)
#define ldl_p(p) ldl_le_p(p)
#define ldq_p(p) ldq_le_p(p)
#define stw_p(p, v) stw_le_p(p, v)
#define stl_p(p, v) stl_le_p(p, v)
#define stq_p(p, v) stq_le_p(p, v)
#define ldn_p(p, sz) ldn_le_p(p, sz)
#define stn_p(p, sz, v) stn_le_p(p, sz, v)
#endif
144
145
146
147#if defined(CONFIG_USER_ONLY)
148#include "exec/user/abitypes.h"
149
150
151
152
/*
 * NOTE(review): semantics of these globals are not visible in this
 * header -- the descriptions below follow the names; confirm at the
 * definitions.
 */
/* Offset between guest addresses and the host addresses backing them. */
extern uintptr_t guest_base;
/* True when a guest base has been established. */
extern bool have_guest_base;
/*
 * If non-zero, limits the guest address space; GUEST_ADDR_MAX below
 * treats reserved_va as one past the last usable guest address.
 */
extern unsigned long reserved_va;

/*
 * Default maximum guest address when no reservation is in force:
 * capped at UINT32_MAX when either the target's virtual address space
 * or its ABI is at most 32 bits wide, otherwise the full range of
 * 'unsigned long'.
 */
#define GUEST_ADDR_MAX_ \
    ((MIN_CONST(TARGET_VIRT_ADDR_SPACE_BITS, TARGET_ABI_BITS) <= 32) ? \
     UINT32_MAX : ~0ul)
#define GUEST_ADDR_MAX (reserved_va ? reserved_va - 1 : GUEST_ADDR_MAX_)
175
176#else
177
178#include "exec/hwaddr.h"
179
/*
 * Instantiate the target-endian load/store declarations from
 * memory_ldst.h.inc, first for a plain AddressSpace ...
 */
#define SUFFIX
#define ARG1 as
#define ARG1_DECL AddressSpace *as
#define TARGET_ENDIANNESS
#include "exec/memory_ldst.h.inc"

/* ... and again, suffixed _cached_slow, for a MemoryRegionCache. */
#define SUFFIX _cached_slow
#define ARG1 cache
#define ARG1_DECL MemoryRegionCache *cache
#define TARGET_ENDIANNESS
#include "exec/memory_ldst.h.inc"
191
192static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
193{
194 address_space_stl_notdirty(as, addr, val,
195 MEMTXATTRS_UNSPECIFIED, NULL);
196}
197
/*
 * Instantiate the target-endian physical-memory accessors from
 * memory_ldst_phys.h.inc for a plain AddressSpace ...
 */
#define SUFFIX
#define ARG1 as
#define ARG1_DECL AddressSpace *as
#define TARGET_ENDIANNESS
#include "exec/memory_ldst_phys.h.inc"

/* Inline fast-path accessors for MemoryRegionCache. */
#define ENDIANNESS
#include "exec/memory_ldst_cached.h.inc"

/* ... and the _cached accessors for a MemoryRegionCache. */
#define SUFFIX _cached
#define ARG1 cache
#define ARG1_DECL MemoryRegionCache *cache
#define TARGET_ENDIANNESS
#include "exec/memory_ldst_phys.h.inc"
#endif
213#endif
214
215
216
/* Target page size handling. */
#ifdef TARGET_PAGE_BITS_VARY
# include "exec/page-vary.h"
extern const TargetPageBits target_page;
#ifdef CONFIG_DEBUG_TCG
/* Debug builds assert that the page size has been decided before use. */
#define TARGET_PAGE_BITS ({ assert(target_page.decided); target_page.bits; })
#define TARGET_PAGE_MASK ({ assert(target_page.decided); \
                            (target_long)target_page.mask; })
#else
#define TARGET_PAGE_BITS target_page.bits
#define TARGET_PAGE_MASK ((target_long)target_page.mask)
#endif
/* Negating the (sign-extended) mask recovers the page size. */
#define TARGET_PAGE_SIZE (-(int)TARGET_PAGE_MASK)
#else
/* Fixed page size: the minimum equals the only value. */
#define TARGET_PAGE_BITS_MIN TARGET_PAGE_BITS
#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK ((target_long)-1 << TARGET_PAGE_BITS)
#endif

/* Round @addr up to the next target page boundary. */
#define TARGET_PAGE_ALIGN(addr) ROUND_UP((addr), TARGET_PAGE_SIZE)
236
237
/* Page permission bits (mirror mmap PROT_READ/WRITE/EXEC values). */
#define PAGE_READ 0x0001
#define PAGE_WRITE 0x0002
#define PAGE_EXEC 0x0004
#define PAGE_BITS (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
#define PAGE_VALID 0x0008
/*
 * NOTE(review): comments on the flags below follow the names and
 * conventional usage; confirm against page_set_flags() callers.
 */
/* Original state of the write flag (e.g. before write protection). */
#define PAGE_WRITE_ORG 0x0010
/*
 * Invalidate the TLB entry immediately; broader than PAGE_WRITE_ORG
 * protection.
 */
#define PAGE_WRITE_INV 0x0020
/* The mapping was replaced, not merely changed permissions. */
#define PAGE_RESET 0x0040
/* The page is anonymously mapped. */
#define PAGE_ANON 0x0080

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
/* FIXME: Code that sets/uses this is an awful hack. */
#define PAGE_RESERVED 0x0100
#endif
/* Bits available for target-specific use. */
#define PAGE_TARGET_1 0x0200
#define PAGE_TARGET_2 0x0400
264
#if defined(CONFIG_USER_ONLY)
/* Dump the guest page mappings to @f. */
void page_dump(FILE *f);

/* Callback for walk_memory_regions(): (opaque, start, end, flags). */
typedef int (*walk_memory_regions_fn)(void *, target_ulong,
                                      target_ulong, unsigned long);
int walk_memory_regions(void *, walk_memory_regions_fn);

/* Query / modify the PAGE_* flags recorded for guest addresses. */
int page_get_flags(target_ulong address);
void page_set_flags(target_ulong start, target_ulong end, int flags);
void page_reset_target_data(target_ulong start, target_ulong end);
int page_check_range(target_ulong start, target_ulong len, int flags);

/*
 * page_alloc_target_data(address, size)
 *
 * Allocate @size bytes of out-of-band target-specific data to be
 * associated with the guest page at @address.
 * NOTE(review): lifetime/ownership rules are not visible in this
 * header -- confirm at the definition.
 */
void *page_alloc_target_data(target_ulong address, size_t size);

/*
 * page_get_target_data(address)
 *
 * Return the out-of-band data previously allocated for the page at
 * @address, or NULL-like behavior if none -- TODO confirm the
 * no-data return value at the definition.
 */
void *page_get_target_data(target_ulong address);
#endif
301
/*
 * Duplicate a CPU's architecture state into a newly created CPU.
 * NOTE(review): semantics inferred from the name; confirm at the
 * definition.
 */
CPUArchState *cpu_copy(CPUArchState *env);
303
304
305
306
307
308
309
310
311
312
/*
 * Flags for cpu->interrupt_request-style bookkeeping.
 * NOTE(review): per-flag descriptions follow the names; the bit values
 * are interleaved non-sequentially, so preserve exact values.
 */

/* External hardware interrupt pending. */
#define CPU_INTERRUPT_HARD 0x0002

/* Exit the current TB (used e.g. when the TB cache needs refreshing). */
#define CPU_INTERRUPT_EXITTB 0x0004

/* Halt request. */
#define CPU_INTERRUPT_HALT 0x0020

/* Debug event pending. */
#define CPU_INTERRUPT_DEBUG 0x0080

/* Reset request. */
#define CPU_INTERRUPT_RESET 0x0400

/* Bits reserved for target-specific "external" interrupt sources. */
#define CPU_INTERRUPT_TGT_EXT_0 0x0008
#define CPU_INTERRUPT_TGT_EXT_1 0x0010
#define CPU_INTERRUPT_TGT_EXT_2 0x0040
#define CPU_INTERRUPT_TGT_EXT_3 0x0200
#define CPU_INTERRUPT_TGT_EXT_4 0x1000

/* Bits reserved for target-specific "internal" interrupt sources. */
#define CPU_INTERRUPT_TGT_INT_0 0x0100
#define CPU_INTERRUPT_TGT_INT_1 0x0800
#define CPU_INTERRUPT_TGT_INT_2 0x2000

/*
 * Hardware interrupt mask relevant while single-stepping: HARD plus
 * every target-external bit.
 */
#define CPU_INTERRUPT_SSTEP_MASK \
    (CPU_INTERRUPT_HARD \
     | CPU_INTERRUPT_TGT_EXT_0 \
     | CPU_INTERRUPT_TGT_EXT_1 \
     | CPU_INTERRUPT_TGT_EXT_2 \
     | CPU_INTERRUPT_TGT_EXT_3 \
     | CPU_INTERRUPT_TGT_EXT_4)
355
#ifdef CONFIG_USER_ONLY

/*
 * User-mode emulation has no softmmu TLB; TLB_INVALID_MASK is still
 * defined (in the bits below TARGET_PAGE_BITS_MIN, so it never clashes
 * with the page-aligned address) because tlb_hit_page() below masks
 * with it, and the MMIO/watchpoint flags collapse to zero.
 */
#define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS_MIN - 1))
#define TLB_MMIO 0
#define TLB_WATCHPOINT 0

#else

/*
 * Softmmu TLB flags, stored in the low bits of the TLB virtual
 * address (all below TARGET_PAGE_BITS_MIN, so they never collide with
 * the page-aligned address itself).  A fast-path RAM access has all
 * of these bits clear.  NOTE(review): per-flag descriptions follow
 * the names; confirm against the TLB fill/access code.
 */

/* The entry is invalid and must be refilled. */
#define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS_MIN - 1))

/* The page is clean RAM; writes must go through dirty tracking. */
#define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS_MIN - 2))

/* The page maps I/O rather than RAM. */
#define TLB_MMIO (1 << (TARGET_PAGE_BITS_MIN - 3))

/* The page may contain a watchpoint. */
#define TLB_WATCHPOINT (1 << (TARGET_PAGE_BITS_MIN - 4))

/* Accesses through this entry require byte-swapping. */
#define TLB_BSWAP (1 << (TARGET_PAGE_BITS_MIN - 5))

/* Writes to this page are discarded. */
#define TLB_DISCARD_WRITE (1 << (TARGET_PAGE_BITS_MIN - 6))

/* Union of all flag bits that force the slow path. */
#define TLB_FLAGS_MASK \
    (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
     | TLB_WATCHPOINT | TLB_BSWAP | TLB_DISCARD_WRITE)
398
399
400
401
402
403
404
405
406static inline bool tlb_hit_page(target_ulong tlb_addr, target_ulong addr)
407{
408 return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
409}
410
411
412
413
414
415
416
417static inline bool tlb_hit(target_ulong tlb_addr, target_ulong addr)
418{
419 return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK);
420}
421
422#ifdef CONFIG_TCG
423
424void dump_exec_info(GString *buf);
425#endif
426
427#endif
428
429
/* Main CPU execution loop entry point; returns an EXCP_* code. */
int cpu_exec(CPUState *cpu);
/* TCG-specific CPU realize/unrealize hooks. */
void tcg_exec_realizefn(CPUState *cpu, Error **errp);
void tcg_exec_unrealizefn(CPUState *cpu);
433
434
435
436
437
438
439
440static inline void cpu_set_cpustate_pointers(ArchCPU *cpu)
441{
442 cpu->parent_obj.env_ptr = &cpu->env;
443 cpu->parent_obj.icount_decr_ptr = &cpu->neg.icount_decr;
444}
445
446
447
448
449
450
451
452static inline ArchCPU *env_archcpu(CPUArchState *env)
453{
454 return container_of(env, ArchCPU, env);
455}
456
457
458
459
460
461
462
463static inline CPUState *env_cpu(CPUArchState *env)
464{
465 return &env_archcpu(env)->parent_obj;
466}
467
468
469
470
471
472
473
474static inline CPUNegativeOffsetState *env_neg(CPUArchState *env)
475{
476 ArchCPU *arch_cpu = container_of(env, ArchCPU, env);
477 return &arch_cpu->neg;
478}
479
480
481
482
483
484
485
486static inline CPUNegativeOffsetState *cpu_neg(CPUState *cpu)
487{
488 ArchCPU *arch_cpu = container_of(cpu, ArchCPU, parent_obj);
489 return &arch_cpu->neg;
490}
491
492
493
494
495
496
497
498static inline CPUTLB *env_tlb(CPUArchState *env)
499{
500 return &env_neg(env)->tlb;
501}
502
503#endif
504