19#ifndef CPU_ALL_H
20#define CPU_ALL_H
21
22#include "exec/cpu-common.h"
23#include "exec/memory.h"
24#include "qemu/thread.h"
25#include "qom/cpu.h"
26#include "qemu/rcu.h"
27
/* Exit codes returned by cpu_exec(); values start at 0x10000 so they
 * cannot collide with target-defined exception numbers. */
#define EXCP_INTERRUPT 0x10000 /* async interruption */
#define EXCP_HLT 0x10001 /* hlt instruction reached */
#define EXCP_DEBUG 0x10002 /* cpu stopped after a breakpoint or singlestep */
#define EXCP_HALTED 0x10003 /* cpu is halted (waiting for external event) */
#define EXCP_YIELD 0x10004 /* cpu wants to yield timeslice to another */
#define EXCP_ATOMIC 0x10005 /* stop-the-world and emulate atomic */
34
35
36
37
38
39
40
41
42
/*
 * Host <-> target byte-order conversion helpers.
 *
 * BSWAP_NEEDED is defined exactly when host and target endianness
 * differ; in that case the tswap* helpers byte-swap, otherwise they
 * are identity operations.
 */
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
#define BSWAP_NEEDED
#endif

#ifdef BSWAP_NEEDED

/* Convert a 16-bit value between host and target byte order. */
static inline uint16_t tswap16(uint16_t s)
{
    return bswap16(s);
}

/* Convert a 32-bit value between host and target byte order. */
static inline uint32_t tswap32(uint32_t s)
{
    return bswap32(s);
}

/* Convert a 64-bit value between host and target byte order. */
static inline uint64_t tswap64(uint64_t s)
{
    return bswap64(s);
}

/* In-place variants of the conversions above. */
static inline void tswap16s(uint16_t *s)
{
    *s = bswap16(*s);
}

static inline void tswap32s(uint32_t *s)
{
    *s = bswap32(*s);
}

static inline void tswap64s(uint64_t *s)
{
    *s = bswap64(*s);
}

#else

/* Host and target share byte order: every conversion is a no-op. */
static inline uint16_t tswap16(uint16_t s)
{
    return s;
}

static inline uint32_t tswap32(uint32_t s)
{
    return s;
}

static inline uint64_t tswap64(uint64_t s)
{
    return s;
}

static inline void tswap16s(uint16_t *s)
{
}

static inline void tswap32s(uint32_t *s)
{
}

static inline void tswap64s(uint64_t *s)
{
}

#endif
109
/* target_long-sized variants: dispatch to the 32- or 64-bit helper
 * according to TARGET_LONG_SIZE. */
#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#define tswapls(s) tswap32s((uint32_t *)(s))
#define bswaptls(s) bswap32s(s)
#else
#define tswapl(s) tswap64(s)
#define tswapls(s) tswap64s((uint64_t *)(s))
#define bswaptls(s) bswap64s(s)
#endif
119
120
121
122
/* Target-endian memory accessors: each *_p macro resolves to the
 * big-endian or little-endian implementation matching the target's
 * byte order. */
#if defined(TARGET_WORDS_BIGENDIAN)
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
#define ldq_p(p) ldq_be_p(p)
#define ldfl_p(p) ldfl_be_p(p)
#define ldfq_p(p) ldfq_be_p(p)
#define stw_p(p, v) stw_be_p(p, v)
#define stl_p(p, v) stl_be_p(p, v)
#define stq_p(p, v) stq_be_p(p, v)
#define stfl_p(p, v) stfl_be_p(p, v)
#define stfq_p(p, v) stfq_be_p(p, v)
#define ldn_p(p, sz) ldn_be_p(p, sz)
#define stn_p(p, sz, v) stn_be_p(p, sz, v)
#else
#define lduw_p(p) lduw_le_p(p)
#define ldsw_p(p) ldsw_le_p(p)
#define ldl_p(p) ldl_le_p(p)
#define ldq_p(p) ldq_le_p(p)
#define ldfl_p(p) ldfl_le_p(p)
#define ldfq_p(p) ldfq_le_p(p)
#define stw_p(p, v) stw_le_p(p, v)
#define stl_p(p, v) stl_le_p(p, v)
#define stq_p(p, v) stq_le_p(p, v)
#define stfl_p(p, v) stfl_le_p(p, v)
#define stfq_p(p, v) stfq_le_p(p, v)
#define ldn_p(p, sz) ldn_le_p(p, sz)
#define stn_p(p, sz, v) stn_le_p(p, sz, v)
#endif
152
153
154
#if defined(CONFIG_USER_ONLY)
#include "exec/user/abitypes.h"

/*
 * User-mode emulation: the guest address space lives inside the host
 * process.  guest_base is the host offset of guest address 0;
 * reserved_va, when non-zero, is the size of the host VA range
 * reserved for the guest.
 */
extern unsigned long guest_base;
extern int have_guest_base;
extern unsigned long reserved_va;

/* Largest valid guest virtual address. */
#if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS
#define GUEST_ADDR_MAX (~0ul)
#else
#define GUEST_ADDR_MAX (reserved_va ? reserved_va - 1 : \
                        (1ul << TARGET_VIRT_ADDR_SPACE_BITS) - 1)
#endif
#else

#include "exec/hwaddr.h"

/* Instantiate target-endian physical ld/st helpers taking an
 * AddressSpace argument. */
#define SUFFIX
#define ARG1 as
#define ARG1_DECL AddressSpace *as
#define TARGET_ENDIANNESS
#include "exec/memory_ldst.inc.h"

/* Same helpers, slow path, for a MemoryRegionCache argument. */
#define SUFFIX _cached_slow
#define ARG1 cache
#define ARG1_DECL MemoryRegionCache *cache
#define TARGET_ENDIANNESS
#include "exec/memory_ldst.inc.h"

/* 32-bit physical store that does not set the dirty bit for the page
 * (delegates to address_space_stl_notdirty; result is discarded). */
static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val,
                               MEMTXATTRS_UNSPECIFIED, NULL);
}

#define SUFFIX
#define ARG1 as
#define ARG1_DECL AddressSpace *as
#define TARGET_ENDIANNESS
#include "exec/memory_ldst_phys.inc.h"

/* Inline (fast-path) accessors for MemoryRegionCache. */
#define ENDIANNESS
#include "exec/memory_ldst_cached.inc.h"

#define SUFFIX _cached
#define ARG1 cache
#define ARG1_DECL MemoryRegionCache *cache
#define TARGET_ENDIANNESS
#include "exec/memory_ldst_phys.inc.h"
#endif
209
210
211
/* TARGET_PAGE_BITS is either fixed per target or chosen at runtime
 * (TARGET_PAGE_BITS_VARY); in the runtime case every read asserts that
 * the value has already been decided. */
#ifdef TARGET_PAGE_BITS_VARY
extern bool target_page_bits_decided;
extern int target_page_bits;
#define TARGET_PAGE_BITS ({ assert(target_page_bits_decided); \
                            target_page_bits; })
#else
#define TARGET_PAGE_BITS_MIN TARGET_PAGE_BITS
#endif
220
/* Page size/mask/align helpers derived from TARGET_PAGE_BITS.  Every
 * expansion is fully parenthesized so the macros compose safely inside
 * any surrounding expression (TARGET_PAGE_MASK previously lacked the
 * outer parentheses). */
#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK (~(TARGET_PAGE_SIZE - 1))
#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)
224
225
226
227
/* Host page geometry; NOTE(review): set during startup elsewhere —
 * qemu_host_page_size is presumably >= the real host page size. */
extern uintptr_t qemu_host_page_size;
extern intptr_t qemu_host_page_mask;

/* Round addr up to the next (emulated / real) host-page boundary. */
#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)
#define REAL_HOST_PAGE_ALIGN(addr) (((addr) + qemu_real_host_page_size - 1) & \
                                    qemu_real_host_page_mask)
234
235
/* Per-page protection/tracking flags (same bits as PROT_READ etc.). */
#define PAGE_READ 0x0001
#define PAGE_WRITE 0x0002
#define PAGE_EXEC 0x0004
#define PAGE_BITS (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
#define PAGE_VALID 0x0008

/* Original state of the write flag (used when tracking self-modifying
 * code, where PAGE_WRITE may be temporarily cleared). */
#define PAGE_WRITE_ORG 0x0010

/* Invalidate the TLB entry immediately; used together with PAGE_WRITE
 * when setting page flags. */
#define PAGE_WRITE_INV 0x0040
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
/* BSD user-mode only: page is reserved and must not be touched. */
#define PAGE_RESERVED 0x0020
#endif
251
#if defined(CONFIG_USER_ONLY)
/* Dump the guest page mappings to stream f. */
void page_dump(FILE *f);

/* Callback for walk_memory_regions: (opaque, start, end, flags). */
typedef int (*walk_memory_regions_fn)(void *, target_ulong,
                                      target_ulong, unsigned long);
int walk_memory_regions(void *, walk_memory_regions_fn);

/* Query and update the per-page PAGE_* flags defined above. */
int page_get_flags(target_ulong address);
void page_set_flags(target_ulong start, target_ulong end, int flags);
int page_check_range(target_ulong start, target_ulong len, int flags);
#endif

/* Duplicate the architecture CPU state env into a newly created CPU. */
CPUArchState *cpu_copy(CPUArchState *env);
265
266
267
268
269
270
271
272
273
274
/*
 * Flags for the CPU interrupt_request word.  Bit assignments are
 * deliberately sparse; the CPU_INTERRUPT_TGT_* bits below are reserved
 * for target-specific meanings.
 */

/* External hardware interrupt pending.  This is typically used for
 * interrupts from devices. */
#define CPU_INTERRUPT_HARD 0x0002

/* Exit the current TB.  This is typically used when some system-level
 * state has changed that affects translated code. */
#define CPU_INTERRUPT_EXITTB 0x0004

/* Halt the CPU. */
#define CPU_INTERRUPT_HALT 0x0020

/* Debug event pending. */
#define CPU_INTERRUPT_DEBUG 0x0080

/* Reset signal. */
#define CPU_INTERRUPT_RESET 0x0400

/* Several target-specific external hardware interrupts.  Each target
 * may assign its own meaning to these bits. */
#define CPU_INTERRUPT_TGT_EXT_0 0x0008
#define CPU_INTERRUPT_TGT_EXT_1 0x0010
#define CPU_INTERRUPT_TGT_EXT_2 0x0040
#define CPU_INTERRUPT_TGT_EXT_3 0x0200
#define CPU_INTERRUPT_TGT_EXT_4 0x1000

/* Several target-specific internal interrupts.  These differ from the
 * external ones in that they are raised by the target CPU itself. */
#define CPU_INTERRUPT_TGT_INT_0 0x0100
#define CPU_INTERRUPT_TGT_INT_1 0x0800
#define CPU_INTERRUPT_TGT_INT_2 0x2000

/* Mask of the hardware interrupt sources (hard plus all target-external
 * bits); NOTE(review): used in the single-step path — confirm exact
 * semantics against the callers. */
#define CPU_INTERRUPT_SSTEP_MASK \
    (CPU_INTERRUPT_HARD          \
     | CPU_INTERRUPT_TGT_EXT_0   \
     | CPU_INTERRUPT_TGT_EXT_1   \
     | CPU_INTERRUPT_TGT_EXT_2   \
     | CPU_INTERRUPT_TGT_EXT_3   \
     | CPU_INTERRUPT_TGT_EXT_4)
317
#if !defined(CONFIG_USER_ONLY)

/*
 * Flags stored in the low bits of the TLB virtual address.  They are
 * defined so that a fast-path RAM access sees all zeros; every flag
 * lives just below TARGET_PAGE_BITS so it cannot clash with the
 * page-offset bits.
 */

/* Zero if the TLB entry is valid. */
#define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS - 1))

/* Set if the TLB entry references a clean RAM page (writes must go
 * through the dirty-tracking slow path). */
#define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS - 2))

/* Set if the TLB entry is an IO callback. */
#define TLB_MMIO (1 << (TARGET_PAGE_BITS - 3))

/* Set if the TLB entry must have the MMU lookup repeated for every
 * access (e.g. smaller-than-page mappings). */
#define TLB_RECHECK (1 << (TARGET_PAGE_BITS - 4))

/* Union of all the flag bits above; anything outside this mask in the
 * low bits belongs to the page offset. */
#define TLB_FLAGS_MASK (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
                        | TLB_RECHECK)
340
341
342
343
344
345
346
347
348static inline bool tlb_hit_page(target_ulong tlb_addr, target_ulong addr)
349{
350 return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
351}
352
353
354
355
356
357
358
359static inline bool tlb_hit(target_ulong tlb_addr, target_ulong addr)
360{
361 return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK);
362}
363
/* Dump TCG execution statistics / opcode counters to the log. */
void dump_exec_info(void);
void dump_opcount_info(void);
#endif

/* Read or write guest memory on behalf of the debugger; is_write
 * non-zero selects write.  NOTE(review): return convention (0 on
 * success?) inferred from callers — confirm in the implementation. */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, target_ulong len, int is_write);

/* Main execution loop for cpu; returns one of the EXCP_* codes
 * defined at the top of this header. */
int cpu_exec(CPUState *cpu);
372
373
374
375
376
377
378
379static inline void cpu_set_cpustate_pointers(ArchCPU *cpu)
380{
381 cpu->parent_obj.env_ptr = &cpu->env;
382 cpu->parent_obj.icount_decr_ptr = &cpu->neg.icount_decr;
383}
384
385
386
387
388
389
390
391static inline ArchCPU *env_archcpu(CPUArchState *env)
392{
393 return container_of(env, ArchCPU, env);
394}
395
396
397
398
399
400
401
402static inline CPUState *env_cpu(CPUArchState *env)
403{
404 return &env_archcpu(env)->parent_obj;
405}
406
407
408
409
410
411
412
413static inline CPUNegativeOffsetState *env_neg(CPUArchState *env)
414{
415 ArchCPU *arch_cpu = container_of(env, ArchCPU, env);
416 return &arch_cpu->neg;
417}
418
419
420
421
422
423
424
425static inline CPUNegativeOffsetState *cpu_neg(CPUState *cpu)
426{
427 ArchCPU *arch_cpu = container_of(cpu, ArchCPU, parent_obj);
428 return &arch_cpu->neg;
429}
430
431
432
433
434
435
436
437static inline CPUTLB *env_tlb(CPUArchState *env)
438{
439 return &env_neg(env)->tlb;
440}
441
442#endif
443