/*
 *  Software MMU support
 *
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 * functions.
 *
 * Included from target op helpers and exec.c.
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#if DATA_SIZE == 8
#define SUFFIX q
#define LSUFFIX q
#define SDATA_TYPE int64_t
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define LSUFFIX l
#define SDATA_TYPE int32_t
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define LSUFFIX uw
#define SDATA_TYPE int16_t
#define DATA_TYPE uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define LSUFFIX ub
#define SDATA_TYPE int8_t
#define DATA_TYPE uint8_t
#else
#error unsupported data size
#endif
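
/*
 * A sketch of how this template is typically instantiated (illustrative,
 * not part of this file: the actual instantiation sites live in the .c
 * file that includes this header, which defines DATA_SIZE and MMUSUFFIX
 * before each inclusion; DATA_SIZE is #undef'd again at the end of this
 * file so it can be redefined for the next width):
 *
 *   #define MMUSUFFIX _mmu
 *   #define DATA_SIZE 1
 *   #include "softmmu_template.h"
 *   #define DATA_SIZE 2
 *   #include "softmmu_template.h"
 *   #define DATA_SIZE 4
 *   #include "softmmu_template.h"
 *   #define DATA_SIZE 8
 *   #include "softmmu_template.h"
 */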

/* For the benefit of TCG generated code, we want to avoid the complication
   of ABI-specific return type promotion and always return a value extended
   to the register size of the host.  This is tcg_target_long, except in the
   case of a 32-bit host and 64-bit data, and for that we always have
   uint64_t.  Don't bother with this widened value for SOFTMMU_CODE_ACCESS.  */
#if defined(SOFTMMU_CODE_ACCESS) || DATA_SIZE == 8
# define WORD_TYPE DATA_TYPE
# define USUFFIX SUFFIX
#else
# define WORD_TYPE tcg_target_ulong
# define USUFFIX glue(u, SUFFIX)
# define SSUFFIX glue(s, SUFFIX)
#endif

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE MMU_INST_FETCH
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE MMU_DATA_LOAD
#define ADDR_READ addr_read
#endif

#if DATA_SIZE == 8
# define BSWAP(X) bswap64(X)
#elif DATA_SIZE == 4
# define BSWAP(X) bswap32(X)
#elif DATA_SIZE == 2
# define BSWAP(X) bswap16(X)
#else
# define BSWAP(X) (X)
#endif

#if DATA_SIZE == 1
# define helper_le_ld_name  glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  helper_le_ld_name
# define helper_le_lds_name glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name helper_le_lds_name
# define helper_le_st_name  glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  helper_le_st_name
#else
# define helper_le_ld_name  glue(glue(helper_le_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  glue(glue(helper_be_ld, USUFFIX), MMUSUFFIX)
# define helper_le_lds_name glue(glue(helper_le_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name glue(glue(helper_be_ld, SSUFFIX), MMUSUFFIX)
# define helper_le_st_name  glue(glue(helper_le_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  glue(glue(helper_be_st, SUFFIX), MMUSUFFIX)
#endif
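
/*
 * For example (illustrative, assuming MMUSUFFIX is _mmu as in the usual
 * instantiation): with DATA_SIZE == 4, helper_le_ld_name expands to
 * helper_le_ldul_mmu, helper_be_ld_name to helper_be_ldul_mmu, and
 * helper_le_st_name to helper_le_stl_mmu.
 */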

#ifndef SOFTMMU_CODE_ACCESS
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                              size_t mmu_idx, size_t index,
                                              target_ulong addr,
                                              uintptr_t retaddr,
                                              bool recheck,
                                              MMUAccessType access_type)
{
    CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
    return io_readx(env, iotlbentry, mmu_idx, addr, retaddr, recheck,
                    access_type, DATA_SIZE);
}
#endif

WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    uintptr_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = entry->ADDR_READ;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;
    DATA_TYPE res;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = entry->ADDR_READ;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr,
                                    tlb_addr & TLB_RECHECK,
                                    READ_ACCESS_TYPE);
        res = TGT_LE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        res1 = helper_le_ld_name(env, addr1, oi, retaddr);
        res2 = helper_le_ld_name(env, addr2, oi, retaddr);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Little-endian combine.  */
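        /* Worked example (illustrative, not from the original source):
           for a 4-byte load with addr & 3 == 2, shift is 16, so the
           result takes the top two bytes of res1 (the aligned word
           containing addr) as its low half and the bottom two bytes of
           res2 (the following word) as its high half.  */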
        res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
        return res;
    }

    haddr = addr + entry->addend;
#if DATA_SIZE == 1
    res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
#else
    res = glue(glue(ld, LSUFFIX), _le_p)((uint8_t *)haddr);
#endif
    return res;
}

#if DATA_SIZE > 1
WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    uintptr_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = entry->ADDR_READ;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;
    DATA_TYPE res;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = entry->ADDR_READ;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr,
                                    tlb_addr & TLB_RECHECK,
                                    READ_ACCESS_TYPE);
        res = TGT_BE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        res1 = helper_be_ld_name(env, addr1, oi, retaddr);
        res2 = helper_be_ld_name(env, addr2, oi, retaddr);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Big-endian combine.  */
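        /* Worked example (illustrative, not from the original source):
           for a 4-byte load with addr & 3 == 2, shift is 16, so the
           result takes the low two bytes of res1 as its high half and
           the high two bytes of res2 as its low half.  */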
        res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
        return res;
    }

    haddr = addr + entry->addend;
    res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
    return res;
}
#endif

#ifndef SOFTMMU_CODE_ACCESS

/* Provide signed versions of the load routines as well.  We can of course
   avoid this for 64-bit data, or for 32-bit data on 32-bit host.  */
#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
WORD_TYPE helper_le_lds_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
{
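    /* Illustrative case (not from the original source): with DATA_SIZE == 2
       on a 64-bit host, a loaded value of 0x8000 is returned as the
       sign-extended value 0xffffffffffff8000.  */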
    return (SDATA_TYPE)helper_le_ld_name(env, addr, oi, retaddr);
}

# if DATA_SIZE > 1
WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_be_ld_name(env, addr, oi, retaddr);
}
# endif
#endif

static inline void glue(io_write, SUFFIX)(CPUArchState *env,
                                          size_t mmu_idx, size_t index,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          uintptr_t retaddr,
                                          bool recheck)
{
    CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
    return io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr,
                     recheck, DATA_SIZE);
}
void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    uintptr_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = tlb_addr_write(entry);
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        val = TGT_LE(val);
        glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr,
                               retaddr, tlb_addr & TLB_RECHECK);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        int i;
        target_ulong page2;
        CPUTLBEntry *entry2;
    do_unaligned_access:
        /* Ensure the second page is in the TLB.  Note that the first page
           is already guaranteed to be filled, and that the second page
           cannot evict the first.  */
        page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
        entry2 = tlb_entry(env, mmu_idx, page2);
        if (!tlb_hit_page(tlb_addr_write(entry2), page2)
            && !VICTIM_TLB_HIT(addr_write, page2)) {
            tlb_fill(ENV_GET_CPU(env), page2, DATA_SIZE, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }

        /* XXX: not efficient, but simple.  */
        /* This loop must go in the forward direction to avoid issues
           with self-modifying code in Windows 64-bit.  */
        for (i = 0; i < DATA_SIZE; ++i) {
            /* Little-endian extract.  */
            uint8_t val8 = val >> (i * 8);
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr);
        }
        return;
    }

    haddr = addr + entry->addend;
#if DATA_SIZE == 1
    glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
#else
    glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
#endif
}

#if DATA_SIZE > 1
void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    uintptr_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = tlb_addr_write(entry);
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        val = TGT_BE(val);
        glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr, retaddr,
                               tlb_addr & TLB_RECHECK);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        int i;
        target_ulong page2;
        CPUTLBEntry *entry2;
    do_unaligned_access:
        /* Ensure the second page is in the TLB.  Note that the first page
           is already guaranteed to be filled, and that the second page
           cannot evict the first.  */
        page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
        entry2 = tlb_entry(env, mmu_idx, page2);
        if (!tlb_hit_page(tlb_addr_write(entry2), page2)
            && !VICTIM_TLB_HIT(addr_write, page2)) {
            tlb_fill(ENV_GET_CPU(env), page2, DATA_SIZE, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }

        /* XXX: not efficient, but simple.  */
        /* This loop must go in the forward direction to avoid issues
           with self-modifying code in Windows 64-bit.  */
        for (i = 0; i < DATA_SIZE; ++i) {
            /* Big-endian extract.  */
            uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr);
        }
        return;
    }

    haddr = addr + entry->addend;
    glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
}
#endif
#endif

#undef READ_ACCESS_TYPE
#undef DATA_TYPE
#undef SUFFIX
#undef LSUFFIX
#undef DATA_SIZE
#undef ADDR_READ
#undef WORD_TYPE
#undef SDATA_TYPE
#undef USUFFIX
#undef SSUFFIX
#undef BSWAP
#undef helper_le_ld_name
#undef helper_be_ld_name
#undef helper_le_lds_name
#undef helper_be_lds_name
#undef helper_le_st_name
#undef helper_be_st_name