1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
/* Select the per-size name suffixes and C types for this instantiation
 * of the template.  DATA_SIZE must be defined to 1, 2, 4 or 8 before
 * this file is included.
 *   SUFFIX     - suffix used to build helper and io_read/io_write names
 *   LSUFFIX    - suffix used to build the host ld*_p accessor name
 *   SDATA_TYPE - signed C type of exactly DATA_SIZE bytes
 *   DATA_TYPE  - unsigned C type of exactly DATA_SIZE bytes
 */
#if DATA_SIZE == 8
#define SUFFIX q
#define LSUFFIX q
#define SDATA_TYPE int64_t
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define LSUFFIX l
#define SDATA_TYPE int32_t
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define LSUFFIX uw
#define SDATA_TYPE int16_t
#define DATA_TYPE uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define LSUFFIX ub
#define SDATA_TYPE int8_t
#define DATA_TYPE uint8_t
#else
#error unsupported data size
#endif
47
48
49
50
51
52
53
/* WORD_TYPE is the return type of the load helpers.  For sub-word data
 * loads it is widened to tcg_target_ulong so the value comes back
 * extended to the full host register width; code accesses and
 * full-width (8-byte) accesses return the plain DATA_TYPE.
 * USUFFIX/SSUFFIX name the zero- and sign-extending helper variants.  */
#if defined(SOFTMMU_CODE_ACCESS) || DATA_SIZE == 8
# define WORD_TYPE DATA_TYPE
# define USUFFIX SUFFIX
#else
# define WORD_TYPE tcg_target_ulong
# define USUFFIX glue(u, SUFFIX)
# define SSUFFIX glue(s, SUFFIX)
#endif

/* A code-access instantiation reads through addr_code and reports
 * faults as instruction fetches; otherwise reads are data loads
 * through addr_read.  */
#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE MMU_INST_FETCH
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE MMU_DATA_LOAD
#define ADDR_READ addr_read
#endif

/* Byte-swap of a DATA_SIZE-sized value; a no-op for single bytes.  */
#if DATA_SIZE == 8
# define BSWAP(X) bswap64(X)
#elif DATA_SIZE == 4
# define BSWAP(X) bswap32(X)
#elif DATA_SIZE == 2
# define BSWAP(X) bswap16(X)
#else
# define BSWAP(X) (X)
#endif

/* Build the public helper names.  Endianness is meaningless for
 * single-byte accesses, so the big-endian names alias the
 * little-endian ("ret_") ones when DATA_SIZE == 1.  */
#if DATA_SIZE == 1
# define helper_le_ld_name  glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  helper_le_ld_name
# define helper_le_lds_name glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name helper_le_lds_name
# define helper_le_st_name  glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  helper_le_st_name
#else
# define helper_le_ld_name  glue(glue(helper_le_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  glue(glue(helper_be_ld, USUFFIX), MMUSUFFIX)
# define helper_le_lds_name glue(glue(helper_le_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name glue(glue(helper_be_ld, SSUFFIX), MMUSUFFIX)
# define helper_le_st_name  glue(glue(helper_le_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  glue(glue(helper_be_st, SUFFIX), MMUSUFFIX)
#endif
96
97#ifndef SOFTMMU_CODE_ACCESS
98static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
99 size_t mmu_idx, size_t index,
100 target_ulong addr,
101 uintptr_t retaddr)
102{
103 CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
104 return io_readx(env, iotlbentry, addr, retaddr, DATA_SIZE);
105}
106#endif
107
/* Little-endian softmmu load helper: load DATA_SIZE bytes at guest
 * virtual ADDR, going through the TLB.  OI packs the mmu index and the
 * memop (alignment) bits; RETADDR locates the calling TCG code for
 * fault reporting.  Returns the zero-extended value in WORD_TYPE.  */
WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;
    DATA_TYPE res;

    /* Enforce the guest-requested alignment before anything else.  */
    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }

    /* TLB miss: try the victim TLB, else refill, then re-read the entry.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Low bits set in tlb_addr flag a non-RAM (I/O) page.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }

        /* The io helper returns data in target byte order; TGT_LE
           converts that to the little-endian result we must return.  */
        res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr);
        res = TGT_LE(res);
        return res;
    }

    /* Slow path for an access spanning two pages: split it into two
       aligned recursive loads and stitch the bytes back together.  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        /* addr1 rounds down to alignment; addr2 is the following word.  */
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        res1 = helper_le_ld_name(env, addr1, oi, retaddr);
        res2 = helper_le_ld_name(env, addr2, oi, retaddr);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Little-endian combine.  shift is nonzero here (an aligned
           access never reaches this path), so the shifts are defined.  */
        res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
        return res;
    }

    /* Fast path: direct host load via the cached addend.  */
    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
#else
    res = glue(glue(ld, LSUFFIX), _le_p)((uint8_t *)haddr);
#endif
    return res;
}
174
175#if DATA_SIZE > 1
/* Big-endian softmmu load helper: identical structure to the
 * little-endian variant above, but converts I/O results with TGT_BE,
 * combines cross-page halves in big-endian order, and uses the
 * big-endian host accessor.  Only instantiated for DATA_SIZE > 1.  */
WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;
    DATA_TYPE res;

    /* Enforce the guest-requested alignment before anything else.  */
    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }

    /* TLB miss: try the victim TLB, else refill, then re-read the entry.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Low bits set in tlb_addr flag a non-RAM (I/O) page.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }

        /* The io helper returns data in target byte order; TGT_BE
           converts that to the big-endian result we must return.  */
        res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr);
        res = TGT_BE(res);
        return res;
    }

    /* Slow path for an access spanning two pages: split it into two
       aligned recursive loads and stitch the bytes back together.  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        res1 = helper_be_ld_name(env, addr1, oi, retaddr);
        res2 = helper_be_ld_name(env, addr2, oi, retaddr);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Big-endian combine.  shift is nonzero here (an aligned access
           never reaches this path), so the shifts are defined.  */
        res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
        return res;
    }

    /* Fast path: direct host load via the cached addend.  */
    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
    return res;
}
237#endif
238
239#ifndef SOFTMMU_CODE_ACCESS
240
241
242
243#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
244WORD_TYPE helper_le_lds_name(CPUArchState *env, target_ulong addr,
245 TCGMemOpIdx oi, uintptr_t retaddr)
246{
247 return (SDATA_TYPE)helper_le_ld_name(env, addr, oi, retaddr);
248}
249
250# if DATA_SIZE > 1
251WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr,
252 TCGMemOpIdx oi, uintptr_t retaddr)
253{
254 return (SDATA_TYPE)helper_be_ld_name(env, addr, oi, retaddr);
255}
256# endif
257#endif
258
259static inline void glue(io_write, SUFFIX)(CPUArchState *env,
260 size_t mmu_idx, size_t index,
261 DATA_TYPE val,
262 target_ulong addr,
263 uintptr_t retaddr)
264{
265 CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
266 return io_writex(env, iotlbentry, val, addr, retaddr, DATA_SIZE);
267}
268
/* Little-endian softmmu store helper: store the DATA_SIZE-byte VAL at
 * guest virtual ADDR, going through the TLB.  OI packs the mmu index
 * and the memop (alignment) bits; RETADDR locates the calling TCG code
 * for fault reporting.  */
void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;

    /* Enforce the guest-requested alignment before anything else.  */
    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* TLB miss: try the victim TLB, else refill, then re-read the entry.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Low bits set in tlb_addr flag a non-RAM (I/O) page.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }

        /* The io helper takes data in target byte order; TGT_LE
           converts our little-endian value to that ordering.  */
        val = TGT_LE(val);
        glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr, retaddr);
        return;
    }

    /* Slow path for a store spanning two pages.  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        int i, index2;
        target_ulong page2, tlb_addr2;
    do_unaligned_access:
        /* Make sure the second page is mapped before writing anything,
           so a fault cannot leave a half-completed store.  NOTE(review):
           this presumes the first page stays resident across this
           tlb_fill of the second page — confirm against tlb_fill.  */
        page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
        index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
        if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
            && !VICTIM_TLB_HIT(addr_write, page2)) {
            tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }

        /* Byte-by-byte store via the byte helper: not efficient, but
           simple and correct for any split across the page boundary.  */
        for (i = 0; i < DATA_SIZE; ++i) {
            /* Little-endian extract: byte i is bits [8i, 8i+7].  */
            uint8_t val8 = val >> (i * 8);
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr);
        }
        return;
    }

    /* Fast path: direct host store via the cached addend.  */
    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
#else
    glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
#endif
}
343
344#if DATA_SIZE > 1
/* Big-endian softmmu store helper: identical structure to the
 * little-endian variant above, but converts for I/O with TGT_BE,
 * extracts bytes in big-endian order for the cross-page path, and uses
 * the big-endian host accessor.  Only instantiated for DATA_SIZE > 1.  */
void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;

    /* Enforce the guest-requested alignment before anything else.  */
    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* TLB miss: try the victim TLB, else refill, then re-read the entry.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Low bits set in tlb_addr flag a non-RAM (I/O) page.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }

        /* The io helper takes data in target byte order; TGT_BE
           converts our big-endian value to that ordering.  */
        val = TGT_BE(val);
        glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr, retaddr);
        return;
    }

    /* Slow path for a store spanning two pages.  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        int i, index2;
        target_ulong page2, tlb_addr2;
    do_unaligned_access:
        /* Make sure the second page is mapped before writing anything,
           so a fault cannot leave a half-completed store.  NOTE(review):
           this presumes the first page stays resident across this
           tlb_fill of the second page — confirm against tlb_fill.  */
        page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
        index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
        if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
            && !VICTIM_TLB_HIT(addr_write, page2)) {
            tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }

        /* Byte-by-byte store via the byte helper: not efficient, but
           simple and correct for any split across the page boundary.  */
        for (i = 0; i < DATA_SIZE; ++i) {
            /* Big-endian extract: byte i is the (DATA_SIZE-1-i)th
               lowest byte of val.  */
            uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr);
        }
        return;
    }

    /* Fast path: direct host store via the cached addend.  */
    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
}
415#endif
416#endif
417
/* Undefine every per-instantiation macro so this template can be
 * included again with a different DATA_SIZE.  */
#undef READ_ACCESS_TYPE
#undef DATA_TYPE
#undef SUFFIX
#undef LSUFFIX
#undef DATA_SIZE
#undef ADDR_READ
#undef WORD_TYPE
#undef SDATA_TYPE
#undef USUFFIX
#undef SSUFFIX
#undef BSWAP
#undef helper_le_ld_name
#undef helper_be_ld_name
#undef helper_le_lds_name
#undef helper_be_lds_name
#undef helper_le_st_name
#undef helper_be_st_name
435