#ifndef HEXAGON_MMVEC_MACROS_H
#define HEXAGON_MMVEC_MACROS_H

#include "qemu/osdep.h"
#include "qemu/host-utils.h"
#include "arch.h"
#include "mmvec/system_ext_mmvec.h"

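/*
 * Outside of TCG generation, instruction semantics receive their vector
 * operands as opaque void pointers; these macros re-type them as vector
 * registers (MMVector), vector register pairs (MMVectorPair), and
 * predicate registers (MMQReg).
 */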
#ifndef QEMU_GENERATE
#define VdV (*(MMVector *)(VdV_void))
#define VsV (*(MMVector *)(VsV_void))
#define VuV (*(MMVector *)(VuV_void))
#define VvV (*(MMVector *)(VvV_void))
#define VwV (*(MMVector *)(VwV_void))
#define VxV (*(MMVector *)(VxV_void))
#define VyV (*(MMVector *)(VyV_void))

#define VddV (*(MMVectorPair *)(VddV_void))
#define VuuV (*(MMVectorPair *)(VuuV_void))
#define VvvV (*(MMVectorPair *)(VvvV_void))
#define VxxV (*(MMVectorPair *)(VxxV_void))

#define QeV (*(MMQReg *)(QeV_void))
#define QdV (*(MMQReg *)(QdV_void))
#define QsV (*(MMQReg *)(QsV_void))
#define QtV (*(MMQReg *)(QtV_void))
#define QuV (*(MMQReg *)(QuV_void))
#define QvV (*(MMQReg *)(QvV_void))
#define QxV (*(MMQReg *)(QxV_void))
#endif

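/*
 * Scatter/gather accesses are not committed to memory directly.  Each byte
 * is staged in env->vtcm_log (value, virtual address, and a valid bit in
 * the mask), and the log is applied to memory once the whole operation has
 * been validated.
 */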
#define LOG_VTCM_BYTE(VA, MASK, VAL, IDX) \
    do { \
        env->vtcm_log.data.ub[IDX] = (VAL); \
        if (MASK) { \
            set_bit((IDX), env->vtcm_log.mask); \
        } else { \
            clear_bit((IDX), env->vtcm_log.mask); \
        } \
        env->vtcm_log.va[IDX] = (VA); \
    } while (0)

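/*
 * Predicate (Q register) helpers: one Q bit corresponds to one byte of a
 * vector.  fGENMASKW/fGENMASKH expand 4 or 2 consecutive Q bits into a
 * byte-granular 0xFF/0x00 word or halfword mask, which the fCONDMASK*
 * macros use to select between two values lane by lane.
 */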
#define fNOTQ(VAL) \
    ({ \
        MMQReg _ret; \
        int _i_; \
        for (_i_ = 0; _i_ < fVECSIZE() / 64; _i_++) { \
            _ret.ud[_i_] = ~VAL.ud[_i_]; \
        } \
        _ret; \
    })
#define fGETQBITS(REG, WIDTH, MASK, BITNO) \
    ((MASK) & (REG.w[(BITNO) >> 5] >> ((BITNO) & 0x1f)))
#define fGETQBIT(REG, BITNO) fGETQBITS(REG, 1, 1, BITNO)
#define fGENMASKW(QREG, IDX) \
    (((fGETQBIT(QREG, (IDX * 4 + 0)) ? 0xFF : 0x0) << 0) | \
     ((fGETQBIT(QREG, (IDX * 4 + 1)) ? 0xFF : 0x0) << 8) | \
     ((fGETQBIT(QREG, (IDX * 4 + 2)) ? 0xFF : 0x0) << 16) | \
     ((fGETQBIT(QREG, (IDX * 4 + 3)) ? 0xFF : 0x0) << 24))
#define fGETNIBBLE(IDX, SRC) (fSXTN(4, 8, (SRC >> (4 * IDX)) & 0xF))
#define fGETCRUMB(IDX, SRC) (fSXTN(2, 8, (SRC >> (2 * IDX)) & 0x3))
#define fGETCRUMB_SYMMETRIC(IDX, SRC) \
    ((fGETCRUMB(IDX, SRC) >= 0 ? (2 - fGETCRUMB(IDX, SRC)) \
                               : fGETCRUMB(IDX, SRC)))
#define fGENMASKH(QREG, IDX) \
    (((fGETQBIT(QREG, (IDX * 2 + 0)) ? 0xFF : 0x0) << 0) | \
     ((fGETQBIT(QREG, (IDX * 2 + 1)) ? 0xFF : 0x0) << 8))
#define fGETMASKW(VREG, QREG, IDX) (VREG.w[IDX] & fGENMASKW((QREG), IDX))
#define fGETMASKH(VREG, QREG, IDX) (VREG.h[IDX] & fGENMASKH((QREG), IDX))
#define fCONDMASK8(QREG, IDX, YESVAL, NOVAL) \
    (fGETQBIT(QREG, IDX) ? (YESVAL) : (NOVAL))
#define fCONDMASK16(QREG, IDX, YESVAL, NOVAL) \
    ((fGENMASKH(QREG, IDX) & (YESVAL)) | \
     (fGENMASKH(fNOTQ(QREG), IDX) & (NOVAL)))
#define fCONDMASK32(QREG, IDX, YESVAL, NOVAL) \
    ((fGENMASKW(QREG, IDX) & (YESVAL)) | \
     (fGENMASKW(fNOTQ(QREG), IDX) & (NOVAL)))
#define fSETQBITS(REG, WIDTH, MASK, BITNO, VAL) \
    do { \
        uint32_t __TMP = (VAL); \
        REG.w[(BITNO) >> 5] &= ~((MASK) << ((BITNO) & 0x1f)); \
        REG.w[(BITNO) >> 5] |= (((__TMP) & (MASK)) << ((BITNO) & 0x1f)); \
    } while (0)
#define fSETQBIT(REG, BITNO, VAL) fSETQBITS(REG, 1, 1, BITNO, VAL)
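/*
 * Vector geometry: fVECLOGSIZE() is log2 of the vector length in bytes, so
 * vectors here are 1 << 7 = 128 bytes, and fVELEM(WIDTH) is the number of
 * WIDTH-bit elements per vector (e.g. fVELEM(32) == 32 words).
 */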
#define fVBYTES() (fVECSIZE())
#define fVALIGN(ADDR, LOG2_ALIGNMENT) (ADDR = ADDR & ~(LOG2_ALIGNMENT - 1))
#define fVLASTBYTE(ADDR, LOG2_ALIGNMENT) (ADDR = ADDR | (LOG2_ALIGNMENT - 1))
#define fVELEM(WIDTH) ((fVECSIZE() * 8) / WIDTH)
#define fVECLOGSIZE() (7)
#define fVECSIZE() (1 << fVECLOGSIZE())
#define fSWAPB(A, B) do { uint8_t tmp = A; A = B; B = tmp; } while (0)
#define fV_AL_CHECK(EA, MASK) \
    if ((EA) & (MASK)) { \
        warn("aligning misaligned vector. EA=%08x", (EA)); \
    }
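/*
 * Scatter/gather setup delegates to mem_vector_scatter_init() and
 * mem_vector_gather_init(); fLOG_SCATTER_OP records that the pending
 * scatter accumulates (read-modify-write) with the given element size,
 * which is consumed when the log is committed.
 */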
#define fSCATTER_INIT(REGION_START, LENGTH, ELEMENT_SIZE) \
    mem_vector_scatter_init(env)
#define fGATHER_INIT(REGION_START, LENGTH, ELEMENT_SIZE) \
    mem_vector_gather_init(env)
#define fSCATTER_FINISH(OP)
#define fGATHER_FINISH()
#define fLOG_SCATTER_OP(SIZE) \
    do { \
        env->vtcm_log.op = true; \
        env->vtcm_log.op_size = SIZE; \
    } while (0)
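/*
 * Scatter-accumulate logging: stage the bytes of one word or halfword
 * element along with their target addresses.  A byte is marked valid only
 * while va + i0 still lies within [EA, EA + LEN]; the staged increments
 * are added to memory when the log is committed.
 */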
#define fVLOG_VTCM_WORD_INCREMENT(EA, OFFSET, INC, IDX, ALIGNMENT, LEN) \
    do { \
        int log_byte = 0; \
        target_ulong va = EA; \
        target_ulong va_high = EA + LEN; \
        for (int i0 = 0; i0 < 4; i0++) { \
            log_byte = (va + i0) <= va_high; \
            LOG_VTCM_BYTE(va + i0, log_byte, INC.ub[4 * IDX + i0], \
                          4 * IDX + i0); \
        } \
    } while (0)
#define fVLOG_VTCM_HALFWORD_INCREMENT(EA, OFFSET, INC, IDX, ALIGNMENT, LEN) \
    do { \
        int log_byte = 0; \
        target_ulong va = EA; \
        target_ulong va_high = EA + LEN; \
        for (int i0 = 0; i0 < 2; i0++) { \
            log_byte = (va + i0) <= va_high; \
            LOG_VTCM_BYTE(va + i0, log_byte, INC.ub[2 * IDX + i0], \
                          2 * IDX + i0); \
        } \
    } while (0)

#define fVLOG_VTCM_HALFWORD_INCREMENT_DV(EA, OFFSET, INC, IDX, IDX2, IDX_H, \
                                         ALIGNMENT, LEN) \
    do { \
        int log_byte = 0; \
        target_ulong va = EA; \
        target_ulong va_high = EA + LEN; \
        for (int i0 = 0; i0 < 2; i0++) { \
            log_byte = (va + i0) <= va_high; \
            LOG_VTCM_BYTE(va + i0, log_byte, INC.ub[2 * IDX + i0], \
                          2 * IDX + i0); \
        } \
    } while (0)

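/*
 * Gathers load each element byte by byte into the temporary register
 * tmp_VRegs[0] and stage it in the VTCM log.  Note that the QVAL argument
 * of the *Q variants textually references i0, the loop variable inside
 * GATHER_FUNCTION, so the Q predicate is re-evaluated for every byte.
 */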
#define GATHER_FUNCTION(EA, OFFSET, IDX, LEN, ELEMENT_SIZE, BANK_IDX, QVAL) \
    do { \
        int i0; \
        target_ulong va = EA; \
        target_ulong va_high = EA + LEN; \
        uintptr_t ra = GETPC(); \
        int log_byte = 0; \
        for (i0 = 0; i0 < ELEMENT_SIZE; i0++) { \
            log_byte = ((va + i0) <= va_high) && QVAL; \
            uint8_t B; \
            B = cpu_ldub_data_ra(env, EA + i0, ra); \
            env->tmp_VRegs[0].ub[ELEMENT_SIZE * IDX + i0] = B; \
            LOG_VTCM_BYTE(va + i0, log_byte, B, ELEMENT_SIZE * IDX + i0); \
        } \
    } while (0)
#define fVLOG_VTCM_GATHER_WORD(EA, OFFSET, IDX, LEN) \
    do { \
        GATHER_FUNCTION(EA, OFFSET, IDX, LEN, 4, IDX, 1); \
    } while (0)
#define fVLOG_VTCM_GATHER_HALFWORD(EA, OFFSET, IDX, LEN) \
    do { \
        GATHER_FUNCTION(EA, OFFSET, IDX, LEN, 2, IDX, 1); \
    } while (0)
#define fVLOG_VTCM_GATHER_HALFWORD_DV(EA, OFFSET, IDX, IDX2, IDX_H, LEN) \
    do { \
        GATHER_FUNCTION(EA, OFFSET, IDX, LEN, 2, (2 * IDX2 + IDX_H), 1); \
    } while (0)
#define fVLOG_VTCM_GATHER_WORDQ(EA, OFFSET, IDX, Q, LEN) \
    do { \
        GATHER_FUNCTION(EA, OFFSET, IDX, LEN, 4, IDX, \
                        fGETQBIT(QsV, 4 * IDX + i0)); \
    } while (0)
#define fVLOG_VTCM_GATHER_HALFWORDQ(EA, OFFSET, IDX, Q, LEN) \
    do { \
        GATHER_FUNCTION(EA, OFFSET, IDX, LEN, 2, IDX, \
                        fGETQBIT(QsV, 2 * IDX + i0)); \
    } while (0)
#define fVLOG_VTCM_GATHER_HALFWORDQ_DV(EA, OFFSET, IDX, IDX2, IDX_H, Q, LEN) \
    do { \
        GATHER_FUNCTION(EA, OFFSET, IDX, LEN, 2, (2 * IDX2 + IDX_H), \
                        fGETQBIT(QsV, 2 * IDX + i0)); \
    } while (0)
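/*
 * Commit an accumulating scatter: for every element whose leading byte is
 * marked in the mask, read the current memory contents, add the staged
 * increment, and store the sum back a byte at a time.  The net effect for
 * a word-sized op is roughly *(uint32_t *)va += inc.
 */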
#define SCATTER_OP_WRITE_TO_MEM(TYPE) \
    do { \
        uintptr_t ra = GETPC(); \
        for (int i = 0; i < sizeof(MMVector); i += sizeof(TYPE)) { \
            if (test_bit(i, env->vtcm_log.mask)) { \
                TYPE dst = 0; \
                TYPE inc = 0; \
                for (int j = 0; j < sizeof(TYPE); j++) { \
                    uint8_t val; \
                    val = cpu_ldub_data_ra(env, env->vtcm_log.va[i + j], ra); \
                    dst |= val << (8 * j); \
                    inc |= env->vtcm_log.data.ub[j + i] << (8 * j); \
                    clear_bit(j + i, env->vtcm_log.mask); \
                    env->vtcm_log.data.ub[j + i] = 0; \
                } \
                dst += inc; \
                for (int j = 0; j < sizeof(TYPE); j++) { \
                    cpu_stb_data_ra(env, env->vtcm_log.va[i + j], \
                                    (dst >> (8 * j)) & 0xFF, ra); \
                } \
            } \
        } \
    } while (0)
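/*
 * Probe every byte the scatter will touch before anything is written, so
 * that a potential MMU fault is raised up front rather than after a
 * partial commit.
 */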
#define SCATTER_OP_PROBE_MEM(TYPE, MMU_IDX, RETADDR) \
    do { \
        for (int i = 0; i < sizeof(MMVector); i += sizeof(TYPE)) { \
            if (test_bit(i, env->vtcm_log.mask)) { \
                for (int j = 0; j < sizeof(TYPE); j++) { \
                    probe_read(env, env->vtcm_log.va[i + j], 1, \
                               MMU_IDX, RETADDR); \
                    probe_write(env, env->vtcm_log.va[i + j], 1, \
                                MMU_IDX, RETADDR); \
                } \
            } \
        } \
    } while (0)
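/*
 * Scatters mirror the gather path: each byte of the source element IN is
 * staged in the VTCM log with its destination address, predicated on QVAL
 * and on the [EA, EA + LEN] range check; memory itself is untouched until
 * the commit step.
 */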
#define SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, ELEM_SIZE, BANK_IDX, QVAL, IN) \
    do { \
        int i0; \
        target_ulong va = EA; \
        target_ulong va_high = EA + LEN; \
        int log_byte = 0; \
        for (i0 = 0; i0 < ELEM_SIZE; i0++) { \
            log_byte = ((va + i0) <= va_high) && QVAL; \
            LOG_VTCM_BYTE(va + i0, log_byte, IN.ub[ELEM_SIZE * IDX + i0], \
                          ELEM_SIZE * IDX + i0); \
        } \
    } while (0)
#define fVLOG_VTCM_HALFWORD(EA, OFFSET, IN, IDX, LEN) \
    do { \
        SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, 2, IDX, 1, IN); \
    } while (0)
#define fVLOG_VTCM_WORD(EA, OFFSET, IN, IDX, LEN) \
    do { \
        SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, 4, IDX, 1, IN); \
    } while (0)
#define fVLOG_VTCM_HALFWORDQ(EA, OFFSET, IN, IDX, Q, LEN) \
    do { \
        SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, 2, IDX, \
                         fGETQBIT(QsV, 2 * IDX + i0), IN); \
    } while (0)
#define fVLOG_VTCM_WORDQ(EA, OFFSET, IN, IDX, Q, LEN) \
    do { \
        SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, 4, IDX, \
                         fGETQBIT(QsV, 4 * IDX + i0), IN); \
    } while (0)
#define fVLOG_VTCM_HALFWORD_DV(EA, OFFSET, IN, IDX, IDX2, IDX_H, LEN) \
    do { \
        SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, 2, \
                         (2 * IDX2 + IDX_H), 1, IN); \
    } while (0)
#define fVLOG_VTCM_HALFWORDQ_DV(EA, OFFSET, IN, IDX, Q, IDX2, IDX_H, LEN) \
    do { \
        SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, 2, (2 * IDX2 + IDX_H), \
                         fGETQBIT(QsV, 2 * IDX + i0), IN); \
    } while (0)
#define fSTORERELEASE(EA, TYPE) \
    do { \
        fV_AL_CHECK(EA, fVECSIZE() - 1); \
    } while (0)
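/*
 * Under QEMU_GENERATE these macros emit TCG code at translation time
 * instead of running in a helper: gen_vreg_load()/gen_vreg_store() move a
 * whole vector between memory and the register file (the trailing bool
 * appears to select the aligned variant), and gen_vreg_masked_store()
 * stores under control of a Q register (the trailing bool selecting the
 * negated-mask form used by fSTOREMMVNQ).
 */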
#ifdef QEMU_GENERATE
#define fLOADMMV(EA, DST) gen_vreg_load(ctx, DST##_off, EA, true)
#define fLOADMMVU(EA, DST) gen_vreg_load(ctx, DST##_off, EA, false)
#define fSTOREMMV(EA, SRC) \
    gen_vreg_store(ctx, insn, pkt, EA, SRC##_off, insn->slot, true)
#define fSTOREMMVQ(EA, SRC, MASK) \
    gen_vreg_masked_store(ctx, EA, SRC##_off, MASK##_off, insn->slot, false)
#define fSTOREMMVNQ(EA, SRC, MASK) \
    gen_vreg_masked_store(ctx, EA, SRC##_off, MASK##_off, insn->slot, true)
#define fSTOREMMVU(EA, SRC) \
    gen_vreg_store(ctx, insn, pkt, EA, SRC##_off, insn->slot, false)
#endif
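/*
 * fVFOREACH iterates over all fVELEM(WIDTH) elements of a vector, and
 * fVARRAY_ELEMENT_ACCESS picks element INDEX of type TYPE out of a vector
 * array by splitting the index into a vector number and an offset within
 * that vector.
 */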
#define fVFOREACH(WIDTH, VAR) for (VAR = 0; VAR < fVELEM(WIDTH); VAR++)
#define fVARRAY_ELEMENT_ACCESS(ARRAY, TYPE, INDEX) \
    ARRAY.v[(INDEX) / (fVECSIZE() / (sizeof(ARRAY.TYPE[0])))].TYPE[(INDEX) % \
        (fVECSIZE() / (sizeof(ARRAY.TYPE[0])))]

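/*
 * Element arithmetic helpers.  Operands are sign- or zero-extended from
 * WIDTH to 2 * WIDTH bits first, so sums and differences cannot wrap
 * before the saturation (fVSATN/fVSATUN) or averaging shift is applied.
 * For example, fVAVGU(8, 200, 100) computes (200 + 100) >> 1 = 150 in
 * 16-bit arithmetic, where the intermediate 300 would have overflowed an
 * 8-bit add.
 */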
#define fVSATDW(U, V) fVSATW(((((long long)U) << 32) | fZXTN(32, 64, V)))
#define fVASL_SATHI(U, V) fVSATW(((U) << 1) | ((V) >> 31))
#define fVUADDSAT(WIDTH, U, V) \
    fVSATUN(WIDTH, fZXTN(WIDTH, 2 * WIDTH, U) + fZXTN(WIDTH, 2 * WIDTH, V))
#define fVSADDSAT(WIDTH, U, V) \
    fVSATN(WIDTH, fSXTN(WIDTH, 2 * WIDTH, U) + fSXTN(WIDTH, 2 * WIDTH, V))
#define fVUSUBSAT(WIDTH, U, V) \
    fVSATUN(WIDTH, fZXTN(WIDTH, 2 * WIDTH, U) - fZXTN(WIDTH, 2 * WIDTH, V))
#define fVSSUBSAT(WIDTH, U, V) \
    fVSATN(WIDTH, fSXTN(WIDTH, 2 * WIDTH, U) - fSXTN(WIDTH, 2 * WIDTH, V))
#define fVAVGU(WIDTH, U, V) \
    ((fZXTN(WIDTH, 2 * WIDTH, U) + fZXTN(WIDTH, 2 * WIDTH, V)) >> 1)
#define fVAVGURND(WIDTH, U, V) \
    ((fZXTN(WIDTH, 2 * WIDTH, U) + fZXTN(WIDTH, 2 * WIDTH, V) + 1) >> 1)
#define fVNAVGU(WIDTH, U, V) \
    ((fZXTN(WIDTH, 2 * WIDTH, U) - fZXTN(WIDTH, 2 * WIDTH, V)) >> 1)
#define fVNAVGURNDSAT(WIDTH, U, V) \
    fVSATUN(WIDTH, ((fZXTN(WIDTH, 2 * WIDTH, U) - \
                     fZXTN(WIDTH, 2 * WIDTH, V) + 1) >> 1))
#define fVAVGS(WIDTH, U, V) \
    ((fSXTN(WIDTH, 2 * WIDTH, U) + fSXTN(WIDTH, 2 * WIDTH, V)) >> 1)
#define fVAVGSRND(WIDTH, U, V) \
    ((fSXTN(WIDTH, 2 * WIDTH, U) + fSXTN(WIDTH, 2 * WIDTH, V) + 1) >> 1)
#define fVNAVGS(WIDTH, U, V) \
    ((fSXTN(WIDTH, 2 * WIDTH, U) - fSXTN(WIDTH, 2 * WIDTH, V)) >> 1)
#define fVNAVGSRND(WIDTH, U, V) \
    ((fSXTN(WIDTH, 2 * WIDTH, U) - fSXTN(WIDTH, 2 * WIDTH, V) + 1) >> 1)
#define fVNAVGSRNDSAT(WIDTH, U, V) \
    fVSATN(WIDTH, ((fSXTN(WIDTH, 2 * WIDTH, U) - \
                    fSXTN(WIDTH, 2 * WIDTH, V) + 1) >> 1))
#define fVNOROUND(VAL, SHAMT) VAL
#define fVNOSAT(VAL) VAL
#define fVROUND(VAL, SHAMT) \
    ((VAL) + (((SHAMT) > 0) ? (1LL << ((SHAMT) - 1)) : 0))
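/*
 * fCARRY_FROM_ADD32 widens both addends to 64 bits, adds the carry-in C,
 * and extracts bit 32 of the result; e.g. A = B = 0x80000000 with C = 0
 * sums to 0x100000000, so the carry out is 1.
 */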
#define fCARRY_FROM_ADD32(A, B, C) \
    (((fZXTN(32, 64, A) + fZXTN(32, 64, B) + C) >> 32) & 1)
#define fUARCH_NOTE_PUMP_4X()
#define fUARCH_NOTE_PUMP_2X()

#define IV1DEAD()
#endif