/*
 * M-profile MVE Operations
 *
 * Copyright (c) 2021 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "vec_internal.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "fpu/softfloat.h"

static uint16_t mve_eci_mask(CPUARMState *env)
{
    /*
     * Return the mask of which elements in the MVE vector correspond
     * to beats being executed. The mask has 1 bits for executed lanes
     * and 0 bits where ECI says this beat was already executed.
     */
    int eci;

    if ((env->condexec_bits & 0xf) != 0) {
        return 0xffff;
    }

    eci = env->condexec_bits >> 4;
    switch (eci) {
    case ECI_NONE:
        return 0xffff;
    case ECI_A0:
        return 0xfff0;
    case ECI_A0A1:
        return 0xff00;
    case ECI_A0A1A2:
    case ECI_A0A1A2B0:
        return 0xf000;
    default:
        g_assert_not_reached();
    }
}
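
/*
 * Worked example (added for illustration, not from the original source):
 * if an insn was interrupted after beats A0 and A1 had executed, ECI is
 * ECI_A0A1 and the mask above is 0xff00 -- only the upper eight byte
 * lanes (beats 2 and 3) remain to be executed when the insn resumes.
 */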

static uint16_t mve_element_mask(CPUARMState *env)
{
    /*
     * Return the mask of which elements in the MVE vector should be
     * updated. This is a combination of multiple things:
     *  (1) by default, we update every lane in the vector
     *  (2) VPT predication stores its state in the VPR register;
     *  (3) low-overhead-branch tail predication will mask out part
     *      the vector on the final iteration of the loop
     *  (4) if EPSR.ECI is set then we must execute only some beats
     *      of the insn
     * We combine all these into a 16-bit result with the same semantics
     * as VPR.P0: 0 to mask the lane, 1 if it is active.
     * 8-bit vector ops will look at all bits of the result;
     * 16-bit ops will look at bits 0, 2, 4, ...;
     * 32-bit ops will look at bits 0, 4, 8 and 12.
     * Compare the pseudocode GetCurInstrBeat(), though that only returns
     * the 4-bit slice of the mask corresponding to a single beat.
     */
    uint16_t mask = FIELD_EX32(env->v7m.vpr, V7M_VPR, P0);

    if (!(env->v7m.vpr & R_V7M_VPR_MASK01_MASK)) {
        mask |= 0xff;
    }
    if (!(env->v7m.vpr & R_V7M_VPR_MASK23_MASK)) {
        mask |= 0xff00;
    }

    if (env->v7m.ltpsize < 4 &&
        env->regs[14] <= (1 << (4 - env->v7m.ltpsize))) {
        /*
         * Tail predication active, and this is the last loop iteration.
         * The element size is (1 << ltpsize), and we only want to process
         * loopcount elements, so we want to retain the least significant
         * (loopcount * esize) predicate bits and zero out bits above that.
         */
        int masklen = env->regs[14] << env->v7m.ltpsize;
        assert(masklen <= 16);
        uint16_t ltpmask = masklen ? MAKE_64BIT_MASK(0, masklen) : 0;
        mask &= ltpmask;
    }

    /*
     * ECI bits indicate which beats are already executed;
     * we handle this by effectively predicating them out.
     */
    mask &= mve_eci_mask(env);
    return mask;
}
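
/*
 * Worked example (added for illustration): with VPR.P0 = 0x00ff and a
 * tail-predicated loop with 3 halfword elements left (LR = 3,
 * ltpsize = 1), masklen is 3 << 1 = 6, so the LTP mask is 0x003f and
 * the combined element mask is 0x00ff & 0x003f = 0x003f before the
 * ECI mask is applied.
 */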

static void mve_advance_vpt(CPUARMState *env)
{
    /* Advance the VPT and ECI state if necessary */
    uint32_t vpr = env->v7m.vpr;
    unsigned mask01, mask23;
    uint16_t inv_mask;
    uint16_t eci_mask = mve_eci_mask(env);

    if ((env->condexec_bits & 0xf) == 0) {
        env->condexec_bits = (env->condexec_bits == (ECI_A0A1A2B0 << 4)) ?
            (ECI_A0 << 4) : (ECI_NONE << 4);
    }

    if (!(vpr & (R_V7M_VPR_MASK01_MASK | R_V7M_VPR_MASK23_MASK))) {
        /* VPT not enabled, nothing to do */
        return;
    }

    /* Invert P0 bits if needed, but only for beats we actually executed */
    mask01 = FIELD_EX32(vpr, V7M_VPR, MASK01);
    mask23 = FIELD_EX32(vpr, V7M_VPR, MASK23);
    /* Start by assuming we invert all bits corresponding to executed beats */
    inv_mask = eci_mask;
    if (mask01 <= 8) {
        /* MASK01 says don't invert low half of P0 */
        inv_mask &= ~0xff;
    }
    if (mask23 <= 8) {
        /* MASK23 says don't invert high half of P0 */
        inv_mask &= ~0xff00;
    }
    vpr ^= inv_mask;
    /* Only update MASK01 if beat 1 executed */
    if (eci_mask & 0xf0) {
        vpr = FIELD_DP32(vpr, V7M_VPR, MASK01, mask01 << 1);
    }
    /* Always update MASK23 */
    vpr = FIELD_DP32(vpr, V7M_VPR, MASK23, mask23 << 1);
    env->v7m.vpr = vpr;
}
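
/*
 * Illustrative note (not in the original): the MASK01/MASK23 fields
 * shift left by one here after each insn in a VPT block; a field value
 * of e.g. 0b0100 therefore reaches zero after two more insns, at which
 * point predication for that half of P0 ends. The P0 bits themselves
 * are inverted only when the field value exceeds 8, which is how the
 * then/else (T/E) pattern of a VPT block is implemented.
 */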

/* For loads, predicated lanes are zeroed instead of retaining their old values */
#define DO_VLDR(OP, MSIZE, LDTYPE, ESIZE, TYPE)                         \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr)    \
    {                                                                   \
        TYPE *d = vd;                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        uint16_t eci_mask = mve_eci_mask(env);                          \
        unsigned b, e;                                                  \
        /*                                                              \
         * R_SXTM allows the dest reg to become UNKNOWN for abandoned   \
         * beats so we don't care if we update part of the dest and     \
         * then take an exception.                                      \
         */                                                             \
        for (b = 0, e = 0; b < 16; b += ESIZE, e++) {                   \
            if (eci_mask & (1 << b)) {                                  \
                d[H##ESIZE(e)] = (mask & (1 << b)) ?                    \
                    cpu_##LDTYPE##_data_ra(env, addr, GETPC()) : 0;     \
            }                                                           \
            addr += MSIZE;                                              \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

#define DO_VSTR(OP, MSIZE, STTYPE, ESIZE, TYPE)                         \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr)    \
    {                                                                   \
        TYPE *d = vd;                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned b, e;                                                  \
        for (b = 0, e = 0; b < 16; b += ESIZE, e++) {                   \
            if (mask & (1 << b)) {                                      \
                cpu_##STTYPE##_data_ra(env, addr, d[H##ESIZE(e)], GETPC()); \
            }                                                           \
            addr += MSIZE;                                              \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

DO_VLDR(vldrb, 1, ldub, 1, uint8_t)
DO_VLDR(vldrh, 2, lduw, 2, uint16_t)
DO_VLDR(vldrw, 4, ldl, 4, uint32_t)

DO_VSTR(vstrb, 1, stb, 1, uint8_t)
DO_VSTR(vstrh, 2, stw, 2, uint16_t)
DO_VSTR(vstrw, 4, stl, 4, uint32_t)

DO_VLDR(vldrb_sh, 1, ldsb, 2, int16_t)
DO_VLDR(vldrb_sw, 1, ldsb, 4, int32_t)
DO_VLDR(vldrb_uh, 1, ldub, 2, uint16_t)
DO_VLDR(vldrb_uw, 1, ldub, 4, uint32_t)
DO_VLDR(vldrh_sw, 2, ldsw, 4, int32_t)
DO_VLDR(vldrh_uw, 2, lduw, 4, uint32_t)

DO_VSTR(vstrb_h, 1, stb, 2, int16_t)
DO_VSTR(vstrb_w, 1, stb, 4, int32_t)
DO_VSTR(vstrh_w, 2, stw, 4, int32_t)

#undef DO_VLDR
#undef DO_VSTR
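
/*
 * For reference (illustrative, not from the original file):
 * DO_VLDR(vldrb, 1, ldub, 1, uint8_t) above expands to a helper
 *
 *     void helper_mve_vldrb(CPUARMState *env, void *vd, uint32_t addr)
 *
 * which walks all 16 byte lanes, loading each active lane with
 * cpu_ldub_data_ra() and zeroing lanes whose predicate bit is clear.
 */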

/*
 * Gather loads/scatter stores. Here each element of Qm specifies
 * an address to use. TYPE is the datatype of the destination/source
 * vector, OFFTYPE is the datatype of the offset in the Qm vector.
 * ADDRFN is a macro which combines the base address with the offset.
 * WB is true for writeback insns, where the addresses used are
 * written back to the offset vector Qm.
 */
#define DO_VLDR_SG(OP, LDTYPE, ESIZE, TYPE, OFFTYPE, ADDRFN, WB)        \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm,         \
                          uint32_t base)                                \
    {                                                                   \
        TYPE *d = vd;                                                   \
        OFFTYPE *m = vm;                                                \
        uint16_t mask = mve_element_mask(env);                          \
        uint16_t eci_mask = mve_eci_mask(env);                          \
        unsigned e;                                                     \
        uint32_t addr;                                                  \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE, eci_mask >>= ESIZE) { \
            if (!(eci_mask & 1)) {                                      \
                continue;                                               \
            }                                                           \
            addr = ADDRFN(base, m[H##ESIZE(e)]);                        \
            d[H##ESIZE(e)] = (mask & 1) ?                               \
                cpu_##LDTYPE##_data_ra(env, addr, GETPC()) : 0;         \
            if (WB) {                                                   \
                m[H##ESIZE(e)] = addr;                                  \
            }                                                           \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

/* We know here TYPE is unsigned so always the same as the offset type */
#define DO_VSTR_SG(OP, STTYPE, ESIZE, TYPE, ADDRFN, WB)                 \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm,         \
                          uint32_t base)                                \
    {                                                                   \
        TYPE *d = vd;                                                   \
        TYPE *m = vm;                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        uint16_t eci_mask = mve_eci_mask(env);                          \
        unsigned e;                                                     \
        uint32_t addr;                                                  \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE, eci_mask >>= ESIZE) { \
            if (!(eci_mask & 1)) {                                      \
                continue;                                               \
            }                                                           \
            addr = ADDRFN(base, m[H##ESIZE(e)]);                        \
            if (mask & 1) {                                             \
                cpu_##STTYPE##_data_ra(env, addr, d[H##ESIZE(e)], GETPC()); \
            }                                                           \
            if (WB) {                                                   \
                m[H##ESIZE(e)] = addr;                                  \
            }                                                           \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

/*
 * The 64-bit accesses are slightly different: they are done as two
 * 32-bit accesses, controlled by the predicate mask for the relevant
 * beat, and with a single 32-bit offset in the first of the two
 * Qm elements.
 * Address writeback happens on the odd beats and updates the address
 * stored in the even-beat element.
 */
#define DO_VLDR64_SG(OP, ADDRFN, WB)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm,         \
                          uint32_t base)                                \
    {                                                                   \
        uint32_t *d = vd;                                               \
        uint32_t *m = vm;                                               \
        uint16_t mask = mve_element_mask(env);                          \
        uint16_t eci_mask = mve_eci_mask(env);                          \
        unsigned e;                                                     \
        uint32_t addr;                                                  \
        for (e = 0; e < 16 / 4; e++, mask >>= 4, eci_mask >>= 4) {      \
            if (!(eci_mask & 1)) {                                      \
                continue;                                               \
            }                                                           \
            addr = ADDRFN(base, m[H4(e & ~1)]);                         \
            addr += 4 * (e & 1);                                        \
            d[H4(e)] = (mask & 1) ? cpu_ldl_data_ra(env, addr, GETPC()) : 0; \
            if (WB && (e & 1)) {                                        \
                m[H4(e & ~1)] = addr - 4;                               \
            }                                                           \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

#define DO_VSTR64_SG(OP, ADDRFN, WB)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm,         \
                          uint32_t base)                                \
    {                                                                   \
        uint32_t *d = vd;                                               \
        uint32_t *m = vm;                                               \
        uint16_t mask = mve_element_mask(env);                          \
        uint16_t eci_mask = mve_eci_mask(env);                          \
        unsigned e;                                                     \
        uint32_t addr;                                                  \
        for (e = 0; e < 16 / 4; e++, mask >>= 4, eci_mask >>= 4) {      \
            if (!(eci_mask & 1)) {                                      \
                continue;                                               \
            }                                                           \
            addr = ADDRFN(base, m[H4(e & ~1)]);                         \
            addr += 4 * (e & 1);                                        \
            if (mask & 1) {                                             \
                cpu_stl_data_ra(env, addr, d[H4(e)], GETPC());          \
            }                                                           \
            if (WB && (e & 1)) {                                        \
                m[H4(e & ~1)] = addr - 4;                               \
            }                                                           \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

#define ADDR_ADD(BASE, OFFSET) ((BASE) + (OFFSET))
#define ADDR_ADD_OSH(BASE, OFFSET) ((BASE) + ((OFFSET) << 1))
#define ADDR_ADD_OSW(BASE, OFFSET) ((BASE) + ((OFFSET) << 2))
#define ADDR_ADD_OSD(BASE, OFFSET) ((BASE) + ((OFFSET) << 3))
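
/*
 * Example (illustrative): for a gather load with halfword-scaled
 * offsets, ADDR_ADD_OSH(0x1000, 3) yields 0x1000 + (3 << 1) = 0x1006,
 * i.e. each offset element is scaled by the memory element size.
 */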

DO_VLDR_SG(vldrb_sg_sh, ldsb, 2, int16_t, uint16_t, ADDR_ADD, false)
DO_VLDR_SG(vldrb_sg_sw, ldsb, 4, int32_t, uint32_t, ADDR_ADD, false)
DO_VLDR_SG(vldrh_sg_sw, ldsw, 4, int32_t, uint32_t, ADDR_ADD, false)

DO_VLDR_SG(vldrb_sg_ub, ldub, 1, uint8_t, uint8_t, ADDR_ADD, false)
DO_VLDR_SG(vldrb_sg_uh, ldub, 2, uint16_t, uint16_t, ADDR_ADD, false)
DO_VLDR_SG(vldrb_sg_uw, ldub, 4, uint32_t, uint32_t, ADDR_ADD, false)
DO_VLDR_SG(vldrh_sg_uh, lduw, 2, uint16_t, uint16_t, ADDR_ADD, false)
DO_VLDR_SG(vldrh_sg_uw, lduw, 4, uint32_t, uint32_t, ADDR_ADD, false)
DO_VLDR_SG(vldrw_sg_uw, ldl, 4, uint32_t, uint32_t, ADDR_ADD, false)
DO_VLDR64_SG(vldrd_sg_ud, ADDR_ADD, false)

DO_VLDR_SG(vldrh_sg_os_sw, ldsw, 4, int32_t, uint32_t, ADDR_ADD_OSH, false)
DO_VLDR_SG(vldrh_sg_os_uh, lduw, 2, uint16_t, uint16_t, ADDR_ADD_OSH, false)
DO_VLDR_SG(vldrh_sg_os_uw, lduw, 4, uint32_t, uint32_t, ADDR_ADD_OSH, false)
DO_VLDR_SG(vldrw_sg_os_uw, ldl, 4, uint32_t, uint32_t, ADDR_ADD_OSW, false)
DO_VLDR64_SG(vldrd_sg_os_ud, ADDR_ADD_OSD, false)

DO_VSTR_SG(vstrb_sg_ub, stb, 1, uint8_t, ADDR_ADD, false)
DO_VSTR_SG(vstrb_sg_uh, stb, 2, uint16_t, ADDR_ADD, false)
DO_VSTR_SG(vstrb_sg_uw, stb, 4, uint32_t, ADDR_ADD, false)
DO_VSTR_SG(vstrh_sg_uh, stw, 2, uint16_t, ADDR_ADD, false)
DO_VSTR_SG(vstrh_sg_uw, stw, 4, uint32_t, ADDR_ADD, false)
DO_VSTR_SG(vstrw_sg_uw, stl, 4, uint32_t, ADDR_ADD, false)
DO_VSTR64_SG(vstrd_sg_ud, ADDR_ADD, false)

DO_VSTR_SG(vstrh_sg_os_uh, stw, 2, uint16_t, ADDR_ADD_OSH, false)
DO_VSTR_SG(vstrh_sg_os_uw, stw, 4, uint32_t, ADDR_ADD_OSH, false)
DO_VSTR_SG(vstrw_sg_os_uw, stl, 4, uint32_t, ADDR_ADD_OSW, false)
DO_VSTR64_SG(vstrd_sg_os_ud, ADDR_ADD_OSD, false)

DO_VLDR_SG(vldrw_sg_wb_uw, ldl, 4, uint32_t, uint32_t, ADDR_ADD, true)
DO_VLDR64_SG(vldrd_sg_wb_ud, ADDR_ADD, true)
DO_VSTR_SG(vstrw_sg_wb_uw, stl, 4, uint32_t, ADDR_ADD, true)
DO_VSTR64_SG(vstrd_sg_wb_ud, ADDR_ADD, true)

/*
 * Deinterleaving loads/interleaving stores.
 *
 * For these helpers we are passed the index of the first Qreg
 * (VLD2/VST2 will also access Qreg+1, VLD4/VST4 access Qreg .. Qreg+3)
 * and the value of the base address register Rn.
 * The helpers are specialized for size and element count, so there is
 * a separate helper for each (pattern, size) combination.
 *
 * These insns are beatwise but not predicated, so we must honour ECI
 * via mve_eci_mask(), but need not look at mve_element_mask(). They
 * also do not call mve_advance_vpt(): the ECI state update for these
 * insns is handled by the generated code instead.
 *
 * All the memory accesses within a beat are merged into a single
 * little-endian 32-bit load or store; off[] gives the pattern of
 * which element of which Qreg each beat touches.
 */
#define DO_VLD4B(OP, O1, O2, O3, O4)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat, e;                                                    \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O2, O3, O4 };               \
        uint32_t addr, data;                                            \
        for (beat = 0; beat < 4; beat++, mask >>= 4) {                  \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat] * 4;                                \
            data = cpu_ldl_le_data_ra(env, addr, GETPC());              \
            for (e = 0; e < 4; e++, data >>= 8) {                       \
                uint8_t *qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + e); \
                qd[H1(off[beat])] = data;                               \
            }                                                           \
        }                                                               \
    }

#define DO_VLD4H(OP, O1, O2)                                            \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat;                                                       \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O1, O2, O2 };               \
        uint32_t addr, data;                                            \
        int y; /* y counts 0 2 0 2 */                                   \
        uint16_t *qd;                                                   \
        for (beat = 0, y = 0; beat < 4; beat++, mask >>= 4, y ^= 2) {   \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat] * 8 + (beat & 1) * 4;               \
            data = cpu_ldl_le_data_ra(env, addr, GETPC());              \
            qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + y);             \
            qd[H2(off[beat])] = data;                                   \
            data >>= 16;                                                \
            qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + y + 1);         \
            qd[H2(off[beat])] = data;                                   \
        }                                                               \
    }

#define DO_VLD4W(OP, O1, O2, O3, O4)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat;                                                       \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O2, O3, O4 };               \
        uint32_t addr, data;                                            \
        uint32_t *qd;                                                   \
        int y;                                                          \
        for (beat = 0; beat < 4; beat++, mask >>= 4) {                  \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat] * 4;                                \
            data = cpu_ldl_le_data_ra(env, addr, GETPC());              \
            y = (beat + (O1 & 2)) & 3;                                  \
            qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + y);             \
            qd[H4(off[beat] >> 2)] = data;                              \
        }                                                               \
    }

DO_VLD4B(vld40b, 0, 1, 10, 11)
DO_VLD4B(vld41b, 2, 3, 12, 13)
DO_VLD4B(vld42b, 4, 5, 14, 15)
DO_VLD4B(vld43b, 6, 7, 8, 9)

DO_VLD4H(vld40h, 0, 5)
DO_VLD4H(vld41h, 1, 6)
DO_VLD4H(vld42h, 2, 7)
DO_VLD4H(vld43h, 3, 4)

DO_VLD4W(vld40w, 0, 1, 10, 11)
DO_VLD4W(vld41w, 2, 3, 12, 13)
DO_VLD4W(vld42w, 4, 5, 14, 15)
DO_VLD4W(vld43w, 6, 7, 8, 9)
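
/*
 * Illustrative note: in DO_VLD4B(vld40b, 0, 1, 10, 11) the off[] values
 * mean the four beats of VLD40.8 fill byte elements 0, 1, 10 and 11 of
 * each of the four destination Qregs; the vld41b/vld42b/vld43b patterns
 * between them cover the remaining twelve element positions.
 */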

#define DO_VLD2B(OP, O1, O2, O3, O4)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat, e;                                                    \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O2, O3, O4 };               \
        uint32_t addr, data;                                            \
        uint8_t *qd;                                                    \
        for (beat = 0; beat < 4; beat++, mask >>= 4) {                  \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat] * 2;                                \
            data = cpu_ldl_le_data_ra(env, addr, GETPC());              \
            for (e = 0; e < 4; e++, data >>= 8) {                       \
                qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + (e & 1));    \
                qd[H1(off[beat] + (e >> 1))] = data;                    \
            }                                                           \
        }                                                               \
    }

#define DO_VLD2H(OP, O1, O2, O3, O4)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat;                                                       \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O2, O3, O4 };               \
        uint32_t addr, data;                                            \
        int e;                                                          \
        uint16_t *qd;                                                   \
        for (beat = 0; beat < 4; beat++, mask >>= 4) {                  \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat] * 4;                                \
            data = cpu_ldl_le_data_ra(env, addr, GETPC());              \
            for (e = 0; e < 2; e++, data >>= 16) {                      \
                qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + e);         \
                qd[H2(off[beat])] = data;                               \
            }                                                           \
        }                                                               \
    }

#define DO_VLD2W(OP, O1, O2, O3, O4)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat;                                                       \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O2, O3, O4 };               \
        uint32_t addr, data;                                            \
        uint32_t *qd;                                                   \
        for (beat = 0; beat < 4; beat++, mask >>= 4) {                  \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat];                                    \
            data = cpu_ldl_le_data_ra(env, addr, GETPC());              \
            qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + (beat & 1));    \
            qd[H4(off[beat] >> 3)] = data;                              \
        }                                                               \
    }

DO_VLD2B(vld20b, 0, 2, 12, 14)
DO_VLD2B(vld21b, 4, 6, 8, 10)

DO_VLD2H(vld20h, 0, 1, 6, 7)
DO_VLD2H(vld21h, 2, 3, 4, 5)

DO_VLD2W(vld20w, 0, 4, 24, 28)
DO_VLD2W(vld21w, 8, 12, 16, 20)

#define DO_VST4B(OP, O1, O2, O3, O4)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat, e;                                                    \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O2, O3, O4 };               \
        uint32_t addr, data;                                            \
        for (beat = 0; beat < 4; beat++, mask >>= 4) {                  \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat] * 4;                                \
            data = 0;                                                   \
            for (e = 3; e >= 0; e--) {                                  \
                uint8_t *qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + e); \
                data = (data << 8) | qd[H1(off[beat])];                 \
            }                                                           \
            cpu_stl_le_data_ra(env, addr, data, GETPC());               \
        }                                                               \
    }

#define DO_VST4H(OP, O1, O2)                                            \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat;                                                       \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O1, O2, O2 };               \
        uint32_t addr, data;                                            \
        int y; /* y counts 0 2 0 2 */                                   \
        uint16_t *qd;                                                   \
        for (beat = 0, y = 0; beat < 4; beat++, mask >>= 4, y ^= 2) {   \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat] * 8 + (beat & 1) * 4;               \
            qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + y);             \
            data = qd[H2(off[beat])];                                   \
            qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + y + 1);         \
            data |= qd[H2(off[beat])] << 16;                            \
            cpu_stl_le_data_ra(env, addr, data, GETPC());               \
        }                                                               \
    }

#define DO_VST4W(OP, O1, O2, O3, O4)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat;                                                       \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O2, O3, O4 };               \
        uint32_t addr, data;                                            \
        uint32_t *qd;                                                   \
        int y;                                                          \
        for (beat = 0; beat < 4; beat++, mask >>= 4) {                  \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat] * 4;                                \
            y = (beat + (O1 & 2)) & 3;                                  \
            qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + y);             \
            data = qd[H4(off[beat] >> 2)];                              \
            cpu_stl_le_data_ra(env, addr, data, GETPC());               \
        }                                                               \
    }

DO_VST4B(vst40b, 0, 1, 10, 11)
DO_VST4B(vst41b, 2, 3, 12, 13)
DO_VST4B(vst42b, 4, 5, 14, 15)
DO_VST4B(vst43b, 6, 7, 8, 9)

DO_VST4H(vst40h, 0, 5)
DO_VST4H(vst41h, 1, 6)
DO_VST4H(vst42h, 2, 7)
DO_VST4H(vst43h, 3, 4)

DO_VST4W(vst40w, 0, 1, 10, 11)
DO_VST4W(vst41w, 2, 3, 12, 13)
DO_VST4W(vst42w, 4, 5, 14, 15)
DO_VST4W(vst43w, 6, 7, 8, 9)

#define DO_VST2B(OP, O1, O2, O3, O4)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat, e;                                                    \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O2, O3, O4 };               \
        uint32_t addr, data;                                            \
        uint8_t *qd;                                                    \
        for (beat = 0; beat < 4; beat++, mask >>= 4) {                  \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat] * 2;                                \
            data = 0;                                                   \
            for (e = 3; e >= 0; e--) {                                  \
                qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + (e & 1));    \
                data = (data << 8) | qd[H1(off[beat] + (e >> 1))];      \
            }                                                           \
            cpu_stl_le_data_ra(env, addr, data, GETPC());               \
        }                                                               \
    }

#define DO_VST2H(OP, O1, O2, O3, O4)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat;                                                       \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O2, O3, O4 };               \
        uint32_t addr, data;                                            \
        int e;                                                          \
        uint16_t *qd;                                                   \
        for (beat = 0; beat < 4; beat++, mask >>= 4) {                  \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat] * 4;                                \
            data = 0;                                                   \
            for (e = 1; e >= 0; e--) {                                  \
                qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + e);         \
                data = (data << 16) | qd[H2(off[beat])];                \
            }                                                           \
            cpu_stl_le_data_ra(env, addr, data, GETPC());               \
        }                                                               \
    }

#define DO_VST2W(OP, O1, O2, O3, O4)                                    \
    void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx,             \
                          uint32_t base)                                \
    {                                                                   \
        int beat;                                                       \
        uint16_t mask = mve_eci_mask(env);                              \
        static const uint8_t off[4] = { O1, O2, O3, O4 };               \
        uint32_t addr, data;                                            \
        uint32_t *qd;                                                   \
        for (beat = 0; beat < 4; beat++, mask >>= 4) {                  \
            if ((mask & 1) == 0) {                                      \
                /* ECI says skip this beat */                           \
                continue;                                               \
            }                                                           \
            addr = base + off[beat];                                    \
            qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + (beat & 1));    \
            data = qd[H4(off[beat] >> 3)];                              \
            cpu_stl_le_data_ra(env, addr, data, GETPC());               \
        }                                                               \
    }

DO_VST2B(vst20b, 0, 2, 12, 14)
DO_VST2B(vst21b, 4, 6, 8, 10)

DO_VST2H(vst20h, 0, 1, 6, 7)
DO_VST2H(vst21h, 2, 3, 4, 5)

DO_VST2W(vst20w, 0, 4, 24, 28)
DO_VST2W(vst21w, 8, 12, 16, 20)

/*
 * The mergemask(D, R, M) macro performs the operation "*D = R" but
 * storing only the bytes which correspond to 1 bits in M,
 * leaving other bytes in *D unchanged. We use _Generic
 * to select the correct implementation based on the type of D.
 */
static void mergemask_ub(uint8_t *d, uint8_t r, uint16_t mask)
{
    if (mask & 1) {
        *d = r;
    }
}

static void mergemask_sb(int8_t *d, int8_t r, uint16_t mask)
{
    mergemask_ub((uint8_t *)d, r, mask);
}

static void mergemask_uh(uint16_t *d, uint16_t r, uint16_t mask)
{
    uint16_t bmask = expand_pred_b(mask);
    *d = (*d & ~bmask) | (r & bmask);
}

static void mergemask_sh(int16_t *d, int16_t r, uint16_t mask)
{
    mergemask_uh((uint16_t *)d, r, mask);
}

static void mergemask_uw(uint32_t *d, uint32_t r, uint16_t mask)
{
    uint32_t bmask = expand_pred_b(mask);
    *d = (*d & ~bmask) | (r & bmask);
}

static void mergemask_sw(int32_t *d, int32_t r, uint16_t mask)
{
    mergemask_uw((uint32_t *)d, r, mask);
}

static void mergemask_uq(uint64_t *d, uint64_t r, uint16_t mask)
{
    uint64_t bmask = expand_pred_b(mask);
    *d = (*d & ~bmask) | (r & bmask);
}

static void mergemask_sq(int64_t *d, int64_t r, uint16_t mask)
{
    mergemask_uq((uint64_t *)d, r, mask);
}

#define mergemask(D, R, M)                      \
    _Generic(D,                                 \
             uint8_t *:  mergemask_ub,          \
             int8_t *:   mergemask_sb,          \
             uint16_t *: mergemask_uh,          \
             int16_t *:  mergemask_sh,          \
             uint32_t *: mergemask_uw,          \
             int32_t *:  mergemask_sw,          \
             uint64_t *: mergemask_uq,          \
             int64_t *:  mergemask_sq)(D, R, M)
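
/*
 * Worked example (illustrative): for a uint32_t destination element
 * holding 0x11223344, mergemask(&d, 0xaabbccdd, 0x0003) expands the
 * low two predicate bits to the byte mask 0x0000ffff, so the stored
 * value is 0x1122ccdd -- only the two least significant bytes change.
 */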

void HELPER(mve_vdup)(CPUARMState *env, void *vd, uint32_t val)
{
    /*
     * The generated code already replicated an 8 or 16 bit constant
     * into the 32-bit value, so we only need to write the 32-bit
     * value to all elements of the Qreg, allowing for predication.
     */
    uint32_t *d = vd;
    uint16_t mask = mve_element_mask(env);
    unsigned e;
    for (e = 0; e < 16 / 4; e++, mask >>= 4) {
        mergemask(&d[H4(e)], val, mask);
    }
    mve_advance_vpt(env);
}

#define DO_1OP(OP, ESIZE, TYPE, FN)                                     \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm)         \
    {                                                                   \
        TYPE *d = vd, *m = vm;                                          \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            mergemask(&d[H##ESIZE(e)], FN(m[H##ESIZE(e)]), mask);       \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

#define DO_CLS_B(N)   (clrsb32(N) - 24)
#define DO_CLS_H(N)   (clrsb32(N) - 16)

DO_1OP(vclsb, 1, int8_t, DO_CLS_B)
DO_1OP(vclsh, 2, int16_t, DO_CLS_H)
DO_1OP(vclsw, 4, int32_t, clrsb32)

#define DO_CLZ_B(N)   (clz32(N) - 24)
#define DO_CLZ_H(N)   (clz32(N) - 16)

DO_1OP(vclzb, 1, uint8_t, DO_CLZ_B)
DO_1OP(vclzh, 2, uint16_t, DO_CLZ_H)
DO_1OP(vclzw, 4, uint32_t, clz32)

DO_1OP(vrev16b, 2, uint16_t, bswap16)
DO_1OP(vrev32b, 4, uint32_t, bswap32)
DO_1OP(vrev32h, 4, uint32_t, hswap32)
DO_1OP(vrev64b, 8, uint64_t, bswap64)
DO_1OP(vrev64h, 8, uint64_t, hswap64)
DO_1OP(vrev64w, 8, uint64_t, wswap64)

#define DO_NOT(N) (~(N))

DO_1OP(vmvn, 8, uint64_t, DO_NOT)

#define DO_ABS(N) ((N) < 0 ? -(N) : (N))
#define DO_FABSH(N)  ((N) & dup_const(MO_16, 0x7fff))
#define DO_FABSS(N)  ((N) & dup_const(MO_32, 0x7fffffff))

DO_1OP(vabsb, 1, int8_t, DO_ABS)
DO_1OP(vabsh, 2, int16_t, DO_ABS)
DO_1OP(vabsw, 4, int32_t, DO_ABS)

/* We can do these 64 bits at a time */
DO_1OP(vfabsh, 8, uint64_t, DO_FABSH)
DO_1OP(vfabss, 8, uint64_t, DO_FABSS)

#define DO_NEG(N)    (-(N))
#define DO_FNEGH(N)  ((N) ^ dup_const(MO_16, 0x8000))
#define DO_FNEGS(N)  ((N) ^ dup_const(MO_32, 0x80000000))

DO_1OP(vnegb, 1, int8_t, DO_NEG)
DO_1OP(vnegh, 2, int16_t, DO_NEG)
DO_1OP(vnegw, 4, int32_t, DO_NEG)

/* We can do these 64 bits at a time */
DO_1OP(vfnegh, 8, uint64_t, DO_FNEGH)
DO_1OP(vfnegs, 8, uint64_t, DO_FNEGS)

/*
 * 1 operand immediates: Vda is destination and possibly also one source.
 * All these insns work at 64-bit widths.
 */
#define DO_1OP_IMM(OP, FN)                                              \
    void HELPER(mve_##OP)(CPUARMState *env, void *vda, uint64_t imm)    \
    {                                                                   \
        uint64_t *da = vda;                                             \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        for (e = 0; e < 16 / 8; e++, mask >>= 8) {                      \
            mergemask(&da[H8(e)], FN(da[H8(e)], imm), mask);            \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

#define DO_MOVI(N, I) (I)
#define DO_ANDI(N, I) ((N) & (I))
#define DO_ORRI(N, I) ((N) | (I))

DO_1OP_IMM(vmovi, DO_MOVI)
DO_1OP_IMM(vandi, DO_ANDI)
DO_1OP_IMM(vorri, DO_ORRI)
#define DO_2OP(OP, ESIZE, TYPE, FN)                                     \
    void HELPER(glue(mve_, OP))(CPUARMState *env,                       \
                                void *vd, void *vn, void *vm)           \
    {                                                                   \
        TYPE *d = vd, *n = vn, *m = vm;                                 \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            mergemask(&d[H##ESIZE(e)],                                  \
                      FN(n[H##ESIZE(e)], m[H##ESIZE(e)]), mask);        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

/* provide unsigned 2-op helpers for all sizes */
#define DO_2OP_U(OP, FN)                        \
    DO_2OP(OP##b, 1, uint8_t, FN)               \
    DO_2OP(OP##h, 2, uint16_t, FN)              \
    DO_2OP(OP##w, 4, uint32_t, FN)

/* provide signed 2-op helpers for all sizes */
#define DO_2OP_S(OP, FN)                        \
    DO_2OP(OP##b, 1, int8_t, FN)                \
    DO_2OP(OP##h, 2, int16_t, FN)               \
    DO_2OP(OP##w, 4, int32_t, FN)

/*
 * "Long" operations where two half-sized inputs (taken from either the
 * top or the bottom of the input vector) produce a double-width result.
 * Here ESIZE, TYPE are for the input, and LESIZE, LTYPE for the output.
 */
#define DO_2OP_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN)               \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \
    {                                                                   \
        LTYPE *d = vd;                                                  \
        TYPE *n = vn, *m = vm;                                          \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned le;                                                    \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) {         \
            LTYPE r = FN((LTYPE)n[H##ESIZE(le * 2 + TOP)],              \
                         m[H##ESIZE(le * 2 + TOP)]);                    \
            mergemask(&d[H##LESIZE(le)], r, mask);                      \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

#define DO_2OP_SAT(OP, ESIZE, TYPE, FN)                                 \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \
    {                                                                   \
        TYPE *d = vd, *n = vn, *m = vm;                                 \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        bool qc = false;                                                \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            bool sat = false;                                           \
            TYPE r = FN(n[H##ESIZE(e)], m[H##ESIZE(e)], &sat);          \
            mergemask(&d[H##ESIZE(e)], r, mask);                        \
            qc |= sat & mask & 1;                                       \
        }                                                               \
        if (qc) {                                                       \
            env->vfp.qc[0] = qc;                                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

/* provide unsigned saturating 2-op helpers for all sizes */
#define DO_2OP_SAT_U(OP, FN)                    \
    DO_2OP_SAT(OP##b, 1, uint8_t, FN)           \
    DO_2OP_SAT(OP##h, 2, uint16_t, FN)          \
    DO_2OP_SAT(OP##w, 4, uint32_t, FN)

/* provide signed saturating 2-op helpers for all sizes */
#define DO_2OP_SAT_S(OP, FN)                    \
    DO_2OP_SAT(OP##b, 1, int8_t, FN)            \
    DO_2OP_SAT(OP##h, 2, int16_t, FN)           \
    DO_2OP_SAT(OP##w, 4, int32_t, FN)

#define DO_AND(N, M)  ((N) & (M))
#define DO_BIC(N, M)  ((N) & ~(M))
#define DO_ORR(N, M)  ((N) | (M))
#define DO_ORN(N, M)  ((N) | ~(M))
#define DO_EOR(N, M)  ((N) ^ (M))

DO_2OP(vand, 8, uint64_t, DO_AND)
DO_2OP(vbic, 8, uint64_t, DO_BIC)
DO_2OP(vorr, 8, uint64_t, DO_ORR)
DO_2OP(vorn, 8, uint64_t, DO_ORN)
DO_2OP(veor, 8, uint64_t, DO_EOR)

#define DO_ADD(N, M) ((N) + (M))
#define DO_SUB(N, M) ((N) - (M))
#define DO_MUL(N, M) ((N) * (M))

DO_2OP_U(vadd, DO_ADD)
DO_2OP_U(vsub, DO_SUB)
DO_2OP_U(vmul, DO_MUL)

DO_2OP_L(vmullbsb, 0, 1, int8_t, 2, int16_t, DO_MUL)
DO_2OP_L(vmullbsh, 0, 2, int16_t, 4, int32_t, DO_MUL)
DO_2OP_L(vmullbsw, 0, 4, int32_t, 8, int64_t, DO_MUL)
DO_2OP_L(vmullbub, 0, 1, uint8_t, 2, uint16_t, DO_MUL)
DO_2OP_L(vmullbuh, 0, 2, uint16_t, 4, uint32_t, DO_MUL)
DO_2OP_L(vmullbuw, 0, 4, uint32_t, 8, uint64_t, DO_MUL)

DO_2OP_L(vmulltsb, 1, 1, int8_t, 2, int16_t, DO_MUL)
DO_2OP_L(vmulltsh, 1, 2, int16_t, 4, int32_t, DO_MUL)
DO_2OP_L(vmulltsw, 1, 4, int32_t, 8, int64_t, DO_MUL)
DO_2OP_L(vmulltub, 1, 1, uint8_t, 2, uint16_t, DO_MUL)
DO_2OP_L(vmulltuh, 1, 2, uint16_t, 4, uint32_t, DO_MUL)
DO_2OP_L(vmulltuw, 1, 4, uint32_t, 8, uint64_t, DO_MUL)

/*
 * Polynomial multiply. We can always do this generating 64 bits
 * of the result at a time, so we don't need to use DO_2OP_L.
 */
#define VMULLPH_MASK 0x00ff00ff00ff00ffULL
#define VMULLPW_MASK 0x0000ffff0000ffffULL
#define DO_VMULLPBH(N, M) pmull_h((N) & VMULLPH_MASK, (M) & VMULLPH_MASK)
#define DO_VMULLPTH(N, M) DO_VMULLPBH((N) >> 8, (M) >> 8)
#define DO_VMULLPBW(N, M) pmull_w((N) & VMULLPW_MASK, (M) & VMULLPW_MASK)
#define DO_VMULLPTW(N, M) DO_VMULLPBW((N) >> 16, (M) >> 16)

DO_2OP(vmullpbh, 8, uint64_t, DO_VMULLPBH)
DO_2OP(vmullpth, 8, uint64_t, DO_VMULLPTH)
DO_2OP(vmullpbw, 8, uint64_t, DO_VMULLPBW)
DO_2OP(vmullptw, 8, uint64_t, DO_VMULLPTW)

/*
 * Because the computation type is at least twice as large as required,
 * these work for both signed and unsigned source types.
 */
static inline uint8_t do_mulh_b(int32_t n, int32_t m)
{
    return (n * m) >> 8;
}

static inline uint16_t do_mulh_h(int32_t n, int32_t m)
{
    return (n * m) >> 16;
}

static inline uint32_t do_mulh_w(int64_t n, int64_t m)
{
    return (n * m) >> 32;
}

static inline uint8_t do_rmulh_b(int32_t n, int32_t m)
{
    return (n * m + (1U << 7)) >> 8;
}

static inline uint16_t do_rmulh_h(int32_t n, int32_t m)
{
    return (n * m + (1U << 15)) >> 16;
}

static inline uint32_t do_rmulh_w(int64_t n, int64_t m)
{
    return (n * m + (1U << 31)) >> 32;
}
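
/*
 * Example (illustrative): do_mulh_h(0x4000, 0x4000) returns
 * (0x4000 * 0x4000) >> 16 = 0x1000, the high half of the 32-bit
 * product; do_rmulh_h() adds 1 << 15 first, so a product with bit 15
 * set rounds up instead of truncating.
 */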

DO_2OP(vmulhsb, 1, int8_t, do_mulh_b)
DO_2OP(vmulhsh, 2, int16_t, do_mulh_h)
DO_2OP(vmulhsw, 4, int32_t, do_mulh_w)
DO_2OP(vmulhub, 1, uint8_t, do_mulh_b)
DO_2OP(vmulhuh, 2, uint16_t, do_mulh_h)
DO_2OP(vmulhuw, 4, uint32_t, do_mulh_w)

DO_2OP(vrmulhsb, 1, int8_t, do_rmulh_b)
DO_2OP(vrmulhsh, 2, int16_t, do_rmulh_h)
DO_2OP(vrmulhsw, 4, int32_t, do_rmulh_w)
DO_2OP(vrmulhub, 1, uint8_t, do_rmulh_b)
DO_2OP(vrmulhuh, 2, uint16_t, do_rmulh_h)
DO_2OP(vrmulhuw, 4, uint32_t, do_rmulh_w)

#define DO_MAX(N, M)  ((N) >= (M) ? (N) : (M))
#define DO_MIN(N, M)  ((N) >= (M) ? (M) : (N))

DO_2OP_S(vmaxs, DO_MAX)
DO_2OP_U(vmaxu, DO_MAX)
DO_2OP_S(vmins, DO_MIN)
DO_2OP_U(vminu, DO_MIN)

#define DO_ABD(N, M) ((N) >= (M) ? (N) - (M) : (M) - (N))

DO_2OP_S(vabds, DO_ABD)
DO_2OP_U(vabdu, DO_ABD)

static inline uint32_t do_vhadd_u(uint32_t n, uint32_t m)
{
    return ((uint64_t)n + m) >> 1;
}

static inline int32_t do_vhadd_s(int32_t n, int32_t m)
{
    return ((int64_t)n + m) >> 1;
}

static inline uint32_t do_vhsub_u(uint32_t n, uint32_t m)
{
    return ((uint64_t)n - m) >> 1;
}

static inline int32_t do_vhsub_s(int32_t n, int32_t m)
{
    return ((int64_t)n - m) >> 1;
}

DO_2OP_S(vhadds, do_vhadd_s)
DO_2OP_U(vhaddu, do_vhadd_u)
DO_2OP_S(vhsubs, do_vhsub_s)
DO_2OP_U(vhsubu, do_vhsub_u)

#define DO_VSHLS(N, M) do_sqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, false, NULL)
#define DO_VSHLU(N, M) do_uqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, false, NULL)
#define DO_VRSHLS(N, M) do_sqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, true, NULL)
#define DO_VRSHLU(N, M) do_uqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, true, NULL)

DO_2OP_S(vshls, DO_VSHLS)
DO_2OP_U(vshlu, DO_VSHLU)
DO_2OP_S(vrshls, DO_VRSHLS)
DO_2OP_U(vrshlu, DO_VRSHLU)

#define DO_RHADD_S(N, M) (((int64_t)(N) + (M) + 1) >> 1)
#define DO_RHADD_U(N, M) (((uint64_t)(N) + (M) + 1) >> 1)

DO_2OP_S(vrhadds, DO_RHADD_S)
DO_2OP_U(vrhaddu, DO_RHADD_U)

static void do_vadc(CPUARMState *env, uint32_t *d, uint32_t *n, uint32_t *m,
                    uint32_t inv, uint32_t carry_in, bool update_flags)
{
    uint16_t mask = mve_element_mask(env);
    unsigned e;

    /* If any additions trigger, we will update flags. */
    if (mask & 0x1111) {
        update_flags = true;
    }

    for (e = 0; e < 16 / 4; e++, mask >>= 4) {
        uint64_t r = carry_in;
        r += n[H4(e)];
        r += m[H4(e)] ^ inv;
        if (mask & 1) {
            carry_in = r >> 32;
        }
        mergemask(&d[H4(e)], r, mask);
    }

    if (update_flags) {
        /* Store C, clear NZV. */
        env->vfp.xregs[ARM_VFP_FPSCR] &= ~FPCR_NZCV_MASK;
        env->vfp.xregs[ARM_VFP_FPSCR] |= carry_in * FPCR_C;
    }
    mve_advance_vpt(env);
}

void HELPER(mve_vadc)(CPUARMState *env, void *vd, void *vn, void *vm)
{
    bool carry_in = env->vfp.xregs[ARM_VFP_FPSCR] & FPCR_C;
    do_vadc(env, vd, vn, vm, 0, carry_in, false);
}

void HELPER(mve_vsbc)(CPUARMState *env, void *vd, void *vn, void *vm)
{
    bool carry_in = env->vfp.xregs[ARM_VFP_FPSCR] & FPCR_C;
    do_vadc(env, vd, vn, vm, -1, carry_in, false);
}

void HELPER(mve_vadci)(CPUARMState *env, void *vd, void *vn, void *vm)
{
    do_vadc(env, vd, vn, vm, 0, 0, true);
}

void HELPER(mve_vsbci)(CPUARMState *env, void *vd, void *vn, void *vm)
{
    do_vadc(env, vd, vn, vm, -1, 1, true);
}

#define DO_VCADD(OP, ESIZE, TYPE, FN0, FN1)                             \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \
    {                                                                   \
        TYPE *d = vd, *n = vn, *m = vm;                                 \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        TYPE r[16 / ESIZE];                                             \
        /* Calculate all results first to avoid overwriting inputs */   \
        for (e = 0; e < 16 / ESIZE; e++) {                              \
            if (!(e & 1)) {                                             \
                r[e] = FN0(n[H##ESIZE(e)], m[H##ESIZE(e + 1)]);         \
            } else {                                                    \
                r[e] = FN1(n[H##ESIZE(e)], m[H##ESIZE(e - 1)]);         \
            }                                                           \
        }                                                               \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            mergemask(&d[H##ESIZE(e)], r[e], mask);                     \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

#define DO_VCADD_ALL(OP, FN0, FN1)              \
    DO_VCADD(OP##b, 1, int8_t, FN0, FN1)        \
    DO_VCADD(OP##h, 2, int16_t, FN0, FN1)       \
    DO_VCADD(OP##w, 4, int32_t, FN0, FN1)

DO_VCADD_ALL(vcadd90, DO_SUB, DO_ADD)
DO_VCADD_ALL(vcadd270, DO_ADD, DO_SUB)
DO_VCADD_ALL(vhcadd90, do_vhsub_s, do_vhadd_s)
DO_VCADD_ALL(vhcadd270, do_vhadd_s, do_vhsub_s)

static inline int32_t do_sat_bhw(int64_t val, int64_t min, int64_t max, bool *s)
{
    if (val > max) {
        *s = true;
        return max;
    } else if (val < min) {
        *s = true;
        return min;
    }
    return val;
}
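
/*
 * Example (illustrative): do_sat_bhw(300, INT8_MIN, INT8_MAX, &sat)
 * returns 127 and sets sat; the callers then fold 'sat' into FPSCR.QC
 * only for lanes whose predicate bit is set.
 */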

#define DO_SQADD_B(n, m, s) do_sat_bhw((int64_t)n + m, INT8_MIN, INT8_MAX, s)
#define DO_SQADD_H(n, m, s) do_sat_bhw((int64_t)n + m, INT16_MIN, INT16_MAX, s)
#define DO_SQADD_W(n, m, s) do_sat_bhw((int64_t)n + m, INT32_MIN, INT32_MAX, s)

#define DO_UQADD_B(n, m, s) do_sat_bhw((int64_t)n + m, 0, UINT8_MAX, s)
#define DO_UQADD_H(n, m, s) do_sat_bhw((int64_t)n + m, 0, UINT16_MAX, s)
#define DO_UQADD_W(n, m, s) do_sat_bhw((int64_t)n + m, 0, UINT32_MAX, s)

#define DO_SQSUB_B(n, m, s) do_sat_bhw((int64_t)n - m, INT8_MIN, INT8_MAX, s)
#define DO_SQSUB_H(n, m, s) do_sat_bhw((int64_t)n - m, INT16_MIN, INT16_MAX, s)
#define DO_SQSUB_W(n, m, s) do_sat_bhw((int64_t)n - m, INT32_MIN, INT32_MAX, s)

#define DO_UQSUB_B(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT8_MAX, s)
#define DO_UQSUB_H(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT16_MAX, s)
#define DO_UQSUB_W(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT32_MAX, s)

/*
 * For QDMULH and QRDMULH we simplify "double and shift by esize" into
 * "shift by esize - 1", which avoids an intermediate overflow of the
 * doubled product.
 */
#define DO_QDMULH_B(n, m, s) do_sat_bhw(((int64_t)n * m) >> 7, \
                                        INT8_MIN, INT8_MAX, s)
#define DO_QDMULH_H(n, m, s) do_sat_bhw(((int64_t)n * m) >> 15, \
                                        INT16_MIN, INT16_MAX, s)
#define DO_QDMULH_W(n, m, s) do_sat_bhw(((int64_t)n * m) >> 31, \
                                        INT32_MIN, INT32_MAX, s)

#define DO_QRDMULH_B(n, m, s) do_sat_bhw(((int64_t)n * m + (1 << 6)) >> 7, \
                                         INT8_MIN, INT8_MAX, s)
#define DO_QRDMULH_H(n, m, s) do_sat_bhw(((int64_t)n * m + (1 << 14)) >> 15, \
                                         INT16_MIN, INT16_MAX, s)
#define DO_QRDMULH_W(n, m, s) do_sat_bhw(((int64_t)n * m + (1 << 30)) >> 31, \
                                         INT32_MIN, INT32_MAX, s)

DO_2OP_SAT(vqdmulhb, 1, int8_t, DO_QDMULH_B)
DO_2OP_SAT(vqdmulhh, 2, int16_t, DO_QDMULH_H)
DO_2OP_SAT(vqdmulhw, 4, int32_t, DO_QDMULH_W)

DO_2OP_SAT(vqrdmulhb, 1, int8_t, DO_QRDMULH_B)
DO_2OP_SAT(vqrdmulhh, 2, int16_t, DO_QRDMULH_H)
DO_2OP_SAT(vqrdmulhw, 4, int32_t, DO_QRDMULH_W)
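
/*
 * Worked example (illustrative): DO_QRDMULH_H(INT16_MIN, INT16_MIN, &sat)
 * computes (0x40000000 + 0x4000) >> 15 = 0x8000, which exceeds
 * INT16_MAX, so the result saturates to 0x7fff and QC is set -- the one
 * input combination where a rounding doubling multiply-high overflows.
 */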

DO_2OP_SAT(vqaddub, 1, uint8_t, DO_UQADD_B)
DO_2OP_SAT(vqadduh, 2, uint16_t, DO_UQADD_H)
DO_2OP_SAT(vqadduw, 4, uint32_t, DO_UQADD_W)
DO_2OP_SAT(vqaddsb, 1, int8_t, DO_SQADD_B)
DO_2OP_SAT(vqaddsh, 2, int16_t, DO_SQADD_H)
DO_2OP_SAT(vqaddsw, 4, int32_t, DO_SQADD_W)

DO_2OP_SAT(vqsubub, 1, uint8_t, DO_UQSUB_B)
DO_2OP_SAT(vqsubuh, 2, uint16_t, DO_UQSUB_H)
DO_2OP_SAT(vqsubuw, 4, uint32_t, DO_UQSUB_W)
DO_2OP_SAT(vqsubsb, 1, int8_t, DO_SQSUB_B)
DO_2OP_SAT(vqsubsh, 2, int16_t, DO_SQSUB_H)
DO_2OP_SAT(vqsubsw, 4, int32_t, DO_SQSUB_W)

/*
 * This wrapper fixes up the impedance mismatch between do_sqrshl_bhs()
 * and friends [which report saturation via a uint32_t indicator] and
 * the callers in this file [which want a bool *sat flag].
 */
#define WRAP_QRSHL_HELPER(FN, N, M, ROUND, satp)                        \
    ({                                                                  \
        uint32_t su32 = 0;                                              \
        typeof(N) r = FN(N, (int8_t)(M), sizeof(N) * 8, ROUND, &su32);  \
        if (su32) {                                                     \
            *satp = true;                                               \
        }                                                               \
        r;                                                              \
    })

#define DO_SQSHL_OP(N, M, satp) \
    WRAP_QRSHL_HELPER(do_sqrshl_bhs, N, M, false, satp)
#define DO_UQSHL_OP(N, M, satp) \
    WRAP_QRSHL_HELPER(do_uqrshl_bhs, N, M, false, satp)
#define DO_SQRSHL_OP(N, M, satp) \
    WRAP_QRSHL_HELPER(do_sqrshl_bhs, N, M, true, satp)
#define DO_UQRSHL_OP(N, M, satp) \
    WRAP_QRSHL_HELPER(do_uqrshl_bhs, N, M, true, satp)
#define DO_SUQSHL_OP(N, M, satp) \
    WRAP_QRSHL_HELPER(do_suqrshl_bhs, N, M, false, satp)

DO_2OP_SAT_S(vqshls, DO_SQSHL_OP)
DO_2OP_SAT_U(vqshlu, DO_UQSHL_OP)
DO_2OP_SAT_S(vqrshls, DO_SQRSHL_OP)
DO_2OP_SAT_U(vqrshlu, DO_UQRSHL_OP)

/*
 * Multiply add dual returning high half
 * The 'FN' here takes four inputs - 2 values to multiply together,
 * and 2 values to add together; it must double the sum, round,
 * saturate and return the high half of the result. XCHG selects
 * whether the even (XCHG = 0) or odd (XCHG = 1) elements of Vd are
 * written, and ROUND is 1 for the rounding variants.
 */
#define DO_VQDMLADH_OP(OP, ESIZE, TYPE, XCHG, ROUND, FN)                \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn,   \
                                void *vm)                               \
    {                                                                   \
        TYPE *d = vd, *n = vn, *m = vm;                                 \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        bool qc = false;                                                \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            bool sat = false;                                           \
            if ((e & 1) == XCHG) {                                      \
                TYPE r = FN(n[H##ESIZE(e)],                             \
                            m[H##ESIZE(e - XCHG)],                      \
                            n[H##ESIZE(e + (1 - 2 * XCHG))],            \
                            m[H##ESIZE(e + (1 - XCHG))],                \
                            ROUND, &sat);                               \
                mergemask(&d[H##ESIZE(e)], r, mask);                    \
                qc |= sat & mask & 1;                                   \
            }                                                           \
        }                                                               \
        if (qc) {                                                       \
            env->vfp.qc[0] = qc;                                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

static int8_t do_vqdmladh_b(int8_t a, int8_t b, int8_t c, int8_t d,
                            int round, bool *sat)
{
    int64_t r = ((int64_t)a * b + (int64_t)c * d) * 2 + (round << 7);
    return do_sat_bhw(r, INT16_MIN, INT16_MAX, sat) >> 8;
}

static int16_t do_vqdmladh_h(int16_t a, int16_t b, int16_t c, int16_t d,
                             int round, bool *sat)
{
    int64_t r = ((int64_t)a * b + (int64_t)c * d) * 2 + (round << 15);
    return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat) >> 16;
}

static int32_t do_vqdmladh_w(int32_t a, int32_t b, int32_t c, int32_t d,
                             int round, bool *sat)
{
    int64_t m1 = (int64_t)a * b;
    int64_t m2 = (int64_t)c * d;
    int64_t r;
    /*
     * Architecturally we should do the entire add, double, round
     * and then check for saturation. We do three saturating adds,
     * but we need to be careful about the order. If the first
     * m1 + m2 saturates then it's impossible for the *2+rc to
     * bring it back into the non-saturated range. However, if
     * the m1 + m2 is negative then it's possible that doing the doubling
     * would take the intermediate result below INT64_MAX and the
     * addition of the rounding constant then brings it back in range.
     * So we add half the rounding constant before doubling rather
     * than adding the rounding constant after the doubling.
     */
    if (sadd64_overflow(m1, m2, &r) ||
        sadd64_overflow(r, (round << 30), &r) ||
        sadd64_overflow(r, r, &r)) {
        *sat = true;
        return r < 0 ? INT32_MAX : INT32_MIN;
    }
    return r >> 32;
}
static int8_t do_vqdmlsdh_b(int8_t a, int8_t b, int8_t c, int8_t d,
                            int round, bool *sat)
{
    int64_t r = ((int64_t)a * b - (int64_t)c * d) * 2 + (round << 7);
    return do_sat_bhw(r, INT16_MIN, INT16_MAX, sat) >> 8;
}

static int16_t do_vqdmlsdh_h(int16_t a, int16_t b, int16_t c, int16_t d,
                             int round, bool *sat)
{
    int64_t r = ((int64_t)a * b - (int64_t)c * d) * 2 + (round << 15);
    return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat) >> 16;
}

static int32_t do_vqdmlsdh_w(int32_t a, int32_t b, int32_t c, int32_t d,
                             int round, bool *sat)
{
    int64_t m1 = (int64_t)a * b;
    int64_t m2 = (int64_t)c * d;
    int64_t r;
    /* The same ordering issue as in do_vqdmladh_w applies here too */
    if (ssub64_overflow(m1, m2, &r) ||
        sadd64_overflow(r, (round << 30), &r) ||
        sadd64_overflow(r, r, &r)) {
        *sat = true;
        return r < 0 ? INT32_MAX : INT32_MIN;
    }
    return r >> 32;
}

DO_VQDMLADH_OP(vqdmladhb, 1, int8_t, 0, 0, do_vqdmladh_b)
DO_VQDMLADH_OP(vqdmladhh, 2, int16_t, 0, 0, do_vqdmladh_h)
DO_VQDMLADH_OP(vqdmladhw, 4, int32_t, 0, 0, do_vqdmladh_w)
DO_VQDMLADH_OP(vqdmladhxb, 1, int8_t, 1, 0, do_vqdmladh_b)
DO_VQDMLADH_OP(vqdmladhxh, 2, int16_t, 1, 0, do_vqdmladh_h)
DO_VQDMLADH_OP(vqdmladhxw, 4, int32_t, 1, 0, do_vqdmladh_w)

DO_VQDMLADH_OP(vqrdmladhb, 1, int8_t, 0, 1, do_vqdmladh_b)
DO_VQDMLADH_OP(vqrdmladhh, 2, int16_t, 0, 1, do_vqdmladh_h)
DO_VQDMLADH_OP(vqrdmladhw, 4, int32_t, 0, 1, do_vqdmladh_w)
DO_VQDMLADH_OP(vqrdmladhxb, 1, int8_t, 1, 1, do_vqdmladh_b)
DO_VQDMLADH_OP(vqrdmladhxh, 2, int16_t, 1, 1, do_vqdmladh_h)
DO_VQDMLADH_OP(vqrdmladhxw, 4, int32_t, 1, 1, do_vqdmladh_w)

DO_VQDMLADH_OP(vqdmlsdhb, 1, int8_t, 0, 0, do_vqdmlsdh_b)
DO_VQDMLADH_OP(vqdmlsdhh, 2, int16_t, 0, 0, do_vqdmlsdh_h)
DO_VQDMLADH_OP(vqdmlsdhw, 4, int32_t, 0, 0, do_vqdmlsdh_w)
DO_VQDMLADH_OP(vqdmlsdhxb, 1, int8_t, 1, 0, do_vqdmlsdh_b)
DO_VQDMLADH_OP(vqdmlsdhxh, 2, int16_t, 1, 0, do_vqdmlsdh_h)
DO_VQDMLADH_OP(vqdmlsdhxw, 4, int32_t, 1, 0, do_vqdmlsdh_w)

DO_VQDMLADH_OP(vqrdmlsdhb, 1, int8_t, 0, 1, do_vqdmlsdh_b)
DO_VQDMLADH_OP(vqrdmlsdhh, 2, int16_t, 0, 1, do_vqdmlsdh_h)
DO_VQDMLADH_OP(vqrdmlsdhw, 4, int32_t, 0, 1, do_vqdmlsdh_w)
DO_VQDMLADH_OP(vqrdmlsdhxb, 1, int8_t, 1, 1, do_vqdmlsdh_b)
DO_VQDMLADH_OP(vqrdmlsdhxh, 2, int16_t, 1, 1, do_vqdmlsdh_h)
DO_VQDMLADH_OP(vqrdmlsdhxw, 4, int32_t, 1, 1, do_vqdmlsdh_w)

#define DO_2OP_SCALAR(OP, ESIZE, TYPE, FN)                              \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn,   \
                                uint32_t rm)                            \
    {                                                                   \
        TYPE *d = vd, *n = vn;                                          \
        TYPE m = rm;                                                    \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            mergemask(&d[H##ESIZE(e)], FN(n[H##ESIZE(e)], m), mask);    \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

#define DO_2OP_SAT_SCALAR(OP, ESIZE, TYPE, FN)                          \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn,   \
                                uint32_t rm)                            \
    {                                                                   \
        TYPE *d = vd, *n = vn;                                          \
        TYPE m = rm;                                                    \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        bool qc = false;                                                \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            bool sat = false;                                           \
            mergemask(&d[H##ESIZE(e)], FN(n[H##ESIZE(e)], m, &sat),     \
                      mask);                                            \
            qc |= sat & mask & 1;                                       \
        }                                                               \
        if (qc) {                                                       \
            env->vfp.qc[0] = qc;                                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

/* "accumulating" version where FN takes d as well as n and m */
#define DO_2OP_ACC_SCALAR(OP, ESIZE, TYPE, FN)                          \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn,   \
                                uint32_t rm)                            \
    {                                                                   \
        TYPE *d = vd, *n = vn;                                          \
        TYPE m = rm;                                                    \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            mergemask(&d[H##ESIZE(e)],                                  \
                      FN(d[H##ESIZE(e)], n[H##ESIZE(e)], m), mask);     \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

#define DO_2OP_SAT_ACC_SCALAR(OP, ESIZE, TYPE, FN)                      \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn,   \
                                uint32_t rm)                            \
    {                                                                   \
        TYPE *d = vd, *n = vn;                                          \
        TYPE m = rm;                                                    \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        bool qc = false;                                                \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            bool sat = false;                                           \
            mergemask(&d[H##ESIZE(e)],                                  \
                      FN(d[H##ESIZE(e)], n[H##ESIZE(e)], m, &sat),      \
                      mask);                                            \
            qc |= sat & mask & 1;                                       \
        }                                                               \
        if (qc) {                                                       \
            env->vfp.qc[0] = qc;                                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

/* provide unsigned 2-op scalar helpers for all sizes */
#define DO_2OP_SCALAR_U(OP, FN)                 \
    DO_2OP_SCALAR(OP##b, 1, uint8_t, FN)        \
    DO_2OP_SCALAR(OP##h, 2, uint16_t, FN)       \
    DO_2OP_SCALAR(OP##w, 4, uint32_t, FN)
#define DO_2OP_SCALAR_S(OP, FN)                 \
    DO_2OP_SCALAR(OP##b, 1, int8_t, FN)         \
    DO_2OP_SCALAR(OP##h, 2, int16_t, FN)        \
    DO_2OP_SCALAR(OP##w, 4, int32_t, FN)

#define DO_2OP_ACC_SCALAR_U(OP, FN)             \
    DO_2OP_ACC_SCALAR(OP##b, 1, uint8_t, FN)    \
    DO_2OP_ACC_SCALAR(OP##h, 2, uint16_t, FN)   \
    DO_2OP_ACC_SCALAR(OP##w, 4, uint32_t, FN)

DO_2OP_SCALAR_U(vadd_scalar, DO_ADD)
DO_2OP_SCALAR_U(vsub_scalar, DO_SUB)
DO_2OP_SCALAR_U(vmul_scalar, DO_MUL)
DO_2OP_SCALAR_S(vhadds_scalar, do_vhadd_s)
DO_2OP_SCALAR_U(vhaddu_scalar, do_vhadd_u)
DO_2OP_SCALAR_S(vhsubs_scalar, do_vhsub_s)
DO_2OP_SCALAR_U(vhsubu_scalar, do_vhsub_u)

DO_2OP_SAT_SCALAR(vqaddu_scalarb, 1, uint8_t, DO_UQADD_B)
DO_2OP_SAT_SCALAR(vqaddu_scalarh, 2, uint16_t, DO_UQADD_H)
DO_2OP_SAT_SCALAR(vqaddu_scalarw, 4, uint32_t, DO_UQADD_W)
DO_2OP_SAT_SCALAR(vqadds_scalarb, 1, int8_t, DO_SQADD_B)
DO_2OP_SAT_SCALAR(vqadds_scalarh, 2, int16_t, DO_SQADD_H)
DO_2OP_SAT_SCALAR(vqadds_scalarw, 4, int32_t, DO_SQADD_W)

DO_2OP_SAT_SCALAR(vqsubu_scalarb, 1, uint8_t, DO_UQSUB_B)
DO_2OP_SAT_SCALAR(vqsubu_scalarh, 2, uint16_t, DO_UQSUB_H)
DO_2OP_SAT_SCALAR(vqsubu_scalarw, 4, uint32_t, DO_UQSUB_W)
DO_2OP_SAT_SCALAR(vqsubs_scalarb, 1, int8_t, DO_SQSUB_B)
DO_2OP_SAT_SCALAR(vqsubs_scalarh, 2, int16_t, DO_SQSUB_H)
DO_2OP_SAT_SCALAR(vqsubs_scalarw, 4, int32_t, DO_SQSUB_W)

DO_2OP_SAT_SCALAR(vqdmulh_scalarb, 1, int8_t, DO_QDMULH_B)
DO_2OP_SAT_SCALAR(vqdmulh_scalarh, 2, int16_t, DO_QDMULH_H)
DO_2OP_SAT_SCALAR(vqdmulh_scalarw, 4, int32_t, DO_QDMULH_W)
DO_2OP_SAT_SCALAR(vqrdmulh_scalarb, 1, int8_t, DO_QRDMULH_B)
DO_2OP_SAT_SCALAR(vqrdmulh_scalarh, 2, int16_t, DO_QRDMULH_H)
DO_2OP_SAT_SCALAR(vqrdmulh_scalarw, 4, int32_t, DO_QRDMULH_W)

static int8_t do_vqdmlah_b(int8_t a, int8_t b, int8_t c, int round, bool *sat)
{
    int64_t r = (int64_t)a * b * 2 + ((int64_t)c << 8) + (round << 7);
    return do_sat_bhw(r, INT16_MIN, INT16_MAX, sat) >> 8;
}

static int16_t do_vqdmlah_h(int16_t a, int16_t b, int16_t c,
                            int round, bool *sat)
{
    int64_t r = (int64_t)a * b * 2 + ((int64_t)c << 16) + (round << 15);
    return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat) >> 16;
}

static int32_t do_vqdmlah_w(int32_t a, int32_t b, int32_t c,
                            int round, bool *sat)
{
    /*
     * Architecturally we should do the entire add, double, round
     * and then check for saturation. We do three saturating adds,
     * but we need to be careful about the order. If the first
     * m1 + m2 saturates then it's impossible for the *2+rc to
     * bring it back into the non-saturated range. However, if
     * the m1 + m2 is negative then it's possible that doing the doubling
     * would take the intermediate result below INT64_MAX and the
     * addition of the rounding constant then brings it back in range.
     * So we add half the rounding constant before doubling rather
     * than adding the rounding constant after the doubling.
     */
    int64_t m1 = (int64_t)a * b;
    int64_t m2 = (int64_t)c << 31;
    int64_t r;
    if (sadd64_overflow(m1, m2, &r) ||
        sadd64_overflow(r, (round << 30), &r) ||
        sadd64_overflow(r, r, &r)) {
        *sat = true;
        return r < 0 ? INT32_MAX : INT32_MIN;
    }
    return r >> 32;
}

/*
 * The *VQDMLAH insns are vector * scalar + vector;
 * the *VQDMLASH insns are vector * vector + scalar
 */
#define DO_VQDMLAH_B(D, N, M, S) do_vqdmlah_b(N, M, D, 0, S)
#define DO_VQDMLAH_H(D, N, M, S) do_vqdmlah_h(N, M, D, 0, S)
#define DO_VQDMLAH_W(D, N, M, S) do_vqdmlah_w(N, M, D, 0, S)
#define DO_VQRDMLAH_B(D, N, M, S) do_vqdmlah_b(N, M, D, 1, S)
#define DO_VQRDMLAH_H(D, N, M, S) do_vqdmlah_h(N, M, D, 1, S)
#define DO_VQRDMLAH_W(D, N, M, S) do_vqdmlah_w(N, M, D, 1, S)

#define DO_VQDMLASH_B(D, N, M, S) do_vqdmlah_b(N, D, M, 0, S)
#define DO_VQDMLASH_H(D, N, M, S) do_vqdmlah_h(N, D, M, 0, S)
#define DO_VQDMLASH_W(D, N, M, S) do_vqdmlah_w(N, D, M, 0, S)
#define DO_VQRDMLASH_B(D, N, M, S) do_vqdmlah_b(N, D, M, 1, S)
#define DO_VQRDMLASH_H(D, N, M, S) do_vqdmlah_h(N, D, M, 1, S)
#define DO_VQRDMLASH_W(D, N, M, S) do_vqdmlah_w(N, D, M, 1, S)

DO_2OP_SAT_ACC_SCALAR(vqdmlahb, 1, int8_t, DO_VQDMLAH_B)
DO_2OP_SAT_ACC_SCALAR(vqdmlahh, 2, int16_t, DO_VQDMLAH_H)
DO_2OP_SAT_ACC_SCALAR(vqdmlahw, 4, int32_t, DO_VQDMLAH_W)
DO_2OP_SAT_ACC_SCALAR(vqrdmlahb, 1, int8_t, DO_VQRDMLAH_B)
DO_2OP_SAT_ACC_SCALAR(vqrdmlahh, 2, int16_t, DO_VQRDMLAH_H)
DO_2OP_SAT_ACC_SCALAR(vqrdmlahw, 4, int32_t, DO_VQRDMLAH_W)

DO_2OP_SAT_ACC_SCALAR(vqdmlashb, 1, int8_t, DO_VQDMLASH_B)
DO_2OP_SAT_ACC_SCALAR(vqdmlashh, 2, int16_t, DO_VQDMLASH_H)
DO_2OP_SAT_ACC_SCALAR(vqdmlashw, 4, int32_t, DO_VQDMLASH_W)
DO_2OP_SAT_ACC_SCALAR(vqrdmlashb, 1, int8_t, DO_VQRDMLASH_B)
DO_2OP_SAT_ACC_SCALAR(vqrdmlashh, 2, int16_t, DO_VQRDMLASH_H)
DO_2OP_SAT_ACC_SCALAR(vqrdmlashw, 4, int32_t, DO_VQRDMLASH_W)

/* Vector by scalar plus vector */
#define DO_VMLA(D, N, M) ((N) * (M) + (D))

DO_2OP_ACC_SCALAR_U(vmla, DO_VMLA)

/* Vector by vector plus scalar */
#define DO_VMLAS(D, N, M) ((N) * (D) + (M))

DO_2OP_ACC_SCALAR_U(vmlas, DO_VMLAS)

/*
 * Long saturating scalar ops. As with DO_2OP_L, TYPE and H are for the
 * input (smaller) type and LESIZE, LTYPE for the output (long) type.
 * SATMASK specifies which bits of the predicate mask matter for
 * determining whether to propagate a saturation indication into
 * FPSCR.QC -- for the 16x16->32 case we must check only the bit
 * corresponding to the T or B half that we used, but for the
 * 32x32->64 case we propagate if the mask is set for either half.
 */
#define DO_2OP_SAT_SCALAR_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN, SATMASK) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn,   \
                                uint32_t rm)                            \
    {                                                                   \
        LTYPE *d = vd;                                                  \
        TYPE *n = vn;                                                   \
        TYPE m = rm;                                                    \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned le;                                                    \
        bool qc = false;                                                \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) {         \
            bool sat = false;                                           \
            LTYPE r = FN((LTYPE)n[H##ESIZE(le * 2 + TOP)], m, &sat);    \
            mergemask(&d[H##LESIZE(le)], r, mask);                      \
            qc |= sat && (mask & SATMASK);                              \
        }                                                               \
        if (qc) {                                                       \
            env->vfp.qc[0] = qc;                                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

static inline int32_t do_qdmullh(int16_t n, int16_t m, bool *sat)
{
    int64_t r = ((int64_t)n * m) * 2;
    return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat);
}

static inline int64_t do_qdmullw(int32_t n, int32_t m, bool *sat)
{
    /* The multiply can't overflow, but the doubling might */
    int64_t r = (int64_t)n * m;
    if (r > INT64_MAX / 2) {
        *sat = true;
        return INT64_MAX;
    } else if (r < INT64_MIN / 2) {
        *sat = true;
        return INT64_MIN;
    } else {
        return r * 2;
    }
}

#define SATMASK16B 1
#define SATMASK16T (1 << 2)
#define SATMASK32 ((1 << 4) | 1)
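
/*
 * Illustrative note: each 32x32->64 result covers eight predicate bits,
 * of which SATMASK32 checks bits 0 and 4 (one per 32-bit half of the
 * lane); SATMASK16T is 1 << 2 because the "top" 16-bit input of a pair
 * sits at predicate bits 2-3, while SATMASK16B checks bit 0.
 */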

DO_2OP_SAT_SCALAR_L(vqdmullb_scalarh, 0, 2, int16_t, 4, int32_t, \
                    do_qdmullh, SATMASK16B)
DO_2OP_SAT_SCALAR_L(vqdmullb_scalarw, 0, 4, int32_t, 8, int64_t, \
                    do_qdmullw, SATMASK32)
DO_2OP_SAT_SCALAR_L(vqdmullt_scalarh, 1, 2, int16_t, 4, int32_t, \
                    do_qdmullh, SATMASK16T)
DO_2OP_SAT_SCALAR_L(vqdmullt_scalarw, 1, 4, int32_t, 8, int64_t, \
                    do_qdmullw, SATMASK32)

/*
 * Long saturating ops
 */
#define DO_2OP_SAT_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN, SATMASK)  \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn,   \
                                void *vm)                               \
    {                                                                   \
        LTYPE *d = vd;                                                  \
        TYPE *n = vn, *m = vm;                                          \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned le;                                                    \
        bool qc = false;                                                \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) {         \
            bool sat = false;                                           \
            LTYPE op1 = n[H##ESIZE(le * 2 + TOP)];                      \
            LTYPE op2 = m[H##ESIZE(le * 2 + TOP)];                      \
            mergemask(&d[H##LESIZE(le)], FN(op1, op2, &sat), mask);     \
            qc |= sat && (mask & SATMASK);                              \
        }                                                               \
        if (qc) {                                                       \
            env->vfp.qc[0] = qc;                                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

DO_2OP_SAT_L(vqdmullbh, 0, 2, int16_t, 4, int32_t, do_qdmullh, SATMASK16B)
DO_2OP_SAT_L(vqdmullbw, 0, 4, int32_t, 8, int64_t, do_qdmullw, SATMASK32)
DO_2OP_SAT_L(vqdmullth, 1, 2, int16_t, 4, int32_t, do_qdmullh, SATMASK16T)
DO_2OP_SAT_L(vqdmulltw, 1, 4, int32_t, 8, int64_t, do_qdmullw, SATMASK32)

static inline uint32_t do_vbrsrb(uint32_t n, uint32_t m)
{
    m &= 0xff;
    if (m == 0) {
        return 0;
    }
    n = revbit8(n);
    if (m < 8) {
        n >>= 8 - m;
    }
    return n;
}

static inline uint32_t do_vbrsrh(uint32_t n, uint32_t m)
{
    m &= 0xff;
    if (m == 0) {
        return 0;
    }
    n = revbit16(n);
    if (m < 16) {
        n >>= 16 - m;
    }
    return n;
}

static inline uint32_t do_vbrsrw(uint32_t n, uint32_t m)
{
    m &= 0xff;
    if (m == 0) {
        return 0;
    }
    n = revbit32(n);
    if (m < 32) {
        n >>= 32 - m;
    }
    return n;
}

DO_2OP_SCALAR(vbrsrb, 1, uint8_t, do_vbrsrb)
DO_2OP_SCALAR(vbrsrh, 2, uint16_t, do_vbrsrh)
DO_2OP_SCALAR(vbrsrw, 4, uint32_t, do_vbrsrw)
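
/*
 * Example (illustrative): do_vbrsrh(0x0001, 16) bit-reverses the whole
 * halfword, giving 0x8000, while do_vbrsrh(0x0001, 4) reverses only
 * the low four bits, giving 0x0008.
 */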

/*
 * Multiply add long dual accumulate ops.
 */
#define DO_LDAV(OP, ESIZE, TYPE, XCHG, EVENACC, ODDACC)                 \
    uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn,         \
                                    void *vm, uint64_t a)               \
    {                                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        TYPE *n = vn, *m = vm;                                          \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            if (mask & 1) {                                             \
                if (e & 1) {                                            \
                    a ODDACC                                            \
                        (int64_t)n[H##ESIZE(e - 1 * XCHG)] * m[H##ESIZE(e)]; \
                } else {                                                \
                    a EVENACC                                           \
                        (int64_t)n[H##ESIZE(e + 1 * XCHG)] * m[H##ESIZE(e)]; \
                }                                                       \
            }                                                           \
        }                                                               \
        mve_advance_vpt(env);                                           \
        return a;                                                       \
    }

DO_LDAV(vmlaldavsh, 2, int16_t, false, +=, +=)
DO_LDAV(vmlaldavxsh, 2, int16_t, true, +=, +=)
DO_LDAV(vmlaldavsw, 4, int32_t, false, +=, +=)
DO_LDAV(vmlaldavxsw, 4, int32_t, true, +=, +=)

DO_LDAV(vmlaldavuh, 2, uint16_t, false, +=, +=)
DO_LDAV(vmlaldavuw, 4, uint32_t, false, +=, +=)

DO_LDAV(vmlsldavsh, 2, int16_t, false, +=, -=)
DO_LDAV(vmlsldavxsh, 2, int16_t, true, +=, -=)
DO_LDAV(vmlsldavsw, 4, int32_t, false, +=, -=)
DO_LDAV(vmlsldavxsw, 4, int32_t, true, +=, -=)

/*
 * Multiply add dual accumulate ops
 */
#define DO_DAV(OP, ESIZE, TYPE, XCHG, EVENACC, ODDACC)                  \
    uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn,         \
                                    void *vm, uint32_t a)               \
    {                                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        TYPE *n = vn, *m = vm;                                          \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            if (mask & 1) {                                             \
                if (e & 1) {                                            \
                    a ODDACC                                            \
                        n[H##ESIZE(e - 1 * XCHG)] * m[H##ESIZE(e)];     \
                } else {                                                \
                    a EVENACC                                           \
                        n[H##ESIZE(e + 1 * XCHG)] * m[H##ESIZE(e)];     \
                }                                                       \
            }                                                           \
        }                                                               \
        mve_advance_vpt(env);                                           \
        return a;                                                       \
    }

#define DO_DAV_S(INSN, XCHG, EVENACC, ODDACC)           \
    DO_DAV(INSN##b, 1, int8_t, XCHG, EVENACC, ODDACC)   \
    DO_DAV(INSN##h, 2, int16_t, XCHG, EVENACC, ODDACC)  \
    DO_DAV(INSN##w, 4, int32_t, XCHG, EVENACC, ODDACC)

#define DO_DAV_U(INSN, XCHG, EVENACC, ODDACC)           \
    DO_DAV(INSN##b, 1, uint8_t, XCHG, EVENACC, ODDACC)  \
    DO_DAV(INSN##h, 2, uint16_t, XCHG, EVENACC, ODDACC) \
    DO_DAV(INSN##w, 4, uint32_t, XCHG, EVENACC, ODDACC)

DO_DAV_S(vmladavs, false, +=, +=)
DO_DAV_U(vmladavu, false, +=, +=)
DO_DAV_S(vmlsdav, false, +=, -=)
DO_DAV_S(vmladavsx, true, +=, +=)
DO_DAV_S(vmlsdavx, true, +=, -=)

/*
 * Rounding multiply add long dual accumulate high. In the pseudocode
 * this is implemented with a 72-bit internal accumulator value of which
 * the top 64 bits are returned. We optimize this to avoid having to
 * use 128-bit arithmetic -- we can do this because the 74-bit accumulator
 * is squashed back into 64-bits after each beat.
 */
#define DO_LDAVH(OP, TYPE, LTYPE, XCHG, SUB)                            \
    uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn,         \
                                    void *vm, uint64_t a)               \
    {                                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        TYPE *n = vn, *m = vm;                                          \
        for (e = 0; e < 16 / 4; e++, mask >>= 4) {                      \
            if (mask & 1) {                                             \
                LTYPE mul;                                              \
                if (e & 1) {                                            \
                    mul = (LTYPE)n[H4(e - 1 * XCHG)] * m[H4(e)];        \
                    if (SUB) {                                          \
                        mul = -mul;                                     \
                    }                                                   \
                } else {                                                \
                    mul = (LTYPE)n[H4(e + 1 * XCHG)] * m[H4(e)];        \
                }                                                       \
                mul = (mul >> 8) + ((mul >> 7) & 1);                    \
                a += mul;                                               \
            }                                                           \
        }                                                               \
        mve_advance_vpt(env);                                           \
        return a;                                                       \
    }

DO_LDAVH(vrmlaldavhsw, int32_t, int64_t, false, false)
DO_LDAVH(vrmlaldavhxsw, int32_t, int64_t, true, false)

DO_LDAVH(vrmlaldavhuw, uint32_t, uint64_t, false, false)

DO_LDAVH(vrmlsldavhsw, int32_t, int64_t, false, true)
DO_LDAVH(vrmlsldavhxsw, int32_t, int64_t, true, true)

/* Vector add across vector */
#define DO_VADDV(OP, ESIZE, TYPE)                               \
    uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \
                                    uint32_t ra)                \
    {                                                           \
        uint16_t mask = mve_element_mask(env);                  \
        unsigned e;                                             \
        TYPE *m = vm;                                           \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {      \
            if (mask & 1) {                                     \
                ra += m[H##ESIZE(e)];                           \
            }                                                   \
        }                                                       \
        mve_advance_vpt(env);                                   \
        return ra;                                              \
    }                                                           \

DO_VADDV(vaddvsb, 1, int8_t)
DO_VADDV(vaddvsh, 2, int16_t)
DO_VADDV(vaddvsw, 4, int32_t)
DO_VADDV(vaddvub, 1, uint8_t)
DO_VADDV(vaddvuh, 2, uint16_t)
DO_VADDV(vaddvuw, 4, uint32_t)

/*
 * Vector max/min across vector. Unlike VADDV, we must
 * read ra as the element size, not its full width.
 * We work with int64_t internally for simplicity.
 */
#define DO_VMAXMINV(OP, ESIZE, TYPE, RATYPE, FN)                \
    uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \
                                    uint32_t ra_in)             \
    {                                                           \
        uint16_t mask = mve_element_mask(env);                  \
        unsigned e;                                             \
        TYPE *m = vm;                                           \
        int64_t ra = (RATYPE)ra_in;                             \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {      \
            if (mask & 1) {                                     \
                ra = FN(ra, m[H##ESIZE(e)]);                    \
            }                                                   \
        }                                                       \
        mve_advance_vpt(env);                                   \
        return ra;                                              \
    }                                                           \

#define DO_VMAXMINV_U(INSN, FN)                         \
    DO_VMAXMINV(INSN##b, 1, uint8_t, uint8_t, FN)       \
    DO_VMAXMINV(INSN##h, 2, uint16_t, uint16_t, FN)     \
    DO_VMAXMINV(INSN##w, 4, uint32_t, uint32_t, FN)
#define DO_VMAXMINV_S(INSN, FN)                         \
    DO_VMAXMINV(INSN##b, 1, int8_t, int8_t, FN)         \
    DO_VMAXMINV(INSN##h, 2, int16_t, int16_t, FN)       \
    DO_VMAXMINV(INSN##w, 4, int32_t, int32_t, FN)

/*
 * Helpers for max and min of absolute values across vector:
 * note that we only take the absolute value of 'm', not 'n'
 */
static int64_t do_maxa(int64_t n, int64_t m)
{
    if (m < 0) {
        m = -m;
    }
    return MAX(n, m);
}

static int64_t do_mina(int64_t n, int64_t m)
{
    if (m < 0) {
        m = -m;
    }
    return MIN(n, m);
}

DO_VMAXMINV_S(vmaxvs, DO_MAX)
DO_VMAXMINV_U(vmaxvu, DO_MAX)
DO_VMAXMINV_S(vminvs, DO_MIN)
DO_VMAXMINV_U(vminvu, DO_MIN)

/*
 * VMAXAV, VMINAV treat the general purpose input as unsigned
 * and the vector elements as signed.
 */
DO_VMAXMINV(vmaxavb, 1, int8_t, uint8_t, do_maxa)
DO_VMAXMINV(vmaxavh, 2, int16_t, uint16_t, do_maxa)
DO_VMAXMINV(vmaxavw, 4, int32_t, uint32_t, do_maxa)
DO_VMAXMINV(vminavb, 1, int8_t, uint8_t, do_mina)
DO_VMAXMINV(vminavh, 2, int16_t, uint16_t, do_mina)
DO_VMAXMINV(vminavw, 4, int32_t, uint32_t, do_mina)

#define DO_VABAV(OP, ESIZE, TYPE)                               \
    uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \
                                    void *vm, uint32_t ra)      \
    {                                                           \
        uint16_t mask = mve_element_mask(env);                  \
        unsigned e;                                             \
        TYPE *m = vm, *n = vn;                                  \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {      \
            if (mask & 1) {                                     \
                int64_t n0 = n[H##ESIZE(e)];                    \
                int64_t m0 = m[H##ESIZE(e)];                    \
                uint32_t r = n0 >= m0 ? (n0 - m0) : (m0 - n0);  \
                ra += r;                                        \
            }                                                   \
        }                                                       \
        mve_advance_vpt(env);                                   \
        return ra;                                              \
    }

DO_VABAV(vabavsb, 1, int8_t)
DO_VABAV(vabavsh, 2, int16_t)
DO_VABAV(vabavsw, 4, int32_t)
DO_VABAV(vabavub, 1, uint8_t)
DO_VABAV(vabavuh, 2, uint16_t)
DO_VABAV(vabavuw, 4, uint32_t)

#define DO_VADDLV(OP, TYPE, LTYPE)                              \
    uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \
                                    uint64_t ra)                \
    {                                                           \
        uint16_t mask = mve_element_mask(env);                  \
        unsigned e;                                             \
        TYPE *m = vm;                                           \
        for (e = 0; e < 16 / 4; e++, mask >>= 4) {              \
            if (mask & 1) {                                     \
                ra += (LTYPE)m[H4(e)];                          \
            }                                                   \
        }                                                       \
        mve_advance_vpt(env);                                   \
        return ra;                                              \
    }                                                           \

DO_VADDLV(vaddlv_s, int32_t, int64_t)
DO_VADDLV(vaddlv_u, uint32_t, uint64_t)

/* Shifts by immediate */
#define DO_2SHIFT(OP, ESIZE, TYPE, FN)                          \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd,     \
                                void *vm, uint32_t shift)       \
    {                                                           \
        TYPE *d = vd, *m = vm;                                  \
        uint16_t mask = mve_element_mask(env);                  \
        unsigned e;                                             \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {      \
            mergemask(&d[H##ESIZE(e)],                          \
                      FN(m[H##ESIZE(e)], shift), mask);         \
        }                                                       \
        mve_advance_vpt(env);                                   \
    }

#define DO_2SHIFT_SAT(OP, ESIZE, TYPE, FN)                      \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd,     \
                                void *vm, uint32_t shift)       \
    {                                                           \
        TYPE *d = vd, *m = vm;                                  \
        uint16_t mask = mve_element_mask(env);                  \
        unsigned e;                                             \
        bool qc = false;                                        \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {      \
            bool sat = false;                                   \
            mergemask(&d[H##ESIZE(e)],                          \
                      FN(m[H##ESIZE(e)], shift, &sat), mask);   \
            qc |= sat & mask & 1;                               \
        }                                                       \
        if (qc) {                                               \
            env->vfp.qc[0] = qc;                                \
        }                                                       \
        mve_advance_vpt(env);                                   \
    }

/* provide unsigned 2-op shift helpers for all sizes */
2044#define DO_2SHIFT_U(OP, FN) \
2045 DO_2SHIFT(OP##b, 1, uint8_t, FN) \
2046 DO_2SHIFT(OP##h, 2, uint16_t, FN) \
2047 DO_2SHIFT(OP##w, 4, uint32_t, FN)
2048#define DO_2SHIFT_S(OP, FN) \
2049 DO_2SHIFT(OP##b, 1, int8_t, FN) \
2050 DO_2SHIFT(OP##h, 2, int16_t, FN) \
2051 DO_2SHIFT(OP##w, 4, int32_t, FN)
2052
2053#define DO_2SHIFT_SAT_U(OP, FN) \
2054 DO_2SHIFT_SAT(OP##b, 1, uint8_t, FN) \
2055 DO_2SHIFT_SAT(OP##h, 2, uint16_t, FN) \
2056 DO_2SHIFT_SAT(OP##w, 4, uint32_t, FN)
2057#define DO_2SHIFT_SAT_S(OP, FN) \
2058 DO_2SHIFT_SAT(OP##b, 1, int8_t, FN) \
2059 DO_2SHIFT_SAT(OP##h, 2, int16_t, FN) \
2060 DO_2SHIFT_SAT(OP##w, 4, int32_t, FN)
2061
2062DO_2SHIFT_U(vshli_u, DO_VSHLU)
2063DO_2SHIFT_S(vshli_s, DO_VSHLS)
2064DO_2SHIFT_SAT_U(vqshli_u, DO_UQSHL_OP)
2065DO_2SHIFT_SAT_S(vqshli_s, DO_SQSHL_OP)
2066DO_2SHIFT_SAT_S(vqshlui_s, DO_SUQSHL_OP)
2067DO_2SHIFT_U(vrshli_u, DO_VRSHLU)
2068DO_2SHIFT_S(vrshli_s, DO_VRSHLS)
2069DO_2SHIFT_SAT_U(vqrshli_u, DO_UQRSHL_OP)
2070DO_2SHIFT_SAT_S(vqrshli_s, DO_SQRSHL_OP)
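
/*
 * Illustrative example: vshli_ub with shift == 3 left-shifts each
 * byte element by 3; the saturating form vqshli_sb additionally
 * clamps, so e.g. an int8_t element of 0x40 shifted left by 1
 * saturates to 0x7f and sets QC.
 */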

/* Shift-and-insert; we always work with 64 bits at a time */
#define DO_2SHIFT_INSERT(OP, ESIZE, SHIFTFN, MASKFN)                    \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd,             \
                                void *vm, uint32_t shift)               \
    {                                                                   \
        uint64_t *d = vd, *m = vm;                                      \
        uint16_t mask;                                                  \
        uint64_t shiftmask;                                             \
        unsigned e;                                                     \
        if (shift == ESIZE * 8) {                                       \
            /*                                                          \
             * Only VSRI can shift by <dt>; it should mean "don't       \
             * update the destination". The generic logic can't handle  \
             * that, so we special-case it here.                        \
             */                                                         \
            goto done;                                                  \
        }                                                               \
        assert(shift < ESIZE * 8);                                      \
        mask = mve_element_mask(env);                                   \
        /* ESIZE / 2 gives the MO_* value if ESIZE is in [1,2,4] */     \
        shiftmask = dup_const(ESIZE / 2, MASKFN(ESIZE * 8, shift));     \
        for (e = 0; e < 16 / 8; e++, mask >>= 8) {                      \
            uint64_t r = (SHIFTFN(m[H8(e)], shift) & shiftmask) |       \
                         (d[H8(e)] & ~shiftmask);                       \
            mergemask(&d[H8(e)], r, mask);                              \
        }                                                               \
done:                                                                   \
        mve_advance_vpt(env);                                           \
    }

#define DO_SHL(N, SHIFT) ((N) << (SHIFT))
#define DO_SHR(N, SHIFT) ((N) >> (SHIFT))
#define SHL_MASK(EBITS, SHIFT) MAKE_64BIT_MASK((SHIFT), (EBITS) - (SHIFT))
#define SHR_MASK(EBITS, SHIFT) MAKE_64BIT_MASK(0, (EBITS) - (SHIFT))

DO_2SHIFT_INSERT(vsrib, 1, DO_SHR, SHR_MASK)
DO_2SHIFT_INSERT(vsrih, 2, DO_SHR, SHR_MASK)
DO_2SHIFT_INSERT(vsriw, 4, DO_SHR, SHR_MASK)
DO_2SHIFT_INSERT(vslib, 1, DO_SHL, SHL_MASK)
DO_2SHIFT_INSERT(vslih, 2, DO_SHL, SHL_MASK)
DO_2SHIFT_INSERT(vsliw, 4, DO_SHL, SHL_MASK)
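
/*
 * Illustrative example: vsrib with shift == 2 computes, per byte,
 * (m >> 2) in the low 6 bits while keeping the top 2 bits of d;
 * vslib with shift == 2 computes (m << 2) in the top 6 bits while
 * keeping the low 2 bits of d.
 */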

/*
 * Long shifts taking half-sized inputs from top or bottom of the input
 * vector and producing a double-width result. ESIZE, TYPE are for
 * the input, and LESIZE, LTYPE for the output.
 * Unlike the normal shift helpers, we do not handle negative shift
 * counts, because the long shift is strictly left-only.
 */
#define DO_VSHLL(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE)           \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd,     \
                                void *vm, uint32_t shift)       \
    {                                                           \
        LTYPE *d = vd;                                          \
        TYPE *m = vm;                                           \
        uint16_t mask = mve_element_mask(env);                  \
        unsigned le;                                            \
        assert(shift <= 16);                                    \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
            LTYPE r = (LTYPE)m[H##ESIZE(le * 2 + TOP)] << shift; \
            mergemask(&d[H##LESIZE(le)], r, mask);              \
        }                                                       \
        mve_advance_vpt(env);                                   \
    }

#define DO_VSHLL_ALL(OP, TOP)                                \
    DO_VSHLL(OP##sb, TOP, 1, int8_t, 2, int16_t)             \
    DO_VSHLL(OP##ub, TOP, 1, uint8_t, 2, uint16_t)           \
    DO_VSHLL(OP##sh, TOP, 2, int16_t, 4, int32_t)            \
    DO_VSHLL(OP##uh, TOP, 2, uint16_t, 4, uint32_t)

DO_VSHLL_ALL(vshllb, false)
DO_VSHLL_ALL(vshllt, true)

/*
 * Narrowing right shifts, taking a double sized input, shifting it
 * and putting the result in either the top or bottom half of the output.
 * ESIZE, TYPE are the output, and LESIZE, LTYPE the input.
 */
#define DO_VSHRN(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN)       \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd,     \
                                void *vm, uint32_t shift)       \
    {                                                           \
        LTYPE *m = vm;                                          \
        TYPE *d = vd;                                           \
        uint16_t mask = mve_element_mask(env);                  \
        unsigned le;                                            \
        mask >>= ESIZE * TOP;                                   \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
            TYPE r = FN(m[H##LESIZE(le)], shift);               \
            mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask);     \
        }                                                       \
        mve_advance_vpt(env);                                   \
    }

#define DO_VSHRN_ALL(OP, FN)                                    \
    DO_VSHRN(OP##bb, false, 1, uint8_t, 2, uint16_t, FN)        \
    DO_VSHRN(OP##bh, false, 2, uint16_t, 4, uint32_t, FN)       \
    DO_VSHRN(OP##tb, true, 1, uint8_t, 2, uint16_t, FN)         \
    DO_VSHRN(OP##th, true, 2, uint16_t, 4, uint32_t, FN)

static inline uint64_t do_urshr(uint64_t x, unsigned sh)
{
    if (likely(sh < 64)) {
        return (x >> sh) + ((x >> (sh - 1)) & 1);
    } else if (sh == 64) {
        return x >> 63;
    } else {
        return 0;
    }
}

static inline int64_t do_srshr(int64_t x, unsigned sh)
{
    if (likely(sh < 64)) {
        return (x >> sh) + ((x >> (sh - 1)) & 1);
    } else {
        /* Rounding the sign bit always produces 0. */
        return 0;
    }
}
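
/*
 * Rounding example: do_urshr(6, 2) is (6 >> 2) + ((6 >> 1) & 1)
 * = 1 + 1 = 2, i.e. 6/4 = 1.5 rounded to nearest: the most
 * significant dropped bit is added back in.
 */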

DO_VSHRN_ALL(vshrn, DO_SHR)
DO_VSHRN_ALL(vrshrn, do_urshr)

static inline int32_t do_sat_bhs(int64_t val, int64_t min, int64_t max,
                                 bool *satp)
{
    if (val > max) {
        *satp = true;
        return max;
    } else if (val < min) {
        *satp = true;
        return min;
    } else {
        return val;
    }
}
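
/*
 * For instance, do_sat_bhs(300, INT8_MIN, INT8_MAX, &sat) returns 127
 * and sets sat, which the callers fold into the cumulative QC flag.
 */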

#define DO_VSHRN_SAT(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN)   \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd,     \
                                void *vm, uint32_t shift)       \
    {                                                           \
        LTYPE *m = vm;                                          \
        TYPE *d = vd;                                           \
        uint16_t mask = mve_element_mask(env);                  \
        bool qc = false;                                        \
        unsigned le;                                            \
        mask >>= ESIZE * TOP;                                   \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
            bool sat = false;                                   \
            TYPE r = FN(m[H##LESIZE(le)], shift, &sat);         \
            mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask);     \
            qc |= sat & mask & 1;                               \
        }                                                       \
        if (qc) {                                               \
            env->vfp.qc[0] = qc;                                \
        }                                                       \
        mve_advance_vpt(env);                                   \
    }

#define DO_VSHRN_SAT_UB(BOP, TOP, FN)                           \
    DO_VSHRN_SAT(BOP, false, 1, uint8_t, 2, uint16_t, FN)       \
    DO_VSHRN_SAT(TOP, true, 1, uint8_t, 2, uint16_t, FN)

#define DO_VSHRN_SAT_UH(BOP, TOP, FN)                           \
    DO_VSHRN_SAT(BOP, false, 2, uint16_t, 4, uint32_t, FN)      \
    DO_VSHRN_SAT(TOP, true, 2, uint16_t, 4, uint32_t, FN)

#define DO_VSHRN_SAT_SB(BOP, TOP, FN)                           \
    DO_VSHRN_SAT(BOP, false, 1, int8_t, 2, int16_t, FN)         \
    DO_VSHRN_SAT(TOP, true, 1, int8_t, 2, int16_t, FN)

#define DO_VSHRN_SAT_SH(BOP, TOP, FN)                           \
    DO_VSHRN_SAT(BOP, false, 2, int16_t, 4, int32_t, FN)        \
    DO_VSHRN_SAT(TOP, true, 2, int16_t, 4, int32_t, FN)

#define DO_SHRN_SB(N, M, SATP)                                  \
    do_sat_bhs((int64_t)(N) >> (M), INT8_MIN, INT8_MAX, SATP)
#define DO_SHRN_UB(N, M, SATP)                                  \
    do_sat_bhs((uint64_t)(N) >> (M), 0, UINT8_MAX, SATP)
#define DO_SHRUN_B(N, M, SATP)                                  \
    do_sat_bhs((int64_t)(N) >> (M), 0, UINT8_MAX, SATP)

#define DO_SHRN_SH(N, M, SATP)                                  \
    do_sat_bhs((int64_t)(N) >> (M), INT16_MIN, INT16_MAX, SATP)
#define DO_SHRN_UH(N, M, SATP)                                  \
    do_sat_bhs((uint64_t)(N) >> (M), 0, UINT16_MAX, SATP)
#define DO_SHRUN_H(N, M, SATP)                                  \
    do_sat_bhs((int64_t)(N) >> (M), 0, UINT16_MAX, SATP)

#define DO_RSHRN_SB(N, M, SATP)                                 \
    do_sat_bhs(do_srshr(N, M), INT8_MIN, INT8_MAX, SATP)
#define DO_RSHRN_UB(N, M, SATP)                                 \
    do_sat_bhs(do_urshr(N, M), 0, UINT8_MAX, SATP)
#define DO_RSHRUN_B(N, M, SATP)                                 \
    do_sat_bhs(do_srshr(N, M), 0, UINT8_MAX, SATP)

#define DO_RSHRN_SH(N, M, SATP)                                 \
    do_sat_bhs(do_srshr(N, M), INT16_MIN, INT16_MAX, SATP)
#define DO_RSHRN_UH(N, M, SATP)                                 \
    do_sat_bhs(do_urshr(N, M), 0, UINT16_MAX, SATP)
#define DO_RSHRUN_H(N, M, SATP)                                 \
    do_sat_bhs(do_srshr(N, M), 0, UINT16_MAX, SATP)

DO_VSHRN_SAT_SB(vqshrnb_sb, vqshrnt_sb, DO_SHRN_SB)
DO_VSHRN_SAT_SH(vqshrnb_sh, vqshrnt_sh, DO_SHRN_SH)
DO_VSHRN_SAT_UB(vqshrnb_ub, vqshrnt_ub, DO_SHRN_UB)
DO_VSHRN_SAT_UH(vqshrnb_uh, vqshrnt_uh, DO_SHRN_UH)
DO_VSHRN_SAT_SB(vqshrunbb, vqshruntb, DO_SHRUN_B)
DO_VSHRN_SAT_SH(vqshrunbh, vqshrunth, DO_SHRUN_H)

DO_VSHRN_SAT_SB(vqrshrnb_sb, vqrshrnt_sb, DO_RSHRN_SB)
DO_VSHRN_SAT_SH(vqrshrnb_sh, vqrshrnt_sh, DO_RSHRN_SH)
DO_VSHRN_SAT_UB(vqrshrnb_ub, vqrshrnt_ub, DO_RSHRN_UB)
DO_VSHRN_SAT_UH(vqrshrnb_uh, vqrshrnt_uh, DO_RSHRN_UH)
DO_VSHRN_SAT_SB(vqrshrunbb, vqrshruntb, DO_RSHRUN_B)
DO_VSHRN_SAT_SH(vqrshrunbh, vqrshrunth, DO_RSHRUN_H)

#define DO_VMOVN(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE)           \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \
    {                                                           \
        LTYPE *m = vm;                                          \
        TYPE *d = vd;                                           \
        uint16_t mask = mve_element_mask(env);                  \
        unsigned le;                                            \
        mask >>= ESIZE * TOP;                                   \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
            mergemask(&d[H##ESIZE(le * 2 + TOP)],               \
                      m[H##LESIZE(le)], mask);                  \
        }                                                       \
        mve_advance_vpt(env);                                   \
    }

DO_VMOVN(vmovnbb, false, 1, uint8_t, 2, uint16_t)
DO_VMOVN(vmovnbh, false, 2, uint16_t, 4, uint32_t)
DO_VMOVN(vmovntb, true, 1, uint8_t, 2, uint16_t)
DO_VMOVN(vmovnth, true, 2, uint16_t, 4, uint32_t)

#define DO_VMOVN_SAT(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN)   \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \
    {                                                           \
        LTYPE *m = vm;                                          \
        TYPE *d = vd;                                           \
        uint16_t mask = mve_element_mask(env);                  \
        bool qc = false;                                        \
        unsigned le;                                            \
        mask >>= ESIZE * TOP;                                   \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
            bool sat = false;                                   \
            TYPE r = FN(m[H##LESIZE(le)], &sat);                \
            mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask);     \
            qc |= sat & mask & 1;                               \
        }                                                       \
        if (qc) {                                               \
            env->vfp.qc[0] = qc;                                \
        }                                                       \
        mve_advance_vpt(env);                                   \
    }

#define DO_VMOVN_SAT_UB(BOP, TOP, FN)                           \
    DO_VMOVN_SAT(BOP, false, 1, uint8_t, 2, uint16_t, FN)       \
    DO_VMOVN_SAT(TOP, true, 1, uint8_t, 2, uint16_t, FN)

#define DO_VMOVN_SAT_UH(BOP, TOP, FN)                           \
    DO_VMOVN_SAT(BOP, false, 2, uint16_t, 4, uint32_t, FN)      \
    DO_VMOVN_SAT(TOP, true, 2, uint16_t, 4, uint32_t, FN)

#define DO_VMOVN_SAT_SB(BOP, TOP, FN)                           \
    DO_VMOVN_SAT(BOP, false, 1, int8_t, 2, int16_t, FN)         \
    DO_VMOVN_SAT(TOP, true, 1, int8_t, 2, int16_t, FN)

#define DO_VMOVN_SAT_SH(BOP, TOP, FN)                           \
    DO_VMOVN_SAT(BOP, false, 2, int16_t, 4, int32_t, FN)        \
    DO_VMOVN_SAT(TOP, true, 2, int16_t, 4, int32_t, FN)

#define DO_VQMOVN_SB(N, SATP)                                   \
    do_sat_bhs((int64_t)(N), INT8_MIN, INT8_MAX, SATP)
#define DO_VQMOVN_UB(N, SATP)                                   \
    do_sat_bhs((uint64_t)(N), 0, UINT8_MAX, SATP)
#define DO_VQMOVUN_B(N, SATP)                                   \
    do_sat_bhs((int64_t)(N), 0, UINT8_MAX, SATP)

#define DO_VQMOVN_SH(N, SATP)                                   \
    do_sat_bhs((int64_t)(N), INT16_MIN, INT16_MAX, SATP)
#define DO_VQMOVN_UH(N, SATP)                                   \
    do_sat_bhs((uint64_t)(N), 0, UINT16_MAX, SATP)
#define DO_VQMOVUN_H(N, SATP)                                   \
    do_sat_bhs((int64_t)(N), 0, UINT16_MAX, SATP)

DO_VMOVN_SAT_SB(vqmovnbsb, vqmovntsb, DO_VQMOVN_SB)
DO_VMOVN_SAT_SH(vqmovnbsh, vqmovntsh, DO_VQMOVN_SH)
DO_VMOVN_SAT_UB(vqmovnbub, vqmovntub, DO_VQMOVN_UB)
DO_VMOVN_SAT_UH(vqmovnbuh, vqmovntuh, DO_VQMOVN_UH)
DO_VMOVN_SAT_SB(vqmovunbb, vqmovuntb, DO_VQMOVUN_B)
DO_VMOVN_SAT_SH(vqmovunbh, vqmovunth, DO_VQMOVUN_H)

uint32_t HELPER(mve_vshlc)(CPUARMState *env, void *vd, uint32_t rdm,
                           uint32_t shift)
{
    uint32_t *d = vd;
    uint16_t mask = mve_element_mask(env);
    unsigned e;
    uint32_t r;

    /*
     * For each 32-bit element, we shift it left, bringing in the
     * low 'shift' bits of rdm at the bottom; bits shifted out at
     * the top become the new rdm, provided the predicate bit for
     * the element is set (a clear predicate bit means "don't update
     * rdm here"; the per-byte destination update is handled by
     * mergemask()). shift == 0 is special-cased: it behaves as a
     * full 32-bit shift, replacing each element with rdm in turn.
     */
    if (shift == 0) {
        for (e = 0; e < 16 / 4; e++, mask >>= 4) {
            r = rdm;
            if (mask & 1) {
                rdm = d[H4(e)];
            }
            mergemask(&d[H4(e)], r, mask);
        }
    } else {
        uint32_t shiftmask = MAKE_64BIT_MASK(0, shift);

        for (e = 0; e < 16 / 4; e++, mask >>= 4) {
            r = (d[H4(e)] << shift) | (rdm & shiftmask);
            if (mask & 1) {
                rdm = d[H4(e)] >> (32 - shift);
            }
            mergemask(&d[H4(e)], r, mask);
        }
    }
    mve_advance_vpt(env);
    return rdm;
}
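
/*
 * Example: with shift == 8 and an all-true predicate, element 0
 * becomes (d[0] << 8) | (Rdm & 0xff), element 1 takes the top 8
 * bits of the old d[0] in its low byte, and so on; the top 8 bits
 * of the old d[3] are returned as the new Rdm value.
 */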

uint64_t HELPER(mve_sshrl)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_sqrshl_d(n, -(int8_t)shift, false, NULL);
}

uint64_t HELPER(mve_ushll)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_uqrshl_d(n, (int8_t)shift, false, NULL);
}

uint64_t HELPER(mve_sqshll)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_sqrshl_d(n, (int8_t)shift, false, &env->QF);
}

uint64_t HELPER(mve_uqshll)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_uqrshl_d(n, (int8_t)shift, false, &env->QF);
}

uint64_t HELPER(mve_sqrshrl)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_sqrshl_d(n, -(int8_t)shift, true, &env->QF);
}

uint64_t HELPER(mve_uqrshll)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_uqrshl_d(n, (int8_t)shift, true, &env->QF);
}

/* Operate on 64-bit values, but saturate at 48 bits */
static inline int64_t do_sqrshl48_d(int64_t src, int64_t shift,
                                    bool round, uint32_t *sat)
{
    int64_t val, extval;

    if (shift <= -48) {
        /* Rounding the sign bit always produces 0. */
        if (round) {
            return 0;
        }
        return src >> 63;
    } else if (shift < 0) {
        if (round) {
            src >>= -shift - 1;
            val = (src >> 1) + (src & 1);
        } else {
            val = src >> -shift;
        }
        extval = sextract64(val, 0, 48);
        if (!sat || val == extval) {
            return extval;
        }
    } else if (shift < 48) {
        int64_t extval = sextract64(src << shift, 0, 48);
        if (!sat || src == (extval >> shift)) {
            return extval;
        }
    } else if (!sat || src == 0) {
        return 0;
    }

    *sat = 1;
    return src >= 0 ? MAKE_64BIT_MASK(0, 47) : MAKE_64BIT_MASK(47, 17);
}

/* Operate on 64-bit values, but saturate at 48 bits */
static inline uint64_t do_uqrshl48_d(uint64_t src, int64_t shift,
                                     bool round, uint32_t *sat)
{
    uint64_t val, extval;

    if (shift <= -(48 + round)) {
        return 0;
    } else if (shift < 0) {
        if (round) {
            val = src >> (-shift - 1);
            val = (val >> 1) + (val & 1);
        } else {
            val = src >> -shift;
        }
        extval = extract64(val, 0, 48);
        if (!sat || val == extval) {
            return extval;
        }
    } else if (shift < 48) {
        uint64_t extval = extract64(src << shift, 0, 48);
        if (!sat || src == (extval >> shift)) {
            return extval;
        }
    } else if (!sat || src == 0) {
        return 0;
    }

    *sat = 1;
    return MAKE_64BIT_MASK(0, 48);
}
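
/*
 * Saturation example: a left shift that would move a significant bit
 * to or beyond bit 47 is out of range for the 48-bit result; the
 * signed helper then returns 0x00007fffffffffff or 0xffff800000000000,
 * the unsigned helper 0x0000ffffffffffff, and QF is set via *sat.
 */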

uint64_t HELPER(mve_sqrshrl48)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_sqrshl48_d(n, -(int8_t)shift, true, &env->QF);
}

uint64_t HELPER(mve_uqrshll48)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_uqrshl48_d(n, (int8_t)shift, true, &env->QF);
}

uint32_t HELPER(mve_uqshl)(CPUARMState *env, uint32_t n, uint32_t shift)
{
    return do_uqrshl_bhs(n, (int8_t)shift, 32, false, &env->QF);
}

uint32_t HELPER(mve_sqshl)(CPUARMState *env, uint32_t n, uint32_t shift)
{
    return do_sqrshl_bhs(n, (int8_t)shift, 32, false, &env->QF);
}

uint32_t HELPER(mve_uqrshl)(CPUARMState *env, uint32_t n, uint32_t shift)
{
    return do_uqrshl_bhs(n, (int8_t)shift, 32, true, &env->QF);
}

uint32_t HELPER(mve_sqrshr)(CPUARMState *env, uint32_t n, uint32_t shift)
{
    return do_sqrshl_bhs(n, -(int8_t)shift, 32, true, &env->QF);
}

#define DO_VIDUP(OP, ESIZE, TYPE, FN)                           \
    uint32_t HELPER(mve_##OP)(CPUARMState *env, void *vd,       \
                              uint32_t offset, uint32_t imm)    \
    {                                                           \
        TYPE *d = vd;                                           \
        uint16_t mask = mve_element_mask(env);                  \
        unsigned e;                                             \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {      \
            mergemask(&d[H##ESIZE(e)], offset, mask);           \
            offset = FN(offset, imm);                           \
        }                                                       \
        mve_advance_vpt(env);                                   \
        return offset;                                          \
    }

#define DO_VIWDUP(OP, ESIZE, TYPE, FN)                          \
    uint32_t HELPER(mve_##OP)(CPUARMState *env, void *vd,       \
                              uint32_t offset, uint32_t wrap,   \
                              uint32_t imm)                     \
    {                                                           \
        TYPE *d = vd;                                           \
        uint16_t mask = mve_element_mask(env);                  \
        unsigned e;                                             \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {      \
            mergemask(&d[H##ESIZE(e)], offset, mask);           \
            offset = FN(offset, wrap, imm);                     \
        }                                                       \
        mve_advance_vpt(env);                                   \
        return offset;                                          \
    }

#define DO_VIDUP_ALL(OP, FN)                    \
    DO_VIDUP(OP##b, 1, int8_t, FN)              \
    DO_VIDUP(OP##h, 2, int16_t, FN)             \
    DO_VIDUP(OP##w, 4, int32_t, FN)

#define DO_VIWDUP_ALL(OP, FN)                   \
    DO_VIWDUP(OP##b, 1, int8_t, FN)             \
    DO_VIWDUP(OP##h, 2, int16_t, FN)            \
    DO_VIWDUP(OP##w, 4, int32_t, FN)

static uint32_t do_add_wrap(uint32_t offset, uint32_t wrap, uint32_t imm)
{
    offset += imm;
    if (offset == wrap) {
        offset = 0;
    }
    return offset;
}

static uint32_t do_sub_wrap(uint32_t offset, uint32_t wrap, uint32_t imm)
{
    if (offset == 0) {
        offset = wrap;
    }
    offset -= imm;
    return offset;
}
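
/*
 * Wrap example: viwdupb with offset 6, wrap 8 and imm 2 writes the
 * sequence 6, 0, 2, 4, ... because each increment that reaches the
 * wrap value restarts the count at zero.
 */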

DO_VIDUP_ALL(vidup, DO_ADD)
DO_VIWDUP_ALL(viwdup, do_add_wrap)
DO_VIWDUP_ALL(vdwdup, do_sub_wrap)

/*
 * Vector comparison.
 * P0 bits for non-executed beats (where eci_mask is 0) are unchanged.
 * P0 bits for predicated lanes in executed beats (where mask is 0) are 0.
 * P0 bits otherwise are updated with the results of the comparisons.
 * We must also keep unchanged the MASK fields at the top of v7m.vpr.
 */
#define DO_VCMP(OP, ESIZE, TYPE, FN)                                    \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, void *vm)   \
    {                                                                   \
        TYPE *n = vn, *m = vm;                                          \
        uint16_t mask = mve_element_mask(env);                          \
        uint16_t eci_mask = mve_eci_mask(env);                          \
        uint16_t beatpred = 0;                                          \
        uint16_t emask = MAKE_64BIT_MASK(0, ESIZE);                     \
        unsigned e;                                                     \
        for (e = 0; e < 16 / ESIZE; e++) {                              \
            bool r = FN(n[H##ESIZE(e)], m[H##ESIZE(e)]);                \
            /* Comparison sets 0/1 bits for each byte in the element */ \
            beatpred |= r * emask;                                      \
            emask <<= ESIZE;                                            \
        }                                                               \
        beatpred &= mask;                                               \
        env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) |           \
                       (beatpred & eci_mask);                           \
        mve_advance_vpt(env);                                           \
    }

#define DO_VCMP_SCALAR(OP, ESIZE, TYPE, FN)                             \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vn,             \
                                uint32_t rm)                            \
    {                                                                   \
        TYPE *n = vn;                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        uint16_t eci_mask = mve_eci_mask(env);                          \
        uint16_t beatpred = 0;                                          \
        uint16_t emask = MAKE_64BIT_MASK(0, ESIZE);                     \
        unsigned e;                                                     \
        for (e = 0; e < 16 / ESIZE; e++) {                              \
            bool r = FN(n[H##ESIZE(e)], (TYPE)rm);                      \
            /* Comparison sets 0/1 bits for each byte in the element */ \
            beatpred |= r * emask;                                      \
            emask <<= ESIZE;                                            \
        }                                                               \
        beatpred &= mask;                                               \
        env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) |           \
                       (beatpred & eci_mask);                           \
        mve_advance_vpt(env);                                           \
    }

#define DO_VCMP_S(OP, FN)                               \
    DO_VCMP(OP##b, 1, int8_t, FN)                       \
    DO_VCMP(OP##h, 2, int16_t, FN)                      \
    DO_VCMP(OP##w, 4, int32_t, FN)                      \
    DO_VCMP_SCALAR(OP##_scalarb, 1, int8_t, FN)         \
    DO_VCMP_SCALAR(OP##_scalarh, 2, int16_t, FN)        \
    DO_VCMP_SCALAR(OP##_scalarw, 4, int32_t, FN)

#define DO_VCMP_U(OP, FN)                               \
    DO_VCMP(OP##b, 1, uint8_t, FN)                      \
    DO_VCMP(OP##h, 2, uint16_t, FN)                     \
    DO_VCMP(OP##w, 4, uint32_t, FN)                     \
    DO_VCMP_SCALAR(OP##_scalarb, 1, uint8_t, FN)        \
    DO_VCMP_SCALAR(OP##_scalarh, 2, uint16_t, FN)       \
    DO_VCMP_SCALAR(OP##_scalarw, 4, uint32_t, FN)

#define DO_EQ(N, M) ((N) == (M))
#define DO_NE(N, M) ((N) != (M))
#define DO_GE(N, M) ((N) >= (M))
#define DO_LT(N, M) ((N) < (M))
#define DO_GT(N, M) ((N) > (M))
#define DO_LE(N, M) ((N) <= (M))

DO_VCMP_U(vcmpeq, DO_EQ)
DO_VCMP_U(vcmpne, DO_NE)
DO_VCMP_U(vcmpcs, DO_GE)
DO_VCMP_U(vcmphi, DO_GT)
DO_VCMP_S(vcmpge, DO_GE)
DO_VCMP_S(vcmplt, DO_LT)
DO_VCMP_S(vcmpgt, DO_GT)
DO_VCMP_S(vcmple, DO_LE)
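
/*
 * Note that VCMPCS and VCMPHI are simply unsigned >= and >:
 * e.g. vcmpcsb comparing 0x80 against 0x01 is true, because the
 * operands are treated as uint8_t and 128 >= 1.
 */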

void HELPER(mve_vpsel)(CPUARMState *env, void *vd, void *vn, void *vm)
{
    /*
     * Qd[n] = VPR.P0[n] ? Qn[n] : Qm[n]
     * but note that whether bytes are written to Qd is still subject
     * to (all forms of) predication in the usual way.
     */
    uint64_t *d = vd, *n = vn, *m = vm;
    uint16_t mask = mve_element_mask(env);
    uint16_t p0 = FIELD_EX32(env->v7m.vpr, V7M_VPR, P0);
    unsigned e;
    for (e = 0; e < 16 / 8; e++, mask >>= 8, p0 >>= 8) {
        uint64_t r = m[H8(e)];
        mergemask(&r, n[H8(e)], p0);
        mergemask(&d[H8(e)], r, mask);
    }
    mve_advance_vpt(env);
}

void HELPER(mve_vpnot)(CPUARMState *env)
{
    /*
     * P0 bits for unexecuted beats (where eci_mask is 0) are unchanged.
     * P0 bits for predicated lanes in executed beats (where mask is 0)
     * are 0.
     * P0 bits otherwise are inverted.
     * (This is the same logic as VCMP.)
     * This insn is itself subject to predication and to beat-wise
     * execution, and after it executes VPT state advances in the
     * usual way.
     */
    uint16_t mask = mve_element_mask(env);
    uint16_t eci_mask = mve_eci_mask(env);
    uint16_t beatpred = ~env->v7m.vpr & mask;
    env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | (beatpred & eci_mask);
    mve_advance_vpt(env);
}

/*
 * VCTP: P0 unexecuted bits unchanged, predicated bits zeroed,
 * otherwise set according to the value of Rn. The calculation of
 * newmask here works in the same way as the calculation of the
 * ltpmask in mve_element_mask(), but we have pre-calculated
 * the masklen in the generated code.
 */
void HELPER(mve_vctp)(CPUARMState *env, uint32_t masklen)
{
    uint16_t mask = mve_element_mask(env);
    uint16_t eci_mask = mve_eci_mask(env);
    uint16_t newmask;

    assert(masklen <= 16);
    newmask = masklen ? MAKE_64BIT_MASK(0, masklen) : 0;
    newmask &= mask;
    env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | (newmask & eci_mask);
    mve_advance_vpt(env);
}
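
/*
 * Example (assuming the generated code passes masklen as element
 * count times element size, per the comment above): VCTP.16 with
 * Rn == 3 gives masklen == 6, so P0 becomes 0x003f (three 16-bit
 * lanes, two predicate bytes each), subject to mask and eci_mask.
 */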

#define DO_1OP_SAT(OP, ESIZE, TYPE, FN)                                 \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm)         \
    {                                                                   \
        TYPE *d = vd, *m = vm;                                          \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        bool qc = false;                                                \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            bool sat = false;                                           \
            mergemask(&d[H##ESIZE(e)], FN(m[H##ESIZE(e)], &sat), mask); \
            qc |= sat & mask & 1;                                       \
        }                                                               \
        if (qc) {                                                       \
            env->vfp.qc[0] = qc;                                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

#define DO_VQABS_B(N, SATP) \
    do_sat_bhs(DO_ABS((int64_t)N), INT8_MIN, INT8_MAX, SATP)
#define DO_VQABS_H(N, SATP) \
    do_sat_bhs(DO_ABS((int64_t)N), INT16_MIN, INT16_MAX, SATP)
#define DO_VQABS_W(N, SATP) \
    do_sat_bhs(DO_ABS((int64_t)N), INT32_MIN, INT32_MAX, SATP)

#define DO_VQNEG_B(N, SATP) do_sat_bhs(-(int64_t)N, INT8_MIN, INT8_MAX, SATP)
#define DO_VQNEG_H(N, SATP) do_sat_bhs(-(int64_t)N, INT16_MIN, INT16_MAX, SATP)
#define DO_VQNEG_W(N, SATP) do_sat_bhs(-(int64_t)N, INT32_MIN, INT32_MAX, SATP)

DO_1OP_SAT(vqabsb, 1, int8_t, DO_VQABS_B)
DO_1OP_SAT(vqabsh, 2, int16_t, DO_VQABS_H)
DO_1OP_SAT(vqabsw, 4, int32_t, DO_VQABS_W)

DO_1OP_SAT(vqnegb, 1, int8_t, DO_VQNEG_B)
DO_1OP_SAT(vqnegh, 2, int16_t, DO_VQNEG_H)
DO_1OP_SAT(vqnegw, 4, int32_t, DO_VQNEG_W)

/*
 * VMAXA, VMINA: vd is unsigned; vm is signed, and we take its
 * absolute value before the (unsigned) max/min.
 */
#define DO_VMAXMINA(OP, ESIZE, STYPE, UTYPE, FN)                \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \
    {                                                           \
        UTYPE *d = vd;                                          \
        STYPE *m = vm;                                          \
        uint16_t mask = mve_element_mask(env);                  \
        unsigned e;                                             \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {      \
            UTYPE r = DO_ABS(m[H##ESIZE(e)]);                   \
            r = FN(d[H##ESIZE(e)], r);                          \
            mergemask(&d[H##ESIZE(e)], r, mask);                \
        }                                                       \
        mve_advance_vpt(env);                                   \
    }

DO_VMAXMINA(vmaxab, 1, int8_t, uint8_t, DO_MAX)
DO_VMAXMINA(vmaxah, 2, int16_t, uint16_t, DO_MAX)
DO_VMAXMINA(vmaxaw, 4, int32_t, uint32_t, DO_MAX)
DO_VMAXMINA(vminab, 1, int8_t, uint8_t, DO_MIN)
DO_VMAXMINA(vminah, 2, int16_t, uint16_t, DO_MIN)
DO_VMAXMINA(vminaw, 4, int32_t, uint32_t, DO_MIN)

/*
 * 2-operand floating point. Note that if an element is partially
 * predicated we must do the FP operation to update the non-predicated
 * bytes, but we must be careful to avoid updating the FP exception
 * state unless byte 0 of the element was unpredicated.
 */
#define DO_2OP_FP(OP, ESIZE, TYPE, FN)                                  \
    void HELPER(glue(mve_, OP))(CPUARMState *env,                       \
                                void *vd, void *vn, void *vm)           \
    {                                                                   \
        TYPE *d = vd, *n = vn, *m = vm;                                 \
        TYPE r;                                                         \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        float_status *fpst;                                             \
        float_status scratch_fpst;                                      \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) {              \
                continue;                                               \
            }                                                           \
            fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 :    \
                &env->vfp.standard_fp_status;                           \
            if (!(mask & 1)) {                                          \
                /* We need the result but without updating flags */     \
                scratch_fpst = *fpst;                                   \
                fpst = &scratch_fpst;                                   \
            }                                                           \
            r = FN(n[H##ESIZE(e)], m[H##ESIZE(e)], fpst);               \
            mergemask(&d[H##ESIZE(e)], r, mask);                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

#define DO_2OP_FP_ALL(OP, FN)                  \
    DO_2OP_FP(OP##h, 2, float16, float16_##FN) \
    DO_2OP_FP(OP##s, 4, float32, float32_##FN)

DO_2OP_FP_ALL(vfadd, add)
DO_2OP_FP_ALL(vfsub, sub)
DO_2OP_FP_ALL(vfmul, mul)

static inline float16 float16_abd(float16 a, float16 b, float_status *s)
{
    return float16_abs(float16_sub(a, b, s));
}

static inline float32 float32_abd(float32 a, float32 b, float_status *s)
{
    return float32_abs(float32_sub(a, b, s));
}

DO_2OP_FP_ALL(vfabd, abd)
DO_2OP_FP_ALL(vmaxnm, maxnum)
DO_2OP_FP_ALL(vminnm, minnum)

static inline float16 float16_maxnuma(float16 a, float16 b, float_status *s)
{
    return float16_maxnum(float16_abs(a), float16_abs(b), s);
}

static inline float32 float32_maxnuma(float32 a, float32 b, float_status *s)
{
    return float32_maxnum(float32_abs(a), float32_abs(b), s);
}

static inline float16 float16_minnuma(float16 a, float16 b, float_status *s)
{
    return float16_minnum(float16_abs(a), float16_abs(b), s);
}

static inline float32 float32_minnuma(float32 a, float32 b, float_status *s)
{
    return float32_minnum(float32_abs(a), float32_abs(b), s);
}

DO_2OP_FP_ALL(vmaxnma, maxnuma)
DO_2OP_FP_ALL(vminnma, minnuma)

#define DO_VCADD_FP(OP, ESIZE, TYPE, FN0, FN1)                          \
    void HELPER(glue(mve_, OP))(CPUARMState *env,                       \
                                void *vd, void *vn, void *vm)           \
    {                                                                   \
        TYPE *d = vd, *n = vn, *m = vm;                                 \
        TYPE r[16 / ESIZE];                                             \
        uint16_t tm, mask = mve_element_mask(env);                      \
        unsigned e;                                                     \
        float_status *fpst;                                             \
        float_status scratch_fpst;                                      \
        /* Calculate all results first to avoid overwriting inputs */   \
        for (e = 0, tm = mask; e < 16 / ESIZE; e++, tm >>= ESIZE) {     \
            if ((tm & MAKE_64BIT_MASK(0, ESIZE)) == 0) {                \
                r[e] = 0;                                               \
                continue;                                               \
            }                                                           \
            fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 :    \
                &env->vfp.standard_fp_status;                           \
            if (!(tm & 1)) {                                            \
                /* We need the result but without updating flags */     \
                scratch_fpst = *fpst;                                   \
                fpst = &scratch_fpst;                                   \
            }                                                           \
            if (!(e & 1)) {                                             \
                r[e] = FN0(n[H##ESIZE(e)], m[H##ESIZE(e + 1)], fpst);   \
            } else {                                                    \
                r[e] = FN1(n[H##ESIZE(e)], m[H##ESIZE(e - 1)], fpst);   \
            }                                                           \
        }                                                               \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            mergemask(&d[H##ESIZE(e)], r[e], mask);                     \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

DO_VCADD_FP(vfcadd90h, 2, float16, float16_sub, float16_add)
DO_VCADD_FP(vfcadd90s, 4, float32, float32_sub, float32_add)
DO_VCADD_FP(vfcadd270h, 2, float16, float16_add, float16_sub)
DO_VCADD_FP(vfcadd270s, 4, float32, float32_add, float32_sub)
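
/*
 * In other words, for VCADD #90 the even (real) lanes compute
 * n_re - m_im and the odd (imaginary) lanes n_im + m_re, which
 * is n + i*m; VCADD #270 swaps the add and subtract, giving n - i*m.
 */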

#define DO_VFMA(OP, ESIZE, TYPE, CHS)                                   \
    void HELPER(glue(mve_, OP))(CPUARMState *env,                       \
                                void *vd, void *vn, void *vm)           \
    {                                                                   \
        TYPE *d = vd, *n = vn, *m = vm;                                 \
        TYPE r;                                                         \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        float_status *fpst;                                             \
        float_status scratch_fpst;                                      \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) {              \
                continue;                                               \
            }                                                           \
            fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 :    \
                &env->vfp.standard_fp_status;                           \
            if (!(mask & 1)) {                                          \
                /* We need the result but without updating flags */     \
                scratch_fpst = *fpst;                                   \
                fpst = &scratch_fpst;                                   \
            }                                                           \
            r = n[H##ESIZE(e)];                                         \
            if (CHS) {                                                  \
                r = TYPE##_chs(r);                                      \
            }                                                           \
            r = TYPE##_muladd(r, m[H##ESIZE(e)], d[H##ESIZE(e)],        \
                              0, fpst);                                 \
            mergemask(&d[H##ESIZE(e)], r, mask);                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

DO_VFMA(vfmah, 2, float16, false)
DO_VFMA(vfmas, 4, float32, false)
DO_VFMA(vfmsh, 2, float16, true)
DO_VFMA(vfmss, 4, float32, true)

#define DO_VCMLA(OP, ESIZE, TYPE, ROT, FN)                              \
    void HELPER(glue(mve_, OP))(CPUARMState *env,                       \
                                void *vd, void *vn, void *vm)           \
    {                                                                   \
        TYPE *d = vd, *n = vn, *m = vm;                                 \
        TYPE r0, r1, e1, e2, e3, e4;                                    \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        float_status *fpst0, *fpst1;                                    \
        float_status scratch_fpst;                                      \
        /* We loop through pairs of elements at a time */               \
        for (e = 0; e < 16 / ESIZE; e += 2, mask >>= ESIZE * 2) {       \
            if ((mask & MAKE_64BIT_MASK(0, ESIZE * 2)) == 0) {          \
                continue;                                               \
            }                                                           \
            fpst0 = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 :   \
                &env->vfp.standard_fp_status;                           \
            fpst1 = fpst0;                                              \
            if (!(mask & 1)) {                                          \
                scratch_fpst = *fpst0;                                  \
                fpst0 = &scratch_fpst;                                  \
            }                                                           \
            if (!(mask & (1 << ESIZE))) {                               \
                scratch_fpst = *fpst1;                                  \
                fpst1 = &scratch_fpst;                                  \
            }                                                           \
            switch (ROT) {                                              \
            case 0:                                                     \
                e1 = m[H##ESIZE(e)];                                    \
                e2 = n[H##ESIZE(e)];                                    \
                e3 = m[H##ESIZE(e + 1)];                                \
                e4 = n[H##ESIZE(e)];                                    \
                break;                                                  \
            case 1:                                                     \
                e1 = TYPE##_chs(m[H##ESIZE(e + 1)]);                    \
                e2 = n[H##ESIZE(e + 1)];                                \
                e3 = m[H##ESIZE(e)];                                    \
                e4 = n[H##ESIZE(e + 1)];                                \
                break;                                                  \
            case 2:                                                     \
                e1 = TYPE##_chs(m[H##ESIZE(e)]);                        \
                e2 = n[H##ESIZE(e)];                                    \
                e3 = TYPE##_chs(m[H##ESIZE(e + 1)]);                    \
                e4 = n[H##ESIZE(e)];                                    \
                break;                                                  \
            case 3:                                                     \
                e1 = m[H##ESIZE(e + 1)];                                \
                e2 = n[H##ESIZE(e + 1)];                                \
                e3 = TYPE##_chs(m[H##ESIZE(e)]);                        \
                e4 = n[H##ESIZE(e + 1)];                                \
                break;                                                  \
            default:                                                    \
                g_assert_not_reached();                                 \
            }                                                           \
            r0 = FN(e2, e1, d[H##ESIZE(e)], fpst0);                     \
            r1 = FN(e4, e3, d[H##ESIZE(e + 1)], fpst1);                 \
            mergemask(&d[H##ESIZE(e)], r0, mask);                       \
            mergemask(&d[H##ESIZE(e + 1)], r1, mask >> ESIZE);          \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

#define DO_VCMULH(N, M, D, S) float16_mul(N, M, S)
#define DO_VCMULS(N, M, D, S) float32_mul(N, M, S)

#define DO_VCMLAH(N, M, D, S) float16_muladd(N, M, D, 0, S)
#define DO_VCMLAS(N, M, D, S) float32_muladd(N, M, D, 0, S)

DO_VCMLA(vcmul0h, 2, float16, 0, DO_VCMULH)
DO_VCMLA(vcmul0s, 4, float32, 0, DO_VCMULS)
DO_VCMLA(vcmul90h, 2, float16, 1, DO_VCMULH)
DO_VCMLA(vcmul90s, 4, float32, 1, DO_VCMULS)
DO_VCMLA(vcmul180h, 2, float16, 2, DO_VCMULH)
DO_VCMLA(vcmul180s, 4, float32, 2, DO_VCMULS)
DO_VCMLA(vcmul270h, 2, float16, 3, DO_VCMULH)
DO_VCMLA(vcmul270s, 4, float32, 3, DO_VCMULS)

DO_VCMLA(vcmla0h, 2, float16, 0, DO_VCMLAH)
DO_VCMLA(vcmla0s, 4, float32, 0, DO_VCMLAS)
DO_VCMLA(vcmla90h, 2, float16, 1, DO_VCMLAH)
DO_VCMLA(vcmla90s, 4, float32, 1, DO_VCMLAS)
DO_VCMLA(vcmla180h, 2, float16, 2, DO_VCMLAH)
DO_VCMLA(vcmla180s, 4, float32, 2, DO_VCMLAS)
DO_VCMLA(vcmla270h, 2, float16, 3, DO_VCMLAH)
DO_VCMLA(vcmla270s, 4, float32, 3, DO_VCMLAS)

#define DO_2OP_FP_SCALAR(OP, ESIZE, TYPE, FN)                           \
    void HELPER(glue(mve_, OP))(CPUARMState *env,                       \
                                void *vd, void *vn, uint32_t rm)        \
    {                                                                   \
        TYPE *d = vd, *n = vn;                                          \
        TYPE r, m = rm;                                                 \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        float_status *fpst;                                             \
        float_status scratch_fpst;                                      \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) {              \
                continue;                                               \
            }                                                           \
            fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 :    \
                &env->vfp.standard_fp_status;                           \
            if (!(mask & 1)) {                                          \
                /* We need the result but without updating flags */     \
                scratch_fpst = *fpst;                                   \
                fpst = &scratch_fpst;                                   \
            }                                                           \
            r = FN(n[H##ESIZE(e)], m, fpst);                            \
            mergemask(&d[H##ESIZE(e)], r, mask);                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

#define DO_2OP_FP_SCALAR_ALL(OP, FN)                    \
    DO_2OP_FP_SCALAR(OP##h, 2, float16, float16_##FN)   \
    DO_2OP_FP_SCALAR(OP##s, 4, float32, float32_##FN)

DO_2OP_FP_SCALAR_ALL(vfadd_scalar, add)
DO_2OP_FP_SCALAR_ALL(vfsub_scalar, sub)
DO_2OP_FP_SCALAR_ALL(vfmul_scalar, mul)

#define DO_2OP_FP_ACC_SCALAR(OP, ESIZE, TYPE, FN)                       \
    void HELPER(glue(mve_, OP))(CPUARMState *env,                       \
                                void *vd, void *vn, uint32_t rm)        \
    {                                                                   \
        TYPE *d = vd, *n = vn;                                          \
        TYPE r, m = rm;                                                 \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        float_status *fpst;                                             \
        float_status scratch_fpst;                                      \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) {              \
                continue;                                               \
            }                                                           \
            fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 :    \
                &env->vfp.standard_fp_status;                           \
            if (!(mask & 1)) {                                          \
                /* We need the result but without updating flags */     \
                scratch_fpst = *fpst;                                   \
                fpst = &scratch_fpst;                                   \
            }                                                           \
            r = FN(n[H##ESIZE(e)], m, d[H##ESIZE(e)], 0, fpst);         \
            mergemask(&d[H##ESIZE(e)], r, mask);                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

/* VFMAS is vector * vector + scalar, so swap D and M data operands */
#define DO_VFMAS_SCALARH(N, M, D, F, S) float16_muladd(N, D, M, F, S)
#define DO_VFMAS_SCALARS(N, M, D, F, S) float32_muladd(N, D, M, F, S)

DO_2OP_FP_ACC_SCALAR(vfma_scalarh, 2, float16, float16_muladd)
DO_2OP_FP_ACC_SCALAR(vfma_scalars, 4, float32, float32_muladd)
DO_2OP_FP_ACC_SCALAR(vfmas_scalarh, 2, float16, DO_VFMAS_SCALARH)
DO_2OP_FP_ACC_SCALAR(vfmas_scalars, 4, float32, DO_VFMAS_SCALARS)

/* Floating point max/min across vector. */
#define DO_FP_VMAXMINV(OP, ESIZE, TYPE, ABS, FN)                \
    uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \
                                    uint32_t ra_in)             \
    {                                                           \
        uint16_t mask = mve_element_mask(env);                  \
        unsigned e;                                             \
        TYPE *m = vm;                                           \
        TYPE ra = (TYPE)ra_in;                                  \
        float_status *fpst = (ESIZE == 2) ?                     \
            &env->vfp.standard_fp_status_f16 :                  \
            &env->vfp.standard_fp_status;                       \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {      \
            if (mask & 1) {                                     \
                TYPE v = m[H##ESIZE(e)];                        \
                if (TYPE##_is_signaling_nan(ra, fpst)) {        \
                    ra = TYPE##_silence_nan(ra, fpst);          \
                    float_raise(float_flag_invalid, fpst);      \
                }                                               \
                if (TYPE##_is_signaling_nan(v, fpst)) {         \
                    v = TYPE##_silence_nan(v, fpst);            \
                    float_raise(float_flag_invalid, fpst);      \
                }                                               \
                if (ABS) {                                      \
                    v = TYPE##_abs(v);                          \
                }                                               \
                ra = FN(ra, v, fpst);                           \
            }                                                   \
        }                                                       \
        mve_advance_vpt(env);                                   \
        return ra;                                              \
    }

#define NOP(X) (X)

DO_FP_VMAXMINV(vmaxnmvh, 2, float16, false, float16_maxnum)
DO_FP_VMAXMINV(vmaxnmvs, 4, float32, false, float32_maxnum)
DO_FP_VMAXMINV(vminnmvh, 2, float16, false, float16_minnum)
DO_FP_VMAXMINV(vminnmvs, 4, float32, false, float32_minnum)
DO_FP_VMAXMINV(vmaxnmavh, 2, float16, true, float16_maxnum)
DO_FP_VMAXMINV(vmaxnmavs, 4, float32, true, float32_maxnum)
DO_FP_VMAXMINV(vminnmavh, 2, float16, true, float16_minnum)
DO_FP_VMAXMINV(vminnmavs, 4, float32, true, float32_minnum)

/* FP vector and scalar comparisons: update VPR.P0 as the integer VCMPs do */
#define DO_VCMP_FP(OP, ESIZE, TYPE, FN)                                 \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, void *vm)   \
    {                                                                   \
        TYPE *n = vn, *m = vm;                                          \
        uint16_t mask = mve_element_mask(env);                          \
        uint16_t eci_mask = mve_eci_mask(env);                          \
        uint16_t beatpred = 0;                                          \
        uint16_t emask = MAKE_64BIT_MASK(0, ESIZE);                     \
        unsigned e;                                                     \
        float_status *fpst;                                             \
        float_status scratch_fpst;                                      \
        bool r;                                                         \
        for (e = 0; e < 16 / ESIZE; e++, emask <<= ESIZE) {             \
            if ((mask & emask) == 0) {                                  \
                continue;                                               \
            }                                                           \
            fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 :    \
                &env->vfp.standard_fp_status;                           \
            if (!(mask & (1 << (e * ESIZE)))) {                         \
                /* We need the result but without updating flags */     \
                scratch_fpst = *fpst;                                   \
                fpst = &scratch_fpst;                                   \
            }                                                           \
            r = FN(n[H##ESIZE(e)], m[H##ESIZE(e)], fpst);               \
            /* Comparison sets 0/1 bits for each byte in the element */ \
            beatpred |= r * emask;                                      \
        }                                                               \
        beatpred &= mask;                                               \
        env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) |           \
                       (beatpred & eci_mask);                           \
        mve_advance_vpt(env);                                           \
    }

#define DO_VCMP_FP_SCALAR(OP, ESIZE, TYPE, FN)                          \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vn,             \
                                uint32_t rm)                            \
    {                                                                   \
        TYPE *n = vn;                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        uint16_t eci_mask = mve_eci_mask(env);                          \
        uint16_t beatpred = 0;                                          \
        uint16_t emask = MAKE_64BIT_MASK(0, ESIZE);                     \
        unsigned e;                                                     \
        float_status *fpst;                                             \
        float_status scratch_fpst;                                      \
        bool r;                                                         \
        for (e = 0; e < 16 / ESIZE; e++, emask <<= ESIZE) {             \
            if ((mask & emask) == 0) {                                  \
                continue;                                               \
            }                                                           \
            fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 :    \
                &env->vfp.standard_fp_status;                           \
            if (!(mask & (1 << (e * ESIZE)))) {                         \
                /* We need the result but without updating flags */     \
                scratch_fpst = *fpst;                                   \
                fpst = &scratch_fpst;                                   \
            }                                                           \
            r = FN(n[H##ESIZE(e)], (TYPE)rm, fpst);                     \
            /* Comparison sets 0/1 bits for each byte in the element */ \
            beatpred |= r * emask;                                      \
        }                                                               \
        beatpred &= mask;                                               \
        env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) |           \
                       (beatpred & eci_mask);                           \
        mve_advance_vpt(env);                                           \
    }

#define DO_VCMP_FP_BOTH(VOP, SOP, ESIZE, TYPE, FN)      \
    DO_VCMP_FP(VOP, ESIZE, TYPE, FN)                    \
    DO_VCMP_FP_SCALAR(SOP, ESIZE, TYPE, FN)

/*
 * Some care is needed here to get the correct result for the unordered
 * case. Architecturally EQ, GE and GT are defined to be false for
 * unordered, but NE, LT and LE are defined as their logical inverses
 * and so must be true for it. The softfloat comparison functions all
 * return false for unordered, so we implement GE/GT as LE/LT with
 * swapped operands and NE/LT/LE as the logical inverse of EQ/GE/GT.
 */
#define DO_GE16(X, Y, S) float16_le(Y, X, S)
#define DO_GE32(X, Y, S) float32_le(Y, X, S)
#define DO_GT16(X, Y, S) float16_lt(Y, X, S)
#define DO_GT32(X, Y, S) float32_lt(Y, X, S)

DO_VCMP_FP_BOTH(vfcmpeqh, vfcmpeq_scalarh, 2, float16, float16_eq)
DO_VCMP_FP_BOTH(vfcmpeqs, vfcmpeq_scalars, 4, float32, float32_eq)

DO_VCMP_FP_BOTH(vfcmpneh, vfcmpne_scalarh, 2, float16, !float16_eq)
DO_VCMP_FP_BOTH(vfcmpnes, vfcmpne_scalars, 4, float32, !float32_eq)

DO_VCMP_FP_BOTH(vfcmpgeh, vfcmpge_scalarh, 2, float16, DO_GE16)
DO_VCMP_FP_BOTH(vfcmpges, vfcmpge_scalars, 4, float32, DO_GE32)

DO_VCMP_FP_BOTH(vfcmplth, vfcmplt_scalarh, 2, float16, !DO_GE16)
DO_VCMP_FP_BOTH(vfcmplts, vfcmplt_scalars, 4, float32, !DO_GE32)

DO_VCMP_FP_BOTH(vfcmpgth, vfcmpgt_scalarh, 2, float16, DO_GT16)
DO_VCMP_FP_BOTH(vfcmpgts, vfcmpgt_scalars, 4, float32, DO_GT32)

DO_VCMP_FP_BOTH(vfcmpleh, vfcmple_scalarh, 2, float16, !DO_GT16)
DO_VCMP_FP_BOTH(vfcmples, vfcmple_scalars, 4, float32, !DO_GT32)
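
/*
 * Unordered example: comparing 1.0 with a quiet NaN, EQ, GE and GT
 * all record 0 for the element, while NE, LT and LE (their logical
 * inverses above) record 1.
 */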

#define DO_VCVT_FIXED(OP, ESIZE, TYPE, FN)                              \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vm,   \
                                uint32_t shift)                         \
    {                                                                   \
        TYPE *d = vd, *m = vm;                                          \
        TYPE r;                                                         \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        float_status *fpst;                                             \
        float_status scratch_fpst;                                      \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) {              \
                continue;                                               \
            }                                                           \
            fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 :    \
                &env->vfp.standard_fp_status;                           \
            if (!(mask & 1)) {                                          \
                /* We need the result but without updating flags */     \
                scratch_fpst = *fpst;                                   \
                fpst = &scratch_fpst;                                   \
            }                                                           \
            r = FN(m[H##ESIZE(e)], shift, fpst);                        \
            mergemask(&d[H##ESIZE(e)], r, mask);                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

DO_VCVT_FIXED(vcvt_sh, 2, int16_t, helper_vfp_shtoh)
DO_VCVT_FIXED(vcvt_uh, 2, uint16_t, helper_vfp_uhtoh)
DO_VCVT_FIXED(vcvt_hs, 2, int16_t, helper_vfp_toshh_round_to_zero)
DO_VCVT_FIXED(vcvt_hu, 2, uint16_t, helper_vfp_touhh_round_to_zero)
DO_VCVT_FIXED(vcvt_sf, 4, int32_t, helper_vfp_sltos)
DO_VCVT_FIXED(vcvt_uf, 4, uint32_t, helper_vfp_ultos)
DO_VCVT_FIXED(vcvt_fs, 4, int32_t, helper_vfp_tosls_round_to_zero)
DO_VCVT_FIXED(vcvt_fu, 4, uint32_t, helper_vfp_touls_round_to_zero)
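
/*
 * Here 'shift' is the number of fraction bits: e.g. vcvt_sh with
 * shift == 8 treats each int16_t element as Q7.8 fixed-point and
 * converts it to float16, so an input of 0x0180 becomes 1.5.
 */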

#define DO_VCVT_RMODE(OP, ESIZE, TYPE, FN)                              \
    void HELPER(glue(mve_, OP))(CPUARMState *env,                       \
                                void *vd, void *vm, uint32_t rmode)     \
    {                                                                   \
        TYPE *d = vd, *m = vm;                                          \
        TYPE r;                                                         \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        float_status *fpst;                                             \
        float_status scratch_fpst;                                      \
        float_status *base_fpst = (ESIZE == 2) ?                        \
            &env->vfp.standard_fp_status_f16 :                          \
            &env->vfp.standard_fp_status;                               \
        uint32_t prev_rmode = get_float_rounding_mode(base_fpst);       \
        set_float_rounding_mode(rmode, base_fpst);                      \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) {              \
                continue;                                               \
            }                                                           \
            fpst = base_fpst;                                           \
            if (!(mask & 1)) {                                          \
                /* We need the result but without updating flags */     \
                scratch_fpst = *fpst;                                   \
                fpst = &scratch_fpst;                                   \
            }                                                           \
            r = FN(m[H##ESIZE(e)], 0, fpst);                            \
            mergemask(&d[H##ESIZE(e)], r, mask);                        \
        }                                                               \
        set_float_rounding_mode(prev_rmode, base_fpst);                 \
        mve_advance_vpt(env);                                           \
    }

DO_VCVT_RMODE(vcvt_rm_sh, 2, uint16_t, helper_vfp_toshh)
DO_VCVT_RMODE(vcvt_rm_uh, 2, uint16_t, helper_vfp_touhh)
DO_VCVT_RMODE(vcvt_rm_ss, 4, uint32_t, helper_vfp_tosls)
DO_VCVT_RMODE(vcvt_rm_us, 4, uint32_t, helper_vfp_touls)

#define DO_VRINT_RM_H(M, F, S) helper_rinth(M, S)
#define DO_VRINT_RM_S(M, F, S) helper_rints(M, S)

DO_VCVT_RMODE(vrint_rm_h, 2, uint16_t, DO_VRINT_RM_H)
DO_VCVT_RMODE(vrint_rm_s, 4, uint32_t, DO_VRINT_RM_S)

/*
 * VCVT between halfprec and singleprec. As usual for halfprec
 * conversions, FZ16 is ignored and AHP is observed.
 */
static void do_vcvt_sh(CPUARMState *env, void *vd, void *vm, int top)
{
    uint16_t *d = vd;
    uint32_t *m = vm;
    uint16_t r;
    uint16_t mask = mve_element_mask(env);
    bool ieee = !(env->vfp.xregs[ARM_VFP_FPSCR] & FPCR_AHP);
    unsigned e;
    float_status *fpst;
    float_status scratch_fpst;
    float_status *base_fpst = &env->vfp.standard_fp_status;
    bool old_fz = get_flush_to_zero(base_fpst);
    set_flush_to_zero(false, base_fpst);
    for (e = 0; e < 16 / 4; e++, mask >>= 4) {
        if ((mask & MAKE_64BIT_MASK(0, 4)) == 0) {
            continue;
        }
        fpst = base_fpst;
        if (!(mask & 1)) {
            /* We need the result but without updating flags */
            scratch_fpst = *fpst;
            fpst = &scratch_fpst;
        }
        r = float32_to_float16(m[H4(e)], ieee, fpst);
        mergemask(&d[H2(e * 2 + top)], r, mask >> (top * 2));
    }
    set_flush_to_zero(old_fz, base_fpst);
    mve_advance_vpt(env);
}

static void do_vcvt_hs(CPUARMState *env, void *vd, void *vm, int top)
{
    uint32_t *d = vd;
    uint16_t *m = vm;
    uint32_t r;
    uint16_t mask = mve_element_mask(env);
    bool ieee = !(env->vfp.xregs[ARM_VFP_FPSCR] & FPCR_AHP);
    unsigned e;
    float_status *fpst;
    float_status scratch_fpst;
    float_status *base_fpst = &env->vfp.standard_fp_status;
    bool old_fiz = get_flush_inputs_to_zero(base_fpst);
    set_flush_inputs_to_zero(false, base_fpst);
    for (e = 0; e < 16 / 4; e++, mask >>= 4) {
        if ((mask & MAKE_64BIT_MASK(0, 4)) == 0) {
            continue;
        }
        fpst = base_fpst;
        if (!(mask & (1 << (top * 2)))) {
            /* We need the result but without updating flags */
            scratch_fpst = *fpst;
            fpst = &scratch_fpst;
        }
        r = float16_to_float32(m[H2(e * 2 + top)], ieee, fpst);
        mergemask(&d[H4(e)], r, mask);
    }
    set_flush_inputs_to_zero(old_fiz, base_fpst);
    mve_advance_vpt(env);
}

void HELPER(mve_vcvtb_sh)(CPUARMState *env, void *vd, void *vm)
{
    do_vcvt_sh(env, vd, vm, 0);
}
void HELPER(mve_vcvtt_sh)(CPUARMState *env, void *vd, void *vm)
{
    do_vcvt_sh(env, vd, vm, 1);
}
void HELPER(mve_vcvtb_hs)(CPUARMState *env, void *vd, void *vm)
{
    do_vcvt_hs(env, vd, vm, 0);
}
void HELPER(mve_vcvtt_hs)(CPUARMState *env, void *vd, void *vm)
{
    do_vcvt_hs(env, vd, vm, 1);
}

#define DO_1OP_FP(OP, ESIZE, TYPE, FN)                                  \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vm)   \
    {                                                                   \
        TYPE *d = vd, *m = vm;                                          \
        TYPE r;                                                         \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        float_status *fpst;                                             \
        float_status scratch_fpst;                                      \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) {              \
                continue;                                               \
            }                                                           \
            fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 :    \
                &env->vfp.standard_fp_status;                           \
            if (!(mask & 1)) {                                          \
                /* We need the result but without updating flags */     \
                scratch_fpst = *fpst;                                   \
                fpst = &scratch_fpst;                                   \
            }                                                           \
            r = FN(m[H##ESIZE(e)], fpst);                               \
            mergemask(&d[H##ESIZE(e)], r, mask);                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

DO_1OP_FP(vrintx_h, 2, float16, float16_round_to_int)
DO_1OP_FP(vrintx_s, 4, float32, float32_round_to_int)