/*
 * M-profile MVE Operations
 *
 * Copyright (c) 2021 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "vec_internal.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"

static uint16_t mve_element_mask(CPUARMState *env)
{
    /*
     * Return the mask of which elements in the MVE vector should be
     * updated. This is a combination of multiple things:
     *  (1) by default, we update every lane in the vector
     *  (2) VPT predication stores its state in the VPR register;
     *  (3) low-overhead-branch tail predication will mask out part
     *      of the vector on the final iteration of the loop
     *  (4) if EPSR.ECI is set then we must execute only some beats
     *      of the insn
     * We combine all these into a 16-bit result with the same semantics
     * as VPR.P0: 0 to mask the lane, 1 if it is active.
     * 8-bit vector ops will look at all bits of the result;
     * 16-bit ops will look at bits 0, 2, 4, ...;
     * 32-bit ops will look at bits 0, 4, 8 and 12.
     */
    uint16_t mask = FIELD_EX32(env->v7m.vpr, V7M_VPR, P0);

    if (!(env->v7m.vpr & R_V7M_VPR_MASK01_MASK)) {
        mask |= 0xff;
    }
    if (!(env->v7m.vpr & R_V7M_VPR_MASK23_MASK)) {
        mask |= 0xff00;
    }

    if (env->v7m.ltpsize < 4 &&
        env->regs[14] <= (1 << (4 - env->v7m.ltpsize))) {
        /*
         * Tail predication active, and this is the last loop iteration.
         * The element size is (1 << ltpsize); we only want to process
         * loopcount elements, so we want to retain the least significant
         * (loopcount * esize) predicate bits and zero out bits above that.
         */
        int masklen = env->regs[14] << env->v7m.ltpsize;
        assert(masklen <= 16);
        mask &= MAKE_64BIT_MASK(0, masklen);
    }

    if ((env->condexec_bits & 0xf) == 0) {
        /*
         * ECI bits indicate which beats are already executed;
         * we handle this by effectively predicating them out.
         */
        int eci = env->condexec_bits >> 4;
        switch (eci) {
        case ECI_NONE:
            break;
        case ECI_A0:
            mask &= 0xfff0;
            break;
        case ECI_A0A1:
            mask &= 0xff00;
            break;
        case ECI_A0A1A2:
        case ECI_A0A1A2B0:
            mask &= 0xf000;
            break;
        default:
            g_assert_not_reached();
        }
    }

    return mask;
}
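
/*
 * Illustrative example (not from the Arm ARM): if VPR.P0 is 0x00ff and
 * neither tail predication nor ECI applies, a 32-bit op tests mask bits
 * 0, 4, 8 and 12, so elements 0 and 1 are active and elements 2 and 3
 * are predicated out.
 */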

static void mve_advance_vpt(CPUARMState *env)
{
    /* Advance the VPT and ECI state if necessary */
    uint32_t vpr = env->v7m.vpr;
    unsigned mask01, mask23;

    if ((env->condexec_bits & 0xf) == 0) {
        env->condexec_bits = (env->condexec_bits == (ECI_A0A1A2B0 << 4)) ?
            (ECI_A0 << 4) : (ECI_NONE << 4);
    }

    if (!(vpr & (R_V7M_VPR_MASK01_MASK | R_V7M_VPR_MASK23_MASK))) {
        /* VPT not enabled, nothing to do */
        return;
    }

    mask01 = FIELD_EX32(vpr, V7M_VPR, MASK01);
    mask23 = FIELD_EX32(vpr, V7M_VPR, MASK23);
    if (mask01 > 8) {
        /* high bit set, but not 0b1000: invert the relevant half of P0 */
        vpr ^= 0xff;
    }
    if (mask23 > 8) {
        /* high bit set, but not 0b1000: invert the relevant half of P0 */
        vpr ^= 0xff00;
    }
    vpr = FIELD_DP32(vpr, V7M_VPR, MASK01, mask01 << 1);
    vpr = FIELD_DP32(vpr, V7M_VPR, MASK23, mask23 << 1);
    env->v7m.vpr = vpr;
}
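
/*
 * Illustrative example: if MASK01 is 0b1100 then its high bit is set but
 * it is not 0b1000, so the low byte of P0 is inverted (flipping those
 * lanes between the "then" and "else" sense of the VPT block) and the
 * field then advances to 0b1000.
 */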

#define DO_VLDR(OP, MSIZE, LDTYPE, ESIZE, TYPE) \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr) \
    { \
        TYPE *d = vd; \
        uint16_t mask = mve_element_mask(env); \
        unsigned b, e; \
        /* \
         * R_SXTM allows the dest reg to become UNKNOWN for abandoned \
         * beats so we don't care if we update part of the dest and \
         * then take an exception. \
         */ \
        for (b = 0, e = 0; b < 16; b += ESIZE, e++) { \
            if (mask & (1 << b)) { \
                d[H##ESIZE(e)] = cpu_##LDTYPE##_data_ra(env, addr, GETPC()); \
            } \
            addr += MSIZE; \
        } \
        mve_advance_vpt(env); \
    }

#define DO_VSTR(OP, MSIZE, STTYPE, ESIZE, TYPE) \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr) \
    { \
        TYPE *d = vd; \
        uint16_t mask = mve_element_mask(env); \
        unsigned b, e; \
        for (b = 0, e = 0; b < 16; b += ESIZE, e++) { \
            if (mask & (1 << b)) { \
                cpu_##STTYPE##_data_ra(env, addr, d[H##ESIZE(e)], GETPC()); \
            } \
            addr += MSIZE; \
        } \
        mve_advance_vpt(env); \
    }
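
/*
 * As a concrete example (illustrative), DO_VLDR(vldrb, 1, ldub, 1, uint8_t)
 * below expands to a helper mve_vldrb() which loads one byte per byte-wide
 * element: lanes whose predicate bit is clear are skipped, but the address
 * still advances by MSIZE for them.
 */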

DO_VLDR(vldrb, 1, ldub, 1, uint8_t)
DO_VLDR(vldrh, 2, lduw, 2, uint16_t)
DO_VLDR(vldrw, 4, ldl, 4, uint32_t)

DO_VSTR(vstrb, 1, stb, 1, uint8_t)
DO_VSTR(vstrh, 2, stw, 2, uint16_t)
DO_VSTR(vstrw, 4, stl, 4, uint32_t)

DO_VLDR(vldrb_sh, 1, ldsb, 2, int16_t)
DO_VLDR(vldrb_sw, 1, ldsb, 4, int32_t)
DO_VLDR(vldrb_uh, 1, ldub, 2, uint16_t)
DO_VLDR(vldrb_uw, 1, ldub, 4, uint32_t)
DO_VLDR(vldrh_sw, 2, ldsw, 4, int32_t)
DO_VLDR(vldrh_uw, 2, lduw, 4, uint32_t)

DO_VSTR(vstrb_h, 1, stb, 2, int16_t)
DO_VSTR(vstrb_w, 1, stb, 4, int32_t)
DO_VSTR(vstrh_w, 2, stw, 4, int32_t)

#undef DO_VLDR
#undef DO_VSTR

/*
 * The mergemask(D, R, M) macro performs the operation "*D = R" but
 * storing only the bytes which correspond to 1 bits in M,
 * leaving other bytes in *D unchanged. We use _Generic
 * to select the correct implementation based on the type of D.
 */
static void mergemask_ub(uint8_t *d, uint8_t r, uint16_t mask)
{
    if (mask & 1) {
        *d = r;
    }
}

static void mergemask_sb(int8_t *d, int8_t r, uint16_t mask)
{
    mergemask_ub((uint8_t *)d, r, mask);
}

static void mergemask_uh(uint16_t *d, uint16_t r, uint16_t mask)
{
    uint16_t bmask = expand_pred_b_data[mask & 3];
    *d = (*d & ~bmask) | (r & bmask);
}

static void mergemask_sh(int16_t *d, int16_t r, uint16_t mask)
{
    mergemask_uh((uint16_t *)d, r, mask);
}

static void mergemask_uw(uint32_t *d, uint32_t r, uint16_t mask)
{
    uint32_t bmask = expand_pred_b_data[mask & 0xf];
    *d = (*d & ~bmask) | (r & bmask);
}

static void mergemask_sw(int32_t *d, int32_t r, uint16_t mask)
{
    mergemask_uw((uint32_t *)d, r, mask);
}

static void mergemask_uq(uint64_t *d, uint64_t r, uint16_t mask)
{
    uint64_t bmask = expand_pred_b_data[mask & 0xff];
    *d = (*d & ~bmask) | (r & bmask);
}

static void mergemask_sq(int64_t *d, int64_t r, uint16_t mask)
{
    mergemask_uq((uint64_t *)d, r, mask);
}

#define mergemask(D, R, M) \
    _Generic(D, \
             uint8_t *: mergemask_ub, \
             int8_t *: mergemask_sb, \
             uint16_t *: mergemask_uh, \
             int16_t *: mergemask_sh, \
             uint32_t *: mergemask_uw, \
             int32_t *: mergemask_sw, \
             uint64_t *: mergemask_uq, \
             int64_t *: mergemask_sq)(D, R, M)
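
/*
 * Usage example: given uint32_t *p, mergemask(p, r, mask) dispatches via
 * _Generic to mergemask_uw(), which updates only those bytes of *p whose
 * corresponding bits in the low four bits of mask are set.
 */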

void HELPER(mve_vdup)(CPUARMState *env, void *vd, uint32_t val)
{
    /*
     * The generated code already replicated an 8 or 16 bit constant
     * into the 32-bit value, so we only need to write the 32-bit
     * value to all elements of the Qreg, allowing for predication.
     */
    uint32_t *d = vd;
    uint16_t mask = mve_element_mask(env);
    unsigned e;
    for (e = 0; e < 16 / 4; e++, mask >>= 4) {
        mergemask(&d[H4(e)], val, mask);
    }
    mve_advance_vpt(env);
}

#define DO_1OP(OP, ESIZE, TYPE, FN) \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \
    { \
        TYPE *d = vd, *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            mergemask(&d[H##ESIZE(e)], FN(m[H##ESIZE(e)]), mask); \
        } \
        mve_advance_vpt(env); \
    }

#define DO_CLS_B(N) (clrsb32(N) - 24)
#define DO_CLS_H(N) (clrsb32(N) - 16)

DO_1OP(vclsb, 1, int8_t, DO_CLS_B)
DO_1OP(vclsh, 2, int16_t, DO_CLS_H)
DO_1OP(vclsw, 4, int32_t, clrsb32)

#define DO_CLZ_B(N) (clz32(N) - 24)
#define DO_CLZ_H(N) (clz32(N) - 16)

DO_1OP(vclzb, 1, uint8_t, DO_CLZ_B)
DO_1OP(vclzh, 2, uint16_t, DO_CLZ_H)
DO_1OP(vclzw, 4, uint32_t, clz32)

DO_1OP(vrev16b, 2, uint16_t, bswap16)
DO_1OP(vrev32b, 4, uint32_t, bswap32)
DO_1OP(vrev32h, 4, uint32_t, hswap32)
DO_1OP(vrev64b, 8, uint64_t, bswap64)
DO_1OP(vrev64h, 8, uint64_t, hswap64)
DO_1OP(vrev64w, 8, uint64_t, wswap64)

#define DO_NOT(N) (~(N))

DO_1OP(vmvn, 8, uint64_t, DO_NOT)

#define DO_ABS(N) ((N) < 0 ? -(N) : (N))
#define DO_FABSH(N) ((N) & dup_const(MO_16, 0x7fff))
#define DO_FABSS(N) ((N) & dup_const(MO_32, 0x7fffffff))

DO_1OP(vabsb, 1, int8_t, DO_ABS)
DO_1OP(vabsh, 2, int16_t, DO_ABS)
DO_1OP(vabsw, 4, int32_t, DO_ABS)

/* We can do these 64 bits at a time */
DO_1OP(vfabsh, 8, uint64_t, DO_FABSH)
DO_1OP(vfabss, 8, uint64_t, DO_FABSS)

#define DO_NEG(N) (-(N))
#define DO_FNEGH(N) ((N) ^ dup_const(MO_16, 0x8000))
#define DO_FNEGS(N) ((N) ^ dup_const(MO_32, 0x80000000))

DO_1OP(vnegb, 1, int8_t, DO_NEG)
DO_1OP(vnegh, 2, int16_t, DO_NEG)
DO_1OP(vnegw, 4, int32_t, DO_NEG)

/* We can do these 64 bits at a time */
DO_1OP(vfnegh, 8, uint64_t, DO_FNEGH)
DO_1OP(vfnegs, 8, uint64_t, DO_FNEGS)

/*
 * 1 operand immediates: Vda is destination and possibly also one source.
 * All these insns work at 64-bit widths.
 */
#define DO_1OP_IMM(OP, FN) \
    void HELPER(mve_##OP)(CPUARMState *env, void *vda, uint64_t imm) \
    { \
        uint64_t *da = vda; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        for (e = 0; e < 16 / 8; e++, mask >>= 8) { \
            mergemask(&da[H8(e)], FN(da[H8(e)], imm), mask); \
        } \
        mve_advance_vpt(env); \
    }

#define DO_MOVI(N, I) (I)
#define DO_ANDI(N, I) ((N) & (I))
#define DO_ORRI(N, I) ((N) | (I))

DO_1OP_IMM(vmovi, DO_MOVI)
DO_1OP_IMM(vandi, DO_ANDI)
DO_1OP_IMM(vorri, DO_ORRI)

#define DO_2OP(OP, ESIZE, TYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, \
                                void *vd, void *vn, void *vm) \
    { \
        TYPE *d = vd, *n = vn, *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            mergemask(&d[H##ESIZE(e)], \
                      FN(n[H##ESIZE(e)], m[H##ESIZE(e)]), mask); \
        } \
        mve_advance_vpt(env); \
    }

/* provide unsigned 2-op helpers for all sizes */
#define DO_2OP_U(OP, FN) \
    DO_2OP(OP##b, 1, uint8_t, FN) \
    DO_2OP(OP##h, 2, uint16_t, FN) \
    DO_2OP(OP##w, 4, uint32_t, FN)

/* provide signed 2-op helpers for all sizes */
#define DO_2OP_S(OP, FN) \
    DO_2OP(OP##b, 1, int8_t, FN) \
    DO_2OP(OP##h, 2, int16_t, FN) \
    DO_2OP(OP##w, 4, int32_t, FN)

/*
 * "Long" operations where two half-sized inputs (taken from either the
 * top or the bottom of the input vector) produce a double-width result.
 * Here ESIZE, TYPE are for the input, and LESIZE, LTYPE for the output.
 */
#define DO_2OP_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \
    { \
        LTYPE *d = vd; \
        TYPE *n = vn, *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned le; \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
            LTYPE r = FN((LTYPE)n[H##ESIZE(le * 2 + TOP)], \
                         m[H##ESIZE(le * 2 + TOP)]); \
            mergemask(&d[H##LESIZE(le)], r, mask); \
        } \
        mve_advance_vpt(env); \
    }

#define DO_2OP_SAT(OP, ESIZE, TYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \
    { \
        TYPE *d = vd, *n = vn, *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        bool qc = false; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            bool sat = false; \
            TYPE r = FN(n[H##ESIZE(e)], m[H##ESIZE(e)], &sat); \
            mergemask(&d[H##ESIZE(e)], r, mask); \
            qc |= sat & mask & 1; \
        } \
        if (qc) { \
            env->vfp.qc[0] = qc; \
        } \
        mve_advance_vpt(env); \
    }

/* provide unsigned saturating 2-op helpers for all sizes */
#define DO_2OP_SAT_U(OP, FN) \
    DO_2OP_SAT(OP##b, 1, uint8_t, FN) \
    DO_2OP_SAT(OP##h, 2, uint16_t, FN) \
    DO_2OP_SAT(OP##w, 4, uint32_t, FN)

/* provide signed saturating 2-op helpers for all sizes */
#define DO_2OP_SAT_S(OP, FN) \
    DO_2OP_SAT(OP##b, 1, int8_t, FN) \
    DO_2OP_SAT(OP##h, 2, int16_t, FN) \
    DO_2OP_SAT(OP##w, 4, int32_t, FN)
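
/*
 * Note (added for clarity): FPSCR.QC is sticky, so the helpers above only
 * ever set env->vfp.qc[0], never clear it, and saturation in a lane whose
 * predicate bit is clear does not set QC.
 */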

#define DO_AND(N, M) ((N) & (M))
#define DO_BIC(N, M) ((N) & ~(M))
#define DO_ORR(N, M) ((N) | (M))
#define DO_ORN(N, M) ((N) | ~(M))
#define DO_EOR(N, M) ((N) ^ (M))

DO_2OP(vand, 8, uint64_t, DO_AND)
DO_2OP(vbic, 8, uint64_t, DO_BIC)
DO_2OP(vorr, 8, uint64_t, DO_ORR)
DO_2OP(vorn, 8, uint64_t, DO_ORN)
DO_2OP(veor, 8, uint64_t, DO_EOR)

#define DO_ADD(N, M) ((N) + (M))
#define DO_SUB(N, M) ((N) - (M))
#define DO_MUL(N, M) ((N) * (M))

DO_2OP_U(vadd, DO_ADD)
DO_2OP_U(vsub, DO_SUB)
DO_2OP_U(vmul, DO_MUL)

DO_2OP_L(vmullbsb, 0, 1, int8_t, 2, int16_t, DO_MUL)
DO_2OP_L(vmullbsh, 0, 2, int16_t, 4, int32_t, DO_MUL)
DO_2OP_L(vmullbsw, 0, 4, int32_t, 8, int64_t, DO_MUL)
DO_2OP_L(vmullbub, 0, 1, uint8_t, 2, uint16_t, DO_MUL)
DO_2OP_L(vmullbuh, 0, 2, uint16_t, 4, uint32_t, DO_MUL)
DO_2OP_L(vmullbuw, 0, 4, uint32_t, 8, uint64_t, DO_MUL)

DO_2OP_L(vmulltsb, 1, 1, int8_t, 2, int16_t, DO_MUL)
DO_2OP_L(vmulltsh, 1, 2, int16_t, 4, int32_t, DO_MUL)
DO_2OP_L(vmulltsw, 1, 4, int32_t, 8, int64_t, DO_MUL)
DO_2OP_L(vmulltub, 1, 1, uint8_t, 2, uint16_t, DO_MUL)
DO_2OP_L(vmulltuh, 1, 2, uint16_t, 4, uint32_t, DO_MUL)
DO_2OP_L(vmulltuw, 1, 4, uint32_t, 8, uint64_t, DO_MUL)

/*
 * Because the computation type is at least twice as large as required,
 * these work for both signed and unsigned source types.
 */
static inline uint8_t do_mulh_b(int32_t n, int32_t m)
{
    return (n * m) >> 8;
}

static inline uint16_t do_mulh_h(int32_t n, int32_t m)
{
    return (n * m) >> 16;
}

static inline uint32_t do_mulh_w(int64_t n, int64_t m)
{
    return (n * m) >> 32;
}

static inline uint8_t do_rmulh_b(int32_t n, int32_t m)
{
    return (n * m + (1U << 7)) >> 8;
}

static inline uint16_t do_rmulh_h(int32_t n, int32_t m)
{
    return (n * m + (1U << 15)) >> 16;
}

static inline uint32_t do_rmulh_w(int64_t n, int64_t m)
{
    return (n * m + (1U << 31)) >> 32;
}
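
/*
 * Worked example (illustrative): do_rmulh_b(65, 2) computes
 * (130 + 128) >> 8 = 1, whereas the truncating do_mulh_b(65, 2) gives
 * 130 >> 8 = 0; the added constant rounds the high half to nearest.
 */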

DO_2OP(vmulhsb, 1, int8_t, do_mulh_b)
DO_2OP(vmulhsh, 2, int16_t, do_mulh_h)
DO_2OP(vmulhsw, 4, int32_t, do_mulh_w)
DO_2OP(vmulhub, 1, uint8_t, do_mulh_b)
DO_2OP(vmulhuh, 2, uint16_t, do_mulh_h)
DO_2OP(vmulhuw, 4, uint32_t, do_mulh_w)

DO_2OP(vrmulhsb, 1, int8_t, do_rmulh_b)
DO_2OP(vrmulhsh, 2, int16_t, do_rmulh_h)
DO_2OP(vrmulhsw, 4, int32_t, do_rmulh_w)
DO_2OP(vrmulhub, 1, uint8_t, do_rmulh_b)
DO_2OP(vrmulhuh, 2, uint16_t, do_rmulh_h)
DO_2OP(vrmulhuw, 4, uint32_t, do_rmulh_w)

#define DO_MAX(N, M) ((N) >= (M) ? (N) : (M))
#define DO_MIN(N, M) ((N) >= (M) ? (M) : (N))

DO_2OP_S(vmaxs, DO_MAX)
DO_2OP_U(vmaxu, DO_MAX)
DO_2OP_S(vmins, DO_MIN)
DO_2OP_U(vminu, DO_MIN)

#define DO_ABD(N, M) ((N) >= (M) ? (N) - (M) : (M) - (N))

DO_2OP_S(vabds, DO_ABD)
DO_2OP_U(vabdu, DO_ABD)

static inline uint32_t do_vhadd_u(uint32_t n, uint32_t m)
{
    return ((uint64_t)n + m) >> 1;
}

static inline int32_t do_vhadd_s(int32_t n, int32_t m)
{
    return ((int64_t)n + m) >> 1;
}

static inline uint32_t do_vhsub_u(uint32_t n, uint32_t m)
{
    return ((uint64_t)n - m) >> 1;
}

static inline int32_t do_vhsub_s(int32_t n, int32_t m)
{
    return ((int64_t)n - m) >> 1;
}

DO_2OP_S(vhadds, do_vhadd_s)
DO_2OP_U(vhaddu, do_vhadd_u)
DO_2OP_S(vhsubs, do_vhsub_s)
DO_2OP_U(vhsubu, do_vhsub_u)
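
/*
 * Note the widening before the add in the halving helpers above:
 * do_vhadd_u(0xffffffff, 0xffffffff) must return 0xffffffff, which
 * requires the intermediate sum to be held in 64 bits.
 */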

#define DO_VSHLS(N, M) do_sqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, false, NULL)
#define DO_VSHLU(N, M) do_uqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, false, NULL)
#define DO_VRSHLS(N, M) do_sqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, true, NULL)
#define DO_VRSHLU(N, M) do_uqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, true, NULL)

DO_2OP_S(vshls, DO_VSHLS)
DO_2OP_U(vshlu, DO_VSHLU)
DO_2OP_S(vrshls, DO_VRSHLS)
DO_2OP_U(vrshlu, DO_VRSHLU)

#define DO_RHADD_S(N, M) (((int64_t)(N) + (M) + 1) >> 1)
#define DO_RHADD_U(N, M) (((uint64_t)(N) + (M) + 1) >> 1)

DO_2OP_S(vrhadds, DO_RHADD_S)
DO_2OP_U(vrhaddu, DO_RHADD_U)

static void do_vadc(CPUARMState *env, uint32_t *d, uint32_t *n, uint32_t *m,
                    uint32_t inv, uint32_t carry_in, bool update_flags)
{
    uint16_t mask = mve_element_mask(env);
    unsigned e;

    /* If any additions trigger, we will update flags. */
    if (mask & 0x1111) {
        update_flags = true;
    }

    for (e = 0; e < 16 / 4; e++, mask >>= 4) {
        uint64_t r = carry_in;
        r += n[H4(e)];
        r += m[H4(e)] ^ inv;
        if (mask & 1) {
            carry_in = r >> 32;
        }
        mergemask(&d[H4(e)], r, mask);
    }

    if (update_flags) {
        /* Store C, clear NZV. */
        env->vfp.xregs[ARM_VFP_FPSCR] &= ~FPCR_NZCV_MASK;
        env->vfp.xregs[ARM_VFP_FPSCR] |= carry_in * FPCR_C;
    }
    mve_advance_vpt(env);
}

void HELPER(mve_vadc)(CPUARMState *env, void *vd, void *vn, void *vm)
{
    bool carry_in = env->vfp.xregs[ARM_VFP_FPSCR] & FPCR_C;
    do_vadc(env, vd, vn, vm, 0, carry_in, false);
}

void HELPER(mve_vsbc)(CPUARMState *env, void *vd, void *vn, void *vm)
{
    bool carry_in = env->vfp.xregs[ARM_VFP_FPSCR] & FPCR_C;
    do_vadc(env, vd, vn, vm, -1, carry_in, false);
}

void HELPER(mve_vadci)(CPUARMState *env, void *vd, void *vn, void *vm)
{
    do_vadc(env, vd, vn, vm, 0, 0, true);
}

void HELPER(mve_vsbci)(CPUARMState *env, void *vd, void *vn, void *vm)
{
    do_vadc(env, vd, vn, vm, -1, 1, true);
}

#define DO_VCADD(OP, ESIZE, TYPE, FN0, FN1) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \
    { \
        TYPE *d = vd, *n = vn, *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        TYPE r[16 / ESIZE]; \
        /* Calculate all results first to avoid overwriting inputs */ \
        for (e = 0; e < 16 / ESIZE; e++) { \
            if (!(e & 1)) { \
                r[e] = FN0(n[H##ESIZE(e)], m[H##ESIZE(e + 1)]); \
            } else { \
                r[e] = FN1(n[H##ESIZE(e)], m[H##ESIZE(e - 1)]); \
            } \
        } \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            mergemask(&d[H##ESIZE(e)], r[e], mask); \
        } \
        mve_advance_vpt(env); \
    }

#define DO_VCADD_ALL(OP, FN0, FN1) \
    DO_VCADD(OP##b, 1, int8_t, FN0, FN1) \
    DO_VCADD(OP##h, 2, int16_t, FN0, FN1) \
    DO_VCADD(OP##w, 4, int32_t, FN0, FN1)

DO_VCADD_ALL(vcadd90, DO_SUB, DO_ADD)
DO_VCADD_ALL(vcadd270, DO_ADD, DO_SUB)
DO_VCADD_ALL(vhcadd90, do_vhsub_s, do_vhadd_s)
DO_VCADD_ALL(vhcadd270, do_vhadd_s, do_vhsub_s)

static inline int32_t do_sat_bhw(int64_t val, int64_t min, int64_t max, bool *s)
{
    if (val > max) {
        *s = true;
        return max;
    } else if (val < min) {
        *s = true;
        return min;
    }
    return val;
}

#define DO_SQADD_B(n, m, s) do_sat_bhw((int64_t)n + m, INT8_MIN, INT8_MAX, s)
#define DO_SQADD_H(n, m, s) do_sat_bhw((int64_t)n + m, INT16_MIN, INT16_MAX, s)
#define DO_SQADD_W(n, m, s) do_sat_bhw((int64_t)n + m, INT32_MIN, INT32_MAX, s)

#define DO_UQADD_B(n, m, s) do_sat_bhw((int64_t)n + m, 0, UINT8_MAX, s)
#define DO_UQADD_H(n, m, s) do_sat_bhw((int64_t)n + m, 0, UINT16_MAX, s)
#define DO_UQADD_W(n, m, s) do_sat_bhw((int64_t)n + m, 0, UINT32_MAX, s)

#define DO_SQSUB_B(n, m, s) do_sat_bhw((int64_t)n - m, INT8_MIN, INT8_MAX, s)
#define DO_SQSUB_H(n, m, s) do_sat_bhw((int64_t)n - m, INT16_MIN, INT16_MAX, s)
#define DO_SQSUB_W(n, m, s) do_sat_bhw((int64_t)n - m, INT32_MIN, INT32_MAX, s)

#define DO_UQSUB_B(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT8_MAX, s)
#define DO_UQSUB_H(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT16_MAX, s)
#define DO_UQSUB_W(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT32_MAX, s)

/*
 * For QDMULH and QRDMULH we simplify "double and shift by esize" into
 * "shift by esize-1", adjusting the QRDMULH rounding constant to match.
 */
#define DO_QDMULH_B(n, m, s) do_sat_bhw(((int64_t)n * m) >> 7, \
                                        INT8_MIN, INT8_MAX, s)
#define DO_QDMULH_H(n, m, s) do_sat_bhw(((int64_t)n * m) >> 15, \
                                        INT16_MIN, INT16_MAX, s)
#define DO_QDMULH_W(n, m, s) do_sat_bhw(((int64_t)n * m) >> 31, \
                                        INT32_MIN, INT32_MAX, s)

#define DO_QRDMULH_B(n, m, s) do_sat_bhw(((int64_t)n * m + (1 << 6)) >> 7, \
                                         INT8_MIN, INT8_MAX, s)
#define DO_QRDMULH_H(n, m, s) do_sat_bhw(((int64_t)n * m + (1 << 14)) >> 15, \
                                         INT16_MIN, INT16_MAX, s)
#define DO_QRDMULH_W(n, m, s) do_sat_bhw(((int64_t)n * m + (1 << 30)) >> 31, \
                                         INT32_MIN, INT32_MAX, s)

DO_2OP_SAT(vqdmulhb, 1, int8_t, DO_QDMULH_B)
DO_2OP_SAT(vqdmulhh, 2, int16_t, DO_QDMULH_H)
DO_2OP_SAT(vqdmulhw, 4, int32_t, DO_QDMULH_W)

DO_2OP_SAT(vqrdmulhb, 1, int8_t, DO_QRDMULH_B)
DO_2OP_SAT(vqrdmulhh, 2, int16_t, DO_QRDMULH_H)
DO_2OP_SAT(vqrdmulhw, 4, int32_t, DO_QRDMULH_W)

DO_2OP_SAT(vqaddub, 1, uint8_t, DO_UQADD_B)
DO_2OP_SAT(vqadduh, 2, uint16_t, DO_UQADD_H)
DO_2OP_SAT(vqadduw, 4, uint32_t, DO_UQADD_W)
DO_2OP_SAT(vqaddsb, 1, int8_t, DO_SQADD_B)
DO_2OP_SAT(vqaddsh, 2, int16_t, DO_SQADD_H)
DO_2OP_SAT(vqaddsw, 4, int32_t, DO_SQADD_W)

DO_2OP_SAT(vqsubub, 1, uint8_t, DO_UQSUB_B)
DO_2OP_SAT(vqsubuh, 2, uint16_t, DO_UQSUB_H)
DO_2OP_SAT(vqsubuw, 4, uint32_t, DO_UQSUB_W)
DO_2OP_SAT(vqsubsb, 1, int8_t, DO_SQSUB_B)
DO_2OP_SAT(vqsubsh, 2, int16_t, DO_SQSUB_H)
DO_2OP_SAT(vqsubsw, 4, int32_t, DO_SQSUB_W)

/*
 * This wrapper fixes up the impedance mismatch between do_sqrshl_bhs()
 * and friends wanting a uint32_t* saturation flag and our needing a bool*.
 */
#define WRAP_QRSHL_HELPER(FN, N, M, ROUND, satp) \
    ({ \
        uint32_t su32 = 0; \
        typeof(N) r = FN(N, (int8_t)(M), sizeof(N) * 8, ROUND, &su32); \
        if (su32) { \
            *satp = true; \
        } \
        r; \
    })

#define DO_SQSHL_OP(N, M, satp) \
    WRAP_QRSHL_HELPER(do_sqrshl_bhs, N, M, false, satp)
#define DO_UQSHL_OP(N, M, satp) \
    WRAP_QRSHL_HELPER(do_uqrshl_bhs, N, M, false, satp)
#define DO_SQRSHL_OP(N, M, satp) \
    WRAP_QRSHL_HELPER(do_sqrshl_bhs, N, M, true, satp)
#define DO_UQRSHL_OP(N, M, satp) \
    WRAP_QRSHL_HELPER(do_uqrshl_bhs, N, M, true, satp)
#define DO_SUQSHL_OP(N, M, satp) \
    WRAP_QRSHL_HELPER(do_suqrshl_bhs, N, M, false, satp)

DO_2OP_SAT_S(vqshls, DO_SQSHL_OP)
DO_2OP_SAT_U(vqshlu, DO_UQSHL_OP)
DO_2OP_SAT_S(vqrshls, DO_SQRSHL_OP)
DO_2OP_SAT_U(vqrshlu, DO_UQRSHL_OP)

/*
 * Multiply add dual returning high half.
 * The 'FN' here takes four inputs - two values to be multiplied together,
 * and a pair of values to be added or subtracted with the multiply result;
 * the ROUND argument says whether or not to perform the rounding;
 * the XCHG argument selects the "exchange" forms of the insn, where the
 * odd/even pairing of the elements taken from the two inputs is swapped.
 */
#define DO_VQDMLADH_OP(OP, ESIZE, TYPE, XCHG, ROUND, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
                                void *vm) \
    { \
        TYPE *d = vd, *n = vn, *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        bool qc = false; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            bool sat = false; \
            if ((e & 1) == XCHG) { \
                TYPE r = FN(n[H##ESIZE(e)], \
                            m[H##ESIZE(e - XCHG)], \
                            n[H##ESIZE(e + (1 - 2 * XCHG))], \
                            m[H##ESIZE(e + (1 - XCHG))], \
                            ROUND, &sat); \
                mergemask(&d[H##ESIZE(e)], r, mask); \
                qc |= sat & mask & 1; \
            } \
        } \
        if (qc) { \
            env->vfp.qc[0] = qc; \
        } \
        mve_advance_vpt(env); \
    }

static int8_t do_vqdmladh_b(int8_t a, int8_t b, int8_t c, int8_t d,
                            int round, bool *sat)
{
    int64_t r = ((int64_t)a * b + (int64_t)c * d) * 2 + (round << 7);
    return do_sat_bhw(r, INT16_MIN, INT16_MAX, sat) >> 8;
}

static int16_t do_vqdmladh_h(int16_t a, int16_t b, int16_t c, int16_t d,
                             int round, bool *sat)
{
    int64_t r = ((int64_t)a * b + (int64_t)c * d) * 2 + (round << 15);
    return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat) >> 16;
}

static int32_t do_vqdmladh_w(int32_t a, int32_t b, int32_t c, int32_t d,
                             int round, bool *sat)
{
    int64_t m1 = (int64_t)a * b;
    int64_t m2 = (int64_t)c * d;
    int64_t r;

    /*
     * Architecturally we should do the entire add, double, round
     * and then check for saturation. We do three saturating adds,
     * but we need to be careful about the order. If the first
     * m1 + m2 saturates then it's impossible for the *2+rc to
     * bring it back into the non-saturated range. However, if
     * m1 + m2 is negative then it's possible that the doubling
     * would overflow and the addition of the rounding constant
     * would then bring it back into range. So we add half the
     * rounding constant before doubling rather than adding the
     * rounding constant after the doubling.
     */
    if (sadd64_overflow(m1, m2, &r) ||
        sadd64_overflow(r, (round << 30), &r) ||
        sadd64_overflow(r, r, &r)) {
        *sat = true;
        return r < 0 ? INT32_MAX : INT32_MIN;
    }
    return r >> 32;
}

static int8_t do_vqdmlsdh_b(int8_t a, int8_t b, int8_t c, int8_t d,
                            int round, bool *sat)
{
    int64_t r = ((int64_t)a * b - (int64_t)c * d) * 2 + (round << 7);
    return do_sat_bhw(r, INT16_MIN, INT16_MAX, sat) >> 8;
}

static int16_t do_vqdmlsdh_h(int16_t a, int16_t b, int16_t c, int16_t d,
                             int round, bool *sat)
{
    int64_t r = ((int64_t)a * b - (int64_t)c * d) * 2 + (round << 15);
    return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat) >> 16;
}

static int32_t do_vqdmlsdh_w(int32_t a, int32_t b, int32_t c, int32_t d,
                             int round, bool *sat)
{
    int64_t m1 = (int64_t)a * b;
    int64_t m2 = (int64_t)c * d;
    int64_t r;

    /* The same ordering issue as in do_vqdmladh_w applies here too */
    if (ssub64_overflow(m1, m2, &r) ||
        sadd64_overflow(r, (round << 30), &r) ||
        sadd64_overflow(r, r, &r)) {
        *sat = true;
        return r < 0 ? INT32_MAX : INT32_MIN;
    }
    return r >> 32;
}
848
849DO_VQDMLADH_OP(vqdmladhb, 1, int8_t, 0, 0, do_vqdmladh_b)
850DO_VQDMLADH_OP(vqdmladhh, 2, int16_t, 0, 0, do_vqdmladh_h)
851DO_VQDMLADH_OP(vqdmladhw, 4, int32_t, 0, 0, do_vqdmladh_w)
852DO_VQDMLADH_OP(vqdmladhxb, 1, int8_t, 1, 0, do_vqdmladh_b)
853DO_VQDMLADH_OP(vqdmladhxh, 2, int16_t, 1, 0, do_vqdmladh_h)
854DO_VQDMLADH_OP(vqdmladhxw, 4, int32_t, 1, 0, do_vqdmladh_w)
855
856DO_VQDMLADH_OP(vqrdmladhb, 1, int8_t, 0, 1, do_vqdmladh_b)
857DO_VQDMLADH_OP(vqrdmladhh, 2, int16_t, 0, 1, do_vqdmladh_h)
858DO_VQDMLADH_OP(vqrdmladhw, 4, int32_t, 0, 1, do_vqdmladh_w)
859DO_VQDMLADH_OP(vqrdmladhxb, 1, int8_t, 1, 1, do_vqdmladh_b)
860DO_VQDMLADH_OP(vqrdmladhxh, 2, int16_t, 1, 1, do_vqdmladh_h)
861DO_VQDMLADH_OP(vqrdmladhxw, 4, int32_t, 1, 1, do_vqdmladh_w)
862
863DO_VQDMLADH_OP(vqdmlsdhb, 1, int8_t, 0, 0, do_vqdmlsdh_b)
864DO_VQDMLADH_OP(vqdmlsdhh, 2, int16_t, 0, 0, do_vqdmlsdh_h)
865DO_VQDMLADH_OP(vqdmlsdhw, 4, int32_t, 0, 0, do_vqdmlsdh_w)
866DO_VQDMLADH_OP(vqdmlsdhxb, 1, int8_t, 1, 0, do_vqdmlsdh_b)
867DO_VQDMLADH_OP(vqdmlsdhxh, 2, int16_t, 1, 0, do_vqdmlsdh_h)
868DO_VQDMLADH_OP(vqdmlsdhxw, 4, int32_t, 1, 0, do_vqdmlsdh_w)
869
870DO_VQDMLADH_OP(vqrdmlsdhb, 1, int8_t, 0, 1, do_vqdmlsdh_b)
871DO_VQDMLADH_OP(vqrdmlsdhh, 2, int16_t, 0, 1, do_vqdmlsdh_h)
872DO_VQDMLADH_OP(vqrdmlsdhw, 4, int32_t, 0, 1, do_vqdmlsdh_w)
873DO_VQDMLADH_OP(vqrdmlsdhxb, 1, int8_t, 1, 1, do_vqdmlsdh_b)
874DO_VQDMLADH_OP(vqrdmlsdhxh, 2, int16_t, 1, 1, do_vqdmlsdh_h)
875DO_VQDMLADH_OP(vqrdmlsdhxw, 4, int32_t, 1, 1, do_vqdmlsdh_w)

#define DO_2OP_SCALAR(OP, ESIZE, TYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
                                uint32_t rm) \
    { \
        TYPE *d = vd, *n = vn; \
        TYPE m = rm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            mergemask(&d[H##ESIZE(e)], FN(n[H##ESIZE(e)], m), mask); \
        } \
        mve_advance_vpt(env); \
    }

#define DO_2OP_SAT_SCALAR(OP, ESIZE, TYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
                                uint32_t rm) \
    { \
        TYPE *d = vd, *n = vn; \
        TYPE m = rm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        bool qc = false; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            bool sat = false; \
            mergemask(&d[H##ESIZE(e)], FN(n[H##ESIZE(e)], m, &sat), \
                      mask); \
            qc |= sat & mask & 1; \
        } \
        if (qc) { \
            env->vfp.qc[0] = qc; \
        } \
        mve_advance_vpt(env); \
    }

/* provide unsigned and signed 2-op scalar helpers for all sizes */
#define DO_2OP_SCALAR_U(OP, FN) \
    DO_2OP_SCALAR(OP##b, 1, uint8_t, FN) \
    DO_2OP_SCALAR(OP##h, 2, uint16_t, FN) \
    DO_2OP_SCALAR(OP##w, 4, uint32_t, FN)
#define DO_2OP_SCALAR_S(OP, FN) \
    DO_2OP_SCALAR(OP##b, 1, int8_t, FN) \
    DO_2OP_SCALAR(OP##h, 2, int16_t, FN) \
    DO_2OP_SCALAR(OP##w, 4, int32_t, FN)

DO_2OP_SCALAR_U(vadd_scalar, DO_ADD)
DO_2OP_SCALAR_U(vsub_scalar, DO_SUB)
DO_2OP_SCALAR_U(vmul_scalar, DO_MUL)
DO_2OP_SCALAR_S(vhadds_scalar, do_vhadd_s)
DO_2OP_SCALAR_U(vhaddu_scalar, do_vhadd_u)
DO_2OP_SCALAR_S(vhsubs_scalar, do_vhsub_s)
DO_2OP_SCALAR_U(vhsubu_scalar, do_vhsub_u)

DO_2OP_SAT_SCALAR(vqaddu_scalarb, 1, uint8_t, DO_UQADD_B)
DO_2OP_SAT_SCALAR(vqaddu_scalarh, 2, uint16_t, DO_UQADD_H)
DO_2OP_SAT_SCALAR(vqaddu_scalarw, 4, uint32_t, DO_UQADD_W)
DO_2OP_SAT_SCALAR(vqadds_scalarb, 1, int8_t, DO_SQADD_B)
DO_2OP_SAT_SCALAR(vqadds_scalarh, 2, int16_t, DO_SQADD_H)
DO_2OP_SAT_SCALAR(vqadds_scalarw, 4, int32_t, DO_SQADD_W)

DO_2OP_SAT_SCALAR(vqsubu_scalarb, 1, uint8_t, DO_UQSUB_B)
DO_2OP_SAT_SCALAR(vqsubu_scalarh, 2, uint16_t, DO_UQSUB_H)
DO_2OP_SAT_SCALAR(vqsubu_scalarw, 4, uint32_t, DO_UQSUB_W)
DO_2OP_SAT_SCALAR(vqsubs_scalarb, 1, int8_t, DO_SQSUB_B)
DO_2OP_SAT_SCALAR(vqsubs_scalarh, 2, int16_t, DO_SQSUB_H)
DO_2OP_SAT_SCALAR(vqsubs_scalarw, 4, int32_t, DO_SQSUB_W)

DO_2OP_SAT_SCALAR(vqdmulh_scalarb, 1, int8_t, DO_QDMULH_B)
DO_2OP_SAT_SCALAR(vqdmulh_scalarh, 2, int16_t, DO_QDMULH_H)
DO_2OP_SAT_SCALAR(vqdmulh_scalarw, 4, int32_t, DO_QDMULH_W)
DO_2OP_SAT_SCALAR(vqrdmulh_scalarb, 1, int8_t, DO_QRDMULH_B)
DO_2OP_SAT_SCALAR(vqrdmulh_scalarh, 2, int16_t, DO_QRDMULH_H)
DO_2OP_SAT_SCALAR(vqrdmulh_scalarw, 4, int32_t, DO_QRDMULH_W)

/*
 * Long saturating scalar ops. As with DO_2OP_L, TYPE and ESIZE are for the
 * input (smaller) type and LESIZE, LTYPE for the output (long) type.
 * SATMASK specifies which bits of the predicate mask matter for determining
 * whether to propagate a saturation indication into FPSCR.QC -- for
 * the 16x16->32 case we must check only the bit corresponding to the T or B
 * half that we used, but for the 32x32->64 case we propagate if the mask
 * bit is set for either half.
 */
#define DO_2OP_SAT_SCALAR_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN, SATMASK) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
                                uint32_t rm) \
    { \
        LTYPE *d = vd; \
        TYPE *n = vn; \
        TYPE m = rm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned le; \
        bool qc = false; \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
            bool sat = false; \
            LTYPE r = FN((LTYPE)n[H##ESIZE(le * 2 + TOP)], m, &sat); \
            mergemask(&d[H##LESIZE(le)], r, mask); \
            qc |= sat && (mask & SATMASK); \
        } \
        if (qc) { \
            env->vfp.qc[0] = qc; \
        } \
        mve_advance_vpt(env); \
    }

static inline int32_t do_qdmullh(int16_t n, int16_t m, bool *sat)
{
    int64_t r = ((int64_t)n * m) * 2;
    return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat);
}

static inline int64_t do_qdmullw(int32_t n, int32_t m, bool *sat)
{
    /* The multiply can't overflow, but the doubling might */
    int64_t r = (int64_t)n * m;
    if (r > INT64_MAX / 2) {
        *sat = true;
        return INT64_MAX;
    } else if (r < INT64_MIN / 2) {
        *sat = true;
        return INT64_MIN;
    } else {
        return r * 2;
    }
}
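
/*
 * Example (illustrative): do_qdmullw(INT32_MIN, INT32_MIN) produces
 * 2^62 before doubling, which is above INT64_MAX / 2, so the result
 * saturates to INT64_MAX and *sat is set.
 */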

#define SATMASK16B 1
#define SATMASK16T (1 << 2)
#define SATMASK32 ((1 << 4) | 1)

DO_2OP_SAT_SCALAR_L(vqdmullb_scalarh, 0, 2, int16_t, 4, int32_t, \
                    do_qdmullh, SATMASK16B)
DO_2OP_SAT_SCALAR_L(vqdmullb_scalarw, 0, 4, int32_t, 8, int64_t, \
                    do_qdmullw, SATMASK32)
DO_2OP_SAT_SCALAR_L(vqdmullt_scalarh, 1, 2, int16_t, 4, int32_t, \
                    do_qdmullh, SATMASK16T)
DO_2OP_SAT_SCALAR_L(vqdmullt_scalarw, 1, 4, int32_t, 8, int64_t, \
                    do_qdmullw, SATMASK32)

/*
 * Long saturating ops
 */
#define DO_2OP_SAT_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN, SATMASK) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
                                void *vm) \
    { \
        LTYPE *d = vd; \
        TYPE *n = vn, *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned le; \
        bool qc = false; \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
            bool sat = false; \
            LTYPE op1 = n[H##ESIZE(le * 2 + TOP)]; \
            LTYPE op2 = m[H##ESIZE(le * 2 + TOP)]; \
            mergemask(&d[H##LESIZE(le)], FN(op1, op2, &sat), mask); \
            qc |= sat && (mask & SATMASK); \
        } \
        if (qc) { \
            env->vfp.qc[0] = qc; \
        } \
        mve_advance_vpt(env); \
    }

DO_2OP_SAT_L(vqdmullbh, 0, 2, int16_t, 4, int32_t, do_qdmullh, SATMASK16B)
DO_2OP_SAT_L(vqdmullbw, 0, 4, int32_t, 8, int64_t, do_qdmullw, SATMASK32)
DO_2OP_SAT_L(vqdmullth, 1, 2, int16_t, 4, int32_t, do_qdmullh, SATMASK16T)
DO_2OP_SAT_L(vqdmulltw, 1, 4, int32_t, 8, int64_t, do_qdmullw, SATMASK32)

static inline uint32_t do_vbrsrb(uint32_t n, uint32_t m)
{
    m &= 0xff;
    if (m == 0) {
        return 0;
    }
    n = revbit8(n);
    if (m < 8) {
        n >>= 8 - m;
    }
    return n;
}

static inline uint32_t do_vbrsrh(uint32_t n, uint32_t m)
{
    m &= 0xff;
    if (m == 0) {
        return 0;
    }
    n = revbit16(n);
    if (m < 16) {
        n >>= 16 - m;
    }
    return n;
}

static inline uint32_t do_vbrsrw(uint32_t n, uint32_t m)
{
    m &= 0xff;
    if (m == 0) {
        return 0;
    }
    n = revbit32(n);
    if (m < 32) {
        n >>= 32 - m;
    }
    return n;
}

DO_2OP_SCALAR(vbrsrb, 1, uint8_t, do_vbrsrb)
DO_2OP_SCALAR(vbrsrh, 2, uint16_t, do_vbrsrh)
DO_2OP_SCALAR(vbrsrw, 4, uint32_t, do_vbrsrw)
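
/*
 * Example (illustrative): do_vbrsrb(1, 4) returns 8 -- the low four bits
 * of the input are bit-reversed (0b0001 -> 0b1000) and any higher input
 * bits are shifted out.
 */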

/*
 * Multiply add long dual accumulate ops.
 */
#define DO_LDAV(OP, ESIZE, TYPE, XCHG, EVENACC, ODDACC) \
    uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \
                                    void *vm, uint64_t a) \
    { \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        TYPE *n = vn, *m = vm; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            if (mask & 1) { \
                if (e & 1) { \
                    a ODDACC \
                        (int64_t)n[H##ESIZE(e - 1 * XCHG)] * m[H##ESIZE(e)]; \
                } else { \
                    a EVENACC \
                        (int64_t)n[H##ESIZE(e + 1 * XCHG)] * m[H##ESIZE(e)]; \
                } \
            } \
        } \
        mve_advance_vpt(env); \
        return a; \
    }

DO_LDAV(vmlaldavsh, 2, int16_t, false, +=, +=)
DO_LDAV(vmlaldavxsh, 2, int16_t, true, +=, +=)
DO_LDAV(vmlaldavsw, 4, int32_t, false, +=, +=)
DO_LDAV(vmlaldavxsw, 4, int32_t, true, +=, +=)

DO_LDAV(vmlaldavuh, 2, uint16_t, false, +=, +=)
DO_LDAV(vmlaldavuw, 4, uint32_t, false, +=, +=)

DO_LDAV(vmlsldavsh, 2, int16_t, false, +=, -=)
DO_LDAV(vmlsldavxsh, 2, int16_t, true, +=, -=)
DO_LDAV(vmlsldavsw, 4, int32_t, false, +=, -=)
DO_LDAV(vmlsldavxsw, 4, int32_t, true, +=, -=)

/*
 * Rounding multiply add long dual accumulate high. In the pseudocode
 * this is implemented with a 72-bit internal accumulator value of which
 * the top 64 bits are returned. We optimize this to avoid having to
 * use 128-bit arithmetic -- we can do this because the accumulator
 * is squashed back into 64 bits after each beat.
 */
#define DO_LDAVH(OP, TYPE, LTYPE, XCHG, SUB) \
    uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \
                                    void *vm, uint64_t a) \
    { \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        TYPE *n = vn, *m = vm; \
        for (e = 0; e < 16 / 4; e++, mask >>= 4) { \
            if (mask & 1) { \
                LTYPE mul; \
                if (e & 1) { \
                    mul = (LTYPE)n[H4(e - 1 * XCHG)] * m[H4(e)]; \
                    if (SUB) { \
                        mul = -mul; \
                    } \
                } else { \
                    mul = (LTYPE)n[H4(e + 1 * XCHG)] * m[H4(e)]; \
                } \
                mul = (mul >> 8) + ((mul >> 7) & 1); \
                a += mul; \
            } \
        } \
        mve_advance_vpt(env); \
        return a; \
    }

DO_LDAVH(vrmlaldavhsw, int32_t, int64_t, false, false)
DO_LDAVH(vrmlaldavhxsw, int32_t, int64_t, true, false)

DO_LDAVH(vrmlaldavhuw, uint32_t, uint64_t, false, false)

DO_LDAVH(vrmlsldavhsw, int32_t, int64_t, false, true)
DO_LDAVH(vrmlsldavhxsw, int32_t, int64_t, true, true)

/* Vector add across vector */
#define DO_VADDV(OP, ESIZE, TYPE) \
    uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \
                                    uint32_t ra) \
    { \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        TYPE *m = vm; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            if (mask & 1) { \
                ra += m[H##ESIZE(e)]; \
            } \
        } \
        mve_advance_vpt(env); \
        return ra; \
    }

/* The signed variants need a signed TYPE so that elements sign-extend. */
DO_VADDV(vaddvsb, 1, int8_t)
DO_VADDV(vaddvsh, 2, int16_t)
DO_VADDV(vaddvsw, 4, int32_t)
DO_VADDV(vaddvub, 1, uint8_t)
DO_VADDV(vaddvuh, 2, uint16_t)
DO_VADDV(vaddvuw, 4, uint32_t)

#define DO_VADDLV(OP, TYPE, LTYPE) \
    uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \
                                    uint64_t ra) \
    { \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        TYPE *m = vm; \
        for (e = 0; e < 16 / 4; e++, mask >>= 4) { \
            if (mask & 1) { \
                ra += (LTYPE)m[H4(e)]; \
            } \
        } \
        mve_advance_vpt(env); \
        return ra; \
    }

DO_VADDLV(vaddlv_s, int32_t, int64_t)
DO_VADDLV(vaddlv_u, uint32_t, uint64_t)

/* Shifts by immediate */
#define DO_2SHIFT(OP, ESIZE, TYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
                                void *vm, uint32_t shift) \
    { \
        TYPE *d = vd, *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            mergemask(&d[H##ESIZE(e)], \
                      FN(m[H##ESIZE(e)], shift), mask); \
        } \
        mve_advance_vpt(env); \
    }

#define DO_2SHIFT_SAT(OP, ESIZE, TYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
                                void *vm, uint32_t shift) \
    { \
        TYPE *d = vd, *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        bool qc = false; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            bool sat = false; \
            mergemask(&d[H##ESIZE(e)], \
                      FN(m[H##ESIZE(e)], shift, &sat), mask); \
            qc |= sat & mask & 1; \
        } \
        if (qc) { \
            env->vfp.qc[0] = qc; \
        } \
        mve_advance_vpt(env); \
    }

/* provide unsigned and signed 2-op shift helpers for all sizes */
#define DO_2SHIFT_U(OP, FN) \
    DO_2SHIFT(OP##b, 1, uint8_t, FN) \
    DO_2SHIFT(OP##h, 2, uint16_t, FN) \
    DO_2SHIFT(OP##w, 4, uint32_t, FN)
#define DO_2SHIFT_S(OP, FN) \
    DO_2SHIFT(OP##b, 1, int8_t, FN) \
    DO_2SHIFT(OP##h, 2, int16_t, FN) \
    DO_2SHIFT(OP##w, 4, int32_t, FN)

#define DO_2SHIFT_SAT_U(OP, FN) \
    DO_2SHIFT_SAT(OP##b, 1, uint8_t, FN) \
    DO_2SHIFT_SAT(OP##h, 2, uint16_t, FN) \
    DO_2SHIFT_SAT(OP##w, 4, uint32_t, FN)
#define DO_2SHIFT_SAT_S(OP, FN) \
    DO_2SHIFT_SAT(OP##b, 1, int8_t, FN) \
    DO_2SHIFT_SAT(OP##h, 2, int16_t, FN) \
    DO_2SHIFT_SAT(OP##w, 4, int32_t, FN)

DO_2SHIFT_U(vshli_u, DO_VSHLU)
DO_2SHIFT_S(vshli_s, DO_VSHLS)
DO_2SHIFT_SAT_U(vqshli_u, DO_UQSHL_OP)
DO_2SHIFT_SAT_S(vqshli_s, DO_SQSHL_OP)
DO_2SHIFT_SAT_S(vqshlui_s, DO_SUQSHL_OP)
DO_2SHIFT_U(vrshli_u, DO_VRSHLU)
DO_2SHIFT_S(vrshli_s, DO_VRSHLS)

/* Shift-and-insert; we always work in 64-bit units */
#define DO_2SHIFT_INSERT(OP, ESIZE, SHIFTFN, MASKFN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
                                void *vm, uint32_t shift) \
    { \
        uint64_t *d = vd, *m = vm; \
        uint16_t mask; \
        uint64_t shiftmask; \
        unsigned e; \
        if (shift == 0 || shift == ESIZE * 8) { \
            /* \
             * Only VSLI can shift by 0; only VSRI can shift by <dt>. \
             * The generic logic would give the right answer for 0 but \
             * fails for <dt>. \
             */ \
            goto done; \
        } \
        assert(shift < ESIZE * 8); \
        mask = mve_element_mask(env); \
        /* ESIZE / 2 maps element sizes 1, 2, 4 to MO_8, MO_16, MO_32 */ \
        shiftmask = dup_const(ESIZE / 2, MASKFN(ESIZE * 8, shift)); \
        for (e = 0; e < 16 / 8; e++, mask >>= 8) { \
            uint64_t r = (SHIFTFN(m[H8(e)], shift) & shiftmask) | \
                         (d[H8(e)] & ~shiftmask); \
            mergemask(&d[H8(e)], r, mask); \
        } \
done: \
        mve_advance_vpt(env); \
    }

#define DO_SHL(N, SHIFT) ((N) << (SHIFT))
#define DO_SHR(N, SHIFT) ((N) >> (SHIFT))
#define SHL_MASK(EBITS, SHIFT) MAKE_64BIT_MASK((SHIFT), (EBITS) - (SHIFT))
#define SHR_MASK(EBITS, SHIFT) MAKE_64BIT_MASK(0, (EBITS) - (SHIFT))

DO_2SHIFT_INSERT(vsrib, 1, DO_SHR, SHR_MASK)
DO_2SHIFT_INSERT(vsrih, 2, DO_SHR, SHR_MASK)
DO_2SHIFT_INSERT(vsriw, 4, DO_SHR, SHR_MASK)
DO_2SHIFT_INSERT(vslib, 1, DO_SHL, SHL_MASK)
DO_2SHIFT_INSERT(vslih, 2, DO_SHL, SHL_MASK)
DO_2SHIFT_INSERT(vsliw, 4, DO_SHL, SHL_MASK)

/*
 * Long shifts taking half-sized inputs from top or bottom of the input
 * vector and producing a double-width result. ESIZE, TYPE are for
 * the input, and LESIZE, LTYPE for the output.
 * Unlike the normal shift helpers, we do not handle negative shift counts,
 * because the long shift is strictly left-only.
 */
#define DO_VSHLL(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
                                void *vm, uint32_t shift) \
    { \
        LTYPE *d = vd; \
        TYPE *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned le; \
        assert(shift <= 16); \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
            LTYPE r = (LTYPE)m[H##ESIZE(le * 2 + TOP)] << shift; \
            mergemask(&d[H##LESIZE(le)], r, mask); \
        } \
        mve_advance_vpt(env); \
    }

#define DO_VSHLL_ALL(OP, TOP) \
    DO_VSHLL(OP##sb, TOP, 1, int8_t, 2, int16_t) \
    DO_VSHLL(OP##ub, TOP, 1, uint8_t, 2, uint16_t) \
    DO_VSHLL(OP##sh, TOP, 2, int16_t, 4, int32_t) \
    DO_VSHLL(OP##uh, TOP, 2, uint16_t, 4, uint32_t)

DO_VSHLL_ALL(vshllb, false)
DO_VSHLL_ALL(vshllt, true)

/*
 * Narrowing right shifts, taking a double sized input, shifting it
 * and putting the result in either the top or bottom half of the output.
 * ESIZE, TYPE are the output, and LESIZE, LTYPE the input.
 */
#define DO_VSHRN(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
                                void *vm, uint32_t shift) \
    { \
        LTYPE *m = vm; \
        TYPE *d = vd; \
        uint16_t mask = mve_element_mask(env); \
        unsigned le; \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
            TYPE r = FN(m[H##LESIZE(le)], shift); \
            mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask); \
        } \
        mve_advance_vpt(env); \
    }

#define DO_VSHRN_ALL(OP, FN) \
    DO_VSHRN(OP##bb, false, 1, uint8_t, 2, uint16_t, FN) \
    DO_VSHRN(OP##bh, false, 2, uint16_t, 4, uint32_t, FN) \
    DO_VSHRN(OP##tb, true, 1, uint8_t, 2, uint16_t, FN) \
    DO_VSHRN(OP##th, true, 2, uint16_t, 4, uint32_t, FN)

static inline uint64_t do_urshr(uint64_t x, unsigned sh)
{
    if (likely(sh < 64)) {
        return (x >> sh) + ((x >> (sh - 1)) & 1);
    } else if (sh == 64) {
        return x >> 63;
    } else {
        return 0;
    }
}

static inline int64_t do_srshr(int64_t x, unsigned sh)
{
    if (likely(sh < 64)) {
        return (x >> sh) + ((x >> (sh - 1)) & 1);
    } else {
        /* Rounding the sign bit always produces 0. */
        return 0;
    }
}
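
/*
 * Example (illustrative): do_urshr(5, 1) = (5 >> 1) + ((5 >> 0) & 1) = 3,
 * i.e. 5/2 = 2.5 rounded to nearest, with ties going away from zero.
 */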

DO_VSHRN_ALL(vshrn, DO_SHR)
DO_VSHRN_ALL(vrshrn, do_urshr)

static inline int32_t do_sat_bhs(int64_t val, int64_t min, int64_t max,
                                 bool *satp)
{
    if (val > max) {
        *satp = true;
        return max;
    } else if (val < min) {
        *satp = true;
        return min;
    } else {
        return val;
    }
}

#define DO_VSHRN_SAT(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
                                void *vm, uint32_t shift) \
    { \
        LTYPE *m = vm; \
        TYPE *d = vd; \
        uint16_t mask = mve_element_mask(env); \
        bool qc = false; \
        unsigned le; \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
            bool sat = false; \
            TYPE r = FN(m[H##LESIZE(le)], shift, &sat); \
            mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask); \
            qc |= sat && (mask & 1 << (TOP * ESIZE)); \
        } \
        if (qc) { \
            env->vfp.qc[0] = qc; \
        } \
        mve_advance_vpt(env); \
    }

#define DO_VSHRN_SAT_UB(BOP, TOP, FN) \
    DO_VSHRN_SAT(BOP, false, 1, uint8_t, 2, uint16_t, FN) \
    DO_VSHRN_SAT(TOP, true, 1, uint8_t, 2, uint16_t, FN)

#define DO_VSHRN_SAT_UH(BOP, TOP, FN) \
    DO_VSHRN_SAT(BOP, false, 2, uint16_t, 4, uint32_t, FN) \
    DO_VSHRN_SAT(TOP, true, 2, uint16_t, 4, uint32_t, FN)

#define DO_VSHRN_SAT_SB(BOP, TOP, FN) \
    DO_VSHRN_SAT(BOP, false, 1, int8_t, 2, int16_t, FN) \
    DO_VSHRN_SAT(TOP, true, 1, int8_t, 2, int16_t, FN)

#define DO_VSHRN_SAT_SH(BOP, TOP, FN) \
    DO_VSHRN_SAT(BOP, false, 2, int16_t, 4, int32_t, FN) \
    DO_VSHRN_SAT(TOP, true, 2, int16_t, 4, int32_t, FN)

#define DO_SHRN_SB(N, M, SATP) \
    do_sat_bhs((int64_t)(N) >> (M), INT8_MIN, INT8_MAX, SATP)
#define DO_SHRN_UB(N, M, SATP) \
    do_sat_bhs((uint64_t)(N) >> (M), 0, UINT8_MAX, SATP)
#define DO_SHRUN_B(N, M, SATP) \
    do_sat_bhs((int64_t)(N) >> (M), 0, UINT8_MAX, SATP)

#define DO_SHRN_SH(N, M, SATP) \
    do_sat_bhs((int64_t)(N) >> (M), INT16_MIN, INT16_MAX, SATP)
#define DO_SHRN_UH(N, M, SATP) \
    do_sat_bhs((uint64_t)(N) >> (M), 0, UINT16_MAX, SATP)
#define DO_SHRUN_H(N, M, SATP) \
    do_sat_bhs((int64_t)(N) >> (M), 0, UINT16_MAX, SATP)

#define DO_RSHRN_SB(N, M, SATP) \
    do_sat_bhs(do_srshr(N, M), INT8_MIN, INT8_MAX, SATP)
#define DO_RSHRN_UB(N, M, SATP) \
    do_sat_bhs(do_urshr(N, M), 0, UINT8_MAX, SATP)
#define DO_RSHRUN_B(N, M, SATP) \
    do_sat_bhs(do_srshr(N, M), 0, UINT8_MAX, SATP)

#define DO_RSHRN_SH(N, M, SATP) \
    do_sat_bhs(do_srshr(N, M), INT16_MIN, INT16_MAX, SATP)
#define DO_RSHRN_UH(N, M, SATP) \
    do_sat_bhs(do_urshr(N, M), 0, UINT16_MAX, SATP)
#define DO_RSHRUN_H(N, M, SATP) \
    do_sat_bhs(do_srshr(N, M), 0, UINT16_MAX, SATP)

DO_VSHRN_SAT_SB(vqshrnb_sb, vqshrnt_sb, DO_SHRN_SB)
DO_VSHRN_SAT_SH(vqshrnb_sh, vqshrnt_sh, DO_SHRN_SH)
DO_VSHRN_SAT_UB(vqshrnb_ub, vqshrnt_ub, DO_SHRN_UB)
DO_VSHRN_SAT_UH(vqshrnb_uh, vqshrnt_uh, DO_SHRN_UH)
DO_VSHRN_SAT_SB(vqshrunbb, vqshruntb, DO_SHRUN_B)
DO_VSHRN_SAT_SH(vqshrunbh, vqshrunth, DO_SHRUN_H)

DO_VSHRN_SAT_SB(vqrshrnb_sb, vqrshrnt_sb, DO_RSHRN_SB)
DO_VSHRN_SAT_SH(vqrshrnb_sh, vqrshrnt_sh, DO_RSHRN_SH)
DO_VSHRN_SAT_UB(vqrshrnb_ub, vqrshrnt_ub, DO_RSHRN_UB)
DO_VSHRN_SAT_UH(vqrshrnb_uh, vqrshrnt_uh, DO_RSHRN_UH)
DO_VSHRN_SAT_SB(vqrshrunbb, vqrshruntb, DO_RSHRUN_B)
DO_VSHRN_SAT_SH(vqrshrunbh, vqrshrunth, DO_RSHRUN_H)

uint32_t HELPER(mve_vshlc)(CPUARMState *env, void *vd, uint32_t rdm,
                           uint32_t shift)
{
    uint32_t *d = vd;
    uint16_t mask = mve_element_mask(env);
    unsigned e;
    uint32_t r;

    /*
     * For each 32-bit element, we shift it left, bringing in the
     * low 'shift' bits of rdm at the bottom. Bits shifted out at
     * the top become the new rdm, if the predicate mask permits.
     * The final rdm value is returned to update the register.
     * shift == 0 here means "shift by 32 bits".
     */
    if (shift == 0) {
        for (e = 0; e < 16 / 4; e++, mask >>= 4) {
            r = rdm;
            if (mask & 1) {
                rdm = d[H4(e)];
            }
            mergemask(&d[H4(e)], r, mask);
        }
    } else {
        uint32_t shiftmask = MAKE_64BIT_MASK(0, shift);

        for (e = 0; e < 16 / 4; e++, mask >>= 4) {
            r = (d[H4(e)] << shift) | (rdm & shiftmask);
            if (mask & 1) {
                rdm = d[H4(e)] >> (32 - shift);
            }
            mergemask(&d[H4(e)], r, mask);
        }
    }
    mve_advance_vpt(env);
    return rdm;
}
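
/*
 * Illustrative beat with shift == 8: each active element becomes
 * (element << 8) | (carry & 0xff), and the top 8 bits shifted out of an
 * active element become the carry into the next one (and finally back
 * into Rdm).
 */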

uint64_t HELPER(mve_sshrl)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_sqrshl_d(n, -(int8_t)shift, false, NULL);
}

uint64_t HELPER(mve_ushll)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_uqrshl_d(n, (int8_t)shift, false, NULL);
}

uint64_t HELPER(mve_sqshll)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_sqrshl_d(n, (int8_t)shift, false, &env->QF);
}

uint64_t HELPER(mve_uqshll)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_uqrshl_d(n, (int8_t)shift, false, &env->QF);
}

uint64_t HELPER(mve_sqrshrl)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_sqrshl_d(n, -(int8_t)shift, true, &env->QF);
}

uint64_t HELPER(mve_uqrshll)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_uqrshl_d(n, (int8_t)shift, true, &env->QF);
}

/* Operate on 64-bit values, but saturate at 48 bits */
static inline int64_t do_sqrshl48_d(int64_t src, int64_t shift,
                                    bool round, uint32_t *sat)
{
    if (shift <= -48) {
        /* Rounding the sign bit always produces 0. */
        if (round) {
            return 0;
        }
        return src >> 63;
    } else if (shift < 0) {
        if (round) {
            src >>= -shift - 1;
            return (src >> 1) + (src & 1);
        }
        return src >> -shift;
    } else if (shift < 48) {
        int64_t val = src << shift;
        int64_t extval = sextract64(val, 0, 48);
        if (!sat || val == extval) {
            return extval;
        }
    } else if (!sat || src == 0) {
        return 0;
    }

    *sat = 1;
    return (1ULL << 47) - (src >= 0);
}

/* Operate on 64-bit values, but saturate at 48 bits */
static inline uint64_t do_uqrshl48_d(uint64_t src, int64_t shift,
                                     bool round, uint32_t *sat)
{
    uint64_t val, extval;

    if (shift <= -(48 + round)) {
        return 0;
    } else if (shift < 0) {
        if (round) {
            val = src >> (-shift - 1);
            val = (val >> 1) + (val & 1);
        } else {
            val = src >> -shift;
        }
        extval = extract64(val, 0, 48);
        if (!sat || val == extval) {
            return extval;
        }
    } else if (shift < 48) {
        uint64_t val = src << shift;
        uint64_t extval = extract64(val, 0, 48);
        if (!sat || val == extval) {
            return extval;
        }
    } else if (!sat || src == 0) {
        return 0;
    }

    *sat = 1;
    return MAKE_64BIT_MASK(0, 48);
}
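
/*
 * Example (illustrative): do_sqrshl48_d(1ll << 46, 1, false, &sat) would
 * produce 1 << 47, which is outside the signed 48-bit range, so the result
 * saturates to 0x7fffffffffff and *sat is set.
 */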

uint64_t HELPER(mve_sqrshrl48)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_sqrshl48_d(n, -(int8_t)shift, true, &env->QF);
}

uint64_t HELPER(mve_uqrshll48)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_uqrshl48_d(n, (int8_t)shift, true, &env->QF);
}

uint32_t HELPER(mve_uqshl)(CPUARMState *env, uint32_t n, uint32_t shift)
{
    return do_uqrshl_bhs(n, (int8_t)shift, 32, false, &env->QF);
}

uint32_t HELPER(mve_sqshl)(CPUARMState *env, uint32_t n, uint32_t shift)
{
    return do_sqrshl_bhs(n, (int8_t)shift, 32, false, &env->QF);
}

uint32_t HELPER(mve_uqrshl)(CPUARMState *env, uint32_t n, uint32_t shift)
{
    return do_uqrshl_bhs(n, (int8_t)shift, 32, true, &env->QF);
}

uint32_t HELPER(mve_sqrshr)(CPUARMState *env, uint32_t n, uint32_t shift)
{
    return do_sqrshl_bhs(n, -(int8_t)shift, 32, true, &env->QF);
}