/*
 *  PowerPC floating point and SPE emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "internal.h"
#include "fpu/softfloat.h"

static inline float128 float128_snan_to_qnan(float128 x)
{
    float128 r;

    r.high = x.high | 0x0000800000000000;
    r.low = x.low;
    return r;
}

#define float64_snan_to_qnan(x) ((x) | 0x0008000000000000ULL)
#define float32_snan_to_qnan(x) ((x) | 0x00400000)
#define float16_snan_to_qnan(x) ((x) | 0x0200)
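
/*
 * Each snan_to_qnan form quiets a signalling NaN by setting the
 * most-significant fraction bit while leaving the sign, exponent and
 * remaining payload untouched; e.g. the float32 sNaN 0x7f800001
 * becomes the qNaN 0x7fc00001.
 */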

static inline float32 bfp32_neg(float32 a)
{
    if (unlikely(float32_is_any_nan(a))) {
        return a;
    } else {
        return float32_chs(a);
    }
}

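/*
 * In user-only mode floating-point exceptions are always delivered;
 * in system mode they are delivered only when MSR[FE0] or MSR[FE1]
 * is set.
 */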
static inline bool fp_exceptions_enabled(CPUPPCState *env)
{
#ifdef CONFIG_USER_ONLY
    return true;
#else
    return (env->msr & ((1U << MSR_FE0) | (1U << MSR_FE1))) != 0;
#endif
}

/*****************************************************************************/
/* Floating point operations helpers */

/*
 * This is the non-arithmetic conversion that happens e.g. on loads.
 * In the spec, this is called Power ISA V2.06 conversion from binary
 * floating point to binary floating point (without rounding).
 */
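/*
 * For example, 1.0f (0x3f800000) becomes 0x3ff0000000000000: the
 * sign bit and exponent MSB are copied to the top, the complement of
 * the exponent MSB is replicated three times below them, and the low
 * 30 bits are shifted up by 29.
 */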
uint64_t helper_todouble(uint32_t arg)
{
    uint32_t abs_arg = arg & 0x7fffffff;
    uint64_t ret;

    if (likely(abs_arg >= 0x00800000)) {
        if (unlikely(extract32(arg, 23, 8) == 0xff)) {
            /* Inf or NAN.  */
            ret  = (uint64_t)extract32(arg, 31, 1) << 63;
            ret |= (uint64_t)0x7ff << 52;
            ret |= (uint64_t)extract32(arg, 0, 23) << 29;
        } else {
            /* Normalized operand.  */
            ret  = (uint64_t)extract32(arg, 30, 2) << 62;
            ret |= ((extract32(arg, 30, 1) ^ 1) * (uint64_t)7) << 59;
            ret |= (uint64_t)extract32(arg, 0, 30) << 29;
        }
    } else {
        /* Zero or Denormalized operand.  */
        ret = (uint64_t)extract32(arg, 31, 1) << 63;
        if (unlikely(abs_arg != 0)) {
            /*
             * Denormalized operand.  Shift the fraction so that its
             * msb lands in the implicit bit position; shift is in [1:23].
             */
            int shift = clz32(abs_arg) - 8;
            /*
             * The first 3 terms compute the float64 exponent.  We then bias
             * this result by -1 so that we can swallow the implicit bit below.
             */
            int exp = -126 - shift + 1023 - 1;

            ret |= (uint64_t)exp << 52;
            ret += (uint64_t)abs_arg << (52 - 23 + shift);
        }
    }
    return ret;
}

/*
 * This is the non-arithmetic conversion that happens e.g. on stores.
 * In the spec, this is called Power ISA V2.06 conversion from binary
 * floating point to binary floating point (without rounding).
 */
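/*
 * The biased-exponent thresholds below: 896 == 1023 - 127 is the
 * largest double exponent that no longer yields a normal float32,
 * and 874 == 1023 - 149 is the smallest that can still be
 * represented as a float32 denormal.
 */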
uint32_t helper_tosingle(uint64_t arg)
{
    int exp = extract64(arg, 52, 11);
    uint32_t ret;

    if (likely(exp > 896)) {
        /* No denormalization required (includes Inf, NaN).  */
        ret  = extract64(arg, 62, 2) << 30;
        ret |= extract64(arg, 29, 30);
    } else {
        /*
         * Zero or Denormal result.  If the exponent is in bounds for
         * a single-precision denormal result, extract the proper
         * bits.  If the input is not zero, and the exponent is out of
         * bounds, then the result is undefined; this underflows to
         * zero.
         */
        ret = extract64(arg, 63, 1) << 31;
        if (unlikely(exp >= 874)) {
            /* Denormal result.  */
            ret |= ((1ULL << 52) | extract64(arg, 0, 52)) >> (896 + 30 - exp);
        }
    }
    return ret;
}

static inline int ppc_float32_get_unbiased_exp(float32 f)
{
    return ((f >> 23) & 0xFF) - 127;
}

static inline int ppc_float64_get_unbiased_exp(float64 f)
{
    return ((f >> 52) & 0x7FF) - 1023;
}

/* Classification of a floating-point value, one-hot plus sign.  */
enum {
    is_normal   = 1,
    is_zero     = 2,
    is_denormal = 4,
    is_inf      = 8,
    is_qnan     = 16,
    is_snan     = 32,
    is_neg      = 64,
};

#define COMPUTE_CLASS(tp)                                      \
static int tp##_classify(tp arg)                               \
{                                                              \
    int ret = tp##_is_neg(arg) * is_neg;                       \
    if (unlikely(tp##_is_any_nan(arg))) {                      \
        float_status dummy = { };  /* snan_bit_is_one = 0 */   \
        ret |= (tp##_is_signaling_nan(arg, &dummy)             \
                ? is_snan : is_qnan);                          \
    } else if (unlikely(tp##_is_infinity(arg))) {              \
        ret |= is_inf;                                         \
    } else if (tp##_is_zero(arg)) {                            \
        ret |= is_zero;                                        \
    } else if (tp##_is_zero_or_denormal(arg)) {                \
        ret |= is_denormal;                                    \
    } else {                                                   \
        ret |= is_normal;                                      \
    }                                                          \
    return ret;                                                \
}

COMPUTE_CLASS(float16)
COMPUTE_CLASS(float32)
COMPUTE_CLASS(float64)
COMPUTE_CLASS(float128)

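/*
 * The classify result is a one-hot type flag optionally OR'ed with
 * is_neg; since is_neg is the highest bit, ctz32(class) below
 * recovers the row index of the type in the FPRF table.
 */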
static void set_fprf_from_class(CPUPPCState *env, int class)
{
    static const uint8_t fprf[6][2] = {
        { 0x04, 0x08 },  /* normalized */
        { 0x02, 0x12 },  /* zero */
        { 0x14, 0x18 },  /* denormalized */
        { 0x05, 0x09 },  /* infinity */
        { 0x11, 0x11 },  /* qnan */
        { 0x00, 0x00 },  /* snan -- flags are undefined */
    };
    bool isneg = class & is_neg;

    env->fpscr &= ~FP_FPRF;
    env->fpscr |= fprf[ctz32(class)][isneg] << FPSCR_FPRF;
}

#define COMPUTE_FPRF(tp)                                \
void helper_compute_fprf_##tp(CPUPPCState *env, tp arg) \
{                                                       \
    set_fprf_from_class(env, tp##_classify(arg));       \
}

COMPUTE_FPRF(float16)
COMPUTE_FPRF(float32)
COMPUTE_FPRF(float64)
COMPUTE_FPRF(float128)

/* Floating-point invalid operations helper */
static void finish_invalid_op_excp(CPUPPCState *env, int op, uintptr_t retaddr)
{
    /* Update the floating-point invalid operation summary */
    env->fpscr |= FP_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (env->fpscr & FP_VE) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        if (fp_exceptions_enabled(env)) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_FP | op, retaddr);
        }
    }
}

static void finish_invalid_op_arith(CPUPPCState *env, int op,
                                    bool set_fpcc, uintptr_t retaddr)
{
    env->fpscr &= ~(FP_FR | FP_FI);
    if (!(env->fpscr & FP_VE)) {
        if (set_fpcc) {
            env->fpscr &= ~FP_FPCC;
            env->fpscr |= (FP_C | FP_FU);
        }
    }
    finish_invalid_op_excp(env, op, retaddr);
}

/* Signalling NaN specific invalid operations exception */
static void float_invalid_op_vxsnan(CPUPPCState *env, uintptr_t retaddr)
{
    env->fpscr |= FP_VXSNAN;
    finish_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, retaddr);
}

/* Magnitude subtraction of infinities */
static void float_invalid_op_vxisi(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= FP_VXISI;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXISI, set_fpcc, retaddr);
}

/* Division of infinity by infinity */
static void float_invalid_op_vxidi(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= FP_VXIDI;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXIDI, set_fpcc, retaddr);
}

/* Division of zero by zero */
static void float_invalid_op_vxzdz(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= FP_VXZDZ;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXZDZ, set_fpcc, retaddr);
}

/* Multiplication of zero by infinity */
static void float_invalid_op_vximz(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= FP_VXIMZ;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXIMZ, set_fpcc, retaddr);
}

/* Square root of a negative number */
static void float_invalid_op_vxsqrt(CPUPPCState *env, bool set_fpcc,
                                    uintptr_t retaddr)
{
    env->fpscr |= FP_VXSQRT;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXSQRT, set_fpcc, retaddr);
}

/* Ordered comparison of NaN */
static void float_invalid_op_vxvc(CPUPPCState *env, bool set_fpcc,
                                  uintptr_t retaddr)
{
    env->fpscr |= FP_VXVC;
    if (set_fpcc) {
        env->fpscr &= ~FP_FPCC;
        env->fpscr |= (FP_C | FP_FU);
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= FP_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    /* We must update the target FPR before raising the exception */
    if (env->fpscr & FP_VE) {
        CPUState *cs = env_cpu(env);

        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        /* Exception is deferred */
    }
}

/* Invalid conversion */
static void float_invalid_op_vxcvi(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= FP_VXCVI;
    env->fpscr &= ~(FP_FR | FP_FI);
    if (!(env->fpscr & FP_VE)) {
        if (set_fpcc) {
            env->fpscr &= ~FP_FPCC;
            env->fpscr |= (FP_C | FP_FU);
        }
    }
    finish_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, retaddr);
}

static inline void float_zero_divide_excp(CPUPPCState *env, uintptr_t raddr)
{
    env->fpscr |= FP_ZX;
    env->fpscr &= ~(FP_FR | FP_FI);
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (env->fpscr & FP_ZE) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        if (fp_exceptions_enabled(env)) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX,
                                   raddr);
        }
    }
}

static inline int float_overflow_excp(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);

    env->fpscr |= FP_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;

    bool overflow_enabled = !!(env->fpscr & FP_OE);
    if (overflow_enabled) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    }

    return overflow_enabled ? 0 : float_flag_inexact;
}
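
/*
 * With OE clear an overflowed result is also inexact: the returned
 * float_flag_inexact is OR'ed into the status word in
 * do_float_check_status() so that XX/FI are updated as well.
 */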

static inline void float_underflow_excp(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);

    env->fpscr |= FP_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (env->fpscr & FP_UE) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}

static inline void float_inexact_excp(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);

    env->fpscr |= FP_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (env->fpscr & FP_XE) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}

void helper_fpscr_clrbit(CPUPPCState *env, uint32_t bit)
{
    uint32_t mask = 1u << bit;
    if (env->fpscr & mask) {
        ppc_store_fpscr(env, env->fpscr & ~(target_ulong)mask);
    }
}

void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit)
{
    uint32_t mask = 1u << bit;
    if (!(env->fpscr & mask)) {
        ppc_store_fpscr(env, env->fpscr | mask);
    }
}

void helper_store_fpscr(CPUPPCState *env, uint64_t val, uint32_t nibbles)
{
    target_ulong mask = 0;
    int i;

    /* TODO: push this extension back to translation time */
    for (i = 0; i < sizeof(target_ulong) * 2; i++) {
        if (nibbles & (1 << i)) {
            mask |= (target_ulong) 0xf << (4 * i);
        }
    }
    val = (val & mask) | (env->fpscr & ~mask);
    ppc_store_fpscr(env, val);
}
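
/*
 * Example: nibbles == 0x3 selects the two low-order FPSCR nibbles,
 * building mask == 0xff, so only bits 0-7 of val are written and all
 * other FPSCR bits are preserved.
 */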

static void do_fpscr_check_status(CPUPPCState *env, uintptr_t raddr)
{
    CPUState *cs = env_cpu(env);
    target_ulong fpscr = env->fpscr;
    int error = 0;

    if ((fpscr & FP_OX) && (fpscr & FP_OE)) {
        error = POWERPC_EXCP_FP_OX;
    } else if ((fpscr & FP_UX) && (fpscr & FP_UE)) {
        error = POWERPC_EXCP_FP_UX;
    } else if ((fpscr & FP_XX) && (fpscr & FP_XE)) {
        error = POWERPC_EXCP_FP_XX;
    } else if ((fpscr & FP_ZX) && (fpscr & FP_ZE)) {
        error = POWERPC_EXCP_FP_ZX;
    } else if (fpscr & FP_VE) {
        if (fpscr & FP_VXSOFT) {
            error = POWERPC_EXCP_FP_VXSOFT;
        } else if (fpscr & FP_VXSNAN) {
            error = POWERPC_EXCP_FP_VXSNAN;
        } else if (fpscr & FP_VXISI) {
            error = POWERPC_EXCP_FP_VXISI;
        } else if (fpscr & FP_VXIDI) {
            error = POWERPC_EXCP_FP_VXIDI;
        } else if (fpscr & FP_VXZDZ) {
            error = POWERPC_EXCP_FP_VXZDZ;
        } else if (fpscr & FP_VXIMZ) {
            error = POWERPC_EXCP_FP_VXIMZ;
        } else if (fpscr & FP_VXVC) {
            error = POWERPC_EXCP_FP_VXVC;
        } else if (fpscr & FP_VXSQRT) {
            error = POWERPC_EXCP_FP_VXSQRT;
        } else if (fpscr & FP_VXCVI) {
            error = POWERPC_EXCP_FP_VXCVI;
        } else {
            return;
        }
    } else {
        return;
    }
    cs->exception_index = POWERPC_EXCP_PROGRAM;
    env->error_code = error | POWERPC_EXCP_FP;
    env->fpscr |= FP_FEX;
    /* Deferred floating-point exception after target FPSCR update */
    if (fp_exceptions_enabled(env)) {
        raise_exception_err_ra(env, cs->exception_index,
                               env->error_code, raddr);
    }
}

void helper_fpscr_check_status(CPUPPCState *env)
{
    do_fpscr_check_status(env, GETPC());
}

static void do_float_check_status(CPUPPCState *env, bool change_fi,
                                  uintptr_t raddr)
{
    CPUState *cs = env_cpu(env);
    int status = get_float_exception_flags(&env->fp_status);

    if (status & float_flag_overflow) {
        status |= float_overflow_excp(env);
    } else if (status & float_flag_underflow) {
        float_underflow_excp(env);
    }
    if (status & float_flag_inexact) {
        float_inexact_excp(env);
    }
    if (change_fi) {
        env->fpscr = FIELD_DP64(env->fpscr, FPSCR, FI,
                                !!(status & float_flag_inexact));
    }

    if (cs->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (fp_exceptions_enabled(env)) {
            raise_exception_err_ra(env, cs->exception_index,
                                   env->error_code, raddr);
        }
    }
}

void helper_float_check_status(CPUPPCState *env)
{
    do_float_check_status(env, true, GETPC());
}

void helper_reset_fpstatus(CPUPPCState *env)
{
    set_float_exception_flags(0, &env->fp_status);
}

static void float_invalid_op_addsub(CPUPPCState *env, int flags,
                                    bool set_fpcc, uintptr_t retaddr)
{
    if (flags & float_flag_invalid_isi) {
        float_invalid_op_vxisi(env, set_fpcc, retaddr);
    } else if (flags & float_flag_invalid_snan) {
        float_invalid_op_vxsnan(env, retaddr);
    }
}

/* fadd - fadd. */
float64 helper_fadd(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_add(arg1, arg2, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_addsub(env, flags, 1, GETPC());
    }

    return ret;
}

/* fadds - fadds. */
float64 helper_fadds(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64r32_add(arg1, arg2, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_addsub(env, flags, 1, GETPC());
    }
    return ret;
}

/* fsub - fsub. */
float64 helper_fsub(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_sub(arg1, arg2, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_addsub(env, flags, 1, GETPC());
    }

    return ret;
}

/* fsubs - fsubs. */
float64 helper_fsubs(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64r32_sub(arg1, arg2, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_addsub(env, flags, 1, GETPC());
    }
    return ret;
}

static void float_invalid_op_mul(CPUPPCState *env, int flags,
                                 bool set_fprc, uintptr_t retaddr)
{
    if (flags & float_flag_invalid_imz) {
        float_invalid_op_vximz(env, set_fprc, retaddr);
    } else if (flags & float_flag_invalid_snan) {
        float_invalid_op_vxsnan(env, retaddr);
    }
}

/* fmul - fmul. */
float64 helper_fmul(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_mul(arg1, arg2, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_mul(env, flags, 1, GETPC());
    }

    return ret;
}

/* fmuls - fmuls. */
float64 helper_fmuls(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64r32_mul(arg1, arg2, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_mul(env, flags, 1, GETPC());
    }
    return ret;
}

static void float_invalid_op_div(CPUPPCState *env, int flags,
                                 bool set_fprc, uintptr_t retaddr)
{
    if (flags & float_flag_invalid_idi) {
        float_invalid_op_vxidi(env, set_fprc, retaddr);
    } else if (flags & float_flag_invalid_zdz) {
        float_invalid_op_vxzdz(env, set_fprc, retaddr);
    } else if (flags & float_flag_invalid_snan) {
        float_invalid_op_vxsnan(env, retaddr);
    }
}

/* fdiv - fdiv. */
float64 helper_fdiv(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_div(arg1, arg2, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_div(env, flags, 1, GETPC());
    }
    if (unlikely(flags & float_flag_divbyzero)) {
        float_zero_divide_excp(env, GETPC());
    }

    return ret;
}

/* fdivs - fdivs. */
float64 helper_fdivs(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64r32_div(arg1, arg2, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_div(env, flags, 1, GETPC());
    }
    if (unlikely(flags & float_flag_divbyzero)) {
        float_zero_divide_excp(env, GETPC());
    }

    return ret;
}

static uint64_t float_invalid_cvt(CPUPPCState *env, int flags,
                                  uint64_t ret, uint64_t ret_nan,
                                  bool set_fprc, uintptr_t retaddr)
{
    /*
     * VXCVI is different from most in that it sets two exception bits,
     * VXCVI and VXSNAN, for an SNAN input.
     */
    if (flags & float_flag_invalid_snan) {
        env->fpscr |= FP_VXSNAN;
    }
    float_invalid_op_vxcvi(env, set_fprc, retaddr);

    return flags & float_flag_invalid_cvti ? ret : ret_nan;
}

#define FPU_FCTI(op, cvt, nanval)                                     \
uint64_t helper_##op(CPUPPCState *env, float64 arg)                   \
{                                                                     \
    uint64_t ret = float64_to_##cvt(arg, &env->fp_status);            \
    int flags = get_float_exception_flags(&env->fp_status);           \
    if (unlikely(flags & float_flag_invalid)) {                       \
        ret = float_invalid_cvt(env, flags, ret, nanval, 1, GETPC()); \
    }                                                                 \
    return ret;                                                       \
}

FPU_FCTI(fctiw, int32, 0x80000000U)
FPU_FCTI(fctiwz, int32_round_to_zero, 0x80000000U)
FPU_FCTI(fctiwu, uint32, 0x00000000U)
FPU_FCTI(fctiwuz, uint32_round_to_zero, 0x00000000U)
FPU_FCTI(fctid, int64, 0x8000000000000000ULL)
FPU_FCTI(fctidz, int64_round_to_zero, 0x8000000000000000ULL)
FPU_FCTI(fctidu, uint64, 0x0000000000000000ULL)
FPU_FCTI(fctiduz, uint64_round_to_zero, 0x0000000000000000ULL)
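
/*
 * nanval is the value returned for a NaN input: the most negative
 * integer for the signed conversions, zero for the unsigned ones.
 * Ordinary out-of-range inputs keep softfloat's saturated result
 * (see float_invalid_cvt above).
 */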

#define FPU_FCFI(op, cvtr, is_single)                      \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg)       \
{                                                          \
    CPU_DoubleU farg;                                      \
                                                           \
    if (is_single) {                                       \
        float32 tmp = cvtr(arg, &env->fp_status);          \
        farg.d = float32_to_float64(tmp, &env->fp_status); \
    } else {                                               \
        farg.d = cvtr(arg, &env->fp_status);               \
    }                                                      \
    do_float_check_status(env, true, GETPC());             \
    return farg.ll;                                        \
}

FPU_FCFI(fcfid, int64_to_float64, 0)
FPU_FCFI(fcfids, int64_to_float32, 1)
FPU_FCFI(fcfidu, uint64_to_float64, 0)
FPU_FCFI(fcfidus, uint64_to_float32, 1)

static uint64_t do_fri(CPUPPCState *env, uint64_t arg,
                       FloatRoundMode rounding_mode)
{
    FloatRoundMode old_rounding_mode = get_float_rounding_mode(&env->fp_status);
    int flags;

    set_float_rounding_mode(rounding_mode, &env->fp_status);
    arg = float64_round_to_int(arg, &env->fp_status);
    set_float_rounding_mode(old_rounding_mode, &env->fp_status);

    flags = get_float_exception_flags(&env->fp_status);
    if (flags & float_flag_invalid_snan) {
        float_invalid_op_vxsnan(env, GETPC());
    }

    /* fri* does not set FPSCR[XX] */
    set_float_exception_flags(flags & ~float_flag_inexact, &env->fp_status);
    do_float_check_status(env, true, GETPC());

    return arg;
}

uint64_t helper_frin(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_ties_away);
}

uint64_t helper_friz(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_to_zero);
}

uint64_t helper_frip(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_up);
}

uint64_t helper_frim(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_down);
}

static void float_invalid_op_madd(CPUPPCState *env, int flags,
                                  bool set_fpcc, uintptr_t retaddr)
{
    if (flags & float_flag_invalid_imz) {
        float_invalid_op_vximz(env, set_fpcc, retaddr);
    } else {
        float_invalid_op_addsub(env, flags, set_fpcc, retaddr);
    }
}

static float64 do_fmadd(CPUPPCState *env, float64 a, float64 b,
                        float64 c, int madd_flags, uintptr_t retaddr)
{
    float64 ret = float64_muladd(a, b, c, madd_flags, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_madd(env, flags, 1, retaddr);
    }
    return ret;
}

static uint64_t do_fmadds(CPUPPCState *env, float64 a, float64 b,
                          float64 c, int madd_flags, uintptr_t retaddr)
{
    float64 ret = float64r32_muladd(a, b, c, madd_flags, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_madd(env, flags, 1, retaddr);
    }
    return ret;
}

#define FPU_FMADD(op, madd_flags)                                    \
    uint64_t helper_##op(CPUPPCState *env, uint64_t arg1,            \
                         uint64_t arg2, uint64_t arg3)               \
    { return do_fmadd(env, arg1, arg2, arg3, madd_flags, GETPC()); } \
    uint64_t helper_##op##s(CPUPPCState *env, uint64_t arg1,         \
                            uint64_t arg2, uint64_t arg3)            \
    { return do_fmadds(env, arg1, arg2, arg3, madd_flags, GETPC()); }

#define MADD_FLGS 0
#define MSUB_FLGS float_muladd_negate_c
#define NMADD_FLGS float_muladd_negate_result
#define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result)
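
/*
 * The flag combinations map onto the four fused operations:
 *   MADD_FLGS:  a * b + c
 *   MSUB_FLGS:  a * b - c        (negate_c)
 *   NMADD_FLGS: -(a * b + c)     (negate_result)
 *   NMSUB_FLGS: -(a * b - c)     (negate_c | negate_result)
 */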

FPU_FMADD(fmadd, MADD_FLGS)
FPU_FMADD(fnmadd, NMADD_FLGS)
FPU_FMADD(fmsub, MSUB_FLGS)
FPU_FMADD(fnmsub, NMSUB_FLGS)

/* frsp - frsp. */
static uint64_t do_frsp(CPUPPCState *env, uint64_t arg, uintptr_t retaddr)
{
    float32 f32 = float64_to_float32(arg, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid_snan)) {
        float_invalid_op_vxsnan(env, retaddr);
    }
    return helper_todouble(f32);
}

uint64_t helper_frsp(CPUPPCState *env, uint64_t arg)
{
    return do_frsp(env, arg, GETPC());
}

static void float_invalid_op_sqrt(CPUPPCState *env, int flags,
                                  bool set_fpcc, uintptr_t retaddr)
{
    if (unlikely(flags & float_flag_invalid_sqrt)) {
        float_invalid_op_vxsqrt(env, set_fpcc, retaddr);
    } else if (unlikely(flags & float_flag_invalid_snan)) {
        float_invalid_op_vxsnan(env, retaddr);
    }
}

#define FPU_FSQRT(name, op)                                 \
float64 helper_##name(CPUPPCState *env, float64 arg)        \
{                                                           \
    float64 ret = op(arg, &env->fp_status);                 \
    int flags = get_float_exception_flags(&env->fp_status); \
                                                            \
    if (unlikely(flags & float_flag_invalid)) {             \
        float_invalid_op_sqrt(env, flags, 1, GETPC());      \
    }                                                       \
                                                            \
    return ret;                                             \
}

FPU_FSQRT(FSQRT, float64_sqrt)
FPU_FSQRT(FSQRTS, float64r32_sqrt)

/* fre - fre. */
float64 helper_fre(CPUPPCState *env, float64 arg)
{
    /* "Estimate" the reciprocal with actual division.  */
    float64 ret = float64_div(float64_one, arg, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid_snan)) {
        float_invalid_op_vxsnan(env, GETPC());
    }
    if (unlikely(flags & float_flag_divbyzero)) {
        float_zero_divide_excp(env, GETPC());
        /* For FPSCR.ZE == 0, the result is 1/2.  */
        ret = float64_set_sign(float64_half, float64_is_neg(arg));
    }

    return ret;
}

/* fres - fres. */
uint64_t helper_fres(CPUPPCState *env, uint64_t arg)
{
    /* "Estimate" the reciprocal with actual division.  */
    float64 ret = float64r32_div(float64_one, arg, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid_snan)) {
        float_invalid_op_vxsnan(env, GETPC());
    }
    if (unlikely(flags & float_flag_divbyzero)) {
        float_zero_divide_excp(env, GETPC());
        /* For FPSCR.ZE == 0, the result is 1/2.  */
        ret = float64_set_sign(float64_half, float64_is_neg(arg));
    }

    return ret;
}

/* frsqrte - frsqrte. */
float64 helper_frsqrte(CPUPPCState *env, float64 arg)
{
    /* "Estimate" the reciprocal with actual division.  */
    float64 rets = float64_sqrt(arg, &env->fp_status);
    float64 retd = float64_div(float64_one, rets, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_sqrt(env, flags, 1, GETPC());
    }
    if (unlikely(flags & float_flag_divbyzero)) {
        /* Reciprocal of (square root of) zero.  */
        float_zero_divide_excp(env, GETPC());
    }

    return retd;
}

/* frsqrtes - frsqrtes. */
float64 helper_frsqrtes(CPUPPCState *env, float64 arg)
{
    /* "Estimate" the reciprocal with actual division.  */
    float64 rets = float64_sqrt(arg, &env->fp_status);
    float64 retd = float64r32_div(float64_one, rets, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_sqrt(env, flags, 1, GETPC());
    }
    if (unlikely(flags & float_flag_divbyzero)) {
        /* Reciprocal of (square root of) zero.  */
        float_zero_divide_excp(env, GETPC());
    }

    return retd;
}

/* fsel - fsel. */
uint64_t helper_FSEL(uint64_t a, uint64_t b, uint64_t c)
{
    CPU_DoubleU fa;

    fa.ll = a;

    if ((!float64_is_neg(fa.d) || float64_is_zero(fa.d)) &&
        !float64_is_any_nan(fa.d)) {
        return c;
    } else {
        return b;
    }
}

uint32_t helper_ftdiv(uint64_t fra, uint64_t frb)
{
    int fe_flag = 0;
    int fg_flag = 0;

    if (unlikely(float64_is_infinity(fra) ||
                 float64_is_infinity(frb) ||
                 float64_is_zero(frb))) {
        fe_flag = 1;
        fg_flag = 1;
    } else {
        int e_a = ppc_float64_get_unbiased_exp(fra);
        int e_b = ppc_float64_get_unbiased_exp(frb);

        if (unlikely(float64_is_any_nan(fra) ||
                     float64_is_any_nan(frb))) {
            fe_flag = 1;
        } else if ((e_b <= -1022) || (e_b >= 1021)) {
            fe_flag = 1;
        } else if (!float64_is_zero(fra) &&
                   (((e_a - e_b) >= 1023) ||
                    ((e_a - e_b) <= -1021) ||
                    (e_a <= -970))) {
            fe_flag = 1;
        }

        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /*
             * XB is not zero because of the above check and so
             * must be denormalized.
             */
            fg_flag = 1;
        }
    }

    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
}
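
/*
 * The CR field returned is 0b1000 | fg << 2 | fe << 1: fg is set
 * when an operand is infinite or the divisor is zero or denormal,
 * fe when a NaN or an exponent outside the safe range would make a
 * software division sequence unreliable.
 */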

uint32_t helper_ftsqrt(uint64_t frb)
{
    int fe_flag = 0;
    int fg_flag = 0;

    if (unlikely(float64_is_infinity(frb) || float64_is_zero(frb))) {
        fe_flag = 1;
        fg_flag = 1;
    } else {
        int e_b = ppc_float64_get_unbiased_exp(frb);

        if (unlikely(float64_is_any_nan(frb))) {
            fe_flag = 1;
        } else if (unlikely(float64_is_zero(frb))) {
            fe_flag = 1;
        } else if (unlikely(float64_is_neg(frb))) {
            fe_flag = 1;
        } else if (!float64_is_zero(frb) && (e_b <= (-1022 + 52))) {
            fe_flag = 1;
        }

        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /*
             * XB is not zero because of the above check and so
             * must be denormalized.
             */
            fg_flag = 1;
        }
    }

    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
}

void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~FP_FPCC;
    env->fpscr |= ret << FPSCR_FPCC;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status)))) {
        /* sNaN comparison */
        float_invalid_op_vxsnan(env, GETPC());
    }
}

void helper_fcmpo(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~FP_FPCC;
    env->fpscr |= ret << FPSCR_FPCC;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL)) {
        float_invalid_op_vxvc(env, 1, GETPC());
        if (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
            float64_is_signaling_nan(farg2.d, &env->fp_status)) {
            /* sNaN comparison */
            float_invalid_op_vxsnan(env, GETPC());
        }
    }
}

/* Single-precision floating-point conversions */
static inline uint32_t efscfsi(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.f = int32_to_float32(val, &env->vec_status);

    return u.l;
}

static inline uint32_t efscfui(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.f = uint32_to_float32(val, &env->vec_status);

    return u.l;
}

static inline int32_t efsctsi(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_int32(u.f, &env->vec_status);
}

static inline uint32_t efsctui(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_uint32(u.f, &env->vec_status);
}

static inline uint32_t efsctsiz(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_int32_round_to_zero(u.f, &env->vec_status);
}

static inline uint32_t efsctuiz(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
}

static inline uint32_t efscfsf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = int32_to_float32(val, &env->vec_status);
    tmp = int64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

static inline uint32_t efscfuf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = uint32_to_float32(val, &env->vec_status);
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

static inline uint32_t efsctsf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_int32(u.f, &env->vec_status);
}

static inline uint32_t efsctuf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_uint32(u.f, &env->vec_status);
}

#define HELPER_SPE_SINGLE_CONV(name)                        \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t val) \
    {                                                       \
        return e##name(env, val);                           \
    }
/* efscfsi */
HELPER_SPE_SINGLE_CONV(fscfsi);
/* efscfui */
HELPER_SPE_SINGLE_CONV(fscfui);
/* efscfuf */
HELPER_SPE_SINGLE_CONV(fscfuf);
/* efscfsf */
HELPER_SPE_SINGLE_CONV(fscfsf);
/* efsctsi */
HELPER_SPE_SINGLE_CONV(fsctsi);
/* efsctui */
HELPER_SPE_SINGLE_CONV(fsctui);
/* efsctsiz */
HELPER_SPE_SINGLE_CONV(fsctsiz);
/* efsctuiz */
HELPER_SPE_SINGLE_CONV(fsctuiz);
/* efsctsf */
HELPER_SPE_SINGLE_CONV(fsctsf);
/* efsctuf */
HELPER_SPE_SINGLE_CONV(fsctuf);

#define HELPER_SPE_VECTOR_CONV(name)                         \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t val) \
    {                                                        \
        return ((uint64_t)e##name(env, val >> 32) << 32) |   \
               (uint64_t)e##name(env, val);                  \
    }
/* evfscfsi */
HELPER_SPE_VECTOR_CONV(fscfsi);
/* evfscfui */
HELPER_SPE_VECTOR_CONV(fscfui);
/* evfscfuf */
HELPER_SPE_VECTOR_CONV(fscfuf);
/* evfscfsf */
HELPER_SPE_VECTOR_CONV(fscfsf);
/* evfsctsi */
HELPER_SPE_VECTOR_CONV(fsctsi);
/* evfsctui */
HELPER_SPE_VECTOR_CONV(fsctui);
/* evfsctsiz */
HELPER_SPE_VECTOR_CONV(fsctsiz);
/* evfsctuiz */
HELPER_SPE_VECTOR_CONV(fsctuiz);
/* evfsctsf */
HELPER_SPE_VECTOR_CONV(fsctsf);
/* evfsctuf */
HELPER_SPE_VECTOR_CONV(fsctuf);

/* Single-precision floating-point arithmetic */
static inline uint32_t efsadd(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_add(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efssub(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efsmul(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efsdiv(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_div(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

#define HELPER_SPE_SINGLE_ARITH(name)                                     \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
    {                                                                     \
        return e##name(env, op1, op2);                                    \
    }
/* efsadd */
HELPER_SPE_SINGLE_ARITH(fsadd);
/* efssub */
HELPER_SPE_SINGLE_ARITH(fssub);
/* efsmul */
HELPER_SPE_SINGLE_ARITH(fsmul);
/* efsdiv */
HELPER_SPE_SINGLE_ARITH(fsdiv);

#define HELPER_SPE_VECTOR_ARITH(name)                                      \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
    {                                                                      \
        return ((uint64_t)e##name(env, op1 >> 32, op2 >> 32) << 32) |      \
               (uint64_t)e##name(env, op1, op2);                           \
    }
/* evfsadd */
HELPER_SPE_VECTOR_ARITH(fsadd);
/* evfssub */
HELPER_SPE_VECTOR_ARITH(fssub);
/* evfsmul */
HELPER_SPE_VECTOR_ARITH(fsmul);
/* evfsdiv */
HELPER_SPE_VECTOR_ARITH(fsdiv);

/* Single-precision floating-point comparisons */
static inline uint32_t efscmplt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efscmpgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
}

static inline uint32_t efscmpeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efststlt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return efscmplt(env, op1, op2);
}

static inline uint32_t efststgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return efscmpgt(env, op1, op2);
}

static inline uint32_t efststeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return efscmpeq(env, op1, op2);
}

#define HELPER_SINGLE_SPE_CMP(name)                                       \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
    {                                                                     \
        return e##name(env, op1, op2);                                    \
    }
/* efststlt */
HELPER_SINGLE_SPE_CMP(fststlt);
/* efststgt */
HELPER_SINGLE_SPE_CMP(fststgt);
/* efststeq */
HELPER_SINGLE_SPE_CMP(fststeq);
/* efscmplt */
HELPER_SINGLE_SPE_CMP(fscmplt);
/* efscmpgt */
HELPER_SINGLE_SPE_CMP(fscmpgt);
/* efscmpeq */
HELPER_SINGLE_SPE_CMP(fscmpeq);

static inline uint32_t evcmp_merge(int t0, int t1)
{
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
}
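
/*
 * evcmp_merge packs the two per-element results into CR bits: e.g.
 * t0 = 1, t1 = 0 yields 0b1010 (high element true, "any" true, "all"
 * false), while t0 = t1 = 1 yields 0b1111.
 */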

#define HELPER_VECTOR_SPE_CMP(name)                                        \
    uint32_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
    {                                                                      \
        return evcmp_merge(e##name(env, op1 >> 32, op2 >> 32),             \
                           e##name(env, op1, op2));                        \
    }
/* evfststlt */
HELPER_VECTOR_SPE_CMP(fststlt);
/* evfststgt */
HELPER_VECTOR_SPE_CMP(fststgt);
/* evfststeq */
HELPER_VECTOR_SPE_CMP(fststeq);
/* evfscmplt */
HELPER_VECTOR_SPE_CMP(fscmplt);
/* evfscmpgt */
HELPER_VECTOR_SPE_CMP(fscmpgt);
/* evfscmpeq */
HELPER_VECTOR_SPE_CMP(fscmpeq);

/* Double-precision floating-point conversions */
uint64_t helper_efdcfsi(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;

    u.d = int32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfsid(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.d = int64_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfui(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;

    u.d = uint32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfuid(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.d = uint64_to_float64(val, &env->vec_status);

    return u.ll;
}

uint32_t helper_efdctsi(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32(u.d, &env->vec_status);
}

uint32_t helper_efdctui(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32(u.d, &env->vec_status);
}

uint32_t helper_efdctsiz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctsidz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int64_round_to_zero(u.d, &env->vec_status);
}

uint32_t helper_efdctuiz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctuidz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdcfsf(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = int32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfuf(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = uint32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

uint32_t helper_efdctsf(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_int32(u.d, &env->vec_status);
}

uint32_t helper_efdctuf(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_uint32(u.d, &env->vec_status);
}

uint32_t helper_efscfd(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u1;
    CPU_FloatU u2;

    u1.ll = val;
    u2.f = float64_to_float32(u1.d, &env->vec_status);

    return u2.l;
}

uint64_t helper_efdcfs(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u2;
    CPU_FloatU u1;

    u1.l = val;
    u2.d = float32_to_float64(u1.f, &env->vec_status);

    return u2.ll;
}

/* Double-precision floating-point arithmetic */
uint64_t helper_efdadd(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_add(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efdsub(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efdmul(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efddiv(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_div(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

/* Double precision floating point helpers */
uint32_t helper_efdtstlt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

uint32_t helper_efdtstgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
}

uint32_t helper_efdtsteq(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_eq_quiet(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

uint32_t helper_efdcmplt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtstlt(env, op1, op2);
}

uint32_t helper_efdcmpgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtstgt(env, op1, op2);
}

uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtsteq(env, op1, op2);
}

#define float64_to_float64(x, env) x

/*
 * VSX_ADD_SUB - VSX floating point add/subtract
 *   name  - instruction mnemonic
 *   op    - operation (add or sub)
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfifprf - set FI and FPRF
 *   r2sp  - round intermediate double-precision result to single
 */
#define VSX_ADD_SUB(name, op, nels, tp, fld, sfifprf, r2sp)                  \
void helper_##name(CPUPPCState *env, ppc_vsr_t *xt,                          \
                   ppc_vsr_t *xa, ppc_vsr_t *xb)                             \
{                                                                            \
    ppc_vsr_t t = { };                                                       \
    int i;                                                                   \
                                                                             \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        t.fld = tp##_##op(xa->fld, xb->fld, &tstat);                         \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            float_invalid_op_addsub(env, tstat.float_exception_flags,        \
                                    sfifprf, GETPC());                       \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            t.fld = do_frsp(env, t.fld, GETPC());                            \
        }                                                                    \
                                                                             \
        if (sfifprf) {                                                       \
            helper_compute_fprf_float64(env, t.fld);                         \
        }                                                                    \
    }                                                                        \
    *xt = t;                                                                 \
    do_float_check_status(env, sfifprf, GETPC());                            \
}
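
/*
 * Note the pattern used by the VSX helpers: each element is computed
 * with a scratch float_status (tstat) so its exception flags can be
 * inspected in isolation, and the flags are then OR'ed back into
 * env->fp_status so that the final do_float_check_status() sees the
 * union over all elements.
 */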

VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0)
VSX_ADD_SUB(xsaddsp, add, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvadddp, add, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvaddsp, add, 4, float32, VsrW(i), 0, 0)
VSX_ADD_SUB(xssubdp, sub, 1, float64, VsrD(0), 1, 0)
VSX_ADD_SUB(xssubsp, sub, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvsubdp, sub, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvsubsp, sub, 4, float32, VsrW(i), 0, 0)

void helper_xsaddqp(CPUPPCState *env, uint32_t opcode,
                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
{
    ppc_vsr_t t = *xt;
    float_status tstat;

    helper_reset_fpstatus(env);

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    t.f128 = float128_add(xa->f128, xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        float_invalid_op_addsub(env, tstat.float_exception_flags, 1, GETPC());
    }

    helper_compute_fprf_float128(env, t.f128);

    *xt = t;
    do_float_check_status(env, true, GETPC());
}

/*
 * VSX_MUL - VSX floating point multiply
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfifprf - set FI and FPRF
 *   r2sp  - round intermediate double-precision result to single
 */
#define VSX_MUL(op, nels, tp, fld, sfifprf, r2sp)                            \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                            \
                 ppc_vsr_t *xa, ppc_vsr_t *xb)                               \
{                                                                            \
    ppc_vsr_t t = { };                                                       \
    int i;                                                                   \
                                                                             \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        t.fld = tp##_mul(xa->fld, xb->fld, &tstat);                          \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            float_invalid_op_mul(env, tstat.float_exception_flags,           \
                                 sfifprf, GETPC());                          \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            t.fld = do_frsp(env, t.fld, GETPC());                            \
        }                                                                    \
                                                                             \
        if (sfifprf) {                                                       \
            helper_compute_fprf_float64(env, t.fld);                         \
        }                                                                    \
    }                                                                        \
                                                                             \
    *xt = t;                                                                 \
    do_float_check_status(env, sfifprf, GETPC());                            \
}

VSX_MUL(xsmuldp, 1, float64, VsrD(0), 1, 0)
VSX_MUL(xsmulsp, 1, float64, VsrD(0), 1, 1)
VSX_MUL(xvmuldp, 2, float64, VsrD(i), 0, 0)
VSX_MUL(xvmulsp, 4, float32, VsrW(i), 0, 0)

void helper_xsmulqp(CPUPPCState *env, uint32_t opcode,
                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
{
    ppc_vsr_t t = *xt;
    float_status tstat;

    helper_reset_fpstatus(env);
    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    t.f128 = float128_mul(xa->f128, xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        float_invalid_op_mul(env, tstat.float_exception_flags, 1, GETPC());
    }
    helper_compute_fprf_float128(env, t.f128);

    *xt = t;
    do_float_check_status(env, true, GETPC());
}

/*
 * VSX_DIV - VSX floating point divide
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfifprf - set FI and FPRF
 *   r2sp  - round intermediate double-precision result to single
 */
#define VSX_DIV(op, nels, tp, fld, sfifprf, r2sp)                            \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                            \
                 ppc_vsr_t *xa, ppc_vsr_t *xb)                               \
{                                                                            \
    ppc_vsr_t t = { };                                                       \
    int i;                                                                   \
                                                                             \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        t.fld = tp##_div(xa->fld, xb->fld, &tstat);                          \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            float_invalid_op_div(env, tstat.float_exception_flags,           \
                                 sfifprf, GETPC());                          \
        }                                                                    \
        if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) {  \
            float_zero_divide_excp(env, GETPC());                            \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            t.fld = do_frsp(env, t.fld, GETPC());                            \
        }                                                                    \
                                                                             \
        if (sfifprf) {                                                       \
            helper_compute_fprf_float64(env, t.fld);                         \
        }                                                                    \
    }                                                                        \
                                                                             \
    *xt = t;                                                                 \
    do_float_check_status(env, sfifprf, GETPC());                            \
}

VSX_DIV(xsdivdp, 1, float64, VsrD(0), 1, 0)
VSX_DIV(xsdivsp, 1, float64, VsrD(0), 1, 1)
VSX_DIV(xvdivdp, 2, float64, VsrD(i), 0, 0)
VSX_DIV(xvdivsp, 4, float32, VsrW(i), 0, 0)

void helper_xsdivqp(CPUPPCState *env, uint32_t opcode,
                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
{
    ppc_vsr_t t = *xt;
    float_status tstat;

    helper_reset_fpstatus(env);
    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    t.f128 = float128_div(xa->f128, xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        float_invalid_op_div(env, tstat.float_exception_flags, 1, GETPC());
    }
    if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) {
        float_zero_divide_excp(env, GETPC());
    }

    helper_compute_fprf_float128(env, t.f128);
    *xt = t;
    do_float_check_status(env, true, GETPC());
}

/*
 * VSX_RE  - VSX floating point reciprocal estimate
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfifprf - set FI and FPRF
 *   r2sp  - round intermediate double-precision result to single
 */
#define VSX_RE(op, nels, tp, fld, sfifprf, r2sp)                         \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)         \
{                                                                        \
    ppc_vsr_t t = { };                                                   \
    int i;                                                               \
                                                                         \
    helper_reset_fpstatus(env);                                          \
                                                                         \
    for (i = 0; i < nels; i++) {                                         \
        if (unlikely(tp##_is_signaling_nan(xb->fld, &env->fp_status))) { \
            float_invalid_op_vxsnan(env, GETPC());                       \
        }                                                                \
        t.fld = tp##_div(tp##_one, xb->fld, &env->fp_status);            \
                                                                         \
        if (r2sp) {                                                      \
            t.fld = do_frsp(env, t.fld, GETPC());                        \
        }                                                                \
                                                                         \
        if (sfifprf) {                                                   \
            helper_compute_fprf_float64(env, t.fld);                     \
        }                                                                \
    }                                                                    \
                                                                         \
    *xt = t;                                                             \
    do_float_check_status(env, sfifprf, GETPC());                        \
}

VSX_RE(xsredp, 1, float64, VsrD(0), 1, 0)
VSX_RE(xsresp, 1, float64, VsrD(0), 1, 1)
VSX_RE(xvredp, 2, float64, VsrD(i), 0, 0)
VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0)

/*
 * VSX_SQRT - VSX floating point square root
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfifprf - set FI and FPRF
 *   r2sp  - round intermediate double-precision result to single
 */
#define VSX_SQRT(op, nels, tp, fld, sfifprf, r2sp)                           \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)             \
{                                                                            \
    ppc_vsr_t t = { };                                                       \
    int i;                                                                   \
                                                                             \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        t.fld = tp##_sqrt(xb->fld, &tstat);                                  \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            float_invalid_op_sqrt(env, tstat.float_exception_flags,          \
                                  sfifprf, GETPC());                         \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            t.fld = do_frsp(env, t.fld, GETPC());                            \
        }                                                                    \
                                                                             \
        if (sfifprf) {                                                       \
            helper_compute_fprf_float64(env, t.fld);                         \
        }                                                                    \
    }                                                                        \
                                                                             \
    *xt = t;                                                                 \
    do_float_check_status(env, sfifprf, GETPC());                            \
}

VSX_SQRT(xssqrtdp, 1, float64, VsrD(0), 1, 0)
VSX_SQRT(xssqrtsp, 1, float64, VsrD(0), 1, 1)
VSX_SQRT(xvsqrtdp, 2, float64, VsrD(i), 0, 0)
VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0)

/*
 * VSX_RSQRTE - VSX floating point reciprocal square root estimate
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfifprf - set FI and FPRF
 *   r2sp  - round intermediate double-precision result to single
 */
#define VSX_RSQRTE(op, nels, tp, fld, sfifprf, r2sp)                         \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)             \
{                                                                            \
    ppc_vsr_t t = { };                                                       \
    int i;                                                                   \
                                                                             \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        t.fld = tp##_sqrt(xb->fld, &tstat);                                  \
        t.fld = tp##_div(tp##_one, t.fld, &tstat);                           \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            float_invalid_op_sqrt(env, tstat.float_exception_flags,          \
                                  sfifprf, GETPC());                         \
        }                                                                    \
        if (r2sp) {                                                          \
            t.fld = do_frsp(env, t.fld, GETPC());                            \
        }                                                                    \
                                                                             \
        if (sfifprf) {                                                       \
            helper_compute_fprf_float64(env, t.fld);                         \
        }                                                                    \
    }                                                                        \
                                                                             \
    *xt = t;                                                                 \
    do_float_check_status(env, sfifprf, GETPC());                            \
}

VSX_RSQRTE(xsrsqrtedp, 1, float64, VsrD(0), 1, 0)
VSX_RSQRTE(xsrsqrtesp, 1, float64, VsrD(0), 1, 1)
VSX_RSQRTE(xvrsqrtedp, 2, float64, VsrD(i), 0, 0)
VSX_RSQRTE(xvrsqrtesp, 4, float32, VsrW(i), 0, 0)

/*
 * VSX_TDIV - VSX floating point test for divide
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   emin  - minimum unbiased exponent
 *   emax  - maximum unbiased exponent
 *   nbits - number of fraction bits
 */
#define VSX_TDIV(op, nels, tp, fld, emin, emax, nbits)                  \
void helper_##op(CPUPPCState *env, uint32_t opcode,                     \
                 ppc_vsr_t *xa, ppc_vsr_t *xb)                          \
{                                                                       \
    int i;                                                              \
    int fe_flag = 0;                                                    \
    int fg_flag = 0;                                                    \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        if (unlikely(tp##_is_infinity(xa->fld) ||                       \
                     tp##_is_infinity(xb->fld) ||                       \
                     tp##_is_zero(xb->fld))) {                          \
            fe_flag = 1;                                                \
            fg_flag = 1;                                                \
        } else {                                                        \
            int e_a = ppc_##tp##_get_unbiased_exp(xa->fld);             \
            int e_b = ppc_##tp##_get_unbiased_exp(xb->fld);             \
                                                                        \
            if (unlikely(tp##_is_any_nan(xa->fld) ||                    \
                         tp##_is_any_nan(xb->fld))) {                   \
                fe_flag = 1;                                            \
            } else if ((e_b <= emin) || (e_b >= (emax - 2))) {          \
                fe_flag = 1;                                            \
            } else if (!tp##_is_zero(xa->fld) &&                        \
                       (((e_a - e_b) >= emax) ||                        \
                        ((e_a - e_b) <= (emin + 1)) ||                  \
                        (e_a <= (emin + nbits)))) {                     \
                fe_flag = 1;                                            \
            }                                                           \
                                                                        \
            if (unlikely(tp##_is_zero_or_denormal(xb->fld))) {          \
                /*                                                      \
                 * XB is not zero because of the above check and so     \
                 * must be denormalized.                                \
                 */                                                     \
                fg_flag = 1;                                            \
            }                                                           \
        }                                                               \
    }                                                                   \
                                                                        \
    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
}

VSX_TDIV(xstdivdp, 1, float64, VsrD(0), -1022, 1023, 52)
VSX_TDIV(xvtdivdp, 2, float64, VsrD(i), -1022, 1023, 52)
VSX_TDIV(xvtdivsp, 4, float32, VsrW(i), -126, 127, 23)

/*
 * VSX_TSQRT - VSX floating point test for square root
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   emin  - minimum unbiased exponent
 *   nbits - number of fraction bits
 */
#define VSX_TSQRT(op, nels, tp, fld, emin, nbits)                       \
void helper_##op(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xb)      \
{                                                                       \
    int i;                                                              \
    int fe_flag = 0;                                                    \
    int fg_flag = 0;                                                    \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        if (unlikely(tp##_is_infinity(xb->fld) ||                       \
                     tp##_is_zero(xb->fld))) {                          \
            fe_flag = 1;                                                \
            fg_flag = 1;                                                \
        } else {                                                        \
            int e_b = ppc_##tp##_get_unbiased_exp(xb->fld);             \
                                                                        \
            if (unlikely(tp##_is_any_nan(xb->fld))) {                   \
                fe_flag = 1;                                            \
            } else if (unlikely(tp##_is_zero(xb->fld))) {               \
                fe_flag = 1;                                            \
            } else if (unlikely(tp##_is_neg(xb->fld))) {                \
                fe_flag = 1;                                            \
            } else if (!tp##_is_zero(xb->fld) &&                        \
                       (e_b <= (emin + nbits))) {                       \
                fe_flag = 1;                                            \
            }                                                           \
                                                                        \
            if (unlikely(tp##_is_zero_or_denormal(xb->fld))) {          \
                /*                                                      \
                 * XB is not zero because of the above check and so     \
                 * must be denormalized.                                \
                 */                                                     \
                fg_flag = 1;                                            \
            }                                                           \
        }                                                               \
    }                                                                   \
                                                                        \
    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
}

VSX_TSQRT(xstsqrtdp, 1, float64, VsrD(0), -1022, 52)
VSX_TSQRT(xvtsqrtdp, 2, float64, VsrD(i), -1022, 52)
VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23)

/*
 * VSX_MADD - VSX floating point multiply/add variations
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   maddflgs - flags for the float*muladd routine that control the
 *           various forms (madd, msub, nmadd, nmsub)
 *   sfifprf - set FI and FPRF
 */
#define VSX_MADD(op, nels, tp, fld, maddflgs, sfifprf)                       \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                            \
                 ppc_vsr_t *s1, ppc_vsr_t *s2, ppc_vsr_t *s3)                \
{                                                                            \
    ppc_vsr_t t = { };                                                       \
    int i;                                                                   \
                                                                             \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        t.fld = tp##_muladd(s1->fld, s3->fld, s2->fld, maddflgs, &tstat);    \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            float_invalid_op_madd(env, tstat.float_exception_flags,          \
                                  sfifprf, GETPC());                         \
        }                                                                    \
                                                                             \
        if (sfifprf) {                                                       \
            helper_compute_fprf_float64(env, t.fld);                         \
        }                                                                    \
    }                                                                        \
    *xt = t;                                                                 \
    do_float_check_status(env, sfifprf, GETPC());                            \
}

VSX_MADD(XSMADDDP, 1, float64, VsrD(0), MADD_FLGS, 1)
VSX_MADD(XSMSUBDP, 1, float64, VsrD(0), MSUB_FLGS, 1)
VSX_MADD(XSNMADDDP, 1, float64, VsrD(0), NMADD_FLGS, 1)
VSX_MADD(XSNMSUBDP, 1, float64, VsrD(0), NMSUB_FLGS, 1)
VSX_MADD(XSMADDSP, 1, float64r32, VsrD(0), MADD_FLGS, 1)
VSX_MADD(XSMSUBSP, 1, float64r32, VsrD(0), MSUB_FLGS, 1)
VSX_MADD(XSNMADDSP, 1, float64r32, VsrD(0), NMADD_FLGS, 1)
VSX_MADD(XSNMSUBSP, 1, float64r32, VsrD(0), NMSUB_FLGS, 1)

VSX_MADD(xvmadddp, 2, float64, VsrD(i), MADD_FLGS, 0)
VSX_MADD(xvmsubdp, 2, float64, VsrD(i), MSUB_FLGS, 0)
VSX_MADD(xvnmadddp, 2, float64, VsrD(i), NMADD_FLGS, 0)
VSX_MADD(xvnmsubdp, 2, float64, VsrD(i), NMSUB_FLGS, 0)

VSX_MADD(xvmaddsp, 4, float32, VsrW(i), MADD_FLGS, 0)
VSX_MADD(xvmsubsp, 4, float32, VsrW(i), MSUB_FLGS, 0)
VSX_MADD(xvnmaddsp, 4, float32, VsrW(i), NMADD_FLGS, 0)
VSX_MADD(xvnmsubsp, 4, float32, VsrW(i), NMSUB_FLGS, 0)

/*
 * VSX_MADDQ - VSX floating point quad-precision multiply/add
 *   op    - instruction mnemonic
 *   maddflgs - flags for the float*muladd routine that control the
 *           various forms (madd, msub, nmadd, nmsub)
 *   ro    - round to odd
 */
#define VSX_MADDQ(op, maddflgs, ro)                                          \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *s1,             \
                 ppc_vsr_t *s2, ppc_vsr_t *s3)                               \
{                                                                            \
    ppc_vsr_t t = *xt;                                                       \
                                                                             \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    float_status tstat = env->fp_status;                                     \
    set_float_exception_flags(0, &tstat);                                    \
    if (ro) {                                                                \
        tstat.float_rounding_mode = float_round_to_odd;                      \
    }                                                                        \
    t.f128 = float128_muladd(s1->f128, s3->f128, s2->f128, maddflgs, &tstat);\
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;     \
                                                                             \
    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {        \
        float_invalid_op_madd(env, tstat.float_exception_flags,              \
                              false, GETPC());                               \
    }                                                                        \
                                                                             \
    helper_compute_fprf_float128(env, t.f128);                               \
    *xt = t;                                                                 \
    do_float_check_status(env, true, GETPC());                               \
}

VSX_MADDQ(XSMADDQP, MADD_FLGS, 0)
VSX_MADDQ(XSMADDQPO, MADD_FLGS, 1)
VSX_MADDQ(XSMSUBQP, MSUB_FLGS, 0)
VSX_MADDQ(XSMSUBQPO, MSUB_FLGS, 1)
VSX_MADDQ(XSNMADDQP, NMADD_FLGS, 0)
VSX_MADDQ(XSNMADDQPO, NMADD_FLGS, 1)
VSX_MADDQ(XSNMSUBQP, NMSUB_FLGS, 0)
VSX_MADDQ(XSNMSUBQPO, NMSUB_FLGS, 1)
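
/*
 * The *QPO variants differ from the *QP ones only in forcing
 * round-to-odd (ro == 1) for the final rounding.
 */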

/*
 * VSX_SCALAR_CMP - VSX scalar floating point compare
 *   op    - instruction mnemonic
 *   tp    - type (float64 or float128)
 *   cmp   - comparison operation
 *   fld   - vsr_t field (VsrD(*) or f128)
 *   svxvc - set VXVC bit
 */
#define VSX_SCALAR_CMP(op, tp, cmp, fld, svxvc)                    \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                  \
                 ppc_vsr_t *xa, ppc_vsr_t *xb)                     \
{                                                                  \
    int flags;                                                     \
    bool r, vxvc;                                                  \
                                                                   \
    helper_reset_fpstatus(env);                                    \
                                                                   \
    if (svxvc) {                                                   \
        r = tp##_##cmp(xb->fld, xa->fld, &env->fp_status);         \
    } else {                                                       \
        r = tp##_##cmp##_quiet(xb->fld, xa->fld, &env->fp_status); \
    }                                                              \
                                                                   \
    flags = get_float_exception_flags(&env->fp_status);            \
    if (unlikely(flags & float_flag_invalid)) {                    \
        vxvc = svxvc;                                              \
        if (flags & float_flag_invalid_snan) {                     \
            float_invalid_op_vxsnan(env, GETPC());                 \
            vxvc &= !(env->fpscr & FP_VE);                         \
        }                                                          \
        if (vxvc) {                                                \
            float_invalid_op_vxvc(env, 0, GETPC());                \
        }                                                          \
    }                                                              \
                                                                   \
    memset(xt, 0, sizeof(*xt));                                    \
    memset(&xt->fld, -r, sizeof(xt->fld));                         \
    do_float_check_status(env, false, GETPC());                    \
}

VSX_SCALAR_CMP(XSCMPEQDP, float64, eq, VsrD(0), 0)
VSX_SCALAR_CMP(XSCMPGEDP, float64, le, VsrD(0), 1)
VSX_SCALAR_CMP(XSCMPGTDP, float64, lt, VsrD(0), 1)
VSX_SCALAR_CMP(XSCMPEQQP, float128, eq, f128, 0)
VSX_SCALAR_CMP(XSCMPGEQP, float128, le, f128, 1)
VSX_SCALAR_CMP(XSCMPGTQP, float128, lt, f128, 1)

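/*
 * xscmpexpdp/xscmpexpqp compare only the biased exponent fields of the
 * two operands; a NaN in either input forces the unordered (SO) result.
 */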
void helper_xscmpexpdp(CPUPPCState *env, uint32_t opcode,
                       ppc_vsr_t *xa, ppc_vsr_t *xb)
{
    int64_t exp_a, exp_b;
    uint32_t cc;

    exp_a = extract64(xa->VsrD(0), 52, 11);
    exp_b = extract64(xb->VsrD(0), 52, 11);

    if (unlikely(float64_is_any_nan(xa->VsrD(0)) ||
                 float64_is_any_nan(xb->VsrD(0)))) {
        cc = CRF_SO;
    } else {
        if (exp_a < exp_b) {
            cc = CRF_LT;
        } else if (exp_a > exp_b) {
            cc = CRF_GT;
        } else {
            cc = CRF_EQ;
        }
    }

    env->fpscr &= ~FP_FPCC;
    env->fpscr |= cc << FPSCR_FPCC;
    env->crf[BF(opcode)] = cc;

    do_float_check_status(env, false, GETPC());
}

void helper_xscmpexpqp(CPUPPCState *env, uint32_t opcode,
                       ppc_vsr_t *xa, ppc_vsr_t *xb)
{
    int64_t exp_a, exp_b;
    uint32_t cc;

    exp_a = extract64(xa->VsrD(0), 48, 15);
    exp_b = extract64(xb->VsrD(0), 48, 15);

    if (unlikely(float128_is_any_nan(xa->f128) ||
                 float128_is_any_nan(xb->f128))) {
        cc = CRF_SO;
    } else {
        if (exp_a < exp_b) {
            cc = CRF_LT;
        } else if (exp_a > exp_b) {
            cc = CRF_GT;
        } else {
            cc = CRF_EQ;
        }
    }

    env->fpscr &= ~FP_FPCC;
    env->fpscr |= cc << FPSCR_FPCC;
    env->crf[BF(opcode)] = cc;

    do_float_check_status(env, false, GETPC());
}
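/*
 * Common code for xscmpodp (ordered) and xscmpudp (unordered): an SNaN
 * operand always raises VXSNAN; the ordered compare additionally raises
 * VXVC for any NaN operand (for an SNaN, only when VE is clear).
 */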
static inline void do_scalar_cmp(CPUPPCState *env, ppc_vsr_t *xa, ppc_vsr_t *xb,
                                 int crf_idx, bool ordered)
{
    uint32_t cc;
    bool vxsnan_flag = false, vxvc_flag = false;

    helper_reset_fpstatus(env);

    switch (float64_compare(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) {
    case float_relation_less:
        cc = CRF_LT;
        break;
    case float_relation_equal:
        cc = CRF_EQ;
        break;
    case float_relation_greater:
        cc = CRF_GT;
        break;
    case float_relation_unordered:
        cc = CRF_SO;

        if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) ||
            float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) {
            vxsnan_flag = true;
            if (!(env->fpscr & FP_VE) && ordered) {
                vxvc_flag = true;
            }
        } else if (float64_is_quiet_nan(xa->VsrD(0), &env->fp_status) ||
                   float64_is_quiet_nan(xb->VsrD(0), &env->fp_status)) {
            if (ordered) {
                vxvc_flag = true;
            }
        }

        break;
    default:
        g_assert_not_reached();
    }

    env->fpscr &= ~FP_FPCC;
    env->fpscr |= cc << FPSCR_FPCC;
    env->crf[crf_idx] = cc;

    if (vxsnan_flag) {
        float_invalid_op_vxsnan(env, GETPC());
    }
    if (vxvc_flag) {
        float_invalid_op_vxvc(env, 0, GETPC());
    }

    do_float_check_status(env, false, GETPC());
}

void helper_xscmpodp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xa,
                     ppc_vsr_t *xb)
{
    do_scalar_cmp(env, xa, xb, BF(opcode), true);
}

void helper_xscmpudp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xa,
                     ppc_vsr_t *xb)
{
    do_scalar_cmp(env, xa, xb, BF(opcode), false);
}
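/* Quad-precision variant of do_scalar_cmp; same VXSNAN/VXVC rules. */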
static inline void do_scalar_cmpq(CPUPPCState *env, ppc_vsr_t *xa,
                                  ppc_vsr_t *xb, int crf_idx, bool ordered)
{
    uint32_t cc;
    bool vxsnan_flag = false, vxvc_flag = false;

    helper_reset_fpstatus(env);

    switch (float128_compare(xa->f128, xb->f128, &env->fp_status)) {
    case float_relation_less:
        cc = CRF_LT;
        break;
    case float_relation_equal:
        cc = CRF_EQ;
        break;
    case float_relation_greater:
        cc = CRF_GT;
        break;
    case float_relation_unordered:
        cc = CRF_SO;

        if (float128_is_signaling_nan(xa->f128, &env->fp_status) ||
            float128_is_signaling_nan(xb->f128, &env->fp_status)) {
            vxsnan_flag = true;
            if (!(env->fpscr & FP_VE) && ordered) {
                vxvc_flag = true;
            }
        } else if (float128_is_quiet_nan(xa->f128, &env->fp_status) ||
                   float128_is_quiet_nan(xb->f128, &env->fp_status)) {
            if (ordered) {
                vxvc_flag = true;
            }
        }

        break;
    default:
        g_assert_not_reached();
    }

    env->fpscr &= ~FP_FPCC;
    env->fpscr |= cc << FPSCR_FPCC;
    env->crf[crf_idx] = cc;

    if (vxsnan_flag) {
        float_invalid_op_vxsnan(env, GETPC());
    }
    if (vxvc_flag) {
        float_invalid_op_vxvc(env, 0, GETPC());
    }

    do_float_check_status(env, false, GETPC());
}

void helper_xscmpoqp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xa,
                     ppc_vsr_t *xb)
{
    do_scalar_cmpq(env, xa, xb, BF(opcode), true);
}

void helper_xscmpuqp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xa,
                     ppc_vsr_t *xb)
{
    do_scalar_cmpq(env, xa, xb, BF(opcode), false);
}

/*
 * VSX_MAX_MIN - VSX floating point maximum/minimum
 *   name  - instruction mnemonic
 *   op    - operation (max or min)
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 */
#define VSX_MAX_MIN(name, op, nels, tp, fld)                                  \
void helper_##name(CPUPPCState *env, ppc_vsr_t *xt,                          \
                   ppc_vsr_t *xa, ppc_vsr_t *xb)                              \
{                                                                             \
    ppc_vsr_t t = { };                                                        \
    int i;                                                                    \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        t.fld = tp##_##op(xa->fld, xb->fld, &env->fp_status);                 \
        if (unlikely(tp##_is_signaling_nan(xa->fld, &env->fp_status) ||       \
                     tp##_is_signaling_nan(xb->fld, &env->fp_status))) {      \
            float_invalid_op_vxsnan(env, GETPC());                            \
        }                                                                     \
    }                                                                         \
                                                                              \
    *xt = t;                                                                  \
    do_float_check_status(env, false, GETPC());                               \
}

VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmaxdp, maxnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvmaxsp, maxnum, 4, float32, VsrW(i))
VSX_MAX_MIN(xsmindp, minnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i))
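/*
 * "C"-form max/min (xsmaxcdp etc.): a quiet compare picks xa when it wins;
 * otherwise (including the unordered NaN case) the result is taken from xb,
 * raising VXSNAN if the compare saw a signaling NaN.
 */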
#define VSX_MAX_MINC(name, max, tp, fld)                                      \
void helper_##name(CPUPPCState *env,                                          \
                   ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)               \
{                                                                             \
    ppc_vsr_t t = { };                                                        \
    bool first;                                                               \
                                                                              \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    if (max) {                                                                \
        first = tp##_le_quiet(xb->fld, xa->fld, &env->fp_status);             \
    } else {                                                                  \
        first = tp##_lt_quiet(xa->fld, xb->fld, &env->fp_status);             \
    }                                                                         \
                                                                              \
    if (first) {                                                              \
        t.fld = xa->fld;                                                      \
    } else {                                                                  \
        t.fld = xb->fld;                                                      \
        if (env->fp_status.float_exception_flags & float_flag_invalid_snan) { \
            float_invalid_op_vxsnan(env, GETPC());                            \
        }                                                                     \
    }                                                                         \
                                                                              \
    *xt = t;                                                                  \
}

VSX_MAX_MINC(XSMAXCDP, true, float64, VsrD(0));
VSX_MAX_MINC(XSMINCDP, false, float64, VsrD(0));
VSX_MAX_MINC(XSMAXCQP, true, float128, f128);
VSX_MAX_MINC(XSMINCQP, false, float128, f128);
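/*
 * "J"-form max/min: NaNs are propagated (xa taking precedence), signed
 * zeros compare as -0.0 < +0.0, and the result write is suppressed
 * entirely if an SNaN is seen while VE is set.
 */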
#define VSX_MAX_MINJ(name, max)                                               \
void helper_##name(CPUPPCState *env,                                          \
                   ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)               \
{                                                                             \
    ppc_vsr_t t = { };                                                        \
    bool vxsnan_flag = false, vex_flag = false;                               \
                                                                              \
    if (unlikely(float64_is_any_nan(xa->VsrD(0)))) {                          \
        if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status)) {         \
            vxsnan_flag = true;                                               \
        }                                                                     \
        t.VsrD(0) = xa->VsrD(0);                                              \
    } else if (unlikely(float64_is_any_nan(xb->VsrD(0)))) {                   \
        if (float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) {         \
            vxsnan_flag = true;                                               \
        }                                                                     \
        t.VsrD(0) = xb->VsrD(0);                                              \
    } else if (float64_is_zero(xa->VsrD(0)) &&                                \
               float64_is_zero(xb->VsrD(0))) {                                \
        if (max) {                                                            \
            if (!float64_is_neg(xa->VsrD(0)) ||                               \
                !float64_is_neg(xb->VsrD(0))) {                               \
                t.VsrD(0) = 0ULL;                                             \
            } else {                                                          \
                t.VsrD(0) = 0x8000000000000000ULL;                            \
            }                                                                 \
        } else {                                                              \
            if (float64_is_neg(xa->VsrD(0)) ||                                \
                float64_is_neg(xb->VsrD(0))) {                                \
                t.VsrD(0) = 0x8000000000000000ULL;                            \
            } else {                                                          \
                t.VsrD(0) = 0ULL;                                             \
            }                                                                 \
        }                                                                     \
    } else if ((max &&                                                        \
                !float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) ||    \
               (!max &&                                                       \
                float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status))) {     \
        t.VsrD(0) = xa->VsrD(0);                                              \
    } else {                                                                  \
        t.VsrD(0) = xb->VsrD(0);                                              \
    }                                                                         \
                                                                              \
    vex_flag = (env->fpscr & FP_VE) && vxsnan_flag;                           \
    if (vxsnan_flag) {                                                        \
        float_invalid_op_vxsnan(env, GETPC());                                \
    }                                                                         \
    if (!vex_flag) {                                                          \
        *xt = t;                                                              \
    }                                                                         \
}                                                                             \

VSX_MAX_MINJ(XSMAXJDP, 1);
VSX_MAX_MINJ(XSMINJDP, 0);

/*
 * VSX_CMP - VSX floating point compare
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   cmp   - comparison operation
 *   svxvc - set VXVC bit
 *   exp   - expected result of comparison
 */
#define VSX_CMP(op, nels, tp, fld, cmp, svxvc, exp)                           \
uint32_t helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                         \
                     ppc_vsr_t *xa, ppc_vsr_t *xb)                            \
{                                                                             \
    ppc_vsr_t t = *xt;                                                        \
    uint32_t crf6 = 0;                                                        \
    int i;                                                                    \
    int all_true = 1;                                                         \
    int all_false = 1;                                                        \
                                                                              \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        if (unlikely(tp##_is_any_nan(xa->fld) ||                              \
                     tp##_is_any_nan(xb->fld))) {                             \
            if (tp##_is_signaling_nan(xa->fld, &env->fp_status) ||            \
                tp##_is_signaling_nan(xb->fld, &env->fp_status)) {            \
                float_invalid_op_vxsnan(env, GETPC());                        \
            }                                                                 \
            if (svxvc) {                                                      \
                float_invalid_op_vxvc(env, 0, GETPC());                       \
            }                                                                 \
            t.fld = 0;                                                        \
            all_true = 0;                                                     \
        } else {                                                              \
            if (tp##_##cmp(xb->fld, xa->fld, &env->fp_status) == exp) {       \
                t.fld = -1;                                                   \
                all_false = 0;                                                \
            } else {                                                          \
                t.fld = 0;                                                    \
                all_true = 0;                                                 \
            }                                                                 \
        }                                                                     \
    }                                                                         \
                                                                              \
    *xt = t;                                                                  \
    crf6 = (all_true ? 0x8 : 0) | (all_false ? 0x2 : 0);                      \
    return crf6;                                                              \
}

VSX_CMP(xvcmpeqdp, 2, float64, VsrD(i), eq, 0, 1)
VSX_CMP(xvcmpgedp, 2, float64, VsrD(i), le, 1, 1)
VSX_CMP(xvcmpgtdp, 2, float64, VsrD(i), lt, 1, 1)
VSX_CMP(xvcmpnedp, 2, float64, VsrD(i), eq, 0, 0)
VSX_CMP(xvcmpeqsp, 4, float32, VsrW(i), eq, 0, 1)
VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1, 1)
VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1, 1)
VSX_CMP(xvcmpnesp, 4, float32, VsrW(i), eq, 0, 0)

/*
 * VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   sfifprf - set FI and FPRF
 */
#define VSX_CVT_FP_TO_FP(op, nels, stp, ttp, sfld, tfld, sfifprf)             \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)              \
{                                                                             \
    ppc_vsr_t t = { };                                                        \
    int i;                                                                    \
                                                                              \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status);                   \
        if (unlikely(stp##_is_signaling_nan(xb->sfld,                         \
                                            &env->fp_status))) {              \
            float_invalid_op_vxsnan(env, GETPC());                            \
            t.tfld = ttp##_snan_to_qnan(t.tfld);                              \
        }                                                                     \
        if (sfifprf) {                                                        \
            helper_compute_fprf_##ttp(env, t.tfld);                           \
        }                                                                     \
    }                                                                         \
                                                                              \
    *xt = t;                                                                  \
    do_float_check_status(env, sfifprf, GETPC());                             \
}

VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1)
VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2 * i), VsrD(i), 0)
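/*
 * As above, but for float64 -> float32 conversions where each
 * single-precision result is written to both words of the target
 * doubleword element.
 */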
#define VSX_CVT_FP_TO_FP2(op, nels, stp, ttp, sfifprf)                        \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)              \
{                                                                             \
    ppc_vsr_t t = { };                                                        \
    int i;                                                                    \
                                                                              \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        t.VsrW(2 * i) = stp##_to_##ttp(xb->VsrD(i), &env->fp_status);         \
        if (unlikely(stp##_is_signaling_nan(xb->VsrD(i),                      \
                                            &env->fp_status))) {              \
            float_invalid_op_vxsnan(env, GETPC());                            \
            t.VsrW(2 * i) = ttp##_snan_to_qnan(t.VsrW(2 * i));                \
        }                                                                     \
        if (sfifprf) {                                                        \
            helper_compute_fprf_##ttp(env, t.VsrW(2 * i));                    \
        }                                                                     \
        t.VsrW(2 * i + 1) = t.VsrW(2 * i);                                    \
    }                                                                         \
                                                                              \
    *xt = t;                                                                  \
    do_float_check_status(env, sfifprf, GETPC());                             \
}

VSX_CVT_FP_TO_FP2(xvcvdpsp, 2, float64, float32, 0)
VSX_CVT_FP_TO_FP2(xscvdpsp, 1, float64, float32, 1)

/*
 * VSX_CVT_FP_TO_FP_VECTOR - VSX floating point/floating point conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   sfprf - set FPRF
 */
#define VSX_CVT_FP_TO_FP_VECTOR(op, nels, stp, ttp, sfld, tfld, sfprf)        \
void helper_##op(CPUPPCState *env, uint32_t opcode,                           \
                 ppc_vsr_t *xt, ppc_vsr_t *xb)                                \
{                                                                             \
    ppc_vsr_t t = *xt;                                                        \
    int i;                                                                    \
                                                                              \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status);                   \
        if (unlikely(stp##_is_signaling_nan(xb->sfld,                         \
                                            &env->fp_status))) {              \
            float_invalid_op_vxsnan(env, GETPC());                            \
            t.tfld = ttp##_snan_to_qnan(t.tfld);                              \
        }                                                                     \
        if (sfprf) {                                                          \
            helper_compute_fprf_##ttp(env, t.tfld);                           \
        }                                                                     \
    }                                                                         \
                                                                              \
    *xt = t;                                                                  \
    do_float_check_status(env, true, GETPC());                                \
}

VSX_CVT_FP_TO_FP_VECTOR(xscvdpqp, 1, float64, float128, VsrD(0), f128, 1)

/*
 * VSX_CVT_FP_TO_FP_HP - VSX floating point/floating point conversion
 *                       involving one half precision value
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type
 *   ttp   - target type
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   sfifprf - set FI and FPRF
 */
#define VSX_CVT_FP_TO_FP_HP(op, nels, stp, ttp, sfld, tfld, sfifprf)          \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)              \
{                                                                             \
    ppc_vsr_t t = { };                                                        \
    int i;                                                                    \
                                                                              \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        t.tfld = stp##_to_##ttp(xb->sfld, 1, &env->fp_status);                \
        if (unlikely(stp##_is_signaling_nan(xb->sfld,                         \
                                            &env->fp_status))) {              \
            float_invalid_op_vxsnan(env, GETPC());                            \
            t.tfld = ttp##_snan_to_qnan(t.tfld);                              \
        }                                                                     \
        if (sfifprf) {                                                        \
            helper_compute_fprf_##ttp(env, t.tfld);                           \
        }                                                                     \
    }                                                                         \
                                                                              \
    *xt = t;                                                                  \
    do_float_check_status(env, sfifprf, GETPC());                             \
}

VSX_CVT_FP_TO_FP_HP(xscvdphp, 1, float64, float16, VsrD(0), VsrH(3), 1)
VSX_CVT_FP_TO_FP_HP(xscvhpdp, 1, float16, float64, VsrH(3), VsrD(0), 1)
VSX_CVT_FP_TO_FP_HP(xvcvsphp, 4, float32, float16, VsrW(i), VsrH(2 * i + 1), 0)
VSX_CVT_FP_TO_FP_HP(xvcvhpsp, 4, float16, float32, VsrH(2 * i + 1), VsrW(i), 0)
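/*
 * XVCVSPBF16 - VSX Vector Convert Single-Precision to bfloat16, raising
 * VXSNAN if any source element is a signaling NaN.
 */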
void helper_XVCVSPBF16(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)
{
    ppc_vsr_t t = { };
    int i, status;

    helper_reset_fpstatus(env);

    for (i = 0; i < 4; i++) {
        t.VsrH(2 * i + 1) = float32_to_bfloat16(xb->VsrW(i), &env->fp_status);
    }

    status = get_float_exception_flags(&env->fp_status);
    if (unlikely(status & float_flag_invalid_snan)) {
        float_invalid_op_vxsnan(env, GETPC());
    }

    *xt = t;
    do_float_check_status(env, false, GETPC());
}
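/*
 * XSCVQPDP[O] - VSX Scalar Convert Quad-Precision to Double-Precision;
 * a non-zero "ro" argument selects round-to-odd.
 */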
void helper_XSCVQPDP(CPUPPCState *env, uint32_t ro, ppc_vsr_t *xt,
                     ppc_vsr_t *xb)
{
    ppc_vsr_t t = { };
    float_status tstat;

    helper_reset_fpstatus(env);

    tstat = env->fp_status;
    if (ro != 0) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    t.VsrD(0) = float128_to_float64(xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
    if (unlikely(float128_is_signaling_nan(xb->f128, &tstat))) {
        float_invalid_op_vxsnan(env, GETPC());
        t.VsrD(0) = float64_snan_to_qnan(t.VsrD(0));
    }
    helper_compute_fprf_float64(env, t.VsrD(0));

    *xt = t;
    do_float_check_status(env, true, GETPC());
}
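/*
 * xscvdpspn - non-signalling DP -> SP conversion performed directly on
 * the raw bit pattern; the SP result is replicated to both words of the
 * returned doubleword.
 */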
uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb)
{
    uint64_t result, sign, exp, frac;

    helper_reset_fpstatus(env);
    float_status tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);

    sign = extract64(xb, 63, 1);
    exp = extract64(xb, 52, 11);
    frac = extract64(xb, 0, 52) | 0x10000000000000ULL;

    if (unlikely(exp == 0 && extract64(frac, 0, 52) != 0)) {
        /* DP denormal operand */
        /* Exponent override to DP min exp. */
        exp = 1;
        /* Implicit bit override to 0. */
        frac = deposit64(frac, 53, 1, 0);
    }

    if (unlikely(exp < 897 && frac != 0)) {
        /* SP tiny operand */
        if (897 - exp > 63) {
            frac = 0;
        } else {
            /* Denormalize until exp = SP min exp */
            frac >>= (897 - exp);
        }
        /* Zero operand, or denormalized SP result */
        exp = 896;
    }

    result = sign << 31;
    result |= extract64(exp, 10, 1) << 30;
    result |= extract64(exp, 0, 7) << 23;
    result |= extract64(frac, 29, 23);

    /* hardware replicates result to both words of the doubleword result */
    return (result << 32) | result;
}

uint64_t helper_XSCVSPDPN(uint64_t xb)
{
    return helper_todouble(xb >> 32);
}

/*
 * VSX_CVT_FP_TO_INT - VSX floating point to integer conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (int32, uint32, int64 or uint64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   sfi   - set FI
 *   rnan  - resulting NaN
 */
#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, sfi, rnan)          \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)              \
{                                                                             \
    ppc_vsr_t t = { };                                                        \
    int i, flags;                                                             \
                                                                              \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status);   \
        flags = env->fp_status.float_exception_flags;                         \
        if (unlikely(flags & float_flag_invalid)) {                           \
            t.tfld = float_invalid_cvt(env, flags, t.tfld, rnan, 0, GETPC()); \
        }                                                                     \
    }                                                                         \
                                                                              \
    *xt = t;                                                                  \
    do_float_check_status(env, sfi, GETPC());                                 \
}

VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0), true, \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), true, 0ULL)
VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), false, \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), false, \
                  0ULL)
VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2 * i), VsrD(i), false, \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), false, \
                  0x80000000ULL)
VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2 * i), VsrD(i), \
                  false, 0ULL)
VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), false, 0U)
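/*
 * Quad-precision to 128-bit integer conversion; on an invalid conversion
 * float_invalid_cvt() yields the 64-bit saturation pattern, which is
 * widened to 128 bits by replicating its low bit across the low
 * doubleword.
 */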
#define VSX_CVT_FP_TO_INT128(op, tp, rnan)                                    \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)              \
{                                                                             \
    ppc_vsr_t t;                                                              \
    int flags;                                                                \
                                                                              \
    helper_reset_fpstatus(env);                                               \
    t.s128 = float128_to_##tp##_round_to_zero(xb->f128, &env->fp_status);     \
    flags = get_float_exception_flags(&env->fp_status);                       \
    if (unlikely(flags & float_flag_invalid)) {                               \
        t.VsrD(0) = float_invalid_cvt(env, flags, t.VsrD(0), rnan, 0, GETPC());\
        t.VsrD(1) = -(t.VsrD(0) & 1);                                         \
    }                                                                         \
                                                                              \
    *xt = t;                                                                  \
    do_float_check_status(env, true, GETPC());                                \
}

VSX_CVT_FP_TO_INT128(XSCVQPUQZ, uint128, 0)
VSX_CVT_FP_TO_INT128(XSCVQPSQZ, int128, 0x8000000000000000ULL)

/*
 * VSX_CVT_FP_TO_INT2 - VSX floating point to integer conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (int32 or uint32)
 *   sfi   - set FI
 *   rnan  - resulting NaN
 */
#define VSX_CVT_FP_TO_INT2(op, nels, stp, ttp, sfi, rnan)                     \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)              \
{                                                                             \
    ppc_vsr_t t = { };                                                        \
    int i, flags;                                                             \
                                                                              \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        t.VsrW(2 * i) = stp##_to_##ttp##_round_to_zero(xb->VsrD(i),           \
                                                       &env->fp_status);      \
        flags = env->fp_status.float_exception_flags;                         \
        if (unlikely(flags & float_flag_invalid)) {                           \
            t.VsrW(2 * i) = float_invalid_cvt(env, flags, t.VsrW(2 * i),      \
                                              rnan, 0, GETPC());              \
        }                                                                     \
        t.VsrW(2 * i + 1) = t.VsrW(2 * i);                                    \
    }                                                                         \
                                                                              \
    *xt = t;                                                                  \
    do_float_check_status(env, sfi, GETPC());                                 \
}

VSX_CVT_FP_TO_INT2(xscvdpsxws, 1, float64, int32, true, 0x80000000U)
VSX_CVT_FP_TO_INT2(xscvdpuxws, 1, float64, uint32, true, 0U)
VSX_CVT_FP_TO_INT2(xvcvdpsxws, 2, float64, int32, false, 0x80000000U)
VSX_CVT_FP_TO_INT2(xvcvdpuxws, 2, float64, uint32, false, 0U)

/*
 * VSX_CVT_FP_TO_INT_VECTOR - VSX floating point to integer conversion
 *   op    - instruction mnemonic
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (int32, uint32, int64 or uint64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   rnan  - resulting NaN
 */
#define VSX_CVT_FP_TO_INT_VECTOR(op, stp, ttp, sfld, tfld, rnan)              \
void helper_##op(CPUPPCState *env, uint32_t opcode,                           \
                 ppc_vsr_t *xt, ppc_vsr_t *xb)                                \
{                                                                             \
    ppc_vsr_t t = { };                                                        \
    int flags;                                                                \
                                                                              \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status);       \
    flags = get_float_exception_flags(&env->fp_status);                       \
    if (flags & float_flag_invalid) {                                         \
        t.tfld = float_invalid_cvt(env, flags, t.tfld, rnan, 0, GETPC());     \
    }                                                                         \
                                                                              \
    *xt = t;                                                                  \
    do_float_check_status(env, true, GETPC());                                \
}

VSX_CVT_FP_TO_INT_VECTOR(xscvqpsdz, float128, int64, f128, VsrD(0), \
                         0x8000000000000000ULL)
VSX_CVT_FP_TO_INT_VECTOR(xscvqpswz, float128, int32, f128, VsrD(0), \
                         0xffffffff80000000ULL)
VSX_CVT_FP_TO_INT_VECTOR(xscvqpudz, float128, uint64, f128, VsrD(0), 0x0ULL)
VSX_CVT_FP_TO_INT_VECTOR(xscvqpuwz, float128, uint32, f128, VsrD(0), 0x0ULL)

/*
 * VSX_CVT_INT_TO_FP - VSX integer to floating point conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (int32, uint32, int64 or uint64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   sfifprf - set FI and FPRF
 *   r2sp  - round the result to single precision
 */
#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfifprf, r2sp)      \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)              \
{                                                                             \
    ppc_vsr_t t = { };                                                        \
    int i;                                                                    \
                                                                              \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status);                   \
        if (r2sp) {                                                           \
            t.tfld = do_frsp(env, t.tfld, GETPC());                           \
        }                                                                     \
        if (sfifprf) {                                                        \
            helper_compute_fprf_float64(env, t.tfld);                         \
        }                                                                     \
    }                                                                         \
                                                                              \
    *xt = t;                                                                  \
    do_float_check_status(env, sfifprf, GETPC());                             \
}

VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0)
VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 0)
VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2 * i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2 * i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)

#define VSX_CVT_INT_TO_FP2(op, stp, ttp)                                      \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)              \
{                                                                             \
    ppc_vsr_t t = { };                                                        \
    int i;                                                                    \
                                                                              \
    for (i = 0; i < 2; i++) {                                                 \
        t.VsrW(2 * i) = stp##_to_##ttp(xb->VsrD(i), &env->fp_status);         \
        t.VsrW(2 * i + 1) = t.VsrW(2 * i);                                    \
    }                                                                         \
                                                                              \
    *xt = t;                                                                  \
    do_float_check_status(env, false, GETPC());                               \
}

VSX_CVT_INT_TO_FP2(xvcvsxdsp, int64, float32)
VSX_CVT_INT_TO_FP2(xvcvuxdsp, uint64, float32)

#define VSX_CVT_INT128_TO_FP(op, tp)                                          \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)              \
{                                                                             \
    helper_reset_fpstatus(env);                                               \
    xt->f128 = tp##_to_float128(xb->s128, &env->fp_status);                   \
    helper_compute_fprf_float128(env, xt->f128);                              \
    do_float_check_status(env, true, GETPC());                                \
}

VSX_CVT_INT128_TO_FP(XSCVUQQP, uint128);
VSX_CVT_INT128_TO_FP(XSCVSQQP, int128);

/*
 * VSX_CVT_INT_TO_FP_VECTOR - VSX integer to floating point conversion
 *   op    - instruction mnemonic
 *   stp   - source type (int64 or uint64)
 *   ttp   - target type (float128)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 */
#define VSX_CVT_INT_TO_FP_VECTOR(op, stp, ttp, sfld, tfld)                    \
void helper_##op(CPUPPCState *env, uint32_t opcode,                           \
                 ppc_vsr_t *xt, ppc_vsr_t *xb)                                \
{                                                                             \
    ppc_vsr_t t = *xt;                                                        \
                                                                              \
    helper_reset_fpstatus(env);                                               \
    t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status);                       \
    helper_compute_fprf_##ttp(env, t.tfld);                                   \
                                                                              \
    *xt = t;                                                                  \
    do_float_check_status(env, true, GETPC());                                \
}

VSX_CVT_INT_TO_FP_VECTOR(xscvsdqp, int64, float128, VsrD(0), f128)
VSX_CVT_INT_TO_FP_VECTOR(xscvudqp, uint64, float128, VsrD(0), f128)

/*
 * For "use current rounding mode", define a value that will not be
 * one of the existing rounding model enums.
 */
#define FLOAT_ROUND_CURRENT (float_round_nearest_even + float_round_down + \
                             float_round_up + float_round_to_zero)

/*
 * VSX_ROUND - VSX floating point round
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   rmode - rounding mode
 *   sfifprf - set FI and FPRF
 */
#define VSX_ROUND(op, nels, tp, fld, rmode, sfifprf)                          \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)              \
{                                                                             \
    ppc_vsr_t t = { };                                                        \
    int i;                                                                    \
    FloatRoundMode curr_rounding_mode;                                        \
                                                                              \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    if (rmode != FLOAT_ROUND_CURRENT) {                                       \
        curr_rounding_mode = get_float_rounding_mode(&env->fp_status);        \
        set_float_rounding_mode(rmode, &env->fp_status);                      \
    }                                                                         \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        if (unlikely(tp##_is_signaling_nan(xb->fld,                           \
                                           &env->fp_status))) {               \
            float_invalid_op_vxsnan(env, GETPC());                            \
            t.fld = tp##_snan_to_qnan(xb->fld);                               \
        } else {                                                              \
            t.fld = tp##_round_to_int(xb->fld, &env->fp_status);              \
        }                                                                     \
        if (sfifprf) {                                                        \
            helper_compute_fprf_float64(env, t.fld);                          \
        }                                                                     \
    }                                                                         \
                                                                              \
    /*                                                                        \
     * If this is not a "use current rounding mode" instruction,              \
     * then inhibit setting of the XX bit and restore rounding                \
     * mode from FPSCR                                                        \
     */                                                                       \
    if (rmode != FLOAT_ROUND_CURRENT) {                                       \
        set_float_rounding_mode(curr_rounding_mode, &env->fp_status);         \
        env->fp_status.float_exception_flags &= ~float_flag_inexact;          \
    }                                                                         \
                                                                              \
    *xt = t;                                                                  \
    do_float_check_status(env, sfifprf, GETPC());                             \
}

VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_ties_away, 1)
VSX_ROUND(xsrdpic, 1, float64, VsrD(0), FLOAT_ROUND_CURRENT, 1)
VSX_ROUND(xsrdpim, 1, float64, VsrD(0), float_round_down, 1)
VSX_ROUND(xsrdpip, 1, float64, VsrD(0), float_round_up, 1)
VSX_ROUND(xsrdpiz, 1, float64, VsrD(0), float_round_to_zero, 1)

VSX_ROUND(xvrdpi, 2, float64, VsrD(i), float_round_ties_away, 0)
VSX_ROUND(xvrdpic, 2, float64, VsrD(i), FLOAT_ROUND_CURRENT, 0)
VSX_ROUND(xvrdpim, 2, float64, VsrD(i), float_round_down, 0)
VSX_ROUND(xvrdpip, 2, float64, VsrD(i), float_round_up, 0)
VSX_ROUND(xvrdpiz, 2, float64, VsrD(i), float_round_to_zero, 0)

VSX_ROUND(xvrspi, 4, float32, VsrW(i), float_round_ties_away, 0)
VSX_ROUND(xvrspic, 4, float32, VsrW(i), FLOAT_ROUND_CURRENT, 0)
VSX_ROUND(xvrspim, 4, float32, VsrW(i), float_round_down, 0)
VSX_ROUND(xvrspip, 4, float32, VsrW(i), float_round_up, 0)
VSX_ROUND(xvrspiz, 4, float32, VsrW(i), float_round_to_zero, 0)
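/* xsrsp - round a double-precision operand to single precision */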
uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb)
{
    helper_reset_fpstatus(env);

    uint64_t xt = do_frsp(env, xb, GETPC());

    helper_compute_fprf_float64(env, xt);
    do_float_check_status(env, true, GETPC());
    return xt;
}
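/*
 * XVXSIGSP - extract the significand of each single-precision element,
 * including the implicit leading bit for normal numbers.
 */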
void helper_XVXSIGSP(ppc_vsr_t *xt, ppc_vsr_t *xb)
{
    ppc_vsr_t t = { };
    uint32_t exp, i, fraction;

    for (i = 0; i < 4; i++) {
        exp = (xb->VsrW(i) >> 23) & 0xFF;
        fraction = xb->VsrW(i) & 0x7FFFFF;
        if (exp != 0 && exp != 255) {
            t.VsrW(i) = fraction | 0x00800000;
        } else {
            t.VsrW(i) = fraction;
        }
    }
    *xt = t;
}
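/*
 * Test-data-class matching: DCMX bit 6 matches NaN, bits 5/4 match
 * +Inf/-Inf, bits 3/2 match +0/-0 and bits 1/0 match +/-denormal.
 */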
#define VSX_TSTDC(tp)                                                         \
static int32_t tp##_tstdc(tp b, uint32_t dcmx)                                \
{                                                                             \
    uint32_t match = 0;                                                       \
    uint32_t sign = tp##_is_neg(b);                                           \
    if (tp##_is_any_nan(b)) {                                                 \
        match = extract32(dcmx, 6, 1);                                        \
    } else if (tp##_is_infinity(b)) {                                         \
        match = extract32(dcmx, 4 + !sign, 1);                                \
    } else if (tp##_is_zero(b)) {                                             \
        match = extract32(dcmx, 2 + !sign, 1);                                \
    } else if (tp##_is_zero_or_denormal(b)) {                                 \
        match = extract32(dcmx, 0 + !sign, 1);                                \
    }                                                                         \
    return (match != 0);                                                      \
}

VSX_TSTDC(float32)
VSX_TSTDC(float64)
VSX_TSTDC(float128)
#undef VSX_TSTDC

void helper_XVTSTDCDP(ppc_vsr_t *t, ppc_vsr_t *b, uint64_t dcmx, uint32_t v)
{
    int i;
    for (i = 0; i < 2; i++) {
        t->s64[i] = (int64_t)-float64_tstdc(b->f64[i], dcmx);
    }
}

void helper_XVTSTDCSP(ppc_vsr_t *t, ppc_vsr_t *b, uint64_t dcmx, uint32_t v)
{
    int i;
    for (i = 0; i < 4; i++) {
        t->s32[i] = (int32_t)-float32_tstdc(b->f32[i], dcmx);
    }
}

static bool not_SP_value(float64 val)
{
    return val != helper_todouble(helper_tosingle(val));
}

/*
 * VSX_XS_TSTDC - VSX Scalar Test Data Class
 *   NAME  - instruction mnemonic
 *   FLD   - vsr_t field (VsrD(0) or f128)
 *   TP    - type (float64 or float128)
 */
#define VSX_XS_TSTDC(NAME, FLD, TP)                                           \
    void helper_##NAME(CPUPPCState *env, uint32_t bf,                         \
                       uint32_t dcmx, ppc_vsr_t *b)                           \
    {                                                                         \
        uint32_t cc, match, sign = TP##_is_neg(b->FLD);                       \
        match = TP##_tstdc(b->FLD, dcmx);                                     \
        cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT;                        \
        env->fpscr &= ~FP_FPCC;                                               \
        env->fpscr |= cc << FPSCR_FPCC;                                       \
        env->crf[bf] = cc;                                                    \
    }

VSX_XS_TSTDC(XSTSTDCDP, VsrD(0), float64)
VSX_XS_TSTDC(XSTSTDCQP, f128, float128)
#undef VSX_XS_TSTDC
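/*
 * XSTSTDCSP also matches DP values whose exponent lies below the SP
 * normal range, and reports operands not exactly representable in SP
 * via the CR SO bit.
 */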
void helper_XSTSTDCSP(CPUPPCState *env, uint32_t bf,
                      uint32_t dcmx, ppc_vsr_t *b)
{
    uint32_t cc, match, sign = float64_is_neg(b->VsrD(0));
    uint32_t exp = (b->VsrD(0) >> 52) & 0x7FF;
    int not_sp = (int)not_SP_value(b->VsrD(0));
    match = float64_tstdc(b->VsrD(0), dcmx) || (exp > 0 && exp < 0x381);
    cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT | not_sp << CRF_SO_BIT;
    env->fpscr &= ~FP_FPCC;
    env->fpscr |= cc << FPSCR_FPCC;
    env->crf[bf] = cc;
}
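/*
 * xsrqpi[x] - VSX Scalar Round Quad-Precision to Integral; the R and RMC
 * opcode fields select the rounding mode and EX (Rc) selects whether the
 * inexact flag is reported.
 */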
void helper_xsrqpi(CPUPPCState *env, uint32_t opcode,
                   ppc_vsr_t *xt, ppc_vsr_t *xb)
{
    ppc_vsr_t t = { };
    uint8_t r = Rrm(opcode);
    uint8_t ex = Rc(opcode);
    uint8_t rmc = RMC(opcode);
    uint8_t rmode = 0;
    float_status tstat;

    helper_reset_fpstatus(env);

    if (r == 0 && rmc == 0) {
        rmode = float_round_ties_away;
    } else if (r == 0 && rmc == 0x3) {
        rmode = env->fpscr & FP_RN;
    } else if (r == 1) {
        switch (rmc) {
        case 0:
            rmode = float_round_nearest_even;
            break;
        case 1:
            rmode = float_round_to_zero;
            break;
        case 2:
            rmode = float_round_up;
            break;
        case 3:
            rmode = float_round_down;
            break;
        default:
            abort();
        }
    }

    tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);
    set_float_rounding_mode(rmode, &tstat);
    t.f128 = float128_round_to_int(xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid_snan)) {
        float_invalid_op_vxsnan(env, GETPC());
    }

    if (ex == 0 && (tstat.float_exception_flags & float_flag_inexact)) {
        env->fp_status.float_exception_flags &= ~float_flag_inexact;
    }

    helper_compute_fprf_float128(env, t.f128);
    do_float_check_status(env, true, GETPC());
    *xt = t;
}
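/*
 * xsrqpxp - Round Quad-Precision to Double-Extended Precision by way of
 * an intermediate floatx80 value.
 */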
void helper_xsrqpxp(CPUPPCState *env, uint32_t opcode,
                    ppc_vsr_t *xt, ppc_vsr_t *xb)
{
    ppc_vsr_t t = { };
    uint8_t r = Rrm(opcode);
    uint8_t rmc = RMC(opcode);
    uint8_t rmode = 0;
    floatx80 round_res;
    float_status tstat;

    helper_reset_fpstatus(env);

    if (r == 0 && rmc == 0) {
        rmode = float_round_ties_away;
    } else if (r == 0 && rmc == 0x3) {
        rmode = env->fpscr & FP_RN;
    } else if (r == 1) {
        switch (rmc) {
        case 0:
            rmode = float_round_nearest_even;
            break;
        case 1:
            rmode = float_round_to_zero;
            break;
        case 2:
            rmode = float_round_up;
            break;
        case 3:
            rmode = float_round_down;
            break;
        default:
            abort();
        }
    }

    tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);
    set_float_rounding_mode(rmode, &tstat);
    round_res = float128_to_floatx80(xb->f128, &tstat);
    t.f128 = floatx80_to_float128(round_res, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid_snan)) {
        float_invalid_op_vxsnan(env, GETPC());
        t.f128 = float128_snan_to_qnan(t.f128);
    }

    helper_compute_fprf_float128(env, t.f128);
    *xt = t;
    do_float_check_status(env, true, GETPC());
}

void helper_xssqrtqp(CPUPPCState *env, uint32_t opcode,
                     ppc_vsr_t *xt, ppc_vsr_t *xb)
{
    ppc_vsr_t t = { };
    float_status tstat;

    helper_reset_fpstatus(env);

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    t.f128 = float128_sqrt(xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        float_invalid_op_sqrt(env, tstat.float_exception_flags, 1, GETPC());
    }

    helper_compute_fprf_float128(env, t.f128);
    *xt = t;
    do_float_check_status(env, true, GETPC());
}

void helper_xssubqp(CPUPPCState *env, uint32_t opcode,
                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
{
    ppc_vsr_t t = *xt;
    float_status tstat;

    helper_reset_fpstatus(env);

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    t.f128 = float128_sub(xa->f128, xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        float_invalid_op_addsub(env, tstat.float_exception_flags, 1, GETPC());
    }

    helper_compute_fprf_float128(env, t.f128);
    *xt = t;
    do_float_check_status(env, true, GETPC());
}

static inline void vsxger_excp(CPUPPCState *env, uintptr_t retaddr)
{
    /*
     * XV*GER instructions execute and set the FPSCR as if exceptions
     * are disabled and only set the checking exception flags later
     */
    target_ulong enable;
    enable = env->fpscr & (FP_ENABLES | FP_FI | FP_FR);
    env->fpscr &= ~(FP_ENABLES | FP_FI | FP_FR);
    int status = get_float_exception_flags(&env->fp_status);
    if (unlikely(status & float_flag_invalid)) {
        if (status & float_flag_invalid_snan) {
            float_invalid_op_vxsnan(env, 0);
        }
        if (status & float_flag_invalid_imz) {
            float_invalid_op_vximz(env, false, 0);
        }
        if (status & float_flag_invalid_isi) {
            float_invalid_op_vxisi(env, false, 0);
        }
    }
    do_float_check_status(env, false, retaddr);
    env->fpscr |= enable;
    do_fpscr_check_status(env, retaddr);
}

typedef float64 extract_f16(float16, float_status *);

static float64 extract_hf16(float16 in, float_status *fp_status)
{
    return float16_to_float64(in, true, fp_status);
}

static float64 extract_bf16(bfloat16 in, float_status *fp_status)
{
    return bfloat16_to_float64(in, fp_status);
}
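/*
 * vsxger16 - half-precision GER (rank-2 outer-product update): for each
 * (i, j) cell enabled by xmsk/ymsk, psum = a[2i] * b[2j] + a[2i+1] * b[2j+1]
 * is computed in double precision and rounded to single precision, with
 * pmsk gating the two partial products.
 */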
static void vsxger16(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                     ppc_acc_t *at, uint32_t mask, bool acc,
                     bool neg_mul, bool neg_acc, extract_f16 extract)
{
    float32 r, aux_acc;
    float64 psum, va, vb, vc, vd;
    int i, j, xmsk_bit, ymsk_bit;
    uint8_t pmsk = FIELD_EX32(mask, GER_MSK, PMSK),
            xmsk = FIELD_EX32(mask, GER_MSK, XMSK),
            ymsk = FIELD_EX32(mask, GER_MSK, YMSK);
    float_status *excp_ptr = &env->fp_status;
    for (i = 0, xmsk_bit = 1 << 3; i < 4; i++, xmsk_bit >>= 1) {
        for (j = 0, ymsk_bit = 1 << 3; j < 4; j++, ymsk_bit >>= 1) {
            if ((xmsk_bit & xmsk) && (ymsk_bit & ymsk)) {
                va = !(pmsk & 2) ? float64_zero :
                     extract(a->VsrHF(2 * i), excp_ptr);
                vb = !(pmsk & 2) ? float64_zero :
                     extract(b->VsrHF(2 * j), excp_ptr);
                vc = !(pmsk & 1) ? float64_zero :
                     extract(a->VsrHF(2 * i + 1), excp_ptr);
                vd = !(pmsk & 1) ? float64_zero :
                     extract(b->VsrHF(2 * j + 1), excp_ptr);
                psum = float64_mul(va, vb, excp_ptr);
                psum = float64r32_muladd(vc, vd, psum, 0, excp_ptr);
                r = float64_to_float32(psum, excp_ptr);
                if (acc) {
                    aux_acc = at[i].VsrSF(j);
                    if (neg_mul) {
                        r = bfp32_neg(r);
                    }
                    if (neg_acc) {
                        aux_acc = bfp32_neg(aux_acc);
                    }
                    r = float32_add(r, aux_acc, excp_ptr);
                }
                at[i].VsrSF(j) = r;
            } else {
                at[i].VsrSF(j) = float32_zero;
            }
        }
    }
    vsxger_excp(env, GETPC());
}
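/*
 * Callbacks used by vsxger() below; the 64-bit variants map grid columns
 * 2 and 3 onto the two doubleword columns of each accumulator row and
 * treat columns 0 and 1 as no-ops.
 */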
typedef void vsxger_zero(ppc_vsr_t *at, int, int);

typedef void vsxger_muladd_f(ppc_vsr_t *, ppc_vsr_t *, ppc_vsr_t *, int, int,
                             int flags, float_status *s);

static void vsxger_muladd32(ppc_vsr_t *at, ppc_vsr_t *a, ppc_vsr_t *b, int i,
                            int j, int flags, float_status *s)
{
    at[i].VsrSF(j) = float32_muladd(a->VsrSF(i), b->VsrSF(j),
                                    at[i].VsrSF(j), flags, s);
}

static void vsxger_mul32(ppc_vsr_t *at, ppc_vsr_t *a, ppc_vsr_t *b, int i,
                         int j, int flags, float_status *s)
{
    at[i].VsrSF(j) = float32_mul(a->VsrSF(i), b->VsrSF(j), s);
}

static void vsxger_zero32(ppc_vsr_t *at, int i, int j)
{
    at[i].VsrSF(j) = float32_zero;
}

static void vsxger_muladd64(ppc_vsr_t *at, ppc_vsr_t *a, ppc_vsr_t *b, int i,
                            int j, int flags, float_status *s)
{
    if (j >= 2) {
        j -= 2;
        at[i].VsrDF(j) = float64_muladd(a[i / 2].VsrDF(i % 2), b->VsrDF(j),
                                        at[i].VsrDF(j), flags, s);
    }
}

static void vsxger_mul64(ppc_vsr_t *at, ppc_vsr_t *a, ppc_vsr_t *b, int i,
                         int j, int flags, float_status *s)
{
    if (j >= 2) {
        j -= 2;
        at[i].VsrDF(j) = float64_mul(a[i / 2].VsrDF(i % 2), b->VsrDF(j), s);
    }
}

static void vsxger_zero64(ppc_vsr_t *at, int i, int j)
{
    if (j >= 2) {
        j -= 2;
        at[i].VsrDF(j) = float64_zero;
    }
}
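/*
 * vsxger - common body of the SP and DP XVF*GER helpers; neg_mul and
 * neg_acc are folded into softfloat muladd flags rather than negating
 * the values by hand as the half-precision path does.
 */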
static void vsxger(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                   ppc_acc_t *at, uint32_t mask, bool acc, bool neg_mul,
                   bool neg_acc, vsxger_muladd_f mul, vsxger_muladd_f muladd,
                   vsxger_zero zero)
{
    int i, j, xmsk_bit, ymsk_bit, op_flags;
    uint8_t xmsk = mask & 0x0F;
    uint8_t ymsk = (mask >> 4) & 0x0F;
    float_status *excp_ptr = &env->fp_status;
    op_flags = (neg_acc ^ neg_mul) ? float_muladd_negate_c : 0;
    op_flags |= (neg_mul) ? float_muladd_negate_result : 0;
    helper_reset_fpstatus(env);
    for (i = 0, xmsk_bit = 1 << 3; i < 4; i++, xmsk_bit >>= 1) {
        for (j = 0, ymsk_bit = 1 << 3; j < 4; j++, ymsk_bit >>= 1) {
            if ((xmsk_bit & xmsk) && (ymsk_bit & ymsk)) {
                if (acc) {
                    muladd(at, a, b, i, j, op_flags, excp_ptr);
                } else {
                    mul(at, a, b, i, j, op_flags, excp_ptr);
                }
            } else {
                zero(at, i, j);
            }
        }
    }
    vsxger_excp(env, GETPC());
}
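/*
 * Instruction entry points: the PP/PN/NP/NN suffixes encode positive or
 * negative multiply followed by positive or negative accumulate.
 */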
QEMU_FLATTEN
void helper_XVBF16GER2(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                       ppc_acc_t *at, uint32_t mask)
{
    vsxger16(env, a, b, at, mask, false, false, false, extract_bf16);
}

QEMU_FLATTEN
void helper_XVBF16GER2PP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                         ppc_acc_t *at, uint32_t mask)
{
    vsxger16(env, a, b, at, mask, true, false, false, extract_bf16);
}

QEMU_FLATTEN
void helper_XVBF16GER2PN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                         ppc_acc_t *at, uint32_t mask)
{
    vsxger16(env, a, b, at, mask, true, false, true, extract_bf16);
}

QEMU_FLATTEN
void helper_XVBF16GER2NP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                         ppc_acc_t *at, uint32_t mask)
{
    vsxger16(env, a, b, at, mask, true, true, false, extract_bf16);
}

QEMU_FLATTEN
void helper_XVBF16GER2NN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                         ppc_acc_t *at, uint32_t mask)
{
    vsxger16(env, a, b, at, mask, true, true, true, extract_bf16);
}

QEMU_FLATTEN
void helper_XVF16GER2(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                      ppc_acc_t *at, uint32_t mask)
{
    vsxger16(env, a, b, at, mask, false, false, false, extract_hf16);
}

QEMU_FLATTEN
void helper_XVF16GER2PP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                        ppc_acc_t *at, uint32_t mask)
{
    vsxger16(env, a, b, at, mask, true, false, false, extract_hf16);
}

QEMU_FLATTEN
void helper_XVF16GER2PN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                        ppc_acc_t *at, uint32_t mask)
{
    vsxger16(env, a, b, at, mask, true, false, true, extract_hf16);
}

QEMU_FLATTEN
void helper_XVF16GER2NP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                        ppc_acc_t *at, uint32_t mask)
{
    vsxger16(env, a, b, at, mask, true, true, false, extract_hf16);
}

QEMU_FLATTEN
void helper_XVF16GER2NN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                        ppc_acc_t *at, uint32_t mask)
{
    vsxger16(env, a, b, at, mask, true, true, true, extract_hf16);
}

QEMU_FLATTEN
void helper_XVF32GER(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                     ppc_acc_t *at, uint32_t mask)
{
    vsxger(env, a, b, at, mask, false, false, false, vsxger_mul32,
           vsxger_muladd32, vsxger_zero32);
}

QEMU_FLATTEN
void helper_XVF32GERPP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                       ppc_acc_t *at, uint32_t mask)
{
    vsxger(env, a, b, at, mask, true, false, false, vsxger_mul32,
           vsxger_muladd32, vsxger_zero32);
}

QEMU_FLATTEN
void helper_XVF32GERPN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                       ppc_acc_t *at, uint32_t mask)
{
    vsxger(env, a, b, at, mask, true, false, true, vsxger_mul32,
           vsxger_muladd32, vsxger_zero32);
}

QEMU_FLATTEN
void helper_XVF32GERNP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                       ppc_acc_t *at, uint32_t mask)
{
    vsxger(env, a, b, at, mask, true, true, false, vsxger_mul32,
           vsxger_muladd32, vsxger_zero32);
}

QEMU_FLATTEN
void helper_XVF32GERNN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                       ppc_acc_t *at, uint32_t mask)
{
    vsxger(env, a, b, at, mask, true, true, true, vsxger_mul32,
           vsxger_muladd32, vsxger_zero32);
}

QEMU_FLATTEN
void helper_XVF64GER(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                     ppc_acc_t *at, uint32_t mask)
{
    vsxger(env, a, b, at, mask, false, false, false, vsxger_mul64,
           vsxger_muladd64, vsxger_zero64);
}

QEMU_FLATTEN
void helper_XVF64GERPP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                       ppc_acc_t *at, uint32_t mask)
{
    vsxger(env, a, b, at, mask, true, false, false, vsxger_mul64,
           vsxger_muladd64, vsxger_zero64);
}

QEMU_FLATTEN
void helper_XVF64GERPN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                       ppc_acc_t *at, uint32_t mask)
{
    vsxger(env, a, b, at, mask, true, false, true, vsxger_mul64,
           vsxger_muladd64, vsxger_zero64);
}

QEMU_FLATTEN
void helper_XVF64GERNP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                       ppc_acc_t *at, uint32_t mask)
{
    vsxger(env, a, b, at, mask, true, true, false, vsxger_mul64,
           vsxger_muladd64, vsxger_zero64);
}

QEMU_FLATTEN
void helper_XVF64GERNN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                       ppc_acc_t *at, uint32_t mask)
{
    vsxger(env, a, b, at, mask, true, true, true, vsxger_mul64,
           vsxger_muladd64, vsxger_zero64);
}