/*
 *  PowerPC floating point and SPE emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2.1 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "internal.h"
#include "fpu/softfloat.h"

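/*
 * Turn a signaling NaN into the corresponding quiet NaN by setting the
 * most-significant bit of the fraction (the IEEE 754-2008 convention).
 */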
static inline float128 float128_snan_to_qnan(float128 x)
{
    float128 r;

    r.high = x.high | 0x0000800000000000;
    r.low = x.low;
    return r;
}

#define float64_snan_to_qnan(x) ((x) | 0x0008000000000000ULL)
#define float32_snan_to_qnan(x) ((x) | 0x00400000)
#define float16_snan_to_qnan(x) ((x) | 0x0200)

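/* Negate a float32 without raising exceptions; NaNs pass through unchanged. */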
static inline float32 bfp32_neg(float32 a)
{
    if (unlikely(float32_is_any_nan(a))) {
        return a;
    } else {
        return float32_chs(a);
    }
}

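/*
 * Deferred floating-point exceptions can only be delivered when MSR[FE0]
 * or MSR[FE1] is set; user-mode emulation always delivers them.
 */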
static inline bool fp_exceptions_enabled(CPUPPCState *env)
{
#ifdef CONFIG_USER_ONLY
    return true;
#else
    return (env->msr & ((1U << MSR_FE0) | (1U << MSR_FE1))) != 0;
#endif
}

/*****************************************************************************/
/* Floating point operations helpers */

/*
 * This is the non-arithmetic conversion that happens e.g. on loads.
 * In the spec, this is called "DOUBLE".
 */
uint64_t helper_todouble(uint32_t arg)
{
    uint32_t abs_arg = arg & 0x7fffffff;
    uint64_t ret;

    if (likely(abs_arg >= 0x00800000)) {
        if (unlikely(extract32(arg, 23, 8) == 0xff)) {
            /* Inf or NAN.  */
            ret = (uint64_t)extract32(arg, 31, 1) << 63;
            ret |= (uint64_t)0x7ff << 52;
            ret |= (uint64_t)extract32(arg, 0, 23) << 29;
        } else {
            /* Normalized operand.  */
            ret = (uint64_t)extract32(arg, 30, 2) << 62;
            ret |= ((extract32(arg, 30, 1) ^ 1) * (uint64_t)7) << 59;
            ret |= (uint64_t)extract32(arg, 0, 30) << 29;
        }
    } else {
        /* Zero or Denormalized operand.  */
        ret = (uint64_t)extract32(arg, 31, 1) << 63;
        if (unlikely(abs_arg != 0)) {
            /*
             * Denormalized operand.
             * Shift fraction so that the msb is in the implicit bit position.
             * Thus, shift is in the range [1:23].
             */
            int shift = clz32(abs_arg) - 8;
            /*
             * The first 3 terms compute the float64 exponent.  We then bias
             * this result by -1 so that we can swallow the implicit bit below.
             */
            int exp = -126 - shift + 1023 - 1;

            ret |= (uint64_t)exp << 52;
            ret += (uint64_t)abs_arg << (52 - 23 + shift);
        }
    }
    return ret;
}

/*
 * This is the non-arithmetic conversion that happens e.g. on stores.
 * In the spec, this is called "SINGLE".
 */
uint32_t helper_tosingle(uint64_t arg)
{
    int exp = extract64(arg, 52, 11);
    uint32_t ret;

    if (likely(exp > 896)) {
        /* No denormalization required (includes Inf, NaN).  */
        ret = extract64(arg, 62, 2) << 30;
        ret |= extract64(arg, 29, 30);
    } else {
        /*
         * Zero or Denormal result.  If the exponent is in bounds for
         * a single-precision denormal result, extract the proper
         * bits.  If the input is not zero, and the exponent is out of
         * bounds, then the result is undefined; this underflows to
         * zero.
         */
        ret = extract64(arg, 63, 1) << 31;
        if (unlikely(exp >= 874)) {
            /* Denormal result.  */
            ret |= ((1ULL << 52) | extract64(arg, 0, 52)) >> (896 + 30 - exp);
        }
    }
    return ret;
}

static inline int ppc_float32_get_unbiased_exp(float32 f)
{
    return ((f >> 23) & 0xFF) - 127;
}

static inline int ppc_float64_get_unbiased_exp(float64 f)
{
    return ((f >> 52) & 0x7FF) - 1023;
}
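
/*
 * COMPUTE_FPRF: classify ARG and set the five-bit FPSCR[FPRF] field,
 * which encodes sign and class (normal, zero, denormal, infinity, NaN)
 * as defined by the Power ISA.
 */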
#define COMPUTE_FPRF(tp)                                       \
void helper_compute_fprf_##tp(CPUPPCState *env, tp arg)        \
{                                                              \
    bool neg = tp##_is_neg(arg);                               \
    target_ulong fprf;                                         \
    if (likely(tp##_is_normal(arg))) {                         \
        fprf = neg ? 0x08 << FPSCR_FPRF : 0x04 << FPSCR_FPRF;  \
    } else if (tp##_is_zero(arg)) {                            \
        fprf = neg ? 0x12 << FPSCR_FPRF : 0x02 << FPSCR_FPRF;  \
    } else if (tp##_is_zero_or_denormal(arg)) {                \
        fprf = neg ? 0x18 << FPSCR_FPRF : 0x14 << FPSCR_FPRF;  \
    } else if (tp##_is_infinity(arg)) {                        \
        fprf = neg ? 0x09 << FPSCR_FPRF : 0x05 << FPSCR_FPRF;  \
    } else {                                                   \
        float_status dummy = { };  /* snan_bit_is_one = 0 */   \
        if (tp##_is_signaling_nan(arg, &dummy)) {              \
            fprf = 0x00 << FPSCR_FPRF;                         \
        } else {                                               \
            fprf = 0x11 << FPSCR_FPRF;                         \
        }                                                      \
    }                                                          \
    env->fpscr = (env->fpscr & ~FP_FPRF) | fprf;               \
}

COMPUTE_FPRF(float16)
COMPUTE_FPRF(float32)
COMPUTE_FPRF(float64)
COMPUTE_FPRF(float128)

static void finish_invalid_op_excp(CPUPPCState *env, int op, uintptr_t retaddr)
{
    /* Update the floating-point invalid operation summary */
    env->fpscr |= FP_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (env->fpscr & FP_VE) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        if (fp_exceptions_enabled(env)) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_FP | op, retaddr);
        }
    }
}

static void finish_invalid_op_arith(CPUPPCState *env, int op,
                                    bool set_fpcc, uintptr_t retaddr)
{
    env->fpscr &= ~(FP_FR | FP_FI);
    if (!(env->fpscr & FP_VE)) {
        if (set_fpcc) {
            env->fpscr &= ~FP_FPCC;
            env->fpscr |= (FP_C | FP_FU);
        }
    }
    finish_invalid_op_excp(env, op, retaddr);
}

/* Signalling NaN specified operand */
static void float_invalid_op_vxsnan(CPUPPCState *env, uintptr_t retaddr)
{
    env->fpscr |= FP_VXSNAN;
    finish_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, retaddr);
}

/* Magnitude subtraction of infinities */
static void float_invalid_op_vxisi(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= FP_VXISI;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXISI, set_fpcc, retaddr);
}

/* Division of infinity by infinity */
static void float_invalid_op_vxidi(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= FP_VXIDI;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXIDI, set_fpcc, retaddr);
}

/* Division of zero by zero */
static void float_invalid_op_vxzdz(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= FP_VXZDZ;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXZDZ, set_fpcc, retaddr);
}

/* Multiplication of zero by infinity */
static void float_invalid_op_vximz(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= FP_VXIMZ;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXIMZ, set_fpcc, retaddr);
}

/* Square root of a negative number */
static void float_invalid_op_vxsqrt(CPUPPCState *env, bool set_fpcc,
                                    uintptr_t retaddr)
{
    env->fpscr |= FP_VXSQRT;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXSQRT, set_fpcc, retaddr);
}

/* Ordered comparison of NaN */
static void float_invalid_op_vxvc(CPUPPCState *env, bool set_fpcc,
                                  uintptr_t retaddr)
{
    env->fpscr |= FP_VXVC;
    if (set_fpcc) {
        env->fpscr &= ~FP_FPCC;
        env->fpscr |= (FP_C | FP_FU);
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= FP_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    /* We must update the target FPR before raising the exception */
    if (env->fpscr & FP_VE) {
        CPUState *cs = env_cpu(env);

        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        /* Exception is deferred */
    }
}

/* Invalid conversion */
static void float_invalid_op_vxcvi(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= FP_VXCVI;
    env->fpscr &= ~(FP_FR | FP_FI);
    if (!(env->fpscr & FP_VE)) {
        if (set_fpcc) {
            env->fpscr &= ~FP_FPCC;
            env->fpscr |= (FP_C | FP_FU);
        }
    }
    finish_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, retaddr);
}

static inline void float_zero_divide_excp(CPUPPCState *env, uintptr_t raddr)
{
    env->fpscr |= FP_ZX;
    env->fpscr &= ~(FP_FR | FP_FI);
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (env->fpscr & FP_ZE) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        if (fp_exceptions_enabled(env)) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX,
                                   raddr);
        }
    }
}

static inline int float_overflow_excp(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);

    env->fpscr |= FP_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;

    bool overflow_enabled = !!(env->fpscr & FP_OE);
    if (overflow_enabled) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    }

    return overflow_enabled ? 0 : float_flag_inexact;
}

static inline void float_underflow_excp(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);

    env->fpscr |= FP_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (env->fpscr & FP_UE) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}

static inline void float_inexact_excp(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);

    env->fpscr |= FP_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (env->fpscr & FP_XE) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}

void helper_fpscr_clrbit(CPUPPCState *env, uint32_t bit)
{
    uint32_t mask = 1u << bit;
    if (env->fpscr & mask) {
        ppc_store_fpscr(env, env->fpscr & ~(target_ulong)mask);
    }
}

void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit)
{
    uint32_t mask = 1u << bit;
    if (!(env->fpscr & mask)) {
        ppc_store_fpscr(env, env->fpscr | mask);
    }
}

void helper_store_fpscr(CPUPPCState *env, uint64_t val, uint32_t nibbles)
{
    target_ulong mask = 0;
    int i;

    /* TODO: push this extension back to translation time */
    for (i = 0; i < sizeof(target_ulong) * 2; i++) {
        if (nibbles & (1 << i)) {
            mask |= (target_ulong) 0xf << (4 * i);
        }
    }
    val = (val & mask) | (env->fpscr & ~mask);
    ppc_store_fpscr(env, val);
}

static void do_fpscr_check_status(CPUPPCState *env, uintptr_t raddr)
{
    CPUState *cs = env_cpu(env);
    target_ulong fpscr = env->fpscr;
    int error = 0;

    if ((fpscr & FP_OX) && (fpscr & FP_OE)) {
        error = POWERPC_EXCP_FP_OX;
    } else if ((fpscr & FP_UX) && (fpscr & FP_UE)) {
        error = POWERPC_EXCP_FP_UX;
    } else if ((fpscr & FP_XX) && (fpscr & FP_XE)) {
        error = POWERPC_EXCP_FP_XX;
    } else if ((fpscr & FP_ZX) && (fpscr & FP_ZE)) {
        error = POWERPC_EXCP_FP_ZX;
    } else if (fpscr & FP_VE) {
        if (fpscr & FP_VXSOFT) {
            error = POWERPC_EXCP_FP_VXSOFT;
        } else if (fpscr & FP_VXSNAN) {
            error = POWERPC_EXCP_FP_VXSNAN;
        } else if (fpscr & FP_VXISI) {
            error = POWERPC_EXCP_FP_VXISI;
        } else if (fpscr & FP_VXIDI) {
            error = POWERPC_EXCP_FP_VXIDI;
        } else if (fpscr & FP_VXZDZ) {
            error = POWERPC_EXCP_FP_VXZDZ;
        } else if (fpscr & FP_VXIMZ) {
            error = POWERPC_EXCP_FP_VXIMZ;
        } else if (fpscr & FP_VXVC) {
            error = POWERPC_EXCP_FP_VXVC;
        } else if (fpscr & FP_VXSQRT) {
            error = POWERPC_EXCP_FP_VXSQRT;
        } else if (fpscr & FP_VXCVI) {
            error = POWERPC_EXCP_FP_VXCVI;
        } else {
            return;
        }
    } else {
        return;
    }
    cs->exception_index = POWERPC_EXCP_PROGRAM;
    env->error_code = error | POWERPC_EXCP_FP;
    env->fpscr |= FP_FEX;
    /* Deferred floating-point exception after target FPSCR update */
    if (fp_exceptions_enabled(env)) {
        raise_exception_err_ra(env, cs->exception_index,
                               env->error_code, raddr);
    }
}

void helper_fpscr_check_status(CPUPPCState *env)
{
    do_fpscr_check_status(env, GETPC());
}

static void do_float_check_status(CPUPPCState *env, bool change_fi,
                                  uintptr_t raddr)
{
    CPUState *cs = env_cpu(env);
    int status = get_float_exception_flags(&env->fp_status);

    if (status & float_flag_overflow) {
        status |= float_overflow_excp(env);
    } else if (status & float_flag_underflow) {
        float_underflow_excp(env);
    }
    if (status & float_flag_inexact) {
        float_inexact_excp(env);
    }
    if (change_fi) {
        env->fpscr = FIELD_DP64(env->fpscr, FPSCR, FI,
                                !!(status & float_flag_inexact));
    }

    if (cs->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (fp_exceptions_enabled(env)) {
            raise_exception_err_ra(env, cs->exception_index,
                                   env->error_code, raddr);
        }
    }
}

void helper_float_check_status(CPUPPCState *env)
{
    do_float_check_status(env, true, GETPC());
}

void helper_reset_fpstatus(CPUPPCState *env)
{
    set_float_exception_flags(0, &env->fp_status);
}
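
/*
 * Map the softfloat invalid-operation detail flags raised by an add or
 * subtract onto the Power ISA FPSCR[VXISI]/[VXSNAN] bits.
 */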
static void float_invalid_op_addsub(CPUPPCState *env, int flags,
                                    bool set_fpcc, uintptr_t retaddr)
{
    if (flags & float_flag_invalid_isi) {
        float_invalid_op_vxisi(env, set_fpcc, retaddr);
    } else if (flags & float_flag_invalid_snan) {
        float_invalid_op_vxsnan(env, retaddr);
    }
}

/* fadd - fadd. */
float64 helper_fadd(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_add(arg1, arg2, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_addsub(env, flags, 1, GETPC());
    }

    return ret;
}
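
/*
 * fadds - fadds.  The float64r32_* routines used by the single-precision
 * instructions compute a float64 result rounded as if to single
 * precision/range, which avoids double rounding while keeping operands
 * in double format.
 */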
float64 helper_fadds(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64r32_add(arg1, arg2, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_addsub(env, flags, 1, GETPC());
    }
    return ret;
}

/* fsub - fsub. */
float64 helper_fsub(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_sub(arg1, arg2, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_addsub(env, flags, 1, GETPC());
    }

    return ret;
}

/* fsubs - fsubs. */
float64 helper_fsubs(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64r32_sub(arg1, arg2, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_addsub(env, flags, 1, GETPC());
    }
    return ret;
}

static void float_invalid_op_mul(CPUPPCState *env, int flags,
                                 bool set_fprc, uintptr_t retaddr)
{
    if (flags & float_flag_invalid_imz) {
        float_invalid_op_vximz(env, set_fprc, retaddr);
    } else if (flags & float_flag_invalid_snan) {
        float_invalid_op_vxsnan(env, retaddr);
    }
}

/* fmul - fmul. */
float64 helper_fmul(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_mul(arg1, arg2, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_mul(env, flags, 1, GETPC());
    }

    return ret;
}

/* fmuls - fmuls. */
float64 helper_fmuls(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64r32_mul(arg1, arg2, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_mul(env, flags, 1, GETPC());
    }
    return ret;
}

static void float_invalid_op_div(CPUPPCState *env, int flags,
                                 bool set_fprc, uintptr_t retaddr)
{
    if (flags & float_flag_invalid_idi) {
        float_invalid_op_vxidi(env, set_fprc, retaddr);
    } else if (flags & float_flag_invalid_zdz) {
        float_invalid_op_vxzdz(env, set_fprc, retaddr);
    } else if (flags & float_flag_invalid_snan) {
        float_invalid_op_vxsnan(env, retaddr);
    }
}

/* fdiv - fdiv. */
float64 helper_fdiv(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_div(arg1, arg2, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_div(env, flags, 1, GETPC());
    }
    if (unlikely(flags & float_flag_divbyzero)) {
        float_zero_divide_excp(env, GETPC());
    }

    return ret;
}

/* fdivs - fdivs. */
float64 helper_fdivs(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64r32_div(arg1, arg2, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_div(env, flags, 1, GETPC());
    }
    if (unlikely(flags & float_flag_divbyzero)) {
        float_zero_divide_excp(env, GETPC());
    }

    return ret;
}

static uint64_t float_invalid_cvt(CPUPPCState *env, int flags,
                                  uint64_t ret, uint64_t ret_nan,
                                  bool set_fprc, uintptr_t retaddr)
{
    /*
     * VXCVI is different from most in that it sets two exception bits,
     * VXCVI and VXSNAN for an SNaN input.
     */
    if (flags & float_flag_invalid_snan) {
        env->fpscr |= FP_VXSNAN;
    }
    float_invalid_op_vxcvi(env, set_fprc, retaddr);

    return flags & float_flag_invalid_cvti ? ret : ret_nan;
}
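
/*
 * fcti* family: float64 to 32/64-bit integer conversion.  On an invalid
 * conversion the saturated softfloat result is kept for out-of-range
 * inputs, while NaN inputs produce nanval (minimum signed value or zero).
 */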
#define FPU_FCTI(op, cvt, nanval)                                      \
uint64_t helper_##op(CPUPPCState *env, float64 arg)                    \
{                                                                      \
    uint64_t ret = float64_to_##cvt(arg, &env->fp_status);             \
    int flags = get_float_exception_flags(&env->fp_status);            \
    if (unlikely(flags & float_flag_invalid)) {                        \
        ret = float_invalid_cvt(env, flags, ret, nanval, 1, GETPC());  \
    }                                                                  \
    return ret;                                                        \
}

FPU_FCTI(fctiw, int32, 0x80000000U)
FPU_FCTI(fctiwz, int32_round_to_zero, 0x80000000U)
FPU_FCTI(fctiwu, uint32, 0x00000000U)
FPU_FCTI(fctiwuz, uint32_round_to_zero, 0x00000000U)
FPU_FCTI(fctid, int64, 0x8000000000000000ULL)
FPU_FCTI(fctidz, int64_round_to_zero, 0x8000000000000000ULL)
FPU_FCTI(fctidu, uint64, 0x0000000000000000ULL)
FPU_FCTI(fctiduz, uint64_round_to_zero, 0x0000000000000000ULL)

#define FPU_FCFI(op, cvtr, is_single)                      \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg)       \
{                                                          \
    CPU_DoubleU farg;                                      \
                                                           \
    if (is_single) {                                       \
        float32 tmp = cvtr(arg, &env->fp_status);          \
        farg.d = float32_to_float64(tmp, &env->fp_status); \
    } else {                                               \
        farg.d = cvtr(arg, &env->fp_status);               \
    }                                                      \
    do_float_check_status(env, true, GETPC());             \
    return farg.ll;                                        \
}

FPU_FCFI(fcfid, int64_to_float64, 0)
FPU_FCFI(fcfids, int64_to_float32, 1)
FPU_FCFI(fcfidu, uint64_to_float64, 0)
FPU_FCFI(fcfidus, uint64_to_float32, 1)

static uint64_t do_fri(CPUPPCState *env, uint64_t arg,
                       FloatRoundMode rounding_mode)
{
    FloatRoundMode old_rounding_mode = get_float_rounding_mode(&env->fp_status);
    int flags;

    set_float_rounding_mode(rounding_mode, &env->fp_status);
    arg = float64_round_to_int(arg, &env->fp_status);
    set_float_rounding_mode(old_rounding_mode, &env->fp_status);

    flags = get_float_exception_flags(&env->fp_status);
    if (flags & float_flag_invalid_snan) {
        float_invalid_op_vxsnan(env, GETPC());
    }

    /* fri* does not set FPSCR[XX] */
    set_float_exception_flags(flags & ~float_flag_inexact, &env->fp_status);
    do_float_check_status(env, true, GETPC());

    return arg;
}

uint64_t helper_frin(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_ties_away);
}

uint64_t helper_friz(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_to_zero);
}

uint64_t helper_frip(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_up);
}

uint64_t helper_frim(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_down);
}

static void float_invalid_op_madd(CPUPPCState *env, int flags,
                                  bool set_fpcc, uintptr_t retaddr)
{
    if (flags & float_flag_invalid_imz) {
        float_invalid_op_vximz(env, set_fpcc, retaddr);
    } else {
        float_invalid_op_addsub(env, flags, set_fpcc, retaddr);
    }
}

static float64 do_fmadd(CPUPPCState *env, float64 a, float64 b,
                        float64 c, int madd_flags, uintptr_t retaddr)
{
    float64 ret = float64_muladd(a, b, c, madd_flags, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_madd(env, flags, 1, retaddr);
    }
    return ret;
}

static uint64_t do_fmadds(CPUPPCState *env, float64 a, float64 b,
                          float64 c, int madd_flags, uintptr_t retaddr)
{
    float64 ret = float64r32_muladd(a, b, c, madd_flags, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_madd(env, flags, 1, retaddr);
    }
    return ret;
}

#define FPU_FMADD(op, madd_flags)                                    \
    uint64_t helper_##op(CPUPPCState *env, uint64_t arg1,            \
                         uint64_t arg2, uint64_t arg3)               \
    { return do_fmadd(env, arg1, arg2, arg3, madd_flags, GETPC()); } \
    uint64_t helper_##op##s(CPUPPCState *env, uint64_t arg1,         \
                            uint64_t arg2, uint64_t arg3)            \
    { return do_fmadds(env, arg1, arg2, arg3, madd_flags, GETPC()); }

#define MADD_FLGS 0
#define MSUB_FLGS float_muladd_negate_c
#define NMADD_FLGS float_muladd_negate_result
#define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result)

FPU_FMADD(fmadd, MADD_FLGS)
FPU_FMADD(fnmadd, NMADD_FLGS)
FPU_FMADD(fmsub, MSUB_FLGS)
FPU_FMADD(fnmsub, NMSUB_FLGS)

/* frsp - frsp. */
static uint64_t do_frsp(CPUPPCState *env, uint64_t arg, uintptr_t retaddr)
{
    float32 f32 = float64_to_float32(arg, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid_snan)) {
        float_invalid_op_vxsnan(env, retaddr);
    }
    return helper_todouble(f32);
}

uint64_t helper_frsp(CPUPPCState *env, uint64_t arg)
{
    return do_frsp(env, arg, GETPC());
}

static void float_invalid_op_sqrt(CPUPPCState *env, int flags,
                                  bool set_fpcc, uintptr_t retaddr)
{
    if (unlikely(flags & float_flag_invalid_sqrt)) {
        float_invalid_op_vxsqrt(env, set_fpcc, retaddr);
    } else if (unlikely(flags & float_flag_invalid_snan)) {
        float_invalid_op_vxsnan(env, retaddr);
    }
}

#define FPU_FSQRT(name, op)                                  \
float64 helper_##name(CPUPPCState *env, float64 arg)         \
{                                                            \
    float64 ret = op(arg, &env->fp_status);                  \
    int flags = get_float_exception_flags(&env->fp_status);  \
                                                             \
    if (unlikely(flags & float_flag_invalid)) {              \
        float_invalid_op_sqrt(env, flags, 1, GETPC());       \
    }                                                        \
                                                             \
    return ret;                                              \
}

FPU_FSQRT(FSQRT, float64_sqrt)
FPU_FSQRT(FSQRTS, float64r32_sqrt)

/* fre - fre. */
float64 helper_fre(CPUPPCState *env, float64 arg)
{
    /* "Estimate" the reciprocal with actual division.  */
    float64 ret = float64_div(float64_one, arg, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid_snan)) {
        float_invalid_op_vxsnan(env, GETPC());
    }
    if (unlikely(flags & float_flag_divbyzero)) {
        float_zero_divide_excp(env, GETPC());
        /* For FPSCR.ZE == 0, the result is 1/2.  */
        ret = float64_set_sign(float64_half, float64_is_neg(arg));
    }

    return ret;
}

/* fres - fres. */
uint64_t helper_fres(CPUPPCState *env, uint64_t arg)
{
    /* "Estimate" the reciprocal with actual division.  */
    float64 ret = float64r32_div(float64_one, arg, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid_snan)) {
        float_invalid_op_vxsnan(env, GETPC());
    }
    if (unlikely(flags & float_flag_divbyzero)) {
        float_zero_divide_excp(env, GETPC());
        /* For FPSCR.ZE == 0, the result is 1/2.  */
        ret = float64_set_sign(float64_half, float64_is_neg(arg));
    }

    return ret;
}

/* frsqrte - frsqrte. */
float64 helper_frsqrte(CPUPPCState *env, float64 arg)
{
    /* "Estimate" the reciprocal with actual division.  */
    float64 rets = float64_sqrt(arg, &env->fp_status);
    float64 retd = float64_div(float64_one, rets, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_sqrt(env, flags, 1, GETPC());
    }
    if (unlikely(flags & float_flag_divbyzero)) {
        /* Reciprocal of (square root of) zero.  */
        float_zero_divide_excp(env, GETPC());
    }

    return retd;
}

/* frsqrtes - frsqrtes. */
float64 helper_frsqrtes(CPUPPCState *env, float64 arg)
{
    /* "Estimate" the reciprocal with actual division.  */
    float64 rets = float64_sqrt(arg, &env->fp_status);
    float64 retd = float64r32_div(float64_one, rets, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_sqrt(env, flags, 1, GETPC());
    }
    if (unlikely(flags & float_flag_divbyzero)) {
        /* Reciprocal of (square root of) zero.  */
        float_zero_divide_excp(env, GETPC());
    }

    return retd;
}

/* fsel - fsel.  FRT = (FRA >= 0.0 and not NaN) ? FRC : FRB */
uint64_t helper_FSEL(uint64_t a, uint64_t b, uint64_t c)
{
    CPU_DoubleU fa;

    fa.ll = a;

    if ((!float64_is_neg(fa.d) || float64_is_zero(fa.d)) &&
        !float64_is_any_nan(fa.d)) {
        return c;
    } else {
        return b;
    }
}

uint32_t helper_ftdiv(uint64_t fra, uint64_t frb)
{
    int fe_flag = 0;
    int fg_flag = 0;

    if (unlikely(float64_is_infinity(fra) ||
                 float64_is_infinity(frb) ||
                 float64_is_zero(frb))) {
        fe_flag = 1;
        fg_flag = 1;
    } else {
        int e_a = ppc_float64_get_unbiased_exp(fra);
        int e_b = ppc_float64_get_unbiased_exp(frb);

        if (unlikely(float64_is_any_nan(fra) ||
                     float64_is_any_nan(frb))) {
            fe_flag = 1;
        } else if ((e_b <= -1022) || (e_b >= 1021)) {
            fe_flag = 1;
        } else if (!float64_is_zero(fra) &&
                   (((e_a - e_b) >= 1023) ||
                    ((e_a - e_b) <= -1021) ||
                    (e_a <= -970))) {
            fe_flag = 1;
        }

        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /* XB is not zero because of the above check and */
            /* so must be denormalized.                      */
            fg_flag = 1;
        }
    }

    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
}
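
/*
 * ftsqrt, like ftdiv above, returns a CR nibble with 0x8 always set;
 * 0x4 (fg) and 0x2 (fe) flag operands for which the software
 * divide/square-root estimate sequence cannot safely be used.
 */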
uint32_t helper_ftsqrt(uint64_t frb)
{
    int fe_flag = 0;
    int fg_flag = 0;

    if (unlikely(float64_is_infinity(frb) || float64_is_zero(frb))) {
        fe_flag = 1;
        fg_flag = 1;
    } else {
        int e_b = ppc_float64_get_unbiased_exp(frb);

        if (unlikely(float64_is_any_nan(frb))) {
            fe_flag = 1;
        } else if (unlikely(float64_is_zero(frb))) {
            fe_flag = 1;
        } else if (unlikely(float64_is_neg(frb))) {
            fe_flag = 1;
        } else if (!float64_is_zero(frb) && (e_b <= (-1022 + 52))) {
            fe_flag = 1;
        }

        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /* XB is not zero because of the above check and */
            /* so must be denormalized.                      */
            fg_flag = 1;
        }
    }

    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
}

void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~FP_FPCC;
    env->fpscr |= ret << FPSCR_FPCC;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status)))) {
        /* sNaN comparison */
        float_invalid_op_vxsnan(env, GETPC());
    }
}

void helper_fcmpo(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~FP_FPCC;
    env->fpscr |= ret << FPSCR_FPCC;
    env->crf[crfD] = (uint32_t) ret;
    if (unlikely(ret == 0x01UL)) {
        float_invalid_op_vxvc(env, 1, GETPC());
        if (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
            float64_is_signaling_nan(farg2.d, &env->fp_status)) {
            /* sNaN comparison */
            float_invalid_op_vxsnan(env, GETPC());
        }
    }
}

/*****************************************************************************/
/* SPE extension helpers (these use env->vec_status, not env->fp_status) */

/* Single-precision floating-point conversions */
static inline uint32_t efscfsi(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.f = int32_to_float32(val, &env->vec_status);

    return u.l;
}

static inline uint32_t efscfui(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.f = uint32_to_float32(val, &env->vec_status);

    return u.l;
}

static inline int32_t efsctsi(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_int32(u.f, &env->vec_status);
}

static inline uint32_t efsctui(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_uint32(u.f, &env->vec_status);
}

static inline uint32_t efsctsiz(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_int32_round_to_zero(u.f, &env->vec_status);
}

static inline uint32_t efsctuiz(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
}
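
/*
 * The SPE "fractional" formats are fixed-point values scaled by 2^32
 * in this implementation: conversions go through a float32 divide or
 * multiply by 2^32.
 */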
static inline uint32_t efscfsf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = int32_to_float32(val, &env->vec_status);
    tmp = int64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

static inline uint32_t efscfuf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = uint32_to_float32(val, &env->vec_status);
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

static inline uint32_t efsctsf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_int32(u.f, &env->vec_status);
}

static inline uint32_t efsctuf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_uint32(u.f, &env->vec_status);
}

#define HELPER_SPE_SINGLE_CONV(name)                        \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t val) \
    {                                                       \
        return e##name(env, val);                           \
    }
/* efscfsi */
HELPER_SPE_SINGLE_CONV(fscfsi);
/* efscfui */
HELPER_SPE_SINGLE_CONV(fscfui);
/* efscfuf */
HELPER_SPE_SINGLE_CONV(fscfuf);
/* efscfsf */
HELPER_SPE_SINGLE_CONV(fscfsf);
/* efsctsi */
HELPER_SPE_SINGLE_CONV(fsctsi);
/* efsctui */
HELPER_SPE_SINGLE_CONV(fsctui);
/* efsctsiz */
HELPER_SPE_SINGLE_CONV(fsctsiz);
/* efsctuiz */
HELPER_SPE_SINGLE_CONV(fsctuiz);
/* efsctsf */
HELPER_SPE_SINGLE_CONV(fsctsf);
/* efsctuf */
HELPER_SPE_SINGLE_CONV(fsctuf);

#define HELPER_SPE_VECTOR_CONV(name)                         \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t val) \
    {                                                        \
        return ((uint64_t)e##name(env, val >> 32) << 32) |   \
            (uint64_t)e##name(env, val);                     \
    }
/* evfscfsi */
HELPER_SPE_VECTOR_CONV(fscfsi);
/* evfscfui */
HELPER_SPE_VECTOR_CONV(fscfui);
/* evfscfuf */
HELPER_SPE_VECTOR_CONV(fscfuf);
/* evfscfsf */
HELPER_SPE_VECTOR_CONV(fscfsf);
/* evfsctsi */
HELPER_SPE_VECTOR_CONV(fsctsi);
/* evfsctui */
HELPER_SPE_VECTOR_CONV(fsctui);
/* evfsctsiz */
HELPER_SPE_VECTOR_CONV(fsctsiz);
/* evfsctuiz */
HELPER_SPE_VECTOR_CONV(fsctuiz);
/* evfsctsf */
HELPER_SPE_VECTOR_CONV(fsctsf);
/* evfsctuf */
HELPER_SPE_VECTOR_CONV(fsctuf);

/* Single-precision floating-point arithmetic */
static inline uint32_t efsadd(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_add(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efssub(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efsmul(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efsdiv(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_div(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

#define HELPER_SPE_SINGLE_ARITH(name)                                     \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
    {                                                                     \
        return e##name(env, op1, op2);                                    \
    }
/* efsadd */
HELPER_SPE_SINGLE_ARITH(fsadd);
/* efssub */
HELPER_SPE_SINGLE_ARITH(fssub);
/* efsmul */
HELPER_SPE_SINGLE_ARITH(fsmul);
/* efsdiv */
HELPER_SPE_SINGLE_ARITH(fsdiv);

#define HELPER_SPE_VECTOR_ARITH(name)                                      \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
    {                                                                      \
        return ((uint64_t)e##name(env, op1 >> 32, op2 >> 32) << 32) |      \
            (uint64_t)e##name(env, op1, op2);                              \
    }
/* evfsadd */
HELPER_SPE_VECTOR_ARITH(fsadd);
/* evfssub */
HELPER_SPE_VECTOR_ARITH(fssub);
/* evfsmul */
HELPER_SPE_VECTOR_ARITH(fsmul);
/* evfsdiv */
HELPER_SPE_VECTOR_ARITH(fsdiv);

/* Single-precision floating-point comparisons */
static inline uint32_t efscmplt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efscmpgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
}

static inline uint32_t efscmpeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efststlt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinites, ...) */
    return efscmplt(env, op1, op2);
}

static inline uint32_t efststgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinites, ...) */
    return efscmpgt(env, op1, op2);
}

static inline uint32_t efststeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinites, ...) */
    return efscmpeq(env, op1, op2);
}

#define HELPER_SINGLE_SPE_CMP(name)                                       \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
    {                                                                     \
        return e##name(env, op1, op2);                                    \
    }
/* efststlt */
HELPER_SINGLE_SPE_CMP(fststlt);
/* efststgt */
HELPER_SINGLE_SPE_CMP(fststgt);
/* efststeq */
HELPER_SINGLE_SPE_CMP(fststeq);
/* efscmplt */
HELPER_SINGLE_SPE_CMP(fscmplt);
/* efscmpgt */
HELPER_SINGLE_SPE_CMP(fscmpgt);
/* efscmpeq */
HELPER_SINGLE_SPE_CMP(fscmpeq);
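
/*
 * Pack the per-half comparison results into a CR nibble: bit 3 = high
 * half, bit 2 = low half, bit 1 = either half, bit 0 = both halves.
 */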
static inline uint32_t evcmp_merge(int t0, int t1)
{
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
}

#define HELPER_VECTOR_SPE_CMP(name)                                        \
    uint32_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
    {                                                                      \
        return evcmp_merge(e##name(env, op1 >> 32, op2 >> 32),             \
                           e##name(env, op1, op2));                        \
    }
/* evfststlt */
HELPER_VECTOR_SPE_CMP(fststlt);
/* evfststgt */
HELPER_VECTOR_SPE_CMP(fststgt);
/* evfststeq */
HELPER_VECTOR_SPE_CMP(fststeq);
/* evfscmplt */
HELPER_VECTOR_SPE_CMP(fscmplt);
/* evfscmpgt */
HELPER_VECTOR_SPE_CMP(fscmpgt);
/* evfscmpeq */
HELPER_VECTOR_SPE_CMP(fscmpeq);

/* Double-precision floating-point conversions */
uint64_t helper_efdcfsi(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;

    u.d = int32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfsid(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.d = int64_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfui(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;

    u.d = uint32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfuid(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.d = uint64_to_float64(val, &env->vec_status);

    return u.ll;
}

uint32_t helper_efdctsi(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32(u.d, &env->vec_status);
}

uint32_t helper_efdctui(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32(u.d, &env->vec_status);
}

uint32_t helper_efdctsiz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctsidz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int64_round_to_zero(u.d, &env->vec_status);
}

uint32_t helper_efdctuiz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctuidz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdcfsf(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = int32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfuf(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = uint32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

uint32_t helper_efdctsf(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_int32(u.d, &env->vec_status);
}

uint32_t helper_efdctuf(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_uint32(u.d, &env->vec_status);
}

uint32_t helper_efscfd(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u1;
    CPU_FloatU u2;

    u1.ll = val;
    u2.f = float64_to_float32(u1.d, &env->vec_status);

    return u2.l;
}

uint64_t helper_efdcfs(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u2;
    CPU_FloatU u1;

    u1.l = val;
    u2.d = float32_to_float64(u1.f, &env->vec_status);

    return u2.ll;
}

/* Double-precision floating-point arithmetic */
uint64_t helper_efdadd(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_add(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efdsub(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efdmul(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efddiv(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_div(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

/* Double-precision floating-point comparisons */
uint32_t helper_efdtstlt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

uint32_t helper_efdtstgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
}

uint32_t helper_efdtsteq(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_eq_quiet(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

uint32_t helper_efdcmplt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtstlt(env, op1, op2);
}

uint32_t helper_efdcmpgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtstgt(env, op1, op2);
}

uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtsteq(env, op1, op2);
}

#define float64_to_float64(x, env) x

/*
 * VSX_ADD_SUB - VSX floating point add/subtract
 *   name  - instruction mnemonic
 *   op    - operation (add or sub)
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfifprf - set FI and FPRF
 */
#define VSX_ADD_SUB(name, op, nels, tp, fld, sfifprf, r2sp)                  \
void helper_##name(CPUPPCState *env, ppc_vsr_t *xt,                          \
                   ppc_vsr_t *xa, ppc_vsr_t *xb)                             \
{                                                                            \
    ppc_vsr_t t = { };                                                       \
    int i;                                                                   \
                                                                             \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        t.fld = tp##_##op(xa->fld, xb->fld, &tstat);                         \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            float_invalid_op_addsub(env, tstat.float_exception_flags,        \
                                    sfifprf, GETPC());                       \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            t.fld = do_frsp(env, t.fld, GETPC());                            \
        }                                                                    \
                                                                             \
        if (sfifprf) {                                                       \
            helper_compute_fprf_float64(env, t.fld);                         \
        }                                                                    \
    }                                                                        \
    *xt = t;                                                                 \
    do_float_check_status(env, sfifprf, GETPC());                            \
}

VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0)
VSX_ADD_SUB(xsaddsp, add, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvadddp, add, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvaddsp, add, 4, float32, VsrW(i), 0, 0)
VSX_ADD_SUB(xssubdp, sub, 1, float64, VsrD(0), 1, 0)
VSX_ADD_SUB(xssubsp, sub, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvsubdp, sub, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvsubsp, sub, 4, float32, VsrW(i), 0, 0)
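
/*
 * xsaddqp[o]: the opcode's Rc bit selects the round-to-odd variant,
 * which temporarily overrides the rounding mode for this one operation.
 */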
void helper_xsaddqp(CPUPPCState *env, uint32_t opcode,
                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
{
    ppc_vsr_t t = *xt;
    float_status tstat;

    helper_reset_fpstatus(env);

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    t.f128 = float128_add(xa->f128, xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        float_invalid_op_addsub(env, tstat.float_exception_flags, 1, GETPC());
    }

    helper_compute_fprf_float128(env, t.f128);

    *xt = t;
    do_float_check_status(env, true, GETPC());
}

/*
 * VSX_MUL - VSX floating point multiply
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfifprf - set FI and FPRF
 */
#define VSX_MUL(op, nels, tp, fld, sfifprf, r2sp)                            \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                            \
                 ppc_vsr_t *xa, ppc_vsr_t *xb)                               \
{                                                                            \
    ppc_vsr_t t = { };                                                       \
    int i;                                                                   \
                                                                             \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        t.fld = tp##_mul(xa->fld, xb->fld, &tstat);                          \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            float_invalid_op_mul(env, tstat.float_exception_flags,           \
                                 sfifprf, GETPC());                          \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            t.fld = do_frsp(env, t.fld, GETPC());                            \
        }                                                                    \
                                                                             \
        if (sfifprf) {                                                       \
            helper_compute_fprf_float64(env, t.fld);                         \
        }                                                                    \
    }                                                                        \
                                                                             \
    *xt = t;                                                                 \
    do_float_check_status(env, sfifprf, GETPC());                            \
}

VSX_MUL(xsmuldp, 1, float64, VsrD(0), 1, 0)
VSX_MUL(xsmulsp, 1, float64, VsrD(0), 1, 1)
VSX_MUL(xvmuldp, 2, float64, VsrD(i), 0, 0)
VSX_MUL(xvmulsp, 4, float32, VsrW(i), 0, 0)

void helper_xsmulqp(CPUPPCState *env, uint32_t opcode,
                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
{
    ppc_vsr_t t = *xt;
    float_status tstat;

    helper_reset_fpstatus(env);
    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    t.f128 = float128_mul(xa->f128, xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        float_invalid_op_mul(env, tstat.float_exception_flags, 1, GETPC());
    }
    helper_compute_fprf_float128(env, t.f128);

    *xt = t;
    do_float_check_status(env, true, GETPC());
}

/*
 * VSX_DIV - VSX floating point divide
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfifprf - set FI and FPRF
 */
#define VSX_DIV(op, nels, tp, fld, sfifprf, r2sp)                            \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                            \
                 ppc_vsr_t *xa, ppc_vsr_t *xb)                               \
{                                                                            \
    ppc_vsr_t t = { };                                                       \
    int i;                                                                   \
                                                                             \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        t.fld = tp##_div(xa->fld, xb->fld, &tstat);                          \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            float_invalid_op_div(env, tstat.float_exception_flags,           \
                                 sfifprf, GETPC());                          \
        }                                                                    \
        if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) {  \
            float_zero_divide_excp(env, GETPC());                            \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            t.fld = do_frsp(env, t.fld, GETPC());                            \
        }                                                                    \
                                                                             \
        if (sfifprf) {                                                       \
            helper_compute_fprf_float64(env, t.fld);                         \
        }                                                                    \
    }                                                                        \
                                                                             \
    *xt = t;                                                                 \
    do_float_check_status(env, sfifprf, GETPC());                            \
}

VSX_DIV(xsdivdp, 1, float64, VsrD(0), 1, 0)
VSX_DIV(xsdivsp, 1, float64, VsrD(0), 1, 1)
VSX_DIV(xvdivdp, 2, float64, VsrD(i), 0, 0)
VSX_DIV(xvdivsp, 4, float32, VsrW(i), 0, 0)

void helper_xsdivqp(CPUPPCState *env, uint32_t opcode,
                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
{
    ppc_vsr_t t = *xt;
    float_status tstat;

    helper_reset_fpstatus(env);
    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    t.f128 = float128_div(xa->f128, xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        float_invalid_op_div(env, tstat.float_exception_flags, 1, GETPC());
    }
    if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) {
        float_zero_divide_excp(env, GETPC());
    }

    helper_compute_fprf_float128(env, t.f128);
    *xt = t;
    do_float_check_status(env, true, GETPC());
}

/*
 * VSX_RE  - VSX floating point reciprocal estimate
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfifprf - set FI and FPRF
 */
#define VSX_RE(op, nels, tp, fld, sfifprf, r2sp)                          \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)          \
{                                                                         \
    ppc_vsr_t t = { };                                                    \
    int i;                                                                \
                                                                          \
    helper_reset_fpstatus(env);                                           \
                                                                          \
    for (i = 0; i < nels; i++) {                                          \
        if (unlikely(tp##_is_signaling_nan(xb->fld, &env->fp_status))) {  \
            float_invalid_op_vxsnan(env, GETPC());                        \
        }                                                                 \
        t.fld = tp##_div(tp##_one, xb->fld, &env->fp_status);             \
                                                                          \
        if (r2sp) {                                                       \
            t.fld = do_frsp(env, t.fld, GETPC());                         \
        }                                                                 \
                                                                          \
        if (sfifprf) {                                                    \
            helper_compute_fprf_float64(env, t.fld);                      \
        }                                                                 \
    }                                                                     \
                                                                          \
    *xt = t;                                                              \
    do_float_check_status(env, sfifprf, GETPC());                         \
}

VSX_RE(xsredp, 1, float64, VsrD(0), 1, 0)
VSX_RE(xsresp, 1, float64, VsrD(0), 1, 1)
VSX_RE(xvredp, 2, float64, VsrD(i), 0, 0)
VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0)

/*
 * VSX_SQRT - VSX floating point square root
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfifprf - set FI and FPRF
 */
#define VSX_SQRT(op, nels, tp, fld, sfifprf, r2sp)                           \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)             \
{                                                                            \
    ppc_vsr_t t = { };                                                       \
    int i;                                                                   \
                                                                             \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        t.fld = tp##_sqrt(xb->fld, &tstat);                                  \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            float_invalid_op_sqrt(env, tstat.float_exception_flags,          \
                                  sfifprf, GETPC());                         \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            t.fld = do_frsp(env, t.fld, GETPC());                            \
        }                                                                    \
                                                                             \
        if (sfifprf) {                                                       \
            helper_compute_fprf_float64(env, t.fld);                         \
        }                                                                    \
    }                                                                        \
                                                                             \
    *xt = t;                                                                 \
    do_float_check_status(env, sfifprf, GETPC());                            \
}

VSX_SQRT(xssqrtdp, 1, float64, VsrD(0), 1, 0)
VSX_SQRT(xssqrtsp, 1, float64, VsrD(0), 1, 1)
VSX_SQRT(xvsqrtdp, 2, float64, VsrD(i), 0, 0)
VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0)

/*
 * VSX_RSQRTE - VSX floating point reciprocal square root estimate
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfifprf - set FI and FPRF
 */
#define VSX_RSQRTE(op, nels, tp, fld, sfifprf, r2sp)                         \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)             \
{                                                                            \
    ppc_vsr_t t = { };                                                       \
    int i;                                                                   \
                                                                             \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        t.fld = tp##_sqrt(xb->fld, &tstat);                                  \
        t.fld = tp##_div(tp##_one, t.fld, &tstat);                           \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            float_invalid_op_sqrt(env, tstat.float_exception_flags,          \
                                  sfifprf, GETPC());                         \
        }                                                                    \
        if (r2sp) {                                                          \
            t.fld = do_frsp(env, t.fld, GETPC());                            \
        }                                                                    \
                                                                             \
        if (sfifprf) {                                                       \
            helper_compute_fprf_float64(env, t.fld);                         \
        }                                                                    \
    }                                                                        \
                                                                             \
    *xt = t;                                                                 \
    do_float_check_status(env, sfifprf, GETPC());                            \
}

VSX_RSQRTE(xsrsqrtedp, 1, float64, VsrD(0), 1, 0)
VSX_RSQRTE(xsrsqrtesp, 1, float64, VsrD(0), 1, 1)
VSX_RSQRTE(xvrsqrtedp, 2, float64, VsrD(i), 0, 0)
VSX_RSQRTE(xvrsqrtesp, 4, float32, VsrW(i), 0, 0)

/*
 * VSX_TDIV - VSX floating point test for divide
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   emin  - minimum unbiased exponent
 *   emax  - maximum unbiased exponent
 *   nbits - number of fraction bits
 */
#define VSX_TDIV(op, nels, tp, fld, emin, emax, nbits)                  \
void helper_##op(CPUPPCState *env, uint32_t opcode,                     \
                 ppc_vsr_t *xa, ppc_vsr_t *xb)                          \
{                                                                       \
    int i;                                                              \
    int fe_flag = 0;                                                    \
    int fg_flag = 0;                                                    \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        if (unlikely(tp##_is_infinity(xa->fld) ||                       \
                     tp##_is_infinity(xb->fld) ||                       \
                     tp##_is_zero(xb->fld))) {                          \
            fe_flag = 1;                                                \
            fg_flag = 1;                                                \
        } else {                                                        \
            int e_a = ppc_##tp##_get_unbiased_exp(xa->fld);             \
            int e_b = ppc_##tp##_get_unbiased_exp(xb->fld);             \
                                                                        \
            if (unlikely(tp##_is_any_nan(xa->fld) ||                    \
                         tp##_is_any_nan(xb->fld))) {                   \
                fe_flag = 1;                                            \
            } else if ((e_b <= emin) || (e_b >= (emax - 2))) {          \
                fe_flag = 1;                                            \
            } else if (!tp##_is_zero(xa->fld) &&                        \
                       (((e_a - e_b) >= emax) ||                        \
                        ((e_a - e_b) <= (emin + 1)) ||                  \
                        (e_a <= (emin + nbits)))) {                     \
                fe_flag = 1;                                            \
            }                                                           \
                                                                        \
            if (unlikely(tp##_is_zero_or_denormal(xb->fld))) {          \
                /*                                                      \
                 * XB is not zero because of the above check and so     \
                 * must be denormalized.                                \
                 */                                                     \
                fg_flag = 1;                                            \
            }                                                           \
        }                                                               \
    }                                                                   \
                                                                        \
    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
}

VSX_TDIV(xstdivdp, 1, float64, VsrD(0), -1022, 1023, 52)
VSX_TDIV(xvtdivdp, 2, float64, VsrD(i), -1022, 1023, 52)
VSX_TDIV(xvtdivsp, 4, float32, VsrW(i), -126, 127, 23)

/*
 * VSX_TSQRT - VSX floating point test for square root
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   emin  - minimum unbiased exponent
 *   nbits - number of fraction bits
 */
#define VSX_TSQRT(op, nels, tp, fld, emin, nbits)                       \
void helper_##op(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xb)      \
{                                                                       \
    int i;                                                              \
    int fe_flag = 0;                                                    \
    int fg_flag = 0;                                                    \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        if (unlikely(tp##_is_infinity(xb->fld) ||                       \
                     tp##_is_zero(xb->fld))) {                          \
            fe_flag = 1;                                                \
            fg_flag = 1;                                                \
        } else {                                                        \
            int e_b = ppc_##tp##_get_unbiased_exp(xb->fld);             \
                                                                        \
            if (unlikely(tp##_is_any_nan(xb->fld))) {                   \
                fe_flag = 1;                                            \
            } else if (unlikely(tp##_is_zero(xb->fld))) {               \
                fe_flag = 1;                                            \
            } else if (unlikely(tp##_is_neg(xb->fld))) {                \
                fe_flag = 1;                                            \
            } else if (!tp##_is_zero(xb->fld) &&                        \
                       (e_b <= (emin + nbits))) {                       \
                fe_flag = 1;                                            \
            }                                                           \
                                                                        \
            if (unlikely(tp##_is_zero_or_denormal(xb->fld))) {          \
                /*                                                      \
                 * XB is not zero because of the above check and so     \
                 * must be denormalized.                                \
                 */                                                     \
                fg_flag = 1;                                            \
            }                                                           \
        }                                                               \
    }                                                                   \
                                                                        \
    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
}

VSX_TSQRT(xstsqrtdp, 1, float64, VsrD(0), -1022, 52)
VSX_TSQRT(xvtsqrtdp, 2, float64, VsrD(i), -1022, 52)
VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23)

/*
 * VSX_MADD - VSX floating point multiply/add variations
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   maddflgs - flags for the float*muladd routine that control the
 *     various forms (madd, msub, nmadd, nmsub)
 *   sfifprf - set FI and FPRF
 */
#define VSX_MADD(op, nels, tp, fld, maddflgs, sfifprf)                       \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                            \
                 ppc_vsr_t *s1, ppc_vsr_t *s2, ppc_vsr_t *s3)                \
{                                                                            \
    ppc_vsr_t t = { };                                                       \
    int i;                                                                   \
                                                                             \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        t.fld = tp##_muladd(s1->fld, s3->fld, s2->fld, maddflgs, &tstat);    \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            float_invalid_op_madd(env, tstat.float_exception_flags,          \
                                  sfifprf, GETPC());                         \
        }                                                                    \
                                                                             \
        if (sfifprf) {                                                       \
            helper_compute_fprf_float64(env, t.fld);                         \
        }                                                                    \
    }                                                                        \
    *xt = t;                                                                 \
    do_float_check_status(env, sfifprf, GETPC());                            \
}

VSX_MADD(XSMADDDP, 1, float64, VsrD(0), MADD_FLGS, 1)
VSX_MADD(XSMSUBDP, 1, float64, VsrD(0), MSUB_FLGS, 1)
VSX_MADD(XSNMADDDP, 1, float64, VsrD(0), NMADD_FLGS, 1)
VSX_MADD(XSNMSUBDP, 1, float64, VsrD(0), NMSUB_FLGS, 1)
VSX_MADD(XSMADDSP, 1, float64r32, VsrD(0), MADD_FLGS, 1)
VSX_MADD(XSMSUBSP, 1, float64r32, VsrD(0), MSUB_FLGS, 1)
VSX_MADD(XSNMADDSP, 1, float64r32, VsrD(0), NMADD_FLGS, 1)
VSX_MADD(XSNMSUBSP, 1, float64r32, VsrD(0), NMSUB_FLGS, 1)

VSX_MADD(xvmadddp, 2, float64, VsrD(i), MADD_FLGS, 0)
VSX_MADD(xvmsubdp, 2, float64, VsrD(i), MSUB_FLGS, 0)
VSX_MADD(xvnmadddp, 2, float64, VsrD(i), NMADD_FLGS, 0)
VSX_MADD(xvnmsubdp, 2, float64, VsrD(i), NMSUB_FLGS, 0)

VSX_MADD(xvmaddsp, 4, float32, VsrW(i), MADD_FLGS, 0)
VSX_MADD(xvmsubsp, 4, float32, VsrW(i), MSUB_FLGS, 0)
VSX_MADD(xvnmaddsp, 4, float32, VsrW(i), NMADD_FLGS, 0)
VSX_MADD(xvnmsubsp, 4, float32, VsrW(i), NMSUB_FLGS, 0)

/*
 * VSX_MADDQ - VSX quad-precision multiply/add
 *   op    - instruction mnemonic
 *   maddflgs - flags for the float*muladd routine that control the
 *     various forms (madd, msub, nmadd, nmsub)
 *   ro    - round to odd
 */
#define VSX_MADDQ(op, maddflgs, ro)                                           \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *s1, ppc_vsr_t *s2,\
                 ppc_vsr_t *s3)                                               \
{                                                                             \
    ppc_vsr_t t = *xt;                                                        \
                                                                              \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    float_status tstat = env->fp_status;                                      \
    set_float_exception_flags(0, &tstat);                                     \
    if (ro) {                                                                 \
        tstat.float_rounding_mode = float_round_to_odd;                       \
    }                                                                         \
    t.f128 = float128_muladd(s1->f128, s3->f128, s2->f128, maddflgs, &tstat); \
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;      \
                                                                              \
    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {         \
        float_invalid_op_madd(env, tstat.float_exception_flags,               \
                              false, GETPC());                                \
    }                                                                         \
                                                                              \
    helper_compute_fprf_float128(env, t.f128);                                \
    *xt = t;                                                                  \
    do_float_check_status(env, true, GETPC());                                \
}

VSX_MADDQ(XSMADDQP, MADD_FLGS, 0)
VSX_MADDQ(XSMADDQPO, MADD_FLGS, 1)
VSX_MADDQ(XSMSUBQP, MSUB_FLGS, 0)
VSX_MADDQ(XSMSUBQPO, MSUB_FLGS, 1)
VSX_MADDQ(XSNMADDQP, NMADD_FLGS, 0)
VSX_MADDQ(XSNMADDQPO, NMADD_FLGS, 1)
VSX_MADDQ(XSNMSUBQP, NMSUB_FLGS, 0)
VSX_MADDQ(XSNMSUBQPO, NMSUB_FLGS, 1)

/*
 * VSX_SCALAR_CMP - VSX scalar floating point compare
 *   op    - instruction mnemonic
 *   tp    - type
 *   cmp   - comparison operation
 *   fld   - vsr_t field
 *   svxvc - set VXVC bit
 */
2229#define VSX_SCALAR_CMP(op, tp, cmp, fld, svxvc) \
2230 void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
2231 ppc_vsr_t *xa, ppc_vsr_t *xb) \
2232{ \
2233 int flags; \
2234 bool r, vxvc; \
2235 \
2236 helper_reset_fpstatus(env); \
2237 \
2238 if (svxvc) { \
2239 r = tp##_##cmp(xb->fld, xa->fld, &env->fp_status); \
2240 } else { \
2241 r = tp##_##cmp##_quiet(xb->fld, xa->fld, &env->fp_status); \
2242 } \
2243 \
2244 flags = get_float_exception_flags(&env->fp_status); \
2245 if (unlikely(flags & float_flag_invalid)) { \
2246 vxvc = svxvc; \
2247 if (flags & float_flag_invalid_snan) { \
2248 float_invalid_op_vxsnan(env, GETPC()); \
2249 vxvc &= !(env->fpscr & FP_VE); \
2250 } \
2251 if (vxvc) { \
2252 float_invalid_op_vxvc(env, 0, GETPC()); \
2253 } \
2254 } \
2255 \
2256 memset(xt, 0, sizeof(*xt)); \
2257 memset(&xt->fld, -r, sizeof(xt->fld)); \
2258 do_float_check_status(env, false, GETPC()); \
2259}
2260
2261VSX_SCALAR_CMP(XSCMPEQDP, float64, eq, VsrD(0), 0)
2262VSX_SCALAR_CMP(XSCMPGEDP, float64, le, VsrD(0), 1)
2263VSX_SCALAR_CMP(XSCMPGTDP, float64, lt, VsrD(0), 1)
2264VSX_SCALAR_CMP(XSCMPEQQP, float128, eq, f128, 0)
2265VSX_SCALAR_CMP(XSCMPGEQP, float128, le, f128, 1)
2266VSX_SCALAR_CMP(XSCMPGTQP, float128, lt, f128, 1)
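
/*
 * Softfloat only provides eq/le/lt predicates, so the GE/GT
 * instructions above swap the operands: e.g. XSCMPGTDP evaluates
 * float64_lt(xb, xa), which is true exactly when xa > xb and false
 * (while feeding VXVC through the invalid flag) when either operand
 * is a NaN.
 */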
2267
2268void helper_xscmpexpdp(CPUPPCState *env, uint32_t opcode,
2269 ppc_vsr_t *xa, ppc_vsr_t *xb)
2270{
2271 int64_t exp_a, exp_b;
2272 uint32_t cc;
2273
2274 exp_a = extract64(xa->VsrD(0), 52, 11);
2275 exp_b = extract64(xb->VsrD(0), 52, 11);
2276
2277 if (unlikely(float64_is_any_nan(xa->VsrD(0)) ||
2278 float64_is_any_nan(xb->VsrD(0)))) {
2279 cc = CRF_SO;
2280 } else {
2281 if (exp_a < exp_b) {
2282 cc = CRF_LT;
2283 } else if (exp_a > exp_b) {
2284 cc = CRF_GT;
2285 } else {
2286 cc = CRF_EQ;
2287 }
2288 }
2289
2290 env->fpscr &= ~FP_FPCC;
2291 env->fpscr |= cc << FPSCR_FPCC;
2292 env->crf[BF(opcode)] = cc;
2293
2294 do_float_check_status(env, false, GETPC());
2295}
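
/*
 * Worked example: with xa = 0x4000000000000000 (2.0) and
 * xb = 0x3ff0000000000000 (1.0), extract64(..., 52, 11) yields the
 * biased exponents 0x400 and 0x3ff, so CRF_GT lands in both FPCC and
 * CR[BF]; any NaN operand forces CRF_SO regardless of the exponents.
 */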
2296
2297void helper_xscmpexpqp(CPUPPCState *env, uint32_t opcode,
2298 ppc_vsr_t *xa, ppc_vsr_t *xb)
2299{
2300 int64_t exp_a, exp_b;
2301 uint32_t cc;
2302
2303 exp_a = extract64(xa->VsrD(0), 48, 15);
2304 exp_b = extract64(xb->VsrD(0), 48, 15);
2305
2306 if (unlikely(float128_is_any_nan(xa->f128) ||
2307 float128_is_any_nan(xb->f128))) {
2308 cc = CRF_SO;
2309 } else {
2310 if (exp_a < exp_b) {
2311 cc = CRF_LT;
2312 } else if (exp_a > exp_b) {
2313 cc = CRF_GT;
2314 } else {
2315 cc = CRF_EQ;
2316 }
2317 }
2318
2319 env->fpscr &= ~FP_FPCC;
2320 env->fpscr |= cc << FPSCR_FPCC;
2321 env->crf[BF(opcode)] = cc;
2322
2323 do_float_check_status(env, false, GETPC());
2324}
2325
2326static inline void do_scalar_cmp(CPUPPCState *env, ppc_vsr_t *xa, ppc_vsr_t *xb,
2327 int crf_idx, bool ordered)
2328{
2329 uint32_t cc;
2330 bool vxsnan_flag = false, vxvc_flag = false;
2331
2332 helper_reset_fpstatus(env);
2333
2334 switch (float64_compare(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) {
2335 case float_relation_less:
2336 cc = CRF_LT;
2337 break;
2338 case float_relation_equal:
2339 cc = CRF_EQ;
2340 break;
2341 case float_relation_greater:
2342 cc = CRF_GT;
2343 break;
2344 case float_relation_unordered:
2345 cc = CRF_SO;
2346
2347 if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) ||
2348 float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) {
2349 vxsnan_flag = true;
2350 if (!(env->fpscr & FP_VE) && ordered) {
2351 vxvc_flag = true;
2352 }
2353 } else if (float64_is_quiet_nan(xa->VsrD(0), &env->fp_status) ||
2354 float64_is_quiet_nan(xb->VsrD(0), &env->fp_status)) {
2355 if (ordered) {
2356 vxvc_flag = true;
2357 }
2358 }
2359
2360 break;
2361 default:
2362 g_assert_not_reached();
2363 }
2364
2365 env->fpscr &= ~FP_FPCC;
2366 env->fpscr |= cc << FPSCR_FPCC;
2367 env->crf[crf_idx] = cc;
2368
2369 if (vxsnan_flag) {
2370 float_invalid_op_vxsnan(env, GETPC());
2371 }
2372 if (vxvc_flag) {
2373 float_invalid_op_vxvc(env, 0, GETPC());
2374 }
2375
2376 do_float_check_status(env, false, GETPC());
2377}
2378
2379void helper_xscmpodp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xa,
2380 ppc_vsr_t *xb)
2381{
2382 do_scalar_cmp(env, xa, xb, BF(opcode), true);
2383}
2384
2385void helper_xscmpudp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xa,
2386 ppc_vsr_t *xb)
2387{
2388 do_scalar_cmp(env, xa, xb, BF(opcode), false);
2389}
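
/*
 * Ordered vs unordered usage: xscmpodp raises VXVC for any NaN
 * operand (quiet NaNs included), while xscmpudp raises only VXSNAN
 * and only for signaling NaNs; both record CRF_SO in FPCC when the
 * operands are unordered.
 */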
2390
2391static inline void do_scalar_cmpq(CPUPPCState *env, ppc_vsr_t *xa,
2392 ppc_vsr_t *xb, int crf_idx, bool ordered)
2393{
2394 uint32_t cc;
2395 bool vxsnan_flag = false, vxvc_flag = false;
2396
2397 helper_reset_fpstatus(env);
2398
2399 switch (float128_compare(xa->f128, xb->f128, &env->fp_status)) {
2400 case float_relation_less:
2401 cc = CRF_LT;
2402 break;
2403 case float_relation_equal:
2404 cc = CRF_EQ;
2405 break;
2406 case float_relation_greater:
2407 cc = CRF_GT;
2408 break;
2409 case float_relation_unordered:
2410 cc = CRF_SO;
2411
2412 if (float128_is_signaling_nan(xa->f128, &env->fp_status) ||
2413 float128_is_signaling_nan(xb->f128, &env->fp_status)) {
2414 vxsnan_flag = true;
2415 if (!(env->fpscr & FP_VE) && ordered) {
2416 vxvc_flag = true;
2417 }
2418 } else if (float128_is_quiet_nan(xa->f128, &env->fp_status) ||
2419 float128_is_quiet_nan(xb->f128, &env->fp_status)) {
2420 if (ordered) {
2421 vxvc_flag = true;
2422 }
2423 }
2424
2425 break;
2426 default:
2427 g_assert_not_reached();
2428 }
2429
2430 env->fpscr &= ~FP_FPCC;
2431 env->fpscr |= cc << FPSCR_FPCC;
2432 env->crf[crf_idx] = cc;
2433
2434 if (vxsnan_flag) {
2435 float_invalid_op_vxsnan(env, GETPC());
2436 }
2437 if (vxvc_flag) {
2438 float_invalid_op_vxvc(env, 0, GETPC());
2439 }
2440
2441 do_float_check_status(env, false, GETPC());
2442}
2443
2444void helper_xscmpoqp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xa,
2445 ppc_vsr_t *xb)
2446{
2447 do_scalar_cmpq(env, xa, xb, BF(opcode), true);
2448}
2449
2450void helper_xscmpuqp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xa,
2451 ppc_vsr_t *xb)
2452{
2453 do_scalar_cmpq(env, xa, xb, BF(opcode), false);
2454}
2455
/*
 * VSX_MAX_MIN - VSX floating point maximum/minimum
 *   name  - instruction mnemonic
 *   op    - operation (max or min)
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 */
2464#define VSX_MAX_MIN(name, op, nels, tp, fld) \
2465void helper_##name(CPUPPCState *env, ppc_vsr_t *xt, \
2466 ppc_vsr_t *xa, ppc_vsr_t *xb) \
2467{ \
2468 ppc_vsr_t t = { }; \
2469 int i; \
2470 \
2471 for (i = 0; i < nels; i++) { \
2472 t.fld = tp##_##op(xa->fld, xb->fld, &env->fp_status); \
2473 if (unlikely(tp##_is_signaling_nan(xa->fld, &env->fp_status) || \
2474 tp##_is_signaling_nan(xb->fld, &env->fp_status))) { \
2475 float_invalid_op_vxsnan(env, GETPC()); \
2476 } \
2477 } \
2478 \
2479 *xt = t; \
2480 do_float_check_status(env, false, GETPC()); \
2481}
2482
2483VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, VsrD(0))
2484VSX_MAX_MIN(xvmaxdp, maxnum, 2, float64, VsrD(i))
2485VSX_MAX_MIN(xvmaxsp, maxnum, 4, float32, VsrW(i))
2486VSX_MAX_MIN(xsmindp, minnum, 1, float64, VsrD(0))
2487VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i))
2488VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i))
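
/*
 * maxnum/minnum implement the IEEE 754-2008 semantics: when exactly
 * one operand is a quiet NaN the numeric operand is returned, e.g.
 * float64_maxnum(QNaN, 1.0) == 1.0.  The explicit SNaN check above
 * still runs so the PowerPC-specific VXSNAN status bit gets set.
 */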
2489
2490#define VSX_MAX_MINC(name, max, tp, fld) \
2491void helper_##name(CPUPPCState *env, \
2492 ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb) \
2493{ \
2494 ppc_vsr_t t = { }; \
2495 bool first; \
2496 \
2497 helper_reset_fpstatus(env); \
2498 \
2499 if (max) { \
2500 first = tp##_le_quiet(xb->fld, xa->fld, &env->fp_status); \
2501 } else { \
2502 first = tp##_lt_quiet(xa->fld, xb->fld, &env->fp_status); \
2503 } \
2504 \
2505 if (first) { \
2506 t.fld = xa->fld; \
2507 } else { \
2508 t.fld = xb->fld; \
2509 if (env->fp_status.float_exception_flags & float_flag_invalid_snan) { \
2510 float_invalid_op_vxsnan(env, GETPC()); \
2511 } \
2512 } \
2513 \
2514 *xt = t; \
2515}
2516
2517VSX_MAX_MINC(XSMAXCDP, true, float64, VsrD(0));
2518VSX_MAX_MINC(XSMINCDP, false, float64, VsrD(0));
2519VSX_MAX_MINC(XSMAXCQP, true, float128, f128);
2520VSX_MAX_MINC(XSMINCQP, false, float128, f128);
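
/*
 * Usage note: the "C" forms match the C conditional operator
 * (max: xa >= xb ? xa : xb).  A NaN operand makes the quiet compare
 * return false, so the result falls through to xb, just as the
 * ternary expression would evaluate.
 */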
2521
2522#define VSX_MAX_MINJ(name, max) \
2523void helper_##name(CPUPPCState *env, \
2524 ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb) \
2525{ \
2526 ppc_vsr_t t = { }; \
2527 bool vxsnan_flag = false, vex_flag = false; \
2528 \
2529 if (unlikely(float64_is_any_nan(xa->VsrD(0)))) { \
2530 if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status)) { \
2531 vxsnan_flag = true; \
2532 } \
2533 t.VsrD(0) = xa->VsrD(0); \
2534 } else if (unlikely(float64_is_any_nan(xb->VsrD(0)))) { \
2535 if (float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) { \
2536 vxsnan_flag = true; \
2537 } \
2538 t.VsrD(0) = xb->VsrD(0); \
2539 } else if (float64_is_zero(xa->VsrD(0)) && \
2540 float64_is_zero(xb->VsrD(0))) { \
2541 if (max) { \
2542 if (!float64_is_neg(xa->VsrD(0)) || \
2543 !float64_is_neg(xb->VsrD(0))) { \
2544 t.VsrD(0) = 0ULL; \
2545 } else { \
2546 t.VsrD(0) = 0x8000000000000000ULL; \
2547 } \
2548 } else { \
2549 if (float64_is_neg(xa->VsrD(0)) || \
2550 float64_is_neg(xb->VsrD(0))) { \
2551 t.VsrD(0) = 0x8000000000000000ULL; \
2552 } else { \
2553 t.VsrD(0) = 0ULL; \
2554 } \
2555 } \
2556 } else if ((max && \
2557 !float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) || \
2558 (!max && \
2559 float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status))) { \
2560 t.VsrD(0) = xa->VsrD(0); \
2561 } else { \
2562 t.VsrD(0) = xb->VsrD(0); \
2563 } \
2564 \
2565 vex_flag = (env->fpscr & FP_VE) && vxsnan_flag; \
2566 if (vxsnan_flag) { \
2567 float_invalid_op_vxsnan(env, GETPC()); \
2568 } \
2569 if (!vex_flag) { \
2570 *xt = t; \
2571 } \
}
2573
2574VSX_MAX_MINJ(XSMAXJDP, 1);
2575VSX_MAX_MINJ(XSMINJDP, 0);
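
/*
 * The "J" forms differ from the "C" forms chiefly in the signed-zero
 * handling above: XSMAXJDP(-0.0, +0.0) is +0.0 and
 * XSMINJDP(-0.0, +0.0) is -0.0, and a NaN in xa is propagated in
 * preference to one in xb.
 */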
2576
/*
 * VSX_CMP - VSX floating point compare
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   cmp   - comparison operation
 *   svxvc - set VXVC bit
 *   exp   - expected result of comparison
 */
2587#define VSX_CMP(op, nels, tp, fld, cmp, svxvc, exp) \
2588uint32_t helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
2589 ppc_vsr_t *xa, ppc_vsr_t *xb) \
2590{ \
2591 ppc_vsr_t t = *xt; \
2592 uint32_t crf6 = 0; \
2593 int i; \
2594 int all_true = 1; \
2595 int all_false = 1; \
2596 \
2597 helper_reset_fpstatus(env); \
2598 \
2599 for (i = 0; i < nels; i++) { \
2600 if (unlikely(tp##_is_any_nan(xa->fld) || \
2601 tp##_is_any_nan(xb->fld))) { \
2602 if (tp##_is_signaling_nan(xa->fld, &env->fp_status) || \
2603 tp##_is_signaling_nan(xb->fld, &env->fp_status)) { \
2604 float_invalid_op_vxsnan(env, GETPC()); \
2605 } \
2606 if (svxvc) { \
2607 float_invalid_op_vxvc(env, 0, GETPC()); \
2608 } \
2609 t.fld = 0; \
2610 all_true = 0; \
2611 } else { \
2612 if (tp##_##cmp(xb->fld, xa->fld, &env->fp_status) == exp) { \
2613 t.fld = -1; \
2614 all_false = 0; \
2615 } else { \
2616 t.fld = 0; \
2617 all_true = 0; \
2618 } \
2619 } \
2620 } \
2621 \
2622 *xt = t; \
2623 crf6 = (all_true ? 0x8 : 0) | (all_false ? 0x2 : 0); \
2624 return crf6; \
2625}
2626
2627VSX_CMP(xvcmpeqdp, 2, float64, VsrD(i), eq, 0, 1)
2628VSX_CMP(xvcmpgedp, 2, float64, VsrD(i), le, 1, 1)
2629VSX_CMP(xvcmpgtdp, 2, float64, VsrD(i), lt, 1, 1)
2630VSX_CMP(xvcmpnedp, 2, float64, VsrD(i), eq, 0, 0)
2631VSX_CMP(xvcmpeqsp, 4, float32, VsrW(i), eq, 0, 1)
2632VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1, 1)
2633VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1, 1)
2634VSX_CMP(xvcmpnesp, 4, float32, VsrW(i), eq, 0, 0)
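
/*
 * CR6 worked example: with all lanes matching, the helper returns
 * 0x8 (all true); with no lane matching, 0x2 (all false); mixed lanes
 * yield 0.  The record (Rc=1) forms copy this value into CR field 6.
 */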
2635
/*
 * VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type
 *   ttp   - target type
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field (f32 or f64)
 *   sfifprf - set FI and FPRF
 */
2646#define VSX_CVT_FP_TO_FP(op, nels, stp, ttp, sfld, tfld, sfifprf) \
2647void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
2648{ \
2649 ppc_vsr_t t = { }; \
2650 int i; \
2651 \
2652 helper_reset_fpstatus(env); \
2653 \
2654 for (i = 0; i < nels; i++) { \
2655 t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \
2656 if (unlikely(stp##_is_signaling_nan(xb->sfld, \
2657 &env->fp_status))) { \
2658 float_invalid_op_vxsnan(env, GETPC()); \
2659 t.tfld = ttp##_snan_to_qnan(t.tfld); \
2660 } \
2661 if (sfifprf) { \
2662 helper_compute_fprf_##ttp(env, t.tfld); \
2663 } \
2664 } \
2665 \
2666 *xt = t; \
2667 do_float_check_status(env, sfifprf, GETPC()); \
2668}
2669
2670VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1)
2671VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2 * i), VsrD(i), 0)
2672
2673#define VSX_CVT_FP_TO_FP2(op, nels, stp, ttp, sfifprf) \
2674void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
2675{ \
2676 ppc_vsr_t t = { }; \
2677 int i; \
2678 \
2679 helper_reset_fpstatus(env); \
2680 \
2681 for (i = 0; i < nels; i++) { \
2682 t.VsrW(2 * i) = stp##_to_##ttp(xb->VsrD(i), &env->fp_status); \
2683 if (unlikely(stp##_is_signaling_nan(xb->VsrD(i), \
2684 &env->fp_status))) { \
2685 float_invalid_op_vxsnan(env, GETPC()); \
2686 t.VsrW(2 * i) = ttp##_snan_to_qnan(t.VsrW(2 * i)); \
2687 } \
2688 if (sfifprf) { \
2689 helper_compute_fprf_##ttp(env, t.VsrW(2 * i)); \
2690 } \
2691 t.VsrW(2 * i + 1) = t.VsrW(2 * i); \
2692 } \
2693 \
2694 *xt = t; \
2695 do_float_check_status(env, sfifprf, GETPC()); \
2696}
2697
2698VSX_CVT_FP_TO_FP2(xvcvdpsp, 2, float64, float32, 0)
2699VSX_CVT_FP_TO_FP2(xscvdpsp, 1, float64, float32, 1)
2700
/*
 * VSX_CVT_FP_TO_FP_VECTOR - VSX floating point/floating point conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type
 *   ttp   - target type
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field (f32 or f64)
 *   sfprf - set FPRF
 */
2711#define VSX_CVT_FP_TO_FP_VECTOR(op, nels, stp, ttp, sfld, tfld, sfprf) \
2712void helper_##op(CPUPPCState *env, uint32_t opcode, \
2713 ppc_vsr_t *xt, ppc_vsr_t *xb) \
2714{ \
2715 ppc_vsr_t t = *xt; \
2716 int i; \
2717 \
2718 helper_reset_fpstatus(env); \
2719 \
2720 for (i = 0; i < nels; i++) { \
2721 t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \
2722 if (unlikely(stp##_is_signaling_nan(xb->sfld, \
2723 &env->fp_status))) { \
2724 float_invalid_op_vxsnan(env, GETPC()); \
2725 t.tfld = ttp##_snan_to_qnan(t.tfld); \
2726 } \
2727 if (sfprf) { \
2728 helper_compute_fprf_##ttp(env, t.tfld); \
2729 } \
2730 } \
2731 \
2732 *xt = t; \
2733 do_float_check_status(env, true, GETPC()); \
2734}
2735
2736VSX_CVT_FP_TO_FP_VECTOR(xscvdpqp, 1, float64, float128, VsrD(0), f128, 1)
2737
/*
 * VSX_CVT_FP_TO_FP_HP - VSX floating point/floating point conversion
 *                       involving one half precision value
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type
 *   ttp   - target type
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   sfifprf - set FI and FPRF
 */
2749#define VSX_CVT_FP_TO_FP_HP(op, nels, stp, ttp, sfld, tfld, sfifprf) \
2750void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
2751{ \
2752 ppc_vsr_t t = { }; \
2753 int i; \
2754 \
2755 helper_reset_fpstatus(env); \
2756 \
2757 for (i = 0; i < nels; i++) { \
2758 t.tfld = stp##_to_##ttp(xb->sfld, 1, &env->fp_status); \
2759 if (unlikely(stp##_is_signaling_nan(xb->sfld, \
2760 &env->fp_status))) { \
2761 float_invalid_op_vxsnan(env, GETPC()); \
2762 t.tfld = ttp##_snan_to_qnan(t.tfld); \
2763 } \
2764 if (sfifprf) { \
2765 helper_compute_fprf_##ttp(env, t.tfld); \
2766 } \
2767 } \
2768 \
2769 *xt = t; \
2770 do_float_check_status(env, sfifprf, GETPC()); \
2771}
2772
2773VSX_CVT_FP_TO_FP_HP(xscvdphp, 1, float64, float16, VsrD(0), VsrH(3), 1)
2774VSX_CVT_FP_TO_FP_HP(xscvhpdp, 1, float16, float64, VsrH(3), VsrD(0), 1)
2775VSX_CVT_FP_TO_FP_HP(xvcvsphp, 4, float32, float16, VsrW(i), VsrH(2 * i + 1), 0)
2776VSX_CVT_FP_TO_FP_HP(xvcvhpsp, 4, float16, float32, VsrH(2 * i + 1), VsrW(i), 0)
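
/*
 * The extra '1' argument selects IEEE half precision (with Inf/NaN)
 * rather than the alternative format in the float16 conversion
 * routines; VsrH(3) is the halfword slot where the ISA keeps the
 * scalar HP value within doubleword 0.
 */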
2777
2778void helper_XVCVSPBF16(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)
2779{
2780 ppc_vsr_t t = { };
2781 int i, status;
2782
2783 helper_reset_fpstatus(env);
2784
2785 for (i = 0; i < 4; i++) {
2786 t.VsrH(2 * i + 1) = float32_to_bfloat16(xb->VsrW(i), &env->fp_status);
2787 }
2788
2789 status = get_float_exception_flags(&env->fp_status);
2790 if (unlikely(status & float_flag_invalid_snan)) {
2791 float_invalid_op_vxsnan(env, GETPC());
2792 }
2793
2794 *xt = t;
2795 do_float_check_status(env, false, GETPC());
2796}
2797
2798void helper_XSCVQPDP(CPUPPCState *env, uint32_t ro, ppc_vsr_t *xt,
2799 ppc_vsr_t *xb)
2800{
2801 ppc_vsr_t t = { };
2802 float_status tstat;
2803
2804 helper_reset_fpstatus(env);
2805
2806 tstat = env->fp_status;
2807 if (ro != 0) {
2808 tstat.float_rounding_mode = float_round_to_odd;
2809 }
2810
2811 t.VsrD(0) = float128_to_float64(xb->f128, &tstat);
2812 env->fp_status.float_exception_flags |= tstat.float_exception_flags;
2813 if (unlikely(float128_is_signaling_nan(xb->f128, &tstat))) {
2814 float_invalid_op_vxsnan(env, GETPC());
2815 t.VsrD(0) = float64_snan_to_qnan(t.VsrD(0));
2816 }
2817 helper_compute_fprf_float64(env, t.VsrD(0));
2818
2819 *xt = t;
2820 do_float_check_status(env, true, GETPC());
2821}
2822
2823uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb)
2824{
2825 uint64_t result, sign, exp, frac;
2826
2827 helper_reset_fpstatus(env);
2828 float_status tstat = env->fp_status;
2829 set_float_exception_flags(0, &tstat);
2830
2831 sign = extract64(xb, 63, 1);
2832 exp = extract64(xb, 52, 11);
2833 frac = extract64(xb, 0, 52) | 0x10000000000000ULL;
2834
2835 if (unlikely(exp == 0 && extract64(frac, 0, 52) != 0)) {
        /* DP denormal operand.  */
        /* Exponent override to DP min exp.  */
2838 exp = 1;
        /* Implicit bit override to 0.  */
2840 frac = deposit64(frac, 53, 1, 0);
2841 }
2842
2843 if (unlikely(exp < 897 && frac != 0)) {
        /* SP tiny operand.  */
2845 if (897 - exp > 63) {
2846 frac = 0;
2847 } else {
            /* Denormalize until exp = SP min exp.  */
2849 frac >>= (897 - exp);
2850 }
        /* SP exponent override.  */
2852 exp = 896;
2853 }
2854
2855 result = sign << 31;
2856 result |= extract64(exp, 10, 1) << 30;
2857 result |= extract64(exp, 0, 7) << 23;
2858 result |= extract64(frac, 29, 23);

    /* Hardware replicates the result to both words of the doubleword.  */
2861 return (result << 32) | result;
2862}
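
/*
 * Exponent arithmetic spelled out: the SP minimum normal exponent is
 * -126 and the DP bias is 1023, so biased DP exponents below
 * 1023 - 126 = 897 cannot produce an SP normal and the fraction is
 * denormalized instead; if 897 - exp exceeds 63 the shift would drop
 * every fraction bit, hence the early flush to zero.
 */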
2863
2864uint64_t helper_XSCVSPDPN(uint64_t xb)
2865{
2866 return helper_todouble(xb >> 32);
2867}
2868
/*
 * VSX_CVT_FP_TO_INT - VSX floating point to integer conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (int32, uint32, int64 or uint64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   sfi   - set FI
 *   rnan  - resulting NaN
 */
2880#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, sfi, rnan) \
2881void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
2882{ \
2883 ppc_vsr_t t = { }; \
2884 int i, flags; \
2885 \
2886 helper_reset_fpstatus(env); \
2887 \
2888 for (i = 0; i < nels; i++) { \
2889 t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status); \
2890 flags = env->fp_status.float_exception_flags; \
2891 if (unlikely(flags & float_flag_invalid)) { \
2892 t.tfld = float_invalid_cvt(env, flags, t.tfld, rnan, 0, GETPC());\
2893 } \
2894 } \
2895 \
2896 *xt = t; \
2897 do_float_check_status(env, sfi, GETPC()); \
2898}
2899
2900VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0), true, \
2901 0x8000000000000000ULL)
2902VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), true, 0ULL)
2903VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), false, \
2904 0x8000000000000000ULL)
2905VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), false, \
2906 0ULL)
2907VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2 * i), VsrD(i), false, \
2908 0x8000000000000000ULL)
2909VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), false, \
2910 0x80000000ULL)
2911VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2 * i), VsrD(i), \
2912 false, 0ULL)
2913VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), false, 0U)
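
/*
 * As the instantiations above show, the rnan argument is the
 * ISA-mandated result when the source is a NaN: the most negative
 * representable integer for signed targets (e.g. 0x8000000000000000
 * for int64) and zero for unsigned ones; float_invalid_cvt()
 * substitutes it based on the flags raised by the conversion.
 */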
2914
2915#define VSX_CVT_FP_TO_INT128(op, tp, rnan) \
2916void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
2917{ \
2918 ppc_vsr_t t; \
2919 int flags; \
2920 \
2921 helper_reset_fpstatus(env); \
2922 t.s128 = float128_to_##tp##_round_to_zero(xb->f128, &env->fp_status); \
2923 flags = get_float_exception_flags(&env->fp_status); \
2924 if (unlikely(flags & float_flag_invalid)) { \
2925 t.VsrD(0) = float_invalid_cvt(env, flags, t.VsrD(0), rnan, 0, GETPC());\
2926 t.VsrD(1) = -(t.VsrD(0) & 1); \
2927 } \
2928 \
2929 *xt = t; \
2930 do_float_check_status(env, true, GETPC()); \
2931}
2932
2933VSX_CVT_FP_TO_INT128(XSCVQPUQZ, uint128, 0)
VSX_CVT_FP_TO_INT128(XSCVQPSQZ, int128, 0x8000000000000000ULL)
2935
/*
 * Likewise, except that the result is duplicated into both subwords.
 * Power ISA 3.1 has Programming Notes for these insns:
 *     Previous versions of the architecture allowed the contents of
 *     word 0 of the result register to be undefined. However, all
 *     processors that support this instruction write the result into
 *     words 0 and 1 (and words 2 and 3) of the result register, as
 *     is required by this version of the architecture.
 */
2945#define VSX_CVT_FP_TO_INT2(op, nels, stp, ttp, sfi, rnan) \
2946void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
2947{ \
2948 ppc_vsr_t t = { }; \
2949 int i, flags; \
2950 \
2951 helper_reset_fpstatus(env); \
2952 \
2953 for (i = 0; i < nels; i++) { \
2954 t.VsrW(2 * i) = stp##_to_##ttp##_round_to_zero(xb->VsrD(i), \
2955 &env->fp_status); \
2956 flags = env->fp_status.float_exception_flags; \
2957 if (unlikely(flags & float_flag_invalid)) { \
2958 t.VsrW(2 * i) = float_invalid_cvt(env, flags, t.VsrW(2 * i), \
2959 rnan, 0, GETPC()); \
2960 } \
2961 t.VsrW(2 * i + 1) = t.VsrW(2 * i); \
2962 } \
2963 \
2964 *xt = t; \
2965 do_float_check_status(env, sfi, GETPC()); \
2966}
2967
2968VSX_CVT_FP_TO_INT2(xscvdpsxws, 1, float64, int32, true, 0x80000000U)
2969VSX_CVT_FP_TO_INT2(xscvdpuxws, 1, float64, uint32, true, 0U)
2970VSX_CVT_FP_TO_INT2(xvcvdpsxws, 2, float64, int32, false, 0x80000000U)
2971VSX_CVT_FP_TO_INT2(xvcvdpuxws, 2, float64, uint32, false, 0U)
2972
/*
 * VSX_CVT_FP_TO_INT_VECTOR - VSX floating point to integer conversion
 *   op    - instruction mnemonic
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (int32, uint32, int64 or uint64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   rnan  - resulting NaN
 */
2982#define VSX_CVT_FP_TO_INT_VECTOR(op, stp, ttp, sfld, tfld, rnan) \
2983void helper_##op(CPUPPCState *env, uint32_t opcode, \
2984 ppc_vsr_t *xt, ppc_vsr_t *xb) \
2985{ \
2986 ppc_vsr_t t = { }; \
2987 int flags; \
2988 \
2989 helper_reset_fpstatus(env); \
2990 \
2991 t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status); \
2992 flags = get_float_exception_flags(&env->fp_status); \
2993 if (flags & float_flag_invalid) { \
2994 t.tfld = float_invalid_cvt(env, flags, t.tfld, rnan, 0, GETPC()); \
2995 } \
2996 \
2997 *xt = t; \
2998 do_float_check_status(env, true, GETPC()); \
2999}
3000
3001VSX_CVT_FP_TO_INT_VECTOR(xscvqpsdz, float128, int64, f128, VsrD(0), \
3002 0x8000000000000000ULL)
3003VSX_CVT_FP_TO_INT_VECTOR(xscvqpswz, float128, int32, f128, VsrD(0), \
3004 0xffffffff80000000ULL)
3005VSX_CVT_FP_TO_INT_VECTOR(xscvqpudz, float128, uint64, f128, VsrD(0), 0x0ULL)
3006VSX_CVT_FP_TO_INT_VECTOR(xscvqpuwz, float128, uint32, f128, VsrD(0), 0x0ULL)
3007
/*
 * VSX_CVT_INT_TO_FP - VSX integer to floating point conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (int32, uint32, int64 or uint64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   sfifprf - set FI and FPRF
 *   r2sp  - round intermediate result to single precision
 */
3019#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfifprf, r2sp)\
3020void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
3021{ \
3022 ppc_vsr_t t = { }; \
3023 int i; \
3024 \
3025 helper_reset_fpstatus(env); \
3026 \
3027 for (i = 0; i < nels; i++) { \
3028 t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \
3029 if (r2sp) { \
3030 t.tfld = do_frsp(env, t.tfld, GETPC()); \
3031 } \
3032 if (sfifprf) { \
3033 helper_compute_fprf_float64(env, t.tfld); \
3034 } \
3035 } \
3036 \
3037 *xt = t; \
3038 do_float_check_status(env, sfifprf, GETPC()); \
3039}
3040
3041VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0)
3042VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 0)
3043VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, VsrD(0), VsrD(0), 1, 1)
3044VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1)
3045VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0)
3046VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0)
3047VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2 * i), VsrD(i), 0, 0)
3048VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2 * i), VsrD(i), 0, 0)
3049VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0)
3050VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)
3051
3052#define VSX_CVT_INT_TO_FP2(op, stp, ttp) \
3053void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
3054{ \
3055 ppc_vsr_t t = { }; \
3056 int i; \
3057 \
3058 for (i = 0; i < 2; i++) { \
3059 t.VsrW(2 * i) = stp##_to_##ttp(xb->VsrD(i), &env->fp_status); \
3060 t.VsrW(2 * i + 1) = t.VsrW(2 * i); \
3061 } \
3062 \
3063 *xt = t; \
3064 do_float_check_status(env, false, GETPC()); \
3065}
3066
3067VSX_CVT_INT_TO_FP2(xvcvsxdsp, int64, float32)
3068VSX_CVT_INT_TO_FP2(xvcvuxdsp, uint64, float32)
3069
3070#define VSX_CVT_INT128_TO_FP(op, tp) \
3071void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)\
3072{ \
3073 helper_reset_fpstatus(env); \
3074 xt->f128 = tp##_to_float128(xb->s128, &env->fp_status); \
3075 helper_compute_fprf_float128(env, xt->f128); \
3076 do_float_check_status(env, true, GETPC()); \
3077}
3078
3079VSX_CVT_INT128_TO_FP(XSCVUQQP, uint128);
3080VSX_CVT_INT128_TO_FP(XSCVSQQP, int128);
3081
/*
 * VSX_CVT_INT_TO_FP_VECTOR - VSX integer to floating point conversion
 *   op    - instruction mnemonic
 *   stp   - source type (int32, uint32, int64 or uint64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 */
3090#define VSX_CVT_INT_TO_FP_VECTOR(op, stp, ttp, sfld, tfld) \
3091void helper_##op(CPUPPCState *env, uint32_t opcode, \
3092 ppc_vsr_t *xt, ppc_vsr_t *xb) \
3093{ \
3094 ppc_vsr_t t = *xt; \
3095 \
3096 helper_reset_fpstatus(env); \
3097 t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \
3098 helper_compute_fprf_##ttp(env, t.tfld); \
3099 \
3100 *xt = t; \
3101 do_float_check_status(env, true, GETPC()); \
3102}
3103
3104VSX_CVT_INT_TO_FP_VECTOR(xscvsdqp, int64, float128, VsrD(0), f128)
3105VSX_CVT_INT_TO_FP_VECTOR(xscvudqp, uint64, float128, VsrD(0), f128)
3106
/*
 * For "use current rounding mode", define a value that will not be
 * one of the existing rounding mode enums.
 */
3111#define FLOAT_ROUND_CURRENT (float_round_nearest_even + float_round_down + \
3112 float_round_up + float_round_to_zero)
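
/*
 * Sketch, assuming the current softfloat enum values (nearest_even 0,
 * down 1, up 2, to_zero 3): the sum is 6, which matches no single
 * FloatRoundMode, so VSX_ROUND can use it as an "inherit the mode
 * already in fp_status" sentinel.
 */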
3113
/*
 * VSX_ROUND - VSX floating point round
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   rmode - rounding mode
 *   sfifprf - set FI and FPRF
 */
3123#define VSX_ROUND(op, nels, tp, fld, rmode, sfifprf) \
3124void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
3125{ \
3126 ppc_vsr_t t = { }; \
3127 int i; \
3128 FloatRoundMode curr_rounding_mode; \
3129 \
3130 helper_reset_fpstatus(env); \
3131 \
3132 if (rmode != FLOAT_ROUND_CURRENT) { \
3133 curr_rounding_mode = get_float_rounding_mode(&env->fp_status); \
3134 set_float_rounding_mode(rmode, &env->fp_status); \
3135 } \
3136 \
3137 for (i = 0; i < nels; i++) { \
3138 if (unlikely(tp##_is_signaling_nan(xb->fld, \
3139 &env->fp_status))) { \
3140 float_invalid_op_vxsnan(env, GETPC()); \
3141 t.fld = tp##_snan_to_qnan(xb->fld); \
3142 } else { \
3143 t.fld = tp##_round_to_int(xb->fld, &env->fp_status); \
3144 } \
3145 if (sfifprf) { \
3146 helper_compute_fprf_float64(env, t.fld); \
3147 } \
3148 } \
3149 \
    /* If this is not a "use current rounding mode" instruction, \
     * then inhibit setting of the XX bit and restore rounding \
     * mode from FPSCR. \
     */ \
3154 \
3155 if (rmode != FLOAT_ROUND_CURRENT) { \
3156 set_float_rounding_mode(curr_rounding_mode, &env->fp_status); \
3157 env->fp_status.float_exception_flags &= ~float_flag_inexact; \
3158 } \
3159 \
3160 *xt = t; \
3161 do_float_check_status(env, sfifprf, GETPC()); \
3162}
3163
3164VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_ties_away, 1)
3165VSX_ROUND(xsrdpic, 1, float64, VsrD(0), FLOAT_ROUND_CURRENT, 1)
3166VSX_ROUND(xsrdpim, 1, float64, VsrD(0), float_round_down, 1)
3167VSX_ROUND(xsrdpip, 1, float64, VsrD(0), float_round_up, 1)
3168VSX_ROUND(xsrdpiz, 1, float64, VsrD(0), float_round_to_zero, 1)
3169
3170VSX_ROUND(xvrdpi, 2, float64, VsrD(i), float_round_ties_away, 0)
3171VSX_ROUND(xvrdpic, 2, float64, VsrD(i), FLOAT_ROUND_CURRENT, 0)
3172VSX_ROUND(xvrdpim, 2, float64, VsrD(i), float_round_down, 0)
3173VSX_ROUND(xvrdpip, 2, float64, VsrD(i), float_round_up, 0)
3174VSX_ROUND(xvrdpiz, 2, float64, VsrD(i), float_round_to_zero, 0)
3175
3176VSX_ROUND(xvrspi, 4, float32, VsrW(i), float_round_ties_away, 0)
3177VSX_ROUND(xvrspic, 4, float32, VsrW(i), FLOAT_ROUND_CURRENT, 0)
3178VSX_ROUND(xvrspim, 4, float32, VsrW(i), float_round_down, 0)
3179VSX_ROUND(xvrspip, 4, float32, VsrW(i), float_round_up, 0)
3180VSX_ROUND(xvrspiz, 4, float32, VsrW(i), float_round_to_zero, 0)
3181
3182uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb)
3183{
3184 helper_reset_fpstatus(env);
3185
3186 uint64_t xt = do_frsp(env, xb, GETPC());
3187
3188 helper_compute_fprf_float64(env, xt);
3189 do_float_check_status(env, true, GETPC());
3190 return xt;
3191}
3192
3193void helper_XVXSIGSP(ppc_vsr_t *xt, ppc_vsr_t *xb)
3194{
3195 ppc_vsr_t t = { };
3196 uint32_t exp, i, fraction;
3197
3198 for (i = 0; i < 4; i++) {
3199 exp = (xb->VsrW(i) >> 23) & 0xFF;
3200 fraction = xb->VsrW(i) & 0x7FFFFF;
3201 if (exp != 0 && exp != 255) {
3202 t.VsrW(i) = fraction | 0x00800000;
3203 } else {
3204 t.VsrW(i) = fraction;
3205 }
3206 }
3207 *xt = t;
3208}
3209
3210#define VSX_TSTDC(tp) \
3211static int32_t tp##_tstdc(tp b, uint32_t dcmx) \
3212{ \
3213 uint32_t match = 0; \
3214 uint32_t sign = tp##_is_neg(b); \
3215 if (tp##_is_any_nan(b)) { \
3216 match = extract32(dcmx, 6, 1); \
3217 } else if (tp##_is_infinity(b)) { \
3218 match = extract32(dcmx, 4 + !sign, 1); \
3219 } else if (tp##_is_zero(b)) { \
3220 match = extract32(dcmx, 2 + !sign, 1); \
3221 } else if (tp##_is_zero_or_denormal(b)) { \
3222 match = extract32(dcmx, 0 + !sign, 1); \
3223 } \
3224 return (match != 0); \
3225}
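
/*
 * DCMX worked example (bit 0 is the lsb): dcmx = 0x40 (bit 6) matches
 * any NaN; dcmx = 0x20 matches +Infinity only, since the infinity
 * test reads bit 4 + !sign, i.e. bit 5 for positive values and bit 4
 * for negative ones.
 */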
3226
3227VSX_TSTDC(float32)
3228VSX_TSTDC(float64)
3229VSX_TSTDC(float128)
3230#undef VSX_TSTDC
3231
3232void helper_XVTSTDCDP(ppc_vsr_t *t, ppc_vsr_t *b, uint64_t dcmx, uint32_t v)
3233{
3234 int i;
3235 for (i = 0; i < 2; i++) {
3236 t->s64[i] = (int64_t)-float64_tstdc(b->f64[i], dcmx);
3237 }
3238}
3239
3240void helper_XVTSTDCSP(ppc_vsr_t *t, ppc_vsr_t *b, uint64_t dcmx, uint32_t v)
3241{
3242 int i;
3243 for (i = 0; i < 4; i++) {
3244 t->s32[i] = (int32_t)-float32_tstdc(b->f32[i], dcmx);
3245 }
3246}
3247
3248static bool not_SP_value(float64 val)
3249{
3250 return val != helper_todouble(helper_tosingle(val));
3251}
3252
/*
 * VSX_XS_TSTDC - VSX Scalar Test Data Class
 *   NAME  - instruction name
 *   FLD   - vsr_t field (VsrD(0) or f128)
 *   TP    - type (float64 or float128)
 */
3259#define VSX_XS_TSTDC(NAME, FLD, TP) \
3260 void helper_##NAME(CPUPPCState *env, uint32_t bf, \
3261 uint32_t dcmx, ppc_vsr_t *b) \
3262 { \
3263 uint32_t cc, match, sign = TP##_is_neg(b->FLD); \
3264 match = TP##_tstdc(b->FLD, dcmx); \
3265 cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT; \
3266 env->fpscr &= ~FP_FPCC; \
3267 env->fpscr |= cc << FPSCR_FPCC; \
3268 env->crf[bf] = cc; \
3269 }
3270
3271VSX_XS_TSTDC(XSTSTDCDP, VsrD(0), float64)
3272VSX_XS_TSTDC(XSTSTDCQP, f128, float128)
3273#undef VSX_XS_TSTDC
3274
3275void helper_XSTSTDCSP(CPUPPCState *env, uint32_t bf,
3276 uint32_t dcmx, ppc_vsr_t *b)
3277{
3278 uint32_t cc, match, sign = float64_is_neg(b->VsrD(0));
3279 uint32_t exp = (b->VsrD(0) >> 52) & 0x7FF;
3280 int not_sp = (int)not_SP_value(b->VsrD(0));
3281 match = float64_tstdc(b->VsrD(0), dcmx) || (exp > 0 && exp < 0x381);
3282 cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT | not_sp << CRF_SO_BIT;
3283 env->fpscr &= ~FP_FPCC;
3284 env->fpscr |= cc << FPSCR_FPCC;
3285 env->crf[bf] = cc;
3286}
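
/*
 * 0x381 here is the same 897 threshold used in helper_xscvdpspn:
 * biased DP exponents strictly between 0 and 897 denote values that
 * are normal in DP but would not be normal in SP, so they also count
 * as a match for the single-precision test.
 */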
3287
3288void helper_xsrqpi(CPUPPCState *env, uint32_t opcode,
3289 ppc_vsr_t *xt, ppc_vsr_t *xb)
3290{
3291 ppc_vsr_t t = { };
3292 uint8_t r = Rrm(opcode);
3293 uint8_t ex = Rc(opcode);
3294 uint8_t rmc = RMC(opcode);
3295 uint8_t rmode = 0;
3296 float_status tstat;
3297
3298 helper_reset_fpstatus(env);
3299
3300 if (r == 0 && rmc == 0) {
3301 rmode = float_round_ties_away;
3302 } else if (r == 0 && rmc == 0x3) {
3303 rmode = env->fpscr & FP_RN;
3304 } else if (r == 1) {
3305 switch (rmc) {
3306 case 0:
3307 rmode = float_round_nearest_even;
3308 break;
3309 case 1:
3310 rmode = float_round_to_zero;
3311 break;
3312 case 2:
3313 rmode = float_round_up;
3314 break;
3315 case 3:
3316 rmode = float_round_down;
3317 break;
3318 default:
3319 abort();
3320 }
3321 }
3322
3323 tstat = env->fp_status;
3324 set_float_exception_flags(0, &tstat);
3325 set_float_rounding_mode(rmode, &tstat);
3326 t.f128 = float128_round_to_int(xb->f128, &tstat);
3327 env->fp_status.float_exception_flags |= tstat.float_exception_flags;
3328
3329 if (unlikely(tstat.float_exception_flags & float_flag_invalid_snan)) {
3330 float_invalid_op_vxsnan(env, GETPC());
3331 }
3332
3333 if (ex == 0 && (tstat.float_exception_flags & float_flag_inexact)) {
3334 env->fp_status.float_exception_flags &= ~float_flag_inexact;
3335 }
3336
3337 helper_compute_fprf_float128(env, t.f128);
3338 do_float_check_status(env, true, GETPC());
3339 *xt = t;
3340}
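
/*
 * Decode example: R=1 with RMC=2 rounds toward +Infinity; R=0 with
 * RMC=3 inherits FPSCR.RN.  The EX field (decoded via Rc) selects the
 * xsrqpix variant, which is permitted to signal Inexact, hence only
 * the EX=0 path above discards float_flag_inexact.
 */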
3341
3342void helper_xsrqpxp(CPUPPCState *env, uint32_t opcode,
3343 ppc_vsr_t *xt, ppc_vsr_t *xb)
3344{
3345 ppc_vsr_t t = { };
3346 uint8_t r = Rrm(opcode);
3347 uint8_t rmc = RMC(opcode);
3348 uint8_t rmode = 0;
3349 floatx80 round_res;
3350 float_status tstat;
3351
3352 helper_reset_fpstatus(env);
3353
3354 if (r == 0 && rmc == 0) {
3355 rmode = float_round_ties_away;
3356 } else if (r == 0 && rmc == 0x3) {
3357 rmode = env->fpscr & FP_RN;
3358 } else if (r == 1) {
3359 switch (rmc) {
3360 case 0:
3361 rmode = float_round_nearest_even;
3362 break;
3363 case 1:
3364 rmode = float_round_to_zero;
3365 break;
3366 case 2:
3367 rmode = float_round_up;
3368 break;
3369 case 3:
3370 rmode = float_round_down;
3371 break;
3372 default:
3373 abort();
3374 }
3375 }
3376
3377 tstat = env->fp_status;
3378 set_float_exception_flags(0, &tstat);
3379 set_float_rounding_mode(rmode, &tstat);
3380 round_res = float128_to_floatx80(xb->f128, &tstat);
3381 t.f128 = floatx80_to_float128(round_res, &tstat);
3382 env->fp_status.float_exception_flags |= tstat.float_exception_flags;
3383
3384 if (unlikely(tstat.float_exception_flags & float_flag_invalid_snan)) {
3385 float_invalid_op_vxsnan(env, GETPC());
3386 t.f128 = float128_snan_to_qnan(t.f128);
3387 }
3388
3389 helper_compute_fprf_float128(env, t.f128);
3390 *xt = t;
3391 do_float_check_status(env, true, GETPC());
3392}
3393
3394void helper_xssqrtqp(CPUPPCState *env, uint32_t opcode,
3395 ppc_vsr_t *xt, ppc_vsr_t *xb)
3396{
3397 ppc_vsr_t t = { };
3398 float_status tstat;
3399
3400 helper_reset_fpstatus(env);
3401
3402 tstat = env->fp_status;
3403 if (unlikely(Rc(opcode) != 0)) {
3404 tstat.float_rounding_mode = float_round_to_odd;
3405 }
3406
3407 set_float_exception_flags(0, &tstat);
3408 t.f128 = float128_sqrt(xb->f128, &tstat);
3409 env->fp_status.float_exception_flags |= tstat.float_exception_flags;
3410
3411 if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
3412 float_invalid_op_sqrt(env, tstat.float_exception_flags, 1, GETPC());
3413 }
3414
3415 helper_compute_fprf_float128(env, t.f128);
3416 *xt = t;
3417 do_float_check_status(env, true, GETPC());
3418}
3419
3420void helper_xssubqp(CPUPPCState *env, uint32_t opcode,
3421 ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
3422{
3423 ppc_vsr_t t = *xt;
3424 float_status tstat;
3425
3426 helper_reset_fpstatus(env);
3427
3428 tstat = env->fp_status;
3429 if (unlikely(Rc(opcode) != 0)) {
3430 tstat.float_rounding_mode = float_round_to_odd;
3431 }
3432
3433 set_float_exception_flags(0, &tstat);
3434 t.f128 = float128_sub(xa->f128, xb->f128, &tstat);
3435 env->fp_status.float_exception_flags |= tstat.float_exception_flags;
3436
3437 if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
3438 float_invalid_op_addsub(env, tstat.float_exception_flags, 1, GETPC());
3439 }
3440
3441 helper_compute_fprf_float128(env, t.f128);
3442 *xt = t;
3443 do_float_check_status(env, true, GETPC());
3444}
3445
3446static inline void vsxger_excp(CPUPPCState *env, uintptr_t retaddr)
3447{
    /*
     * XV*GER instructions execute and set the FPSCR as if exceptions
     * are disabled and only at the end throw an exception
     */
3452 target_ulong enable;
3453 enable = env->fpscr & (FP_ENABLES | FP_FI | FP_FR);
3454 env->fpscr &= ~(FP_ENABLES | FP_FI | FP_FR);
3455 int status = get_float_exception_flags(&env->fp_status);
3456 if (unlikely(status & float_flag_invalid)) {
3457 if (status & float_flag_invalid_snan) {
3458 float_invalid_op_vxsnan(env, 0);
3459 }
3460 if (status & float_flag_invalid_imz) {
3461 float_invalid_op_vximz(env, false, 0);
3462 }
3463 if (status & float_flag_invalid_isi) {
3464 float_invalid_op_vxisi(env, false, 0);
3465 }
3466 }
3467 do_float_check_status(env, false, retaddr);
3468 env->fpscr |= enable;
3469 do_fpscr_check_status(env, retaddr);
3470}
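
/*
 * Flow summary: the FPSCR enable bits are masked while the flags from
 * every per-element operation accumulate, so no individual multiply or
 * add can trap mid-instruction; the single do_fpscr_check_status()
 * call at the end raises at most one program interrupt for the whole
 * rank update.
 */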
3471
3472typedef float64 extract_f16(float16, float_status *);
3473
3474static float64 extract_hf16(float16 in, float_status *fp_status)
3475{
3476 return float16_to_float64(in, true, fp_status);
3477}
3478
3479static float64 extract_bf16(bfloat16 in, float_status *fp_status)
3480{
3481 return bfloat16_to_float64(in, fp_status);
3482}
3483
3484static void vsxger16(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
3485 ppc_acc_t *at, uint32_t mask, bool acc,
3486 bool neg_mul, bool neg_acc, extract_f16 extract)
3487{
3488 float32 r, aux_acc;
3489 float64 psum, va, vb, vc, vd;
3490 int i, j, xmsk_bit, ymsk_bit;
3491 uint8_t pmsk = FIELD_EX32(mask, GER_MSK, PMSK),
3492 xmsk = FIELD_EX32(mask, GER_MSK, XMSK),
3493 ymsk = FIELD_EX32(mask, GER_MSK, YMSK);
3494 float_status *excp_ptr = &env->fp_status;
3495 for (i = 0, xmsk_bit = 1 << 3; i < 4; i++, xmsk_bit >>= 1) {
3496 for (j = 0, ymsk_bit = 1 << 3; j < 4; j++, ymsk_bit >>= 1) {
3497 if ((xmsk_bit & xmsk) && (ymsk_bit & ymsk)) {
3498 va = !(pmsk & 2) ? float64_zero :
3499 extract(a->VsrHF(2 * i), excp_ptr);
3500 vb = !(pmsk & 2) ? float64_zero :
3501 extract(b->VsrHF(2 * j), excp_ptr);
3502 vc = !(pmsk & 1) ? float64_zero :
3503 extract(a->VsrHF(2 * i + 1), excp_ptr);
3504 vd = !(pmsk & 1) ? float64_zero :
3505 extract(b->VsrHF(2 * j + 1), excp_ptr);
3506 psum = float64_mul(va, vb, excp_ptr);
3507 psum = float64r32_muladd(vc, vd, psum, 0, excp_ptr);
3508 r = float64_to_float32(psum, excp_ptr);
3509 if (acc) {
3510 aux_acc = at[i].VsrSF(j);
3511 if (neg_mul) {
3512 r = bfp32_neg(r);
3513 }
3514 if (neg_acc) {
3515 aux_acc = bfp32_neg(aux_acc);
3516 }
3517 r = float32_add(r, aux_acc, excp_ptr);
3518 }
3519 at[i].VsrSF(j) = r;
3520 } else {
3521 at[i].VsrSF(j) = float32_zero;
3522 }
3523 }
3524 }
3525 vsxger_excp(env, GETPC());
3526}
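
/*
 * PMSK pairing example: each 32-bit source lane holds two 16-bit
 * elements, so pmsk = 2 keeps only the even element of each pair,
 * pmsk = 1 only the odd one, and pmsk = 3 accumulates the full
 * product pair va * vb + vc * vd computed above.
 */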
3527
3528typedef void vsxger_zero(ppc_vsr_t *at, int, int);
3529
3530typedef void vsxger_muladd_f(ppc_vsr_t *, ppc_vsr_t *, ppc_vsr_t *, int, int,
3531 int flags, float_status *s);
3532
3533static void vsxger_muladd32(ppc_vsr_t *at, ppc_vsr_t *a, ppc_vsr_t *b, int i,
3534 int j, int flags, float_status *s)
3535{
3536 at[i].VsrSF(j) = float32_muladd(a->VsrSF(i), b->VsrSF(j),
3537 at[i].VsrSF(j), flags, s);
3538}
3539
3540static void vsxger_mul32(ppc_vsr_t *at, ppc_vsr_t *a, ppc_vsr_t *b, int i,
3541 int j, int flags, float_status *s)
3542{
3543 at[i].VsrSF(j) = float32_mul(a->VsrSF(i), b->VsrSF(j), s);
3544}
3545
3546static void vsxger_zero32(ppc_vsr_t *at, int i, int j)
3547{
3548 at[i].VsrSF(j) = float32_zero;
3549}
3550
3551static void vsxger_muladd64(ppc_vsr_t *at, ppc_vsr_t *a, ppc_vsr_t *b, int i,
3552 int j, int flags, float_status *s)
3553{
3554 if (j >= 2) {
3555 j -= 2;
3556 at[i].VsrDF(j) = float64_muladd(a[i / 2].VsrDF(i % 2), b->VsrDF(j),
3557 at[i].VsrDF(j), flags, s);
3558 }
3559}
3560
3561static void vsxger_mul64(ppc_vsr_t *at, ppc_vsr_t *a, ppc_vsr_t *b, int i,
3562 int j, int flags, float_status *s)
3563{
3564 if (j >= 2) {
3565 j -= 2;
3566 at[i].VsrDF(j) = float64_mul(a[i / 2].VsrDF(i % 2), b->VsrDF(j), s);
3567 }
3568}
3569
3570static void vsxger_zero64(ppc_vsr_t *at, int i, int j)
3571{
3572 if (j >= 2) {
3573 j -= 2;
3574 at[i].VsrDF(j) = float64_zero;
3575 }
3576}
3577
3578static void vsxger(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
3579 ppc_acc_t *at, uint32_t mask, bool acc, bool neg_mul,
3580 bool neg_acc, vsxger_muladd_f mul, vsxger_muladd_f muladd,
3581 vsxger_zero zero)
3582{
3583 int i, j, xmsk_bit, ymsk_bit, op_flags;
3584 uint8_t xmsk = mask & 0x0F;
3585 uint8_t ymsk = (mask >> 4) & 0x0F;
3586 float_status *excp_ptr = &env->fp_status;
3587 op_flags = (neg_acc ^ neg_mul) ? float_muladd_negate_c : 0;
3588 op_flags |= (neg_mul) ? float_muladd_negate_result : 0;
3589 helper_reset_fpstatus(env);
3590 for (i = 0, xmsk_bit = 1 << 3; i < 4; i++, xmsk_bit >>= 1) {
3591 for (j = 0, ymsk_bit = 1 << 3; j < 4; j++, ymsk_bit >>= 1) {
3592 if ((xmsk_bit & xmsk) && (ymsk_bit & ymsk)) {
3593 if (acc) {
3594 muladd(at, a, b, i, j, op_flags, excp_ptr);
3595 } else {
3596 mul(at, a, b, i, j, op_flags, excp_ptr);
3597 }
3598 } else {
3599 zero(at, i, j);
3600 }
3601 }
3602 }
3603 vsxger_excp(env, GETPC());
3604}
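
/*
 * Mask layout used above: XMSK (low nibble) selects accumulator rows,
 * YMSK (next nibble) selects columns.  For example, mask = 0xff
 * updates the full 4x4 tile, while mask = 0x18 (xmsk 8, ymsk 1)
 * touches only element (0, 3); unselected elements are zeroed, not
 * preserved.
 */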
3605
3606QEMU_FLATTEN
3607void helper_XVBF16GER2(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
3608 ppc_acc_t *at, uint32_t mask)
3609{
3610 vsxger16(env, a, b, at, mask, false, false, false, extract_bf16);
3611}
3612
3613QEMU_FLATTEN
3614void helper_XVBF16GER2PP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
3615 ppc_acc_t *at, uint32_t mask)
3616{
3617 vsxger16(env, a, b, at, mask, true, false, false, extract_bf16);
3618}
3619
3620QEMU_FLATTEN
3621void helper_XVBF16GER2PN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
3622 ppc_acc_t *at, uint32_t mask)
3623{
3624 vsxger16(env, a, b, at, mask, true, false, true, extract_bf16);
3625}
3626
3627QEMU_FLATTEN
3628void helper_XVBF16GER2NP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
3629 ppc_acc_t *at, uint32_t mask)
3630{
3631 vsxger16(env, a, b, at, mask, true, true, false, extract_bf16);
3632}
3633
3634QEMU_FLATTEN
3635void helper_XVBF16GER2NN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
3636 ppc_acc_t *at, uint32_t mask)
3637{
3638 vsxger16(env, a, b, at, mask, true, true, true, extract_bf16);
3639}
3640
3641QEMU_FLATTEN
3642void helper_XVF16GER2(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
3643 ppc_acc_t *at, uint32_t mask)
3644{
3645 vsxger16(env, a, b, at, mask, false, false, false, extract_hf16);
3646}
3647
3648QEMU_FLATTEN
3649void helper_XVF16GER2PP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
3650 ppc_acc_t *at, uint32_t mask)
3651{
3652 vsxger16(env, a, b, at, mask, true, false, false, extract_hf16);
3653}
3654
3655QEMU_FLATTEN
3656void helper_XVF16GER2PN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
3657 ppc_acc_t *at, uint32_t mask)
3658{
3659 vsxger16(env, a, b, at, mask, true, false, true, extract_hf16);
3660}
3661
3662QEMU_FLATTEN
3663void helper_XVF16GER2NP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
3664 ppc_acc_t *at, uint32_t mask)
3665{
3666 vsxger16(env, a, b, at, mask, true, true, false, extract_hf16);
3667}
3668
3669QEMU_FLATTEN
3670void helper_XVF16GER2NN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
3671 ppc_acc_t *at, uint32_t mask)
3672{
3673 vsxger16(env, a, b, at, mask, true, true, true, extract_hf16);
3674}
3675
3676QEMU_FLATTEN
3677void helper_XVF32GER(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
3678 ppc_acc_t *at, uint32_t mask)
3679{
3680 vsxger(env, a, b, at, mask, false, false, false, vsxger_mul32,
3681 vsxger_muladd32, vsxger_zero32);
3682}
3683
3684QEMU_FLATTEN
3685void helper_XVF32GERPP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
3686 ppc_acc_t *at, uint32_t mask)
3687{
3688 vsxger(env, a, b, at, mask, true, false, false, vsxger_mul32,
3689 vsxger_muladd32, vsxger_zero32);
3690}
3691
3692QEMU_FLATTEN
3693void helper_XVF32GERPN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
3694 ppc_acc_t *at, uint32_t mask)
3695{
3696 vsxger(env, a, b, at, mask, true, false, true, vsxger_mul32,
3697 vsxger_muladd32, vsxger_zero32);
3698}
3699
3700QEMU_FLATTEN
3701void helper_XVF32GERNP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
3702 ppc_acc_t *at, uint32_t mask)
3703{
3704 vsxger(env, a, b, at, mask, true, true, false, vsxger_mul32,
3705 vsxger_muladd32, vsxger_zero32);
3706}
3707
3708QEMU_FLATTEN
3709void helper_XVF32GERNN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
3710 ppc_acc_t *at, uint32_t mask)
3711{
3712 vsxger(env, a, b, at, mask, true, true, true, vsxger_mul32,
3713 vsxger_muladd32, vsxger_zero32);
3714}
3715
3716QEMU_FLATTEN
3717void helper_XVF64GER(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
3718 ppc_acc_t *at, uint32_t mask)
3719{
3720 vsxger(env, a, b, at, mask, false, false, false, vsxger_mul64,
3721 vsxger_muladd64, vsxger_zero64);
3722}
3723
3724QEMU_FLATTEN
3725void helper_XVF64GERPP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
3726 ppc_acc_t *at, uint32_t mask)
3727{
3728 vsxger(env, a, b, at, mask, true, false, false, vsxger_mul64,
3729 vsxger_muladd64, vsxger_zero64);
3730}
3731
3732QEMU_FLATTEN
3733void helper_XVF64GERPN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
3734 ppc_acc_t *at, uint32_t mask)
3735{
3736 vsxger(env, a, b, at, mask, true, false, true, vsxger_mul64,
3737 vsxger_muladd64, vsxger_zero64);
3738}
3739
3740QEMU_FLATTEN
3741void helper_XVF64GERNP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
3742 ppc_acc_t *at, uint32_t mask)
3743{
3744 vsxger(env, a, b, at, mask, true, true, false, vsxger_mul64,
3745 vsxger_muladd64, vsxger_zero64);
3746}
3747
3748QEMU_FLATTEN
3749void helper_XVF64GERNN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
3750 ppc_acc_t *at, uint32_t mask)
3751{
3752 vsxger(env, a, b, at, mask, true, true, true, vsxger_mul64,
3753 vsxger_muladd64, vsxger_zero64);
3754}
3755