/*
 *  m68k op helpers
 *
 *  Copyright (c) 2006-2007 CodeSourcery
 *  Written by Paul Brook
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2.1 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
19#include "qemu/osdep.h"
20#include "qemu/log.h"
21#include "cpu.h"
22#include "exec/helper-proto.h"
23#include "exec/exec-all.h"
24#include "exec/cpu_ldst.h"
25#include "semihosting/semihost.h"
26
27#if !defined(CONFIG_USER_ONLY)
28
/*
 * Return from exception on ColdFire: pop the format/vector word and PC
 * from the supervisor stack and restore SR from the saved format word.
 */
static void cf_rte(CPUM68KState *env)
{
    uint32_t sp;
    uint32_t fmt;

    sp = env->aregs[7];
    fmt = cpu_ldl_mmuidx_ra(env, sp, MMU_KERNEL_IDX, 0);
    env->pc = cpu_ldl_mmuidx_ra(env, sp + 4, MMU_KERNEL_IDX, 0);
    /*
     * Bits 29:28 of the format word hold the low two bits of the
     * pre-exception stack pointer (stored there by cf_interrupt_all);
     * restore them before discarding the 8-byte frame.
     */
    sp |= (fmt >> 28) & 3;
    env->aregs[7] = sp + 8;

    /* The saved SR occupies the low 16 bits of the format word. */
    cpu_m68k_set_sr(env, fmt);
}
42
/*
 * Return from exception on 680x0: pop SR and PC, then consume any extra
 * frame words according to the stack-frame format field.  A format 1
 * ("throwaway") frame causes the whole sequence to restart on the stack
 * selected by the just-restored SR.
 */
static void m68k_rte(CPUM68KState *env)
{
    uint32_t sp;
    uint16_t fmt;
    uint16_t sr;

    sp = env->aregs[7];
throwaway:
    sr = cpu_lduw_mmuidx_ra(env, sp, MMU_KERNEL_IDX, 0);
    sp += 2;
    env->pc = cpu_ldl_mmuidx_ra(env, sp, MMU_KERNEL_IDX, 0);
    sp += 4;
    if (m68k_feature(env, M68K_FEATURE_QUAD_MULDIV)) {
        /* all CPUs except the 68000 push a format word as well */
        fmt = cpu_lduw_mmuidx_ra(env, sp, MMU_KERNEL_IDX, 0);
        sp += 2;
        switch (fmt >> 12) {
        case 0:
            /* format 0: normal four-word frame, nothing more to pop */
            break;
        case 1:
            /*
             * Format 1: throwaway frame.  Discard it, switch stacks via
             * the restored SR, and process the next frame from the top.
             */
            env->aregs[7] = sp;
            cpu_m68k_set_sr(env, sr);
            goto throwaway;
        case 2:
        case 3:
            /* formats 2 and 3 carry one extra longword (effective address) */
            sp += 4;
            break;
        case 4:
            /* format 4 carries two extra longwords */
            sp += 8;
            break;
        case 7:
            /* format 7: 68040 access error frame, 52 extra bytes */
            sp += 52;
            break;
        }
    }
    env->aregs[7] = sp;
    cpu_m68k_set_sr(env, sr);
}
81
82static const char *m68k_exception_name(int index)
83{
84 switch (index) {
85 case EXCP_ACCESS:
86 return "Access Fault";
87 case EXCP_ADDRESS:
88 return "Address Error";
89 case EXCP_ILLEGAL:
90 return "Illegal Instruction";
91 case EXCP_DIV0:
92 return "Divide by Zero";
93 case EXCP_CHK:
94 return "CHK/CHK2";
95 case EXCP_TRAPCC:
96 return "FTRAPcc, TRAPcc, TRAPV";
97 case EXCP_PRIVILEGE:
98 return "Privilege Violation";
99 case EXCP_TRACE:
100 return "Trace";
101 case EXCP_LINEA:
102 return "A-Line";
103 case EXCP_LINEF:
104 return "F-Line";
105 case EXCP_DEBEGBP:
106 return "Copro Protocol Violation";
107 case EXCP_FORMAT:
108 return "Format Error";
109 case EXCP_UNINITIALIZED:
110 return "Uninitialized Interrupt";
111 case EXCP_SPURIOUS:
112 return "Spurious Interrupt";
113 case EXCP_INT_LEVEL_1:
114 return "Level 1 Interrupt";
115 case EXCP_INT_LEVEL_1 + 1:
116 return "Level 2 Interrupt";
117 case EXCP_INT_LEVEL_1 + 2:
118 return "Level 3 Interrupt";
119 case EXCP_INT_LEVEL_1 + 3:
120 return "Level 4 Interrupt";
121 case EXCP_INT_LEVEL_1 + 4:
122 return "Level 5 Interrupt";
123 case EXCP_INT_LEVEL_1 + 5:
124 return "Level 6 Interrupt";
125 case EXCP_INT_LEVEL_1 + 6:
126 return "Level 7 Interrupt";
127 case EXCP_TRAP0:
128 return "TRAP #0";
129 case EXCP_TRAP0 + 1:
130 return "TRAP #1";
131 case EXCP_TRAP0 + 2:
132 return "TRAP #2";
133 case EXCP_TRAP0 + 3:
134 return "TRAP #3";
135 case EXCP_TRAP0 + 4:
136 return "TRAP #4";
137 case EXCP_TRAP0 + 5:
138 return "TRAP #5";
139 case EXCP_TRAP0 + 6:
140 return "TRAP #6";
141 case EXCP_TRAP0 + 7:
142 return "TRAP #7";
143 case EXCP_TRAP0 + 8:
144 return "TRAP #8";
145 case EXCP_TRAP0 + 9:
146 return "TRAP #9";
147 case EXCP_TRAP0 + 10:
148 return "TRAP #10";
149 case EXCP_TRAP0 + 11:
150 return "TRAP #11";
151 case EXCP_TRAP0 + 12:
152 return "TRAP #12";
153 case EXCP_TRAP0 + 13:
154 return "TRAP #13";
155 case EXCP_TRAP0 + 14:
156 return "TRAP #14";
157 case EXCP_TRAP0 + 15:
158 return "TRAP #15";
159 case EXCP_FP_BSUN:
160 return "FP Branch/Set on unordered condition";
161 case EXCP_FP_INEX:
162 return "FP Inexact Result";
163 case EXCP_FP_DZ:
164 return "FP Divide by Zero";
165 case EXCP_FP_UNFL:
166 return "FP Underflow";
167 case EXCP_FP_OPERR:
168 return "FP Operand Error";
169 case EXCP_FP_OVFL:
170 return "FP Overflow";
171 case EXCP_FP_SNAN:
172 return "FP Signaling NAN";
173 case EXCP_FP_UNIMP:
174 return "FP Unimplemented Data Type";
175 case EXCP_MMU_CONF:
176 return "MMU Configuration Error";
177 case EXCP_MMU_ILLEGAL:
178 return "MMU Illegal Operation";
179 case EXCP_MMU_ACCESS:
180 return "MMU Access Level Violation";
181 case 64 ... 255:
182 return "User Defined Vector";
183 }
184 return "Unassigned";
185}
186
/*
 * Deliver an exception or hardware interrupt on ColdFire: build the
 * two-longword exception frame (format word + return PC) on the
 * supervisor stack and jump through the vector table.
 */
static void cf_interrupt_all(CPUM68KState *env, int is_hw)
{
    CPUState *cs = env_cpu(env);
    uint32_t sp;
    uint32_t sr;
    uint32_t fmt;
    uint32_t retaddr;
    uint32_t vector;

    fmt = 0;
    retaddr = env->pc;

    if (!is_hw) {
        switch (cs->exception_index) {
        case EXCP_RTE:
            /* Return from an exception. */
            cf_rte(env);
            return;
        case EXCP_HALT_INSN:
            /*
             * Semihosting trap sequence: the preceding word is a nop
             * (0x4e71) and the current insn is 0x4e7bf000 — if it
             * matches and we are in supervisor mode, service the
             * semihosting call instead of halting.
             */
            if (semihosting_enabled()
                && (env->sr & SR_S) != 0
                && (env->pc & 3) == 0
                && cpu_lduw_code(env, env->pc - 4) == 0x4e71
                && cpu_ldl_code(env, env->pc) == 0x4e7bf000) {
                env->pc += 4;
                do_m68k_semihosting(env, env->dregs[0]);
                return;
            }
            cs->halted = 1;
            cs->exception_index = EXCP_HLT;
            cpu_loop_exit(cs);
            return;
        }
        if (cs->exception_index >= EXCP_TRAP0
            && cs->exception_index <= EXCP_TRAP15) {
            /* Move the PC after the trap instruction. */
            retaddr += 2;
        }
    }

    vector = cs->exception_index << 2;

    sr = env->sr | cpu_m68k_get_ccr(env);
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        static int count;
        qemu_log("INT %6d: %s(%#x) pc=%08x sp=%08x sr=%04x\n",
                 ++count, m68k_exception_name(cs->exception_index),
                 vector, env->pc, env->aregs[7], sr);
    }

    /* Format word: frame format 4 in the top nibble, vector, saved SR. */
    fmt |= 0x40000000;
    fmt |= vector << 16;
    fmt |= sr;

    env->sr |= SR_S;
    if (is_hw) {
        /* Raise the interrupt mask to the level being serviced. */
        env->sr = (env->sr & ~SR_I) | (env->pending_level << SR_I_SHIFT);
        env->sr &= ~SR_M;
    }
    m68k_switch_sp(env);
    sp = env->aregs[7];
    /* Stash the low SP bits in the format word so RTE can restore them. */
    fmt |= (sp & 3) << 28;

    /* ??? This could cause MMU faults.  */
    sp &= ~3;
    sp -= 4;
    cpu_stl_mmuidx_ra(env, sp, retaddr, MMU_KERNEL_IDX, 0);
    sp -= 4;
    cpu_stl_mmuidx_ra(env, sp, fmt, MMU_KERNEL_IDX, 0);
    env->aregs[7] = sp;
    /* Jump to vector. */
    env->pc = cpu_ldl_mmuidx_ra(env, env->vbr + vector, MMU_KERNEL_IDX, 0);
}
260
/*
 * Push a 680x0 exception stack frame of the given format on *sp.
 * @format: frame format number (0, 1, 2, 3, 4 or 7)
 * @sr:     status register value to save
 * @addr:   effective/fault address for formats 2-4
 * @retaddr: return PC to save
 * Updates *sp in place.  For format 7 the caller pushes the extra
 * access-error data beforehand; this routine adds only the common part.
 */
static inline void do_stack_frame(CPUM68KState *env, uint32_t *sp,
                                  uint16_t format, uint16_t sr,
                                  uint32_t addr, uint32_t retaddr)
{
    if (m68k_feature(env, M68K_FEATURE_QUAD_MULDIV)) {
        /* all CPUs except the 68000 push a format word */
        CPUState *cs = env_cpu(env);
        switch (format) {
        case 4:
            /* format 4: next-instruction PC plus effective address */
            *sp -= 4;
            cpu_stl_mmuidx_ra(env, *sp, env->pc, MMU_KERNEL_IDX, 0);
            *sp -= 4;
            cpu_stl_mmuidx_ra(env, *sp, addr, MMU_KERNEL_IDX, 0);
            break;
        case 3:
        case 2:
            /* formats 2 and 3: effective address only */
            *sp -= 4;
            cpu_stl_mmuidx_ra(env, *sp, addr, MMU_KERNEL_IDX, 0);
            break;
        }
        /* format word: format in bits 15:12, vector offset below */
        *sp -= 2;
        cpu_stw_mmuidx_ra(env, *sp, (format << 12) + (cs->exception_index << 2),
                          MMU_KERNEL_IDX, 0);
    }
    *sp -= 4;
    cpu_stl_mmuidx_ra(env, *sp, retaddr, MMU_KERNEL_IDX, 0);
    *sp -= 2;
    cpu_stw_mmuidx_ra(env, *sp, sr, MMU_KERNEL_IDX, 0);
}
290
/*
 * Deliver an exception or hardware interrupt on a 680x0 CPU: build the
 * appropriate exception stack frame and jump through the vector table.
 * The sequence follows the MC68040 users manual (chapter 9.3.10).
 */
static void m68k_interrupt_all(CPUM68KState *env, int is_hw)
{
    CPUState *cs = env_cpu(env);
    uint32_t sp;
    uint32_t retaddr;
    uint32_t vector;
    uint16_t sr, oldsr;

    retaddr = env->pc;

    if (!is_hw) {
        switch (cs->exception_index) {
        case EXCP_RTE:
            /* Return from an exception. */
            m68k_rte(env);
            return;
        case EXCP_TRAP0 ... EXCP_TRAP15:
            /* Move the PC after the trap instruction. */
            retaddr += 2;
            break;
        }
    }

    vector = cs->exception_index << 2;

    sr = env->sr | cpu_m68k_get_ccr(env);
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        static int count;
        qemu_log("INT %6d: %s(%#x) pc=%08x sp=%08x sr=%04x\n",
                 ++count, m68k_exception_name(cs->exception_index),
                 vector, env->pc, env->aregs[7], sr);
    }

    /* The processor first makes an internal copy of the SR... */
    oldsr = sr;
    /* ...sets the mode to supervisor... */
    sr |= SR_S;
    /* ...suppresses tracing... */
    sr &= ~SR_T;
    /* ...and for an interrupt raises the interrupt mask. */
    if (is_hw) {
        sr |= (env->sr & ~SR_I) | (env->pending_level << SR_I_SHIFT);
    }
    cpu_m68k_set_sr(env, sr);
    sp = env->aregs[7];

    if (!m68k_feature(env, M68K_FEATURE_UNALIGNED_DATA)) {
        sp &= ~1;
    }

    if (cs->exception_index == EXCP_ACCESS) {
        if (env->mmu.fault) {
            cpu_abort(cs, "DOUBLE MMU FAULT\n");
        }
        env->mmu.fault = true;

        /*
         * Build the 68040 format 7 access error frame.  The write-back
         * and push-data slots are not modelled and are stored as zero.
         */
        /* push data 3 */
        sp -= 4;
        cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
        /* push data 2 */
        sp -= 4;
        cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
        /* push data 1 */
        sp -= 4;
        cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
        /* write back 1 / push data 0 */
        sp -= 4;
        cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
        /* write back 1 address / push address */
        sp -= 4;
        cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
        /* write back 2 data */
        sp -= 4;
        cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
        /* write back 2 address */
        sp -= 4;
        cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
        /* write back 3 data */
        sp -= 4;
        cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
        /* write back 3 address */
        sp -= 4;
        cpu_stl_mmuidx_ra(env, sp, env->mmu.ar, MMU_KERNEL_IDX, 0);
        /* fault address */
        sp -= 4;
        cpu_stl_mmuidx_ra(env, sp, env->mmu.ar, MMU_KERNEL_IDX, 0);
        /* write back 1 status */
        sp -= 2;
        cpu_stw_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
        /* write back 2 status */
        sp -= 2;
        cpu_stw_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
        /* write back 3 status */
        sp -= 2;
        cpu_stw_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
        /* special status word */
        sp -= 2;
        cpu_stw_mmuidx_ra(env, sp, env->mmu.ssw, MMU_KERNEL_IDX, 0);
        /* effective address */
        sp -= 4;
        cpu_stl_mmuidx_ra(env, sp, env->mmu.ar, MMU_KERNEL_IDX, 0);

        do_stack_frame(env, &sp, 7, oldsr, 0, retaddr);
        env->mmu.fault = false;
        if (qemu_loglevel_mask(CPU_LOG_INT)) {
            qemu_log("            "
                     "ssw:  %08x ea:   %08x sfc:  %d    dfc: %d\n",
                     env->mmu.ssw, env->mmu.ar, env->sfc, env->dfc);
        }
    } else if (cs->exception_index == EXCP_ADDRESS) {
        do_stack_frame(env, &sp, 2, oldsr, 0, retaddr);
    } else if (cs->exception_index == EXCP_ILLEGAL ||
               cs->exception_index == EXCP_DIV0 ||
               cs->exception_index == EXCP_CHK ||
               cs->exception_index == EXCP_TRAPCC ||
               cs->exception_index == EXCP_TRACE) {
        /* FIXME: addr is not only env->pc */
        do_stack_frame(env, &sp, 2, oldsr, env->pc, retaddr);
    } else if (is_hw && oldsr & SR_M &&
               cs->exception_index >= EXCP_SPURIOUS &&
               cs->exception_index <= EXCP_INT_LEVEL_7) {
        /*
         * Interrupt taken while SR.M is set: push a normal (format 0)
         * frame on the master stack, then clear SR.M — switching %sp to
         * the interrupt stack — and push a format 1 throwaway frame
         * there as well.
         */
        do_stack_frame(env, &sp, 0, oldsr, 0, retaddr);
        oldsr = sr;
        env->aregs[7] = sp;
        cpu_m68k_set_sr(env, sr &= ~SR_M);
        sp = env->aregs[7];
        if (!m68k_feature(env, M68K_FEATURE_UNALIGNED_DATA)) {
            sp &= ~1;
        }
        do_stack_frame(env, &sp, 1, oldsr, 0, retaddr);
    } else {
        do_stack_frame(env, &sp, 0, oldsr, 0, retaddr);
    }

    env->aregs[7] = sp;
    /* Jump to vector. */
    env->pc = cpu_ldl_mmuidx_ra(env, env->vbr + vector, MMU_KERNEL_IDX, 0);
}
432
433static void do_interrupt_all(CPUM68KState *env, int is_hw)
434{
435 if (m68k_feature(env, M68K_FEATURE_M68000)) {
436 m68k_interrupt_all(env, is_hw);
437 return;
438 }
439 cf_interrupt_all(env, is_hw);
440}
441
442void m68k_cpu_do_interrupt(CPUState *cs)
443{
444 M68kCPU *cpu = M68K_CPU(cs);
445 CPUM68KState *env = &cpu->env;
446
447 do_interrupt_all(env, 0);
448}
449
/* Deliver a hardware interrupt (is_hw = 1). */
static inline void do_interrupt_m68k_hardirq(CPUM68KState *env)
{
    do_interrupt_all(env, 1);
}
454
/*
 * Handle a failed memory transaction (bus error).  On 68040 this fills
 * in the special status word (SSW) and fault address, then raises an
 * access fault; on other models the error is silently ignored.
 */
void m68k_cpu_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
                                 unsigned size, MMUAccessType access_type,
                                 int mmu_idx, MemTxAttrs attrs,
                                 MemTxResult response, uintptr_t retaddr)
{
    M68kCPU *cpu = M68K_CPU(cs);
    CPUM68KState *env = &cpu->env;

    cpu_restore_state(cs, retaddr, true);

    if (m68k_feature(env, M68K_FEATURE_M68040)) {
        env->mmu.mmusr = 0;

        /*
         * According to the MC68040 users manual the ATC bit of the SSW is
         * used to distinguish between ATC faults and physical bus errors.
         * In the case of a bus error e.g. during nubus read from an
         * unassigned address, the ATC bit in the SSW is cleared.
         */
        if (response != MEMTX_DECODE_ERROR) {
            env->mmu.ssw |= M68K_ATC_040;
        }

        /* FIXME: manage MMU table access error */
        env->mmu.ssw &= ~M68K_TM_040;
        if (env->sr & SR_S) {
            /* SUPERVISOR */
            env->mmu.ssw |= M68K_TM_040_SUPER;
        }
        if (access_type == MMU_INST_FETCH) {
            /* instruction or data */
            env->mmu.ssw |= M68K_TM_040_CODE;
        } else {
            env->mmu.ssw |= M68K_TM_040_DATA;
        }
        env->mmu.ssw &= ~M68K_BA_SIZE_MASK;
        switch (size) {
        case 1:
            env->mmu.ssw |= M68K_BA_SIZE_BYTE;
            break;
        case 2:
            env->mmu.ssw |= M68K_BA_SIZE_WORD;
            break;
        case 4:
            env->mmu.ssw |= M68K_BA_SIZE_LONG;
            break;
        }

        if (access_type != MMU_DATA_STORE) {
            env->mmu.ssw |= M68K_RW_040;
        }

        env->mmu.ar = addr;

        cs->exception_index = EXCP_ACCESS;
        cpu_loop_exit(cs);
    }
}
511
/*
 * CPUClass::exec_interrupt hook: take a pending hardware interrupt if
 * its level exceeds the current SR interrupt mask.
 */
bool m68k_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    M68kCPU *cpu = M68K_CPU(cs);
    CPUM68KState *env = &cpu->env;

    if (interrupt_request & CPU_INTERRUPT_HARD
        && ((env->sr & SR_I) >> SR_I_SHIFT) < env->pending_level) {
        /*
         * Real hardware gets the interrupt vector via an IACK cycle
         * at this point.  Current emulated hardware doesn't rely on
         * this, so we provide/save the vector when the interrupt is
         * first signalled.
         */
        cs->exception_index = env->pending_vector;
        do_interrupt_m68k_hardirq(env);
        return true;
    }
    return false;
}
531
532#endif
533
/*
 * Raise an exception, unwinding the TB state from host return address
 * @raddr (0 means no unwinding is needed).
 */
static void raise_exception_ra(CPUM68KState *env, int tt, uintptr_t raddr)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = tt;
    cpu_loop_exit_restore(cs, raddr);
}
541
/* Raise an exception without TB unwinding. */
static void raise_exception(CPUM68KState *env, int tt)
{
    raise_exception_ra(env, tt, 0);
}
546
/* Translator-visible entry point for raising exception @tt. */
void HELPER(raise_exception)(CPUM68KState *env, uint32_t tt)
{
    raise_exception(env, tt);
}
551
/*
 * DIVU.W: divide the 32-bit value in Dn by a 16-bit divisor, leaving
 * the remainder in the upper word and the quotient in the lower word.
 */
void HELPER(divuw)(CPUM68KState *env, int destr, uint32_t den)
{
    uint32_t num = env->dregs[destr];
    uint32_t quot, rem;

    if (den == 0) {
        raise_exception_ra(env, EXCP_DIV0, GETPC());
    }
    quot = num / den;
    rem = num % den;

    env->cc_c = 0; /* C is always cleared, even on overflow */
    if (quot > 0xffff) {
        env->cc_v = -1;
        /*
         * On overflow only V is set: N is left untouched and Z is
         * forced clear (nonzero cc_z); the destination is unchanged.
         */
        env->cc_z = 1;
        return;
    }
    env->dregs[destr] = deposit32(quot, 16, 16, rem);
    env->cc_z = (int16_t)quot;
    env->cc_n = (int16_t)quot;
    env->cc_v = 0;
}
578
/*
 * DIVS.W: signed divide of the 32-bit value in Dn by a 16-bit divisor;
 * remainder goes to the upper word, quotient to the lower word.
 * NOTE(review): num == INT32_MIN with den == -1 makes `num / den`
 * undefined behavior in C — confirm the intended result here.
 */
void HELPER(divsw)(CPUM68KState *env, int destr, int32_t den)
{
    int32_t num = env->dregs[destr];
    uint32_t quot, rem;

    if (den == 0) {
        raise_exception_ra(env, EXCP_DIV0, GETPC());
    }
    quot = num / den;
    rem = num % den;

    env->cc_c = 0; /* C is always cleared, even on overflow */
    if (quot != (int16_t)quot) {
        /* quotient does not fit in 16 signed bits: overflow */
        env->cc_v = -1;
        /*
         * On overflow only V is set: N is left untouched and Z is
         * forced clear (nonzero cc_z); the destination is unchanged.
         */
        env->cc_z = 1;
        return;
    }
    env->dregs[destr] = deposit32(quot, 16, 16, rem);
    env->cc_z = (int16_t)quot;
    env->cc_n = (int16_t)quot;
    env->cc_v = 0;
}
606
607void HELPER(divul)(CPUM68KState *env, int numr, int regr, uint32_t den)
608{
609 uint32_t num = env->dregs[numr];
610 uint32_t quot, rem;
611
612 if (den == 0) {
613 raise_exception_ra(env, EXCP_DIV0, GETPC());
614 }
615 quot = num / den;
616 rem = num % den;
617
618 env->cc_c = 0;
619 env->cc_z = quot;
620 env->cc_n = quot;
621 env->cc_v = 0;
622
623 if (m68k_feature(env, M68K_FEATURE_CF_ISA_A)) {
624 if (numr == regr) {
625 env->dregs[numr] = quot;
626 } else {
627 env->dregs[regr] = rem;
628 }
629 } else {
630 env->dregs[regr] = rem;
631 env->dregs[numr] = quot;
632 }
633}
634
635void HELPER(divsl)(CPUM68KState *env, int numr, int regr, int32_t den)
636{
637 int32_t num = env->dregs[numr];
638 int32_t quot, rem;
639
640 if (den == 0) {
641 raise_exception_ra(env, EXCP_DIV0, GETPC());
642 }
643 quot = num / den;
644 rem = num % den;
645
646 env->cc_c = 0;
647 env->cc_z = quot;
648 env->cc_n = quot;
649 env->cc_v = 0;
650
651 if (m68k_feature(env, M68K_FEATURE_CF_ISA_A)) {
652 if (numr == regr) {
653 env->dregs[numr] = quot;
654 } else {
655 env->dregs[regr] = rem;
656 }
657 } else {
658 env->dregs[regr] = rem;
659 env->dregs[numr] = quot;
660 }
661}
662
/*
 * DIVU.L with a 64-bit dividend (Dr:Dq): unsigned 64/32 divide.
 * Quotient goes to Dq (numr), remainder to Dr (regr).
 */
void HELPER(divull)(CPUM68KState *env, int numr, int regr, uint32_t den)
{
    uint64_t num = deposit64(env->dregs[numr], 32, 32, env->dregs[regr]);
    uint64_t quot;
    uint32_t rem;

    if (den == 0) {
        raise_exception_ra(env, EXCP_DIV0, GETPC());
    }
    quot = num / den;
    rem = num % den;

    env->cc_c = 0; /* C is always cleared, even on overflow */
    if (quot > 0xffffffffULL) {
        env->cc_v = -1;
        /*
         * On overflow only V is set: N is left untouched and Z is
         * forced clear (nonzero cc_z); the registers are unchanged.
         */
        env->cc_z = 1;
        return;
    }
    env->cc_z = quot;
    env->cc_n = quot;
    env->cc_v = 0;

    /*
     * If Dq and Dr are the same register the quotient must win,
     * therefore Dq (numr) is written last.
     */
    env->dregs[regr] = rem;
    env->dregs[numr] = quot;
}
697
/*
 * DIVS.L with a 64-bit dividend (Dr:Dq): signed 64/32 divide.
 * Quotient goes to Dq (numr), remainder to Dr (regr).
 * NOTE(review): num == INT64_MIN with den == -1 makes `num / den`
 * undefined behavior in C — confirm the intended result here.
 */
void HELPER(divsll)(CPUM68KState *env, int numr, int regr, int32_t den)
{
    int64_t num = deposit64(env->dregs[numr], 32, 32, env->dregs[regr]);
    int64_t quot;
    int32_t rem;

    if (den == 0) {
        raise_exception_ra(env, EXCP_DIV0, GETPC());
    }
    quot = num / den;
    rem = num % den;

    env->cc_c = 0; /* C is always cleared, even on overflow */
    if (quot != (int32_t)quot) {
        /* quotient does not fit in 32 signed bits: overflow */
        env->cc_v = -1;
        /*
         * On overflow only V is set: N is left untouched and Z is
         * forced clear (nonzero cc_z); the registers are unchanged.
         */
        env->cc_z = 1;
        return;
    }
    env->cc_z = quot;
    env->cc_n = quot;
    env->cc_v = 0;

    /*
     * If Dq and Dr are the same register the quotient must win,
     * therefore Dq (numr) is written last.
     */
    env->dregs[regr] = rem;
    env->dregs[numr] = quot;
}
732
733
/*
 * CAS2.W: double compare-and-swap on two word operands.  The register
 * numbers are packed into @regs (Dc1:Dc2:Du1:Du2, 3 bits each).
 * NOTE(review): the two accesses are performed non-atomically — there
 * is no parallel variant of the word form in this file; presumably the
 * translator serializes it.  Verify against the caller.
 */
void HELPER(cas2w)(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2)
{
    uint32_t Dc1 = extract32(regs, 9, 3);
    uint32_t Dc2 = extract32(regs, 6, 3);
    uint32_t Du1 = extract32(regs, 3, 3);
    uint32_t Du2 = extract32(regs, 0, 3);
    int16_t c1 = env->dregs[Dc1];
    int16_t c2 = env->dregs[Dc2];
    int16_t u1 = env->dregs[Du1];
    int16_t u2 = env->dregs[Du2];
    int16_t l1, l2;
    uintptr_t ra = GETPC();

    l1 = cpu_lduw_data_ra(env, a1, ra);
    l2 = cpu_lduw_data_ra(env, a2, ra);
    if (l1 == c1 && l2 == c2) {
        /* both comparisons succeeded: store the update values */
        cpu_stw_data_ra(env, a1, u1, ra);
        cpu_stw_data_ra(env, a2, u2, ra);
    }

    /* Flags reflect the first comparison that failed (or the second). */
    if (c1 != l1) {
        env->cc_n = l1;
        env->cc_v = c1;
    } else {
        env->cc_n = l2;
        env->cc_v = c2;
    }
    env->cc_op = CC_OP_CMPW;
    /* On failure, the memory values are loaded into Dc1/Dc2. */
    env->dregs[Dc1] = deposit32(env->dregs[Dc1], 0, 16, l1);
    env->dregs[Dc2] = deposit32(env->dregs[Dc2], 0, 16, l2);
}
765
/*
 * CAS2.L: double compare-and-swap on two long operands.
 * In a parallel context the operation must be atomic: that is only
 * possible when the two addresses form one aligned 64-bit word, via a
 * 64-bit cmpxchg; otherwise we exit to the slow path to serialize.
 */
static void do_cas2l(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2,
                     bool parallel)
{
    uint32_t Dc1 = extract32(regs, 9, 3);
    uint32_t Dc2 = extract32(regs, 6, 3);
    uint32_t Du1 = extract32(regs, 3, 3);
    uint32_t Du2 = extract32(regs, 0, 3);
    uint32_t c1 = env->dregs[Dc1];
    uint32_t c2 = env->dregs[Dc2];
    uint32_t u1 = env->dregs[Du1];
    uint32_t u2 = env->dregs[Du2];
    uint32_t l1, l2;
    uintptr_t ra = GETPC();
#if defined(CONFIG_ATOMIC64)
    int mmu_idx = cpu_mmu_index(env, 0);
    MemOpIdx oi = make_memop_idx(MO_BEUQ, mmu_idx);
#endif

    if (parallel) {
        /* We're executing in a parallel context -- must be atomic. */
#ifdef CONFIG_ATOMIC64
        uint64_t c, u, l;
        if ((a1 & 7) == 0 && a2 == a1 + 4) {
            /* a1 is the aligned half: operand 1 in the high word */
            c = deposit64(c2, 32, 32, c1);
            u = deposit64(u2, 32, 32, u1);
            l = cpu_atomic_cmpxchgq_be_mmu(env, a1, c, u, oi, ra);
            l1 = l >> 32;
            l2 = l;
        } else if ((a2 & 7) == 0 && a1 == a2 + 4) {
            /* a2 is the aligned half: operand 2 in the high word */
            c = deposit64(c1, 32, 32, c2);
            u = deposit64(u1, 32, 32, u2);
            l = cpu_atomic_cmpxchgq_be_mmu(env, a2, c, u, oi, ra);
            l2 = l >> 32;
            l1 = l;
        } else
#endif
        {
            /* Tell the main loop we need to serialize this insn. */
            cpu_loop_exit_atomic(env_cpu(env), ra);
        }
    } else {
        /* We're executing in a serial context -- no need to be atomic. */
        l1 = cpu_ldl_data_ra(env, a1, ra);
        l2 = cpu_ldl_data_ra(env, a2, ra);
        if (l1 == c1 && l2 == c2) {
            cpu_stl_data_ra(env, a1, u1, ra);
            cpu_stl_data_ra(env, a2, u2, ra);
        }
    }

    /* Flags reflect the first comparison that failed (or the second). */
    if (c1 != l1) {
        env->cc_n = l1;
        env->cc_v = c1;
    } else {
        env->cc_n = l2;
        env->cc_v = c2;
    }
    env->cc_op = CC_OP_CMPL;
    /* On failure, the memory values are loaded into Dc1/Dc2. */
    env->dregs[Dc1] = l1;
    env->dregs[Dc2] = l2;
}
827
/* CAS2.L in a serial (single-threaded) context. */
void HELPER(cas2l)(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2)
{
    do_cas2l(env, regs, a1, a2, false);
}
832
/* CAS2.L in a parallel (MTTCG) context: must execute atomically. */
void HELPER(cas2l_parallel)(CPUM68KState *env, uint32_t regs, uint32_t a1,
                            uint32_t a2)
{
    do_cas2l(env, regs, a1, a2, true);
}
838
/* Canonicalized description of a memory bitfield (see bf_prep). */
struct bf_data {
    uint32_t addr;   /* (re)aligned base address of the container */
    uint32_t bofs;   /* bit offset of the field within a 64-bit BE word */
    uint32_t blen;   /* container selector for bf_load/bf_store (0..4) */
    uint32_t len;    /* field length in bits, 1..32 */
};
845
/*
 * Normalize a (address, bit offset, length) bitfield specification into
 * a container address/width plus a bit offset within a 64-bit
 * big-endian word, so that the bf_* helpers can operate uniformly.
 */
static struct bf_data bf_prep(uint32_t addr, int32_t ofs, uint32_t len)
{
    int bofs, blen;

    /* Bound length; map 0 to 32. */
    len = ((len - 1) & 31) + 1;

    /* Note that ofs is signed; fold it into addr, leaving 0 <= bofs < 8. */
    addr += ofs / 8;
    bofs = ofs % 8;
    if (bofs < 0) {
        bofs += 8;
        addr -= 1;
    }

    /*
     * Compute the number of bytes required (minus one) to
     * satisfy the bitfield.
     */
    blen = (bofs + len - 1) / 8;

    /*
     * Canonicalize the bit offset for data loaded into a 64-bit
     * big-endian word: re-bias bofs (and for the unaligned multi-byte
     * cases also adjust addr) so that the field sits at bit position
     * bofs from the top of the container bf_load will fetch.
     */
    switch (blen) {
    case 0:
        /* single byte container, loaded into bits 63..56 */
        bofs += 56;
        break;
    case 1:
        /* two byte container, loaded into bits 63..48 */
        bofs += 48;
        break;
    case 2:
        /* three bytes: widen to an aligned 4-byte load */
        if (addr & 1) {
            bofs += 8;
            addr -= 1;
        }
        /* fall through */
    case 3:
        /* four byte container, loaded into bits 63..32 */
        bofs += 32;
        break;
    case 4:
        /* five bytes: widen to an aligned 8-byte load */
        if (addr & 3) {
            bofs += 8 * (addr & 3);
            addr &= -4;
        }
        break;
    default:
        g_assert_not_reached();
    }

    return (struct bf_data){
        .addr = addr,
        .bofs = bofs,
        .blen = blen,
        .len = len,
    };
}
906
907static uint64_t bf_load(CPUM68KState *env, uint32_t addr, int blen,
908 uintptr_t ra)
909{
910 switch (blen) {
911 case 0:
912 return cpu_ldub_data_ra(env, addr, ra);
913 case 1:
914 return cpu_lduw_data_ra(env, addr, ra);
915 case 2:
916 case 3:
917 return cpu_ldl_data_ra(env, addr, ra);
918 case 4:
919 return cpu_ldq_data_ra(env, addr, ra);
920 default:
921 g_assert_not_reached();
922 }
923}
924
925static void bf_store(CPUM68KState *env, uint32_t addr, int blen,
926 uint64_t data, uintptr_t ra)
927{
928 switch (blen) {
929 case 0:
930 cpu_stb_data_ra(env, addr, data, ra);
931 break;
932 case 1:
933 cpu_stw_data_ra(env, addr, data, ra);
934 break;
935 case 2:
936 case 3:
937 cpu_stl_data_ra(env, addr, data, ra);
938 break;
939 case 4:
940 cpu_stq_data_ra(env, addr, data, ra);
941 break;
942 default:
943 g_assert_not_reached();
944 }
945}
946
947uint32_t HELPER(bfexts_mem)(CPUM68KState *env, uint32_t addr,
948 int32_t ofs, uint32_t len)
949{
950 uintptr_t ra = GETPC();
951 struct bf_data d = bf_prep(addr, ofs, len);
952 uint64_t data = bf_load(env, d.addr, d.blen, ra);
953
954 return (int64_t)(data << d.bofs) >> (64 - d.len);
955}
956
957uint64_t HELPER(bfextu_mem)(CPUM68KState *env, uint32_t addr,
958 int32_t ofs, uint32_t len)
959{
960 uintptr_t ra = GETPC();
961 struct bf_data d = bf_prep(addr, ofs, len);
962 uint64_t data = bf_load(env, d.addr, d.blen, ra);
963
964
965
966
967
968 data <<= d.bofs;
969 data >>= 64 - d.len;
970 data |= data << (64 - d.len);
971
972 return data;
973}
974
975uint32_t HELPER(bfins_mem)(CPUM68KState *env, uint32_t addr, uint32_t val,
976 int32_t ofs, uint32_t len)
977{
978 uintptr_t ra = GETPC();
979 struct bf_data d = bf_prep(addr, ofs, len);
980 uint64_t data = bf_load(env, d.addr, d.blen, ra);
981 uint64_t mask = -1ull << (64 - d.len) >> d.bofs;
982
983 data = (data & ~mask) | (((uint64_t)val << (64 - d.len)) >> d.bofs);
984
985 bf_store(env, d.addr, d.blen, data, ra);
986
987
988 return val << (32 - d.len);
989}
990
991uint32_t HELPER(bfchg_mem)(CPUM68KState *env, uint32_t addr,
992 int32_t ofs, uint32_t len)
993{
994 uintptr_t ra = GETPC();
995 struct bf_data d = bf_prep(addr, ofs, len);
996 uint64_t data = bf_load(env, d.addr, d.blen, ra);
997 uint64_t mask = -1ull << (64 - d.len) >> d.bofs;
998
999 bf_store(env, d.addr, d.blen, data ^ mask, ra);
1000
1001 return ((data & mask) << d.bofs) >> 32;
1002}
1003
1004uint32_t HELPER(bfclr_mem)(CPUM68KState *env, uint32_t addr,
1005 int32_t ofs, uint32_t len)
1006{
1007 uintptr_t ra = GETPC();
1008 struct bf_data d = bf_prep(addr, ofs, len);
1009 uint64_t data = bf_load(env, d.addr, d.blen, ra);
1010 uint64_t mask = -1ull << (64 - d.len) >> d.bofs;
1011
1012 bf_store(env, d.addr, d.blen, data & ~mask, ra);
1013
1014 return ((data & mask) << d.bofs) >> 32;
1015}
1016
1017uint32_t HELPER(bfset_mem)(CPUM68KState *env, uint32_t addr,
1018 int32_t ofs, uint32_t len)
1019{
1020 uintptr_t ra = GETPC();
1021 struct bf_data d = bf_prep(addr, ofs, len);
1022 uint64_t data = bf_load(env, d.addr, d.blen, ra);
1023 uint64_t mask = -1ull << (64 - d.len) >> d.bofs;
1024
1025 bf_store(env, d.addr, d.blen, data | mask, ra);
1026
1027 return ((data & mask) << d.bofs) >> 32;
1028}
1029
1030uint32_t HELPER(bfffo_reg)(uint32_t n, uint32_t ofs, uint32_t len)
1031{
1032 return (n ? clz32(n) : len) + ofs;
1033}
1034
/*
 * BFFFO on memory: find the first set bit of the bitfield.
 * Returns the extracted field in the high 32 bits (for the flags) and
 * the FFO result in the low 32 bits.
 */
uint64_t HELPER(bfffo_mem)(CPUM68KState *env, uint32_t addr,
                           int32_t ofs, uint32_t len)
{
    uintptr_t ra = GETPC();
    struct bf_data d = bf_prep(addr, ofs, len);
    uint64_t data = bf_load(env, d.addr, d.blen, ra);
    uint64_t mask = -1ull << (64 - d.len) >> d.bofs;
    uint64_t n = (data & mask) << d.bofs;
    uint32_t ffo = helper_bfffo_reg(n >> 32, ofs, d.len);

    /*
     * Because of the mask and the left-justifying shift (len <= 32),
     * the low 32 bits of N are already zero, so the two results can
     * simply be OR-ed together.
     */
    return n | ffo;
}
1052
/*
 * CHK: trap if VAL is below zero or above the bound UB.
 * Flags implemented here: N mirrors VAL; cc_c encodes the out-of-bounds
 * test, covering both a non-negative and a negative upper bound (the
 * architecture leaves C, V and Z formally undefined).
 */
void HELPER(chk)(CPUM68KState *env, int32_t val, int32_t ub)
{
    env->cc_n = val;
    env->cc_c = 0 <= ub ? val < 0 || val > ub : val > ub && val < 0;

    if (val < 0 || val > ub) {
        CPUState *cs = env_cpu(env);

        /* Recover PC and CC_OP for the beginning of the insn. */
        cpu_restore_state(cs, GETPC(), true);

        /* flags have been modified by gen_flush_flags() */
        env->cc_op = CC_OP_FLAGS;
        /* Adjust PC to point after the 2-byte insn. */
        env->pc += 2;

        cs->exception_index = EXCP_CHK;
        cpu_loop_exit(cs);
    }
}
1083
/*
 * CHK2/CMP2: trap if VAL lies outside the bounds [LB, UB].
 * Z is set when VAL equals either bound; cc_c encodes the out-of-range
 * test, covering both ordered (lb <= ub) and wrapped bounds.
 */
void HELPER(chk2)(CPUM68KState *env, int32_t val, int32_t lb, int32_t ub)
{
    env->cc_z = val != lb && val != ub;
    env->cc_c = lb <= ub ? val < lb || val > ub : val > ub && val < lb;

    if (env->cc_c) {
        CPUState *cs = env_cpu(env);

        /* Recover PC and CC_OP for the beginning of the insn. */
        cpu_restore_state(cs, GETPC(), true);

        /* flags have been modified by gen_flush_flags() */
        env->cc_op = CC_OP_FLAGS;
        /* Adjust PC to point after the 4-byte insn. */
        env->pc += 4;

        cs->exception_index = EXCP_CHK;
        cpu_loop_exit(cs);
    }
}
1115