1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include "qemu/osdep.h"
20#include "cpu.h"
21#include "exec/helper-proto.h"
22#include "internals.h"
23#include "exec/exec-all.h"
24#include "exec/cpu_ldst.h"
25#include "sysemu/cpus.h"
26
27#define SIGNBIT (uint32_t)0x80000000
28#define SIGNBIT64 ((uint64_t)1 << 63)
29
30static void raise_exception(CPUARMState *env, uint32_t excp,
31 uint32_t syndrome, uint32_t target_el)
32{
33 CPUState *cs = CPU(arm_env_get_cpu(env));
34
35 assert(!excp_is_internal(excp));
36 cs->exception_index = excp;
37 env->exception.syndrome = syndrome;
38 env->exception.target_el = target_el;
39 cpu_loop_exit(cs);
40}
41
42static int exception_target_el(CPUARMState *env)
43{
44 int target_el = MAX(1, arm_current_el(env));
45
46
47
48
49 if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
50 target_el = 3;
51 }
52
53 return target_el;
54}
55
56uint32_t HELPER(neon_tbl)(CPUARMState *env, uint32_t ireg, uint32_t def,
57 uint32_t rn, uint32_t maxindex)
58{
59 uint32_t val;
60 uint32_t tmp;
61 int index;
62 int shift;
63 uint64_t *table;
64 table = (uint64_t *)&env->vfp.regs[rn];
65 val = 0;
66 for (shift = 0; shift < 32; shift += 8) {
67 index = (ireg >> shift) & 0xff;
68 if (index < maxindex) {
69 tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
70 val |= tmp << shift;
71 } else {
72 val |= def & (0xff << shift);
73 }
74 }
75 return val;
76}
77
78#if !defined(CONFIG_USER_ONLY)
79
80#include "hw/remote-port.h"
81
82static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
83 unsigned int target_el,
84 bool same_el,
85 bool s1ptw, bool is_write,
86 int fsc)
87{
88 uint32_t syn;
89
90
91
92
93
94
95
96
97
98
99
100
101 if (!(template_syn & ARM_EL_ISV) || target_el != 2 || s1ptw) {
102 syn = syn_data_abort_no_iss(same_el,
103 0, 0, s1ptw, is_write, fsc);
104 } else {
105
106
107
108
109 syn = syn_data_abort_with_iss(same_el,
110 0, 0, 0, 0, 0,
111 0, 0, s1ptw, is_write, fsc,
112 false);
113
114 syn |= template_syn;
115 }
116 return syn;
117}
118
119
120
121
122
/* Handle a softmmu TLB miss: walk the guest translation regime and
 * either install the mapping or raise the architectural prefetch/data
 * abort.  @retaddr, if non-zero, is the host return address used to
 * restore the guest CPU state before taking the exception.
 */
void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
              int mmu_idx, uintptr_t retaddr)
{
    bool ret;
    uint32_t fsr = 0;
    ARMMMUFaultInfo fi = {};

    ret = arm_tlb_fill(cs, addr, access_type, mmu_idx, &fsr, &fi);
    if (unlikely(ret)) {
        ARMCPU *cpu = ARM_CPU(cs);
        CPUARMState *env = &cpu->env;
        uint32_t syn, exc;
        unsigned int target_el;
        bool same_el;

        if (retaddr) {
            /* now we have a real cpu fault */
            cpu_restore_state(cs, retaddr);
        }

        target_el = exception_target_el(env);
        if (fi.stage2) {
            /* Stage-2 faults are always taken to EL2 and report the
             * faulting IPA in HPFAR_EL2.
             */
            target_el = 2;
            env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
        }
        same_el = arm_current_el(env) == target_el;

        /* Clear bit 9 (the short-descriptor FSR format indicator),
         * which is not part of the syndrome fault-status code.
         */
        syn = fsr & ~(1 << 9);

        if (access_type == MMU_INST_FETCH) {
            syn = syn_insn_abort(same_el, 0, fi.s1ptw, syn);
            exc = EXCP_PREFETCH_ABORT;
        } else {
            syn = merge_syn_data_abort(env->exception.syndrome, target_el,
                                       same_el, fi.s1ptw,
                                       access_type == MMU_DATA_STORE, syn);
            if (access_type == MMU_DATA_STORE
                && arm_feature(env, ARM_FEATURE_V6)) {
                /* Set the WnR bit (bit 11) of the FSR for stores on v6+. */
                fsr |= (1 << 11);
            }
            exc = EXCP_DATA_ABORT;
        }

        env->exception.vaddress = addr;
        env->exception.fsr = fsr;
        raise_exception(env, exc, syn, target_el);
    }
}
174
175
/* Raise a data abort for an unaligned access (alignment fault,
 * fault status code 0x21), as invoked by the softmmu core.
 */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    int target_el;
    bool same_el;
    uint32_t syn;

    if (retaddr) {
        /* now we have a real cpu fault */
        cpu_restore_state(cs, retaddr);
    }

    target_el = exception_target_el(env);
    same_el = (arm_current_el(env) == target_el);

    env->exception.vaddress = vaddr;

    /* The FSR encoding of an alignment fault depends on whether the
     * stage-1 regime uses the LPAE long-descriptor format (bit 9 set,
     * fsc 0x21) or the short-descriptor format (0x1).
     */
    if (arm_s1_regime_using_lpae_format(env, cpu_mmu_index(env, false))) {
        env->exception.fsr = (1 << 9) | 0x21;
    } else {
        env->exception.fsr = 0x1;
    }

    if (access_type == MMU_DATA_STORE && arm_feature(env, ARM_FEATURE_V6)) {
        /* Set the WnR bit (bit 11) for stores on v6+. */
        env->exception.fsr |= (1 << 11);
    }

    syn = merge_syn_data_abort(env->exception.syndrome, target_el,
                               same_el, 0, access_type == MMU_DATA_STORE,
                               0x21);
    raise_exception(env, EXCP_DATA_ABORT, syn, target_el);
}
214
215#endif
216
217uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
218{
219 uint32_t res = a + b;
220 if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT))
221 env->QF = 1;
222 return res;
223}
224
225uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
226{
227 uint32_t res = a + b;
228 if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
229 env->QF = 1;
230 res = ~(((int32_t)a >> 31) ^ SIGNBIT);
231 }
232 return res;
233}
234
235uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
236{
237 uint32_t res = a - b;
238 if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
239 env->QF = 1;
240 res = ~(((int32_t)a >> 31) ^ SIGNBIT);
241 }
242 return res;
243}
244
245uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val)
246{
247 uint32_t res;
248 if (val >= 0x40000000) {
249 res = ~SIGNBIT;
250 env->QF = 1;
251 } else if (val <= (int32_t)0xc0000000) {
252 res = SIGNBIT;
253 env->QF = 1;
254 } else {
255 res = val << 1;
256 }
257 return res;
258}
259
260uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
261{
262 uint32_t res = a + b;
263 if (res < a) {
264 env->QF = 1;
265 res = ~0;
266 }
267 return res;
268}
269
270uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
271{
272 uint32_t res = a - b;
273 if (res > a) {
274 env->QF = 1;
275 res = 0;
276 }
277 return res;
278}
279
280
281static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
282{
283 int32_t top;
284 uint32_t mask;
285
286 top = val >> shift;
287 mask = (1u << shift) - 1;
288 if (top > 0) {
289 env->QF = 1;
290 return mask;
291 } else if (top < -1) {
292 env->QF = 1;
293 return ~mask;
294 }
295 return val;
296}
297
298
299static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
300{
301 uint32_t max;
302
303 max = (1u << shift) - 1;
304 if (val < 0) {
305 env->QF = 1;
306 return 0;
307 } else if (val > max) {
308 env->QF = 1;
309 return max;
310 }
311 return val;
312}
313
314
315uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
316{
317 return do_ssat(env, x, shift);
318}
319
320
321uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
322{
323 uint32_t res;
324
325 res = (uint16_t)do_ssat(env, (int16_t)x, shift);
326 res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
327 return res;
328}
329
330
331uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
332{
333 return do_usat(env, x, shift);
334}
335
336
337uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
338{
339 uint32_t res;
340
341 res = (uint16_t)do_usat(env, (int16_t)x, shift);
342 res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
343 return res;
344}
345
346void HELPER(setend)(CPUARMState *env)
347{
348 env->uncached_cpsr ^= CPSR_E;
349}
350
351
352
353
354
/* Decide whether a WFI (@is_wfe false) or WFE (@is_wfe true) executed
 * at the current EL should be trapped to a higher EL, per the
 * SCTLR.nTWx, HCR.TWx and SCR.TWx controls.  Returns the target EL of
 * the trap, or 0 if the instruction should execute normally.  Checks
 * are ordered lowest target EL first, matching architectural priority.
 */
static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
{
    int cur_el = arm_current_el(env);
    uint64_t mask;

    /* From EL0 on a v8 CPU, SCTLR.nTWE/nTWI being *clear* means the
     * instruction traps to EL1 (or EL3 when secure and EL3 is AArch32,
     * since Secure EL1 does not exist in that configuration).
     */
    if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
        int target_el;

        mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
        if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) {
            /* Secure EL0 with Secure PL1 hosted at EL3. */
            target_el = 3;
        } else {
            target_el = 1;
        }

        if (!(env->cp15.sctlr_el[target_el] & mask)) {
            return target_el;
        }
    }

    /* HCR.TWE/TWI trap to EL2; only relevant in non-secure state
     * below EL2.
     */
    if (cur_el < 2 && !arm_is_secure(env)) {
        mask = (is_wfe) ? HCR_TWE : HCR_TWI;
        if (env->cp15.hcr_el2 & mask) {
            return 2;
        }
    }

    /* SCR.TWE/TWI trap to EL3 from any lower EL. */
    if (cur_el < 3) {
        mask = (is_wfe) ? SCR_TWE : SCR_TWI;
        if (env->cp15.scr_el3 & mask) {
            return 3;
        }
    }

    return 0;
}
400
/* WFI: halt the CPU until an interrupt, honouring WFI trap controls
 * and signalling the external WFI pin (used for power management in
 * this fork).
 */
void HELPER(wfi)(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    ARMCPU *cpu = arm_env_get_cpu(env);
    int target_el = check_wfx_trap(env, false);

    if (cpu_has_work(cs)) {
        /* Work is already pending; don't enter the low-power state,
         * just drop back to the main loop.
         */
        cs->exception_index = -1;
        cpu_loop_exit(cs);
        return;
    }

    if (target_el) {
        /* Rewind the PC so the trapped WFI is reported at the
         * instruction itself.  NOTE(review): assumes a 4-byte insn —
         * a Thumb-encoded WFI would need a 2-byte adjustment; confirm
         * against the callers.
         */
        env->pc -= 4;
        raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0), target_el);
    }

    if (use_icount) {
        /* With icount, halting would stop time; yield instead. */
        cs->exception_index = EXCP_YIELD;
    } else {
        cs->exception_index = EXCP_HLT;
        cs->halted = 1;
    }

    /* Signal the external WFI state pin. */
    cpu->is_in_wfi = true;
    qemu_set_irq(cpu->wfi, 1);

    cpu_loop_exit(cs);
}
430
431void HELPER(wfe)(CPUARMState *env)
432{
433 ARMCPU *ac = ARM_CPU(arm_env_get_cpu(env));
434 CPUState *cs = CPU(ac);
435
436 switch (ac->pe) {
437 case 1:
438 ac->pe = 0;
439 return;
440 case 0:
441 cs->exception_index = EXCP_YIELD;
442 cpu_loop_exit(cs);
443 return;
444 default:
445 g_assert_not_reached();
446 }
447}
448
/* SEV: set the event register on every CPU and wake any halted CPUs,
 * except ourselves and CPUs held halted or in reset by external pins.
 */
void HELPER(sev)(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    CPUState *i;

    for (i = first_cpu; i; i = CPU_NEXT(i)) {
        ARMCPU *ac = ARM_CPU(i);
        /* Every CPU (including this one) records the event. */
        ac->pe = 1;
        if (i == cs || i->halt_pin || i->reset_pin || i->arch_halt_pin) {
            /* Don't wake ourselves, nor CPUs externally held. */
            continue;
        }
        cpu_reset_interrupt(i, CPU_INTERRUPT_HALT);
        cpu_interrupt(i, CPU_INTERRUPT_EXITTB);
        i->halted = 0;
    }
}
465
466void HELPER(sevl)(CPUARMState *env)
467{
468 ARMCPU *ac = arm_env_get_cpu(env);
469
470 ac->pe = 1;
471}
472
473void HELPER(yield)(CPUARMState *env)
474{
475 ARMCPU *cpu = arm_env_get_cpu(env);
476 CPUState *cs = CPU(cpu);
477
478
479
480
481
482 cs->exception_index = EXCP_YIELD;
483 cpu_loop_exit(cs);
484}
485
486
487
488
489
490
491
492void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
493{
494 CPUState *cs = CPU(arm_env_get_cpu(env));
495
496 assert(excp_is_internal(excp));
497 cs->exception_index = excp;
498 cpu_loop_exit(cs);
499}
500
501
502void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
503 uint32_t syndrome, uint32_t target_el)
504{
505 raise_exception(env, excp, syndrome, target_el);
506}
507
508uint32_t HELPER(cpsr_read)(CPUARMState *env)
509{
510 return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED);
511}
512
513void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
514{
515 cpsr_write(env, val, mask, CPSRWriteByInstr);
516}
517
518
/* Write the CPSR for an exception return (e.g. MOVS pc,lr / RFE / SUBS
 * pc,lr,#imm).  Unlike a plain MSR this may switch mode, so the
 * EL-change hooks must run afterwards.
 */
void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
{
    cpsr_write(env, val, CPSR_ERET_MASK, CPSRWriteExceptionReturn);

    /* Generated code has already stored the new PC value, but
     * without masking out its low bits, because which bits need
     * masking depends on whether we're returning to Thumb or ARM
     * state.  Do the masking now that the mode switch has set
     * env->thumb appropriately.
     */
    env->regs[15] &= (env->thumb ? ~1 : ~3);

    arm_call_el_change_hook(arm_env_get_cpu(env));
}
532
533
534uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
535{
536 uint32_t val;
537
538 if (regno == 13) {
539 val = env->banked_r13[BANK_USRSYS];
540 } else if (regno == 14) {
541 val = env->banked_r14[BANK_USRSYS];
542 } else if (regno >= 8
543 && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
544 val = env->usr_regs[regno - 8];
545 } else {
546 val = env->regs[regno];
547 }
548 return val;
549}
550
551void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
552{
553 if (regno == 13) {
554 env->banked_r13[BANK_USRSYS] = val;
555 } else if (regno == 14) {
556 env->banked_r14[BANK_USRSYS] = val;
557 } else if (regno >= 8
558 && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
559 env->usr_regs[regno - 8] = val;
560 } else {
561 env->regs[regno] = val;
562 }
563}
564
565void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
566{
567 if ((env->uncached_cpsr & CPSR_M) == mode) {
568 env->regs[13] = val;
569 } else {
570 env->banked_r13[bank_number(mode)] = val;
571 }
572}
573
574uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
575{
576 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SYS) {
577
578
579
580 raise_exception(env, EXCP_UDEF, syn_uncategorized(),
581 exception_target_el(env));
582 }
583
584 if ((env->uncached_cpsr & CPSR_M) == mode) {
585 return env->regs[13];
586 } else {
587 return env->banked_r13[bank_number(mode)];
588 }
589}
590
/* Raise UNDEF if an MSR/MRS (banked) access to (tgtmode, regno) is one
 * of the UNPREDICTABLE cases from the current mode; the architecture
 * lets us choose the behaviour and we choose to UNDEF.  Returns
 * normally when the access is permitted.
 */
static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode,
                                      uint32_t regno)
{
    int curmode = env->uncached_cpsr & CPSR_M;

    if (curmode == tgtmode) {
        /* Accessing your own mode's banked registers this way is
         * UNPREDICTABLE.
         */
        goto undef;
    }

    if (tgtmode == ARM_CPU_MODE_USR) {
        switch (regno) {
        case 8 ... 12:
            /* USR r8-r12 are only distinct registers from FIQ mode. */
            if (curmode != ARM_CPU_MODE_FIQ) {
                goto undef;
            }
            break;
        case 13:
            /* SYS shares its SP with USR. */
            if (curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        case 14:
            /* HYP has no banked LR with USR; SYS shares LR with USR. */
            if (curmode == ARM_CPU_MODE_HYP || curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        default:
            break;
        }
    }

    if (tgtmode == ARM_CPU_MODE_HYP) {
        switch (regno) {
        case 17: /* ELR_Hyp */
            if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) {
                goto undef;
            }
            break;
        default:
            /* Other HYP banked registers only from Monitor mode. */
            if (curmode != ARM_CPU_MODE_MON) {
                goto undef;
            }
            break;
        }
    }

    return;

undef:
    raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                    exception_target_el(env));
}
648
/* MSR (banked): write @value to the banked register identified by
 * (tgtmode, regno), after validating the access from the current mode.
 */
void HELPER(msr_banked)(CPUARMState *env, uint32_t value, uint32_t tgtmode,
                        uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSR of the target mode */
        env->banked_spsr[bank_number(tgtmode)] = value;
        break;
    case 17: /* ELR_Hyp */
        env->elr_el[2] = value;
        break;
    case 13:
        env->banked_r13[bank_number(tgtmode)] = value;
        break;
    case 14:
        env->banked_r14[bank_number(tgtmode)] = value;
        break;
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            env->usr_regs[regno - 8] = value;
            break;
        case ARM_CPU_MODE_FIQ:
            env->fiq_regs[regno - 8] = value;
            break;
        default:
            /* Other modes don't bank r8-r12; exc_checks rules them out. */
            g_assert_not_reached();
        }
        break;
    default:
        g_assert_not_reached();
    }
}
683
/* MRS (banked): read the banked register identified by
 * (tgtmode, regno), after validating the access from the current mode.
 */
uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSR of the target mode */
        return env->banked_spsr[bank_number(tgtmode)];
    case 17: /* ELR_Hyp */
        return env->elr_el[2];
    case 13:
        return env->banked_r13[bank_number(tgtmode)];
    case 14:
        return env->banked_r14[bank_number(tgtmode)];
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            return env->usr_regs[regno - 8];
        case ARM_CPU_MODE_FIQ:
            return env->fiq_regs[regno - 8];
        default:
            /* Other modes don't bank r8-r12; exc_checks rules them out. */
            g_assert_not_reached();
        }
    default:
        g_assert_not_reached();
    }
}
710
/* Run the access-permission checks for a coprocessor/system register
 * access.  Raises the appropriate exception (with @syndrome or a
 * replacement) if access is denied; returns normally when allowed.
 */
void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
                                 uint32_t isread)
{
    const ARMCPRegInfo *ri = rip;
    int target_el;

    /* XScale's CPAR gates access to coprocessors 0..13. */
    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    if (!ri->accessfn) {
        /* No per-register access hook: access is allowed. */
        return;
    }

    switch (ri->accessfn(env, ri, isread)) {
    case CP_ACCESS_OK:
        return;
    case CP_ACCESS_TRAP:
        target_el = exception_target_el(env);
        break;
    case CP_ACCESS_TRAP_EL2:
        /* Requesting an EL2 trap from EL3 or from secure state would
         * be a bug in the access function.
         */
        assert(!arm_is_secure(env) && arm_current_el(env) != 3);
        target_el = 2;
        break;
    case CP_ACCESS_TRAP_EL3:
        target_el = 3;
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED:
        /* Report as an UNDEF with an uncategorized syndrome. */
        target_el = exception_target_el(env);
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL2:
        target_el = 2;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL3:
        target_el = 3;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_FP_EL2:
        target_el = 2;
        /* An implementation taking exceptions on a trapped conditional
         * insn only when it passes its condition reports condition
         * code 0xe (AL) in the syndrome.
         */
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    case CP_ACCESS_TRAP_FP_EL3:
        target_el = 3;
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    default:
        g_assert_not_reached();
    }

    raise_exception(env, EXCP_UDEF, syndrome, target_el);
}
773
774void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
775{
776 const ARMCPRegInfo *ri = rip;
777
778 ri->writefn(env, ri, value);
779}
780
781uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
782{
783 const ARMCPRegInfo *ri = rip;
784
785 return ri->readfn(env, ri);
786}
787
788void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
789{
790 const ARMCPRegInfo *ri = rip;
791
792 ri->writefn(env, ri, value);
793}
794
795uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
796{
797 const ARMCPRegInfo *ri = rip;
798
799 return ri->readfn(env, ri);
800}
801
/* MSR (immediate) writes to AArch64 PSTATE fields (SPSel/DAIFSet/
 * DAIFClear).  From EL0 this is only permitted when SCTLR_EL1.UMA is
 * set; SPSel from EL0 is caught at translate time, so we needn't
 * special-case it here.
 */
void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm)
{
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
        uint32_t syndrome = syn_aa64_sysregtrap(0, extract32(op, 0, 3),
                                                extract32(op, 3, 3), 4,
                                                imm, 0x1f, 0);
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    switch (op) {
    case 0x05: /* SPSel */
        update_spsel(env, imm);
        break;
    case 0x1e: /* DAIFSet: set the selected DAIF mask bits */
        env->daif |= (imm << 6) & PSTATE_DAIF;
        break;
    case 0x1f: /* DAIFClear: clear the selected DAIF mask bits */
        env->daif &= ~((imm << 6) & PSTATE_DAIF);
        break;
    default:
        g_assert_not_reached();
    }
}
829
830void HELPER(clear_pstate_ss)(CPUARMState *env)
831{
832 env->pstate &= ~PSTATE_SS;
833}
834
/* Validate an HVC instruction before taking the hypervisor call
 * exception; raises UNDEF when HVC is not permitted in the current
 * configuration.
 */
void HELPER(pre_hvc)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    /* NOTE(review): hard-wired false rather than the actual secure
     * state — confirm this is the intended (upstream FIXME) behaviour.
     */
    bool secure = false;
    bool undef;

    if (arm_is_psci_call(cpu, EXCP_HVC)) {
        /* A valid PSCI call overrides the architecturally mandated
         * HVC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* Without EL2, HVC always UNDEFs. */
        undef = true;
    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* With EL3 present, SCR.HCE gates HVC. */
        undef = !(env->cp15.scr_el3 & SCR_HCE);
    } else {
        /* EL2 but no EL3: HCR.HCD disables HVC. */
        undef = env->cp15.hcr_el2 & HCR_HCD;
    }

    /* HVC is UNDEF in AArch32 secure state, and from AArch64 secure
     * EL1.  (HVC from EL0 is already trapped at translation time.)
     * With 'secure' fixed to false above this check never fires.
     */
    if (secure && (!is_a64(env) || cur_el == 1)) {
        undef = true;
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}
874
/* Validate an SMC instruction before taking the secure monitor call
 * exception; may UNDEF (SMD / no EL3) or route to EL2 (HCR.TSC).
 */
void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool smd = env->cp15.scr_el3 & SCR_SMD;
    /* On v8 with AArch64 EL3, SCR.SMD disables SMC in both security
     * states; with AArch32 EL3 (or v7 + Virtualization extensions) it
     * only applies to non-secure state.
     */
    bool undef = arm_feature(env, ARM_FEATURE_AARCH64) ? smd : smd && !secure;

    if (arm_is_psci_call(cpu, EXCP_SMC)) {
        /* A valid PSCI call overrides the architecturally mandated
         * SMC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL3)) {
        /* Without EL3, SMC always UNDEFs. */
        undef = true;
    } else if (!secure && cur_el == 1 && (env->cp15.hcr_el2 & HCR_TSC)) {
        /* In NS EL1, HCR.TSC routing to EL2 has priority over SMD. */
        raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}
910
911static int el_from_spsr(uint32_t spsr)
912{
913
914
915
916 if (spsr & PSTATE_nRW) {
917 switch (spsr & CPSR_M) {
918 case ARM_CPU_MODE_USR:
919 return 0;
920 case ARM_CPU_MODE_HYP:
921 return 2;
922 case ARM_CPU_MODE_FIQ:
923 case ARM_CPU_MODE_IRQ:
924 case ARM_CPU_MODE_SVC:
925 case ARM_CPU_MODE_ABT:
926 case ARM_CPU_MODE_UND:
927 case ARM_CPU_MODE_SYS:
928 return 1;
929 case ARM_CPU_MODE_MON:
930
931
932
933 default:
934 return -1;
935 }
936 } else {
937 if (extract32(spsr, 1, 1)) {
938
939 return -1;
940 }
941 if (extract32(spsr, 0, 4) == 1) {
942
943 return -1;
944 }
945 return extract32(spsr, 2, 2);
946 }
947}
948
/* AArch64 ERET: restore PSTATE/CPSR from the current EL's SPSR and
 * branch to ELR_ELx.  Illegal returns set PSTATE.IL and stay at the
 * current EL, per the architecture.
 */
void HELPER(exception_return)(CPUARMState *env)
{
    int cur_el = arm_current_el(env);
    unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
    uint32_t spsr = env->banked_spsr[spsr_idx];
    int new_el;
    bool return_to_aa64 = (spsr & PSTATE_nRW) == 0;

    aarch64_save_sp(env, cur_el);

    /* ERET clears the exclusive monitor. */
    env->exclusive_addr = -1;

    /* We must squash the PSTATE.SS bit to zero unless both of the
     * following hold:
     *  1. debug exceptions are currently disabled
     *  2. singlestep will be active in the EL we return to
     * We check 1 here and 2 after we've done the pstate/cpsr write() to
     * transition to the EL we're going to.
     */
    if (arm_generate_debug_exceptions(env)) {
        spsr &= ~PSTATE_SS;
    }

    new_el = el_from_spsr(spsr);
    if (new_el == -1) {
        goto illegal_return;
    }
    if (new_el > cur_el
        || (new_el == 2 && !arm_feature(env, ARM_FEATURE_EL2))) {
        /* Disallow return to a higher EL or to an unimplemented EL2. */
        goto illegal_return;
    }

    if (new_el != 0 && arm_el_is_aa64(env, new_el) != return_to_aa64) {
        /* Return to an EL configured for the other register width. */
        goto illegal_return;
    }

    if (new_el == 2 && arm_is_secure_below_el3(env)) {
        /* Secure EL2 does not exist. */
        goto illegal_return;
    }

    if (new_el == 1 && (env->cp15.hcr_el2 & HCR_TGE)
        && !arm_is_secure_below_el3(env)) {
        /* HCR.TGE routes NS EL1 exceptions to EL2; NS EL1 is illegal. */
        goto illegal_return;
    }

    if (!return_to_aa64) {
        env->aarch64 = 0;
        /* A raw CPSR write is safe: aarch64_sync_64_to_32() sorts the
         * register banks out for us, and el_from_spsr() has already
         * rejected every bad-mode encoding.
         */
        cpsr_write(env, spsr, ~0, CPSRWriteRaw);
        if (!arm_singlestep_active(env)) {
            env->uncached_cpsr &= ~PSTATE_SS;
        }
        aarch64_sync_64_to_32(env);

        /* Mask the return PC per the Thumb/ARM state being entered. */
        if (spsr & CPSR_T) {
            env->regs[15] = env->elr_el[cur_el] & ~0x1;
        } else {
            env->regs[15] = env->elr_el[cur_el] & ~0x3;
        }
    } else {
        env->aarch64 = 1;
        pstate_write(env, spsr);
        if (!arm_singlestep_active(env)) {
            env->pstate &= ~PSTATE_SS;
        }
        aarch64_restore_sp(env, new_el);
        env->pc = env->elr_el[cur_el];
    }

    arm_call_el_change_hook(arm_env_get_cpu(env));

    return;

illegal_return:
    /* Illegal exception return: architecturally mandated behaviour is
     * to set PSTATE.IL, restore the PC from ELR_ELx, keep NZCV/DAIF
     * from the SPSR, and leave EL, execution state and SP unchanged.
     */
    env->pstate |= PSTATE_IL;
    env->pc = env->elr_el[cur_el];
    spsr &= PSTATE_NZCV | PSTATE_DAIF;
    spsr |= pstate_read(env) & ~(PSTATE_NZCV | PSTATE_DAIF);
    pstate_write(env, spsr);
    if (!arm_singlestep_active(env)) {
        env->pstate &= ~PSTATE_SS;
    }
}
1047
1048
1049static bool linked_bp_matches(ARMCPU *cpu, int lbn)
1050{
1051 CPUARMState *env = &cpu->env;
1052 uint64_t bcr = env->cp15.dbgbcr[lbn];
1053 int brps = extract32(cpu->dbgdidr, 24, 4);
1054 int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
1055 int bt;
1056 uint32_t contextidr;
1057
1058
1059
1060
1061
1062
1063
1064 if (lbn > brps || lbn < (brps - ctx_cmps)) {
1065 return false;
1066 }
1067
1068 bcr = env->cp15.dbgbcr[lbn];
1069
1070 if (extract64(bcr, 0, 1) == 0) {
1071
1072 return false;
1073 }
1074
1075 bt = extract64(bcr, 20, 4);
1076
1077
1078
1079
1080
1081 contextidr = extract64(env->cp15.contextidr_el[1], 0, 32);
1082
1083 switch (bt) {
1084 case 3:
1085 if (arm_current_el(env) > 1) {
1086
1087 return false;
1088 }
1089 return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
1090 case 5:
1091 case 9:
1092 case 11:
1093 default:
1094
1095
1096
1097 return false;
1098 }
1099
1100 return false;
1101}
1102
/* Return true if breakpoint (@is_wp false) or watchpoint (@is_wp true)
 * number @n should fire, checking the SSC/HMC/PAC security-state and
 * EL conditions plus any linked breakpoint.
 */
static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
{
    CPUARMState *env = &cpu->env;
    uint64_t cr;
    int pac, hmc, ssc, wt, lbn;
    /* Checks are against the CPU's security state, not the S/NS
     * attribute on the offending access itself.
     */
    bool is_secure = arm_is_secure(env);
    int access_el = arm_current_el(env);

    if (is_wp) {
        CPUWatchpoint *wp = env->cpu_watchpoint[n];

        if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
            return false;
        }
        cr = env->cp15.dbgwcr[n];
        if (wp->hitattrs.user) {
            /* Unprivileged-access instructions (LDRT/STRT etc.) match
             * watchpoints as if executed at EL0, regardless of the
             * CPU's actual EL.
             */
            access_el = 0;
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

        if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
            return false;
        }
        cr = env->cp15.dbgbcr[n];
    }

    /* At this point the address/access-type match is established; what
     * remains are the security-state and EL condition fields, which we
     * can read from either BCR or WCR since LBN, SSC, HMC, PAC/PMC and
     * the linked-enable bit share the same layout in both.  Reserved
     * {PAC, HMC, SSC} combinations are treated as if they were some
     * valid combination (architecturally permitted).
     */
    pac = extract64(cr, 1, 2);
    hmc = extract64(cr, 13, 1);
    ssc = extract64(cr, 14, 2);

    /* Security state condition. */
    switch (ssc) {
    case 0:
        break;
    case 1:
    case 3:
        if (is_secure) {
            return false;
        }
        break;
    case 2:
        if (!is_secure) {
            return false;
        }
        break;
    }

    /* Exception level condition: HMC gates EL2/EL3, PAC bits gate
     * EL1 and EL0 respectively.
     */
    switch (access_el) {
    case 3:
    case 2:
        if (!hmc) {
            return false;
        }
        break;
    case 1:
        if (extract32(pac, 0, 1) == 0) {
            return false;
        }
        break;
    case 0:
        if (extract32(pac, 1, 1) == 0) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }

    wt = extract64(cr, 20, 1);
    lbn = extract64(cr, 16, 4);

    /* If linked, the linked breakpoint's condition must match too. */
    if (wt && !linked_bp_matches(cpu, lbn)) {
        return false;
    }

    return true;
}
1198
1199static bool check_watchpoints(ARMCPU *cpu)
1200{
1201 CPUARMState *env = &cpu->env;
1202 int n;
1203
1204
1205
1206
1207 if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
1208 || !arm_generate_debug_exceptions(env)) {
1209 return false;
1210 }
1211
1212 for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
1213 if (bp_wp_matches(cpu, n, true)) {
1214 return true;
1215 }
1216 }
1217 return false;
1218}
1219
1220static bool check_breakpoints(ARMCPU *cpu)
1221{
1222 CPUARMState *env = &cpu->env;
1223 int n;
1224
1225
1226
1227
1228 if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
1229 || !arm_generate_debug_exceptions(env)) {
1230 return false;
1231 }
1232
1233 for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
1234 if (bp_wp_matches(cpu, n, false)) {
1235 return true;
1236 }
1237 }
1238 return false;
1239}
1240
1241void HELPER(check_breakpoints)(CPUARMState *env)
1242{
1243 ARMCPU *cpu = arm_env_get_cpu(env);
1244
1245 if (check_breakpoints(cpu)) {
1246 HELPER(exception_internal(env, EXCP_DEBUG));
1247 }
1248}
1249
1250bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
1251{
1252
1253
1254
1255 ARMCPU *cpu = ARM_CPU(cs);
1256
1257 return check_watchpoints(cpu);
1258}
1259
/* Core-code hook: called when a watchpoint or breakpoint fires; decide
 * which one and raise the corresponding architectural debug exception.
 */
void arm_debug_excp_handler(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit) {
        if (wp_hit->flags & BP_CPU) {
            /* CPU (architectural) watchpoint: take a data abort with a
             * watchpoint syndrome.  GDB watchpoints are handled by core
             * code and deliberately ignored here.
             */
            bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
            bool same_el = arm_debug_target_el(env) == arm_current_el(env);

            cs->watchpoint_hit = NULL;

            /* Debug-event FSR: long (fsc 0x22) vs short (0x2) format. */
            if (extended_addresses_enabled(env)) {
                env->exception.fsr = (1 << 9) | 0x22;
            } else {
                env->exception.fsr = 0x2;
            }
            env->exception.vaddress = wp_hit->hitaddr;
            raise_exception(env, EXCP_DATA_ABORT,
                            syn_watchpoint(same_el, 0, wnr),
                            arm_debug_target_el(env));
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
        bool same_el = (arm_debug_target_el(env) == arm_current_el(env));

        /* (1) GDB breakpoints should be handled first (by returning).
         * (2) Do not raise a CPU exception if no CPU breakpoint has
         * fired, since single-step is also delivered as an internal
         * debug exception.
         */
        if (cpu_breakpoint_test(cs, pc, BP_GDB)
            || !cpu_breakpoint_test(cs, pc, BP_CPU)) {
            return;
        }

        if (extended_addresses_enabled(env)) {
            env->exception.fsr = (1 << 9) | 0x22;
        } else {
            env->exception.fsr = 0x2;
        }

        /* NOTE(review): exception.vaddress is left stale here (FAR is
         * UNKNOWN for breakpoints) — confirm nothing downstream reads it.
         */
        raise_exception(env, EXCP_PREFETCH_ABORT,
                        syn_breakpoint(same_el),
                        arm_debug_target_el(env));
    }
}
1311
1312
1313
1314
1315
1316
1317
1318uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
1319{
1320 int shift = i & 0xff;
1321 if (shift >= 32) {
1322 if (shift == 32)
1323 env->CF = x & 1;
1324 else
1325 env->CF = 0;
1326 return 0;
1327 } else if (shift != 0) {
1328 env->CF = (x >> (32 - shift)) & 1;
1329 return x << shift;
1330 }
1331 return x;
1332}
1333
1334uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
1335{
1336 int shift = i & 0xff;
1337 if (shift >= 32) {
1338 if (shift == 32)
1339 env->CF = (x >> 31) & 1;
1340 else
1341 env->CF = 0;
1342 return 0;
1343 } else if (shift != 0) {
1344 env->CF = (x >> (shift - 1)) & 1;
1345 return x >> shift;
1346 }
1347 return x;
1348}
1349
1350uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
1351{
1352 int shift = i & 0xff;
1353 if (shift >= 32) {
1354 env->CF = (x >> 31) & 1;
1355 return (int32_t)x >> 31;
1356 } else if (shift != 0) {
1357 env->CF = (x >> (shift - 1)) & 1;
1358 return (int32_t)x >> shift;
1359 }
1360 return x;
1361}
1362
1363uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
1364{
1365 int shift1, shift;
1366 shift1 = i & 0xff;
1367 shift = shift1 & 0x1f;
1368 if (shift == 0) {
1369 if (shift1 != 0)
1370 env->CF = (x >> 31) & 1;
1371 return x;
1372 } else {
1373 env->CF = (x >> (shift - 1)) & 1;
1374 return ((uint32_t)x >> shift) | (x << (32 - shift));
1375 }
1376}
1377