1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include "qemu/osdep.h"
20#include "qemu/log.h"
21#include "qemu/main-loop.h"
22#include "cpu.h"
23#include "exec/helper-proto.h"
24#include "internals.h"
25#include "exec/exec-all.h"
26#include "exec/cpu_ldst.h"
27
28#define SIGNBIT (uint32_t)0x80000000
29#define SIGNBIT64 ((uint64_t)1 << 63)
30
/*
 * Raise a guest exception.
 *
 * @env:       CPU state
 * @excp:      architectural exception number (must not be a QEMU-internal one)
 * @syndrome:  value for the exception syndrome register
 * @target_el: exception level the exception should be taken to
 *
 * Does not return: exits the CPU loop via cpu_loop_exit().
 */
void raise_exception(CPUARMState *env, uint32_t excp,
                     uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    if ((env->cp15.hcr_el2 & HCR_TGE) &&
        target_el == 1 && !arm_is_secure(env)) {
        /*
         * HCR_EL2.TGE reroutes Non-secure EL1 exceptions to EL2.
         * Advanced SIMD/FP access traps rerouted this way are reported
         * with an "uncategorized" syndrome instead.
         * NOTE(review): syndrome rewrite matches the TGE rerouting rules
         * in the ARM ARM — confirm for other EC values.
         */
        target_el = 2;
        if (syn_get_ec(syndrome) == EC_ADVSIMDFPACCESSTRAP) {
            syndrome = syn_uncategorized();
        }
    }

    assert(!excp_is_internal(excp));
    cs->exception_index = excp;
    env->exception.syndrome = syndrome;
    env->exception.target_el = target_el;
    cpu_loop_exit(cs);
}
56
/*
 * Return the exception level an exception taken from the current EL
 * should be routed to: at least EL1, never lower than the current EL.
 */
static int exception_target_el(CPUARMState *env)
{
    int target_el = MAX(1, arm_current_el(env));

    /*
     * If EL3 is AArch32 there is no Secure EL1: exceptions that would
     * otherwise target EL1 from Secure state go to EL3 (Monitor).
     */
    if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
        target_el = 3;
    }

    return target_el;
}
70
71uint32_t HELPER(neon_tbl)(uint32_t ireg, uint32_t def, void *vn,
72 uint32_t maxindex)
73{
74 uint32_t val, shift;
75 uint64_t *table = vn;
76
77 val = 0;
78 for (shift = 0; shift < 32; shift += 8) {
79 uint32_t index = (ireg >> shift) & 0xff;
80 if (index < maxindex) {
81 uint32_t tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
82 val |= tmp << shift;
83 } else {
84 val |= def & (0xff << shift);
85 }
86 }
87 return val;
88}
89
90#if !defined(CONFIG_USER_ONLY)
91
/*
 * Build the syndrome value for a data abort, merging the per-insn
 * syndrome template (recorded at translate time in
 * env->exception.syndrome) with the runtime fault details.
 *
 * @template_syn: translate-time syndrome template
 * @target_el:    EL the abort will be taken to
 * @same_el:      abort taken to the EL it occurred in
 * @ea:           external abort flag
 * @s1ptw:        fault happened on a stage-1 page table walk
 * @is_write:     true for stores
 * @fsc:          fault status code to encode
 */
static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
                                            unsigned int target_el,
                                            bool same_el, bool ea,
                                            bool s1ptw, bool is_write,
                                            int fsc)
{
    uint32_t syn;

    /*
     * A valid instruction syndrome (ISV) is only reported when the
     * translator produced a template with ARM_EL_ISV set, the abort is
     * taken to EL2, and it is not a stage-1 page-table-walk fault;
     * otherwise report a no-ISS data abort.
     */
    if (!(template_syn & ARM_EL_ISV) || target_el != 2 || s1ptw) {
        syn = syn_data_abort_no_iss(same_el,
                                    ea, 0, s1ptw, is_write, fsc);
    } else {
        /*
         * Build the base syndrome with all the register-transfer ISS
         * fields zeroed, then OR in the template, which carries the
         * translate-time ISV/SAS/SSE/SRT information.
         */
        syn = syn_data_abort_with_iss(same_el,
                                      0, 0, 0, 0, 0,
                                      ea, 0, s1ptw, is_write, fsc,
                                      false);
        /* Merge the runtime syndrome with the template syndrome */
        syn |= template_syn;
    }
    return syn;
}
128
/*
 * Deliver an MMU fault to the guest: choose the target EL, encode the
 * FSR/syndrome in the format that EL expects, and raise the exception.
 * Does not return (raise_exception() exits the CPU loop).
 */
static void deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
                          int mmu_idx, ARMMMUFaultInfo *fi)
{
    CPUARMState *env = &cpu->env;
    int target_el;
    bool same_el;
    uint32_t syn, exc, fsr, fsc;
    ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);

    target_el = exception_target_el(env);
    if (fi->stage2) {
        /* Stage-2 faults go to EL2; report the faulting IPA in HPFAR_EL2 */
        target_el = 2;
        env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
    }
    same_el = (arm_current_el(env) == target_el);

    if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
        arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
        /*
         * LPAE-format FSR: the fault status code lives in the low
         * 6 bits and can be reused directly in the syndrome.
         */
        fsr = arm_fi_to_lfsc(fi);
        fsc = extract32(fsr, 0, 6);
    } else {
        fsr = arm_fi_to_sfsc(fi);
        /*
         * Short-descriptor format FSR: the syndrome FSC field has no
         * encoding for these status values, so use 0x3f ("unknown").
         */
        fsc = 0x3f;
    }

    if (access_type == MMU_INST_FETCH) {
        syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc);
        exc = EXCP_PREFETCH_ABORT;
    } else {
        syn = merge_syn_data_abort(env->exception.syndrome, target_el,
                                   same_el, fi->ea, fi->s1ptw,
                                   access_type == MMU_DATA_STORE,
                                   fsc);
        if (access_type == MMU_DATA_STORE
            && arm_feature(env, ARM_FEATURE_V6)) {
            /* Set the WnR (write-not-read) bit in the FSR on v6 and later */
            fsr |= (1 << 11);
        }
        exc = EXCP_DATA_ABORT;
    }

    env->exception.vaddress = addr;
    env->exception.fsr = fsr;
    raise_exception(env, exc, syn, target_el);
}
181
182
183
184
185
/*
 * Slow-path TLB fill: attempt the translation and, on failure, restore
 * the guest state from the host return address and deliver the fault
 * to the guest (which does not return).
 */
void tlb_fill(CPUState *cs, target_ulong addr, int size,
              MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    bool ret;
    ARMMMUFaultInfo fi = {};

    ret = arm_tlb_fill(cs, addr, access_type, mmu_idx, &fi);
    if (unlikely(ret)) {
        ARMCPU *cpu = ARM_CPU(cs);

        /* Synchronize guest CPU state before raising the exception */
        cpu_restore_state(cs, retaddr, true);

        deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
    }
}
202
203
/* Raise an alignment fault for an unaligned guest memory access. */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* Synchronize guest CPU state before raising the exception */
    cpu_restore_state(cs, retaddr, true);

    fi.type = ARMFault_Alignment;
    deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
}
217
218
219
220
221
/*
 * Handle a failed bus transaction (e.g. an access that received a bus
 * error or hit an unassigned address) by raising a synchronous
 * external abort in the guest.
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* Synchronize guest CPU state before raising the exception */
    cpu_restore_state(cs, retaddr, true);

    /* Classify the external abort (ea flag) from the bus response */
    fi.ea = arm_extabort_type(response);
    fi.type = ARMFault_SyncExternal;
    deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
}
238
239#endif
240
/*
 * v8M stack-limit check: called when the stack pointer is about to be
 * updated to @newvalue; raises a STKOF exception if the new value is
 * below the limit for the currently-selected stack.
 */
void HELPER(v8m_stackcheck)(CPUARMState *env, uint32_t newvalue)
{
    /*
     * v7m_sp_limit() returns the limit register value for the stack
     * pointer that is currently in use.
     */
    if (newvalue < v7m_sp_limit(env)) {
        CPUState *cs = CPU(arm_env_get_cpu(env));

        /*
         * The stack-overflow exception is synchronous, so restore the
         * guest PC/state from the host return address before raising it.
         * Target EL is 1 (M-profile has no higher ELs here).
         */
        cpu_restore_state(cs, GETPC(), true);
        raise_exception(env, EXCP_STKOF, 0, 1);
    }
}
259
260uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
261{
262 uint32_t res = a + b;
263 if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT))
264 env->QF = 1;
265 return res;
266}
267
268uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
269{
270 uint32_t res = a + b;
271 if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
272 env->QF = 1;
273 res = ~(((int32_t)a >> 31) ^ SIGNBIT);
274 }
275 return res;
276}
277
278uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
279{
280 uint32_t res = a - b;
281 if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
282 env->QF = 1;
283 res = ~(((int32_t)a >> 31) ^ SIGNBIT);
284 }
285 return res;
286}
287
288uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val)
289{
290 uint32_t res;
291 if (val >= 0x40000000) {
292 res = ~SIGNBIT;
293 env->QF = 1;
294 } else if (val <= (int32_t)0xc0000000) {
295 res = SIGNBIT;
296 env->QF = 1;
297 } else {
298 res = val << 1;
299 }
300 return res;
301}
302
303uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
304{
305 uint32_t res = a + b;
306 if (res < a) {
307 env->QF = 1;
308 res = ~0;
309 }
310 return res;
311}
312
313uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
314{
315 uint32_t res = a - b;
316 if (res > a) {
317 env->QF = 1;
318 res = 0;
319 }
320 return res;
321}
322
323
324static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
325{
326 int32_t top;
327 uint32_t mask;
328
329 top = val >> shift;
330 mask = (1u << shift) - 1;
331 if (top > 0) {
332 env->QF = 1;
333 return mask;
334 } else if (top < -1) {
335 env->QF = 1;
336 return ~mask;
337 }
338 return val;
339}
340
341
342static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
343{
344 uint32_t max;
345
346 max = (1u << shift) - 1;
347 if (val < 0) {
348 env->QF = 1;
349 return 0;
350 } else if (val > max) {
351 env->QF = 1;
352 return max;
353 }
354 return val;
355}
356
357
358uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
359{
360 return do_ssat(env, x, shift);
361}
362
363
364uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
365{
366 uint32_t res;
367
368 res = (uint16_t)do_ssat(env, (int16_t)x, shift);
369 res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
370 return res;
371}
372
373
374uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
375{
376 return do_usat(env, x, shift);
377}
378
379
380uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
381{
382 uint32_t res;
383
384 res = (uint16_t)do_usat(env, (int16_t)x, shift);
385 res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
386 return res;
387}
388
389void HELPER(setend)(CPUARMState *env)
390{
391 env->uncached_cpsr ^= CPSR_E;
392}
393
394
395
396
397
/*
 * Return the exception level to which a WFI or WFE instruction should
 * be trapped, or 0 if the instruction should execute normally.
 * @is_wfe selects the WFE trap controls instead of the WFI ones.
 */
static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
{
    int cur_el = arm_current_el(env);
    uint64_t mask;

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile has no WFI/WFE trap controls */
        return 0;
    }

    /*
     * From EL0 on v8, SCTLR.nTWE/nTWI are "don't trap" bits: a clear
     * bit traps the instruction to EL1, or to EL3 when executing in
     * Secure state under an AArch32 EL3.
     */
    if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
        int target_el;

        mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
        if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) {
            /* Secure EL0 with AArch32 EL3: the trap goes to Monitor */
            target_el = 3;
        } else {
            target_el = 1;
        }

        if (!(env->cp15.sctlr_el[target_el] & mask)) {
            return target_el;
        }
    }

    /*
     * HCR_EL2.TWE/TWI trap WFE/WFI to EL2; only relevant below EL2 and
     * in Non-secure state.
     */
    if (cur_el < 2 && !arm_is_secure(env)) {
        mask = (is_wfe) ? HCR_TWE : HCR_TWI;
        if (env->cp15.hcr_el2 & mask) {
            return 2;
        }
    }

    /* SCR_EL3.TWE/TWI trap WFE/WFI to EL3 from any lower EL */
    if (cur_el < 3) {
        mask = (is_wfe) ? SCR_TWE : SCR_TWI;
        if (env->cp15.scr_el3 & mask) {
            return 3;
        }
    }

    return 0;
}
448
/*
 * WFI: halt the CPU until an interrupt, unless there is already
 * pending work or a configured trap sends the instruction to a higher
 * EL.  @insn_len is the encoding size (2 for a Thumb encoding, 4
 * otherwise), used to rewind the PC when the instruction traps.
 */
void HELPER(wfi)(CPUARMState *env, uint32_t insn_len)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int target_el = check_wfx_trap(env, false);

    if (cpu_has_work(cs)) {
        /*
         * Don't bother entering the halted state if we would wake up
         * immediately anyway.
         */
        return;
    }

    if (target_el) {
        /*
         * The trap takes precedence over halting: rewind the PC so the
         * preferred return address is the WFI instruction itself.
         * NOTE(review): env->pc is the AArch64 PC — confirm AArch32
         * callers are handled before reaching this adjustment.
         */
        env->pc -= insn_len;
        raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0, insn_len == 2),
                        target_el);
    }

    cs->exception_index = EXCP_HLT;
    cs->halted = 1;
    cpu_loop_exit(cs);
}
471
/*
 * WFE: architecturally a hint.  We implement it identically to YIELD —
 * return to the top-level loop without halting — so we never enter a
 * "low power state" here and take no configurable WFE traps in this
 * helper.  NOTE(review): WFE trap controls (SCTLR.nTWE etc.) are
 * presumably enforced elsewhere — confirm against the translator.
 */
void HELPER(wfe)(CPUARMState *env)
{
    HELPER(yield)(env);
}
483
/*
 * YIELD: return control to the top-level CPU loop with EXCP_YIELD so
 * another vCPU can be scheduled.  The CPU is not halted (contrast
 * WFI, which sets cs->halted).
 */
void HELPER(yield)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    cs->exception_index = EXCP_YIELD;
    cpu_loop_exit(cs);
}
496
497
498
499
500
501
502
/*
 * Raise a QEMU-internal exception.  These are never delivered to the
 * guest, so they need no syndrome or target EL; they simply exit the
 * CPU loop so the top level can handle them.
 */
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}
511
512
/* Raise an exception with the specified syndrome and target EL. */
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                     uint32_t syndrome, uint32_t target_el)
{
    raise_exception(env, excp, syndrome, target_el);
}
518
519
520
521
/*
 * Raise an EXCP_BKPT for a BKPT/BRK instruction, with the given
 * syndrome, routed to the debug exception target EL.
 */
void HELPER(exception_bkpt_insn)(CPUARMState *env, uint32_t syndrome)
{
    /* The FSR is only consumed if the debug target EL is AArch32 */
    env->exception.fsr = arm_debug_exception_fsr(env);
    /*
     * Use a fixed 0 for the fault address rather than a stale value.
     * NOTE(review): the reported FAR for BKPT is presumably
     * architecturally UNKNOWN — confirm against the ARM ARM.
     */
    env->exception.vaddress = 0;
    raise_exception(env, EXCP_BKPT, syndrome, arm_debug_target_el(env));
}
533
534uint32_t HELPER(cpsr_read)(CPUARMState *env)
535{
536 return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED);
537}
538
/* Write the CPSR as the result of an instruction (MSR etc). */
void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask, CPSRWriteByInstr);
}
543
544
/* Write the CPSR for an AArch32 exception return. */
void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
{
    /* EL-change hooks must run with the iothread lock held */
    qemu_mutex_lock_iothread();
    arm_call_pre_el_change_hook(arm_env_get_cpu(env));
    qemu_mutex_unlock_iothread();

    cpsr_write(env, val, CPSR_ERET_MASK, CPSRWriteExceptionReturn);

    /*
     * Generated code has already stored the new PC; align it to the
     * instruction size of the state we returned to (halfword for
     * Thumb, word for ARM), since the low bits must be ignored.
     */
    env->regs[15] &= (env->thumb ? ~1 : ~3);

    qemu_mutex_lock_iothread();
    arm_call_el_change_hook(arm_env_get_cpu(env));
    qemu_mutex_unlock_iothread();
}
564
565
566uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
567{
568 uint32_t val;
569
570 if (regno == 13) {
571 val = env->banked_r13[BANK_USRSYS];
572 } else if (regno == 14) {
573 val = env->banked_r14[BANK_USRSYS];
574 } else if (regno >= 8
575 && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
576 val = env->usr_regs[regno - 8];
577 } else {
578 val = env->regs[regno];
579 }
580 return val;
581}
582
583void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
584{
585 if (regno == 13) {
586 env->banked_r13[BANK_USRSYS] = val;
587 } else if (regno == 14) {
588 env->banked_r14[BANK_USRSYS] = val;
589 } else if (regno >= 8
590 && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
591 env->usr_regs[regno - 8] = val;
592 } else {
593 env->regs[regno] = val;
594 }
595}
596
597void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
598{
599 if ((env->uncached_cpsr & CPSR_M) == mode) {
600 env->regs[13] = val;
601 } else {
602 env->banked_r13[bank_number(mode)] = val;
603 }
604}
605
/*
 * Read the banked SP for @mode.  Executing this from System mode is
 * UNPREDICTABLE per the architecture; we choose to UNDEF.
 */
uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SYS) {
        /* UNPREDICTABLE in System mode: we choose to UNDEF */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if ((env->uncached_cpsr & CPSR_M) == mode) {
        return env->regs[13];
    } else {
        return env->banked_r13[bank_number(mode)];
    }
}
622
/*
 * Check that an MRS/MSR (banked) access to register @regno of mode
 * @tgtmode is permitted from the current mode, raising UNDEF if not.
 * regno encoding matches msr_banked()/mrs_banked(): 16 = SPSR,
 * 17 = ELR_hyp, 13/14 = banked SP/LR, 8..12 = banked r8-r12.
 * The rejected combinations are CONSTRAINED UNPREDICTABLE in the
 * architecture; we choose to UNDEF for all of them.
 */
static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode,
                                      uint32_t regno)
{
    int curmode = env->uncached_cpsr & CPSR_M;

    if (regno == 17) {
        /* ELR_Hyp: accessible only from Hyp or Monitor mode */
        if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) {
            goto undef;
        }
        return;
    }

    /* Accessing your own mode's banked registers this way: UNDEF */
    if (curmode == tgtmode) {
        goto undef;
    }

    if (tgtmode == ARM_CPU_MODE_USR) {
        switch (regno) {
        case 8 ... 12:
            /* r8_usr..r12_usr are only banked relative to FIQ mode */
            if (curmode != ARM_CPU_MODE_FIQ) {
                goto undef;
            }
            break;
        case 13:
            /* SP_usr from System mode: UNDEF */
            if (curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        case 14:
            /* LR_usr from Hyp or System mode: UNDEF */
            if (curmode == ARM_CPU_MODE_HYP || curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        default:
            break;
        }
    }

    if (tgtmode == ARM_CPU_MODE_HYP) {
        /* Hyp-mode banked registers are accessible only from Monitor */
        if (curmode != ARM_CPU_MODE_MON) {
            goto undef;
        }
    }

    return;

undef:
    raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                    exception_target_el(env));
}
680
/* MSR (banked): write @value to banked register @regno of mode @tgtmode. */
void HELPER(msr_banked)(CPUARMState *env, uint32_t value, uint32_t tgtmode,
                        uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSR_<mode> */
        env->banked_spsr[bank_number(tgtmode)] = value;
        break;
    case 17: /* ELR_hyp */
        env->elr_el[2] = value;
        break;
    case 13: /* SP_<mode> */
        env->banked_r13[bank_number(tgtmode)] = value;
        break;
    case 14: /* LR_<mode> */
        env->banked_r14[r14_bank_number(tgtmode)] = value;
        break;
    case 8 ... 12: /* r8-r12 are only banked for usr and fiq */
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            env->usr_regs[regno - 8] = value;
            break;
        case ARM_CPU_MODE_FIQ:
            env->fiq_regs[regno - 8] = value;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        g_assert_not_reached();
    }
}
715
/* MRS (banked): read banked register @regno of mode @tgtmode. */
uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSR_<mode> */
        return env->banked_spsr[bank_number(tgtmode)];
    case 17: /* ELR_hyp */
        return env->elr_el[2];
    case 13: /* SP_<mode> */
        return env->banked_r13[bank_number(tgtmode)];
    case 14: /* LR_<mode> */
        return env->banked_r14[r14_bank_number(tgtmode)];
    case 8 ... 12: /* r8-r12 are only banked for usr and fiq */
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            return env->usr_regs[regno - 8];
        case ARM_CPU_MODE_FIQ:
            return env->fiq_regs[regno - 8];
        default:
            g_assert_not_reached();
        }
    default:
        g_assert_not_reached();
    }
}
742
/*
 * Runtime access check for a coprocessor/system register whose
 * permissions could not be fully resolved at translate time.  Raises
 * the appropriate exception when the access is denied; returns
 * normally when allowed.  @rip points at the register's ARMCPRegInfo.
 */
void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
                                 uint32_t isread)
{
    const ARMCPRegInfo *ri = rip;
    int target_el;

    /* XScale: the CPAR register gates access to coprocessors 0..13 */
    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    if (!ri->accessfn) {
        return;
    }

    switch (ri->accessfn(env, ri, isread)) {
    case CP_ACCESS_OK:
        return;
    case CP_ACCESS_TRAP:
        target_el = exception_target_el(env);
        break;
    case CP_ACCESS_TRAP_EL2:
        /*
         * Requesting a trap to EL2 while in EL3 or Secure state is a
         * bug in the access function.
         */
        assert(!arm_is_secure(env) && arm_current_el(env) != 3);
        target_el = 2;
        break;
    case CP_ACCESS_TRAP_EL3:
        target_el = 3;
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED:
        target_el = exception_target_el(env);
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL2:
        target_el = 2;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL3:
        target_el = 3;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_FP_EL2:
        target_el = 2;
        /*
         * Replace the syndrome with an FP access-trap syndrome,
         * COND = 0xe (always).  NOTE(review): confirm the mandated
         * ISS fields for these traps against the ESR_ELx description
         * in the ARM ARM.
         */
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    case CP_ACCESS_TRAP_FP_EL3:
        target_el = 3;
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    default:
        g_assert_not_reached();
    }

    raise_exception(env, EXCP_UDEF, syndrome, target_el);
}
805
806void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
807{
808 const ARMCPRegInfo *ri = rip;
809
810 if (ri->type & ARM_CP_IO) {
811 qemu_mutex_lock_iothread();
812 ri->writefn(env, ri, value);
813 qemu_mutex_unlock_iothread();
814 } else {
815 ri->writefn(env, ri, value);
816 }
817}
818
819uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
820{
821 const ARMCPRegInfo *ri = rip;
822 uint32_t res;
823
824 if (ri->type & ARM_CP_IO) {
825 qemu_mutex_lock_iothread();
826 res = ri->readfn(env, ri);
827 qemu_mutex_unlock_iothread();
828 } else {
829 res = ri->readfn(env, ri);
830 }
831
832 return res;
833}
834
835void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
836{
837 const ARMCPRegInfo *ri = rip;
838
839 if (ri->type & ARM_CP_IO) {
840 qemu_mutex_lock_iothread();
841 ri->writefn(env, ri, value);
842 qemu_mutex_unlock_iothread();
843 } else {
844 ri->writefn(env, ri, value);
845 }
846}
847
848uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
849{
850 const ARMCPRegInfo *ri = rip;
851 uint64_t res;
852
853 if (ri->type & ARM_CP_IO) {
854 qemu_mutex_lock_iothread();
855 res = ri->readfn(env, ri);
856 qemu_mutex_unlock_iothread();
857 } else {
858 res = ri->readfn(env, ri);
859 }
860
861 return res;
862}
863
/*
 * MSR (immediate) to a PSTATE field: SPSel (op 0x05), DAIFSet (0x1e)
 * or DAIFClr (0x1f).  At EL0 these are only permitted when
 * SCTLR_EL1.UMA is set; otherwise the access traps.
 */
void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm)
{
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
        /* Build the AArch64 sysreg-trap syndrome for this MSR form */
        uint32_t syndrome = syn_aa64_sysregtrap(0, extract32(op, 0, 3),
                                                extract32(op, 3, 3), 4,
                                                imm, 0x1f, 0);
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    switch (op) {
    case 0x05: /* SPSel */
        update_spsel(env, imm);
        break;
    case 0x1e: /* DAIFSet: set the selected mask bits */
        env->daif |= (imm << 6) & PSTATE_DAIF;
        break;
    case 0x1f: /* DAIFClr: clear the selected mask bits */
        env->daif &= ~((imm << 6) & PSTATE_DAIF);
        break;
    default:
        g_assert_not_reached();
    }
}
891
892void HELPER(clear_pstate_ss)(CPUARMState *env)
893{
894 env->pstate &= ~PSTATE_SS;
895}
896
/*
 * Called before an HVC instruction executes: raise UNDEF if HVC is not
 * available or is disabled from the current state; PSCI calls made via
 * HVC bypass these checks entirely.
 */
void HELPER(pre_hvc)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    /* FIXME: use the actual secure state; hardwired false for now */
    bool secure = false;
    bool undef;

    if (arm_is_psci_call(cpu, EXCP_HVC)) {
        /*
         * A valid PSCI call via the HVC conduit overrides the
         * architecturally mandated HVC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* No EL2: HVC always UNDEFs */
        undef = true;
    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* With EL3 present, SCR_EL3.HCE enables/disables HVC */
        undef = !(env->cp15.scr_el3 & SCR_HCE);
    } else {
        /* No EL3: HCR_EL2.HCD disables HVC */
        undef = env->cp15.hcr_el2 & HCR_HCD;
    }

    /*
     * HVC from Secure state UNDEFs on AArch32 (and from AArch64 EL1);
     * dead code while 'secure' is hardwired false above —
     * NOTE(review): confirm against the HVC availability rules in the
     * ARM ARM when fixing the FIXME.
     */
    if (secure && (!is_a64(env) || cur_el == 1)) {
        undef = true;
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}
936
/*
 * Called before an SMC instruction executes.  Decides, per the SMC
 * availability rules, whether to UNDEF, trap to EL2 (HCR_EL2.TSC), or
 * let the instruction proceed (including PSCI calls via SMC).
 */
void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool smd_flag = env->cp15.scr_el3 & SCR_SMD;

    /*
     * SCR_EL3.SMD disables SMC.  As implemented here it applies in all
     * states on AArch64 but only in Non-secure state on AArch32.
     * NOTE(review): confirm the AArch32 secure-state exemption against
     * the SMC disable rules in the ARM ARM.
     */
    bool smd = arm_feature(env, ARM_FEATURE_AARCH64) ? smd_flag
                                                     : smd_flag && !secure;

    if (!arm_feature(env, ARM_FEATURE_EL3) &&
        cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /*
         * No EL3 and no SMC PSCI conduit: there is nothing for SMC to
         * call, so it UNDEFs.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if (!secure && cur_el == 1 && (env->cp15.hcr_el2 & HCR_TSC)) {
        /*
         * HCR_EL2.TSC traps Non-secure EL1 SMC to EL2; this check
         * comes before the SMD/UNDEF check below.
         */
        raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
    }

    /*
     * A PSCI call via SMC proceeds regardless; otherwise SMC UNDEFs
     * when disabled by SMD or when there is no EL3 to enter.
     */
    if (!arm_is_psci_call(cpu, EXCP_SMC) &&
        (smd || !arm_feature(env, ARM_FEATURE_EL3))) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}
1017
1018static int el_from_spsr(uint32_t spsr)
1019{
1020
1021
1022
1023 if (spsr & PSTATE_nRW) {
1024 switch (spsr & CPSR_M) {
1025 case ARM_CPU_MODE_USR:
1026 return 0;
1027 case ARM_CPU_MODE_HYP:
1028 return 2;
1029 case ARM_CPU_MODE_FIQ:
1030 case ARM_CPU_MODE_IRQ:
1031 case ARM_CPU_MODE_SVC:
1032 case ARM_CPU_MODE_ABT:
1033 case ARM_CPU_MODE_UND:
1034 case ARM_CPU_MODE_SYS:
1035 return 1;
1036 case ARM_CPU_MODE_MON:
1037
1038
1039
1040 default:
1041 return -1;
1042 }
1043 } else {
1044 if (extract32(spsr, 1, 1)) {
1045
1046 return -1;
1047 }
1048 if (extract32(spsr, 0, 4) == 1) {
1049
1050 return -1;
1051 }
1052 return extract32(spsr, 2, 2);
1053 }
1054}
1055
/*
 * AArch64 exception return (ERET): validate the SPSR, restore
 * PSTATE/CPSR and the PC from ELR, and run the EL-change hooks.
 * Illegal returns set PSTATE.IL and resume at the current EL.
 */
void HELPER(exception_return)(CPUARMState *env)
{
    int cur_el = arm_current_el(env);
    unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
    uint32_t spsr = env->banked_spsr[spsr_idx];
    int new_el;
    bool return_to_aa64 = (spsr & PSTATE_nRW) == 0;

    aarch64_save_sp(env, cur_el);

    /* Exception return clears the exclusive monitor */
    arm_clear_exclusive(env);

    /*
     * Squash the software-step bit from the SPSR when debug exceptions
     * are being generated at the current EL.
     * NOTE(review): squash condition mirrors
     * arm_generate_debug_exceptions() — confirm against the debug
     * chapter of the ARM ARM.
     */
    if (arm_generate_debug_exceptions(env)) {
        spsr &= ~PSTATE_SS;
    }

    new_el = el_from_spsr(spsr);
    if (new_el == -1) {
        goto illegal_return;
    }
    if (new_el > cur_el
        || (new_el == 2 && !arm_feature(env, ARM_FEATURE_EL2))) {
        /*
         * Disallow returning to a higher EL than the current one, or
         * to a non-existent EL2.
         */
        goto illegal_return;
    }

    if (new_el != 0 && arm_el_is_aa64(env, new_el) != return_to_aa64) {
        /* Target EL is configured for the other execution state */
        goto illegal_return;
    }

    if (new_el == 2 && arm_is_secure_below_el3(env)) {
        /* There is no Secure EL2 to return to */
        goto illegal_return;
    }

    if (new_el == 1 && (env->cp15.hcr_el2 & HCR_TGE)
        && !arm_is_secure_below_el3(env)) {
        /* HCR_EL2.TGE forbids returning to Non-secure EL1 */
        goto illegal_return;
    }

    /* EL-change hooks must run with the iothread lock held */
    qemu_mutex_lock_iothread();
    arm_call_pre_el_change_hook(arm_env_get_cpu(env));
    qemu_mutex_unlock_iothread();

    if (!return_to_aa64) {
        env->aarch64 = 0;
        /*
         * Raw CPSR write: aarch64_sync_64_to_32() sorts the register
         * banks out afterwards, and el_from_spsr() already rejected
         * the bad-mode encodings.
         */
        cpsr_write(env, spsr, ~0, CPSRWriteRaw);
        if (!arm_singlestep_active(env)) {
            env->uncached_cpsr &= ~PSTATE_SS;
        }
        aarch64_sync_64_to_32(env);

        /* Mask the return address per the target instruction set */
        if (spsr & CPSR_T) {
            env->regs[15] = env->elr_el[cur_el] & ~0x1;
        } else {
            env->regs[15] = env->elr_el[cur_el] & ~0x3;
        }
        qemu_log_mask(CPU_LOG_INT, "Exception return from AArch64 EL%d to "
                      "AArch32 EL%d PC 0x%" PRIx32 "\n",
                      cur_el, new_el, env->regs[15]);
    } else {
        env->aarch64 = 1;
        pstate_write(env, spsr);
        if (!arm_singlestep_active(env)) {
            env->pstate &= ~PSTATE_SS;
        }
        aarch64_restore_sp(env, new_el);
        env->pc = env->elr_el[cur_el];
        qemu_log_mask(CPU_LOG_INT, "Exception return from AArch64 EL%d to "
                      "AArch64 EL%d PC 0x%" PRIx64 "\n",
                      cur_el, new_el, env->pc);
    }

    /* Let SVE state track the EL change (vector length can differ) */
    aarch64_sve_change_el(env, cur_el, new_el, return_to_aa64);

    qemu_mutex_lock_iothread();
    arm_call_el_change_hook(arm_env_get_cpu(env));
    qemu_mutex_unlock_iothread();

    return;

illegal_return:
    /*
     * Illegal exception return: set PSTATE.IL, resume at the current
     * EL at the ELR address, taking only NZCV and DAIF from the SPSR
     * and keeping the rest of the current PSTATE.
     */
    env->pstate |= PSTATE_IL;
    env->pc = env->elr_el[cur_el];
    spsr &= PSTATE_NZCV | PSTATE_DAIF;
    spsr |= pstate_read(env) & ~(PSTATE_NZCV | PSTATE_DAIF);
    pstate_write(env, spsr);
    if (!arm_singlestep_active(env)) {
        env->pstate &= ~PSTATE_SS;
    }
    qemu_log_mask(LOG_GUEST_ERROR, "Illegal exception return at EL%d: "
                  "resuming execution at 0x%" PRIx64 "\n", cur_el, env->pc);
}
1173
1174
/*
 * Return true if the linked context breakpoint @lbn matches the
 * current CPU context.  Only context-ID comparison (BT == 3) is
 * implemented; other linked breakpoint types never match here.
 */
static bool linked_bp_matches(ARMCPU *cpu, int lbn)
{
    CPUARMState *env = &cpu->env;
    uint64_t bcr = env->cp15.dbgbcr[lbn];
    int brps = extract32(cpu->dbgdidr, 24, 4);
    int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
    int bt;
    uint32_t contextidr;

    /*
     * Links to unimplemented breakpoints or to breakpoints that are
     * not context-aware are CONSTRAINED UNPREDICTABLE; we treat them
     * as no-match.  Context-aware comparators occupy the top
     * ctx_cmps slots of the brps implemented breakpoints.
     */
    if (lbn > brps || lbn < (brps - ctx_cmps)) {
        return false;
    }

    bcr = env->cp15.dbgbcr[lbn];

    if (extract64(bcr, 0, 1) == 0) {
        /* Linked breakpoint is disabled: no match */
        return false;
    }

    bt = extract64(bcr, 20, 4);

    /*
     * Compare against CONTEXTIDR_EL1 (low 32 bits).  NOTE(review):
     * only the EL1 CONTEXTIDR is consulted — confirm this is the
     * intended behaviour for all supported configurations.
     */
    contextidr = extract64(env->cp15.contextidr_el[1], 0, 32);

    switch (bt) {
    case 3: /* linked context ID match */
        if (arm_current_el(env) > 1) {
            /* Context matches never fire above EL1 here */
            return false;
        }
        return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
    case 5: /* linked address mismatch (reserved if AArch64) */
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    default:
        /* Remaining link types are not implemented: treat as no-match */
        return false;
    }

    return false;
}
1228
/*
 * Return true if breakpoint/watchpoint @n should fire given the
 * current CPU state.  @is_wp selects watchpoint (DBGWCR) checks
 * instead of breakpoint (DBGBCR) checks.
 */
static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
{
    CPUARMState *env = &cpu->env;
    uint64_t cr;
    int pac, hmc, ssc, wt, lbn;
    /*
     * For watchpoints the privilege check uses the privilege of the
     * access that hit (access_el may be lowered below), not merely the
     * current EL.
     */
    bool is_secure = arm_is_secure(env);
    int access_el = arm_current_el(env);

    if (is_wp) {
        CPUWatchpoint *wp = env->cpu_watchpoint[n];

        if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
            return false;
        }
        cr = env->cp15.dbgwcr[n];
        if (wp->hitattrs.user) {
            /*
             * An unprivileged access (e.g. LDRT/STRT) matches
             * watchpoints as if performed at EL0, regardless of the
             * EL the CPU is actually in.
             */
            access_el = 0;
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

        /* Breakpoint must exist and be set at the current PC */
        if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
            return false;
        }
        cr = env->cp15.dbgbcr[n];
    }

    /*
     * SSC (security state control), HMC (higher mode control) and
     * PAC (privilege of access control) together select the security
     * states and ELs in which the debug event fires.  Unhandled
     * combinations are CONSTRAINED UNPREDICTABLE; the switches below
     * implement one permitted choice.  The enable (E) bit is not
     * rechecked here — the hit flags above imply it.
     * NOTE(review): confirm the SSC encodings against the
     * DBGBCR/DBGWCR field descriptions in the ARM ARM.
     */
    pac = extract64(cr, 1, 2);
    hmc = extract64(cr, 13, 1);
    ssc = extract64(cr, 14, 2);

    switch (ssc) {
    case 0:
        /* Both security states */
        break;
    case 1:
    case 3:
        /* Non-secure only */
        if (is_secure) {
            return false;
        }
        break;
    case 2:
        /* Secure only */
        if (!is_secure) {
            return false;
        }
        break;
    }

    switch (access_el) {
    case 3:
    case 2:
        /* EL2/EL3 matches require HMC set */
        if (!hmc) {
            return false;
        }
        break;
    case 1:
        /* PAC bit 0 enables EL1 matches */
        if (extract32(pac, 0, 1) == 0) {
            return false;
        }
        break;
    case 0:
        /* PAC bit 1 enables EL0 matches */
        if (extract32(pac, 1, 1) == 0) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }

    wt = extract64(cr, 20, 1);
    lbn = extract64(cr, 16, 4);

    /* Linked breakpoints/watchpoints: the link target must also match */
    if (wt && !linked_bp_matches(cpu, lbn)) {
        return false;
    }

    return true;
}
1324
/*
 * Return true if any guest watchpoint currently matches.  Nothing
 * matches when MDSCR_EL1 bit 15 (monitor debug enable) is clear or
 * debug exceptions cannot be generated from the current state.
 */
static bool check_watchpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
        if (bp_wp_matches(cpu, n, true)) {
            return true;
        }
    }
    return false;
}
1345
/*
 * Return true if any guest breakpoint currently matches.  Nothing
 * matches when MDSCR_EL1 bit 15 (monitor debug enable) is clear or
 * debug exceptions cannot be generated from the current state.
 */
static bool check_breakpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
        if (bp_wp_matches(cpu, n, false)) {
            return true;
        }
    }
    return false;
}
1366
1367void HELPER(check_breakpoints)(CPUARMState *env)
1368{
1369 ARMCPU *cpu = arm_env_get_cpu(env);
1370
1371 if (check_breakpoints(cpu)) {
1372 HELPER(exception_internal(env, EXCP_DEBUG));
1373 }
1374}
1375
/*
 * Called by core code when a CPU watchpoint fires; returns true if
 * the watchpoint should actually be delivered to the guest given the
 * current debug configuration.
 */
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
{
    ARMCPU *cpu = ARM_CPU(cs);

    return check_watchpoints(cpu);
}
1385
1386vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
1387{
1388 ARMCPU *cpu = ARM_CPU(cs);
1389 CPUARMState *env = &cpu->env;
1390
1391
1392
1393
1394
1395
1396
1397 if (arm_sctlr_b(env)) {
1398 if (len == 1) {
1399 addr ^= 3;
1400 } else if (len == 2) {
1401 addr ^= 2;
1402 }
1403 }
1404
1405 return addr;
1406}
1407
/*
 * Called by core code when a watchpoint or breakpoint fires: convert
 * guest-visible (BP_CPU) debug events into guest debug exceptions,
 * leaving gdbstub events for the top-level loop to handle.
 */
void arm_debug_excp_handler(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit) {
        if (wp_hit->flags & BP_CPU) {
            bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
            bool same_el = arm_debug_target_el(env) == arm_current_el(env);

            /* Consume the hit so it isn't re-reported */
            cs->watchpoint_hit = NULL;

            env->exception.fsr = arm_debug_exception_fsr(env);
            env->exception.vaddress = wp_hit->hitaddr;
            raise_exception(env, EXCP_DATA_ABORT,
                            syn_watchpoint(same_el, 0, wnr),
                            arm_debug_target_el(env));
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
        bool same_el = (arm_debug_target_el(env) == arm_current_el(env));

        /*
         * GDB breakpoints take priority, and if no CPU breakpoint has
         * fired (e.g. this is a single-step internal exception) there
         * is no guest exception to raise.
         */
        if (cpu_breakpoint_test(cs, pc, BP_GDB)
            || !cpu_breakpoint_test(cs, pc, BP_CPU)) {
            return;
        }

        env->exception.fsr = arm_debug_exception_fsr(env);
        /*
         * Report 0 for the fault address rather than a stale value.
         * NOTE(review): the FAR for breakpoints is presumably
         * architecturally UNKNOWN — confirm.
         */
        env->exception.vaddress = 0;
        raise_exception(env, EXCP_PREFETCH_ABORT,
                        syn_breakpoint(same_el),
                        arm_debug_target_el(env));
    }
}
1455
1456
1457
1458
1459
1460
1461
1462uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
1463{
1464 int shift = i & 0xff;
1465 if (shift >= 32) {
1466 if (shift == 32)
1467 env->CF = x & 1;
1468 else
1469 env->CF = 0;
1470 return 0;
1471 } else if (shift != 0) {
1472 env->CF = (x >> (32 - shift)) & 1;
1473 return x << shift;
1474 }
1475 return x;
1476}
1477
1478uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
1479{
1480 int shift = i & 0xff;
1481 if (shift >= 32) {
1482 if (shift == 32)
1483 env->CF = (x >> 31) & 1;
1484 else
1485 env->CF = 0;
1486 return 0;
1487 } else if (shift != 0) {
1488 env->CF = (x >> (shift - 1)) & 1;
1489 return x >> shift;
1490 }
1491 return x;
1492}
1493
1494uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
1495{
1496 int shift = i & 0xff;
1497 if (shift >= 32) {
1498 env->CF = (x >> 31) & 1;
1499 return (int32_t)x >> 31;
1500 } else if (shift != 0) {
1501 env->CF = (x >> (shift - 1)) & 1;
1502 return (int32_t)x >> shift;
1503 }
1504 return x;
1505}
1506
1507uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
1508{
1509 int shift1, shift;
1510 shift1 = i & 0xff;
1511 shift = shift1 & 0x1f;
1512 if (shift == 0) {
1513 if (shift1 != 0)
1514 env->CF = (x >> 31) & 1;
1515 return x;
1516 } else {
1517 env->CF = (x >> (shift - 1)) & 1;
1518 return ((uint32_t)x >> shift) | (x << (32 - shift));
1519 }
1520}
1521