1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include "qemu/osdep.h"
20#include "qemu/log.h"
21#include "qemu/main-loop.h"
22#include "cpu.h"
23#include "exec/helper-proto.h"
24#include "internals.h"
25#include "exec/exec-all.h"
26#include "exec/cpu_ldst.h"
27
28#define SIGNBIT (uint32_t)0x80000000
29#define SIGNBIT64 ((uint64_t)1 << 63)
30
/*
 * Raise an architectural exception: record the syndrome and target EL in
 * the CPU exception state and longjmp back to the main execution loop.
 * Never returns.  Internal (QEMU-only) exceptions must instead go through
 * HELPER(exception_internal), which carries no syndrome.
 */
static void raise_exception(CPUARMState *env, uint32_t excp,
                            uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(!excp_is_internal(excp));
    cs->exception_index = excp;
    env->exception.syndrome = syndrome;
    env->exception.target_el = target_el;
    cpu_loop_exit(cs);
}
42
/*
 * Return the exception level an exception raised "to the usual target"
 * should go to: the current EL, but never below EL1, and routed to EL3
 * when Secure EL1 does not exist (EL3 is AArch32 Monitor mode).
 */
static int exception_target_el(CPUARMState *env)
{
    int target_el = MAX(1, arm_current_el(env));

    /*
     * If EL3 is AArch32, then there is no Secure EL1: secure PL1 code
     * runs in Monitor mode, so an exception that would target EL1 from
     * Secure state must go to EL3 instead.
     */
    if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
        target_el = 3;
    }

    return target_el;
}
56
57uint32_t HELPER(neon_tbl)(uint32_t ireg, uint32_t def, void *vn,
58 uint32_t maxindex)
59{
60 uint32_t val, shift;
61 uint64_t *table = vn;
62
63 val = 0;
64 for (shift = 0; shift < 32; shift += 8) {
65 uint32_t index = (ireg >> shift) & 0xff;
66 if (index < maxindex) {
67 uint32_t tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
68 val |= tmp << shift;
69 } else {
70 val |= def & (0xff << shift);
71 }
72 }
73 return val;
74}
75
76#if !defined(CONFIG_USER_ONLY)
77
/*
 * Build the final data-abort syndrome by combining the translate-time
 * template syndrome with the runtime fault information.
 */
static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
                                            unsigned int target_el,
                                            bool same_el, bool ea,
                                            bool s1ptw, bool is_write,
                                            int fsc)
{
    uint32_t syn;

    /*
     * ISV is only valid for data aborts routed to EL2 and never for
     * stage-1 page table walks faulting on stage 2, and it is only
     * set for certain kinds of load/stores.  If the template syndrome
     * does not have ISV set, leave it cleared and emit the no-ISS form.
     */
    if (!(template_syn & ARM_EL_ISV) || target_el != 2 || s1ptw) {
        syn = syn_data_abort_no_iss(same_el,
                                    ea, 0, s1ptw, is_write, fsc);
    } else {
        /*
         * Fields ISV, SAS, SSE, SRT, SF and AR come from the template
         * syndrome created at translation time; build the runtime
         * syndrome with the remaining fields zeroed, then OR in the
         * template.
         */
        syn = syn_data_abort_with_iss(same_el,
                                      0, 0, 0, 0, 0,
                                      ea, 0, s1ptw, is_write, fsc,
                                      false);
        /* Merge the runtime syndrome with the template syndrome.  */
        syn |= template_syn;
    }
    return syn;
}
114
/*
 * Deliver an MMU fault as the appropriate prefetch/data abort exception,
 * filling in FSR, FAR/vaddress and the syndrome from @fi.  Does not return
 * (raise_exception longjmps out).
 */
static void deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
                          int mmu_idx, ARMMMUFaultInfo *fi)
{
    CPUARMState *env = &cpu->env;
    int target_el;
    bool same_el;
    uint32_t syn, exc, fsr, fsc;
    ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);

    target_el = exception_target_el(env);
    if (fi->stage2) {
        /* Stage-2 faults always go to EL2 and report the IPA in HPFAR_EL2. */
        target_el = 2;
        env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
    }
    same_el = (arm_current_el(env) == target_el);

    if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
        arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
        /*
         * LPAE format fault status register: bottom 6 bits are
         * the status code, in the same form as needed for the syndrome.
         */
        fsr = arm_fi_to_lfsc(fi);
        fsc = extract32(fsr, 0, 6);
    } else {
        fsr = arm_fi_to_sfsc(fi);
        /*
         * Short format FSR: this fault will never actually be reported
         * to an EL that uses a syndrome register.  Use a (currently)
         * reserved FSR code in case the constructed syndrome does leak
         * into the guest somehow.
         */
        fsc = 0x3f;
    }

    if (access_type == MMU_INST_FETCH) {
        syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc);
        exc = EXCP_PREFETCH_ABORT;
    } else {
        syn = merge_syn_data_abort(env->exception.syndrome, target_el,
                                   same_el, fi->ea, fi->s1ptw,
                                   access_type == MMU_DATA_STORE,
                                   fsc);
        if (access_type == MMU_DATA_STORE
            && arm_feature(env, ARM_FEATURE_V6)) {
            /* Set the short-format FSR WnR bit for writes (v6 and later). */
            fsr |= (1 << 11);
        }
        exc = EXCP_DATA_ABORT;
    }

    env->exception.vaddress = addr;
    env->exception.fsr = fsr;
    raise_exception(env, exc, syn, target_el);
}
167
168
169
170
171
/*
 * Try to fill the TLB for @addr and raise the appropriate guest exception
 * if the translation fails.  @retaddr is the host return address of the
 * memory access in generated code, used to restore guest CPU state.
 */
void tlb_fill(CPUState *cs, target_ulong addr, int size,
              MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    bool ret;
    ARMMMUFaultInfo fi = {};

    ret = arm_tlb_fill(cs, addr, access_type, mmu_idx, &fi);
    if (unlikely(ret)) {
        ARMCPU *cpu = ARM_CPU(cs);

        /* now we have a real cpu fault */
        cpu_restore_state(cs, retaddr, true);

        deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
    }
}
188
189
/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr, true);

    fi.type = ARMFault_Alignment;
    deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
}
203
204
205
206
207
/*
 * Handle a memory system error response (eg "no device/memory present
 * at address") by raising an external abort exception.
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr, true);

    fi.ea = arm_extabort_type(response);
    fi.type = ARMFault_SyncExternal;
    deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
}
224
225#endif
226
227uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
228{
229 uint32_t res = a + b;
230 if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT))
231 env->QF = 1;
232 return res;
233}
234
235uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
236{
237 uint32_t res = a + b;
238 if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
239 env->QF = 1;
240 res = ~(((int32_t)a >> 31) ^ SIGNBIT);
241 }
242 return res;
243}
244
245uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
246{
247 uint32_t res = a - b;
248 if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
249 env->QF = 1;
250 res = ~(((int32_t)a >> 31) ^ SIGNBIT);
251 }
252 return res;
253}
254
255uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val)
256{
257 uint32_t res;
258 if (val >= 0x40000000) {
259 res = ~SIGNBIT;
260 env->QF = 1;
261 } else if (val <= (int32_t)0xc0000000) {
262 res = SIGNBIT;
263 env->QF = 1;
264 } else {
265 res = val << 1;
266 }
267 return res;
268}
269
270uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
271{
272 uint32_t res = a + b;
273 if (res < a) {
274 env->QF = 1;
275 res = ~0;
276 }
277 return res;
278}
279
280uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
281{
282 uint32_t res = a - b;
283 if (res > a) {
284 env->QF = 1;
285 res = 0;
286 }
287 return res;
288}
289
290
291static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
292{
293 int32_t top;
294 uint32_t mask;
295
296 top = val >> shift;
297 mask = (1u << shift) - 1;
298 if (top > 0) {
299 env->QF = 1;
300 return mask;
301 } else if (top < -1) {
302 env->QF = 1;
303 return ~mask;
304 }
305 return val;
306}
307
308
309static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
310{
311 uint32_t max;
312
313 max = (1u << shift) - 1;
314 if (val < 0) {
315 env->QF = 1;
316 return 0;
317 } else if (val > max) {
318 env->QF = 1;
319 return max;
320 }
321 return val;
322}
323
324
/* Signed saturate x to a (shift + 1)-bit signed value; sets QF on saturation. */
uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_ssat(env, x, shift);
}
329
330
331uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
332{
333 uint32_t res;
334
335 res = (uint16_t)do_ssat(env, (int16_t)x, shift);
336 res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
337 return res;
338}
339
340
/* Unsigned saturate x to a shift-bit unsigned value; sets QF on saturation. */
uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_usat(env, x, shift);
}
345
346
347uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
348{
349 uint32_t res;
350
351 res = (uint16_t)do_usat(env, (int16_t)x, shift);
352 res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
353 return res;
354}
355
/* SETEND instruction: toggle the CPSR.E (data endianness) bit. */
void HELPER(setend)(CPUARMState *env)
{
    env->uncached_cpsr ^= CPSR_E;
}
360
361
362
363
364
/*
 * Check whether a WFx (WFI/WFE) instruction is set up to be trapped.
 * Returns the target EL (1-3) if the instruction is to be trapped;
 * otherwise returns 0, indicating it is not trapped.
 */
static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
{
    int cur_el = arm_current_el(env);
    uint64_t mask;

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile cores can never trap WFI/WFE. */
        return 0;
    }

    /*
     * If we are currently in EL0, check whether SCTLR is set up to trap
     * WFx instructions to EL1.  These trap bits don't exist in v7.
     */
    if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
        int target_el;

        mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
        if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) {
            /* Secure EL1 doesn't exist; Secure PL1 is at EL3. */
            target_el = 3;
        } else {
            target_el = 1;
        }

        /* nTWx == 0 means the instruction IS trapped. */
        if (!(env->cp15.sctlr_el[target_el] & mask)) {
            return target_el;
        }
    }

    /*
     * Not trapping to EL1: check the EL2 trap bits.  No ARM_FEATURE
     * check is needed: if HCR_EL2 doesn't exist these bits are zero,
     * indicating no trap.
     */
    if (cur_el < 2 && !arm_is_secure(env)) {
        mask = (is_wfe) ? HCR_TWE : HCR_TWI;
        if (env->cp15.hcr_el2 & mask) {
            return 2;
        }
    }

    /* Not trapping to EL1 or EL2: trap to EL3 if SCR_EL3 requires it. */
    if (cur_el < 3) {
        mask = (is_wfe) ? SCR_TWE : SCR_TWI;
        if (env->cp15.scr_el3 & mask) {
            return 3;
        }
    }

    return 0;
}
415
416void HELPER(wfi)(CPUARMState *env, uint32_t insn_len)
417{
418 CPUState *cs = CPU(arm_env_get_cpu(env));
419 int target_el = check_wfx_trap(env, false);
420
421 if (cpu_has_work(cs)) {
422
423
424
425 return;
426 }
427
428 if (target_el) {
429 env->pc -= insn_len;
430 raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0, insn_len == 2),
431 target_el);
432 }
433
434 cs->exception_index = EXCP_HLT;
435 cs->halted = 1;
436 cpu_loop_exit(cs);
437}
438
void HELPER(wfe)(CPUARMState *env)
{
    /*
     * WFE is a hint instruction that is semantically different from YIELD,
     * but we currently implement it identically: simply yield back to the
     * top level loop rather than going into a "low power state" (i.e.
     * halting until some event occurs).  Because we never actually halt,
     * we never need to take a configurable trap to a lower exception level.
     */
    HELPER(yield)(env);
}
450
void HELPER(yield)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    /*
     * YIELD is a non-trappable hint that generally indicates the guest is
     * busy-waiting.  Exit to the top-level loop with EXCP_YIELD so a more
     * deserving VCPU gets a chance to run; the CPU is not halted.
     */
    cs->exception_index = EXCP_YIELD;
    cpu_loop_exit(cs);
}
463
464
465
466
467
468
469
/*
 * Raise an internal-to-QEMU exception.  This is limited to only those
 * EXCP values which are special cases for QEMU to interrupt execution
 * and not to be used for exceptions which are passed to the guest
 * (those must all have syndrome information and thus should use
 * exception_with_syndrome).
 */
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}
478
479
/* Raise an exception with the specified syndrome register value. */
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                     uint32_t syndrome, uint32_t target_el)
{
    raise_exception(env, excp, syndrome, target_el);
}
485
486
487
488
/*
 * Raise an EXCP_BKPT with the specified syndrome register value,
 * targeting the correct exception level for debug exceptions.
 */
void HELPER(exception_bkpt_insn)(CPUARMState *env, uint32_t syndrome)
{
    /* FSR will only be used if the debug target EL is AArch32. */
    env->exception.fsr = arm_debug_exception_fsr(env);
    /*
     * FAR is UNKNOWN: clear vaddress to avoid potentially exposing
     * values to the guest that it shouldn't be able to see at its
     * exception/security level.
     */
    env->exception.vaddress = 0;
    raise_exception(env, EXCP_BKPT, syndrome, arm_debug_target_el(env));
}
500
/* Read the CPSR with the execution-state and reserved bits masked out. */
uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
    return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED);
}
505
/* Write @val to the CPSR fields selected by @mask, as if by an MSR insn. */
void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask, CPSRWriteByInstr);
}
510
511
/* Write the CPSR for a 32-bit exception return. */
void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
{
    /* The EL-change hooks may touch QEMU global state: take the BQL. */
    qemu_mutex_lock_iothread();
    arm_call_pre_el_change_hook(arm_env_get_cpu(env));
    qemu_mutex_unlock_iothread();

    cpsr_write(env, val, CPSR_ERET_MASK, CPSRWriteExceptionReturn);

    /*
     * Generated code has already stored the new PC value, but without
     * masking out its low bits, because which bits need masking depends
     * on whether we're returning to Thumb or ARM state.  Do the masking
     * now that the new CPSR (and hence env->thumb) is known.
     */
    env->regs[15] &= (env->thumb ? ~1 : ~3);

    qemu_mutex_lock_iothread();
    arm_call_el_change_hook(arm_env_get_cpu(env));
    qemu_mutex_unlock_iothread();
}
531
532
533uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
534{
535 uint32_t val;
536
537 if (regno == 13) {
538 val = env->banked_r13[BANK_USRSYS];
539 } else if (regno == 14) {
540 val = env->banked_r14[BANK_USRSYS];
541 } else if (regno >= 8
542 && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
543 val = env->usr_regs[regno - 8];
544 } else {
545 val = env->regs[regno];
546 }
547 return val;
548}
549
550void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
551{
552 if (regno == 13) {
553 env->banked_r13[BANK_USRSYS] = val;
554 } else if (regno == 14) {
555 env->banked_r14[BANK_USRSYS] = val;
556 } else if (regno >= 8
557 && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
558 env->usr_regs[regno - 8] = val;
559 } else {
560 env->regs[regno] = val;
561 }
562}
563
564void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
565{
566 if ((env->uncached_cpsr & CPSR_M) == mode) {
567 env->regs[13] = val;
568 } else {
569 env->banked_r13[bank_number(mode)] = val;
570 }
571}
572
/*
 * Read r13 (SP) as seen from @mode: the live register if we are
 * currently in that mode, otherwise the banked copy.
 */
uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SYS) {
        /*
         * SRS instruction is UNPREDICTABLE if it uses System mode;
         * we choose to UNDEF.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if ((env->uncached_cpsr & CPSR_M) == mode) {
        return env->regs[13];
    } else {
        return env->banked_r13[bank_number(mode)];
    }
}
589
static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode,
                                      uint32_t regno)
{
    /*
     * Raise an exception if the requested access is one of the
     * UNPREDICTABLE cases; otherwise return.  This broadly corresponds
     * to the Arm ARM pseudocode BankedRegisterAccessValid() and
     * SPSRAccessValid(), except that some cases have already been
     * rejected at translate time.
     */
    int curmode = env->uncached_cpsr & CPSR_M;

    if (curmode == tgtmode) {
        /* Accessing the banked copy of the current mode is UNPREDICTABLE. */
        goto undef;
    }

    if (tgtmode == ARM_CPU_MODE_USR) {
        switch (regno) {
        case 8 ... 12:
            /* User r8-r12 are only distinct when we're in FIQ mode. */
            if (curmode != ARM_CPU_MODE_FIQ) {
                goto undef;
            }
            break;
        case 13:
            /* System mode shares SP with User mode. */
            if (curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        case 14:
            /* Hyp has no banked LR; System shares LR with User. */
            if (curmode == ARM_CPU_MODE_HYP || curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        default:
            break;
        }
    }

    if (tgtmode == ARM_CPU_MODE_HYP) {
        switch (regno) {
        case 17: /* ELR_Hyp */
            if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) {
                goto undef;
            }
            break;
        default:
            /* All other Hyp banked registers: Monitor mode only. */
            if (curmode != ARM_CPU_MODE_MON) {
                goto undef;
            }
            break;
        }
    }

    return;

undef:
    raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                    exception_target_el(env));
}
647
/*
 * MSR (banked): write @value to the @tgtmode-banked copy of a register.
 * regno encoding: 16 = SPSR, 17 = ELR_Hyp, 13/14 = banked SP/LR,
 * 8..12 = r8..r12 of USR or FIQ mode.
 */
void HELPER(msr_banked)(CPUARMState *env, uint32_t value, uint32_t tgtmode,
                        uint32_t regno)
{
    /* UNDEF on the UNPREDICTABLE combinations before touching any state. */
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        env->banked_spsr[bank_number(tgtmode)] = value;
        break;
    case 17: /* ELR_Hyp */
        env->elr_el[2] = value;
        break;
    case 13:
        env->banked_r13[bank_number(tgtmode)] = value;
        break;
    case 14:
        env->banked_r14[bank_number(tgtmode)] = value;
        break;
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            env->usr_regs[regno - 8] = value;
            break;
        case ARM_CPU_MODE_FIQ:
            env->fiq_regs[regno - 8] = value;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        g_assert_not_reached();
    }
}
682
/*
 * MRS (banked): read the @tgtmode-banked copy of a register.
 * regno encoding matches msr_banked: 16 = SPSR, 17 = ELR_Hyp,
 * 13/14 = banked SP/LR, 8..12 = r8..r12 of USR or FIQ mode.
 */
uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno)
{
    /* UNDEF on the UNPREDICTABLE combinations before reading any state. */
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        return env->banked_spsr[bank_number(tgtmode)];
    case 17: /* ELR_Hyp */
        return env->elr_el[2];
    case 13:
        return env->banked_r13[bank_number(tgtmode)];
    case 14:
        return env->banked_r14[bank_number(tgtmode)];
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            return env->usr_regs[regno - 8];
        case ARM_CPU_MODE_FIQ:
            return env->fiq_regs[regno - 8];
        default:
            g_assert_not_reached();
        }
    default:
        g_assert_not_reached();
    }
}
709
/*
 * Run the runtime access checks for a coprocessor/system register access
 * and raise the appropriate exception if the access is not permitted.
 * Returns normally if the access is allowed.
 */
void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
                                 uint32_t isread)
{
    const ARMCPRegInfo *ri = rip;
    int target_el;

    /* XScale: coprocessor access is additionally gated by the CPAR bits. */
    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    if (!ri->accessfn) {
        return;
    }

    switch (ri->accessfn(env, ri, isread)) {
    case CP_ACCESS_OK:
        return;
    case CP_ACCESS_TRAP:
        target_el = exception_target_el(env);
        break;
    case CP_ACCESS_TRAP_EL2:
        /*
         * Requesting a trap to EL2 when we're in EL3 or Secure state is
         * a bug in the access function.
         */
        assert(!arm_is_secure(env) && arm_current_el(env) != 3);
        target_el = 2;
        break;
    case CP_ACCESS_TRAP_EL3:
        target_el = 3;
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED:
        target_el = exception_target_el(env);
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL2:
        target_el = 2;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL3:
        target_el = 3;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_FP_EL2:
        target_el = 2;
        /*
         * Since we are an implementation that takes exceptions on a
         * trapped conditional insn only if the insn has passed its
         * condition code check, we take the IMPDEF choice to always
         * report CV=1 COND=0xe (which is also the required value for
         * AArch64 traps).
         */
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    case CP_ACCESS_TRAP_FP_EL3:
        target_el = 3;
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    default:
        g_assert_not_reached();
    }

    raise_exception(env, EXCP_UDEF, syndrome, target_el);
}
772
773void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
774{
775 const ARMCPRegInfo *ri = rip;
776
777 if (ri->type & ARM_CP_IO) {
778 qemu_mutex_lock_iothread();
779 ri->writefn(env, ri, value);
780 qemu_mutex_unlock_iothread();
781 } else {
782 ri->writefn(env, ri, value);
783 }
784}
785
786uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
787{
788 const ARMCPRegInfo *ri = rip;
789 uint32_t res;
790
791 if (ri->type & ARM_CP_IO) {
792 qemu_mutex_lock_iothread();
793 res = ri->readfn(env, ri);
794 qemu_mutex_unlock_iothread();
795 } else {
796 res = ri->readfn(env, ri);
797 }
798
799 return res;
800}
801
802void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
803{
804 const ARMCPRegInfo *ri = rip;
805
806 if (ri->type & ARM_CP_IO) {
807 qemu_mutex_lock_iothread();
808 ri->writefn(env, ri, value);
809 qemu_mutex_unlock_iothread();
810 } else {
811 ri->writefn(env, ri, value);
812 }
813}
814
815uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
816{
817 const ARMCPRegInfo *ri = rip;
818 uint64_t res;
819
820 if (ri->type & ARM_CP_IO) {
821 qemu_mutex_lock_iothread();
822 res = ri->readfn(env, ri);
823 qemu_mutex_unlock_iothread();
824 } else {
825 res = ri->readfn(env, ri);
826 }
827
828 return res;
829}
830
/*
 * MSR (immediate) to a PSTATE field: SPSel (op 0x05), DAIFSet (0x1e)
 * or DAIFClr (0x1f).
 */
void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm)
{
    /*
     * EL0 may only perform these writes when SCTLR_EL1.UMA is set;
     * otherwise the access traps to EL1 as an uncategorized sysreg trap.
     * NOTE(review): this gates all three ops on UMA, including SPSel --
     * presumably SPSel is never generated for EL0; confirm at the
     * translate-time callers.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
        uint32_t syndrome = syn_aa64_sysregtrap(0, extract32(op, 0, 3),
                                                extract32(op, 3, 3), 4,
                                                imm, 0x1f, 0);
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    switch (op) {
    case 0x05: /* SPSel */
        update_spsel(env, imm);
        break;
    case 0x1e: /* DAIFSet */
        env->daif |= (imm << 6) & PSTATE_DAIF;
        break;
    case 0x1f: /* DAIFClr */
        env->daif &= ~((imm << 6) & PSTATE_DAIF);
        break;
    default:
        g_assert_not_reached();
    }
}
858
/* Clear the PSTATE software-step (SS) bit. */
void HELPER(clear_pstate_ss)(CPUARMState *env)
{
    env->pstate &= ~PSTATE_SS;
}
863
/*
 * Pre-execution checks for the HVC instruction: UNDEF it when EL2 does
 * not exist or hypervisor calls are disabled, unless it is a PSCI call.
 */
void HELPER(pre_hvc)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    /* NOTE(review): hard-coded false here -- the secure-state check below
     * is currently dead code; confirm whether real secure state should be
     * used instead.
     */
    bool secure = false;
    bool undef;

    if (arm_is_psci_call(cpu, EXCP_HVC)) {
        /*
         * If PSCI is enabled and this looks like a valid PSCI call then
         * suppress the UNDEF -- we'll catch the HVC exception and
         * implement the PSCI call.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* If EL2 doesn't exist, HVC always UNDEFs. */
        undef = true;
    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* With EL3 present, SCR_EL3.HCE controls HVC enablement. */
        undef = !(env->cp15.scr_el3 & SCR_HCE);
    } else {
        /* No EL3: HCR_EL2.HCD disables HVC. */
        undef = env->cp15.hcr_el2 & HCR_HCD;
    }

    /*
     * In ARMv7 and ARMv8/AArch32, HVC is UNDEF in secure state.
     * For ARMv8/AArch64, HVC is allowed from EL3.
     * HVC from EL0 has already been trapped at translation time.
     */
    if (secure && (!is_a64(env) || cur_el == 1)) {
        undef = true;
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}
903
/*
 * Pre-execution checks for the SMC instruction: route it to EL2 when
 * HCR_EL2.TSC requires that, UNDEF it when disabled or when EL3 is
 * absent, unless it is a PSCI call.
 */
void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool smd = env->cp15.scr_el3 & SCR_SMD;
    /*
     * On ARMv8 with EL3 AArch64, SMD applies to both S and NS state.
     * On ARMv8 with EL3 AArch32, or ARMv7 with the Virtualization
     * extensions, SMD only applies to NS state.
     */
    bool undef = arm_feature(env, ARM_FEATURE_AARCH64) ? smd : smd && !secure;

    if (!arm_feature(env, ARM_FEATURE_EL3) &&
        cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /*
         * If we have no EL3 then SMC always UNDEFs and can't be trapped
         * to EL2.  PSCI-via-SMC is a sort of ersatz EL3 firmware within
         * QEMU, and we want an EL2 guest to be able to forbid its EL1
         * from making PSCI calls into QEMU's "firmware" via HCR.TSC, so
         * for these purposes treat PSCI-via-SMC as implying an EL3.
         */
        undef = true;
    } else if (!secure && cur_el == 1 && (env->cp15.hcr_el2 & HCR_TSC)) {
        /*
         * In NS EL1, HCR-controlled routing to EL2 has priority over
         * SMD.  This also covers forbidding PSCI calls from an EL1
         * guest via HCR.TSC (see above).
         */
        raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
    }

    /*
     * If PSCI is enabled and this looks like a valid PSCI call then
     * suppress the UNDEF -- we'll catch the SMC exception and
     * implement the PSCI call.
     */
    if (undef && !arm_is_psci_call(cpu, EXCP_SMC)) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}
946
/*
 * Return the exception level that this SPSR value is requesting a
 * return to, or -1 if the value is illegal (an illegal exception
 * return).
 */
static int el_from_spsr(uint32_t spsr)
{
    if (spsr & PSTATE_nRW) {
        /* Return to AArch32: map the CPSR mode field to an EL. */
        switch (spsr & CPSR_M) {
        case ARM_CPU_MODE_USR:
            return 0;
        case ARM_CPU_MODE_HYP:
            return 2;
        case ARM_CPU_MODE_FIQ:
        case ARM_CPU_MODE_IRQ:
        case ARM_CPU_MODE_SVC:
        case ARM_CPU_MODE_ABT:
        case ARM_CPU_MODE_UND:
        case ARM_CPU_MODE_SYS:
            return 1;
        case ARM_CPU_MODE_MON:
            /*
             * Returning to Mon from AArch64 is never possible, so this
             * is an illegal return (falls through to the default).
             */
        default:
            return -1;
        }
    } else {
        /* Return to AArch64: EL is in M[3:2], with validity checks. */
        if (extract32(spsr, 1, 1)) {
            /* Return with reserved M[1] bit set. */
            return -1;
        }
        if (extract32(spsr, 0, 4) == 1) {
            /* Return to EL0 with the M[0] (SP_ELx select) bit set. */
            return -1;
        }
        return extract32(spsr, 2, 2);
    }
}
984
/*
 * Perform an AArch64 ERET: validate the SPSR, restore PSTATE/CPSR and
 * the PC from ELR_ELx, and run the EL-change hooks.  Illegal returns
 * follow the architecturally mandated PSTATE.IL behaviour.
 */
void HELPER(exception_return)(CPUARMState *env)
{
    int cur_el = arm_current_el(env);
    unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
    uint32_t spsr = env->banked_spsr[spsr_idx];
    int new_el;
    bool return_to_aa64 = (spsr & PSTATE_nRW) == 0;

    aarch64_save_sp(env, cur_el);

    arm_clear_exclusive(env);

    /*
     * We must squash the PSTATE.SS bit to zero unless both of the
     * following hold:
     *  1. debug exceptions are currently disabled
     *  2. singlestep will be active in the EL we return to
     * We check 1 here and 2 after we've done the pstate/cpsr write() to
     * transition to the EL we're going to.
     */
    if (arm_generate_debug_exceptions(env)) {
        spsr &= ~PSTATE_SS;
    }

    new_el = el_from_spsr(spsr);
    if (new_el == -1) {
        goto illegal_return;
    }
    if (new_el > cur_el
        || (new_el == 2 && !arm_feature(env, ARM_FEATURE_EL2))) {
        /*
         * Disallow return to an EL which is unimplemented or higher
         * than the current one.
         */
        goto illegal_return;
    }

    if (new_el != 0 && arm_el_is_aa64(env, new_el) != return_to_aa64) {
        /* Return to an EL which is configured for a different register width */
        goto illegal_return;
    }

    if (new_el == 2 && arm_is_secure_below_el3(env)) {
        /* Return to the non-existent secure-EL2 */
        goto illegal_return;
    }

    if (new_el == 1 && (env->cp15.hcr_el2 & HCR_TGE)
        && !arm_is_secure_below_el3(env)) {
        /* HCR.TGE routes all NS EL1 execution to EL2: EL1 is unreachable. */
        goto illegal_return;
    }

    qemu_mutex_lock_iothread();
    arm_call_pre_el_change_hook(arm_env_get_cpu(env));
    qemu_mutex_unlock_iothread();

    if (!return_to_aa64) {
        env->aarch64 = 0;
        /*
         * We do a raw CPSR write because aarch64_sync_64_to_32() will
         * sort the register banks out for us, and we've already caught
         * all the bad-mode cases in el_from_spsr().
         */
        cpsr_write(env, spsr, ~0, CPSRWriteRaw);
        if (!arm_singlestep_active(env)) {
            env->uncached_cpsr &= ~PSTATE_SS;
        }
        aarch64_sync_64_to_32(env);

        /* Mask the PC per the return target's instruction set state. */
        if (spsr & CPSR_T) {
            env->regs[15] = env->elr_el[cur_el] & ~0x1;
        } else {
            env->regs[15] = env->elr_el[cur_el] & ~0x3;
        }
        qemu_log_mask(CPU_LOG_INT, "Exception return from AArch64 EL%d to "
                      "AArch32 EL%d PC 0x%" PRIx32 "\n",
                      cur_el, new_el, env->regs[15]);
    } else {
        env->aarch64 = 1;
        pstate_write(env, spsr);
        if (!arm_singlestep_active(env)) {
            env->pstate &= ~PSTATE_SS;
        }
        aarch64_restore_sp(env, new_el);
        env->pc = env->elr_el[cur_el];
        qemu_log_mask(CPU_LOG_INT, "Exception return from AArch64 EL%d to "
                      "AArch64 EL%d PC 0x%" PRIx64 "\n",
                      cur_el, new_el, env->pc);
    }

    qemu_mutex_lock_iothread();
    arm_call_el_change_hook(arm_env_get_cpu(env));
    qemu_mutex_unlock_iothread();

    return;

illegal_return:
    /*
     * Illegal return events of various kinds have architecturally
     * mandated behaviour:
     *  restore NZCV and DAIF from SPSR_ELx
     *  set PSTATE.IL
     *  restore PC from ELR_ELx
     *  no change to exception level, execution state or stack pointer
     */
    env->pstate |= PSTATE_IL;
    env->pc = env->elr_el[cur_el];
    spsr &= PSTATE_NZCV | PSTATE_DAIF;
    spsr |= pstate_read(env) & ~(PSTATE_NZCV | PSTATE_DAIF);
    pstate_write(env, spsr);
    if (!arm_singlestep_active(env)) {
        env->pstate &= ~PSTATE_SS;
    }
    qemu_log_mask(LOG_GUEST_ERROR, "Illegal exception return at EL%d: "
                  "resuming execution at 0x%" PRIx64 "\n", cur_el, env->pc);
}
1097
1098
1099static bool linked_bp_matches(ARMCPU *cpu, int lbn)
1100{
1101 CPUARMState *env = &cpu->env;
1102 uint64_t bcr = env->cp15.dbgbcr[lbn];
1103 int brps = extract32(cpu->dbgdidr, 24, 4);
1104 int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
1105 int bt;
1106 uint32_t contextidr;
1107
1108
1109
1110
1111
1112
1113
1114 if (lbn > brps || lbn < (brps - ctx_cmps)) {
1115 return false;
1116 }
1117
1118 bcr = env->cp15.dbgbcr[lbn];
1119
1120 if (extract64(bcr, 0, 1) == 0) {
1121
1122 return false;
1123 }
1124
1125 bt = extract64(bcr, 20, 4);
1126
1127
1128
1129
1130
1131 contextidr = extract64(env->cp15.contextidr_el[1], 0, 32);
1132
1133 switch (bt) {
1134 case 3:
1135 if (arm_current_el(env) > 1) {
1136
1137 return false;
1138 }
1139 return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
1140 case 5:
1141 case 9:
1142 case 11:
1143 default:
1144
1145
1146
1147 return false;
1148 }
1149
1150 return false;
1151}
1152
/*
 * Return true if architectural breakpoint/watchpoint @n matches in the
 * current CPU state.  @is_wp selects watchpoint (true) vs breakpoint.
 */
static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
{
    CPUARMState *env = &cpu->env;
    uint64_t cr;
    int pac, hmc, ssc, wt, lbn;
    /*
     * Note that for watchpoints the check is against the CPU security
     * state, not the S/NS attribute of the offending data access.
     */
    bool is_secure = arm_is_secure(env);
    int access_el = arm_current_el(env);

    if (is_wp) {
        CPUWatchpoint *wp = env->cpu_watchpoint[n];

        if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
            return false;
        }
        cr = env->cp15.dbgwcr[n];
        if (wp->hitattrs.user) {
            /*
             * The LDRT/STRT "unprivileged access" instructions match
             * watchpoints as if performed at EL0, even though the CPU
             * is at a higher EL.
             */
            access_el = 0;
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

        if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
            return false;
        }
        cr = env->cp15.dbgbcr[n];
    }

    /*
     * The WATCHPOINT_HIT flag guarantees that the watchpoint is enabled
     * and the address/access type match; for breakpoints we know the
     * address matched.  Check the remaining fields, including linked
     * breakpoints.  We rely on WCR and BCR having the same layout for
     * the LBN, SSC, HMC and PAC/PMC fields.  Some {PAC, HMC, SSC}
     * combinations are reserved; we choose to treat them as if they
     * were valid, which (together with EL3 always being Secure and EL2
     * always Non-secure) simplifies the table from the Arm ARM.
     */
    pac = extract64(cr, 1, 2);
    hmc = extract64(cr, 13, 1);
    ssc = extract64(cr, 14, 2);

    /* Security-state match (SSC field). */
    switch (ssc) {
    case 0:
        break;
    case 1:
    case 3:
        if (is_secure) {
            return false;
        }
        break;
    case 2:
        if (!is_secure) {
            return false;
        }
        break;
    }

    /* Privilege-of-access match (HMC + PAC fields). */
    switch (access_el) {
    case 3:
    case 2:
        if (!hmc) {
            return false;
        }
        break;
    case 1:
        if (extract32(pac, 0, 1) == 0) {
            return false;
        }
        break;
    case 0:
        if (extract32(pac, 1, 1) == 0) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }

    wt = extract64(cr, 20, 1);
    lbn = extract64(cr, 16, 4);

    /* If linked (WT set), the linked context breakpoint must also match. */
    if (wt && !linked_bp_matches(cpu, lbn)) {
        return false;
    }

    return true;
}
1248
/* Return true if any architectural watchpoint currently matches. */
static bool check_watchpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /*
     * If watchpoints are disabled globally (MDSCR_EL1.MDE clear) or we
     * can't take debug exceptions here, then watchpoint firings are
     * ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
        if (bp_wp_matches(cpu, n, true)) {
            return true;
        }
    }
    return false;
}
1269
/* Return true if any architectural breakpoint currently matches. */
static bool check_breakpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /*
     * If breakpoints are disabled globally (MDSCR_EL1.MDE clear) or we
     * can't take debug exceptions here, then breakpoint firings are
     * ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
        if (bp_wp_matches(cpu, n, false)) {
            return true;
        }
    }
    return false;
}
1290
1291void HELPER(check_breakpoints)(CPUARMState *env)
1292{
1293 ARMCPU *cpu = arm_env_get_cpu(env);
1294
1295 if (check_breakpoints(cpu)) {
1296 HELPER(exception_internal(env, EXCP_DEBUG));
1297 }
1298}
1299
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
{
    /*
     * Called by core code when a CPU watchpoint fires; we need to check
     * whether this is also an architectural watchpoint match.
     */
    ARMCPU *cpu = ARM_CPU(cs);

    return check_watchpoints(cpu);
}
1309
1310vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
1311{
1312 ARMCPU *cpu = ARM_CPU(cs);
1313 CPUARMState *env = &cpu->env;
1314
1315
1316
1317
1318
1319
1320
1321 if (arm_sctlr_b(env)) {
1322 if (len == 1) {
1323 addr ^= 3;
1324 } else if (len == 2) {
1325 addr ^= 2;
1326 }
1327 }
1328
1329 return addr;
1330}
1331
void arm_debug_excp_handler(CPUState *cs)
{
    /*
     * Called by core code when a watchpoint or breakpoint fires; we need
     * to decide which one it was and raise the appropriate guest
     * exception (or do nothing, for GDB/singlestep events).
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit) {
        if (wp_hit->flags & BP_CPU) {
            bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
            bool same_el = arm_debug_target_el(env) == arm_current_el(env);

            cs->watchpoint_hit = NULL;

            env->exception.fsr = arm_debug_exception_fsr(env);
            env->exception.vaddress = wp_hit->hitaddr;
            raise_exception(env, EXCP_DATA_ABORT,
                            syn_watchpoint(same_el, 0, wnr),
                            arm_debug_target_el(env));
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
        bool same_el = (arm_debug_target_el(env) == arm_current_el(env));

        /*
         * (1) GDB breakpoints should be handled first.
         * (2) Do not raise a CPU exception if no CPU breakpoint has
         * fired, since singlestep is also done by generating a debug
         * internal exception.
         */
        if (cpu_breakpoint_test(cs, pc, BP_GDB)
            || !cpu_breakpoint_test(cs, pc, BP_CPU)) {
            return;
        }

        env->exception.fsr = arm_debug_exception_fsr(env);
        /*
         * FAR is UNKNOWN: clear vaddress to avoid potentially exposing
         * values to the guest that it shouldn't be able to see at its
         * exception/security level.
         */
        env->exception.vaddress = 0;
        raise_exception(env, EXCP_PREFETCH_ABORT,
                        syn_breakpoint(same_el),
                        arm_debug_target_el(env));
    }
}
1379
1380
1381
1382
1383
1384
1385
1386uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
1387{
1388 int shift = i & 0xff;
1389 if (shift >= 32) {
1390 if (shift == 32)
1391 env->CF = x & 1;
1392 else
1393 env->CF = 0;
1394 return 0;
1395 } else if (shift != 0) {
1396 env->CF = (x >> (32 - shift)) & 1;
1397 return x << shift;
1398 }
1399 return x;
1400}
1401
1402uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
1403{
1404 int shift = i & 0xff;
1405 if (shift >= 32) {
1406 if (shift == 32)
1407 env->CF = (x >> 31) & 1;
1408 else
1409 env->CF = 0;
1410 return 0;
1411 } else if (shift != 0) {
1412 env->CF = (x >> (shift - 1)) & 1;
1413 return x >> shift;
1414 }
1415 return x;
1416}
1417
1418uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
1419{
1420 int shift = i & 0xff;
1421 if (shift >= 32) {
1422 env->CF = (x >> 31) & 1;
1423 return (int32_t)x >> 31;
1424 } else if (shift != 0) {
1425 env->CF = (x >> (shift - 1)) & 1;
1426 return (int32_t)x >> shift;
1427 }
1428 return x;
1429}
1430
1431uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
1432{
1433 int shift1, shift;
1434 shift1 = i & 0xff;
1435 shift = shift1 & 0x1f;
1436 if (shift == 0) {
1437 if (shift1 != 0)
1438 env->CF = (x >> 31) & 1;
1439 return x;
1440 } else {
1441 env->CF = (x >> (shift - 1)) & 1;
1442 return ((uint32_t)x >> shift) | (x << (32 - shift));
1443 }
1444}
1445