1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include "qemu/osdep.h"
20#include "qemu/log.h"
21#include "qemu/main-loop.h"
22#include "cpu.h"
23#include "exec/helper-proto.h"
24#include "internals.h"
25#include "exec/exec-all.h"
26#include "exec/cpu_ldst.h"
27
/* Sign bit of a 32-bit / 64-bit value; fully parenthesized so the cast
 * cannot combine surprisingly with surrounding expression context.
 */
#define SIGNBIT ((uint32_t)0x80000000)
#define SIGNBIT64 ((uint64_t)1 << 63)
30
/* Raise an architectural (guest-visible) exception: record the syndrome
 * and target exception level in the CPU state, then longjmp back to the
 * main execution loop.  Never returns.  Internal-to-QEMU exceptions must
 * use HELPER(exception_internal) instead, hence the assert.
 */
static void raise_exception(CPUARMState *env, uint32_t excp,
                            uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(!excp_is_internal(excp));
    cs->exception_index = excp;
    env->exception.syndrome = syndrome;
    env->exception.target_el = target_el;
    cpu_loop_exit(cs);
}
42
/* Return the exception level an exception raised from the current EL
 * should be taken to: at least EL1, bumped to EL3 in the secure
 * AArch32-EL3 case.
 */
static int exception_target_el(CPUARMState *env)
{
    int target_el = MAX(1, arm_current_el(env));

    /* No such thing as secure EL1 if EL3 is aarch32, so route the
     * exception to EL3 in that case.
     */
    if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
        target_el = 3;
    }

    return target_el;
}
56
57uint32_t HELPER(neon_tbl)(uint32_t ireg, uint32_t def, void *vn,
58 uint32_t maxindex)
59{
60 uint32_t val, shift;
61 uint64_t *table = vn;
62
63 val = 0;
64 for (shift = 0; shift < 32; shift += 8) {
65 uint32_t index = (ireg >> shift) & 0xff;
66 if (index < maxindex) {
67 uint32_t tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
68 val |= tmp << shift;
69 } else {
70 val |= def & (0xff << shift);
71 }
72 }
73 return val;
74}
75
76#if !defined(CONFIG_USER_ONLY)
77
/* Combine a translation-time syndrome template with the runtime fault
 * information to produce the final data abort syndrome value.
 */
static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
                                            unsigned int target_el,
                                            bool same_el, bool ea,
                                            bool s1ptw, bool is_write,
                                            int fsc)
{
    uint32_t syn;

    /* ISV is only set for data aborts routed to EL2 and
     * never for stage-1 page table walks faulting on stage 2.
     *
     * Furthermore, ISV is only set for certain kinds of load/stores.
     * If the template syndrome does not have ISV set, we should leave
     * it cleared.
     *
     * See ARMv8 specs, D7-1974:
     * ISS encoding for an exception from a Data Abort, the
     * ISV field.
     */
    if (!(template_syn & ARM_EL_ISV) || target_el != 2 || s1ptw) {
        syn = syn_data_abort_no_iss(same_el,
                                    ea, 0, s1ptw, is_write, fsc);
    } else {
        /* Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the
         * template syndrome created at translation time.  Build the
         * runtime syndrome with the remaining fields and merge them.
         */
        syn = syn_data_abort_with_iss(same_el,
                                      0, 0, 0, 0, 0,
                                      ea, 0, s1ptw, is_write, fsc,
                                      false);
        /* Merge the runtime syndrome with the template syndrome.  */
        syn |= template_syn;
    }
    return syn;
}
114
/* Deliver an MMU fault to the guest: pick the target EL, build the
 * FSR/syndrome for the fault described by fi, and raise the prefetch
 * or data abort exception.  Never returns.
 */
static void deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
                          int mmu_idx, ARMMMUFaultInfo *fi)
{
    CPUARMState *env = &cpu->env;
    int target_el;
    bool same_el;
    uint32_t syn, exc, fsr, fsc;
    ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);

    target_el = exception_target_el(env);
    if (fi->stage2) {
        /* Stage 2 faults are always taken to EL2 and record the faulting
         * IPA in HPFAR_EL2 (bits [47:12] of the address, shifted into
         * the register's [43:4] field).
         */
        target_el = 2;
        env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
    }
    same_el = (arm_current_el(env) == target_el);

    if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
        arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
        /* LPAE format fault status register : bottom 6 bits are
         * status code in the same form as needed for syndrome
         */
        fsr = arm_fi_to_lfsc(fi);
        fsc = extract32(fsr, 0, 6);
    } else {
        fsr = arm_fi_to_sfsc(fi);
        /* Short format FSR : this fault will never actually be reported
         * to an EL that uses a syndrome register. Use a (currently)
         * reserved FSR code in case the constructed syndrome does leak
         * into the guest somehow.
         */
        fsc = 0x3f;
    }

    if (access_type == MMU_INST_FETCH) {
        syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc);
        exc = EXCP_PREFETCH_ABORT;
    } else {
        syn = merge_syn_data_abort(env->exception.syndrome, target_el,
                                   same_el, fi->ea, fi->s1ptw,
                                   access_type == MMU_DATA_STORE,
                                   fsc);
        if (access_type == MMU_DATA_STORE
            && arm_feature(env, ARM_FEATURE_V6)) {
            fsr |= (1 << 11);   /* WnR: fault was caused by a write */
        }
        exc = EXCP_DATA_ABORT;
    }

    env->exception.vaddress = addr;
    env->exception.fsr = fsr;
    raise_exception(env, exc, syn, target_el);
}
167
168
169
170
171
/* Try to fill the TLB for the given address/access; on failure restore
 * the CPU state from retaddr and deliver the fault to the guest.
 */
void tlb_fill(CPUState *cs, target_ulong addr, int size,
              MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    bool ret;
    ARMMMUFaultInfo fi = {};

    ret = arm_tlb_fill(cs, addr, access_type, mmu_idx, &fi);
    if (unlikely(ret)) {
        ARMCPU *cpu = ARM_CPU(cs);

        /* now we have a real cpu fault */
        cpu_restore_state(cs, retaddr, true);

        deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
    }
}
188
189
/* Raise a data abort with an alignment fault for the given virtual
 * address, after restoring CPU state from the return address.
 */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr, true);

    fi.type = ARMFault_Alignment;
    deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
}
203
204
205
206
207
/* Handle a memory system error response (e.g. no device/memory present
 * at the address) by raising a synchronous external abort.
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr, true);

    fi.ea = arm_extabort_type(response);
    fi.type = ARMFault_SyncExternal;
    deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
}
224
225#endif
226
227uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
228{
229 uint32_t res = a + b;
230 if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT))
231 env->QF = 1;
232 return res;
233}
234
235uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
236{
237 uint32_t res = a + b;
238 if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
239 env->QF = 1;
240 res = ~(((int32_t)a >> 31) ^ SIGNBIT);
241 }
242 return res;
243}
244
245uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
246{
247 uint32_t res = a - b;
248 if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
249 env->QF = 1;
250 res = ~(((int32_t)a >> 31) ^ SIGNBIT);
251 }
252 return res;
253}
254
255uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val)
256{
257 uint32_t res;
258 if (val >= 0x40000000) {
259 res = ~SIGNBIT;
260 env->QF = 1;
261 } else if (val <= (int32_t)0xc0000000) {
262 res = SIGNBIT;
263 env->QF = 1;
264 } else {
265 res = val << 1;
266 }
267 return res;
268}
269
270uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
271{
272 uint32_t res = a + b;
273 if (res < a) {
274 env->QF = 1;
275 res = ~0;
276 }
277 return res;
278}
279
280uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
281{
282 uint32_t res = a - b;
283 if (res > a) {
284 env->QF = 1;
285 res = 0;
286 }
287 return res;
288}
289
290
291static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
292{
293 int32_t top;
294 uint32_t mask;
295
296 top = val >> shift;
297 mask = (1u << shift) - 1;
298 if (top > 0) {
299 env->QF = 1;
300 return mask;
301 } else if (top < -1) {
302 env->QF = 1;
303 return ~mask;
304 }
305 return val;
306}
307
308
309static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
310{
311 uint32_t max;
312
313 max = (1u << shift) - 1;
314 if (val < 0) {
315 env->QF = 1;
316 return 0;
317 } else if (val > max) {
318 env->QF = 1;
319 return max;
320 }
321 return val;
322}
323
324
/* Signed saturate (SSAT): saturate x to a (shift+1)-bit signed value.  */
uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_ssat(env, x, shift);
}
329
330
/* Dual 16-bit signed saturate (SSAT16); QF is set by do_ssat on overflow
 * of either halfword.
 */
uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    /* Sign-extend each halfword before saturating it independently. */
    res = (uint16_t)do_ssat(env, (int16_t)x, shift);
    res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}
339
340
/* Unsigned saturate (USAT): saturate x to a shift-bit unsigned value.  */
uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_usat(env, x, shift);
}
345
346
/* Dual 16-bit unsigned saturate (USAT16); QF is set by do_usat on
 * overflow of either halfword.
 */
uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    /* Sign-extend each halfword before saturating it independently. */
    res = (uint16_t)do_usat(env, (int16_t)x, shift);
    res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}
355
/* SETEND: toggle the CPSR.E (data endianness) bit.  */
void HELPER(setend)(CPUARMState *env)
{
    env->uncached_cpsr ^= CPSR_E;
}
360
361
362
363
364
/* Check whether a WFI/WFE instruction is configured to trap.  Returns
 * the target EL (1-3) the trap should go to, or 0 for no trap.
 */
static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
{
    int cur_el = arm_current_el(env);
    uint64_t mask;

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile has no SCTLR/HCR/SCR trap controls for WFI/WFE. */
        return 0;
    }

    /* If we are currently in EL0 then we need to check if SCTLR is set up
     * for WFx instructions being trapped to EL1 (or, in the secure
     * AArch32-EL3 case, to EL3).  These trap bits don't exist in v7.
     */
    if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
        int target_el;

        mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
        if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) {
            /* Secure EL0 and Secure PL1 is at EL3 */
            target_el = 3;
        } else {
            target_el = 1;
        }

        /* nTW* bit clear means "trap" */
        if (!(env->cp15.sctlr_el[target_el] & mask)) {
            return target_el;
        }
    }

    /* Not trapping to EL1: trap to EL2 if HCR_EL2 requires it.  If EL2
     * doesn't exist these bits are zero, so no feature check needed.
     */
    if (cur_el < 2 && !arm_is_secure(env)) {
        mask = (is_wfe) ? HCR_TWE : HCR_TWI;
        if (env->cp15.hcr_el2 & mask) {
            return 2;
        }
    }

    /* Not trapping to EL1 or EL2: trap to EL3 if SCR_EL3 requires it. */
    if (cur_el < 3) {
        mask = (is_wfe) ? SCR_TWE : SCR_TWI;
        if (env->cp15.scr_el3 & mask) {
            return 3;
        }
    }

    return 0;
}
415
416void HELPER(wfi)(CPUARMState *env, uint32_t insn_len)
417{
418 CPUState *cs = CPU(arm_env_get_cpu(env));
419 int target_el = check_wfx_trap(env, false);
420
421 if (cpu_has_work(cs)) {
422
423
424
425 return;
426 }
427
428 if (target_el) {
429 env->pc -= insn_len;
430 raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0, insn_len == 2),
431 target_el);
432 }
433
434 cs->exception_index = EXCP_HLT;
435 cs->halted = 1;
436 cpu_loop_exit(cs);
437}
438
void HELPER(wfe)(CPUARMState *env)
{
    /* This is a hint instruction that is semantically different from
     * YIELD even though we currently implement it identically.  We do
     * not halt the CPU, just yield back to the top-level loop, so this
     * never goes into a "low power state" and never takes a
     * configurable trap to a lower exception level.
     */
    HELPER(yield)(env);
}
450
void HELPER(yield)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    /* A hint instruction indicating the guest is busy-waiting: return
     * control to the top-level loop (via the internal EXCP_YIELD) so a
     * more deserving vCPU gets a chance to run.
     */
    cs->exception_index = EXCP_YIELD;
    cpu_loop_exit(cs);
}
463
464
465
466
467
468
469
/* Raise an internal-to-QEMU exception.  Only EXCP values that are
 * special cases for interrupting execution may be used here; guest
 * exceptions carry syndrome information and must go through
 * raise_exception()/exception_with_syndrome instead.
 */
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}
478
479
/* Raise a guest exception with the specified syndrome register value. */
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                     uint32_t syndrome, uint32_t target_el)
{
    raise_exception(env, excp, syndrome, target_el);
}
485
486
487
488
/* Raise an EXCP_BKPT with the given syndrome, routed to the debug
 * target exception level.
 */
void HELPER(exception_bkpt_insn)(CPUARMState *env, uint32_t syndrome)
{
    /* The FSR is only used if the debug target EL is AArch32. */
    env->exception.fsr = arm_debug_exception_fsr(env);
    /* FAR is UNKNOWN: clear vaddress to avoid potentially exposing
     * values to the guest that it shouldn't be able to see at its
     * exception/security level.
     */
    env->exception.vaddress = 0;
    raise_exception(env, EXCP_BKPT, syndrome, arm_debug_target_el(env));
}
500
/* Read the CPSR with the execution-state and reserved bits masked out. */
uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
    return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED);
}
505
/* MSR-style CPSR write: only the bits selected by mask are updated. */
void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask, CPSRWriteByInstr);
}
510
511
/* Write the CPSR for a 32-bit exception return. */
void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
{
    cpsr_write(env, val, CPSR_ERET_MASK, CPSRWriteExceptionReturn);

    /* Generated code has already stored the new PC value, but
     * without masking out its low bits, because which bits need
     * masking depends on whether we're returning to Thumb or ARM
     * state.  Do the masking now.
     */
    env->regs[15] &= (env->thumb ? ~1 : ~3);

    qemu_mutex_lock_iothread();
    arm_call_el_change_hook(arm_env_get_cpu(env));
    qemu_mutex_unlock_iothread();
}
527
528
529uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
530{
531 uint32_t val;
532
533 if (regno == 13) {
534 val = env->banked_r13[BANK_USRSYS];
535 } else if (regno == 14) {
536 val = env->banked_r14[BANK_USRSYS];
537 } else if (regno >= 8
538 && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
539 val = env->usr_regs[regno - 8];
540 } else {
541 val = env->regs[regno];
542 }
543 return val;
544}
545
546void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
547{
548 if (regno == 13) {
549 env->banked_r13[BANK_USRSYS] = val;
550 } else if (regno == 14) {
551 env->banked_r14[BANK_USRSYS] = val;
552 } else if (regno >= 8
553 && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
554 env->usr_regs[regno - 8] = val;
555 } else {
556 env->regs[regno] = val;
557 }
558}
559
560void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
561{
562 if ((env->uncached_cpsr & CPSR_M) == mode) {
563 env->regs[13] = val;
564 } else {
565 env->banked_r13[bank_number(mode)] = val;
566 }
567}
568
/* Read R13 for the given mode: the live register if that mode is
 * current, otherwise the banked copy.
 */
uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SYS) {
        /* SRS from System mode is UNPREDICTABLE; we choose to UNDEF.
         * Other UNDEF/UNPREDICTABLE cases were caught at translate time.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if ((env->uncached_cpsr & CPSR_M) == mode) {
        return env->regs[13];
    } else {
        return env->banked_r13[bank_number(mode)];
    }
}
585
/* Raise an exception if the requested banked register access is one of
 * the UNPREDICTABLE cases; otherwise return.  This broadly corresponds
 * to the pseudocode BankedRegisterAccessValid() and SPSRAccessValid(),
 * except that some cases have already been handled at translate time.
 */
static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode,
                                      uint32_t regno)
{
    int curmode = env->uncached_cpsr & CPSR_M;

    /* Accessing the current mode's own banked registers is not valid. */
    if (curmode == tgtmode) {
        goto undef;
    }

    if (tgtmode == ARM_CPU_MODE_USR) {
        switch (regno) {
        case 8 ... 12:
            /* R8_usr..R12_usr are only distinct when in FIQ mode */
            if (curmode != ARM_CPU_MODE_FIQ) {
                goto undef;
            }
            break;
        case 13:
            if (curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        case 14:
            if (curmode == ARM_CPU_MODE_HYP || curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        default:
            break;
        }
    }

    if (tgtmode == ARM_CPU_MODE_HYP) {
        switch (regno) {
        case 17: /* ELR_Hyp */
            if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) {
                goto undef;
            }
            break;
        default:
            /* Other Hyp banked registers only accessible from Monitor */
            if (curmode != ARM_CPU_MODE_MON) {
                goto undef;
            }
            break;
        }
    }

    return;

undef:
    raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                    exception_target_el(env));
}
643
/* MSR (banked): write value to the register regno of mode tgtmode,
 * after validating the access.
 */
void HELPER(msr_banked)(CPUARMState *env, uint32_t value, uint32_t tgtmode,
                        uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSR */
        env->banked_spsr[bank_number(tgtmode)] = value;
        break;
    case 17: /* ELR_Hyp */
        env->elr_el[2] = value;
        break;
    case 13:
        env->banked_r13[bank_number(tgtmode)] = value;
        break;
    case 14:
        env->banked_r14[bank_number(tgtmode)] = value;
        break;
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            env->usr_regs[regno - 8] = value;
            break;
        case ARM_CPU_MODE_FIQ:
            env->fiq_regs[regno - 8] = value;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        g_assert_not_reached();
    }
}
678
/* MRS (banked): read the register regno of mode tgtmode, after
 * validating the access.
 */
uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSR */
        return env->banked_spsr[bank_number(tgtmode)];
    case 17: /* ELR_Hyp */
        return env->elr_el[2];
    case 13:
        return env->banked_r13[bank_number(tgtmode)];
    case 14:
        return env->banked_r14[bank_number(tgtmode)];
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            return env->usr_regs[regno - 8];
        case ARM_CPU_MODE_FIQ:
            return env->fiq_regs[regno - 8];
        default:
            g_assert_not_reached();
        }
    default:
        g_assert_not_reached();
    }
}
705
/* Run the runtime access checks for a coprocessor/system register
 * access and raise the appropriate exception if it is denied.
 */
void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
                                 uint32_t isread)
{
    const ARMCPRegInfo *ri = rip;
    int target_el;

    /* XScale's CPAR gates access to coprocessors 0..13 individually. */
    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    if (!ri->accessfn) {
        return;
    }

    switch (ri->accessfn(env, ri, isread)) {
    case CP_ACCESS_OK:
        return;
    case CP_ACCESS_TRAP:
        target_el = exception_target_el(env);
        break;
    case CP_ACCESS_TRAP_EL2:
        /* Requesting a trap to EL2 when we're in EL3 or S-EL0/1 is
         * a bug in the access function.
         */
        assert(!arm_is_secure(env) && arm_current_el(env) != 3);
        target_el = 2;
        break;
    case CP_ACCESS_TRAP_EL3:
        target_el = 3;
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED:
        target_el = exception_target_el(env);
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL2:
        target_el = 2;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL3:
        target_el = 3;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_FP_EL2:
        target_el = 2;
        /* Since we are an implementation that takes exceptions on a
         * trapped conditional insn only if the insn has passed its
         * condition code check, we take the IMPDEF choice to always
         * report CV=1 COND=0xe (which is also the required value for
         * AArch64 traps).
         */
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    case CP_ACCESS_TRAP_FP_EL3:
        target_el = 3;
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    default:
        g_assert_not_reached();
    }

    raise_exception(env, EXCP_UDEF, syndrome, target_el);
}
768
769void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
770{
771 const ARMCPRegInfo *ri = rip;
772
773 if (ri->type & ARM_CP_IO) {
774 qemu_mutex_lock_iothread();
775 ri->writefn(env, ri, value);
776 qemu_mutex_unlock_iothread();
777 } else {
778 ri->writefn(env, ri, value);
779 }
780}
781
782uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
783{
784 const ARMCPRegInfo *ri = rip;
785 uint32_t res;
786
787 if (ri->type & ARM_CP_IO) {
788 qemu_mutex_lock_iothread();
789 res = ri->readfn(env, ri);
790 qemu_mutex_unlock_iothread();
791 } else {
792 res = ri->readfn(env, ri);
793 }
794
795 return res;
796}
797
798void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
799{
800 const ARMCPRegInfo *ri = rip;
801
802 if (ri->type & ARM_CP_IO) {
803 qemu_mutex_lock_iothread();
804 ri->writefn(env, ri, value);
805 qemu_mutex_unlock_iothread();
806 } else {
807 ri->writefn(env, ri, value);
808 }
809}
810
811uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
812{
813 const ARMCPRegInfo *ri = rip;
814 uint64_t res;
815
816 if (ri->type & ARM_CP_IO) {
817 qemu_mutex_lock_iothread();
818 res = ri->readfn(env, ri);
819 qemu_mutex_unlock_iothread();
820 } else {
821 res = ri->readfn(env, ri);
822 }
823
824 return res;
825}
826
/* MSR (immediate) to update PSTATE fields (SPSel / DAIFSet / DAIFClear).
 * This is OK from EL0 only if SCTLR_EL1.UMA is set; SPSel from EL0 is
 * assumed to have been rejected at translate time.
 */
void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm)
{
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
        uint32_t syndrome = syn_aa64_sysregtrap(0, extract32(op, 0, 3),
                                                extract32(op, 3, 3), 4,
                                                imm, 0x1f, 0);
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    switch (op) {
    case 0x05: /* SPSel */
        update_spsel(env, imm);
        break;
    case 0x1e: /* DAIFSet */
        env->daif |= (imm << 6) & PSTATE_DAIF;
        break;
    case 0x1f: /* DAIFClear */
        env->daif &= ~((imm << 6) & PSTATE_DAIF);
        break;
    default:
        g_assert_not_reached();
    }
}
854
/* Clear the PSTATE software-step bit. */
void HELPER(clear_pstate_ss)(CPUARMState *env)
{
    env->pstate &= ~PSTATE_SS;
}
859
/* Runtime checks for the HVC instruction: UNDEF it when EL2 is absent
 * or disabled, unless it is a valid PSCI call.
 */
void HELPER(pre_hvc)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    /* NOTE(review): hardwired non-secure here; presumably the real
     * secure state should be used -- confirm against callers.
     */
    bool secure = false;
    bool undef;

    if (arm_is_psci_call(cpu, EXCP_HVC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated HVC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* If EL2 doesn't exist, HVC always UNDEFs */
        undef = true;
    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* With EL3 present, SCR_EL3.HCE controls HVC availability */
        undef = !(env->cp15.scr_el3 & SCR_HCE);
    } else {
        /* No EL3: HCR_EL2.HCD can disable HVC */
        undef = env->cp15.hcr_el2 & HCR_HCD;
    }

    /* In ARMv7 and ARMv8/AArch32, HVC is UNDEF in secure state.
     * For ARMv8/AArch64, HVC is allowed in EL3.
     * Note that we've already trapped HVC from EL0 at translation time.
     */
    if (secure && (!is_a64(env) || cur_el == 1)) {
        undef = true;
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}
899
/* Runtime checks for the SMC instruction: route to EL2 when trapped by
 * HCR.TSC, UNDEF when SMC is disabled or EL3 is absent, unless it is a
 * valid PSCI call.
 */
void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool smd = env->cp15.scr_el3 & SCR_SMD;
    /* On ARMv8 with EL3 AArch64, SMD applies to both S and NS state;
     * with EL3 AArch32 (or v7 virtualization extensions) it applies
     * only to NS state.  Without any EL3 the SMD bit cannot be set by
     * the guest, so no special case is needed here.
     */
    bool undef = arm_feature(env, ARM_FEATURE_AARCH64) ? smd : smd && !secure;

    if (!arm_feature(env, ARM_FEATURE_EL3) &&
        cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* If we have no EL3 then SMC always UNDEFs and can't be trapped
         * to EL2.  PSCI-via-SMC is a sort of ersatz EL3 firmware within
         * QEMU, and we want an EL2 guest to be able to forbid its EL1
         * from making PSCI calls into QEMU's "firmware" via HCR.TSC,
         * so for these purposes treat PSCI-via-SMC as implying an EL3.
         */
        undef = true;
    } else if (!secure && cur_el == 1 && (env->cp15.hcr_el2 & HCR_TSC)) {
        /* In NS EL1, HCR-controlled routing to EL2 has priority over
         * the SMD check; trap to the hypervisor.
         */
        raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
    }

    /* If PSCI is enabled and this looks like a valid PSCI call then
     * suppress the UNDEF -- the SMC exception is caught and the PSCI
     * call behaviour implemented there.
     */
    if (undef && !arm_is_psci_call(cpu, EXCP_SMC)) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}
942
/* Return the exception level that this SPSR value is requesting a
 * return to, or -1 if the value is invalid (an illegal return).
 */
static int el_from_spsr(uint32_t spsr)
{
    if (spsr & PSTATE_nRW) {
        /* Return to AArch32: decode the mode field */
        switch (spsr & CPSR_M) {
        case ARM_CPU_MODE_USR:
            return 0;
        case ARM_CPU_MODE_HYP:
            return 2;
        case ARM_CPU_MODE_FIQ:
        case ARM_CPU_MODE_IRQ:
        case ARM_CPU_MODE_SVC:
        case ARM_CPU_MODE_ABT:
        case ARM_CPU_MODE_UND:
        case ARM_CPU_MODE_SYS:
            return 1;
        case ARM_CPU_MODE_MON:
            /* Returning to Mon from AArch64 is never possible,
             * so this is an illegal return.
             */
        default:
            return -1;
        }
    } else {
        /* Return to AArch64: M[3:2] is the EL, M[1] reserved, M[0] SP sel */
        if (extract32(spsr, 1, 1)) {
            /* Return with reserved M[1] bit set */
            return -1;
        }
        if (extract32(spsr, 0, 4) == 1) {
            /* Return to EL0 with M[0] (SP-selection) bit set */
            return -1;
        }
        return extract32(spsr, 2, 2);
    }
}
980
/* Perform an AArch64 ERET: validate the saved SPSR, then restore
 * PSTATE/CPSR, SP and PC for the exception level being returned to.
 * Illegal returns set PSTATE.IL and resume at ELR without changing EL.
 */
void HELPER(exception_return)(CPUARMState *env)
{
    int cur_el = arm_current_el(env);
    unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
    uint32_t spsr = env->banked_spsr[spsr_idx];
    int new_el;
    bool return_to_aa64 = (spsr & PSTATE_nRW) == 0;

    aarch64_save_sp(env, cur_el);

    arm_clear_exclusive(env);

    /* We must squash the PSTATE.SS bit to zero unless both of the
     * following hold:
     *  1. debug exceptions are currently disabled
     *  2. singlestep will be active in the EL we return to
     * We check 1 here and 2 after we've done the pstate/cpsr write() to
     * transition to the EL we're going to.
     */
    if (arm_generate_debug_exceptions(env)) {
        spsr &= ~PSTATE_SS;
    }

    new_el = el_from_spsr(spsr);
    if (new_el == -1) {
        goto illegal_return;
    }
    if (new_el > cur_el
        || (new_el == 2 && !arm_feature(env, ARM_FEATURE_EL2))) {
        /* Disallow return to an EL which is unimplemented or higher
         * than the current one.
         */
        goto illegal_return;
    }

    if (new_el != 0 && arm_el_is_aa64(env, new_el) != return_to_aa64) {
        /* Return to an EL which is configured for a different register
         * width.
         */
        goto illegal_return;
    }

    if (new_el == 2 && arm_is_secure_below_el3(env)) {
        /* Return to the non-existent secure-EL2 */
        goto illegal_return;
    }

    /* HCR_EL2.TGE set means EL1 is disabled in non-secure state */
    if (new_el == 1 && (env->cp15.hcr_el2 & HCR_TGE)
        && !arm_is_secure_below_el3(env)) {
        goto illegal_return;
    }

    if (!return_to_aa64) {
        env->aarch64 = 0;
        /* We do a raw CPSR write because aarch64_sync_64_to_32()
         * will sort the register banks out for us, and we've already
         * caught all the bad-mode cases in el_from_spsr().
         */
        cpsr_write(env, spsr, ~0, CPSRWriteRaw);
        if (!arm_singlestep_active(env)) {
            env->uncached_cpsr &= ~PSTATE_SS;
        }
        aarch64_sync_64_to_32(env);

        /* Mask the PC appropriately for Thumb vs ARM state */
        if (spsr & CPSR_T) {
            env->regs[15] = env->elr_el[cur_el] & ~0x1;
        } else {
            env->regs[15] = env->elr_el[cur_el] & ~0x3;
        }
        qemu_log_mask(CPU_LOG_INT, "Exception return from AArch64 EL%d to "
                      "AArch32 EL%d PC 0x%" PRIx32 "\n",
                      cur_el, new_el, env->regs[15]);
    } else {
        env->aarch64 = 1;
        pstate_write(env, spsr);
        if (!arm_singlestep_active(env)) {
            env->pstate &= ~PSTATE_SS;
        }
        aarch64_restore_sp(env, new_el);
        env->pc = env->elr_el[cur_el];
        qemu_log_mask(CPU_LOG_INT, "Exception return from AArch64 EL%d to "
                      "AArch64 EL%d PC 0x%" PRIx64 "\n",
                      cur_el, new_el, env->pc);
    }

    qemu_mutex_lock_iothread();
    arm_call_el_change_hook(arm_env_get_cpu(env));
    qemu_mutex_unlock_iothread();

    return;

illegal_return:
    /* Illegal return events of various kinds have architecturally
     * mandated behaviour:
     *  restore NZCV and DAIF from SPSR_ELx
     *  set PSTATE.IL
     *  restore PC from ELR_ELx
     *  no change to exception level, execution state or stack pointer
     */
    env->pstate |= PSTATE_IL;
    env->pc = env->elr_el[cur_el];
    spsr &= PSTATE_NZCV | PSTATE_DAIF;
    spsr |= pstate_read(env) & ~(PSTATE_NZCV | PSTATE_DAIF);
    pstate_write(env, spsr);
    if (!arm_singlestep_active(env)) {
        env->pstate &= ~PSTATE_SS;
    }
    qemu_log_mask(LOG_GUEST_ERROR, "Illegal exception return at EL%d: "
                  "resuming execution at 0x%" PRIx64 "\n", cur_el, env->pc);
}
1089
1090
/* Return true if the linked breakpoint entry lbn passes its checks */
static bool linked_bp_matches(ARMCPU *cpu, int lbn)
{
    CPUARMState *env = &cpu->env;
    uint64_t bcr = env->cp15.dbgbcr[lbn];
    int brps = extract32(cpu->dbgdidr, 24, 4);
    int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
    int bt;
    uint32_t contextidr;

    /* Links to unimplemented or non-context-aware breakpoints are
     * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
     * as if linked to an UNKNOWN context-aware breakpoint (in which
     * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
     * We choose the former.
     */
    if (lbn > brps || lbn < (brps - ctx_cmps)) {
        return false;
    }

    bcr = env->cp15.dbgbcr[lbn];

    if (extract64(bcr, 0, 1) == 0) {
        /* Linked breakpoint disabled : generate no events */
        return false;
    }

    bt = extract64(bcr, 20, 4);

    /* We match the whole register even if this is AArch32 using the
     * short descriptor format (in which case it holds both PROCID and
     * ASID), since we don't implement the optional v7 context ID
     * masking.
     */
    contextidr = extract64(env->cp15.contextidr_el[1], 0, 32);

    switch (bt) {
    case 3: /* linked context ID match */
        if (arm_current_el(env) > 1) {
            /* Context matches never fire in EL2 or (AArch64) EL3 */
            return false;
        }
        return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
    case 5: /* linked address mismatch (reserved in AArch64) */
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    default:
        /* All other bt values are currently unimplemented or reserved;
         * treat them as "breakpoint disabled" (generate no events).
         */
        return false;
    }

    return false;
}
1144
/* Return true if breakpoint/watchpoint n fully matches, including the
 * security-state, privilege-level and linked-breakpoint checks in its
 * control register.
 */
static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
{
    CPUARMState *env = &cpu->env;
    uint64_t cr;
    int pac, hmc, ssc, wt, lbn;
    /* Note that for watchpoints the check is against the CPU security
     * state, not the S/NS attribute on the offending data access.
     */
    bool is_secure = arm_is_secure(env);
    int access_el = arm_current_el(env);

    if (is_wp) {
        CPUWatchpoint *wp = env->cpu_watchpoint[n];

        if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
            return false;
        }
        cr = env->cp15.dbgwcr[n];
        if (wp->hitattrs.user) {
            /* The LDRT/STRT/LDT/STT "unprivileged access" instructions
             * should match watchpoints as if they were accesses done at
             * EL0, even if the CPU is at EL1 or higher.
             */
            access_el = 0;
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

        if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
            return false;
        }
        cr = env->cp15.dbgbcr[n];
    }
    /* The WATCHPOINT_HIT flag guarantees us that the watchpoint is
     * enabled and that the address and access type match; for
     * breakpoints we know the address matched; check the remaining
     * fields, including linked breakpoints.  We rely on WCR and BCR
     * having the same layout for the LBN, SSC, HMC, PAC/PMC and
     * is-linked fields.
     * Note that some combinations of {PAC, HMC, SSC} are reserved and
     * must act either like some valid combination or as if the
     * watchpoint were disabled.  We choose the former, and use this
     * together with the fact that EL3 must always be Secure and EL2
     * must always be NonSecure to simplify the code slightly compared
     * to the full table in the ARM ARM.
     */
    pac = extract64(cr, 1, 2);
    hmc = extract64(cr, 13, 1);
    ssc = extract64(cr, 14, 2);

    switch (ssc) {
    case 0:
        break;
    case 1:
    case 3:
        if (is_secure) {
            return false;
        }
        break;
    case 2:
        if (!is_secure) {
            return false;
        }
        break;
    }

    switch (access_el) {
    case 3:
    case 2:
        if (!hmc) {
            return false;
        }
        break;
    case 1:
        if (extract32(pac, 0, 1) == 0) {
            return false;
        }
        break;
    case 0:
        if (extract32(pac, 1, 1) == 0) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }

    wt = extract64(cr, 20, 1);
    lbn = extract64(cr, 16, 4);

    if (wt && !linked_bp_matches(cpu, lbn)) {
        return false;
    }

    return true;
}
1240
/* Return true if any architectural watchpoint matches, given that debug
 * exceptions are globally enabled (MDSCR_EL1.MDE) and can be taken here.
 */
static bool check_watchpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If watchpoints are disabled globally or we can't take debug
     * exceptions here then watchpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
        if (bp_wp_matches(cpu, n, true)) {
            return true;
        }
    }
    return false;
}
1261
/* Return true if any architectural breakpoint matches, given that debug
 * exceptions are globally enabled (MDSCR_EL1.MDE) and can be taken here.
 */
static bool check_breakpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If breakpoints are disabled globally or we can't take debug
     * exceptions here then breakpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
        if (bp_wp_matches(cpu, n, false)) {
            return true;
        }
    }
    return false;
}
1282
1283void HELPER(check_breakpoints)(CPUARMState *env)
1284{
1285 ARMCPU *cpu = arm_env_get_cpu(env);
1286
1287 if (check_breakpoints(cpu)) {
1288 HELPER(exception_internal(env, EXCP_DEBUG));
1289 }
1290}
1291
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
{
    /* Called by core code when a CPU watchpoint fires; we need to check
     * if this is also an architectural watchpoint match.
     */
    ARMCPU *cpu = ARM_CPU(cs);

    return check_watchpoints(cpu);
}
1301
1302vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
1303{
1304 ARMCPU *cpu = ARM_CPU(cs);
1305 CPUARMState *env = &cpu->env;
1306
1307
1308
1309
1310
1311
1312
1313 if (arm_sctlr_b(env)) {
1314 if (len == 1) {
1315 addr ^= 3;
1316 } else if (len == 2) {
1317 addr ^= 2;
1318 }
1319 }
1320
1321 return addr;
1322}
1323
void arm_debug_excp_handler(CPUState *cs)
{
    /* Called by core code when a watchpoint or breakpoint fires; we
     * need to check which one and raise the guest exception if
     * appropriate.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit) {
        if (wp_hit->flags & BP_CPU) {
            bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
            bool same_el = arm_debug_target_el(env) == arm_current_el(env);

            cs->watchpoint_hit = NULL;

            env->exception.fsr = arm_debug_exception_fsr(env);
            env->exception.vaddress = wp_hit->hitaddr;
            raise_exception(env, EXCP_DATA_ABORT,
                            syn_watchpoint(same_el, 0, wnr),
                            arm_debug_target_el(env));
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
        bool same_el = (arm_debug_target_el(env) == arm_current_el(env));

        /* (1) GDB breakpoints should be handled first.
         * (2) Do not raise a CPU exception if no CPU breakpoint has
         * fired, since singlestep is also done by generating a debug
         * internal exception.
         */
        if (cpu_breakpoint_test(cs, pc, BP_GDB)
            || !cpu_breakpoint_test(cs, pc, BP_CPU)) {
            return;
        }

        env->exception.fsr = arm_debug_exception_fsr(env);
        /* FAR is UNKNOWN: clear vaddress to avoid potentially exposing
         * values to the guest that it shouldn't be able to see at its
         * exception/security level.
         */
        env->exception.vaddress = 0;
        raise_exception(env, EXCP_PREFETCH_ABORT,
                        syn_breakpoint(same_el),
                        arm_debug_target_el(env));
    }
}
1371
1372
1373
1374
1375
1376
1377
1378uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
1379{
1380 int shift = i & 0xff;
1381 if (shift >= 32) {
1382 if (shift == 32)
1383 env->CF = x & 1;
1384 else
1385 env->CF = 0;
1386 return 0;
1387 } else if (shift != 0) {
1388 env->CF = (x >> (32 - shift)) & 1;
1389 return x << shift;
1390 }
1391 return x;
1392}
1393
1394uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
1395{
1396 int shift = i & 0xff;
1397 if (shift >= 32) {
1398 if (shift == 32)
1399 env->CF = (x >> 31) & 1;
1400 else
1401 env->CF = 0;
1402 return 0;
1403 } else if (shift != 0) {
1404 env->CF = (x >> (shift - 1)) & 1;
1405 return x >> shift;
1406 }
1407 return x;
1408}
1409
1410uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
1411{
1412 int shift = i & 0xff;
1413 if (shift >= 32) {
1414 env->CF = (x >> 31) & 1;
1415 return (int32_t)x >> 31;
1416 } else if (shift != 0) {
1417 env->CF = (x >> (shift - 1)) & 1;
1418 return (int32_t)x >> shift;
1419 }
1420 return x;
1421}
1422
1423uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
1424{
1425 int shift1, shift;
1426 shift1 = i & 0xff;
1427 shift = shift1 & 0x1f;
1428 if (shift == 0) {
1429 if (shift1 != 0)
1430 env->CF = (x >> 31) & 1;
1431 return x;
1432 } else {
1433 env->CF = (x >> (shift - 1)) & 1;
1434 return ((uint32_t)x >> shift) | (x << (32 - shift));
1435 }
1436}
1437