1
2
3
4
5
6
7
8
9
10
11
12
13#include "qemu/osdep.h"
14#include "qapi/error.h"
15#include "cpu.h"
16#include "hw/sysbus.h"
17#include "migration/vmstate.h"
18#include "qemu/timer.h"
19#include "hw/intc/armv7m_nvic.h"
20#include "hw/irq.h"
21#include "hw/qdev-properties.h"
22#include "sysemu/runstate.h"
23#include "target/arm/cpu.h"
24#include "exec/exec-all.h"
25#include "exec/memop.h"
26#include "qemu/log.h"
27#include "qemu/module.h"
28#include "trace.h"
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
/* External interrupts start after the fixed internal exception vectors */
#define NVIC_FIRST_IRQ NVIC_INTERNAL_VECTORS
#define NVIC_MAX_IRQ (NVIC_MAX_VECTORS - NVIC_FIRST_IRQ)

/*
 * Effective priority meaning "no exception pending/active": numerically
 * larger (i.e. lower priority) than any architectural priority, which is
 * at most 0xff.
 */
#define NVIC_NOEXC_PRIO 0x100

/*
 * When AIRCR.PRIS is set, Non-secure priorities are squashed into the
 * bottom half of the range; this is the resulting NS priority floor.
 */
#define NVIC_NS_PRIO_LIMIT 0x80

/*
 * NVIC peripheral/component identification bytes — presumably exposed
 * through the ID register space by the MMIO read handlers (TODO: confirm
 * against the memory region code, which is outside this chunk).
 */
static const uint8_t nvic_id[] = {
    0x00, 0xb0, 0x1b, 0x00, 0x0d, 0xe0, 0x05, 0xb1
};
67
68static void signal_sysresetreq(NVICState *s)
69{
70 if (qemu_irq_is_connected(s->sysresetreq)) {
71 qemu_irq_pulse(s->sysresetreq);
72 } else {
73
74
75
76
77
78 qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
79 }
80}
81
/*
 * Return the group priority of the highest-priority pending enabled
 * exception, or NVIC_NOEXC_PRIO if none is pending. The value is a
 * cache maintained by nvic_recompute_state().
 */
static int nvic_pending_prio(NVICState *s)
{
    return s->vectpending_prio;
}
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104static bool nvic_rettobase(NVICState *s)
105{
106 int irq, nhand = 0;
107 bool check_sec = arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY);
108
109 for (irq = ARMV7M_EXCP_RESET; irq < s->num_irq; irq++) {
110 if (s->vectors[irq].active ||
111 (check_sec && irq < NVIC_INTERNAL_VECTORS &&
112 s->sec_vectors[irq].active)) {
113 nhand++;
114 if (nhand == 2) {
115 return 0;
116 }
117 }
118 }
119
120 return 1;
121}
122
123
124
125
126
127static bool nvic_isrpending(NVICState *s)
128{
129 int irq;
130
131
132
133
134 if (s->vectpending > NVIC_FIRST_IRQ) {
135 return true;
136 }
137 if (s->vectpending == 0) {
138 return false;
139 }
140
141 for (irq = NVIC_FIRST_IRQ; irq < s->num_irq; irq++) {
142 if (s->vectors[irq].pending) {
143 return true;
144 }
145 }
146 return false;
147}
148
149static bool exc_is_banked(int exc)
150{
151
152
153
154 return exc == ARMV7M_EXCP_HARD ||
155 exc == ARMV7M_EXCP_MEM ||
156 exc == ARMV7M_EXCP_USAGE ||
157 exc == ARMV7M_EXCP_SVC ||
158 exc == ARMV7M_EXCP_PENDSV ||
159 exc == ARMV7M_EXCP_SYSTICK;
160}
161
162
163
164
165
/*
 * Return a mask word which clears the subpriority bits from a raw
 * priority value for an exception targeting the given security state,
 * leaving only the group-priority bits (split point set by PRIGROUP).
 */
static inline uint32_t nvic_gprio_mask(NVICState *s, bool secure)
{
    return ~0U << (s->prigroup[secure] + 1);
}
170
/* Return true if this non-banked exception targets Secure state. */
static bool exc_targets_secure(NVICState *s, int exc)
{
    /* Without the Security extension everything targets Non-secure */
    if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
        return false;
    }

    /* External interrupts: target state configured via the ITNS bits */
    if (exc >= NVIC_FIRST_IRQ) {
        return !s->itns[exc];
    }

    /* Banked exceptions have no single target: callers must not ask */
    assert(!exc_is_banked(exc));

    switch (exc) {
    case ARMV7M_EXCP_NMI:
    case ARMV7M_EXCP_BUS:
        /* Target Secure unless AIRCR.BFHFNMINS moves them to NS */
        return !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
    case ARMV7M_EXCP_SECURE:
        return true;
    case ARMV7M_EXCP_DEBUG:
        /* NOTE(review): presumably should be controlled by DEMCR.SDME
         * when debug is implemented — confirm; treated as NS here.
         */
        return false;
    default:
        /*
         * Reset and the reserved low exception numbers. Code which
         * loops over all exception numbers will ask about these, but
         * they can never be pended or active, so any answer is fine.
         */
        return true;
    }
}
203
204static int exc_group_prio(NVICState *s, int rawprio, bool targets_secure)
205{
206
207
208
209
210 if (rawprio < 0) {
211 return rawprio;
212 }
213 rawprio &= nvic_gprio_mask(s, targets_secure);
214
215
216
217 if (!targets_secure &&
218 (s->cpu->env.v7m.aircr & R_V7M_AIRCR_PRIS_MASK)) {
219 rawprio = (rawprio >> 1) + NVIC_NS_PRIO_LIMIT;
220 }
221 return rawprio;
222}
223
224
225
226
/*
 * Recompute the cached vectpending/exception_prio state for a CPU
 * which implements the Security extension: both the Secure and
 * Non-secure banks must be scanned, and priority comparisons must be
 * made on group priorities (with subpriority as tie-break, since
 * AIRCR.PRIS de-boosting can make distinct raw priorities collapse
 * onto the same group priority).
 */
static void nvic_recompute_state_secure(NVICState *s)
{
    int i, bank;
    int pend_prio = NVIC_NOEXC_PRIO;
    int active_prio = NVIC_NOEXC_PRIO;
    int pend_irq = 0;
    bool pending_is_s_banked = false;
    int pend_subprio = 0;

    /*
     * Scan every exception number; for each, look at the Secure bank
     * first (so that on an exact priority tie the Secure exception
     * wins, because the comparisons below are strict '<'), then the
     * Non-secure bank. Non-banked exceptions only have an entry in
     * s->vectors[], with their target security state computed from
     * ITNS/AIRCR.
     */
    for (i = 1; i < s->num_irq; i++) {
        for (bank = M_REG_S; bank >= M_REG_NS; bank--) {
            VecInfo *vec;
            int prio, subprio;
            bool targets_secure;

            if (bank == M_REG_S) {
                if (!exc_is_banked(i)) {
                    continue;
                }
                vec = &s->sec_vectors[i];
                targets_secure = true;
            } else {
                vec = &s->vectors[i];
                targets_secure = !exc_is_banked(i) && exc_targets_secure(s, i);
            }

            /* Compare group priority; subpriority breaks exact ties */
            prio = exc_group_prio(s, vec->prio, targets_secure);
            subprio = vec->prio & ~nvic_gprio_mask(s, targets_secure);
            if (vec->enabled && vec->pending &&
                ((prio < pend_prio) ||
                 (prio == pend_prio && prio >= 0 && subprio < pend_subprio))) {
                pend_prio = prio;
                pend_subprio = subprio;
                pend_irq = i;
                pending_is_s_banked = (bank == M_REG_S);
            }
            if (vec->active && prio < active_prio) {
                active_prio = prio;
            }
        }
    }

    s->vectpending_is_s_banked = pending_is_s_banked;
    s->vectpending = pend_irq;
    s->vectpending_prio = pend_prio;
    s->exception_prio = active_prio;

    trace_nvic_recompute_state_secure(s->vectpending,
                                      s->vectpending_is_s_banked,
                                      s->vectpending_prio,
                                      s->exception_prio);
}
288
289
/*
 * Recompute the cached state: the highest-priority pending enabled
 * exception (vectpending and its group priority) and the group
 * priority of the highest-priority active exception. Must be called
 * whenever anything changes that could affect the set of pending or
 * active exceptions or their priorities.
 */
static void nvic_recompute_state(NVICState *s)
{
    int i;
    int pend_prio = NVIC_NOEXC_PRIO;
    int active_prio = NVIC_NOEXC_PRIO;
    int pend_irq = 0;

    /*
     * The Security-extension case needs the slower two-bank scan;
     * this function then only handles the single-bank fast path.
     */
    if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
        nvic_recompute_state_secure(s);
        return;
    }

    for (i = 1; i < s->num_irq; i++) {
        VecInfo *vec = &s->vectors[i];

        if (vec->enabled && vec->pending && vec->prio < pend_prio) {
            pend_prio = vec->prio;
            pend_irq = i;
        }
        if (vec->active && vec->prio < active_prio) {
            active_prio = vec->prio;
        }
    }

    /* Convert raw priorities to group priorities (fixed negative
     * priorities are left alone).
     */
    if (active_prio > 0) {
        active_prio &= nvic_gprio_mask(s, false);
    }

    if (pend_prio > 0) {
        pend_prio &= nvic_gprio_mask(s, false);
    }

    s->vectpending = pend_irq;
    s->vectpending_prio = pend_prio;
    s->exception_prio = active_prio;

    trace_nvic_recompute_state(s->vectpending,
                               s->vectpending_prio,
                               s->exception_prio);
}
337
338
339
340
341
/*
 * Return the current execution priority: the minimum of the active
 * exception group priority and the priority imposed by the BASEPRI,
 * PRIMASK and FAULTMASK registers of both security banks. Non-secure
 * masking registers are subject to AIRCR.PRIS de-boosting, so NS
 * PRIMASK/FAULTMASK may only lower the priority to NVIC_NS_PRIO_LIMIT
 * rather than to 0.
 */
static inline int nvic_exec_prio(NVICState *s)
{
    CPUARMState *env = &s->cpu->env;
    int running = NVIC_NOEXC_PRIO;

    /* BASEPRI values are de-boosted via exc_group_prio() */
    if (env->v7m.basepri[M_REG_NS] > 0) {
        running = exc_group_prio(s, env->v7m.basepri[M_REG_NS], M_REG_NS);
    }

    if (env->v7m.basepri[M_REG_S] > 0) {
        int basepri = exc_group_prio(s, env->v7m.basepri[M_REG_S], M_REG_S);
        if (running > basepri) {
            running = basepri;
        }
    }

    /* NS PRIMASK raises priority to 0, or only to the NS limit if PRIS */
    if (env->v7m.primask[M_REG_NS]) {
        if (env->v7m.aircr & R_V7M_AIRCR_PRIS_MASK) {
            if (running > NVIC_NS_PRIO_LIMIT) {
                running = NVIC_NS_PRIO_LIMIT;
            }
        } else {
            running = 0;
        }
    }

    if (env->v7m.primask[M_REG_S]) {
        running = 0;
    }

    /*
     * NS FAULTMASK: with BFHFNMINS set it acts at priority -1;
     * otherwise it behaves like NS PRIMASK (subject to PRIS).
     */
    if (env->v7m.faultmask[M_REG_NS]) {
        if (env->v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
            running = -1;
        } else {
            if (env->v7m.aircr & R_V7M_AIRCR_PRIS_MASK) {
                if (running > NVIC_NS_PRIO_LIMIT) {
                    running = NVIC_NS_PRIO_LIMIT;
                }
            } else {
                running = 0;
            }
        }
    }

    /* S FAULTMASK acts at -3 (below Secure HardFault) if BFHFNMINS, else -1 */
    if (env->v7m.faultmask[M_REG_S]) {
        running = (env->v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) ? -3 : -1;
    }

    /* Consider the priority of the currently-active exceptions too */
    return MIN(running, s->exception_prio);
}
393
bool armv7m_nvic_neg_prio_requested(void *opaque, bool secure)
{
    /*
     * Return true if the requested execution priority is negative
     * for the specified security state: i.e. that state has set its
     * FAULTMASK, has an active HardFault, or there is an active NMI
     * targeting that state. Note this is a "requested" priority: the
     * actual execution priority may not be negative (e.g. AIRCR.PRIS
     * may prevent the NS FAULTMASK from making it so).
     */
    NVICState *s = opaque;

    if (s->cpu->env.v7m.faultmask[secure]) {
        return true;
    }

    if (secure ? s->sec_vectors[ARMV7M_EXCP_HARD].active :
        s->vectors[ARMV7M_EXCP_HARD].active) {
        return true;
    }

    /* NMI is not banked; check it targets the state we were asked about */
    if (s->vectors[ARMV7M_EXCP_NMI].active &&
        exc_targets_secure(s, ARMV7M_EXCP_NMI) == secure) {
        return true;
    }

    return false;
}
422
/*
 * Return true if the highest-priority pending exception can preempt:
 * its group priority is strictly higher (numerically lower) than the
 * current execution priority.
 */
bool armv7m_nvic_can_take_pending_exception(void *opaque)
{
    NVICState *s = opaque;

    return nvic_exec_prio(s) > nvic_pending_prio(s);
}
429
/*
 * Return the "raw" execution priority: the group priority of the
 * highest-priority active exception, ignoring the masking effect of
 * PRIMASK/FAULTMASK/BASEPRI.
 */
int armv7m_nvic_raw_execution_priority(void *opaque)
{
    NVICState *s = opaque;

    return s->exception_prio;
}
436
437
438
439
440
/*
 * Set the priority of an exception. Only valid for configurable-
 * priority exceptions (not Reset/NMI/HardFault, which have fixed
 * negative priorities). For banked exceptions 'secure' selects
 * which bank's priority to set.
 */
static void set_prio(NVICState *s, unsigned irq, bool secure, uint8_t prio)
{
    assert(irq > ARMV7M_EXCP_NMI);
    assert(irq < s->num_irq);

    /* Only the top num_prio_bits bits of the priority are implemented */
    prio &= MAKE_64BIT_MASK(8 - s->num_prio_bits, s->num_prio_bits);

    if (secure) {
        assert(exc_is_banked(irq));
        s->sec_vectors[irq].prio = prio;
    } else {
        s->vectors[irq].prio = prio;
    }

    trace_nvic_set_prio(irq, secure, prio);
}
457
458
459
460
461
462static int get_prio(NVICState *s, unsigned irq, bool secure)
463{
464 assert(irq > ARMV7M_EXCP_NMI);
465 assert(irq < s->num_irq);
466
467 if (secure) {
468 assert(exc_is_banked(irq));
469 return s->sec_vectors[irq].prio;
470 } else {
471 return s->vectors[irq].prio;
472 }
473}
474
475
476
477
478
479
/*
 * Recompute the cached exception state and drive the exception output
 * line accordingly. Must be called whenever pending/active state or
 * priorities change.
 */
static void nvic_irq_update(NVICState *s)
{
    int lvl;
    int pend_prio;

    nvic_recompute_state(s);
    pend_prio = nvic_pending_prio(s);

    /*
     * Raise the output if the pending exception outranks the active
     * ones. Note this deliberately ignores PRIMASK/FAULTMASK/BASEPRI
     * masking: changes to those CPU registers don't come through here,
     * so the final "can we take it" check is done by the CPU when the
     * line is sampled (see armv7m_nvic_can_take_pending_exception()).
     */
    lvl = (pend_prio < s->exception_prio);
    trace_nvic_irq_update(s->vectpending, pend_prio, s->exception_prio, lvl);
    qemu_set_irq(s->excpout, lvl);
}
498
499
500
501
502
503
504
505
506
507
508
509
510
/*
 * Clear the pending status of an exception. For banked exceptions
 * 'secure' selects the bank; for non-banked exceptions it must be
 * false. Reset cannot be un-pended.
 */
static void armv7m_nvic_clear_pending(void *opaque, int irq, bool secure)
{
    NVICState *s = (NVICState *)opaque;
    VecInfo *vec;

    assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);

    if (secure) {
        assert(exc_is_banked(irq));
        vec = &s->sec_vectors[irq];
    } else {
        vec = &s->vectors[irq];
    }
    trace_nvic_clear_pending(irq, secure, vec->enabled, vec->prio);
    if (vec->pending) {
        vec->pending = 0;
        nvic_irq_update(s);
    }
}
530
static void do_armv7m_nvic_set_pending(void *opaque, int irq, bool secure,
                                       bool derived)
{
    /*
     * Pend an exception, including possibly escalating it to HardFault.
     *
     * This handles both "normal" pending of interrupts and exceptions,
     * and derived exceptions: ones which occur as a result of trying to
     * take some other exception.
     *
     * If derived == true, the caller guarantees that we are part way
     * through trying to take an exception (but have not yet called
     * armv7m_nvic_acknowledge_irq() to make it active), so that:
     *  - s->vectpending is the "original" exception we were taking
     *  - irq is the "derived" exception
     *  - nvic_exec_prio(s) gives the priority before exception entry
     * This implements the prioritization logic the v8M pseudocode puts
     * in DerivedLateArrival().
     */
    NVICState *s = (NVICState *)opaque;
    bool banked = exc_is_banked(irq);
    VecInfo *vec;
    bool targets_secure;

    assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
    assert(!secure || banked);

    vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq];

    targets_secure = banked ? secure : exc_targets_secure(s, irq);

    trace_nvic_set_pending(irq, secure, targets_secure,
                           derived, vec->enabled, vec->prio);

    if (derived) {
        /* Derived exceptions are always synchronous. */
        assert(irq >= ARMV7M_EXCP_HARD && irq < ARMV7M_EXCP_PENDSV);

        if (irq == ARMV7M_EXCP_DEBUG &&
            exc_group_prio(s, vec->prio, secure) >= nvic_exec_prio(s)) {
            /*
             * DebugMonitorFault, but its priority is lower than the
             * preempted exception's: just ignore it.
             */
            return;
        }

        if (irq == ARMV7M_EXCP_HARD && vec->prio >= s->vectpending_prio) {
            /*
             * A terminal derived exception (one meaning we cannot take
             * the original exception, e.g. failure to read its vector
             * table entry) must be taken; if it cannot outrank the
             * original exception we go into Lockup. QEMU relies on
             * derived exceptions being terminal iff they are reported
             * as HardFault. We don't model Lockup, so use cpu_abort().
             */
            cpu_abort(&s->cpu->parent_obj,
                      "Lockup: can't take terminal derived exception "
                      "(original exception priority %d)\n",
                      s->vectpending_prio);
        }

        /*
         * Otherwise fall through to the normal pending logic below,
         * which may further escalate this derived exception to
         * HardFault if it is disabled or of insufficient priority.
         */
    }

    if (irq >= ARMV7M_EXCP_HARD && irq < ARMV7M_EXCP_PENDSV) {
        /*
         * Synchronous exceptions (HardFault, MemManage, BusFault,
         * UsageFault, SecureFault, Debug) which must be taken
         * immediately are escalated to HardFault when:
         *  - their priority is equal to or lower than the current
         *    execution priority, or
         *  - they are disabled.
         * (i.e. "we need to take it now but can't".) Asynchronous
         * interrupts simply remain pending instead.
         */
        int running = nvic_exec_prio(s);
        bool escalate = false;

        if (exc_group_prio(s, vec->prio, secure) >= running) {
            trace_nvic_escalate_prio(irq, vec->prio, running);
            escalate = true;
        } else if (!vec->enabled) {
            trace_nvic_escalate_disabled(irq);
            escalate = true;
        }

        if (escalate) {

            /*
             * Escalate to HardFault: if the original fault targeted
             * Secure state, or BFHFNMINS is clear, we pend the Secure
             * HardFault; otherwise the Non-secure one.
             */
            irq = ARMV7M_EXCP_HARD;
            if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY) &&
                (targets_secure ||
                 !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK))) {
                vec = &s->sec_vectors[irq];
            } else {
                vec = &s->vectors[irq];
            }
            if (running <= vec->prio) {
                /*
                 * We can't take the escalated HardFault either: this is
                 * a Lockup condition due to a guest bug. We don't model
                 * Lockup, so report via cpu_abort() instead.
                 */
                cpu_abort(&s->cpu->parent_obj,
                          "Lockup: can't escalate %d to HardFault "
                          "(current priority %d)\n", irq, running);
            }

            /* HF may be banked but there is only one shared HFSR */
            s->cpu->env.v7m.hfsr |= R_V7M_HFSR_FORCED_MASK;
        }
    }

    if (!vec->pending) {
        vec->pending = 1;
        nvic_irq_update(s);
    }
}
670
/* Pend a (non-derived) exception; see do_armv7m_nvic_set_pending(). */
void armv7m_nvic_set_pending(void *opaque, int irq, bool secure)
{
    do_armv7m_nvic_set_pending(opaque, irq, secure, false);
}
675
/*
 * Pend a derived exception (one raised while trying to take another
 * exception); see do_armv7m_nvic_set_pending().
 */
void armv7m_nvic_set_pending_derived(void *opaque, int irq, bool secure)
{
    do_armv7m_nvic_set_pending(opaque, irq, secure, true);
}
680
void armv7m_nvic_set_pending_lazyfp(void *opaque, int irq, bool secure)
{
    /*
     * Pend an exception during lazy FP state stacking. This differs
     * from the usual pending logic because whether we should escalate
     * depends on the saved context in the FPCCR register, not on the
     * current state of the CPU/NVIC.
     */
    NVICState *s = (NVICState *)opaque;
    bool banked = exc_is_banked(irq);
    VecInfo *vec;
    bool targets_secure;
    bool escalate = false;
    /*
     * We will only look at bits in fpccr if this is a banked exception
     * (in which case 'secure' tells us whether it is the S or NS
     * version). All the bits for the non-banked exceptions are in
     * fpccr_s.
     */
    uint32_t fpccr_s = s->cpu->env.v7m.fpccr[M_REG_S];
    uint32_t fpccr = s->cpu->env.v7m.fpccr[secure];

    assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
    assert(!secure || banked);

    vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq];

    targets_secure = banked ? secure : exc_targets_secure(s, irq);

    switch (irq) {
    case ARMV7M_EXCP_DEBUG:
        if (!(fpccr_s & R_V7M_FPCCR_MONRDY_MASK)) {
            /* Ignore DebugMonitor exception */
            return;
        }
        break;
    case ARMV7M_EXCP_MEM:
        escalate = !(fpccr & R_V7M_FPCCR_MMRDY_MASK);
        break;
    case ARMV7M_EXCP_USAGE:
        escalate = !(fpccr & R_V7M_FPCCR_UFRDY_MASK);
        break;
    case ARMV7M_EXCP_BUS:
        escalate = !(fpccr_s & R_V7M_FPCCR_BFRDY_MASK);
        break;
    case ARMV7M_EXCP_SECURE:
        escalate = !(fpccr_s & R_V7M_FPCCR_SFRDY_MASK);
        break;
    default:
        g_assert_not_reached();
    }

    if (escalate) {
        /*
         * Escalate to HardFault: faults which originally targeted
         * Secure state take the Secure HardFault (also taken when
         * BFHFNMINS is clear), otherwise the Non-secure one.
         */
        irq = ARMV7M_EXCP_HARD;
        if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY) &&
            (targets_secure ||
             !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK))) {
            vec = &s->sec_vectors[irq];
        } else {
            vec = &s->vectors[irq];
        }
    }

    if (!vec->enabled ||
        nvic_exec_prio(s) <= exc_group_prio(s, vec->prio, secure)) {
        if (!(fpccr_s & R_V7M_FPCCR_HFRDY_MASK)) {
            /*
             * We want to escalate to HardFault but the context the
             * FP state belongs to prevents the exception pre-empting:
             * Lockup. We don't model Lockup, so abort instead.
             */
            cpu_abort(&s->cpu->parent_obj,
                      "Lockup: can't escalate to HardFault during "
                      "lazy FP register stacking\n");
        }
    }

    if (escalate) {
        s->cpu->env.v7m.hfsr |= R_V7M_HFSR_FORCED_MASK;
    }
    if (!vec->pending) {
        vec->pending = 1;
        /*
         * We recompute state but deliberately do not call
         * nvic_irq_update(): the caller will cause the exception to be
         * taken itself, so raising the IRQ line would be redundant.
         */
        nvic_recompute_state(s);
    }
}
776
777
/* Make the highest-priority pending exception active. */
void armv7m_nvic_acknowledge_irq(void *opaque)
{
    NVICState *s = (NVICState *)opaque;
    CPUARMState *env = &s->cpu->env;
    const int pending = s->vectpending;
    const int running = nvic_exec_prio(s);
    VecInfo *vec;

    assert(pending > ARMV7M_EXCP_RESET && pending < s->num_irq);

    if (s->vectpending_is_s_banked) {
        vec = &s->sec_vectors[pending];
    } else {
        vec = &s->vectors[pending];
    }

    assert(vec->enabled);
    assert(vec->pending);

    /* Caller must only acknowledge an exception that can preempt */
    assert(s->vectpending_prio < running);

    trace_nvic_acknowledge_irq(pending, s->vectpending_prio);

    vec->active = 1;
    vec->pending = 0;

    /* Update IPSR to the exception we are now taking */
    write_v7m_exception(env, s->vectpending);

    nvic_irq_update(s);
}
808
/*
 * Report the number of the highest-priority pending exception and
 * whether it targets Secure state, without making it active.
 */
void armv7m_nvic_get_pending_irq_info(void *opaque,
                                      int *pirq, bool *ptargets_secure)
{
    NVICState *s = (NVICState *)opaque;
    const int pending = s->vectpending;
    bool targets_secure;

    assert(pending > ARMV7M_EXCP_RESET && pending < s->num_irq);

    if (s->vectpending_is_s_banked) {
        targets_secure = true;
    } else {
        targets_secure = !exc_is_banked(pending) &&
            exc_targets_secure(s, pending);
    }

    trace_nvic_get_pending_irq_info(pending, targets_secure);

    *ptargets_secure = targets_secure;
    *pirq = pending;
}
830
/*
 * Complete (deactivate) an exception on exception return.
 * Returns -1 for an illegal exception return, otherwise the
 * RETTOBASE value (whether we are returning to base level).
 */
int armv7m_nvic_complete_irq(void *opaque, int irq, bool secure)
{
    NVICState *s = (NVICState *)opaque;
    VecInfo *vec = NULL;
    int ret = 0;

    assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);

    trace_nvic_complete_irq(irq, secure);

    if (secure && exc_is_banked(irq)) {
        vec = &s->sec_vectors[irq];
    } else {
        vec = &s->vectors[irq];
    }

    /*
     * Identify illegal exception return cases. We can't immediately
     * return at this point because we may still need to deactivate
     * (either this exception or NMI/HardFault) first.
     */
    if (!exc_is_banked(irq) && exc_targets_secure(s, irq) != secure) {
        /*
         * Return from a non-banked exception which currently targets
         * the opposite security state from the one we're completing
         * it for: this VecInfo is not really the one for (irq, secure),
         * so clear vec to avoid deactivating it.
         */
        ret = -1;
        vec = NULL;
    } else if (!vec->active) {
        /* Return from an inactive exception */
        ret = -1;
    } else {
        /* Legal return: report the RETTOBASE bit value to the caller */
        ret = nvic_rettobase(s);
    }

    /*
     * For negative raw execution priorities, v8M deactivates the
     * appropriate NMI or HardFault regardless of which exception we
     * were asked to deactivate (compare the v8M DeActivate()
     * pseudocode): -1 is either NS HardFault (if BFHFNMINS) or Secure
     * HardFault, -2 is NMI, -3 is Secure HardFault with BFHFNMINS set.
     */
    if (arm_feature(&s->cpu->env, ARM_FEATURE_V8)) {
        switch (armv7m_nvic_raw_execution_priority(s)) {
        case -1:
            if (s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
                vec = &s->vectors[ARMV7M_EXCP_HARD];
            } else {
                vec = &s->sec_vectors[ARMV7M_EXCP_HARD];
            }
            break;
        case -2:
            vec = &s->vectors[ARMV7M_EXCP_NMI];
            break;
        case -3:
            vec = &s->sec_vectors[ARMV7M_EXCP_HARD];
            break;
        default:
            break;
        }
    }

    if (!vec) {
        return ret;
    }

    vec->active = 0;
    if (vec->level) {
        /*
         * Re-pend the exception if its input line is still held high;
         * this only happens for external interrupts.
         */
        assert(irq >= NVIC_FIRST_IRQ);
        vec->pending = 1;
    }

    nvic_irq_update(s);

    return ret;
}
914
bool armv7m_nvic_get_ready_status(void *opaque, int irq, bool secure)
{
    /*
     * Return whether an exception is "ready", i.e. it is enabled and
     * configured at a priority which would allow it to interrupt the
     * current execution priority.
     *
     * irq and secure have the same semantics as for
     * armv7m_nvic_set_pending(): for non-banked exceptions secure is
     * always false; for banked exceptions it selects the bank.
     */
    NVICState *s = (NVICState *)opaque;
    bool banked = exc_is_banked(irq);
    VecInfo *vec;
    int running = nvic_exec_prio(s);

    assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
    assert(!secure || banked);

    /*
     * HardFault is an odd special case: we always check against -1,
     * even if we're Secure and HardFault has priority -3; and we
     * never need to check its enabled state.
     */
    if (irq == ARMV7M_EXCP_HARD) {
        return running > -1;
    }

    vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq];

    return vec->enabled &&
        exc_group_prio(s, vec->prio, secure) < running;
}
948
949
/* qemu_irq handler: an external interrupt input line changed level. */
static void set_irq_level(void *opaque, int n, int level)
{
    NVICState *s = opaque;
    VecInfo *vec;

    /* GPIO inputs are numbered from 0; map to exception numbers */
    n += NVIC_FIRST_IRQ;

    assert(n >= NVIC_FIRST_IRQ && n < s->num_irq);

    trace_nvic_set_irq_level(n, level);

    /*
     * The pending status of an external interrupt is latched on the
     * rising edge (and again on exception handler return while the
     * level remains high -- see armv7m_nvic_complete_irq()). So a
     * pulsed IRQ runs the handler once; a level-held IRQ re-runs it
     * until the line goes low.
     */
    vec = &s->vectors[n];
    if (level != vec->level) {
        vec->level = level;
        if (level) {
            armv7m_nvic_set_pending(s, n, false);
        }
    }
}
976
977
/* qemu_irq handler for the NMI input line. */
static void nvic_nmi_trigger(void *opaque, int n, int level)
{
    NVICState *s = opaque;

    trace_nvic_set_nmi_level(level);

    /*
     * Unlike external interrupts, we do not latch the level or
     * re-sample it on handler return: we simply pend NMI on a
     * high level and otherwise ignore the input.
     */
    if (level) {
        armv7m_nvic_set_pending(s, ARMV7M_EXCP_NMI, false);
    }
}
994
/*
 * Read a 32-bit System Control Space register at the given offset.
 * attrs.secure selects the Secure or Non-secure banked view where
 * registers are banked. Unknown offsets log a guest error and RAZ.
 */
static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
{
    ARMCPU *cpu = s->cpu;
    uint32_t val;

    switch (offset) {
    case 4: /* ICTR: number of external interrupt lines in units of 32 */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
            goto bad_offset;
        }
        return ((s->num_irq - NVIC_FIRST_IRQ) / 32) - 1;
    case 0xc: /* CPPWR (v8M): coprocessor power control; RAZ here */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        return 0;
    case 0x380 ... 0x3bf: /* NVIC_ITNS<n>: interrupt target Non-secure */
    {
        int startvec = 8 * (offset - 0x380) + NVIC_FIRST_IRQ;
        int i;

        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (!attrs.secure) {
            /* RAZ from Non-secure */
            return 0;
        }
        val = 0;
        for (i = 0; i < 32 && startvec + i < s->num_irq; i++) {
            if (s->itns[startvec + i]) {
                val |= (1 << i);
            }
        }
        return val;
    }
    case 0xcfc: /* REVIDR (v8.1M) */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8_1M)) {
            goto bad_offset;
        }
        return cpu->revidr;
    case 0xd00: /* CPUID base register */
        return cpu->midr;
    case 0xd04: /* ICSR: Interrupt Control and State */
        /* VECTACTIVE */
        val = cpu->env.v7m.exception;
        /*
         * VECTPENDING.
         * NOTE(review): masking with 0xff limits the field to 8 bits;
         * v8M defines a wider VECTPENDING field, so large external IRQ
         * numbers may be truncated here — confirm against the
         * architecture manual.
         */
        val |= (s->vectpending & 0xff) << 12;
        /* ISRPENDING: an external interrupt is pending */
        if (nvic_isrpending(s)) {
            val |= (1 << 22);
        }
        /* RETTOBASE: only one handler is active */
        if (nvic_rettobase(s)) {
            val |= (1 << 11);
        }
        if (attrs.secure) {
            /* PENDSTSET */
            if (s->sec_vectors[ARMV7M_EXCP_SYSTICK].pending) {
                val |= (1 << 26);
            }
            /* PENDSVSET */
            if (s->sec_vectors[ARMV7M_EXCP_PENDSV].pending) {
                val |= (1 << 28);
            }
        } else {
            /* PENDSTSET */
            if (s->vectors[ARMV7M_EXCP_SYSTICK].pending) {
                val |= (1 << 26);
            }
            /* PENDSVSET */
            if (s->vectors[ARMV7M_EXCP_PENDSV].pending) {
                val |= (1 << 28);
            }
        }
        /* NMIPENDSET: RAZ from NS if NMI is Secure (BFHFNMINS clear) */
        if ((attrs.secure || (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK))
            && s->vectors[ARMV7M_EXCP_NMI].pending) {
            val |= (1 << 31);
        }
        /* ISRPREEMPT: RES0 (no halting debug implemented) */
        return val;
    case 0xd08: /* VTOR: Vector Table Offset (banked) */
        return cpu->env.v7m.vecbase[attrs.secure];
    case 0xd0c: /* AIRCR: Application Interrupt and Reset Control */
        /* 0xfa05 is the read value of VECTKEY; PRIGROUP is banked */
        val = 0xfa050000 | (s->prigroup[attrs.secure] << 8);
        if (attrs.secure) {
            /* s->aircr stores PRIS, BFHFNMINS, SYSRESETREQS */
            val |= cpu->env.v7m.aircr;
        } else {
            if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
                /*
                 * BFHFNMINS is RAO/WI from NS if AIRCR.BFHFNMINS is
                 * set, RAZ/WI if clear: i.e. NS sees its true value.
                 */
                val |= cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK;
            }
        }
        return val;
    case 0xd10: /* SCR: System Control Register (banked) */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
            goto bad_offset;
        }
        return cpu->env.v7m.scr[attrs.secure];
    case 0xd14: /* CCR: Configuration Control */
        /*
         * The BFHFNMIGN bit is the only non-banked CCR bit: both views
         * read the NS copy of it.
         */
        val = cpu->env.v7m.ccr[attrs.secure];
        val |= cpu->env.v7m.ccr[M_REG_NS] & R_V7M_CCR_BFHFNMIGN_MASK;
        /* BFHFNMIGN is RAZ from NS if BusFault etc. are Secure-only */
        if (!attrs.secure) {
            if (!(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
                val &= ~R_V7M_CCR_BFHFNMIGN_MASK;
            }
        }
        return val;
    case 0xd24: /* SHCSR: System Handler Control and State */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
            goto bad_offset;
        }
        val = 0;
        if (attrs.secure) {
            if (s->sec_vectors[ARMV7M_EXCP_MEM].active) {
                val |= (1 << 0); /* MEMFAULTACT */
            }
            if (s->sec_vectors[ARMV7M_EXCP_HARD].active) {
                val |= (1 << 2); /* HARDFAULTACT */
            }
            if (s->sec_vectors[ARMV7M_EXCP_USAGE].active) {
                val |= (1 << 3); /* USGFAULTACT */
            }
            if (s->sec_vectors[ARMV7M_EXCP_SVC].active) {
                val |= (1 << 7); /* SVCALLACT */
            }
            if (s->sec_vectors[ARMV7M_EXCP_PENDSV].active) {
                val |= (1 << 10); /* PENDSVACT */
            }
            if (s->sec_vectors[ARMV7M_EXCP_SYSTICK].active) {
                val |= (1 << 11); /* SYSTICKACT */
            }
            if (s->sec_vectors[ARMV7M_EXCP_USAGE].pending) {
                val |= (1 << 12); /* USGFAULTPENDED */
            }
            if (s->sec_vectors[ARMV7M_EXCP_MEM].pending) {
                val |= (1 << 13); /* MEMFAULTPENDED */
            }
            if (s->sec_vectors[ARMV7M_EXCP_SVC].pending) {
                val |= (1 << 15); /* SVCALLPENDED */
            }
            if (s->sec_vectors[ARMV7M_EXCP_MEM].enabled) {
                val |= (1 << 16); /* MEMFAULTENA */
            }
            if (s->sec_vectors[ARMV7M_EXCP_USAGE].enabled) {
                val |= (1 << 18); /* USGFAULTENA */
            }
            if (s->sec_vectors[ARMV7M_EXCP_HARD].pending) {
                val |= (1 << 21); /* HARDFAULTPENDED */
            }
            /* SecureFault is not banked but is Secure-only access */
            if (s->vectors[ARMV7M_EXCP_SECURE].active) {
                val |= (1 << 4); /* SECUREFAULTACT */
            }
            if (s->vectors[ARMV7M_EXCP_SECURE].enabled) {
                val |= (1 << 19); /* SECUREFAULTENA */
            }
            if (s->vectors[ARMV7M_EXCP_SECURE].pending) {
                val |= (1 << 20); /* SECUREFAULTPENDED */
            }
        } else {
            if (s->vectors[ARMV7M_EXCP_MEM].active) {
                val |= (1 << 0); /* MEMFAULTACT */
            }
            if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
                /* HARDFAULTACT/HARDFAULTPENDED are not present in v7M */
                if (s->vectors[ARMV7M_EXCP_HARD].active) {
                    val |= (1 << 2);
                }
                if (s->vectors[ARMV7M_EXCP_HARD].pending) {
                    val |= (1 << 21);
                }
            }
            if (s->vectors[ARMV7M_EXCP_USAGE].active) {
                val |= (1 << 3); /* USGFAULTACT */
            }
            if (s->vectors[ARMV7M_EXCP_SVC].active) {
                val |= (1 << 7); /* SVCALLACT */
            }
            if (s->vectors[ARMV7M_EXCP_PENDSV].active) {
                val |= (1 << 10); /* PENDSVACT */
            }
            if (s->vectors[ARMV7M_EXCP_SYSTICK].active) {
                val |= (1 << 11); /* SYSTICKACT */
            }
            if (s->vectors[ARMV7M_EXCP_USAGE].pending) {
                val |= (1 << 12); /* USGFAULTPENDED */
            }
            if (s->vectors[ARMV7M_EXCP_MEM].pending) {
                val |= (1 << 13); /* MEMFAULTPENDED */
            }
            if (s->vectors[ARMV7M_EXCP_SVC].pending) {
                val |= (1 << 15); /* SVCALLPENDED */
            }
            if (s->vectors[ARMV7M_EXCP_MEM].enabled) {
                val |= (1 << 16); /* MEMFAULTENA */
            }
            if (s->vectors[ARMV7M_EXCP_USAGE].enabled) {
                val |= (1 << 18); /* USGFAULTENA */
            }
        }
        /* BusFault bits are RAZ from NS unless BFHFNMINS is set */
        if (attrs.secure || (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
            if (s->vectors[ARMV7M_EXCP_BUS].active) {
                val |= (1 << 1); /* BUSFAULTACT */
            }
            if (s->vectors[ARMV7M_EXCP_BUS].pending) {
                val |= (1 << 14); /* BUSFAULTPENDED */
            }
            if (s->vectors[ARMV7M_EXCP_BUS].enabled) {
                val |= (1 << 17); /* BUSFAULTENA */
            }
            if (arm_feature(&cpu->env, ARM_FEATURE_V8) &&
                s->vectors[ARMV7M_EXCP_NMI].active) {
                /* NMIACT is not present in v7M */
                val |= (1 << 5);
            }
        }

        /* MONITORACT (DebugMonitor active) */
        if (s->vectors[ARMV7M_EXCP_DEBUG].active) {
            val |= (1 << 8);
        }
        return val;
    case 0xd2c: /* HFSR: HardFault Status */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->env.v7m.hfsr;
    case 0xd30: /* DFSR: Debug Fault Status */
        return cpu->env.v7m.dfsr;
    case 0xd34: /* MMFAR: MemManage Fault Address (banked) */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->env.v7m.mmfar[attrs.secure];
    case 0xd38: /* BFAR: BusFault Address */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        /* RAZ from NS if BusFault is Secure-only */
        if (!attrs.secure &&
            !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
            return 0;
        }
        return cpu->env.v7m.bfar;
    case 0xd3c: /* AFSR: Aux Fault Status */
        qemu_log_mask(LOG_UNIMP,
                      "Aux Fault status registers unimplemented\n");
        return 0;
    case 0xd40: /* ID_PFR0 */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->isar.id_pfr0;
    case 0xd44: /* ID_PFR1 */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->isar.id_pfr1;
    case 0xd48: /* ID_DFR0 */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->isar.id_dfr0;
    case 0xd4c: /* ID_AFR0 */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->id_afr0;
    case 0xd50: /* ID_MMFR0 */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->isar.id_mmfr0;
    case 0xd54: /* ID_MMFR1 */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->isar.id_mmfr1;
    case 0xd58: /* ID_MMFR2 */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->isar.id_mmfr2;
    case 0xd5c: /* ID_MMFR3 */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->isar.id_mmfr3;
    case 0xd60: /* ID_ISAR0 */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->isar.id_isar0;
    case 0xd64: /* ID_ISAR1 */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->isar.id_isar1;
    case 0xd68: /* ID_ISAR2 */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->isar.id_isar2;
    case 0xd6c: /* ID_ISAR3 */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->isar.id_isar3;
    case 0xd70: /* ID_ISAR4 */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->isar.id_isar4;
    case 0xd74: /* ID_ISAR5 */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->isar.id_isar5;
    case 0xd78: /* CLIDR */
        return cpu->clidr;
    case 0xd7c: /* CTR */
        return cpu->ctr;
    case 0xd80: /* CSSIDR: indexed by the banked CSSELR */
    {
        int idx = cpu->env.v7m.csselr[attrs.secure] & R_V7M_CSSELR_INDEX_MASK;
        return cpu->ccsidr[idx];
    }
    case 0xd84: /* CSSELR (banked) */
        return cpu->env.v7m.csselr[attrs.secure];
    case 0xd88: /* CPACR (banked); RAZ without FP */
        if (!cpu_isar_feature(aa32_vfp_simd, cpu)) {
            return 0;
        }
        return cpu->env.v7m.cpacr[attrs.secure];
    case 0xd8c: /* NSACR: Secure-only access; RAZ without FP */
        if (!attrs.secure || !cpu_isar_feature(aa32_vfp_simd, cpu)) {
            return 0;
        }
        return cpu->env.v7m.nsacr;
    /* MPU registers */
    case 0xd90: /* MPU_TYPE: DREGION in [15:8] */
        return cpu->pmsav7_dregion << 8;
    case 0xd94: /* MPU_CTRL (banked) */
        return cpu->env.v7m.mpu_ctrl[attrs.secure];
    case 0xd98: /* MPU_RNR (banked) */
        return cpu->env.pmsav7.rnr[attrs.secure];
    case 0xd9c: /* MPU_RBAR and its A1/A2/A3 aliases */
    case 0xda4:
    case 0xdac:
    case 0xdb4:
    {
        int region = cpu->env.pmsav7.rnr[attrs.secure];

        if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            /*
             * PMSAv8M handling of the aliases is different from v7M:
             * aliases A1, A2, A3 override the low two bits of the
             * region number in MPU_RNR, and there is no 'region'
             * field in the RBAR register.
             */
            int aliasno = (offset - 0xd9c) / 8;
            if (aliasno) {
                region = deposit32(region, 0, 2, aliasno);
            }
            if (region >= cpu->pmsav7_dregion) {
                return 0;
            }
            return cpu->env.pmsav8.rbar[attrs.secure][region];
        }

        if (region >= cpu->pmsav7_dregion) {
            return 0;
        }
        /* v7M: base address plus the REGION field in the low bits */
        return (cpu->env.pmsav7.drbar[region] & ~0x1f) | (region & 0xf);
    }
    case 0xda0: /* MPU_RASR (v7M) / MPU_RLAR (v8M) and aliases */
    case 0xda8:
    case 0xdb0:
    case 0xdb8:
    {
        int region = cpu->env.pmsav7.rnr[attrs.secure];

        if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            /* v8M: aliases override the low two bits of MPU_RNR */
            int aliasno = (offset - 0xda0) / 8;
            if (aliasno) {
                region = deposit32(region, 0, 2, aliasno);
            }
            if (region >= cpu->pmsav7_dregion) {
                return 0;
            }
            return cpu->env.pmsav8.rlar[attrs.secure][region];
        }

        if (region >= cpu->pmsav7_dregion) {
            return 0;
        }
        /* v7M RASR: attributes in the top half, size/enable in the bottom */
        return ((cpu->env.pmsav7.dracr[region] & 0xffff) << 16) |
            (cpu->env.pmsav7.drsr[region] & 0xffff);
    }
    case 0xdc0: /* MPU_MAIR0 (banked) */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        return cpu->env.pmsav8.mair0[attrs.secure];
    case 0xdc4: /* MPU_MAIR1 (banked) */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        return cpu->env.pmsav8.mair1[attrs.secure];
    case 0xdd0: /* SAU_CTRL: Secure-only */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (!attrs.secure) {
            return 0;
        }
        return cpu->env.sau.ctrl;
    case 0xdd4: /* SAU_TYPE: number of SAU regions; Secure-only */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (!attrs.secure) {
            return 0;
        }
        return cpu->sau_sregion;
    case 0xdd8: /* SAU_RNR: Secure-only */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (!attrs.secure) {
            return 0;
        }
        return cpu->env.sau.rnr;
    case 0xddc: /* SAU_RBAR: Secure-only, indexed by SAU_RNR */
    {
        int region = cpu->env.sau.rnr;

        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (!attrs.secure) {
            return 0;
        }
        if (region >= cpu->sau_sregion) {
            return 0;
        }
        return cpu->env.sau.rbar[region];
    }
    case 0xde0: /* SAU_RLAR: Secure-only, indexed by SAU_RNR */
    {
        int region = cpu->env.sau.rnr;

        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (!attrs.secure) {
            return 0;
        }
        if (region >= cpu->sau_sregion) {
            return 0;
        }
        return cpu->env.sau.rlar[region];
    }
    case 0xde4: /* SFSR: Secure Fault Status; Secure-only */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (!attrs.secure) {
            return 0;
        }
        return cpu->env.v7m.sfsr;
    case 0xde8: /* SFAR: Secure Fault Address; Secure-only */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (!attrs.secure) {
            return 0;
        }
        return cpu->env.v7m.sfar;
    case 0xf04: /* RFSR (v8.1M RAS): RAZ */
        if (!cpu_isar_feature(aa32_ras, cpu)) {
            goto bad_offset;
        }
        return 0;
    case 0xf34: /* FPCCR */
        if (!cpu_isar_feature(aa32_vfp_simd, cpu)) {
            return 0;
        }
        if (attrs.secure) {
            return cpu->env.v7m.fpccr[M_REG_S];
        } else {
            /*
             * NS can read LSPEN, CLRONRET and MONRDY. It can read
             * BFRDY and HFRDY if AIRCR.BFHFNMINS != 0;
             * other non-banked bits RAZ.
             * TODO: MONRDY should RAZ/WI if DEMCR.SDME is set.
             */
            uint32_t value = cpu->env.v7m.fpccr[M_REG_S];
            uint32_t mask = R_V7M_FPCCR_LSPEN_MASK |
                R_V7M_FPCCR_CLRONRET_MASK |
                R_V7M_FPCCR_MONRDY_MASK;

            if (s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
                mask |= R_V7M_FPCCR_BFRDY_MASK | R_V7M_FPCCR_HFRDY_MASK;
            }

            value &= mask;

            /* The banked bits come from the NS copy */
            value |= cpu->env.v7m.fpccr[M_REG_NS];
            return value;
        }
    case 0xf38: /* FPCAR (banked) */
        if (!cpu_isar_feature(aa32_vfp_simd, cpu)) {
            return 0;
        }
        return cpu->env.v7m.fpcar[attrs.secure];
    case 0xf3c: /* FPDSCR (banked) */
        if (!cpu_isar_feature(aa32_vfp_simd, cpu)) {
            return 0;
        }
        return cpu->env.v7m.fpdscr[attrs.secure];
    case 0xf40: /* MVFR0 */
        return cpu->isar.mvfr0;
    case 0xf44: /* MVFR1 */
        return cpu->isar.mvfr1;
    case 0xf48: /* MVFR2 */
        return cpu->isar.mvfr2;
    default:
    bad_offset:
        qemu_log_mask(LOG_GUEST_ERROR, "NVIC: Bad read offset 0x%x\n", offset);
        return 0;
    }
}
1547
/*
 * Handle a 32-bit write to a System Control Space register at @offset.
 * @attrs carries the security and privilege attributes of the access;
 * many registers are banked by attrs.secure or behave differently per
 * security state. Writes to unknown offsets are logged as guest errors
 * and otherwise ignored.
 */
static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
                        MemTxAttrs attrs)
{
    ARMCPU *cpu = s->cpu;

    switch (offset) {
    case 0xc: /* CPPWR */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        /* Make the IMPDEF choice to RAZ/WI this */
        break;
    case 0x380 ... 0x3bf: /* NVIC_ITNS<n> */
    {
        int startvec = 8 * (offset - 0x380) + NVIC_FIRST_IRQ;
        int i;

        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (!attrs.secure) {
            /* ITNS is only writable from the Secure bank; NS writes ignored */
            break;
        }
        for (i = 0; i < 32 && startvec + i < s->num_irq; i++) {
            s->itns[startvec + i] = (value >> i) & 1;
        }
        nvic_irq_update(s);
        break;
    }
    case 0xd04: /* Interrupt Control State (ICSR) */
        if (attrs.secure || cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
            if (value & (1 << 31)) {
                /* PENDNMISET */
                armv7m_nvic_set_pending(s, ARMV7M_EXCP_NMI, false);
            } else if (value & (1 << 30) &&
                       arm_feature(&cpu->env, ARM_FEATURE_V8)) {
                /* PENDNMICLR didn't exist in v7M */
                armv7m_nvic_clear_pending(s, ARMV7M_EXCP_NMI, false);
            }
        }
        if (value & (1 << 28)) {
            /* PENDSVSET */
            armv7m_nvic_set_pending(s, ARMV7M_EXCP_PENDSV, attrs.secure);
        } else if (value & (1 << 27)) {
            /* PENDSVCLR */
            armv7m_nvic_clear_pending(s, ARMV7M_EXCP_PENDSV, attrs.secure);
        }
        if (value & (1 << 26)) {
            /* PENDSTSET */
            armv7m_nvic_set_pending(s, ARMV7M_EXCP_SYSTICK, attrs.secure);
        } else if (value & (1 << 25)) {
            /* PENDSTCLR */
            armv7m_nvic_clear_pending(s, ARMV7M_EXCP_SYSTICK, attrs.secure);
        }
        break;
    case 0xd08: /* Vector Table Offset (VTOR); low 7 bits are RES0 */
        cpu->env.v7m.vecbase[attrs.secure] = value & 0xffffff80;
        break;
    case 0xd0c: /* Application Interrupt/Reset Control (AIRCR) */
        if ((value >> R_V7M_AIRCR_VECTKEY_SHIFT) == 0x05fa) {
            /* Writes take effect only with the VECTKEY cookie 0x05fa */
            if (value & R_V7M_AIRCR_SYSRESETREQ_MASK) {
                if (attrs.secure ||
                    !(cpu->env.v7m.aircr & R_V7M_AIRCR_SYSRESETREQS_MASK)) {
                    signal_sysresetreq(s);
                }
            }
            if (value & R_V7M_AIRCR_VECTCLRACTIVE_MASK) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Setting VECTCLRACTIVE when not in DEBUG mode "
                              "is UNPREDICTABLE\n");
            }
            if (value & R_V7M_AIRCR_VECTRESET_MASK) {
                /* NB: this bit is RES0 in v8M */
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Setting VECTRESET when not in DEBUG mode "
                              "is UNPREDICTABLE\n");
            }
            if (arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
                s->prigroup[attrs.secure] =
                    extract32(value,
                              R_V7M_AIRCR_PRIGROUP_SHIFT,
                              R_V7M_AIRCR_PRIGROUP_LENGTH);
            }
            /* The remaining bits are only writable from Secure state */
            if (attrs.secure) {
                /* These bits are only writable by secure */
                cpu->env.v7m.aircr = value &
                    (R_V7M_AIRCR_SYSRESETREQS_MASK |
                     R_V7M_AIRCR_BFHFNMINS_MASK |
                     R_V7M_AIRCR_PRIS_MASK);
                /* BFHFNMINS changes the priority of Secure HardFault, and
                 * also makes Non-secure HardFault able to take effect
                 * (which we model by toggling its 'enabled' flag).
                 */
                if (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
                    s->sec_vectors[ARMV7M_EXCP_HARD].prio = -3;
                    s->vectors[ARMV7M_EXCP_HARD].enabled = 1;
                } else {
                    s->sec_vectors[ARMV7M_EXCP_HARD].prio = -1;
                    s->vectors[ARMV7M_EXCP_HARD].enabled = 0;
                }
            }
            nvic_irq_update(s);
        }
        break;
    case 0xd10: /* System Control (SCR) */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
            goto bad_offset;
        }
        /*
         * We don't implement deep-sleep so the SLEEPDEEP/SLEEPDEEPS
         * bits are made RAZ/WI. The other bits in the register are
         * banked by security state.
         */
        value &= ~(R_V7M_SCR_SLEEPDEEP_MASK | R_V7M_SCR_SLEEPDEEPS_MASK);
        cpu->env.v7m.scr[attrs.secure] = value;
        break;
    case 0xd14: /* Configuration Control (CCR) */
    {
        uint32_t mask;

        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }

        /* Enforce RAZ/WI on reserved and must-RAZ/WI bits */
        mask = R_V7M_CCR_STKALIGN_MASK |
            R_V7M_CCR_BFHFNMIGN_MASK |
            R_V7M_CCR_DIV_0_TRP_MASK |
            R_V7M_CCR_UNALIGN_TRP_MASK |
            R_V7M_CCR_USERSETMPEND_MASK |
            R_V7M_CCR_NONBASETHRDENA_MASK;
        if (arm_feature(&cpu->env, ARM_FEATURE_V8_1M) && attrs.secure) {
            /* TRD is always RAZ/WI from NS */
            mask |= R_V7M_CCR_TRD_MASK;
        }
        value &= mask;

        if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            /* v8M makes NONBASETHRDENA and STKALIGN be RES1 */
            value |= R_V7M_CCR_NONBASETHRDENA_MASK
                | R_V7M_CCR_STKALIGN_MASK;
        }
        if (attrs.secure) {
            /* The BFHFNMIGN bit is not banked; keep it in the NS copy */
            cpu->env.v7m.ccr[M_REG_NS] =
                (cpu->env.v7m.ccr[M_REG_NS] & ~R_V7M_CCR_BFHFNMIGN_MASK)
                | (value & R_V7M_CCR_BFHFNMIGN_MASK);
            value &= ~R_V7M_CCR_BFHFNMIGN_MASK;
        } else {
            /*
             * BFHFNMIGN is RAZ/WI from NS if AIRCR.BFHFNMINS is 0, so
             * preserve the state currently in the NS element of the
             * array instead of taking the written value.
             */
            if (!(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
                value &= ~R_V7M_CCR_BFHFNMIGN_MASK;
                value |= cpu->env.v7m.ccr[M_REG_NS] & R_V7M_CCR_BFHFNMIGN_MASK;
            }
        }

        cpu->env.v7m.ccr[attrs.secure] = value;
        break;
    }
    case 0xd24: /* System Handler Control and State (SHCSR) */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
            goto bad_offset;
        }
        if (attrs.secure) {
            s->sec_vectors[ARMV7M_EXCP_MEM].active = (value & (1 << 0)) != 0;
            /* Secure HardFault active bit cannot be written */
            s->sec_vectors[ARMV7M_EXCP_USAGE].active = (value & (1 << 3)) != 0;
            s->sec_vectors[ARMV7M_EXCP_SVC].active = (value & (1 << 7)) != 0;
            s->sec_vectors[ARMV7M_EXCP_PENDSV].active =
                (value & (1 << 10)) != 0;
            s->sec_vectors[ARMV7M_EXCP_SYSTICK].active =
                (value & (1 << 11)) != 0;
            s->sec_vectors[ARMV7M_EXCP_USAGE].pending =
                (value & (1 << 12)) != 0;
            s->sec_vectors[ARMV7M_EXCP_MEM].pending = (value & (1 << 13)) != 0;
            s->sec_vectors[ARMV7M_EXCP_SVC].pending = (value & (1 << 15)) != 0;
            s->sec_vectors[ARMV7M_EXCP_MEM].enabled = (value & (1 << 16)) != 0;
            s->sec_vectors[ARMV7M_EXCP_BUS].enabled = (value & (1 << 17)) != 0;
            s->sec_vectors[ARMV7M_EXCP_USAGE].enabled =
                (value & (1 << 18)) != 0;
            s->sec_vectors[ARMV7M_EXCP_HARD].pending = (value & (1 << 21)) != 0;
            /* SecureFault is not banked but is always RAZ/WI to NS */
            s->vectors[ARMV7M_EXCP_SECURE].active = (value & (1 << 4)) != 0;
            s->vectors[ARMV7M_EXCP_SECURE].enabled = (value & (1 << 19)) != 0;
            s->vectors[ARMV7M_EXCP_SECURE].pending = (value & (1 << 20)) != 0;
        } else {
            s->vectors[ARMV7M_EXCP_MEM].active = (value & (1 << 0)) != 0;
            if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
                /* HARDFAULTPENDED is not present in v7M */
                s->vectors[ARMV7M_EXCP_HARD].pending = (value & (1 << 21)) != 0;
            }
            s->vectors[ARMV7M_EXCP_USAGE].active = (value & (1 << 3)) != 0;
            s->vectors[ARMV7M_EXCP_SVC].active = (value & (1 << 7)) != 0;
            s->vectors[ARMV7M_EXCP_PENDSV].active = (value & (1 << 10)) != 0;
            s->vectors[ARMV7M_EXCP_SYSTICK].active = (value & (1 << 11)) != 0;
            s->vectors[ARMV7M_EXCP_USAGE].pending = (value & (1 << 12)) != 0;
            s->vectors[ARMV7M_EXCP_MEM].pending = (value & (1 << 13)) != 0;
            s->vectors[ARMV7M_EXCP_SVC].pending = (value & (1 << 15)) != 0;
            s->vectors[ARMV7M_EXCP_MEM].enabled = (value & (1 << 16)) != 0;
            s->vectors[ARMV7M_EXCP_USAGE].enabled = (value & (1 << 18)) != 0;
        }
        if (attrs.secure || (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
            /* BusFault bits are RAZ/WI from NS unless BFHFNMINS is set */
            s->vectors[ARMV7M_EXCP_BUS].active = (value & (1 << 1)) != 0;
            s->vectors[ARMV7M_EXCP_BUS].pending = (value & (1 << 14)) != 0;
            s->vectors[ARMV7M_EXCP_BUS].enabled = (value & (1 << 17)) != 0;
        }
        /* NMIACT can only be written if the write is of a zero, with
         * BFHFNMINS 1, and by the CPU in secure state via the NS alias.
         */
        if (!attrs.secure && cpu->env.v7m.secure &&
            (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) &&
            (value & (1 << 5)) == 0) {
            s->vectors[ARMV7M_EXCP_NMI].active = 0;
        }
        /* HARDFAULTACT can only be written if the write is of a zero,
         * with BFHFNMINS 1, and by the CPU in secure state via the NS
         * alias.
         */
        if (!attrs.secure && cpu->env.v7m.secure &&
            (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) &&
            (value & (1 << 2)) == 0) {
            s->vectors[ARMV7M_EXCP_HARD].active = 0;
        }

        /* TODO: this is RAZ/WI from NS if DEMCR.SDME is set */
        s->vectors[ARMV7M_EXCP_DEBUG].active = (value & (1 << 8)) != 0;
        nvic_irq_update(s);
        break;
    case 0xd2c: /* Hard Fault Status (HFSR) -- bits are write-one-to-clear */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        cpu->env.v7m.hfsr &= ~value;
        break;
    case 0xd30: /* Debug Fault Status (DFSR) -- write-one-to-clear */
        cpu->env.v7m.dfsr &= ~value;
        break;
    case 0xd34: /* MemManage Fault Address (MMFAR) */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        cpu->env.v7m.mmfar[attrs.secure] = value;
        return;
    case 0xd38: /* Bus Fault Address (BFAR) */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        if (!attrs.secure &&
            !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
            /* RAZ/WI from NS when AIRCR.BFHFNMINS is clear */
            return;
        }
        cpu->env.v7m.bfar = value;
        return;
    case 0xd3c: /* Aux Fault Status (AFSR) */
        qemu_log_mask(LOG_UNIMP,
                      "NVIC: Aux fault status registers unimplemented\n");
        break;
    case 0xd84: /* Cache Size Selection (CSSELR) */
        if (!arm_v7m_csselr_razwi(cpu)) {
            cpu->env.v7m.csselr[attrs.secure] = value & R_V7M_CSSELR_INDEX_MASK;
        }
        break;
    case 0xd88: /* Coprocessor Access Control (CPACR) */
        if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
            /* We implement only the Floating Point extension's CP10/CP11 */
            cpu->env.v7m.cpacr[attrs.secure] = value & (0xf << 20);
        }
        break;
    case 0xd8c: /* NSACR -- only writable from Secure state */
        if (attrs.secure && cpu_isar_feature(aa32_vfp_simd, cpu)) {
            /* We implement only the Floating Point extension's CP10/CP11 */
            cpu->env.v7m.nsacr = value & (3 << 10);
        }
        break;
    case 0xd90: /* MPU_TYPE -- read-only register */
        return;
    case 0xd94: /* MPU_CTRL */
        if ((value &
             (R_V7M_MPU_CTRL_HFNMIENA_MASK | R_V7M_MPU_CTRL_ENABLE_MASK))
            == R_V7M_MPU_CTRL_HFNMIENA_MASK) {
            qemu_log_mask(LOG_GUEST_ERROR, "MPU_CTRL: HFNMIENA and !ENABLE is "
                          "UNPREDICTABLE\n");
        }
        cpu->env.v7m.mpu_ctrl[attrs.secure]
            = value & (R_V7M_MPU_CTRL_ENABLE_MASK |
                       R_V7M_MPU_CTRL_HFNMIENA_MASK |
                       R_V7M_MPU_CTRL_PRIVDEFENA_MASK);
        /* Enabling/disabling the MPU changes address translation */
        tlb_flush(CPU(cpu));
        break;
    case 0xd98: /* MPU_RNR */
        if (value >= cpu->pmsav7_dregion) {
            qemu_log_mask(LOG_GUEST_ERROR, "MPU region out of range %"
                          PRIu32 "/%" PRIu32 "\n",
                          value, cpu->pmsav7_dregion);
        } else {
            cpu->env.pmsav7.rnr[attrs.secure] = value;
        }
        break;
    case 0xd9c: /* MPU_RBAR */
    case 0xda4: /* MPU_RBAR_A1 */
    case 0xdac: /* MPU_RBAR_A2 */
    case 0xdb4: /* MPU_RBAR_A3 */
    {
        int region;

        if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            /* PMSAv8M handling of the aliases is different from v7M:
             * aliases A1, A2, A3 override the low two bits of the region
             * number in MPU_RNR, and there is no 'region' field in the
             * RBAR register.
             */
            int aliasno = (offset - 0xd9c) / 8; /* 0..3 */

            region = cpu->env.pmsav7.rnr[attrs.secure];
            if (aliasno) {
                region = deposit32(region, 0, 2, aliasno);
            }
            if (region >= cpu->pmsav7_dregion) {
                return;
            }
            cpu->env.pmsav8.rbar[attrs.secure][region] = value;
            tlb_flush(CPU(cpu));
            return;
        }

        if (value & (1 << 4)) {
            /* VALID bit means use the region number specified in this
             * value and also update MPU_RNR.REGION with that value.
             */
            region = extract32(value, 0, 4);
            if (region >= cpu->pmsav7_dregion) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "MPU region out of range %u/%" PRIu32 "\n",
                              region, cpu->pmsav7_dregion);
                return;
            }
            cpu->env.pmsav7.rnr[attrs.secure] = region;
        } else {
            region = cpu->env.pmsav7.rnr[attrs.secure];
        }

        if (region >= cpu->pmsav7_dregion) {
            return;
        }

        cpu->env.pmsav7.drbar[region] = value & ~0x1f;
        tlb_flush(CPU(cpu));
        break;
    }
    case 0xda0: /* MPU_RASR (v7M), MPU_RLAR (v8M) */
    case 0xda8: /* MPU_RASR_A1 (v7M), MPU_RLAR_A1 (v8M) */
    case 0xdb0: /* MPU_RASR_A2 (v7M), MPU_RLAR_A2 (v8M) */
    case 0xdb8: /* MPU_RASR_A3 (v7M), MPU_RLAR_A3 (v8M) */
    {
        int region = cpu->env.pmsav7.rnr[attrs.secure];

        if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            /* PMSAv8M handling of the aliases is different from v7M:
             * aliases A1, A2, A3 override the low two bits of the region
             * number in MPU_RNR.
             */
            int aliasno = (offset - 0xd9c) / 8; /* 0..3 */

            region = cpu->env.pmsav7.rnr[attrs.secure];
            if (aliasno) {
                region = deposit32(region, 0, 2, aliasno);
            }
            if (region >= cpu->pmsav7_dregion) {
                return;
            }
            cpu->env.pmsav8.rlar[attrs.secure][region] = value;
            tlb_flush(CPU(cpu));
            return;
        }

        if (region >= cpu->pmsav7_dregion) {
            return;
        }

        cpu->env.pmsav7.drsr[region] = value & 0xff3f;
        cpu->env.pmsav7.dracr[region] = (value >> 16) & 0x173f;
        tlb_flush(CPU(cpu));
        break;
    }
    case 0xdc0: /* MPU_MAIR0 */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (cpu->pmsav7_dregion) {
            /* Register is RES0 if no MPU regions are implemented */
            cpu->env.pmsav8.mair0[attrs.secure] = value;
        }
        /* We don't need to do anything else because memory attributes
         * only affect cacheability, and we don't implement caching.
         */
        break;
    case 0xdc4: /* MPU_MAIR1 */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (cpu->pmsav7_dregion) {
            /* Register is RES0 if no MPU regions are implemented */
            cpu->env.pmsav8.mair1[attrs.secure] = value;
        }
        /* We don't need to do anything else because memory attributes
         * only affect cacheability, and we don't implement caching.
         */
        break;
    case 0xdd0: /* SAU_CTRL */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (!attrs.secure) {
            /* SAU registers are RAZ/WI from Non-secure */
            return;
        }
        cpu->env.sau.ctrl = value & 3;
        break;
    case 0xdd4: /* SAU_TYPE -- read-only register */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        break;
    case 0xdd8: /* SAU_RNR */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (!attrs.secure) {
            return;
        }
        if (value >= cpu->sau_sregion) {
            qemu_log_mask(LOG_GUEST_ERROR, "SAU region out of range %"
                          PRIu32 "/%" PRIu32 "\n",
                          value, cpu->sau_sregion);
        } else {
            cpu->env.sau.rnr = value;
        }
        break;
    case 0xddc: /* SAU_RBAR */
    {
        int region = cpu->env.sau.rnr;

        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (!attrs.secure) {
            return;
        }
        if (region >= cpu->sau_sregion) {
            return;
        }
        cpu->env.sau.rbar[region] = value & ~0x1f;
        tlb_flush(CPU(cpu));
        break;
    }
    case 0xde0: /* SAU_RLAR */
    {
        int region = cpu->env.sau.rnr;

        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (!attrs.secure) {
            return;
        }
        if (region >= cpu->sau_sregion) {
            return;
        }
        cpu->env.sau.rlar[region] = value & ~0x1c;
        tlb_flush(CPU(cpu));
        break;
    }
    case 0xde4: /* SFSR -- write-one-to-clear */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (!attrs.secure) {
            return;
        }
        cpu->env.v7m.sfsr &= ~value;
        break;
    case 0xde8: /* SFAR */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (!attrs.secure) {
            return;
        }
        cpu->env.v7m.sfsr = value;
        break;
    case 0xf00: /* Software Triggered Interrupt Register */
    {
        int excnum = (value & 0x1ff) + NVIC_FIRST_IRQ;

        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }

        if (excnum < s->num_irq) {
            armv7m_nvic_set_pending(s, excnum, false);
        }
        break;
    }
    case 0xf04: /* RFSR */
        if (!cpu_isar_feature(aa32_ras, cpu)) {
            goto bad_offset;
        }
        /* We provide minimal-RAS only: RFSR is RAZ/WI */
        break;
    case 0xf34: /* FPCCR */
        if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
            /* Not all bits here are banked. */
            uint32_t fpccr_s;

            if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
                /* Don't allow setting of bits not present in v7M */
                value &= (R_V7M_FPCCR_LSPACT_MASK |
                          R_V7M_FPCCR_USER_MASK |
                          R_V7M_FPCCR_THREAD_MASK |
                          R_V7M_FPCCR_HFRDY_MASK |
                          R_V7M_FPCCR_MMRDY_MASK |
                          R_V7M_FPCCR_BFRDY_MASK |
                          R_V7M_FPCCR_MONRDY_MASK |
                          R_V7M_FPCCR_LSPEN_MASK |
                          R_V7M_FPCCR_ASPEN_MASK);
            }
            value &= ~R_V7M_FPCCR_RES0_MASK;

            if (!attrs.secure) {
                /* Some non-banked bits are configurably writable from NS */
                fpccr_s = cpu->env.v7m.fpccr[M_REG_S];
                if (!(fpccr_s & R_V7M_FPCCR_LSPENS_MASK)) {
                    uint32_t lspen = FIELD_EX32(value, V7M_FPCCR, LSPEN);
                    fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, LSPEN, lspen);
                }
                if (!(fpccr_s & R_V7M_FPCCR_CLRONRETS_MASK)) {
                    uint32_t cor = FIELD_EX32(value, V7M_FPCCR, CLRONRET);
                    fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, CLRONRET, cor);
                }
                if ((s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
                    uint32_t hfrdy = FIELD_EX32(value, V7M_FPCCR, HFRDY);
                    uint32_t bfrdy = FIELD_EX32(value, V7M_FPCCR, BFRDY);
                    fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, HFRDY, hfrdy);
                    fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, BFRDY, bfrdy);
                }
                /* TODO MONRDY should RAZ/WI if DEMCR.SDME is set */
                {
                    uint32_t monrdy = FIELD_EX32(value, V7M_FPCCR, MONRDY);
                    fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, MONRDY, monrdy);
                }

                /*
                 * All other non-banked bits are RAZ/WI from NS; write
                 * just the banked bits to fpccr[M_REG_NS].
                 */
                value &= R_V7M_FPCCR_BANKED_MASK;
                cpu->env.v7m.fpccr[M_REG_NS] = value;
            } else {
                fpccr_s = value;
            }
            cpu->env.v7m.fpccr[M_REG_S] = fpccr_s;
        }
        break;
    case 0xf38: /* FPCAR -- low 3 address bits are RES0 */
        if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
            value &= ~7;
            cpu->env.v7m.fpcar[attrs.secure] = value;
        }
        break;
    case 0xf3c: /* FPDSCR */
        if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
            uint32_t mask = FPCR_AHP | FPCR_DN | FPCR_FZ | FPCR_RMODE_MASK;
            if (cpu_isar_feature(any_fp16, cpu)) {
                mask |= FPCR_FZ16;
            }
            value &= mask;
            if (cpu_isar_feature(aa32_lob, cpu)) {
                /* NOTE(review): LTPSIZE is forced to 4 here; presumably it
                 * reads-as-4 for CPUs with low-overhead branches -- confirm
                 * against the v8.1-M spec.
                 */
                value |= 4 << FPCR_LTPSIZE_SHIFT;
            }
            cpu->env.v7m.fpdscr[attrs.secure] = value;
        }
        break;
    case 0xf50: /* ICIALLU */
    case 0xf58: /* ICIMVAU */
    case 0xf5c: /* DCIMVAC */
    case 0xf60: /* DCISW */
    case 0xf64: /* DCCMVAU */
    case 0xf68: /* DCCMVAC */
    case 0xf6c: /* DCCSW */
    case 0xf70: /* DCCIMVAC */
    case 0xf74: /* DCCISW */
    case 0xf78: /* BPIALL */
        /* Cache and branch predictor maintenance: NOP as we don't model
         * caches or branch predictors.
         */
        break;
    default:
    bad_offset:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "NVIC: Bad write offset 0x%x\n", offset);
    }
}
2148
2149static bool nvic_user_access_ok(NVICState *s, hwaddr offset, MemTxAttrs attrs)
2150{
2151
2152 switch (offset) {
2153 case 0xf00:
2154
2155
2156
2157 return s->cpu->env.v7m.ccr[attrs.secure] & R_V7M_CCR_USERSETMPEND_MASK;
2158 default:
2159
2160 return false;
2161 }
2162}
2163
2164static int shpr_bank(NVICState *s, int exc, MemTxAttrs attrs)
2165{
2166
2167
2168
2169
2170
2171 switch (exc) {
2172 case ARMV7M_EXCP_MEM:
2173 case ARMV7M_EXCP_USAGE:
2174 case ARMV7M_EXCP_SVC:
2175 case ARMV7M_EXCP_PENDSV:
2176 case ARMV7M_EXCP_SYSTICK:
2177
2178 return attrs.secure;
2179 case ARMV7M_EXCP_BUS:
2180
2181 if (!attrs.secure &&
2182 !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
2183 return -1;
2184 }
2185 return M_REG_NS;
2186 case ARMV7M_EXCP_SECURE:
2187
2188 if (!attrs.secure) {
2189 return -1;
2190 }
2191 return M_REG_NS;
2192 case ARMV7M_EXCP_DEBUG:
2193
2194 return M_REG_NS;
2195 case 8 ... 10:
2196 case 13:
2197
2198 return -1;
2199 default:
2200
2201 g_assert_not_reached();
2202 }
2203}
2204
/*
 * MMIO read dispatcher for the System Control Space. Registers that
 * support byte/halfword access or that are per-IRQ bit/byte arrays
 * are handled here; everything else is delegated to nvic_readl() as
 * a word-sized access.
 */
static MemTxResult nvic_sysreg_read(void *opaque, hwaddr addr,
                                    uint64_t *data, unsigned size,
                                    MemTxAttrs attrs)
{
    NVICState *s = (NVICState *)opaque;
    uint32_t offset = addr;
    unsigned i, startvec, end;
    uint32_t val;

    if (attrs.user && !nvic_user_access_ok(s, addr, attrs)) {
        /* Generate BusFault for unprivileged accesses */
        return MEMTX_ERROR;
    }

    switch (offset) {
    /* reads of set and clear both return the current status */
    case 0x100 ... 0x13f: /* NVIC Set enable */
        offset += 0x80;
        /* fall through */
    case 0x180 ... 0x1bf: /* NVIC Clear enable */
        val = 0;
        startvec = 8 * (offset - 0x180) + NVIC_FIRST_IRQ; /* vector # */

        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
            /* NS accesses only see IRQs whose ITNS bit targets NS */
            if (s->vectors[startvec + i].enabled &&
                (attrs.secure || s->itns[startvec + i])) {
                val |= (1 << i);
            }
        }
        break;
    case 0x200 ... 0x23f: /* NVIC Set pend */
        offset += 0x80;
        /* fall through */
    case 0x280 ... 0x2bf: /* NVIC Clear pend */
        val = 0;
        startvec = 8 * (offset - 0x280) + NVIC_FIRST_IRQ; /* vector # */
        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
            if (s->vectors[startvec + i].pending &&
                (attrs.secure || s->itns[startvec + i])) {
                val |= (1 << i);
            }
        }
        break;
    case 0x300 ... 0x33f: /* NVIC Active */
        val = 0;

        if (!arm_feature(&s->cpu->env, ARM_FEATURE_V7)) {
            /* Active-status registers are not present before v7M */
            break;
        }

        startvec = 8 * (offset - 0x300) + NVIC_FIRST_IRQ; /* vector # */

        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
            if (s->vectors[startvec + i].active &&
                (attrs.secure || s->itns[startvec + i])) {
                val |= (1 << i);
            }
        }
        break;
    case 0x400 ... 0x5ef: /* NVIC Priority -- one byte per vector */
        val = 0;
        startvec = offset - 0x400 + NVIC_FIRST_IRQ; /* vector # */

        for (i = 0; i < size && startvec + i < s->num_irq; i++) {
            if (attrs.secure || s->itns[startvec + i]) {
                val |= s->vectors[startvec + i].prio << (8 * i);
            }
        }
        break;
    case 0xd18 ... 0xd1b: /* System Handler Priority (SHPR1) */
        if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
            /* RAZ on baseline (v6M-style) profiles */
            val = 0;
            break;
        }
        /* fall through */
    case 0xd1c ... 0xd23: /* System Handler Priority (SHPR2, SHPR3) */
        val = 0;
        for (i = 0; i < size; i++) {
            unsigned hdlidx = (offset - 0xd14) + i;
            int sbank = shpr_bank(s, hdlidx, attrs);

            if (sbank < 0) {
                /* RAZ byte for this access's security state */
                continue;
            }
            val = deposit32(val, i * 8, 8, get_prio(s, hdlidx, sbank));
        }
        break;
    case 0xd28 ... 0xd2b: /* Configurable Fault Status (CFSR) */
        if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
            val = 0;
            break;
        };
        /*
         * The BFSR bits [15:8] are shared between security states and
         * are stored in the NS copy; they are RAZ for NS code if
         * AIRCR.BFHFNMINS is 0.
         */
        val = s->cpu->env.v7m.cfsr[attrs.secure];
        if (!attrs.secure &&
            !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
            val &= ~R_V7M_CFSR_BFSR_MASK;
        } else {
            val |= s->cpu->env.v7m.cfsr[M_REG_NS] & R_V7M_CFSR_BFSR_MASK;
        }
        /* Extract just the bytes covered by this (sub-word) access */
        val = extract32(val, (offset - 0xd28) * 8, size * 8);
        break;
    case 0xfe0 ... 0xfff: /* ID registers */
        if (offset & 3) {
            /* Only word-aligned bytes hold ID data; the rest are RAZ */
            val = 0;
        } else {
            val = nvic_id[(offset - 0xfe0) >> 2];
        }
        break;
    default:
        if (size == 4) {
            /* All remaining registers are word-access only */
            val = nvic_readl(s, offset, attrs);
        } else {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "NVIC: Bad read of size %d at offset 0x%x\n",
                          size, offset);
            val = 0;
        }
    }

    trace_nvic_sysreg_read(addr, val, size);
    *data = val;
    return MEMTX_OK;
}
2333
/*
 * MMIO write dispatcher for the System Control Space. Handles the
 * registers that support byte/halfword access here; everything else
 * is delegated to nvic_writel() as a word-sized access. All paths
 * funnel through exit_ok so the CPU's cached hflags are refreshed.
 */
static MemTxResult nvic_sysreg_write(void *opaque, hwaddr addr,
                                     uint64_t value, unsigned size,
                                     MemTxAttrs attrs)
{
    NVICState *s = (NVICState *)opaque;
    uint32_t offset = addr;
    unsigned i, startvec, end;
    unsigned setval = 0;

    trace_nvic_sysreg_write(addr, value, size);

    if (attrs.user && !nvic_user_access_ok(s, addr, attrs)) {
        /* Generate BusFault for unprivileged accesses */
        return MEMTX_ERROR;
    }

    switch (offset) {
    case 0x100 ... 0x13f: /* NVIC Set enable */
        offset += 0x80;
        setval = 1;
        /* fall through */
    case 0x180 ... 0x1bf: /* NVIC Clear enable */
        startvec = 8 * (offset - 0x180) + NVIC_FIRST_IRQ;

        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
            /* NS accesses may only touch IRQs whose ITNS bit targets NS */
            if (value & (1 << i) &&
                (attrs.secure || s->itns[startvec + i])) {
                s->vectors[startvec + i].enabled = setval;
            }
        }
        nvic_irq_update(s);
        goto exit_ok;
    case 0x200 ... 0x23f: /* NVIC Set pend */
        /* the special logic in armv7m_nvic_set_pending()
         * is not needed since IRQs are never escalated
         */
        offset += 0x80;
        setval = 1;
        /* fall through */
    case 0x280 ... 0x2bf: /* NVIC Clear pend */
        startvec = 8 * (offset - 0x280) + NVIC_FIRST_IRQ; /* vector # */

        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
            if (value & (1 << i) &&
                (attrs.secure || s->itns[startvec + i])) {
                s->vectors[startvec + i].pending = setval;
            }
        }
        nvic_irq_update(s);
        goto exit_ok;
    case 0x300 ... 0x33f: /* NVIC Active -- read-only */
        goto exit_ok;
    case 0x400 ... 0x5ef: /* NVIC Priority -- one byte per vector */
        startvec = (offset - 0x400) + NVIC_FIRST_IRQ; /* vector # */

        for (i = 0; i < size && startvec + i < s->num_irq; i++) {
            if (attrs.secure || s->itns[startvec + i]) {
                set_prio(s, startvec + i, false, (value >> (i * 8)) & 0xff);
            }
        }
        nvic_irq_update(s);
        goto exit_ok;
    case 0xd18 ... 0xd1b: /* System Handler Priority (SHPR1) */
        if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
            /* WI on baseline (v6M-style) profiles */
            goto exit_ok;
        }
        /* fall through */
    case 0xd1c ... 0xd23: /* System Handler Priority (SHPR2, SHPR3) */
        for (i = 0; i < size; i++) {
            unsigned hdlidx = (offset - 0xd14) + i;
            int newprio = extract32(value, i * 8, 8);
            int sbank = shpr_bank(s, hdlidx, attrs);

            if (sbank < 0) {
                /* WI byte for this access's security state */
                continue;
            }
            set_prio(s, hdlidx, sbank, newprio);
        }
        nvic_irq_update(s);
        goto exit_ok;
    case 0xd28 ... 0xd2b: /* Configurable Fault Status (CFSR) */
        if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
            goto exit_ok;
        }
        /* All bits are W1C, so construct a 32-bit value with zeroes in
         * the parts not covered by this (possibly sub-word) access.
         */
        value <<= ((offset - 0xd28) * 8);

        if (!attrs.secure &&
            !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
            /* BFSR bits are WI from NS if AIRCR.BFHFNMINS is clear */
            value &= ~R_V7M_CFSR_BFSR_MASK;
        }

        s->cpu->env.v7m.cfsr[attrs.secure] &= ~value;
        if (attrs.secure) {
            /* The BFSR bits [15:8] are shared between security states
             * and are stored in the NS copy, so clear them there too.
             */
            s->cpu->env.v7m.cfsr[M_REG_NS] &= ~(value & R_V7M_CFSR_BFSR_MASK);
        }
        goto exit_ok;
    }
    if (size == 4) {
        /* Delegate all other word-sized register writes */
        nvic_writel(s, offset, value, attrs);
        goto exit_ok;
    }
    qemu_log_mask(LOG_GUEST_ERROR,
                  "NVIC: Bad write of size %d at offset 0x%x\n", size, offset);
    /* This is UNPREDICTABLE; we choose to ignore it */

 exit_ok:
    /* Ensure any state changes are reflected in the cached hflags */
    arm_rebuild_hflags(&s->cpu->env);
    return MEMTX_OK;
}
2451
/* MMIO ops for the System Control Space register region */
static const MemoryRegionOps nvic_sysreg_ops = {
    .read_with_attrs = nvic_sysreg_read,
    .write_with_attrs = nvic_sysreg_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
2457
2458static MemTxResult nvic_sysreg_ns_write(void *opaque, hwaddr addr,
2459 uint64_t value, unsigned size,
2460 MemTxAttrs attrs)
2461{
2462 MemoryRegion *mr = opaque;
2463
2464 if (attrs.secure) {
2465
2466 attrs.secure = 0;
2467 return memory_region_dispatch_write(mr, addr, value,
2468 size_memop(size) | MO_TE, attrs);
2469 } else {
2470
2471 if (attrs.user) {
2472 return MEMTX_ERROR;
2473 }
2474 return MEMTX_OK;
2475 }
2476}
2477
2478static MemTxResult nvic_sysreg_ns_read(void *opaque, hwaddr addr,
2479 uint64_t *data, unsigned size,
2480 MemTxAttrs attrs)
2481{
2482 MemoryRegion *mr = opaque;
2483
2484 if (attrs.secure) {
2485
2486 attrs.secure = 0;
2487 return memory_region_dispatch_read(mr, addr, data,
2488 size_memop(size) | MO_TE, attrs);
2489 } else {
2490
2491 if (attrs.user) {
2492 return MEMTX_ERROR;
2493 }
2494 *data = 0;
2495 return MEMTX_OK;
2496 }
2497}
2498
/* MMIO ops for the Non-secure alias of a banked register region */
static const MemoryRegionOps nvic_sysreg_ns_ops = {
    .read_with_attrs = nvic_sysreg_ns_read,
    .write_with_attrs = nvic_sysreg_ns_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
2504
2505static MemTxResult nvic_systick_write(void *opaque, hwaddr addr,
2506 uint64_t value, unsigned size,
2507 MemTxAttrs attrs)
2508{
2509 NVICState *s = opaque;
2510 MemoryRegion *mr;
2511
2512
2513 mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0);
2514 return memory_region_dispatch_write(mr, addr, value,
2515 size_memop(size) | MO_TE, attrs);
2516}
2517
2518static MemTxResult nvic_systick_read(void *opaque, hwaddr addr,
2519 uint64_t *data, unsigned size,
2520 MemTxAttrs attrs)
2521{
2522 NVICState *s = opaque;
2523 MemoryRegion *mr;
2524
2525
2526 mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0);
2527 return memory_region_dispatch_read(mr, addr, data, size_memop(size) | MO_TE,
2528 attrs);
2529}
2530
/* MMIO ops forwarding the systick window to the banked timer devices */
static const MemoryRegionOps nvic_systick_ops = {
    .read_with_attrs = nvic_systick_read,
    .write_with_attrs = nvic_systick_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
2536
2537
2538static MemTxResult ras_read(void *opaque, hwaddr addr,
2539 uint64_t *data, unsigned size,
2540 MemTxAttrs attrs)
2541{
2542 if (attrs.user) {
2543 return MEMTX_ERROR;
2544 }
2545
2546 switch (addr) {
2547 case 0xe10:
2548
2549 *data = 0x43b;
2550 break;
2551 case 0xfc8:
2552
2553 *data = 0;
2554 break;
2555 default:
2556 qemu_log_mask(LOG_UNIMP, "Read RAS register offset 0x%x\n",
2557 (uint32_t)addr);
2558 *data = 0;
2559 break;
2560 }
2561 return MEMTX_OK;
2562}
2563
2564static MemTxResult ras_write(void *opaque, hwaddr addr,
2565 uint64_t value, unsigned size,
2566 MemTxAttrs attrs)
2567{
2568 if (attrs.user) {
2569 return MEMTX_ERROR;
2570 }
2571
2572 switch (addr) {
2573 default:
2574 qemu_log_mask(LOG_UNIMP, "Write to RAS register offset 0x%x\n",
2575 (uint32_t)addr);
2576 break;
2577 }
2578 return MEMTX_OK;
2579}
2580
/* MMIO ops for the minimal RAS register block */
static const MemoryRegionOps ras_ops = {
    .read_with_attrs = ras_read,
    .write_with_attrs = ras_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
2586
2587
2588
2589
2590
2591static MemTxResult ppb_default_read(void *opaque, hwaddr addr,
2592 uint64_t *data, unsigned size,
2593 MemTxAttrs attrs)
2594{
2595 qemu_log_mask(LOG_UNIMP, "Read of unassigned area of PPB: offset 0x%x\n",
2596 (uint32_t)addr);
2597 if (attrs.user) {
2598 return MEMTX_ERROR;
2599 }
2600 *data = 0;
2601 return MEMTX_OK;
2602}
2603
2604static MemTxResult ppb_default_write(void *opaque, hwaddr addr,
2605 uint64_t value, unsigned size,
2606 MemTxAttrs attrs)
2607{
2608 qemu_log_mask(LOG_UNIMP, "Write of unassigned area of PPB: offset 0x%x\n",
2609 (uint32_t)addr);
2610 if (attrs.user) {
2611 return MEMTX_ERROR;
2612 }
2613 return MEMTX_OK;
2614}
2615
/* Catch-all MMIO ops for otherwise-unmapped parts of the PPB region */
static const MemoryRegionOps ppb_default_ops = {
    .read_with_attrs = ppb_default_read,
    .write_with_attrs = ppb_default_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
};
2623
2624static int nvic_post_load(void *opaque, int version_id)
2625{
2626 NVICState *s = opaque;
2627 unsigned i;
2628 int resetprio;
2629
2630
2631 resetprio = arm_feature(&s->cpu->env, ARM_FEATURE_V8) ? -4 : -3;
2632
2633 if (s->vectors[ARMV7M_EXCP_RESET].prio != resetprio ||
2634 s->vectors[ARMV7M_EXCP_NMI].prio != -2 ||
2635 s->vectors[ARMV7M_EXCP_HARD].prio != -1) {
2636 return 1;
2637 }
2638 for (i = ARMV7M_EXCP_MEM; i < s->num_irq; i++) {
2639 if (s->vectors[i].prio & ~0xff) {
2640 return 1;
2641 }
2642 }
2643
2644 nvic_recompute_state(s);
2645
2646 return 0;
2647}
2648
/* Migration state for one exception/interrupt vector's bookkeeping */
static const VMStateDescription vmstate_VecInfo = {
    .name = "armv7m_nvic_info",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT16(prio, VecInfo),
        VMSTATE_UINT8(enabled, VecInfo),
        VMSTATE_UINT8(pending, VecInfo),
        VMSTATE_UINT8(active, VecInfo),
        VMSTATE_UINT8(level, VecInfo),
        VMSTATE_END_OF_LIST()
    }
};
2662
2663static bool nvic_security_needed(void *opaque)
2664{
2665 NVICState *s = opaque;
2666
2667 return arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY);
2668}
2669
2670static int nvic_security_post_load(void *opaque, int version_id)
2671{
2672 NVICState *s = opaque;
2673 int i;
2674
2675
2676 if (s->sec_vectors[ARMV7M_EXCP_HARD].prio != -1
2677 && s->sec_vectors[ARMV7M_EXCP_HARD].prio != -3) {
2678
2679
2680
2681
2682 return 1;
2683 }
2684 for (i = ARMV7M_EXCP_MEM; i < ARRAY_SIZE(s->sec_vectors); i++) {
2685 if (s->sec_vectors[i].prio & ~0xff) {
2686 return 1;
2687 }
2688 }
2689 return 0;
2690}
2691
/* Migration subsection carrying the Security-extension banked state;
 * only present when nvic_security_needed() is true.
 */
static const VMStateDescription vmstate_nvic_security = {
    .name = "armv7m_nvic/m-security",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = nvic_security_needed,
    .post_load = &nvic_security_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(sec_vectors, NVICState, NVIC_INTERNAL_VECTORS, 1,
                             vmstate_VecInfo, VecInfo),
        VMSTATE_UINT32(prigroup[M_REG_S], NVICState),
        VMSTATE_BOOL_ARRAY(itns, NVICState, NVIC_MAX_VECTORS),
        VMSTATE_END_OF_LIST()
    }
};
2706
/* Top-level NVIC migration state (Non-secure bank plus all vectors) */
static const VMStateDescription vmstate_nvic = {
    .name = "armv7m_nvic",
    .version_id = 4,
    .minimum_version_id = 4,
    .post_load = &nvic_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(vectors, NVICState, NVIC_MAX_VECTORS, 1,
                             vmstate_VecInfo, VecInfo),
        VMSTATE_UINT32(prigroup[M_REG_NS], NVICState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_nvic_security,
        NULL
    }
};
2723
static Property props_nvic[] = {
    /* Number of external IRQ lines (excludes the 16 internal vectors) */
    DEFINE_PROP_UINT32("num-irq", NVICState, num_irq, 64),
    DEFINE_PROP_END_OF_LIST()
};
2729
/* Device reset: put the NVIC into its architected reset state */
static void armv7m_nvic_reset(DeviceState *dev)
{
    int resetprio;
    NVICState *s = NVIC(dev);

    memset(s->vectors, 0, sizeof(s->vectors));
    memset(s->sec_vectors, 0, sizeof(s->sec_vectors));
    s->prigroup[M_REG_NS] = 0;
    s->prigroup[M_REG_S] = 0;

    s->vectors[ARMV7M_EXCP_NMI].enabled = 1;
    /* MEM, BUS and USAGE are enabled via the System Handler Control
     * register (SHCSR), so they start out disabled here.
     */
    s->vectors[ARMV7M_EXCP_SVC].enabled = 1;
    s->vectors[ARMV7M_EXCP_PENDSV].enabled = 1;
    s->vectors[ARMV7M_EXCP_SYSTICK].enabled = 1;

    /* DebugMonitor is enabled elsewhere (via the debug registers) */
    s->vectors[ARMV7M_EXCP_DEBUG].enabled = 0;

    /* Fixed priorities: Reset is -4 on v8M and -3 otherwise */
    resetprio = arm_feature(&s->cpu->env, ARM_FEATURE_V8) ? -4 : -3;
    s->vectors[ARMV7M_EXCP_RESET].prio = resetprio;
    s->vectors[ARMV7M_EXCP_NMI].prio = -2;
    s->vectors[ARMV7M_EXCP_HARD].prio = -1;

    if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
        s->sec_vectors[ARMV7M_EXCP_HARD].enabled = 1;
        s->sec_vectors[ARMV7M_EXCP_SVC].enabled = 1;
        s->sec_vectors[ARMV7M_EXCP_PENDSV].enabled = 1;
        s->sec_vectors[ARMV7M_EXCP_SYSTICK].enabled = 1;

        /* AIRCR.BFHFNMINS resets to 0, so Secure HardFault is -1 ... */
        s->sec_vectors[ARMV7M_EXCP_HARD].prio = -1;
        /* ... and NS HardFault starts out effectively disabled */
        s->vectors[ARMV7M_EXCP_HARD].enabled = 0;
    } else {
        s->vectors[ARMV7M_EXCP_HARD].enabled = 1;
    }

    /* Note that the Reset vector is deliberately left disabled: we
     * never pend it, so leaving it off helps catch logic errors.
     */

    s->exception_prio = NVIC_NOEXC_PRIO;
    s->vectpending = 0;
    s->vectpending_is_s_banked = false;
    s->vectpending_prio = NVIC_NOEXC_PRIO;

    if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
        memset(s->itns, 0, sizeof(s->itns));
    } else {
        /* Without the Security extension the register accessors still
         * consult itns[] (via "attrs.secure || s->itns[...]"), so mark
         * all external IRQs as Non-secure-targeted to make NS accesses
         * work.
         */
        int i;

        for (i = NVIC_FIRST_IRQ; i < ARRAY_SIZE(s->itns); i++) {
            s->itns[i] = true;
        }
    }

    /* We changed state that feeds into the CPU's cached hflags, and we
     * cannot rely on running after the CPU's own reset, so rebuild
     * them here.
     */
    arm_rebuild_hflags(&s->cpu->env);
}
2801
2802static void nvic_systick_trigger(void *opaque, int n, int level)
2803{
2804 NVICState *s = opaque;
2805
2806 if (level) {
2807
2808
2809
2810
2811
2812
2813 armv7m_nvic_set_pending(s, ARMV7M_EXCP_SYSTICK, n);
2814 }
2815}
2816
/*
 * Realize the NVIC: validate properties, wire up the banked systick
 * timers, and build the single sysbus MMIO region that covers the
 * whole 1MB "System PPB" space the M-profile CPU maps at 0xe0000000.
 */
static void armv7m_nvic_realize(DeviceState *dev, Error **errp)
{
    NVICState *s = NVIC(dev);

    /* The container (armv7m) object is expected to have set s->cpu
     * before we are realized; we require an M-profile CPU.
     */
    if (!s->cpu || !arm_feature(&s->cpu->env, ARM_FEATURE_M)) {
        error_setg(errp, "The NVIC can only be used with a Cortex-M CPU");
        return;
    }

    if (s->num_irq > NVIC_MAX_IRQ) {
        error_setg(errp, "num-irq %d exceeds NVIC maximum", s->num_irq);
        return;
    }

    qdev_init_gpio_in(dev, set_irq_level, s->num_irq);

    /* include space for the internal exception vectors */
    s->num_irq += NVIC_FIRST_IRQ;

    /* 8 priority bits from v7M onwards, 2 before that */
    s->num_prio_bits = arm_feature(&s->cpu->env, ARM_FEATURE_V7) ? 8 : 2;

    if (!sysbus_realize(SYS_BUS_DEVICE(&s->systick[M_REG_NS]), errp)) {
        return;
    }
    sysbus_connect_irq(SYS_BUS_DEVICE(&s->systick[M_REG_NS]), 0,
                       qdev_get_gpio_in_named(dev, "systick-trigger",
                                              M_REG_NS));

    if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
        /* We couldn't init the secure systick device in instance_init
         * as we didn't know then whether the CPU had the Security
         * extension; so it has to be created and realized here.
         */
        object_initialize_child(OBJECT(dev), "systick-reg-s",
                                &s->systick[M_REG_S], TYPE_SYSTICK);

        if (!sysbus_realize(SYS_BUS_DEVICE(&s->systick[M_REG_S]), errp)) {
            return;
        }
        sysbus_connect_irq(SYS_BUS_DEVICE(&s->systick[M_REG_S]), 0,
                           qdev_get_gpio_in_named(dev, "systick-trigger",
                                                  M_REG_S));
    }

    /*
     * Layout of the 0x100000-byte PPB container built below
     * (offsets are relative to the container's base):
     *   0x05000          minimal RAS block (only if FEAT_RAS)
     *   0x0e000..0x0efff System Control Space registers
     *   0x0e010..0x0e0ef systick window, layered over the SCS region
     *   0x2e000..0x2efff NS alias of the SCS (v8M only)
     *   0x2e010..0x2e0ef NS alias of the systick window (v8M only)
     * Everything else falls through to the "defaultmem" background
     * region (priority -1), which is RAZ/WI for privileged accesses
     * and BusFaults for unprivileged ones.
     */
    memory_region_init(&s->container, OBJECT(s), "nvic", 0x100000);
    memory_region_init_io(&s->defaultmem, OBJECT(s), &ppb_default_ops, s,
                          "nvic-default", 0x100000);
    memory_region_add_subregion_overlap(&s->container, 0, &s->defaultmem, -1);
    memory_region_init_io(&s->sysregmem, OBJECT(s), &nvic_sysreg_ops, s,
                          "nvic_sysregs", 0x1000);
    memory_region_add_subregion(&s->container, 0xe000, &s->sysregmem);

    memory_region_init_io(&s->systickmem, OBJECT(s),
                          &nvic_systick_ops, s,
                          "nvic_systick", 0xe0);

    /* Higher priority so the systick window shadows the SCS region */
    memory_region_add_subregion_overlap(&s->container, 0xe010,
                                        &s->systickmem, 1);

    if (arm_feature(&s->cpu->env, ARM_FEATURE_V8)) {
        /* NS aliases dispatch into the same regions with attrs.secure
         * cleared; see nvic_sysreg_ns_ops.
         */
        memory_region_init_io(&s->sysreg_ns_mem, OBJECT(s),
                              &nvic_sysreg_ns_ops, &s->sysregmem,
                              "nvic_sysregs_ns", 0x1000);
        memory_region_add_subregion(&s->container, 0x2e000, &s->sysreg_ns_mem);
        memory_region_init_io(&s->systick_ns_mem, OBJECT(s),
                              &nvic_sysreg_ns_ops, &s->systickmem,
                              "nvic_systick_ns", 0xe0);
        memory_region_add_subregion_overlap(&s->container, 0x2e010,
                                            &s->systick_ns_mem, 1);
    }

    if (cpu_isar_feature(aa32_ras, s->cpu)) {
        memory_region_init_io(&s->ras_mem, OBJECT(s),
                              &ras_ops, s, "nvic_ras", 0x1000);
        memory_region_add_subregion(&s->container, 0x5000, &s->ras_mem);
    }

    sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->container);
}
2942
2943static void armv7m_nvic_instance_init(Object *obj)
2944{
2945
2946
2947
2948
2949
2950
2951 DeviceState *dev = DEVICE(obj);
2952 NVICState *nvic = NVIC(obj);
2953 SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
2954
2955 object_initialize_child(obj, "systick-reg-ns", &nvic->systick[M_REG_NS],
2956 TYPE_SYSTICK);
2957
2958
2959
2960
2961 sysbus_init_irq(sbd, &nvic->excpout);
2962 qdev_init_gpio_out_named(dev, &nvic->sysresetreq, "SYSRESETREQ", 1);
2963 qdev_init_gpio_in_named(dev, nvic_systick_trigger, "systick-trigger",
2964 M_REG_NUM_BANKS);
2965 qdev_init_gpio_in_named(dev, nvic_nmi_trigger, "NMI", 1);
2966}
2967
2968static void armv7m_nvic_class_init(ObjectClass *klass, void *data)
2969{
2970 DeviceClass *dc = DEVICE_CLASS(klass);
2971
2972 dc->vmsd = &vmstate_nvic;
2973 device_class_set_props(dc, props_nvic);
2974 dc->reset = armv7m_nvic_reset;
2975 dc->realize = armv7m_nvic_realize;
2976}
2977
2978static const TypeInfo armv7m_nvic_info = {
2979 .name = TYPE_NVIC,
2980 .parent = TYPE_SYS_BUS_DEVICE,
2981 .instance_init = armv7m_nvic_instance_init,
2982 .instance_size = sizeof(NVICState),
2983 .class_init = armv7m_nvic_class_init,
2984 .class_size = sizeof(SysBusDeviceClass),
2985};
2986
/* Register the NVIC QOM type with the type system at module load. */
static void armv7m_nvic_register_types(void)
{
    type_register_static(&armv7m_nvic_info);
}

type_init(armv7m_nvic_register_types)
2993