/*
 * ARM Nested Vectored Interrupt Controller
 *
 * This code is licensed under the GPL.
 *
 * The ARMv7M System controller is fairly tightly tied in with the
 * NVIC.  Hence this file covers the System controller registers as
 * well as the NVIC.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "qemu/timer.h"
#include "hw/intc/armv7m_nvic.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "sysemu/runstate.h"
#include "target/arm/cpu.h"
#include "exec/exec-all.h"
#include "exec/memop.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "trace.h"
/*
 * IRQ number counting:
 *
 * The "num-irq" device property counts the number of external IRQ lines.
 * NVICState::num_irq counts the total number of exceptions (internal
 * vectors plus external IRQs) that the NVIC handles, so it is the
 * property value plus NVIC_FIRST_IRQ.
 *
 * With the Security extension most of the internal vectors are banked
 * between Secure and Non-secure state: s->vectors[] holds the
 * Non-secure view (and the unbanked exceptions), while s->sec_vectors[]
 * holds the Secure copies of the banked exceptions.
 */
#define NVIC_FIRST_IRQ NVIC_INTERNAL_VECTORS
#define NVIC_MAX_IRQ (NVIC_MAX_VECTORS - NVIC_FIRST_IRQ)

/*
 * Effective running priority of the CPU when no exception is active
 * (higher than the highest possible priority value)
 */
#define NVIC_NOEXC_PRIO 0x100

/* Maximum priority of non-secure exceptions when AIRCR.PRIS is set */
#define NVIC_NS_PRIO_LIMIT 0x80

static const uint8_t nvic_id[] = {
    0x00, 0xb0, 0x1b, 0x00, 0x0d, 0xe0, 0x05, 0xb1
};

static void signal_sysresetreq(NVICState *s)
{
    if (qemu_irq_is_connected(s->sysresetreq)) {
        qemu_irq_pulse(s->sysresetreq);
    } else {
        /*
         * Default behaviour if the SoC doesn't wire up SYSRESETREQ:
         * reset the whole system, which is what a board without any
         * more specific hookup would do.
         */
        qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
    }
}

static int nvic_pending_prio(NVICState *s)
{
    /*
     * Return the group priority of the current pending exception,
     * or NVIC_NOEXC_PRIO if no exception is pending
     */
    return s->vectpending_prio;
}

/*
 * Return the value of the ISCR RETTOBASE bit:
 * 1 if there is exactly one active exception,
 * 0 if there is more than one active exception.
 * UNKNOWN if there are no active exceptions (we choose 1, which
 * matches the choice the Cortex-M3 is documented as making).
 *
 * NB: some versions of the documentation describe this as counting
 * "active exceptions other than the one being returned from"; that
 * only differs in an obscure corner case where guest code has manually
 * deactivated an exception, so we use the simpler definition here.
 */
static bool nvic_rettobase(NVICState *s)
{
    int irq, nhand = 0;
    bool check_sec = arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY);

    for (irq = ARMV7M_EXCP_RESET; irq < s->num_irq; irq++) {
        if (s->vectors[irq].active ||
            (check_sec && irq < NVIC_INTERNAL_VECTORS &&
             s->sec_vectors[irq].active)) {
            nhand++;
            if (nhand == 2) {
                return 0;
            }
        }
    }

    return 1;
}
/*
 * Return the value of the ISCR ISRPENDING bit:
 * 1 if an external interrupt is pending
 * 0 if no external interrupt is pending
 */
static bool nvic_isrpending(NVICState *s)
{
    int irq;

    /*
     * We can shortcut if the highest priority pending exception
     * happens to be an external interrupt.
     */
    if (s->vectpending > NVIC_FIRST_IRQ) {
        return true;
    }

    for (irq = NVIC_FIRST_IRQ; irq < s->num_irq; irq++) {
        if (s->vectors[irq].pending) {
            return true;
        }
    }
    return false;
}

static bool exc_is_banked(int exc)
{
    /*
     * Return true if this is one of the limited set of exceptions which
     * are banked (and thus have state in sec_vectors[])
     */
    return exc == ARMV7M_EXCP_HARD ||
        exc == ARMV7M_EXCP_MEM ||
        exc == ARMV7M_EXCP_USAGE ||
        exc == ARMV7M_EXCP_SVC ||
        exc == ARMV7M_EXCP_PENDSV ||
        exc == ARMV7M_EXCP_SYSTICK;
}

/*
 * Return a mask word which clears the subpriority bits from
 * a priority value for an exception targeting the given security
 * state; the mask depends on that state's PRIGROUP setting.
 */
static inline uint32_t nvic_gprio_mask(NVICState *s, bool secure)
{
    return ~0U << (s->prigroup[secure] + 1);
}

static bool exc_targets_secure(NVICState *s, int exc)
{
    /* Return true if this non-banked exception targets Secure state. */
    if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
        return false;
    }

    if (exc >= NVIC_FIRST_IRQ) {
        return !s->itns[exc];
    }

    /* Function shouldn't be called for banked exceptions. */
    assert(!exc_is_banked(exc));

    switch (exc) {
    case ARMV7M_EXCP_NMI:
    case ARMV7M_EXCP_BUS:
        return !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
    case ARMV7M_EXCP_SECURE:
        return true;
    case ARMV7M_EXCP_DEBUG:
        /* TODO: routing controls for the Debug exception are not modelled */
        return false;
    default:
        /*
         * Reset, and reserved (unused) low exception numbers.
         * We'll get called by code that loops through all the exception
         * numbers, but it doesn't matter what we return here as these
         * vectors can never be pended or active.
         */
        return true;
    }
}

static int exc_group_prio(NVICState *s, int rawprio, bool targets_secure)
{
    /*
     * Return the group priority for this exception, given its raw
     * (group-and-subgroup) priority value and whether it is targeting
     * secure state or not.
     */
    if (rawprio < 0) {
        return rawprio;
    }
    rawprio &= nvic_gprio_mask(s, targets_secure);
    /*
     * AIRCR.PRIS causes us to squash all NS priorities into the
     * lower half of the total range
     */
    if (!targets_secure &&
        (s->cpu->env.v7m.aircr & R_V7M_AIRCR_PRIS_MASK)) {
        rawprio = (rawprio >> 1) + NVIC_NS_PRIO_LIMIT;
    }
    return rawprio;
}
/*
 * Recompute vectpending and exception_prio for a CPU which implements
 * the Security extension
 */
static void nvic_recompute_state_secure(NVICState *s)
{
    int i, bank;
    int pend_prio = NVIC_NOEXC_PRIO;
    int active_prio = NVIC_NOEXC_PRIO;
    int pend_irq = 0;
    bool pending_is_s_banked = false;
    int pend_subprio = 0;

    /*
     * Pending exceptions are ordered by group priority, then subpriority,
     * then lowest exception number; for banked exceptions with identical
     * priorities the Secure instance takes precedence. An exception which
     * is pending but not enabled never becomes the pending exception here.
     */
    for (i = 1; i < s->num_irq; i++) {
        for (bank = M_REG_S; bank >= M_REG_NS; bank--) {
            VecInfo *vec;
            int prio, subprio;
            bool targets_secure;

            if (bank == M_REG_S) {
                if (!exc_is_banked(i)) {
                    continue;
                }
                vec = &s->sec_vectors[i];
                targets_secure = true;
            } else {
                vec = &s->vectors[i];
                targets_secure = !exc_is_banked(i) && exc_targets_secure(s, i);
            }

            prio = exc_group_prio(s, vec->prio, targets_secure);
            subprio = vec->prio & ~nvic_gprio_mask(s, targets_secure);
            if (vec->enabled && vec->pending &&
                ((prio < pend_prio) ||
                 (prio == pend_prio && prio >= 0 && subprio < pend_subprio))) {
                pend_prio = prio;
                pend_subprio = subprio;
                pend_irq = i;
                pending_is_s_banked = (bank == M_REG_S);
            }
            if (vec->active && prio < active_prio) {
                active_prio = prio;
            }
        }
    }

    s->vectpending_is_s_banked = pending_is_s_banked;
    s->vectpending = pend_irq;
    s->vectpending_prio = pend_prio;
    s->exception_prio = active_prio;

    trace_nvic_recompute_state_secure(s->vectpending,
                                      s->vectpending_is_s_banked,
                                      s->vectpending_prio,
                                      s->exception_prio);
}

/* Recompute vectpending and exception_prio */
static void nvic_recompute_state(NVICState *s)
{
    int i;
    int pend_prio = NVIC_NOEXC_PRIO;
    int active_prio = NVIC_NOEXC_PRIO;
    int pend_irq = 0;

    /*
     * The security-extension case is sufficiently more complicated
     * that it is handled in a separate function; keeping the two
     * separate also keeps this common case simple and fast.
     */
    if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
        nvic_recompute_state_secure(s);
        return;
    }

    for (i = 1; i < s->num_irq; i++) {
        VecInfo *vec = &s->vectors[i];

        if (vec->enabled && vec->pending && vec->prio < pend_prio) {
            pend_prio = vec->prio;
            pend_irq = i;
        }
        if (vec->active && vec->prio < active_prio) {
            active_prio = vec->prio;
        }
    }

    if (active_prio > 0) {
        active_prio &= nvic_gprio_mask(s, false);
    }

    if (pend_prio > 0) {
        pend_prio &= nvic_gprio_mask(s, false);
    }

    s->vectpending = pend_irq;
    s->vectpending_prio = pend_prio;
    s->exception_prio = active_prio;

    trace_nvic_recompute_state(s->vectpending,
                               s->vectpending_prio,
                               s->exception_prio);
}
/*
 * Return the current execution priority of the CPU
 * (equivalent to the pseudocode ExecutionPriority function).
 * This takes account of the priority-boosting effects of BASEPRI,
 * PRIMASK and FAULTMASK in both security states as well as of any
 * active exceptions.
 */
static inline int nvic_exec_prio(NVICState *s)
{
    CPUARMState *env = &s->cpu->env;
    int running = NVIC_NOEXC_PRIO;

    if (env->v7m.basepri[M_REG_NS] > 0) {
        running = exc_group_prio(s, env->v7m.basepri[M_REG_NS], M_REG_NS);
    }

    if (env->v7m.basepri[M_REG_S] > 0) {
        int basepri = exc_group_prio(s, env->v7m.basepri[M_REG_S], M_REG_S);
        if (running > basepri) {
            running = basepri;
        }
    }

    if (env->v7m.primask[M_REG_NS]) {
        if (env->v7m.aircr & R_V7M_AIRCR_PRIS_MASK) {
            if (running > NVIC_NS_PRIO_LIMIT) {
                running = NVIC_NS_PRIO_LIMIT;
            }
        } else {
            running = 0;
        }
    }

    if (env->v7m.primask[M_REG_S]) {
        running = 0;
    }

    if (env->v7m.faultmask[M_REG_NS]) {
        if (env->v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
            running = -1;
        } else {
            if (env->v7m.aircr & R_V7M_AIRCR_PRIS_MASK) {
                if (running > NVIC_NS_PRIO_LIMIT) {
                    running = NVIC_NS_PRIO_LIMIT;
                }
            } else {
                running = 0;
            }
        }
    }

    if (env->v7m.faultmask[M_REG_S]) {
        running = (env->v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) ? -3 : -1;
    }

    /* consider priority of active handler */
    return MIN(running, s->exception_prio);
}

bool armv7m_nvic_neg_prio_requested(void *opaque, bool secure)
{
    /*
     * Return true if the requested execution priority is negative
     * for the specified security state, ie that security state
     * has an active NMI or HardFault or has set its FAULTMASK.
     * Note that this is not the same as whether the execution
     * priority is actually negative (for instance AIRCR.PRIS may
     * mean we don't allow FAULTMASK_NS to actually make the execution
     * priority negative). Compare pseudocode IsReqExcPriNeg().
     */
    NVICState *s = opaque;

    if (s->cpu->env.v7m.faultmask[secure]) {
        return true;
    }

    if (secure ? s->sec_vectors[ARMV7M_EXCP_HARD].active :
        s->vectors[ARMV7M_EXCP_HARD].active) {
        return true;
    }

    if (s->vectors[ARMV7M_EXCP_NMI].active &&
        exc_targets_secure(s, ARMV7M_EXCP_NMI) == secure) {
        return true;
    }

    return false;
}

bool armv7m_nvic_can_take_pending_exception(void *opaque)
{
    NVICState *s = opaque;

    return nvic_exec_prio(s) > nvic_pending_prio(s);
}

int armv7m_nvic_raw_execution_priority(void *opaque)
{
    NVICState *s = opaque;

    return s->exception_prio;
}
/*
 * caller must call nvic_irq_update() after this.
 * secure: the security state to use for the setting, for banked exceptions
 */
static void set_prio(NVICState *s, unsigned irq, bool secure, uint8_t prio)
{
    assert(irq > ARMV7M_EXCP_NMI); /* only use for configurable prios */
    assert(irq < s->num_irq);

    prio &= MAKE_64BIT_MASK(8 - s->num_prio_bits, s->num_prio_bits);

    if (secure) {
        assert(exc_is_banked(irq));
        s->sec_vectors[irq].prio = prio;
    } else {
        s->vectors[irq].prio = prio;
    }

    trace_nvic_set_prio(irq, secure, prio);
}

/*
 * Return the current raw priority register value.
 * secure: the security state to use for the read, for banked exceptions
 */
static int get_prio(NVICState *s, unsigned irq, bool secure)
{
    assert(irq > ARMV7M_EXCP_NMI); /* only use for configurable prios */
    assert(irq < s->num_irq);

    if (secure) {
        assert(exc_is_banked(irq));
        return s->sec_vectors[irq].prio;
    } else {
        return s->vectors[irq].prio;
    }
}

/*
 * Recompute state and assert the NVIC output irq line accordingly.
 * Must be called after changes to:
 *  vec->active, vec->enabled, vec->pending or vec->prio for any vector
 *  prigroup
 */
static void nvic_irq_update(NVICState *s)
{
    int lvl;
    int pend_prio;

    nvic_recompute_state(s);
    pend_prio = nvic_pending_prio(s);

    /*
     * Raise the output line if the pending exception would be able to
     * preempt the currently active exceptions; we deliberately ignore
     * the effects of BASEPRI, PRIMASK and FAULTMASK here (those are
     * checked in armv7m_nvic_can_take_pending_exception()), so that
     * changes to them don't cause spurious wakeups from WFI.
     */
    lvl = (pend_prio < s->exception_prio);
    trace_nvic_irq_update(s->vectpending, pend_prio, s->exception_prio, lvl);
    qemu_set_irq(s->excpout, lvl);
}

/**
 * armv7m_nvic_clear_pending: mark the specified exception as not pending
 * @opaque: the NVIC
 * @irq: the exception number to mark as not pending
 * @secure: false for non-banked exceptions or for the nonsecure
 * version of a banked exception, true for the secure version of a banked
 * exception.
 *
 * Marks the specified exception as not pending. Note that we will assert()
 * if @secure is true and @irq does not specify one of the fixed set
 * of architecturally banked exceptions.
 */
static void armv7m_nvic_clear_pending(void *opaque, int irq, bool secure)
{
    NVICState *s = (NVICState *)opaque;
    VecInfo *vec;

    assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);

    if (secure) {
        assert(exc_is_banked(irq));
        vec = &s->sec_vectors[irq];
    } else {
        vec = &s->vectors[irq];
    }
    trace_nvic_clear_pending(irq, secure, vec->enabled, vec->prio);
    if (vec->pending) {
        vec->pending = 0;
        nvic_irq_update(s);
    }
}
528
529static void do_armv7m_nvic_set_pending(void *opaque, int irq, bool secure,
530 bool derived)
531{
    /*
     * Pend an exception, possibly escalating it to HardFault.
     *
     * This handles both the "normal" pending of an interrupt or
     * exception and the "derived" exceptions (exceptions generated
     * in the course of trying to take some other exception, such as
     * a fault while stacking for exception entry).
     *
     * If derived == true the caller guarantees that we are part way
     * through trying to take an exception (but have not yet committed
     * to it), so:
     *  - s->vectpending is the "original exception" we were trying to take
     *  - irq is the "derived exception"
     *  - nvic_exec_prio(s) gives the execution priority before entry
     * and we implement the prioritization logic the v8M pseudocode
     * puts in DerivedLateArrival().
     */
548 NVICState *s = (NVICState *)opaque;
549 bool banked = exc_is_banked(irq);
550 VecInfo *vec;
551 bool targets_secure;
552
553 assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
554 assert(!secure || banked);
555
556 vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq];
557
558 targets_secure = banked ? secure : exc_targets_secure(s, irq);
559
560 trace_nvic_set_pending(irq, secure, targets_secure,
561 derived, vec->enabled, vec->prio);
562
563 if (derived) {
        /* Derived exceptions are always synchronous. */
565 assert(irq >= ARMV7M_EXCP_HARD && irq < ARMV7M_EXCP_PENDSV);
566
567 if (irq == ARMV7M_EXCP_DEBUG &&
568 exc_group_prio(s, vec->prio, secure) >= nvic_exec_prio(s)) {
            /*
             * A DebugMonitor exception which cannot preempt the current
             * execution priority is not taken: it is simply ignored,
             * matching the pseudocode DerivedLateArrival behaviour.
             */
572 return;
573 }
574
575 if (irq == ARMV7M_EXCP_HARD && vec->prio >= s->vectpending_prio) {
            /*
             * A derived HardFault is a "terminal" exception: if it cannot
             * preempt the original exception we were trying to take then
             * there is no way to make forward progress and the architecture
             * requires a Lockup. We don't model Lockup, so report the
             * situation via cpu_abort() instead.
             *
             * (We rely on the fact that a derived exception is only
             * reported to us as HardFault when it is terminal.)
             */
587 cpu_abort(&s->cpu->parent_obj,
588 "Lockup: can't take terminal derived exception "
589 "(original exception priority %d)\n",
590 s->vectpending_prio);
591 }
        /*
         * If we get here then the derived exception is not terminal;
         * we handle it exactly like a normal pended exception, falling
         * through to the escalation logic below.
         */
598 }
599
600 if (irq >= ARMV7M_EXCP_HARD && irq < ARMV7M_EXCP_PENDSV) {
        /*
         * If a synchronous exception is pending then it may be
         * escalated to HardFault if:
         *  * it is equal or lower priority to current execution
         *  * it is disabled
         * (ie we need to take it immediately but we can't do so).
         * Asynchronous exceptions (and interrupts) simply remain pending.
         *
         * For QEMU we don't model any imprecise (asynchronous) faults,
         * so the faults handled here can always be treated as synchronous
         * and are therefore candidates for escalation.
         */
621 int running = nvic_exec_prio(s);
622 bool escalate = false;
623
624 if (exc_group_prio(s, vec->prio, secure) >= running) {
625 trace_nvic_escalate_prio(irq, vec->prio, running);
626 escalate = true;
627 } else if (!vec->enabled) {
628 trace_nvic_escalate_disabled(irq);
629 escalate = true;
630 }
631
632 if (escalate) {
            /*
             * We need to escalate this exception to a synchronous HardFault.
             * If BFHFNMINS is set then we escalate to the banked HardFault
             * for the target security state of the original exception;
             * otherwise we take a Secure HardFault.
             */
639 irq = ARMV7M_EXCP_HARD;
640 if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY) &&
641 (targets_secure ||
642 !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK))) {
643 vec = &s->sec_vectors[irq];
644 } else {
645 vec = &s->vectors[irq];
646 }
647 if (running <= vec->prio) {
                /*
                 * We want to escalate to HardFault but we can't take the
                 * synchronous HardFault at this point either. This is a
                 * Lockup condition due to a guest bug. We don't model
                 * Lockup, so report via cpu_abort() instead.
                 */
653 cpu_abort(&s->cpu->parent_obj,
654 "Lockup: can't escalate %d to HardFault "
655 "(current priority %d)\n", irq, running);
656 }
657
            /* HF may be banked but there is only one shared HFSR */
659 s->cpu->env.v7m.hfsr |= R_V7M_HFSR_FORCED_MASK;
660 }
661 }
662
663 if (!vec->pending) {
664 vec->pending = 1;
665 nvic_irq_update(s);
666 }
667}
668
669void armv7m_nvic_set_pending(void *opaque, int irq, bool secure)
670{
671 do_armv7m_nvic_set_pending(opaque, irq, secure, false);
672}
673
674void armv7m_nvic_set_pending_derived(void *opaque, int irq, bool secure)
675{
676 do_armv7m_nvic_set_pending(opaque, irq, secure, true);
677}
678
679void armv7m_nvic_set_pending_lazyfp(void *opaque, int irq, bool secure)
680{
    /*
     * Pend an exception during lazy FP stacking. This differs
     * from the usual exception pending because the logic for
     * whether we should escalate depends on the saved context
     * in the FPCCR register, not on the current state of the CPU/NVIC.
     */
687 NVICState *s = (NVICState *)opaque;
688 bool banked = exc_is_banked(irq);
689 VecInfo *vec;
690 bool targets_secure;
691 bool escalate = false;
692
    /*
     * We will only look at bits in fpccr if this is a banked exception
     * (in which case 'secure' tells us whether it is the S or NS version).
     * All the bits for the non-banked exceptions are in fpccr_s.
     */
697 uint32_t fpccr_s = s->cpu->env.v7m.fpccr[M_REG_S];
698 uint32_t fpccr = s->cpu->env.v7m.fpccr[secure];
699
700 assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
701 assert(!secure || banked);
702
703 vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq];
704
705 targets_secure = banked ? secure : exc_targets_secure(s, irq);
706
707 switch (irq) {
708 case ARMV7M_EXCP_DEBUG:
709 if (!(fpccr_s & R_V7M_FPCCR_MONRDY_MASK)) {
            /* Ignore DebugMonitor exception */
711 return;
712 }
713 break;
714 case ARMV7M_EXCP_MEM:
715 escalate = !(fpccr & R_V7M_FPCCR_MMRDY_MASK);
716 break;
717 case ARMV7M_EXCP_USAGE:
718 escalate = !(fpccr & R_V7M_FPCCR_UFRDY_MASK);
719 break;
720 case ARMV7M_EXCP_BUS:
721 escalate = !(fpccr_s & R_V7M_FPCCR_BFRDY_MASK);
722 break;
723 case ARMV7M_EXCP_SECURE:
724 escalate = !(fpccr_s & R_V7M_FPCCR_SFRDY_MASK);
725 break;
726 default:
727 g_assert_not_reached();
728 }
729
730 if (escalate) {
        /*
         * Escalate to HardFault: faults that initially targeted Secure
         * continue to do so, even if HF normally targets NonSecure.
         */
735 irq = ARMV7M_EXCP_HARD;
736 if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY) &&
737 (targets_secure ||
738 !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK))) {
739 vec = &s->sec_vectors[irq];
740 } else {
741 vec = &s->vectors[irq];
742 }
743 }
744
745 if (!vec->enabled ||
746 nvic_exec_prio(s) <= exc_group_prio(s, vec->prio, secure)) {
747 if (!(fpccr_s & R_V7M_FPCCR_HFRDY_MASK)) {
            /*
             * We want to escalate to HardFault but the context the
             * FP state belongs to prevents the exception pre-empting.
             */
752 cpu_abort(&s->cpu->parent_obj,
753 "Lockup: can't escalate to HardFault during "
754 "lazy FP register stacking\n");
755 }
756 }
757
758 if (escalate) {
759 s->cpu->env.v7m.hfsr |= R_V7M_HFSR_FORCED_MASK;
760 }
761 if (!vec->pending) {
762 vec->pending = 1;
        /*
         * We do not call nvic_irq_update(), because we know our caller
         * is going to handle causing us to take the exception by
         * raising EXCP_LAZYFP, so raising the IRQ line would be
         * pointless extra work. We just need to recompute the
         * priorities so that armv7m_nvic_can_take_pending_exception()
         * returns the right answer.
         */
771 nvic_recompute_state(s);
772 }
773}
774
/* Make pending IRQ active.  */
776void armv7m_nvic_acknowledge_irq(void *opaque)
777{
778 NVICState *s = (NVICState *)opaque;
779 CPUARMState *env = &s->cpu->env;
780 const int pending = s->vectpending;
781 const int running = nvic_exec_prio(s);
782 VecInfo *vec;
783
784 assert(pending > ARMV7M_EXCP_RESET && pending < s->num_irq);
785
786 if (s->vectpending_is_s_banked) {
787 vec = &s->sec_vectors[pending];
788 } else {
789 vec = &s->vectors[pending];
790 }
791
792 assert(vec->enabled);
793 assert(vec->pending);
794
795 assert(s->vectpending_prio < running);
796
797 trace_nvic_acknowledge_irq(pending, s->vectpending_prio);
798
799 vec->active = 1;
800 vec->pending = 0;
801
802 write_v7m_exception(env, s->vectpending);
803
804 nvic_irq_update(s);
805}
806
807static bool vectpending_targets_secure(NVICState *s)
808{
    /* Return true if s->vectpending targets Secure state */
810 if (s->vectpending_is_s_banked) {
811 return true;
812 }
813 return !exc_is_banked(s->vectpending) &&
814 exc_targets_secure(s, s->vectpending);
815}
816
817void armv7m_nvic_get_pending_irq_info(void *opaque,
818 int *pirq, bool *ptargets_secure)
819{
820 NVICState *s = (NVICState *)opaque;
821 const int pending = s->vectpending;
822 bool targets_secure;
823
824 assert(pending > ARMV7M_EXCP_RESET && pending < s->num_irq);
825
826 targets_secure = vectpending_targets_secure(s);
827
828 trace_nvic_get_pending_irq_info(pending, targets_secure);
829
830 *ptargets_secure = targets_secure;
831 *pirq = pending;
832}
833
834int armv7m_nvic_complete_irq(void *opaque, int irq, bool secure)
835{
836 NVICState *s = (NVICState *)opaque;
837 VecInfo *vec = NULL;
838 int ret = 0;
839
840 assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
841
842 trace_nvic_complete_irq(irq, secure);
843
844 if (secure && exc_is_banked(irq)) {
845 vec = &s->sec_vectors[irq];
846 } else {
847 vec = &s->vectors[irq];
848 }
849
    /*
     * Identify illegal exception return cases. We can't immediately
     * return at this point because we still might need to deactivate
     * (either this exception or NMI/HardFault) first.
     */
855 if (!exc_is_banked(irq) && exc_targets_secure(s, irq) != secure) {
        /*
         * Return from a configurable exception targeting the opposite
         * security state from the one we're trying to complete it for.
         * Clear vec because it's not really the VecInfo for this
         * (irq, secstate) so we mustn't deactivate it.
         */
862 ret = -1;
863 vec = NULL;
864 } else if (!vec->active) {
        /* Return from an inactive interrupt */
866 ret = -1;
867 } else {
        /* Legal return: we will return the RETTOBASE bit value to the CPU */
869 ret = nvic_rettobase(s);
870 }
871
    /*
     * If the raw execution priority is negative then in v8M the
     * exception which must actually be deactivated is the NMI or the
     * (possibly banked) HardFault responsible for that priority, rather
     * than whatever vector the guest named; pick that vector here
     * (compare the v8M DeActivate() pseudocode).
     */
880 if (arm_feature(&s->cpu->env, ARM_FEATURE_V8)) {
881 switch (armv7m_nvic_raw_execution_priority(s)) {
882 case -1:
883 if (s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
884 vec = &s->vectors[ARMV7M_EXCP_HARD];
885 } else {
886 vec = &s->sec_vectors[ARMV7M_EXCP_HARD];
887 }
888 break;
889 case -2:
890 vec = &s->vectors[ARMV7M_EXCP_NMI];
891 break;
892 case -3:
893 vec = &s->sec_vectors[ARMV7M_EXCP_HARD];
894 break;
895 default:
896 break;
897 }
898 }
899
900 if (!vec) {
901 return ret;
902 }
903
904 vec->active = 0;
905 if (vec->level) {
        /*
         * Re-pend the exception if it's still held high; only
         * happens for external IRQs
         */
909 assert(irq >= NVIC_FIRST_IRQ);
910 vec->pending = 1;
911 }
912
913 nvic_irq_update(s);
914
915 return ret;
916}
917
918bool armv7m_nvic_get_ready_status(void *opaque, int irq, bool secure)
919{
    /*
     * Return whether an exception is "ready", i.e. it is enabled and is
     * configured at a priority which would allow it to interrupt the
     * current execution priority.
     *
     * irq and secure have the same semantics as for armv7m_nvic_set_pending():
     * for non-banked exceptions secure is always false; for banked exceptions
     * it indicates which of the exceptions is required.
     */
929 NVICState *s = (NVICState *)opaque;
930 bool banked = exc_is_banked(irq);
931 VecInfo *vec;
932 int running = nvic_exec_prio(s);
933
934 assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
935 assert(!secure || banked);
936
    /*
     * HardFault is an odd special case: we always check against -1,
     * even if we're secure and HardFault has priority -3; we never
     * need to check for enabled state.
     */
942 if (irq == ARMV7M_EXCP_HARD) {
943 return running > -1;
944 }
945
946 vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq];
947
948 return vec->enabled &&
949 exc_group_prio(s, vec->prio, secure) < running;
950}
951
/* callback when external interrupt line is changed */
953static void set_irq_level(void *opaque, int n, int level)
954{
955 NVICState *s = opaque;
956 VecInfo *vec;
957
958 n += NVIC_FIRST_IRQ;
959
960 assert(n >= NVIC_FIRST_IRQ && n < s->num_irq);
961
962 trace_nvic_set_irq_level(n, level);
963
    /*
     * The pending status of an external interrupt is
     * latched on rising edge and exception handler return.
     *
     * Pulsing the interrupt will always run the handler
     * once, and the handler will re-run until the
     * level is low when the handler completes.
     */
971 vec = &s->vectors[n];
972 if (level != vec->level) {
973 vec->level = level;
974 if (level) {
975 armv7m_nvic_set_pending(s, n, false);
976 }
977 }
978}
979
/* callback when external NMI line is changed */
981static void nvic_nmi_trigger(void *opaque, int n, int level)
982{
983 NVICState *s = opaque;
984
985 trace_nvic_set_nmi_level(level);
    /*
     * The architecture doesn't specify whether NMI should share
     * the normal-interrupt behaviour of being resampled on
     * exception handler return. We choose not to, so just
     * set NMI pending here and don't track the current level.
     */
993 if (level) {
994 armv7m_nvic_set_pending(s, ARMV7M_EXCP_NMI, false);
995 }
996}
997
998static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
999{
1000 ARMCPU *cpu = s->cpu;
1001 uint32_t val;
1002
1003 switch (offset) {
1004 case 4:
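        /* Interrupt Controller Type Register (ICTR) */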
1005 if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
1006 goto bad_offset;
1007 }
1008 return ((s->num_irq - NVIC_FIRST_IRQ) / 32) - 1;
1009 case 0xc:
1010 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1011 goto bad_offset;
1012 }
        /*
         * We make the IMPDEF choice that nothing can ever go into a
         * non-retentive power state, which allows us to RAZ/WI this.
         */
1016 return 0;
1017 case 0x380 ... 0x3bf:
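        /* NVIC_ITNS<n>: Interrupt Target Non-secure */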
1018 {
1019 int startvec = 8 * (offset - 0x380) + NVIC_FIRST_IRQ;
1020 int i;
1021
1022 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1023 goto bad_offset;
1024 }
1025 if (!attrs.secure) {
1026 return 0;
1027 }
1028 val = 0;
1029 for (i = 0; i < 32 && startvec + i < s->num_irq; i++) {
1030 if (s->itns[startvec + i]) {
1031 val |= (1 << i);
1032 }
1033 }
1034 return val;
1035 }
1036 case 0xcfc:
1037 if (!arm_feature(&cpu->env, ARM_FEATURE_V8_1M)) {
1038 goto bad_offset;
1039 }
1040 return cpu->revidr;
1041 case 0xd00:
1042 return cpu->midr;
1043 case 0xd04:
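        /* Interrupt Control and State Register (ICSR) */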
        /* VECTACTIVE */
1045 val = cpu->env.v7m.exception;
        /* VECTPENDING */
1047 if (s->vectpending) {
            /*
             * From v8.1M VECTPENDING must read as 1 if accessed as
             * NonSecure and the highest priority pending and enabled
             * exception targets Secure.
             */
1053 int vp = s->vectpending;
1054 if (!attrs.secure && arm_feature(&cpu->env, ARM_FEATURE_V8_1M) &&
1055 vectpending_targets_secure(s)) {
1056 vp = 1;
1057 }
1058 val |= (vp & 0x1ff) << 12;
1059 }
        /* ISRPENDING - set if an external interrupt is pending */
1061 if (nvic_isrpending(s)) {
1062 val |= (1 << 22);
1063 }
        /* RETTOBASE - set if only one handler is active */
1065 if (nvic_rettobase(s)) {
1066 val |= (1 << 11);
1067 }
1068 if (attrs.secure) {
1069
1070 if (s->sec_vectors[ARMV7M_EXCP_SYSTICK].pending) {
1071 val |= (1 << 26);
1072 }
1073
1074 if (s->sec_vectors[ARMV7M_EXCP_PENDSV].pending) {
1075 val |= (1 << 28);
1076 }
1077 } else {
1078
1079 if (s->vectors[ARMV7M_EXCP_SYSTICK].pending) {
1080 val |= (1 << 26);
1081 }
1082
1083 if (s->vectors[ARMV7M_EXCP_PENDSV].pending) {
1084 val |= (1 << 28);
1085 }
1086 }
1087
1088 if ((attrs.secure || (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK))
1089 && s->vectors[ARMV7M_EXCP_NMI].pending) {
1090 val |= (1 << 31);
1091 }
1092
1093
1094 return val;
1095 case 0xd08:
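        /* Vector Table Offset Register (VTOR) */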
1096 return cpu->env.v7m.vecbase[attrs.secure];
1097 case 0xd0c:
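        /* Application Interrupt and Reset Control Register (AIRCR) */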
1098 val = 0xfa050000 | (s->prigroup[attrs.secure] << 8);
1099 if (attrs.secure) {
1100
1101 val |= cpu->env.v7m.aircr;
1102 } else {
1103 if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1104
1105
1106
1107
1108 val |= cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK;
1109 }
1110 }
1111 return val;
1112 case 0xd10:
1113 if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
1114 goto bad_offset;
1115 }
1116 return cpu->env.v7m.scr[attrs.secure];
1117 case 0xd14:
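        /* Configuration and Control Register (CCR) */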
        /*
         * The BFHFNMIGN bit is the only non-banked bit in CCR; we keep
         * it in the non-secure copy of the register.
         */
1122 val = cpu->env.v7m.ccr[attrs.secure];
1123 val |= cpu->env.v7m.ccr[M_REG_NS] & R_V7M_CCR_BFHFNMIGN_MASK;
        /* BFHFNMIGN is RAZ/WI from NS if AIRCR.BFHFNMINS is 0 */
1125 if (!attrs.secure) {
1126 if (!(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
1127 val &= ~R_V7M_CCR_BFHFNMIGN_MASK;
1128 }
1129 }
1130 return val;
1131 case 0xd24:
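        /* System Handler Control and State Register (SHCSR) */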
1132 if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
1133 goto bad_offset;
1134 }
1135 val = 0;
1136 if (attrs.secure) {
1137 if (s->sec_vectors[ARMV7M_EXCP_MEM].active) {
1138 val |= (1 << 0);
1139 }
1140 if (s->sec_vectors[ARMV7M_EXCP_HARD].active) {
1141 val |= (1 << 2);
1142 }
1143 if (s->sec_vectors[ARMV7M_EXCP_USAGE].active) {
1144 val |= (1 << 3);
1145 }
1146 if (s->sec_vectors[ARMV7M_EXCP_SVC].active) {
1147 val |= (1 << 7);
1148 }
1149 if (s->sec_vectors[ARMV7M_EXCP_PENDSV].active) {
1150 val |= (1 << 10);
1151 }
1152 if (s->sec_vectors[ARMV7M_EXCP_SYSTICK].active) {
1153 val |= (1 << 11);
1154 }
1155 if (s->sec_vectors[ARMV7M_EXCP_USAGE].pending) {
1156 val |= (1 << 12);
1157 }
1158 if (s->sec_vectors[ARMV7M_EXCP_MEM].pending) {
1159 val |= (1 << 13);
1160 }
1161 if (s->sec_vectors[ARMV7M_EXCP_SVC].pending) {
1162 val |= (1 << 15);
1163 }
1164 if (s->sec_vectors[ARMV7M_EXCP_MEM].enabled) {
1165 val |= (1 << 16);
1166 }
1167 if (s->sec_vectors[ARMV7M_EXCP_USAGE].enabled) {
1168 val |= (1 << 18);
1169 }
1170 if (s->sec_vectors[ARMV7M_EXCP_HARD].pending) {
1171 val |= (1 << 21);
1172 }
1173
1174 if (s->vectors[ARMV7M_EXCP_SECURE].active) {
1175 val |= (1 << 4);
1176 }
1177 if (s->vectors[ARMV7M_EXCP_SECURE].enabled) {
1178 val |= (1 << 19);
1179 }
1180 if (s->vectors[ARMV7M_EXCP_SECURE].pending) {
1181 val |= (1 << 20);
1182 }
1183 } else {
1184 if (s->vectors[ARMV7M_EXCP_MEM].active) {
1185 val |= (1 << 0);
1186 }
1187 if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1188
1189 if (s->vectors[ARMV7M_EXCP_HARD].active) {
1190 val |= (1 << 2);
1191 }
1192 if (s->vectors[ARMV7M_EXCP_HARD].pending) {
1193 val |= (1 << 21);
1194 }
1195 }
1196 if (s->vectors[ARMV7M_EXCP_USAGE].active) {
1197 val |= (1 << 3);
1198 }
1199 if (s->vectors[ARMV7M_EXCP_SVC].active) {
1200 val |= (1 << 7);
1201 }
1202 if (s->vectors[ARMV7M_EXCP_PENDSV].active) {
1203 val |= (1 << 10);
1204 }
1205 if (s->vectors[ARMV7M_EXCP_SYSTICK].active) {
1206 val |= (1 << 11);
1207 }
1208 if (s->vectors[ARMV7M_EXCP_USAGE].pending) {
1209 val |= (1 << 12);
1210 }
1211 if (s->vectors[ARMV7M_EXCP_MEM].pending) {
1212 val |= (1 << 13);
1213 }
1214 if (s->vectors[ARMV7M_EXCP_SVC].pending) {
1215 val |= (1 << 15);
1216 }
1217 if (s->vectors[ARMV7M_EXCP_MEM].enabled) {
1218 val |= (1 << 16);
1219 }
1220 if (s->vectors[ARMV7M_EXCP_USAGE].enabled) {
1221 val |= (1 << 18);
1222 }
1223 }
1224 if (attrs.secure || (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
1225 if (s->vectors[ARMV7M_EXCP_BUS].active) {
1226 val |= (1 << 1);
1227 }
1228 if (s->vectors[ARMV7M_EXCP_BUS].pending) {
1229 val |= (1 << 14);
1230 }
1231 if (s->vectors[ARMV7M_EXCP_BUS].enabled) {
1232 val |= (1 << 17);
1233 }
1234 if (arm_feature(&cpu->env, ARM_FEATURE_V8) &&
1235 s->vectors[ARMV7M_EXCP_NMI].active) {
1236
1237 val |= (1 << 5);
1238 }
1239 }
1240
1241
1242 if (s->vectors[ARMV7M_EXCP_DEBUG].active) {
1243 val |= (1 << 8);
1244 }
1245 return val;
1246 case 0xd2c:
1247 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1248 goto bad_offset;
1249 }
1250 return cpu->env.v7m.hfsr;
1251 case 0xd30:
1252 return cpu->env.v7m.dfsr;
1253 case 0xd34:
1254 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1255 goto bad_offset;
1256 }
1257 return cpu->env.v7m.mmfar[attrs.secure];
1258 case 0xd38:
1259 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1260 goto bad_offset;
1261 }
1262 if (!attrs.secure &&
1263 !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
1264 return 0;
1265 }
1266 return cpu->env.v7m.bfar;
1267 case 0xd3c:
1268
1269 qemu_log_mask(LOG_UNIMP,
1270 "Aux Fault status registers unimplemented\n");
1271 return 0;
1272 case 0xd40:
1273 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1274 goto bad_offset;
1275 }
1276 return cpu->isar.id_pfr0;
1277 case 0xd44:
1278 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1279 goto bad_offset;
1280 }
1281 return cpu->isar.id_pfr1;
1282 case 0xd48:
1283 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1284 goto bad_offset;
1285 }
1286 return cpu->isar.id_dfr0;
1287 case 0xd4c:
1288 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1289 goto bad_offset;
1290 }
1291 return cpu->id_afr0;
1292 case 0xd50:
1293 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1294 goto bad_offset;
1295 }
1296 return cpu->isar.id_mmfr0;
1297 case 0xd54:
1298 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1299 goto bad_offset;
1300 }
1301 return cpu->isar.id_mmfr1;
1302 case 0xd58:
1303 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1304 goto bad_offset;
1305 }
1306 return cpu->isar.id_mmfr2;
1307 case 0xd5c:
1308 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1309 goto bad_offset;
1310 }
1311 return cpu->isar.id_mmfr3;
1312 case 0xd60:
1313 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1314 goto bad_offset;
1315 }
1316 return cpu->isar.id_isar0;
1317 case 0xd64:
1318 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1319 goto bad_offset;
1320 }
1321 return cpu->isar.id_isar1;
1322 case 0xd68:
1323 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1324 goto bad_offset;
1325 }
1326 return cpu->isar.id_isar2;
1327 case 0xd6c:
1328 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1329 goto bad_offset;
1330 }
1331 return cpu->isar.id_isar3;
1332 case 0xd70:
1333 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1334 goto bad_offset;
1335 }
1336 return cpu->isar.id_isar4;
1337 case 0xd74:
1338 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1339 goto bad_offset;
1340 }
1341 return cpu->isar.id_isar5;
1342 case 0xd78:
1343 return cpu->clidr;
1344 case 0xd7c:
1345 return cpu->ctr;
1346 case 0xd80:
1347 {
1348 int idx = cpu->env.v7m.csselr[attrs.secure] & R_V7M_CSSELR_INDEX_MASK;
1349 return cpu->ccsidr[idx];
1350 }
1351 case 0xd84:
1352 return cpu->env.v7m.csselr[attrs.secure];
1353 case 0xd88:
1354 if (!cpu_isar_feature(aa32_vfp_simd, cpu)) {
1355 return 0;
1356 }
1357 return cpu->env.v7m.cpacr[attrs.secure];
1358 case 0xd8c:
1359 if (!attrs.secure || !cpu_isar_feature(aa32_vfp_simd, cpu)) {
1360 return 0;
1361 }
1362 return cpu->env.v7m.nsacr;
1363
1364 case 0xd90:
        /* Unified MPU; if the MPU is not present this value is zero */
1366 return cpu->pmsav7_dregion << 8;
1367 case 0xd94:
1368 return cpu->env.v7m.mpu_ctrl[attrs.secure];
1369 case 0xd98:
1370 return cpu->env.pmsav7.rnr[attrs.secure];
1371 case 0xd9c:
1372 case 0xda4:
1373 case 0xdac:
1374 case 0xdb4:
1375 {
1376 int region = cpu->env.pmsav7.rnr[attrs.secure];
1377
1378 if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            /*
             * PMSAv8M handling of the aliases is different from v7M:
             * aliases A1, A2, A3 override the low two bits of the region
             * number in MPU_RNR, and there is no 'region' field in the
             * RBAR register.
             */
1384 int aliasno = (offset - 0xd9c) / 8;
1385 if (aliasno) {
1386 region = deposit32(region, 0, 2, aliasno);
1387 }
1388 if (region >= cpu->pmsav7_dregion) {
1389 return 0;
1390 }
1391 return cpu->env.pmsav8.rbar[attrs.secure][region];
1392 }
1393
1394 if (region >= cpu->pmsav7_dregion) {
1395 return 0;
1396 }
1397 return (cpu->env.pmsav7.drbar[region] & ~0x1f) | (region & 0xf);
1398 }
1399 case 0xda0:
1400 case 0xda8:
1401 case 0xdb0:
1402 case 0xdb8:
1403 {
1404 int region = cpu->env.pmsav7.rnr[attrs.secure];
1405
1406 if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1407
1408
1409
1410
1411 int aliasno = (offset - 0xda0) / 8;
1412 if (aliasno) {
1413 region = deposit32(region, 0, 2, aliasno);
1414 }
1415 if (region >= cpu->pmsav7_dregion) {
1416 return 0;
1417 }
1418 return cpu->env.pmsav8.rlar[attrs.secure][region];
1419 }
1420
1421 if (region >= cpu->pmsav7_dregion) {
1422 return 0;
1423 }
1424 return ((cpu->env.pmsav7.dracr[region] & 0xffff) << 16) |
1425 (cpu->env.pmsav7.drsr[region] & 0xffff);
1426 }
1427 case 0xdc0:
1428 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1429 goto bad_offset;
1430 }
1431 return cpu->env.pmsav8.mair0[attrs.secure];
1432 case 0xdc4:
1433 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1434 goto bad_offset;
1435 }
1436 return cpu->env.pmsav8.mair1[attrs.secure];
1437 case 0xdd0:
1438 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1439 goto bad_offset;
1440 }
1441 if (!attrs.secure) {
1442 return 0;
1443 }
1444 return cpu->env.sau.ctrl;
1445 case 0xdd4:
1446 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1447 goto bad_offset;
1448 }
1449 if (!attrs.secure) {
1450 return 0;
1451 }
1452 return cpu->sau_sregion;
1453 case 0xdd8:
1454 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1455 goto bad_offset;
1456 }
1457 if (!attrs.secure) {
1458 return 0;
1459 }
1460 return cpu->env.sau.rnr;
1461 case 0xddc:
1462 {
1463 int region = cpu->env.sau.rnr;
1464
1465 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1466 goto bad_offset;
1467 }
1468 if (!attrs.secure) {
1469 return 0;
1470 }
1471 if (region >= cpu->sau_sregion) {
1472 return 0;
1473 }
1474 return cpu->env.sau.rbar[region];
1475 }
1476 case 0xde0:
1477 {
1478 int region = cpu->env.sau.rnr;
1479
1480 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1481 goto bad_offset;
1482 }
1483 if (!attrs.secure) {
1484 return 0;
1485 }
1486 if (region >= cpu->sau_sregion) {
1487 return 0;
1488 }
1489 return cpu->env.sau.rlar[region];
1490 }
1491 case 0xde4:
1492 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1493 goto bad_offset;
1494 }
1495 if (!attrs.secure) {
1496 return 0;
1497 }
1498 return cpu->env.v7m.sfsr;
1499 case 0xde8:
1500 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1501 goto bad_offset;
1502 }
1503 if (!attrs.secure) {
1504 return 0;
1505 }
1506 return cpu->env.v7m.sfar;
1507 case 0xf04:
1508 if (!cpu_isar_feature(aa32_ras, cpu)) {
1509 goto bad_offset;
1510 }
1511
1512 return 0;
1513 case 0xf34:
1514 if (!cpu_isar_feature(aa32_vfp_simd, cpu)) {
1515 return 0;
1516 }
1517 if (attrs.secure) {
1518 return cpu->env.v7m.fpccr[M_REG_S];
1519 } else {
1520
1521
1522
1523
1524
1525
1526 uint32_t value = cpu->env.v7m.fpccr[M_REG_S];
1527 uint32_t mask = R_V7M_FPCCR_LSPEN_MASK |
1528 R_V7M_FPCCR_CLRONRET_MASK |
1529 R_V7M_FPCCR_MONRDY_MASK;
1530
1531 if (s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
1532 mask |= R_V7M_FPCCR_BFRDY_MASK | R_V7M_FPCCR_HFRDY_MASK;
1533 }
1534
1535 value &= mask;
1536
1537 value |= cpu->env.v7m.fpccr[M_REG_NS];
1538 return value;
1539 }
1540 case 0xf38:
1541 if (!cpu_isar_feature(aa32_vfp_simd, cpu)) {
1542 return 0;
1543 }
1544 return cpu->env.v7m.fpcar[attrs.secure];
1545 case 0xf3c:
1546 if (!cpu_isar_feature(aa32_vfp_simd, cpu)) {
1547 return 0;
1548 }
1549 return cpu->env.v7m.fpdscr[attrs.secure];
1550 case 0xf40:
1551 return cpu->isar.mvfr0;
1552 case 0xf44:
1553 return cpu->isar.mvfr1;
1554 case 0xf48:
1555 return cpu->isar.mvfr2;
1556 default:
1557 bad_offset:
1558 qemu_log_mask(LOG_GUEST_ERROR, "NVIC: Bad read offset 0x%x\n", offset);
1559 return 0;
1560 }
1561}
1562
1563static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
1564 MemTxAttrs attrs)
1565{
1566 ARMCPU *cpu = s->cpu;
1567
1568 switch (offset) {
1569 case 0xc:
1570 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1571 goto bad_offset;
1572 }
        /* Make the IMPDEF choice to RAZ/WI this. */
1574 break;
1575 case 0x380 ... 0x3bf:
1576 {
1577 int startvec = 8 * (offset - 0x380) + NVIC_FIRST_IRQ;
1578 int i;
1579
1580 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1581 goto bad_offset;
1582 }
1583 if (!attrs.secure) {
1584 break;
1585 }
1586 for (i = 0; i < 32 && startvec + i < s->num_irq; i++) {
1587 s->itns[startvec + i] = (value >> i) & 1;
1588 }
1589 nvic_irq_update(s);
1590 break;
1591 }
1592 case 0xd04:
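        /* Interrupt Control and State Register (ICSR) */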
1593 if (attrs.secure || cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
1594 if (value & (1 << 31)) {
1595 armv7m_nvic_set_pending(s, ARMV7M_EXCP_NMI, false);
1596 } else if (value & (1 << 30) &&
1597 arm_feature(&cpu->env, ARM_FEATURE_V8)) {
                /* PENDNMICLR didn't exist in v7M */
1599 armv7m_nvic_clear_pending(s, ARMV7M_EXCP_NMI, false);
1600 }
1601 }
1602 if (value & (1 << 28)) {
1603 armv7m_nvic_set_pending(s, ARMV7M_EXCP_PENDSV, attrs.secure);
1604 } else if (value & (1 << 27)) {
1605 armv7m_nvic_clear_pending(s, ARMV7M_EXCP_PENDSV, attrs.secure);
1606 }
1607 if (value & (1 << 26)) {
1608 armv7m_nvic_set_pending(s, ARMV7M_EXCP_SYSTICK, attrs.secure);
1609 } else if (value & (1 << 25)) {
1610 armv7m_nvic_clear_pending(s, ARMV7M_EXCP_SYSTICK, attrs.secure);
1611 }
1612 break;
1613 case 0xd08:
1614 cpu->env.v7m.vecbase[attrs.secure] = value & 0xffffff80;
1615 break;
1616 case 0xd0c:
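        /* Application Interrupt and Reset Control Register (AIRCR) */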
1617 if ((value >> R_V7M_AIRCR_VECTKEY_SHIFT) == 0x05fa) {
1618 if (value & R_V7M_AIRCR_SYSRESETREQ_MASK) {
1619 if (attrs.secure ||
1620 !(cpu->env.v7m.aircr & R_V7M_AIRCR_SYSRESETREQS_MASK)) {
1621 signal_sysresetreq(s);
1622 }
1623 }
1624 if (value & R_V7M_AIRCR_VECTCLRACTIVE_MASK) {
1625 qemu_log_mask(LOG_GUEST_ERROR,
1626 "Setting VECTCLRACTIVE when not in DEBUG mode "
1627 "is UNPREDICTABLE\n");
1628 }
1629 if (value & R_V7M_AIRCR_VECTRESET_MASK) {
                /* NB: this bit is RES0 in v8M */
1631 qemu_log_mask(LOG_GUEST_ERROR,
1632 "Setting VECTRESET when not in DEBUG mode "
1633 "is UNPREDICTABLE\n");
1634 }
1635 if (arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1636 s->prigroup[attrs.secure] =
1637 extract32(value,
1638 R_V7M_AIRCR_PRIGROUP_SHIFT,
1639 R_V7M_AIRCR_PRIGROUP_LENGTH);
1640 }
1641
1642 if (attrs.secure) {
                /* These bits are only writable by secure */
1644 cpu->env.v7m.aircr = value &
1645 (R_V7M_AIRCR_SYSRESETREQS_MASK |
1646 R_V7M_AIRCR_BFHFNMINS_MASK |
1647 R_V7M_AIRCR_PRIS_MASK);
                /*
                 * BFHFNMINS changes the priority of Secure HardFault, and
                 * allows a pending Non-secure HardFault to preempt (which
                 * we implement by marking it enabled).
                 */
1652 if (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
1653 s->sec_vectors[ARMV7M_EXCP_HARD].prio = -3;
1654 s->vectors[ARMV7M_EXCP_HARD].enabled = 1;
1655 } else {
1656 s->sec_vectors[ARMV7M_EXCP_HARD].prio = -1;
1657 s->vectors[ARMV7M_EXCP_HARD].enabled = 0;
1658 }
1659 }
1660 nvic_irq_update(s);
1661 }
1662 break;
1663 case 0xd10:
1664 if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
1665 goto bad_offset;
1666 }
        /*
         * We don't implement deep-sleep so these bits are RAZ/WI.
         * The other bits in the register are banked.
         * QEMU's implementation ignores SEVONPEND and SLEEPONEXIT, which
         * is architecturally permitted.
         */
1672 value &= ~(R_V7M_SCR_SLEEPDEEP_MASK | R_V7M_SCR_SLEEPDEEPS_MASK);
1673 cpu->env.v7m.scr[attrs.secure] = value;
1674 break;
1675 case 0xd14:
1676 {
1677 uint32_t mask;
1678
1679 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1680 goto bad_offset;
1681 }
1682
1683
1684 mask = R_V7M_CCR_STKALIGN_MASK |
1685 R_V7M_CCR_BFHFNMIGN_MASK |
1686 R_V7M_CCR_DIV_0_TRP_MASK |
1687 R_V7M_CCR_UNALIGN_TRP_MASK |
1688 R_V7M_CCR_USERSETMPEND_MASK |
1689 R_V7M_CCR_NONBASETHRDENA_MASK;
1690 if (arm_feature(&cpu->env, ARM_FEATURE_V8_1M) && attrs.secure) {
1691
1692 mask |= R_V7M_CCR_TRD_MASK;
1693 }
1694 value &= mask;
1695
1696 if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1697
1698 value |= R_V7M_CCR_NONBASETHRDENA_MASK
1699 | R_V7M_CCR_STKALIGN_MASK;
1700 }
1701 if (attrs.secure) {
1702
1703 cpu->env.v7m.ccr[M_REG_NS] =
1704 (cpu->env.v7m.ccr[M_REG_NS] & ~R_V7M_CCR_BFHFNMIGN_MASK)
1705 | (value & R_V7M_CCR_BFHFNMIGN_MASK);
1706 value &= ~R_V7M_CCR_BFHFNMIGN_MASK;
1707 } else {
1708
1709
1710
1711
1712 if (!(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
1713 value &= ~R_V7M_CCR_BFHFNMIGN_MASK;
1714 value |= cpu->env.v7m.ccr[M_REG_NS] & R_V7M_CCR_BFHFNMIGN_MASK;
1715 }
1716 }
1717
1718 cpu->env.v7m.ccr[attrs.secure] = value;
1719 break;
1720 }
1721 case 0xd24:
1722 if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
1723 goto bad_offset;
1724 }
1725 if (attrs.secure) {
1726 s->sec_vectors[ARMV7M_EXCP_MEM].active = (value & (1 << 0)) != 0;
1727
1728 s->sec_vectors[ARMV7M_EXCP_USAGE].active = (value & (1 << 3)) != 0;
1729 s->sec_vectors[ARMV7M_EXCP_SVC].active = (value & (1 << 7)) != 0;
1730 s->sec_vectors[ARMV7M_EXCP_PENDSV].active =
1731 (value & (1 << 10)) != 0;
1732 s->sec_vectors[ARMV7M_EXCP_SYSTICK].active =
1733 (value & (1 << 11)) != 0;
1734 s->sec_vectors[ARMV7M_EXCP_USAGE].pending =
1735 (value & (1 << 12)) != 0;
1736 s->sec_vectors[ARMV7M_EXCP_MEM].pending = (value & (1 << 13)) != 0;
1737 s->sec_vectors[ARMV7M_EXCP_SVC].pending = (value & (1 << 15)) != 0;
1738 s->sec_vectors[ARMV7M_EXCP_MEM].enabled = (value & (1 << 16)) != 0;
1739 s->sec_vectors[ARMV7M_EXCP_BUS].enabled = (value & (1 << 17)) != 0;
1740 s->sec_vectors[ARMV7M_EXCP_USAGE].enabled =
1741 (value & (1 << 18)) != 0;
1742 s->sec_vectors[ARMV7M_EXCP_HARD].pending = (value & (1 << 21)) != 0;
1743
1744 s->vectors[ARMV7M_EXCP_SECURE].active = (value & (1 << 4)) != 0;
1745 s->vectors[ARMV7M_EXCP_SECURE].enabled = (value & (1 << 19)) != 0;
1746 s->vectors[ARMV7M_EXCP_SECURE].pending = (value & (1 << 20)) != 0;
1747 } else {
1748 s->vectors[ARMV7M_EXCP_MEM].active = (value & (1 << 0)) != 0;
1749 if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1750
1751 s->vectors[ARMV7M_EXCP_HARD].pending = (value & (1 << 21)) != 0;
1752 }
1753 s->vectors[ARMV7M_EXCP_USAGE].active = (value & (1 << 3)) != 0;
1754 s->vectors[ARMV7M_EXCP_SVC].active = (value & (1 << 7)) != 0;
1755 s->vectors[ARMV7M_EXCP_PENDSV].active = (value & (1 << 10)) != 0;
1756 s->vectors[ARMV7M_EXCP_SYSTICK].active = (value & (1 << 11)) != 0;
1757 s->vectors[ARMV7M_EXCP_USAGE].pending = (value & (1 << 12)) != 0;
1758 s->vectors[ARMV7M_EXCP_MEM].pending = (value & (1 << 13)) != 0;
1759 s->vectors[ARMV7M_EXCP_SVC].pending = (value & (1 << 15)) != 0;
1760 s->vectors[ARMV7M_EXCP_MEM].enabled = (value & (1 << 16)) != 0;
1761 s->vectors[ARMV7M_EXCP_USAGE].enabled = (value & (1 << 18)) != 0;
1762 }
1763 if (attrs.secure || (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
1764 s->vectors[ARMV7M_EXCP_BUS].active = (value & (1 << 1)) != 0;
1765 s->vectors[ARMV7M_EXCP_BUS].pending = (value & (1 << 14)) != 0;
1766 s->vectors[ARMV7M_EXCP_BUS].enabled = (value & (1 << 17)) != 0;
1767 }
1768
1769
1770
1771 if (!attrs.secure && cpu->env.v7m.secure &&
1772 (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) &&
1773 (value & (1 << 5)) == 0) {
1774 s->vectors[ARMV7M_EXCP_NMI].active = 0;
1775 }
1776
1777
1778
1779
1780
1781
1782 if (!attrs.secure && cpu->env.v7m.secure &&
1783 (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) &&
1784 (value & (1 << 2)) == 0) {
1785 s->vectors[ARMV7M_EXCP_HARD].active = 0;
1786 }
1787
1788
1789 s->vectors[ARMV7M_EXCP_DEBUG].active = (value & (1 << 8)) != 0;
1790 nvic_irq_update(s);
1791 break;
1792 case 0xd2c:
1793 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1794 goto bad_offset;
1795 }
1796 cpu->env.v7m.hfsr &= ~value;
1797 break;
1798 case 0xd30:
1799 cpu->env.v7m.dfsr &= ~value;
1800 break;
1801 case 0xd34:
1802 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1803 goto bad_offset;
1804 }
1805 cpu->env.v7m.mmfar[attrs.secure] = value;
1806 return;
1807 case 0xd38:
1808 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1809 goto bad_offset;
1810 }
1811 if (!attrs.secure &&
1812 !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
1813 return;
1814 }
1815 cpu->env.v7m.bfar = value;
1816 return;
1817 case 0xd3c:
1818 qemu_log_mask(LOG_UNIMP,
1819 "NVIC: Aux fault status registers unimplemented\n");
1820 break;
1821 case 0xd84:
1822 if (!arm_v7m_csselr_razwi(cpu)) {
1823 cpu->env.v7m.csselr[attrs.secure] = value & R_V7M_CSSELR_INDEX_MASK;
1824 }
1825 break;
1826 case 0xd88:
1827 if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
1828
1829 cpu->env.v7m.cpacr[attrs.secure] = value & (0xf << 20);
1830 }
1831 break;
1832 case 0xd8c:
1833 if (attrs.secure && cpu_isar_feature(aa32_vfp_simd, cpu)) {
1834
1835 cpu->env.v7m.nsacr = value & (3 << 10);
1836 }
1837 break;
1838 case 0xd90:
1839 return;
1840 case 0xd94:
1841 if ((value &
1842 (R_V7M_MPU_CTRL_HFNMIENA_MASK | R_V7M_MPU_CTRL_ENABLE_MASK))
1843 == R_V7M_MPU_CTRL_HFNMIENA_MASK) {
1844 qemu_log_mask(LOG_GUEST_ERROR, "MPU_CTRL: HFNMIENA and !ENABLE is "
1845 "UNPREDICTABLE\n");
1846 }
1847 cpu->env.v7m.mpu_ctrl[attrs.secure]
1848 = value & (R_V7M_MPU_CTRL_ENABLE_MASK |
1849 R_V7M_MPU_CTRL_HFNMIENA_MASK |
1850 R_V7M_MPU_CTRL_PRIVDEFENA_MASK);
1851 tlb_flush(CPU(cpu));
1852 break;
1853 case 0xd98:
1854 if (value >= cpu->pmsav7_dregion) {
1855 qemu_log_mask(LOG_GUEST_ERROR, "MPU region out of range %"
1856 PRIu32 "/%" PRIu32 "\n",
1857 value, cpu->pmsav7_dregion);
1858 } else {
1859 cpu->env.pmsav7.rnr[attrs.secure] = value;
1860 }
1861 break;
1862 case 0xd9c:
1863 case 0xda4:
1864 case 0xdac:
1865 case 0xdb4:
1866 {
1867 int region;
1868
1869 if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1870
1871
1872
1873
1874
1875 int aliasno = (offset - 0xd9c) / 8;
1876
1877 region = cpu->env.pmsav7.rnr[attrs.secure];
1878 if (aliasno) {
1879 region = deposit32(region, 0, 2, aliasno);
1880 }
1881 if (region >= cpu->pmsav7_dregion) {
1882 return;
1883 }
1884 cpu->env.pmsav8.rbar[attrs.secure][region] = value;
1885 tlb_flush(CPU(cpu));
1886 return;
1887 }
1888
1889 if (value & (1 << 4)) {
1890
1891
1892
1893 region = extract32(value, 0, 4);
1894 if (region >= cpu->pmsav7_dregion) {
1895 qemu_log_mask(LOG_GUEST_ERROR,
1896 "MPU region out of range %u/%" PRIu32 "\n",
1897 region, cpu->pmsav7_dregion);
1898 return;
1899 }
1900 cpu->env.pmsav7.rnr[attrs.secure] = region;
1901 } else {
1902 region = cpu->env.pmsav7.rnr[attrs.secure];
1903 }
1904
1905 if (region >= cpu->pmsav7_dregion) {
1906 return;
1907 }
1908
1909 cpu->env.pmsav7.drbar[region] = value & ~0x1f;
1910 tlb_flush(CPU(cpu));
1911 break;
1912 }
1913 case 0xda0:
1914 case 0xda8:
1915 case 0xdb0:
1916 case 0xdb8:
1917 {
1918 int region = cpu->env.pmsav7.rnr[attrs.secure];
1919
1920 if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1921
1922
1923
1924
1925 int aliasno = (offset - 0xd9c) / 8;
1926
1927 region = cpu->env.pmsav7.rnr[attrs.secure];
1928 if (aliasno) {
1929 region = deposit32(region, 0, 2, aliasno);
1930 }
1931 if (region >= cpu->pmsav7_dregion) {
1932 return;
1933 }
1934 cpu->env.pmsav8.rlar[attrs.secure][region] = value;
1935 tlb_flush(CPU(cpu));
1936 return;
1937 }
1938
1939 if (region >= cpu->pmsav7_dregion) {
1940 return;
1941 }
1942
1943 cpu->env.pmsav7.drsr[region] = value & 0xff3f;
1944 cpu->env.pmsav7.dracr[region] = (value >> 16) & 0x173f;
1945 tlb_flush(CPU(cpu));
1946 break;
1947 }
1948 case 0xdc0:
1949 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1950 goto bad_offset;
1951 }
1952 if (cpu->pmsav7_dregion) {
1953
1954 cpu->env.pmsav8.mair0[attrs.secure] = value;
1955 }
1956
1957
1958
1959 break;
1960 case 0xdc4:
1961 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1962 goto bad_offset;
1963 }
1964 if (cpu->pmsav7_dregion) {
1965
1966 cpu->env.pmsav8.mair1[attrs.secure] = value;
1967 }
1968
1969
1970
1971 break;
1972 case 0xdd0:
1973 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1974 goto bad_offset;
1975 }
1976 if (!attrs.secure) {
1977 return;
1978 }
1979 cpu->env.sau.ctrl = value & 3;
1980 break;
1981 case 0xdd4:
1982 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1983 goto bad_offset;
1984 }
1985 break;
1986 case 0xdd8:
1987 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1988 goto bad_offset;
1989 }
1990 if (!attrs.secure) {
1991 return;
1992 }
1993 if (value >= cpu->sau_sregion) {
1994 qemu_log_mask(LOG_GUEST_ERROR, "SAU region out of range %"
1995 PRIu32 "/%" PRIu32 "\n",
1996 value, cpu->sau_sregion);
1997 } else {
1998 cpu->env.sau.rnr = value;
1999 }
2000 break;
2001 case 0xddc:
2002 {
2003 int region = cpu->env.sau.rnr;
2004
2005 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
2006 goto bad_offset;
2007 }
2008 if (!attrs.secure) {
2009 return;
2010 }
2011 if (region >= cpu->sau_sregion) {
2012 return;
2013 }
2014 cpu->env.sau.rbar[region] = value & ~0x1f;
2015 tlb_flush(CPU(cpu));
2016 break;
2017 }
2018 case 0xde0:
2019 {
2020 int region = cpu->env.sau.rnr;
2021
2022 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
2023 goto bad_offset;
2024 }
2025 if (!attrs.secure) {
2026 return;
2027 }
2028 if (region >= cpu->sau_sregion) {
2029 return;
2030 }
2031 cpu->env.sau.rlar[region] = value & ~0x1c;
2032 tlb_flush(CPU(cpu));
2033 break;
2034 }
2035 case 0xde4:
2036 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
2037 goto bad_offset;
2038 }
2039 if (!attrs.secure) {
2040 return;
2041 }
2042 cpu->env.v7m.sfsr &= ~value;
2043 break;
2044 case 0xde8:
2045 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
2046 goto bad_offset;
2047 }
2048 if (!attrs.secure) {
2049 return;
2050 }
2051 cpu->env.v7m.sfsr = value;
2052 break;
2053 case 0xf00:
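    /* Software Triggered Interrupt Register (STIR) */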
2054 {
2055 int excnum = (value & 0x1ff) + NVIC_FIRST_IRQ;
2056
2057 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
2058 goto bad_offset;
2059 }
2060
2061 if (excnum < s->num_irq) {
2062 armv7m_nvic_set_pending(s, excnum, false);
2063 }
2064 break;
2065 }
2066 case 0xf04:
2067 if (!cpu_isar_feature(aa32_ras, cpu)) {
2068 goto bad_offset;
2069 }
2070
2071 break;
2072 case 0xf34:
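        /* Floating-point Context Control Register (FPCCR) */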
2073 if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
2074
2075 uint32_t fpccr_s;
2076
2077 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
2078
2079 value &= (R_V7M_FPCCR_LSPACT_MASK |
2080 R_V7M_FPCCR_USER_MASK |
2081 R_V7M_FPCCR_THREAD_MASK |
2082 R_V7M_FPCCR_HFRDY_MASK |
2083 R_V7M_FPCCR_MMRDY_MASK |
2084 R_V7M_FPCCR_BFRDY_MASK |
2085 R_V7M_FPCCR_MONRDY_MASK |
2086 R_V7M_FPCCR_LSPEN_MASK |
2087 R_V7M_FPCCR_ASPEN_MASK);
2088 }
2089 value &= ~R_V7M_FPCCR_RES0_MASK;
2090
2091 if (!attrs.secure) {
2092
2093 fpccr_s = cpu->env.v7m.fpccr[M_REG_S];
2094 if (!(fpccr_s & R_V7M_FPCCR_LSPENS_MASK)) {
2095 uint32_t lspen = FIELD_EX32(value, V7M_FPCCR, LSPEN);
2096 fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, LSPEN, lspen);
2097 }
2098 if (!(fpccr_s & R_V7M_FPCCR_CLRONRETS_MASK)) {
2099 uint32_t cor = FIELD_EX32(value, V7M_FPCCR, CLRONRET);
2100 fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, CLRONRET, cor);
2101 }
2102 if ((s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
2103 uint32_t hfrdy = FIELD_EX32(value, V7M_FPCCR, HFRDY);
2104 uint32_t bfrdy = FIELD_EX32(value, V7M_FPCCR, BFRDY);
2105 fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, HFRDY, hfrdy);
2106 fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, BFRDY, bfrdy);
2107 }
2108
2109 {
2110 uint32_t monrdy = FIELD_EX32(value, V7M_FPCCR, MONRDY);
2111 fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, MONRDY, monrdy);
2112 }
2113
2114
2115
2116
2117
2118 value &= R_V7M_FPCCR_BANKED_MASK;
2119 cpu->env.v7m.fpccr[M_REG_NS] = value;
2120 } else {
2121 fpccr_s = value;
2122 }
2123 cpu->env.v7m.fpccr[M_REG_S] = fpccr_s;
2124 }
2125 break;
2126 case 0xf38:
2127 if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
2128 value &= ~7;
2129 cpu->env.v7m.fpcar[attrs.secure] = value;
2130 }
2131 break;
2132 case 0xf3c:
2133 if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
2134 uint32_t mask = FPCR_AHP | FPCR_DN | FPCR_FZ | FPCR_RMODE_MASK;
2135 if (cpu_isar_feature(any_fp16, cpu)) {
2136 mask |= FPCR_FZ16;
2137 }
2138 value &= mask;
2139 if (cpu_isar_feature(aa32_lob, cpu)) {
2140 value |= 4 << FPCR_LTPSIZE_SHIFT;
2141 }
2142 cpu->env.v7m.fpdscr[attrs.secure] = value;
2143 }
2144 break;
2145 case 0xf50:
2146 case 0xf58:
2147 case 0xf5c:
2148 case 0xf60:
2149 case 0xf64:
2150 case 0xf68:
2151 case 0xf6c:
2152 case 0xf70:
2153 case 0xf74:
2154 case 0xf78:
2155
2156 break;
2157 default:
2158 bad_offset:
2159 qemu_log_mask(LOG_GUEST_ERROR,
2160 "NVIC: Bad write offset 0x%x\n", offset);
2161 }
2162}
2163
2164static bool nvic_user_access_ok(NVICState *s, hwaddr offset, MemTxAttrs attrs)
2165{
    /* Return true if unprivileged access to this register is permitted. */
2167 switch (offset) {
2168 case 0xf00:
        /*
         * STIR: unprivileged access is permitted only if the
         * CCR.USERSETMPEND bit for this security state is set.
         */
2172 return s->cpu->env.v7m.ccr[attrs.secure] & R_V7M_CCR_USERSETMPEND_MASK;
2173 default:
        /* All other user accesses cause a BusFault unconditionally */
2175 return false;
2176 }
2177}
2178
2179static int shpr_bank(NVICState *s, int exc, MemTxAttrs attrs)
2180{
    /*
     * Return the M_REG_S or M_REG_NS bank to use for the priority of this
     * exception as held in the SHPR registers, or -1 if the priority field
     * is RAZ/WI for the given access (exc, attrs).
     */
2186 switch (exc) {
2187 case ARMV7M_EXCP_MEM:
2188 case ARMV7M_EXCP_USAGE:
2189 case ARMV7M_EXCP_SVC:
2190 case ARMV7M_EXCP_PENDSV:
2191 case ARMV7M_EXCP_SYSTICK:
        /* Banked exceptions: use the bank matching the access */
2193 return attrs.secure;
2194 case ARMV7M_EXCP_BUS:
        /* Not banked; RAZ/WI from NonSecure if BFHFNMINS is 0 */
2196 if (!attrs.secure &&
2197 !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
2198 return -1;
2199 }
2200 return M_REG_NS;
2201 case ARMV7M_EXCP_SECURE:
2202
2203 if (!attrs.secure) {
2204 return -1;
2205 }
2206 return M_REG_NS;
2207 case ARMV7M_EXCP_DEBUG:
2208
2209 return M_REG_NS;
2210 case 8 ... 10:
2211 case 13:
2212
2213 return -1;
2214 default:
2215
2216 g_assert_not_reached();
2217 }
2218}
2219
2220static MemTxResult nvic_sysreg_read(void *opaque, hwaddr addr,
2221 uint64_t *data, unsigned size,
2222 MemTxAttrs attrs)
2223{
2224 NVICState *s = (NVICState *)opaque;
2225 uint32_t offset = addr;
2226 unsigned i, startvec, end;
2227 uint32_t val;
2228
2229 if (attrs.user && !nvic_user_access_ok(s, addr, attrs)) {
2230
2231 return MEMTX_ERROR;
2232 }
2233
2234 switch (offset) {
2235
2236 case 0x100 ... 0x13f:
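        /* NVIC Set-Enable registers (NVIC_ISERn); reads return the enables */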
2237 offset += 0x80;
        /* fall through */
2239 case 0x180 ... 0x1bf:
2240 val = 0;
2241 startvec = 8 * (offset - 0x180) + NVIC_FIRST_IRQ;
2242
2243 for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
2244 if (s->vectors[startvec + i].enabled &&
2245 (attrs.secure || s->itns[startvec + i])) {
2246 val |= (1 << i);
2247 }
2248 }
2249 break;
2250 case 0x200 ... 0x23f:
2251 offset += 0x80;
        /* fall through */
2253 case 0x280 ... 0x2bf:
2254 val = 0;
2255 startvec = 8 * (offset - 0x280) + NVIC_FIRST_IRQ;
2256 for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
2257 if (s->vectors[startvec + i].pending &&
2258 (attrs.secure || s->itns[startvec + i])) {
2259 val |= (1 << i);
2260 }
2261 }
2262 break;
2263 case 0x300 ... 0x33f:
2264 val = 0;
2265
2266 if (!arm_feature(&s->cpu->env, ARM_FEATURE_V7)) {
2267 break;
2268 }
2269
2270 startvec = 8 * (offset - 0x300) + NVIC_FIRST_IRQ;
2271
2272 for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
2273 if (s->vectors[startvec + i].active &&
2274 (attrs.secure || s->itns[startvec + i])) {
2275 val |= (1 << i);
2276 }
2277 }
2278 break;
2279 case 0x400 ... 0x5ef:
2280 val = 0;
2281 startvec = offset - 0x400 + NVIC_FIRST_IRQ;
2282
2283 for (i = 0; i < size && startvec + i < s->num_irq; i++) {
2284 if (attrs.secure || s->itns[startvec + i]) {
2285 val |= s->vectors[startvec + i].prio << (8 * i);
2286 }
2287 }
2288 break;
2289 case 0xd18 ... 0xd1b:
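        /* System Handler Priority Register 1 (SHPR1) */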
2290 if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
2291 val = 0;
2292 break;
2293 }
        /* fall through */
2295 case 0xd1c ... 0xd23:
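        /* System Handler Priority Registers 2 and 3 (SHPR2, SHPR3) */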
2296 val = 0;
2297 for (i = 0; i < size; i++) {
2298 unsigned hdlidx = (offset - 0xd14) + i;
2299 int sbank = shpr_bank(s, hdlidx, attrs);
2300
2301 if (sbank < 0) {
2302 continue;
2303 }
2304 val = deposit32(val, i * 8, 8, get_prio(s, hdlidx, sbank));
2305 }
2306 break;
2307 case 0xd28 ... 0xd2b:
2308 if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
2309 val = 0;
2310 break;
2311 };
        /*
         * The BFSR bits [15:8] are shared between security states
         * and we store them in the NS copy.
         */
2317 val = s->cpu->env.v7m.cfsr[attrs.secure];
2318 if (!attrs.secure &&
2319 !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
2320 val &= ~R_V7M_CFSR_BFSR_MASK;
2321 } else {
2322 val |= s->cpu->env.v7m.cfsr[M_REG_NS] & R_V7M_CFSR_BFSR_MASK;
2323 }
2324 val = extract32(val, (offset - 0xd28) * 8, size * 8);
2325 break;
2326 case 0xfe0 ... 0xfff:
2327 if (offset & 3) {
2328 val = 0;
2329 } else {
2330 val = nvic_id[(offset - 0xfe0) >> 2];
2331 }
2332 break;
2333 default:
2334 if (size == 4) {
2335 val = nvic_readl(s, offset, attrs);
2336 } else {
2337 qemu_log_mask(LOG_GUEST_ERROR,
2338 "NVIC: Bad read of size %d at offset 0x%x\n",
2339 size, offset);
2340 val = 0;
2341 }
2342 }
2343
2344 trace_nvic_sysreg_read(addr, val, size);
2345 *data = val;
2346 return MEMTX_OK;
2347}
2348
2349static MemTxResult nvic_sysreg_write(void *opaque, hwaddr addr,
2350 uint64_t value, unsigned size,
2351 MemTxAttrs attrs)
2352{
2353 NVICState *s = (NVICState *)opaque;
2354 uint32_t offset = addr;
2355 unsigned i, startvec, end;
2356 unsigned setval = 0;
2357
2358 trace_nvic_sysreg_write(addr, value, size);
2359
2360 if (attrs.user && !nvic_user_access_ok(s, addr, attrs)) {
2361
2362 return MEMTX_ERROR;
2363 }
2364
2365 switch (offset) {
2366 case 0x100 ... 0x13f:
2367 offset += 0x80;
2368 setval = 1;
        /* fall through */
2370 case 0x180 ... 0x1bf:
2371 startvec = 8 * (offset - 0x180) + NVIC_FIRST_IRQ;
2372
2373 for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
2374 if (value & (1 << i) &&
2375 (attrs.secure || s->itns[startvec + i])) {
2376 s->vectors[startvec + i].enabled = setval;
2377 }
2378 }
2379 nvic_irq_update(s);
2380 goto exit_ok;
2381 case 0x200 ... 0x23f:
2382
2383
2384
2385 offset += 0x80;
2386 setval = 1;
        /* fall through */
2388 case 0x280 ... 0x2bf:
2389 startvec = 8 * (offset - 0x280) + NVIC_FIRST_IRQ;
2390
2391 for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
2392 if (value & (1 << i) &&
2393 (attrs.secure || s->itns[startvec + i])) {
2394 s->vectors[startvec + i].pending = setval;
2395 }
2396 }
2397 nvic_irq_update(s);
2398 goto exit_ok;
2399 case 0x300 ... 0x33f:
2400 goto exit_ok;
2401 case 0x400 ... 0x5ef:
2402 startvec = (offset - 0x400) + NVIC_FIRST_IRQ;
2403
2404 for (i = 0; i < size && startvec + i < s->num_irq; i++) {
2405 if (attrs.secure || s->itns[startvec + i]) {
2406 set_prio(s, startvec + i, false, (value >> (i * 8)) & 0xff);
2407 }
2408 }
2409 nvic_irq_update(s);
2410 goto exit_ok;
2411 case 0xd18 ... 0xd1b:
2412 if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
2413 goto exit_ok;
2414 }
        /* fall through */
2416 case 0xd1c ... 0xd23:
2417 for (i = 0; i < size; i++) {
2418 unsigned hdlidx = (offset - 0xd14) + i;
2419 int newprio = extract32(value, i * 8, 8);
2420 int sbank = shpr_bank(s, hdlidx, attrs);
2421
2422 if (sbank < 0) {
2423 continue;
2424 }
2425 set_prio(s, hdlidx, sbank, newprio);
2426 }
2427 nvic_irq_update(s);
2428 goto exit_ok;
2429 case 0xd28 ... 0xd2b:
2430 if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
2431 goto exit_ok;
2432 }
        /*
         * All bits are W1C, so construct 32 bit value with 0s in
         * the parts not written by the access size
         */
2436 value <<= ((offset - 0xd28) * 8);
2437
2438 if (!attrs.secure &&
2439 !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
2440
2441 value &= ~R_V7M_CFSR_BFSR_MASK;
2442 }
2443
2444 s->cpu->env.v7m.cfsr[attrs.secure] &= ~value;
2445 if (attrs.secure) {
2446
2447
2448
2449 s->cpu->env.v7m.cfsr[M_REG_NS] &= ~(value & R_V7M_CFSR_BFSR_MASK);
2450 }
2451 goto exit_ok;
2452 }
2453 if (size == 4) {
2454 nvic_writel(s, offset, value, attrs);
2455 goto exit_ok;
2456 }
2457 qemu_log_mask(LOG_GUEST_ERROR,
2458 "NVIC: Bad write of size %d at offset 0x%x\n", size, offset);
2459
2460
2461 exit_ok:
    /* Ensure any register changes are reflected in the cached hflags */
2463 arm_rebuild_hflags(&s->cpu->env);
2464 return MEMTX_OK;
2465}
2466
2467static const MemoryRegionOps nvic_sysreg_ops = {
2468 .read_with_attrs = nvic_sysreg_read,
2469 .write_with_attrs = nvic_sysreg_write,
2470 .endianness = DEVICE_NATIVE_ENDIAN,
2471};
2472
2473static MemTxResult nvic_sysreg_ns_write(void *opaque, hwaddr addr,
2474 uint64_t value, unsigned size,
2475 MemTxAttrs attrs)
2476{
2477 MemoryRegion *mr = opaque;
2478
2479 if (attrs.secure) {
        /* S accesses to the alias act like NS accesses to the real region */
2481 attrs.secure = 0;
2482 return memory_region_dispatch_write(mr, addr, value,
2483 size_memop(size) | MO_TE, attrs);
2484 } else {
        /* NS attrs are RAZ/WI for privileged, and BusFault for user */
2486 if (attrs.user) {
2487 return MEMTX_ERROR;
2488 }
2489 return MEMTX_OK;
2490 }
2491}
2492
2493static MemTxResult nvic_sysreg_ns_read(void *opaque, hwaddr addr,
2494 uint64_t *data, unsigned size,
2495 MemTxAttrs attrs)
2496{
2497 MemoryRegion *mr = opaque;
2498
2499 if (attrs.secure) {
        /* S accesses to the alias act like NS accesses to the real region */
2501 attrs.secure = 0;
2502 return memory_region_dispatch_read(mr, addr, data,
2503 size_memop(size) | MO_TE, attrs);
2504 } else {
        /* NS attrs are RAZ/WI for privileged, and BusFault for user */
2506 if (attrs.user) {
2507 return MEMTX_ERROR;
2508 }
2509 *data = 0;
2510 return MEMTX_OK;
2511 }
2512}
2513
2514static const MemoryRegionOps nvic_sysreg_ns_ops = {
2515 .read_with_attrs = nvic_sysreg_ns_read,
2516 .write_with_attrs = nvic_sysreg_ns_write,
2517 .endianness = DEVICE_NATIVE_ENDIAN,
2518};
2519
2520static MemTxResult nvic_systick_write(void *opaque, hwaddr addr,
2521 uint64_t value, unsigned size,
2522 MemTxAttrs attrs)
2523{
2524 NVICState *s = opaque;
2525 MemoryRegion *mr;
2526
2527
2528 mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0);
2529 return memory_region_dispatch_write(mr, addr, value,
2530 size_memop(size) | MO_TE, attrs);
2531}
2532
2533static MemTxResult nvic_systick_read(void *opaque, hwaddr addr,
2534 uint64_t *data, unsigned size,
2535 MemTxAttrs attrs)
2536{
2537 NVICState *s = opaque;
2538 MemoryRegion *mr;
2539
2540
2541 mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0);
2542 return memory_region_dispatch_read(mr, addr, data, size_memop(size) | MO_TE,
2543 attrs);
2544}
2545
2546static const MemoryRegionOps nvic_systick_ops = {
2547 .read_with_attrs = nvic_systick_read,
2548 .write_with_attrs = nvic_systick_write,
2549 .endianness = DEVICE_NATIVE_ENDIAN,
2550};
2551
2552
static MemTxResult ras_read(void *opaque, hwaddr addr,
                            uint64_t *data, unsigned size,
                            MemTxAttrs attrs)
{
    if (attrs.user) {
        return MEMTX_ERROR;
    }

    switch (addr) {
    case 0xe10: /* ERRIIDR */
        /* architect field = Arm; product/variant/revision 0 */
        *data = 0x43b;
        break;
    case 0xfc8: /* ERRDEVID */
        /* Minimal RAS: we implement 0 error record indexes */
        *data = 0;
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "Read RAS register offset 0x%x\n",
                      (uint32_t)addr);
        *data = 0;
        break;
    }
    return MEMTX_OK;
}

static MemTxResult ras_write(void *opaque, hwaddr addr,
                             uint64_t value, unsigned size,
                             MemTxAttrs attrs)
{
    if (attrs.user) {
        return MEMTX_ERROR;
    }

    switch (addr) {
    default:
        qemu_log_mask(LOG_UNIMP, "Write to RAS register offset 0x%x\n",
                      (uint32_t)addr);
        break;
    }
    return MEMTX_OK;
}

static const MemoryRegionOps ras_ops = {
    .read_with_attrs = ras_read,
    .write_with_attrs = ras_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/*
 * Unassigned portions of the PPB space are RAZ/WI for privileged
 * accesses, and fault for non-privileged accesses.
 */
static MemTxResult ppb_default_read(void *opaque, hwaddr addr,
                                    uint64_t *data, unsigned size,
                                    MemTxAttrs attrs)
{
    qemu_log_mask(LOG_UNIMP, "Read of unassigned area of PPB: offset 0x%x\n",
                  (uint32_t)addr);
    if (attrs.user) {
        return MEMTX_ERROR;
    }
    *data = 0;
    return MEMTX_OK;
}

static MemTxResult ppb_default_write(void *opaque, hwaddr addr,
                                     uint64_t value, unsigned size,
                                     MemTxAttrs attrs)
{
    qemu_log_mask(LOG_UNIMP, "Write of unassigned area of PPB: offset 0x%x\n",
                  (uint32_t)addr);
    if (attrs.user) {
        return MEMTX_ERROR;
    }
    return MEMTX_OK;
}

static const MemoryRegionOps ppb_default_ops = {
    .read_with_attrs = ppb_default_read,
    .write_with_attrs = ppb_default_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
};

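/*
 * Migration sanity check: reject incoming state with impossible priority
 * values for the fixed-priority exceptions or out-of-range configurable
 * priorities, then recompute the derived pending/active state.
 */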
static int nvic_post_load(void *opaque, int version_id)
{
    NVICState *s = opaque;
    unsigned i;
    int resetprio;

    /* Check for out of range priority settings */
    resetprio = arm_feature(&s->cpu->env, ARM_FEATURE_V8) ? -4 : -3;

    if (s->vectors[ARMV7M_EXCP_RESET].prio != resetprio ||
        s->vectors[ARMV7M_EXCP_NMI].prio != -2 ||
        s->vectors[ARMV7M_EXCP_HARD].prio != -1) {
        return 1;
    }
    for (i = ARMV7M_EXCP_MEM; i < s->num_irq; i++) {
        if (s->vectors[i].prio & ~0xff) {
            return 1;
        }
    }

    nvic_recompute_state(s);

    return 0;
}

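/* Per-vector exception state migrated for each entry in the vectors arrays */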
static const VMStateDescription vmstate_VecInfo = {
    .name = "armv7m_nvic_info",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT16(prio, VecInfo),
        VMSTATE_UINT8(enabled, VecInfo),
        VMSTATE_UINT8(pending, VecInfo),
        VMSTATE_UINT8(active, VecInfo),
        VMSTATE_UINT8(level, VecInfo),
        VMSTATE_END_OF_LIST()
    }
};

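/*
 * The "m-security" subsection is only migrated when the CPU implements
 * the Security Extension; it carries the Secure-banked vectors, the
 * Secure PRIGROUP value and the ITNS interrupt-targeting bits.
 */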
static bool nvic_security_needed(void *opaque)
{
    NVICState *s = opaque;

    return arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY);
}

static int nvic_security_post_load(void *opaque, int version_id)
{
    NVICState *s = opaque;
    int i;

    /* Check for out of range priority settings */
    if (s->sec_vectors[ARMV7M_EXCP_HARD].prio != -1
        && s->sec_vectors[ARMV7M_EXCP_HARD].prio != -3) {
        /*
         * Secure HardFault priority is -1 when AIRCR.BFHFNMINS is 0 and
         * -3 when it is 1; we don't cross-check against the AIRCR value
         * here, so just reject anything outside those two values.
         */
        return 1;
    }
    for (i = ARMV7M_EXCP_MEM; i < ARRAY_SIZE(s->sec_vectors); i++) {
        if (s->sec_vectors[i].prio & ~0xff) {
            return 1;
        }
    }
    return 0;
}


static const VMStateDescription vmstate_nvic_security = {
    .name = "armv7m_nvic/m-security",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = nvic_security_needed,
    .post_load = &nvic_security_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(sec_vectors, NVICState, NVIC_INTERNAL_VECTORS, 1,
                             vmstate_VecInfo, VecInfo),
        VMSTATE_UINT32(prigroup[M_REG_S], NVICState),
        VMSTATE_BOOL_ARRAY(itns, NVICState, NVIC_MAX_VECTORS),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_nvic = {
    .name = "armv7m_nvic",
    .version_id = 4,
    .minimum_version_id = 4,
    .post_load = &nvic_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(vectors, NVICState, NVIC_MAX_VECTORS, 1,
                             vmstate_VecInfo, VecInfo),
        VMSTATE_UINT32(prigroup[M_REG_NS], NVICState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_nvic_security,
        NULL
    }
};

static Property props_nvic[] = {
    /* Number of external IRQ lines (so excluding the 16 internal exceptions) */
    DEFINE_PROP_UINT32("num-irq", NVICState, num_irq, 64),
    DEFINE_PROP_END_OF_LIST()
};

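/*
 * Reset the NVIC to its architectural reset state: all vectors disabled
 * and inactive except the handful of always-enabled exceptions, fixed
 * priorities for Reset/NMI/HardFault, and (without the Security
 * Extension) all external interrupts targeting NonSecure.
 */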
static void armv7m_nvic_reset(DeviceState *dev)
{
    int resetprio;
    NVICState *s = NVIC(dev);

    memset(s->vectors, 0, sizeof(s->vectors));
    memset(s->sec_vectors, 0, sizeof(s->sec_vectors));
    s->prigroup[M_REG_NS] = 0;
    s->prigroup[M_REG_S] = 0;

    s->vectors[ARMV7M_EXCP_NMI].enabled = 1;
    /*
     * MEM, BUS and USAGE are enabled via the System Handler Control
     * register, so they reset to disabled here.
     */
    s->vectors[ARMV7M_EXCP_SVC].enabled = 1;
    s->vectors[ARMV7M_EXCP_PENDSV].enabled = 1;
    s->vectors[ARMV7M_EXCP_SYSTICK].enabled = 1;

    /* DebugMonitor is enabled via DEMCR.MON_EN */
    s->vectors[ARMV7M_EXCP_DEBUG].enabled = 0;

    resetprio = arm_feature(&s->cpu->env, ARM_FEATURE_V8) ? -4 : -3;
    s->vectors[ARMV7M_EXCP_RESET].prio = resetprio;
    s->vectors[ARMV7M_EXCP_NMI].prio = -2;
    s->vectors[ARMV7M_EXCP_HARD].prio = -1;

    if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
        s->sec_vectors[ARMV7M_EXCP_HARD].enabled = 1;
        s->sec_vectors[ARMV7M_EXCP_SVC].enabled = 1;
        s->sec_vectors[ARMV7M_EXCP_PENDSV].enabled = 1;
        s->sec_vectors[ARMV7M_EXCP_SYSTICK].enabled = 1;

        /* AIRCR.BFHFNMINS resets to 0, so Secure HardFault is priority -1 */
        s->sec_vectors[ARMV7M_EXCP_HARD].prio = -1;
        /* ...and NonSecure HardFault is disabled */
        s->vectors[ARMV7M_EXCP_HARD].enabled = 0;
    } else {
        s->vectors[ARMV7M_EXCP_HARD].enabled = 1;
    }

    /*
     * Nothing is pending or active at reset; cache the "no exception"
     * priority in the derived state fields.
     */
    s->exception_prio = NVIC_NOEXC_PRIO;
    s->vectpending = 0;
    s->vectpending_is_s_banked = false;
    s->vectpending_prio = NVIC_NOEXC_PRIO;

    if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
        memset(s->itns, 0, sizeof(s->itns));
    } else {
        /*
         * Without the Security Extension there is no ITNS register;
         * treat all external interrupts as targeting NonSecure.
         */
        int i;

        for (i = NVIC_FIRST_IRQ; i < ARRAY_SIZE(s->itns); i++) {
            s->itns[i] = true;
        }
    }

    /*
     * We updated state that feeds into the CPU's cached hflags, so make
     * sure they are recomputed.
     */
    arm_rebuild_hflags(&s->cpu->env);
}

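/*
 * GPIO input handler wired to the SysTick devices' IRQ outputs in realize():
 * input n selects the bank, M_REG_NS or M_REG_S, whose SysTick exception
 * should be pended.
 */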
static void nvic_systick_trigger(void *opaque, int n, int level)
{
    NVICState *s = opaque;

    if (level) {
        /*
         * SysTick just asked us to pend its exception. (This is
         * different from an external interrupt line's behaviour.)
         */
        armv7m_nvic_set_pending(s, ARMV7M_EXCP_SYSTICK, n);
    }
}

static void armv7m_nvic_realize(DeviceState *dev, Error **errp)
{
    NVICState *s = NVIC(dev);

    /* We need an M-profile CPU to have been plugged in before realize */
    if (!s->cpu || !arm_feature(&s->cpu->env, ARM_FEATURE_M)) {
        error_setg(errp, "The NVIC can only be used with a Cortex-M CPU");
        return;
    }

    if (s->num_irq > NVIC_MAX_IRQ) {
        error_setg(errp, "num-irq %d exceeds NVIC maximum", s->num_irq);
        return;
    }

    qdev_init_gpio_in(dev, set_irq_level, s->num_irq);

    /* include space for internal exception vectors */
    s->num_irq += NVIC_FIRST_IRQ;

    s->num_prio_bits = arm_feature(&s->cpu->env, ARM_FEATURE_V7) ? 8 : 2;

    if (!sysbus_realize(SYS_BUS_DEVICE(&s->systick[M_REG_NS]), errp)) {
        return;
    }
    sysbus_connect_irq(SYS_BUS_DEVICE(&s->systick[M_REG_NS]), 0,
                       qdev_get_gpio_in_named(dev, "systick-trigger",
                                              M_REG_NS));

    if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
        /*
         * We couldn't create the secure systick device in instance_init,
         * as we didn't know then whether the CPU has the Security
         * Extension; so we do it here.
         */
        object_initialize_child(OBJECT(dev), "systick-reg-s",
                                &s->systick[M_REG_S], TYPE_SYSTICK);

        if (!sysbus_realize(SYS_BUS_DEVICE(&s->systick[M_REG_S]), errp)) {
            return;
        }
        sysbus_connect_irq(SYS_BUS_DEVICE(&s->systick[M_REG_S]), 0,
                           qdev_get_gpio_in_named(dev, "systick-trigger",
                                                  M_REG_S));
    }

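    /*
     * This device provides a single sysbus MMIO region covering the whole
     * System PPB space, built up below as:
     *  - a background region covering all 1MB, handling otherwise
     *    unassigned offsets (RAZ/WI for privileged, fault for unprivileged)
     *  - the NVIC/System Control Space registers at offset 0xe000
     *  - the SysTick register window at 0xe010, layered above the SCS
     *  - for v8M, NS aliases of the SCS and SysTick windows at 0x2e000
     *    and 0x2e010, which Secure code can use to reach the NS bank
     *  - when FEAT_RAS is present, a minimal RAS register block at 0x5000
     */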
    memory_region_init(&s->container, OBJECT(s), "nvic", 0x100000);
    memory_region_init_io(&s->defaultmem, OBJECT(s), &ppb_default_ops, s,
                          "nvic-default", 0x100000);
    memory_region_add_subregion_overlap(&s->container, 0, &s->defaultmem, -1);
    memory_region_init_io(&s->sysregmem, OBJECT(s), &nvic_sysreg_ops, s,
                          "nvic_sysregs", 0x1000);
    memory_region_add_subregion(&s->container, 0xe000, &s->sysregmem);

    memory_region_init_io(&s->systickmem, OBJECT(s),
                          &nvic_systick_ops, s,
                          "nvic_systick", 0xe0);

    memory_region_add_subregion_overlap(&s->container, 0xe010,
                                        &s->systickmem, 1);

    if (arm_feature(&s->cpu->env, ARM_FEATURE_V8)) {
        memory_region_init_io(&s->sysreg_ns_mem, OBJECT(s),
                              &nvic_sysreg_ns_ops, &s->sysregmem,
                              "nvic_sysregs_ns", 0x1000);
        memory_region_add_subregion(&s->container, 0x2e000, &s->sysreg_ns_mem);
        memory_region_init_io(&s->systick_ns_mem, OBJECT(s),
                              &nvic_sysreg_ns_ops, &s->systickmem,
                              "nvic_systick_ns", 0xe0);
        memory_region_add_subregion_overlap(&s->container, 0x2e010,
                                            &s->systick_ns_mem, 1);
    }

    if (cpu_isar_feature(aa32_ras, s->cpu)) {
        memory_region_init_io(&s->ras_mem, OBJECT(s),
                              &ras_ops, s, "nvic_ras", 0x1000);
        memory_region_add_subregion(&s->container, 0x5000, &s->ras_mem);
    }

    sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->container);
}

static void armv7m_nvic_instance_init(Object *obj)
{
    DeviceState *dev = DEVICE(obj);
    NVICState *nvic = NVIC(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);

    object_initialize_child(obj, "systick-reg-ns", &nvic->systick[M_REG_NS],
                            TYPE_SYSTICK);
    /*
     * We don't initialize the Secure SysTick here, as we don't yet know
     * whether we will need it; that is done in realize if the CPU has
     * the Security Extension.
     */

    sysbus_init_irq(sbd, &nvic->excpout);
    qdev_init_gpio_out_named(dev, &nvic->sysresetreq, "SYSRESETREQ", 1);
    qdev_init_gpio_in_named(dev, nvic_systick_trigger, "systick-trigger",
                            M_REG_NUM_BANKS);
    qdev_init_gpio_in_named(dev, nvic_nmi_trigger, "NMI", 1);
}

static void armv7m_nvic_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->vmsd = &vmstate_nvic;
    device_class_set_props(dc, props_nvic);
    dc->reset = armv7m_nvic_reset;
    dc->realize = armv7m_nvic_realize;
}

static const TypeInfo armv7m_nvic_info = {
    .name = TYPE_NVIC,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_init = armv7m_nvic_instance_init,
    .instance_size = sizeof(NVICState),
    .class_init = armv7m_nvic_class_init,
    .class_size = sizeof(SysBusDeviceClass),
};

static void armv7m_nvic_register_types(void)
{
    type_register_static(&armv7m_nvic_info);
}

type_init(armv7m_nvic_register_types)