/*
 * ARM Nested Vectored Interrupt Controller
 *
 * Copyright (c) 2006-2007 CodeSourcery.
 * Written by Paul Brook
 *
 * This code is licensed under the GPL.
 *
 * The ARMv7M System controller is fairly tightly tied in with the
 * NVIC.  Its independent existence somewhat implies the possibility
 * of QEMU implementing a non-NVIC ARMv7M CPU.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "qemu/timer.h"
#include "hw/intc/armv7m_nvic.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "sysemu/runstate.h"
#include "target/arm/cpu.h"
#include "exec/exec-all.h"
#include "exec/memop.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "trace.h"

/*
 * IRQ number counting:
 *
 *  - the "num-irq" property counts external interrupt lines only
 *  - NVICState::num_irq counts the total number of exceptions, i.e.
 *    the 16 internal vectors (including the unused vector 0) plus
 *    the external interrupts
 *  - NVIC_MAX_IRQ is the highest permitted number of external lines;
 *    NVIC_MAX_VECTORS is the highest permitted number of exceptions
 *
 * External IRQ n is exception number NVIC_FIRST_IRQ + n, so loops over
 * all exceptions run from 1 to s->num_irq, skipping the unused vector 0.
 */

#define NVIC_FIRST_IRQ NVIC_INTERNAL_VECTORS
#define NVIC_MAX_IRQ (NVIC_MAX_VECTORS - NVIC_FIRST_IRQ)

/* Effective running priority of the CPU when no exception is active */
#define NVIC_NOEXC_PRIO 0x100
/* Maximum priority of non-secure exceptions when AIRCR.PRIS is set */
#define NVIC_NS_PRIO_LIMIT 0x80

static const uint8_t nvic_id[] = {
    0x00, 0xb0, 0x1b, 0x00, 0x0d, 0xe0, 0x05, 0xb1
};

static void signal_sysresetreq(NVICState *s)
{
    if (qemu_irq_is_connected(s->sysresetreq)) {
        qemu_irq_pulse(s->sysresetreq);
    } else {
        /*
         * Default behaviour if the SoC doesn't wire up SYSRESETREQ:
         * a request to reset the CPU should reset the whole system.
         */
        qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
    }
}

static int nvic_pending_prio(NVICState *s)
{
    /*
     * Return the group priority of the current pending interrupt,
     * or NVIC_NOEXC_PRIO if no interrupt is pending.
     */
    return s->vectpending_prio;
}

/*
 * Return the value of the ICSR RETTOBASE bit:
 * 1 if there is exactly one active exception,
 * 0 if there is more than one active exception,
 * UNKNOWN if there are no active exceptions (we choose 1, which
 * matches the choice the Cortex-M3 is documented as making).
 *
 * NB: some versions of the documentation describe this as counting
 * "active exceptions other than the one being returned from"; since
 * that is only meaningful while we are handling an exception (when
 * there is always at least one active exception), simply counting
 * all active exceptions gives the same answer.
 */
static bool nvic_rettobase(NVICState *s)
{
    int irq, nhand = 0;
    bool check_sec = arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY);

    for (irq = ARMV7M_EXCP_RESET; irq < s->num_irq; irq++) {
        if (s->vectors[irq].active ||
            (check_sec && irq < NVIC_INTERNAL_VECTORS &&
             s->sec_vectors[irq].active)) {
            nhand++;
            if (nhand == 2) {
                return false;
            }
        }
    }

    return true;
}

/*
 * Return the value of the ICSR ISRPENDING bit:
 * 1 if an external interrupt is pending, else 0.
 */
static bool nvic_isrpending(NVICState *s)
{
    int irq;

    /*
     * We can shortcut if the highest priority pending exception
     * happens to be an external interrupt; otherwise we have to
     * scan the whole vectors[] array.
     */
    if (s->vectpending > NVIC_FIRST_IRQ) {
        return true;
    }

    for (irq = NVIC_FIRST_IRQ; irq < s->num_irq; irq++) {
        if (s->vectors[irq].pending) {
            return true;
        }
    }
    return false;
}

static bool exc_is_banked(int exc)
{
    /*
     * Return true if this is one of the limited set of exceptions which
     * are banked (and thus have state in sec_vectors[])
     */
    return exc == ARMV7M_EXCP_HARD ||
        exc == ARMV7M_EXCP_MEM ||
        exc == ARMV7M_EXCP_USAGE ||
        exc == ARMV7M_EXCP_SVC ||
        exc == ARMV7M_EXCP_PENDSV ||
        exc == ARMV7M_EXCP_SYSTICK;
}

/*
 * Return a mask word which clears the subpriority bits from a raw
 * priority value, using the PRIGROUP setting for the specified
 * security state.
 */
static inline uint32_t nvic_gprio_mask(NVICState *s, bool secure)
{
    return ~0U << (s->prigroup[secure] + 1);
}

static bool exc_targets_secure(NVICState *s, int exc)
{
    /* Return true if this non-banked exception targets Secure state */
    if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
        return false;
    }

    if (exc >= NVIC_FIRST_IRQ) {
        return !s->itns[exc];
    }

    /* Function shouldn't be called for banked exceptions. */
    assert(!exc_is_banked(exc));

    switch (exc) {
    case ARMV7M_EXCP_NMI:
    case ARMV7M_EXCP_BUS:
        return !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
    case ARMV7M_EXCP_SECURE:
        return true;
    case ARMV7M_EXCP_DEBUG:
        /* TODO: controlled by DEMCR.SDME, which we don't implement */
        return false;
    default:
        /*
         * Reset, and reserved (unused) low exception numbers.
         * We'll get called by code that loops through all the exception
         * numbers, but it doesn't matter what we return here as these
         * non-existent exceptions will never be pended or active.
         */
        return true;
    }
}

static int exc_group_prio(NVICState *s, int rawprio, bool targets_secure)
{
    /*
     * Return the group priority for this exception, given its raw
     * (group-and-subgroup) priority value and whether it is targeting
     * Secure state or not.  Fixed negative priorities pass through
     * unchanged.
     */
    if (rawprio < 0) {
        return rawprio;
    }
    rawprio &= nvic_gprio_mask(s, targets_secure);
    /*
     * AIRCR.PRIS squashes NonSecure priorities into the lower half
     * of the total priority range.
     */
    if (!targets_secure &&
        (s->cpu->env.v7m.aircr & R_V7M_AIRCR_PRIS_MASK)) {
        rawprio = (rawprio >> 1) + NVIC_NS_PRIO_LIMIT;
    }
    return rawprio;
}

/*
 * Recompute vectpending and exception_prio for a CPU which implements
 * the Security extension.
 */
static void nvic_recompute_state_secure(NVICState *s)
{
    int i, bank;
    int pend_prio = NVIC_NOEXC_PRIO;
    int active_prio = NVIC_NOEXC_PRIO;
    int pend_irq = 0;
    bool pending_is_s_banked = false;
    int pend_subprio = 0;

    /*
     * Find the pending exception with the highest precedence.
     * Precedence is taken in this order:
     *  - lowest group priority; if both the same then
     *  - lowest subpriority; if both the same then
     *  - lowest exception number; if both the same (i.e. banked) then
     *  - the Secure instance of the exception
     * which is why the inner loop checks the Secure bank first.
     */
    for (i = 1; i < s->num_irq; i++) {
        for (bank = M_REG_S; bank >= M_REG_NS; bank--) {
            VecInfo *vec;
            int prio, subprio;
            bool targets_secure;

            if (bank == M_REG_S) {
                if (!exc_is_banked(i)) {
                    continue;
                }
                vec = &s->sec_vectors[i];
                targets_secure = true;
            } else {
                vec = &s->vectors[i];
                targets_secure = !exc_is_banked(i) && exc_targets_secure(s, i);
            }

            prio = exc_group_prio(s, vec->prio, targets_secure);
            subprio = vec->prio & ~nvic_gprio_mask(s, targets_secure);
            if (vec->enabled && vec->pending &&
                ((prio < pend_prio) ||
                 (prio == pend_prio && prio >= 0 && subprio < pend_subprio))) {
                pend_prio = prio;
                pend_subprio = subprio;
                pend_irq = i;
                pending_is_s_banked = (bank == M_REG_S);
            }
            if (vec->active && prio < active_prio) {
                active_prio = prio;
            }
        }
    }

    s->vectpending_is_s_banked = pending_is_s_banked;
    s->vectpending = pend_irq;
    s->vectpending_prio = pend_prio;
    s->exception_prio = active_prio;

    trace_nvic_recompute_state_secure(s->vectpending,
                                      s->vectpending_is_s_banked,
                                      s->vectpending_prio,
                                      s->exception_prio);
}

/* Recompute vectpending and exception_prio */
static void nvic_recompute_state(NVICState *s)
{
    int i;
    int pend_prio = NVIC_NOEXC_PRIO;
    int active_prio = NVIC_NOEXC_PRIO;
    int pend_irq = 0;

    /*
     * In theory this code could be written to handle both the Secure
     * and NonSecure cases, but the non-security case is much simpler,
     * so we keep it as a separate fast path and defer to the full
     * calculation when the Security extension is present.
     */
    if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
        nvic_recompute_state_secure(s);
        return;
    }

    for (i = 1; i < s->num_irq; i++) {
        VecInfo *vec = &s->vectors[i];

        if (vec->enabled && vec->pending && vec->prio < pend_prio) {
            pend_prio = vec->prio;
            pend_irq = i;
        }
        if (vec->active && vec->prio < active_prio) {
            active_prio = vec->prio;
        }
    }

    if (active_prio > 0) {
        active_prio &= nvic_gprio_mask(s, false);
    }

    if (pend_prio > 0) {
        pend_prio &= nvic_gprio_mask(s, false);
    }

    s->vectpending = pend_irq;
    s->vectpending_prio = pend_prio;
    s->exception_prio = active_prio;

    trace_nvic_recompute_state(s->vectpending,
                               s->vectpending_prio,
                               s->exception_prio);
}

/*
 * Return the current execution priority of the CPU (the equivalent of
 * the pseudocode ExecutionPriority function), taking BASEPRI, PRIMASK
 * and FAULTMASK for both security states into account.
 * NVIC_NOEXC_PRIO means no priority boosting is in effect.
 */
static inline int nvic_exec_prio(NVICState *s)
{
    CPUARMState *env = &s->cpu->env;
    int running = NVIC_NOEXC_PRIO;

    if (env->v7m.basepri[M_REG_NS] > 0) {
        running = exc_group_prio(s, env->v7m.basepri[M_REG_NS], M_REG_NS);
    }

    if (env->v7m.basepri[M_REG_S] > 0) {
        int basepri = exc_group_prio(s, env->v7m.basepri[M_REG_S], M_REG_S);
        if (running > basepri) {
            running = basepri;
        }
    }

    if (env->v7m.primask[M_REG_NS]) {
        if (env->v7m.aircr & R_V7M_AIRCR_PRIS_MASK) {
            if (running > NVIC_NS_PRIO_LIMIT) {
                running = NVIC_NS_PRIO_LIMIT;
            }
        } else {
            running = 0;
        }
    }

    if (env->v7m.primask[M_REG_S]) {
        running = 0;
    }

    if (env->v7m.faultmask[M_REG_NS]) {
        if (env->v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
            running = -1;
        } else {
            if (env->v7m.aircr & R_V7M_AIRCR_PRIS_MASK) {
                if (running > NVIC_NS_PRIO_LIMIT) {
                    running = NVIC_NS_PRIO_LIMIT;
                }
            } else {
                running = 0;
            }
        }
    }

    if (env->v7m.faultmask[M_REG_S]) {
        running = (env->v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) ? -3 : -1;
    }

    /* consider priority of active handler */
    return MIN(running, s->exception_prio);
}

bool armv7m_nvic_neg_prio_requested(void *opaque, bool secure)
{
    /*
     * Return true if the requested execution priority is negative
     * for the specified security state, ie that security state
     * has an active NMI or HardFault or has set its FAULTMASK.
     * Note that this is not the same as whether the execution
     * priority is actually negative (for instance AIRCR.PRIS may
     * mean we don't allow FAULTMASK_NS to actually make the execution
     * priority negative). Compare pseudocode IsReqExcPriNeg().
     */
    NVICState *s = opaque;

    if (s->cpu->env.v7m.faultmask[secure]) {
        return true;
    }

    if (secure ? s->sec_vectors[ARMV7M_EXCP_HARD].active :
        s->vectors[ARMV7M_EXCP_HARD].active) {
        return true;
    }

    if (s->vectors[ARMV7M_EXCP_NMI].active &&
        exc_targets_secure(s, ARMV7M_EXCP_NMI) == secure) {
        return true;
    }

    return false;
}

bool armv7m_nvic_can_take_pending_exception(void *opaque)
{
    NVICState *s = opaque;

    return nvic_exec_prio(s) > nvic_pending_prio(s);
}

int armv7m_nvic_raw_execution_priority(void *opaque)
{
    NVICState *s = opaque;

    return s->exception_prio;
}

/*
 * Set the priority of exception "irq" in the bank selected by "secure".
 * The caller must call nvic_irq_update() afterwards; we assert if this
 * is used for Reset, NMI or HardFault, whose priorities are fixed.
 */
static void set_prio(NVICState *s, unsigned irq, bool secure, uint8_t prio)
{
    assert(irq > ARMV7M_EXCP_NMI); /* only use for configurable prios */
    assert(irq < s->num_irq);

    prio &= MAKE_64BIT_MASK(8 - s->num_prio_bits, s->num_prio_bits);

    if (secure) {
        assert(exc_is_banked(irq));
        s->sec_vectors[irq].prio = prio;
    } else {
        s->vectors[irq].prio = prio;
    }

    trace_nvic_set_prio(irq, secure, prio);
}

/*
 * Return the current raw priority register value of exception "irq"
 * in the bank selected by "secure".
 */
static int get_prio(NVICState *s, unsigned irq, bool secure)
{
    assert(irq > ARMV7M_EXCP_NMI); /* only use for configurable prios */
    assert(irq < s->num_irq);

    if (secure) {
        assert(exc_is_banked(irq));
        return s->sec_vectors[irq].prio;
    } else {
        return s->vectors[irq].prio;
    }
}

/*
 * Recompute state and assert the IRQ line accordingly.
 * Must be called after changes to:
 *  vec->active, vec->enabled, vec->pending or vec->prio for any vector,
 *  prigroup
 */
static void nvic_irq_update(NVICState *s)
{
    int lvl;
    int pend_prio;

    nvic_recompute_state(s);
    pend_prio = nvic_pending_prio(s);

    /*
     * Raise the output line if the pending exception would preempt the
     * currently active ones.  We deliberately ignore BASEPRI, FAULTMASK
     * and PRIMASK here: those are checked when the CPU decides whether
     * to actually take the exception, and changes to them cause this
     * state to be re-evaluated.
     */
    lvl = (pend_prio < s->exception_prio);
    trace_nvic_irq_update(s->vectpending, pend_prio, s->exception_prio, lvl);
    qemu_set_irq(s->excpout, lvl);
}

/*
 * armv7m_nvic_clear_pending: mark the specified exception as not pending
 * @opaque: the NVIC
 * @irq: the exception number to mark as not pending
 * @secure: false for non-banked exceptions or for the nonsecure
 * version of a banked exception, true for the secure version of a banked
 * exception.
 *
 * Marks the specified exception as not pending. Note that we will assert()
 * if @secure is true and @irq does not specify one of the fixed set
 * of architecturally banked exceptions.
 */
static void armv7m_nvic_clear_pending(void *opaque, int irq, bool secure)
{
    NVICState *s = (NVICState *)opaque;
    VecInfo *vec;

    assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);

    if (secure) {
        assert(exc_is_banked(irq));
        vec = &s->sec_vectors[irq];
    } else {
        vec = &s->vectors[irq];
    }
    trace_nvic_clear_pending(irq, secure, vec->enabled, vec->prio);
    if (vec->pending) {
        vec->pending = 0;
        nvic_irq_update(s);
    }
}

static void do_armv7m_nvic_set_pending(void *opaque, int irq, bool secure,
                                       bool derived)
{
    /*
     * Pend an exception, including possibly escalating it to HardFault.
     *
     * This function handles both "normal" pending of interrupts and
     * exceptions, and also derived exceptions (ones which occur as
     * a result of trying to take some other exception).
     *
     * If derived == true, the caller guarantees that we are part way
     * through trying to take an exception (but have not yet called
     * armv7m_nvic_acknowledge_irq() to make it active), and so:
     *  - s->vectpending is the "original exception" we were trying to take
     *  - irq is the "derived exception"
     *  - nvic_exec_prio(s) gives the priority before exception entry
     * Here we handle the prioritization logic which the pseudocode puts
     * in the DerivedLateArrival() function.
     */
    NVICState *s = (NVICState *)opaque;
    bool banked = exc_is_banked(irq);
    VecInfo *vec;
    bool targets_secure;

    assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
    assert(!secure || banked);

    vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq];

    targets_secure = banked ? secure : exc_targets_secure(s, irq);

    trace_nvic_set_pending(irq, secure, targets_secure,
                           derived, vec->enabled, vec->prio);

    if (derived) {
        /* Derived exceptions are always synchronous. */
        assert(irq >= ARMV7M_EXCP_HARD && irq < ARMV7M_EXCP_PENDSV);

        if (irq == ARMV7M_EXCP_DEBUG &&
            exc_group_prio(s, vec->prio, secure) >= nvic_exec_prio(s)) {
            /*
             * DebugMonitor exception is simply ignored if it is lower
             * priority than the original exception we were taking.
             */
            return;
        }

        if (irq == ARMV7M_EXCP_HARD && vec->prio >= s->vectpending_prio) {
            /*
             * A derived HardFault which is not higher priority than the
             * original exception cannot be taken: this is a Lockup
             * condition due to a guest bug.  We don't model Lockup, so
             * report it via cpu_abort() instead.
             */
            cpu_abort(&s->cpu->parent_obj,
                      "Lockup: can't take terminal derived exception "
                      "(original exception priority %d)\n",
                      s->vectpending_prio);
        }

        /*
         * We now continue on to the normal case of pending the derived
         * exception: it will preempt the original exception if it is
         * higher priority, or otherwise be taken after the original
         * handler completes.
         */
    }

    if (irq >= ARMV7M_EXCP_HARD && irq < ARMV7M_EXCP_PENDSV) {
        /*
         * Synchronous faults which are either disabled or whose group
         * priority is insufficient to preempt the current execution
         * priority escalate to HardFault.  External interrupts, PendSV
         * and SysTick are never escalated.
         */
        int running = nvic_exec_prio(s);
        bool escalate = false;

        if (exc_group_prio(s, vec->prio, secure) >= running) {
            trace_nvic_escalate_prio(irq, vec->prio, running);
            escalate = true;
        } else if (!vec->enabled) {
            trace_nvic_escalate_disabled(irq);
            escalate = true;
        }

        if (escalate) {
            /*
             * We need to escalate this exception to a synchronous HardFault.
             * If BFHFNMINS is set then we escalate to the banked HardFault
             * for the target security state of the original exception;
             * otherwise we take a Secure HardFault.
             */
            irq = ARMV7M_EXCP_HARD;
            if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY) &&
                (targets_secure ||
                 !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK))) {
                vec = &s->sec_vectors[irq];
            } else {
                vec = &s->vectors[irq];
            }
            if (running <= vec->prio) {
                /*
                 * We want to escalate to HardFault but we can't take the
                 * synchronous HardFault at this point either. This is a
                 * Lockup condition due to a guest bug. We don't model
                 * Lockup, so report via cpu_abort() instead.
                 */
                cpu_abort(&s->cpu->parent_obj,
                          "Lockup: can't escalate %d to HardFault "
                          "(current priority %d)\n", irq, running);
            }

            /* HF may be banked but there is only one shared HFSR */
            s->cpu->env.v7m.hfsr |= R_V7M_HFSR_FORCED_MASK;
        }
    }

    if (!vec->pending) {
        vec->pending = 1;
        nvic_irq_update(s);
    }
}

void armv7m_nvic_set_pending(void *opaque, int irq, bool secure)
{
    do_armv7m_nvic_set_pending(opaque, irq, secure, false);
}

void armv7m_nvic_set_pending_derived(void *opaque, int irq, bool secure)
{
    do_armv7m_nvic_set_pending(opaque, irq, secure, true);
}

679void armv7m_nvic_set_pending_lazyfp(void *opaque, int irq, bool secure)
680{
681
682
683
684
685
686
687 NVICState *s = (NVICState *)opaque;
688 bool banked = exc_is_banked(irq);
689 VecInfo *vec;
690 bool targets_secure;
691 bool escalate = false;
692
693
694
695
696
697 uint32_t fpccr_s = s->cpu->env.v7m.fpccr[M_REG_S];
698 uint32_t fpccr = s->cpu->env.v7m.fpccr[secure];
699
700 assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
701 assert(!secure || banked);
702
703 vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq];
704
705 targets_secure = banked ? secure : exc_targets_secure(s, irq);
706
707 switch (irq) {
708 case ARMV7M_EXCP_DEBUG:
709 if (!(fpccr_s & R_V7M_FPCCR_MONRDY_MASK)) {
710
711 return;
712 }
713 break;
714 case ARMV7M_EXCP_MEM:
715 escalate = !(fpccr & R_V7M_FPCCR_MMRDY_MASK);
716 break;
717 case ARMV7M_EXCP_USAGE:
718 escalate = !(fpccr & R_V7M_FPCCR_UFRDY_MASK);
719 break;
720 case ARMV7M_EXCP_BUS:
721 escalate = !(fpccr_s & R_V7M_FPCCR_BFRDY_MASK);
722 break;
723 case ARMV7M_EXCP_SECURE:
724 escalate = !(fpccr_s & R_V7M_FPCCR_SFRDY_MASK);
725 break;
726 default:
727 g_assert_not_reached();
728 }
729
730 if (escalate) {
731
732
733
734
735 irq = ARMV7M_EXCP_HARD;
736 if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY) &&
737 (targets_secure ||
738 !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK))) {
739 vec = &s->sec_vectors[irq];
740 } else {
741 vec = &s->vectors[irq];
742 }
743 }
744
745 if (!vec->enabled ||
746 nvic_exec_prio(s) <= exc_group_prio(s, vec->prio, secure)) {
747 if (!(fpccr_s & R_V7M_FPCCR_HFRDY_MASK)) {
748
749
750
751
752 cpu_abort(&s->cpu->parent_obj,
753 "Lockup: can't escalate to HardFault during "
754 "lazy FP register stacking\n");
755 }
756 }
757
758 if (escalate) {
759 s->cpu->env.v7m.hfsr |= R_V7M_HFSR_FORCED_MASK;
760 }
761 if (!vec->pending) {
762 vec->pending = 1;
763
764
765
766
767
768
769
770
771 nvic_recompute_state(s);
772 }
773}

/* Make pending IRQ active.  */
void armv7m_nvic_acknowledge_irq(void *opaque)
{
    NVICState *s = (NVICState *)opaque;
    CPUARMState *env = &s->cpu->env;
    const int pending = s->vectpending;
    const int running = nvic_exec_prio(s);
    VecInfo *vec;

    assert(pending > ARMV7M_EXCP_RESET && pending < s->num_irq);

    if (s->vectpending_is_s_banked) {
        vec = &s->sec_vectors[pending];
    } else {
        vec = &s->vectors[pending];
    }

    assert(vec->enabled);
    assert(vec->pending);

    assert(s->vectpending_prio < running);

    trace_nvic_acknowledge_irq(pending, s->vectpending_prio);

    vec->active = 1;
    vec->pending = 0;

    write_v7m_exception(env, s->vectpending);

    nvic_irq_update(s);
}

static bool vectpending_targets_secure(NVICState *s)
{
    /* Return true if s->vectpending targets Secure state */
    if (s->vectpending_is_s_banked) {
        return true;
    }
    return !exc_is_banked(s->vectpending) &&
        exc_targets_secure(s, s->vectpending);
}

void armv7m_nvic_get_pending_irq_info(void *opaque,
                                      int *pirq, bool *ptargets_secure)
{
    NVICState *s = (NVICState *)opaque;
    const int pending = s->vectpending;
    bool targets_secure;

    assert(pending > ARMV7M_EXCP_RESET && pending < s->num_irq);

    targets_secure = vectpending_targets_secure(s);

    trace_nvic_get_pending_irq_info(pending, targets_secure);

    *ptargets_secure = targets_secure;
    *pirq = pending;
}

834int armv7m_nvic_complete_irq(void *opaque, int irq, bool secure)
835{
836 NVICState *s = (NVICState *)opaque;
837 VecInfo *vec = NULL;
838 int ret = 0;
839
840 assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
841
842 trace_nvic_complete_irq(irq, secure);
843
844 if (secure && exc_is_banked(irq)) {
845 vec = &s->sec_vectors[irq];
846 } else {
847 vec = &s->vectors[irq];
848 }
849
850
851
852
853
854
855 if (!exc_is_banked(irq) && exc_targets_secure(s, irq) != secure) {
856
857
858
859
860
861
862 ret = -1;
863 vec = NULL;
864 } else if (!vec->active) {
865
866 ret = -1;
867 } else {
868
869 ret = nvic_rettobase(s);
870 }
871
872
873
874
875
876
877
878
879
880 if (arm_feature(&s->cpu->env, ARM_FEATURE_V8)) {
881 switch (armv7m_nvic_raw_execution_priority(s)) {
882 case -1:
883 if (s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
884 vec = &s->vectors[ARMV7M_EXCP_HARD];
885 } else {
886 vec = &s->sec_vectors[ARMV7M_EXCP_HARD];
887 }
888 break;
889 case -2:
890 vec = &s->vectors[ARMV7M_EXCP_NMI];
891 break;
892 case -3:
893 vec = &s->sec_vectors[ARMV7M_EXCP_HARD];
894 break;
895 default:
896 break;
897 }
898 }
899
900 if (!vec) {
901 return ret;
902 }
903
904 vec->active = 0;
905 if (vec->level) {
906
907
908
909 assert(irq >= NVIC_FIRST_IRQ);
910 vec->pending = 1;
911 }
912
913 nvic_irq_update(s);
914
915 return ret;
916}
917
918bool armv7m_nvic_get_ready_status(void *opaque, int irq, bool secure)
919{
920
921
922
923
924
925
926
927
928
929 NVICState *s = (NVICState *)opaque;
930 bool banked = exc_is_banked(irq);
931 VecInfo *vec;
932 int running = nvic_exec_prio(s);
933
934 assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
935 assert(!secure || banked);
936
937
938
939
940
941
942 if (irq == ARMV7M_EXCP_HARD) {
943 return running > -1;
944 }
945
946 vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq];
947
948 return vec->enabled &&
949 exc_group_prio(s, vec->prio, secure) < running;
950}
951
952
953static void set_irq_level(void *opaque, int n, int level)
954{
955 NVICState *s = opaque;
956 VecInfo *vec;
957
958 n += NVIC_FIRST_IRQ;
959
960 assert(n >= NVIC_FIRST_IRQ && n < s->num_irq);
961
962 trace_nvic_set_irq_level(n, level);
963
964
965
966
967
968
969
970
971 vec = &s->vectors[n];
972 if (level != vec->level) {
973 vec->level = level;
974 if (level) {
975 armv7m_nvic_set_pending(s, n, false);
976 }
977 }
978}
979
980
981static void nvic_nmi_trigger(void *opaque, int n, int level)
982{
983 NVICState *s = opaque;
984
985 trace_nvic_set_nmi_level(level);
986
987
988
989
990
991
992
993 if (level) {
994 armv7m_nvic_set_pending(s, ARMV7M_EXCP_NMI, false);
995 }
996}
997
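/*
 * Handle a 32-bit read of a System Control Space register at 'offset';
 * banked registers are selected via attrs.secure.  Reads of unimplemented
 * or invalid offsets log a guest error and return 0.
 */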
998static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
999{
1000 ARMCPU *cpu = s->cpu;
1001 uint32_t val;
1002
1003 switch (offset) {
1004 case 4:
1005 if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
1006 goto bad_offset;
1007 }
1008 return ((s->num_irq - NVIC_FIRST_IRQ) / 32) - 1;
1009 case 0xc:
1010 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1011 goto bad_offset;
1012 }
1013
1014
1015
1016 return 0;
1017 case 0x380 ... 0x3bf:
1018 {
1019 int startvec = 8 * (offset - 0x380) + NVIC_FIRST_IRQ;
1020 int i;
1021
1022 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1023 goto bad_offset;
1024 }
1025 if (!attrs.secure) {
1026 return 0;
1027 }
1028 val = 0;
1029 for (i = 0; i < 32 && startvec + i < s->num_irq; i++) {
1030 if (s->itns[startvec + i]) {
1031 val |= (1 << i);
1032 }
1033 }
1034 return val;
1035 }
1036 case 0xcfc:
1037 if (!arm_feature(&cpu->env, ARM_FEATURE_V8_1M)) {
1038 goto bad_offset;
1039 }
1040 return cpu->revidr;
1041 case 0xd00:
1042 return cpu->midr;
1043 case 0xd04:
1044
1045 val = cpu->env.v7m.exception;
1046
1047 if (s->vectpending) {
1048
1049
1050
1051
1052
1053 int vp = s->vectpending;
1054 if (!attrs.secure && arm_feature(&cpu->env, ARM_FEATURE_V8_1M) &&
1055 vectpending_targets_secure(s)) {
1056 vp = 1;
1057 }
1058 val |= (vp & 0x1ff) << 12;
1059 }
1060
1061 if (nvic_isrpending(s)) {
1062 val |= (1 << 22);
1063 }
1064
1065 if (nvic_rettobase(s)) {
1066 val |= (1 << 11);
1067 }
1068 if (attrs.secure) {
1069
1070 if (s->sec_vectors[ARMV7M_EXCP_SYSTICK].pending) {
1071 val |= (1 << 26);
1072 }
1073
1074 if (s->sec_vectors[ARMV7M_EXCP_PENDSV].pending) {
1075 val |= (1 << 28);
1076 }
1077 } else {
1078
1079 if (s->vectors[ARMV7M_EXCP_SYSTICK].pending) {
1080 val |= (1 << 26);
1081 }
1082
1083 if (s->vectors[ARMV7M_EXCP_PENDSV].pending) {
1084 val |= (1 << 28);
1085 }
1086 }
1087
1088 if ((attrs.secure || (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK))
1089 && s->vectors[ARMV7M_EXCP_NMI].pending) {
1090 val |= (1 << 31);
1091 }
1092
1093
1094 return val;
1095 case 0xd08:
1096 return cpu->env.v7m.vecbase[attrs.secure];
1097 case 0xd0c:
1098 val = 0xfa050000 | (s->prigroup[attrs.secure] << 8);
1099 if (attrs.secure) {
1100
1101 val |= cpu->env.v7m.aircr;
1102 } else {
1103 if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1104
1105
1106
1107
1108 val |= cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK;
1109 }
1110 }
1111 return val;
1112 case 0xd10:
1113 if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
1114 goto bad_offset;
1115 }
1116 return cpu->env.v7m.scr[attrs.secure];
1117 case 0xd14:
1118
1119
1120
1121
1122 val = cpu->env.v7m.ccr[attrs.secure];
1123 val |= cpu->env.v7m.ccr[M_REG_NS] & R_V7M_CCR_BFHFNMIGN_MASK;
1124
1125 if (!attrs.secure) {
1126 if (!(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
1127 val &= ~R_V7M_CCR_BFHFNMIGN_MASK;
1128 }
1129 }
1130 return val;
1131 case 0xd24:
1132 if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
1133 goto bad_offset;
1134 }
1135 val = 0;
1136 if (attrs.secure) {
1137 if (s->sec_vectors[ARMV7M_EXCP_MEM].active) {
1138 val |= (1 << 0);
1139 }
1140 if (s->sec_vectors[ARMV7M_EXCP_HARD].active) {
1141 val |= (1 << 2);
1142 }
1143 if (s->sec_vectors[ARMV7M_EXCP_USAGE].active) {
1144 val |= (1 << 3);
1145 }
1146 if (s->sec_vectors[ARMV7M_EXCP_SVC].active) {
1147 val |= (1 << 7);
1148 }
1149 if (s->sec_vectors[ARMV7M_EXCP_PENDSV].active) {
1150 val |= (1 << 10);
1151 }
1152 if (s->sec_vectors[ARMV7M_EXCP_SYSTICK].active) {
1153 val |= (1 << 11);
1154 }
1155 if (s->sec_vectors[ARMV7M_EXCP_USAGE].pending) {
1156 val |= (1 << 12);
1157 }
1158 if (s->sec_vectors[ARMV7M_EXCP_MEM].pending) {
1159 val |= (1 << 13);
1160 }
1161 if (s->sec_vectors[ARMV7M_EXCP_SVC].pending) {
1162 val |= (1 << 15);
1163 }
1164 if (s->sec_vectors[ARMV7M_EXCP_MEM].enabled) {
1165 val |= (1 << 16);
1166 }
1167 if (s->sec_vectors[ARMV7M_EXCP_USAGE].enabled) {
1168 val |= (1 << 18);
1169 }
1170 if (s->sec_vectors[ARMV7M_EXCP_HARD].pending) {
1171 val |= (1 << 21);
1172 }
1173
1174 if (s->vectors[ARMV7M_EXCP_SECURE].active) {
1175 val |= (1 << 4);
1176 }
1177 if (s->vectors[ARMV7M_EXCP_SECURE].enabled) {
1178 val |= (1 << 19);
1179 }
1180 if (s->vectors[ARMV7M_EXCP_SECURE].pending) {
1181 val |= (1 << 20);
1182 }
1183 } else {
1184 if (s->vectors[ARMV7M_EXCP_MEM].active) {
1185 val |= (1 << 0);
1186 }
1187 if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1188
1189 if (s->vectors[ARMV7M_EXCP_HARD].active) {
1190 val |= (1 << 2);
1191 }
1192 if (s->vectors[ARMV7M_EXCP_HARD].pending) {
1193 val |= (1 << 21);
1194 }
1195 }
1196 if (s->vectors[ARMV7M_EXCP_USAGE].active) {
1197 val |= (1 << 3);
1198 }
1199 if (s->vectors[ARMV7M_EXCP_SVC].active) {
1200 val |= (1 << 7);
1201 }
1202 if (s->vectors[ARMV7M_EXCP_PENDSV].active) {
1203 val |= (1 << 10);
1204 }
1205 if (s->vectors[ARMV7M_EXCP_SYSTICK].active) {
1206 val |= (1 << 11);
1207 }
1208 if (s->vectors[ARMV7M_EXCP_USAGE].pending) {
1209 val |= (1 << 12);
1210 }
1211 if (s->vectors[ARMV7M_EXCP_MEM].pending) {
1212 val |= (1 << 13);
1213 }
1214 if (s->vectors[ARMV7M_EXCP_SVC].pending) {
1215 val |= (1 << 15);
1216 }
1217 if (s->vectors[ARMV7M_EXCP_MEM].enabled) {
1218 val |= (1 << 16);
1219 }
1220 if (s->vectors[ARMV7M_EXCP_USAGE].enabled) {
1221 val |= (1 << 18);
1222 }
1223 }
1224 if (attrs.secure || (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
1225 if (s->vectors[ARMV7M_EXCP_BUS].active) {
1226 val |= (1 << 1);
1227 }
1228 if (s->vectors[ARMV7M_EXCP_BUS].pending) {
1229 val |= (1 << 14);
1230 }
1231 if (s->vectors[ARMV7M_EXCP_BUS].enabled) {
1232 val |= (1 << 17);
1233 }
1234 if (arm_feature(&cpu->env, ARM_FEATURE_V8) &&
1235 s->vectors[ARMV7M_EXCP_NMI].active) {
1236
1237 val |= (1 << 5);
1238 }
1239 }
1240
1241
1242 if (s->vectors[ARMV7M_EXCP_DEBUG].active) {
1243 val |= (1 << 8);
1244 }
1245 return val;
1246 case 0xd2c:
1247 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1248 goto bad_offset;
1249 }
1250 return cpu->env.v7m.hfsr;
1251 case 0xd30:
1252 return cpu->env.v7m.dfsr;
1253 case 0xd34:
1254 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1255 goto bad_offset;
1256 }
1257 return cpu->env.v7m.mmfar[attrs.secure];
1258 case 0xd38:
1259 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1260 goto bad_offset;
1261 }
1262 if (!attrs.secure &&
1263 !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
1264 return 0;
1265 }
1266 return cpu->env.v7m.bfar;
1267 case 0xd3c:
1268
1269 qemu_log_mask(LOG_UNIMP,
1270 "Aux Fault status registers unimplemented\n");
1271 return 0;
1272 case 0xd40:
1273 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1274 goto bad_offset;
1275 }
1276 return cpu->isar.id_pfr0;
1277 case 0xd44:
1278 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1279 goto bad_offset;
1280 }
1281 return cpu->isar.id_pfr1;
1282 case 0xd48:
1283 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1284 goto bad_offset;
1285 }
1286 return cpu->isar.id_dfr0;
1287 case 0xd4c:
1288 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1289 goto bad_offset;
1290 }
1291 return cpu->id_afr0;
1292 case 0xd50:
1293 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1294 goto bad_offset;
1295 }
1296 return cpu->isar.id_mmfr0;
1297 case 0xd54:
1298 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1299 goto bad_offset;
1300 }
1301 return cpu->isar.id_mmfr1;
1302 case 0xd58:
1303 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1304 goto bad_offset;
1305 }
1306 return cpu->isar.id_mmfr2;
1307 case 0xd5c:
1308 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1309 goto bad_offset;
1310 }
1311 return cpu->isar.id_mmfr3;
1312 case 0xd60:
1313 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1314 goto bad_offset;
1315 }
1316 return cpu->isar.id_isar0;
1317 case 0xd64:
1318 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1319 goto bad_offset;
1320 }
1321 return cpu->isar.id_isar1;
1322 case 0xd68:
1323 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1324 goto bad_offset;
1325 }
1326 return cpu->isar.id_isar2;
1327 case 0xd6c:
1328 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1329 goto bad_offset;
1330 }
1331 return cpu->isar.id_isar3;
1332 case 0xd70:
1333 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1334 goto bad_offset;
1335 }
1336 return cpu->isar.id_isar4;
1337 case 0xd74:
1338 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1339 goto bad_offset;
1340 }
1341 return cpu->isar.id_isar5;
1342 case 0xd78:
1343 return cpu->clidr;
1344 case 0xd7c:
1345 return cpu->ctr;
1346 case 0xd80:
1347 {
1348 int idx = cpu->env.v7m.csselr[attrs.secure] & R_V7M_CSSELR_INDEX_MASK;
1349 return cpu->ccsidr[idx];
1350 }
1351 case 0xd84:
1352 return cpu->env.v7m.csselr[attrs.secure];
1353 case 0xd88:
1354 if (!cpu_isar_feature(aa32_vfp_simd, cpu)) {
1355 return 0;
1356 }
1357 return cpu->env.v7m.cpacr[attrs.secure];
1358 case 0xd8c:
1359 if (!attrs.secure || !cpu_isar_feature(aa32_vfp_simd, cpu)) {
1360 return 0;
1361 }
1362 return cpu->env.v7m.nsacr;
1363
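    /* MPU_TYPE: the DREGION field [15:8] reports the number of MPU regions */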
1364 case 0xd90:
1365
1366 return cpu->pmsav7_dregion << 8;
1367 case 0xd94:
1368 return cpu->env.v7m.mpu_ctrl[attrs.secure];
1369 case 0xd98:
1370 return cpu->env.pmsav7.rnr[attrs.secure];
1371 case 0xd9c:
1372 case 0xda4:
1373 case 0xdac:
1374 case 0xdb4:
1375 {
1376 int region = cpu->env.pmsav7.rnr[attrs.secure];
1377
1378 if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1379
1380
1381
1382
1383
1384 int aliasno = (offset - 0xd9c) / 8;
1385 if (aliasno) {
1386 region = deposit32(region, 0, 2, aliasno);
1387 }
1388 if (region >= cpu->pmsav7_dregion) {
1389 return 0;
1390 }
1391 return cpu->env.pmsav8.rbar[attrs.secure][region];
1392 }
1393
1394 if (region >= cpu->pmsav7_dregion) {
1395 return 0;
1396 }
1397 return (cpu->env.pmsav7.drbar[region] & ~0x1f) | (region & 0xf);
1398 }
1399 case 0xda0:
1400 case 0xda8:
1401 case 0xdb0:
1402 case 0xdb8:
1403 {
1404 int region = cpu->env.pmsav7.rnr[attrs.secure];
1405
1406 if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1407
1408
1409
1410
1411 int aliasno = (offset - 0xda0) / 8;
1412 if (aliasno) {
1413 region = deposit32(region, 0, 2, aliasno);
1414 }
1415 if (region >= cpu->pmsav7_dregion) {
1416 return 0;
1417 }
1418 return cpu->env.pmsav8.rlar[attrs.secure][region];
1419 }
1420
1421 if (region >= cpu->pmsav7_dregion) {
1422 return 0;
1423 }
1424 return ((cpu->env.pmsav7.dracr[region] & 0xffff) << 16) |
1425 (cpu->env.pmsav7.drsr[region] & 0xffff);
1426 }
1427 case 0xdc0:
1428 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1429 goto bad_offset;
1430 }
1431 return cpu->env.pmsav8.mair0[attrs.secure];
1432 case 0xdc4:
1433 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1434 goto bad_offset;
1435 }
1436 return cpu->env.pmsav8.mair1[attrs.secure];
1437 case 0xdd0:
1438 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1439 goto bad_offset;
1440 }
1441 if (!attrs.secure) {
1442 return 0;
1443 }
1444 return cpu->env.sau.ctrl;
1445 case 0xdd4:
1446 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1447 goto bad_offset;
1448 }
1449 if (!attrs.secure) {
1450 return 0;
1451 }
1452 return cpu->sau_sregion;
1453 case 0xdd8:
1454 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1455 goto bad_offset;
1456 }
1457 if (!attrs.secure) {
1458 return 0;
1459 }
1460 return cpu->env.sau.rnr;
1461 case 0xddc:
1462 {
1463 int region = cpu->env.sau.rnr;
1464
1465 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1466 goto bad_offset;
1467 }
1468 if (!attrs.secure) {
1469 return 0;
1470 }
1471 if (region >= cpu->sau_sregion) {
1472 return 0;
1473 }
1474 return cpu->env.sau.rbar[region];
1475 }
1476 case 0xde0:
1477 {
1478 int region = cpu->env.sau.rnr;
1479
1480 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1481 goto bad_offset;
1482 }
1483 if (!attrs.secure) {
1484 return 0;
1485 }
1486 if (region >= cpu->sau_sregion) {
1487 return 0;
1488 }
1489 return cpu->env.sau.rlar[region];
1490 }
1491 case 0xde4:
1492 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1493 goto bad_offset;
1494 }
1495 if (!attrs.secure) {
1496 return 0;
1497 }
1498 return cpu->env.v7m.sfsr;
1499 case 0xde8:
1500 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1501 goto bad_offset;
1502 }
1503 if (!attrs.secure) {
1504 return 0;
1505 }
1506 return cpu->env.v7m.sfar;
1507 case 0xf04:
1508 if (!cpu_isar_feature(aa32_ras, cpu)) {
1509 goto bad_offset;
1510 }
1511
1512 return 0;
1513 case 0xf34:
1514 if (!cpu_isar_feature(aa32_vfp_simd, cpu)) {
1515 return 0;
1516 }
1517 if (attrs.secure) {
1518 return cpu->env.v7m.fpccr[M_REG_S];
1519 } else {
1520
1521
1522
1523
1524
1525
1526 uint32_t value = cpu->env.v7m.fpccr[M_REG_S];
1527 uint32_t mask = R_V7M_FPCCR_LSPEN_MASK |
1528 R_V7M_FPCCR_CLRONRET_MASK |
1529 R_V7M_FPCCR_MONRDY_MASK;
1530
1531 if (s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
1532 mask |= R_V7M_FPCCR_BFRDY_MASK | R_V7M_FPCCR_HFRDY_MASK;
1533 }
1534
1535 value &= mask;
1536
1537 value |= cpu->env.v7m.fpccr[M_REG_NS];
1538 return value;
1539 }
1540 case 0xf38:
1541 if (!cpu_isar_feature(aa32_vfp_simd, cpu)) {
1542 return 0;
1543 }
1544 return cpu->env.v7m.fpcar[attrs.secure];
1545 case 0xf3c:
1546 if (!cpu_isar_feature(aa32_vfp_simd, cpu)) {
1547 return 0;
1548 }
1549 return cpu->env.v7m.fpdscr[attrs.secure];
1550 case 0xf40:
1551 return cpu->isar.mvfr0;
1552 case 0xf44:
1553 return cpu->isar.mvfr1;
1554 case 0xf48:
1555 return cpu->isar.mvfr2;
1556 default:
1557 bad_offset:
1558 qemu_log_mask(LOG_GUEST_ERROR, "NVIC: Bad read offset 0x%x\n", offset);
1559 return 0;
1560 }
1561}
1562
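/*
 * Handle a 32-bit write to a System Control Space register at 'offset';
 * banked registers are selected via attrs.secure.  Writes to unimplemented
 * or invalid offsets log a guest error and are otherwise ignored.
 */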
1563static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
1564 MemTxAttrs attrs)
1565{
1566 ARMCPU *cpu = s->cpu;
1567
1568 switch (offset) {
1569 case 0xc:
1570 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1571 goto bad_offset;
1572 }
1573
1574 break;
1575 case 0x380 ... 0x3bf:
1576 {
1577 int startvec = 8 * (offset - 0x380) + NVIC_FIRST_IRQ;
1578 int i;
1579
1580 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1581 goto bad_offset;
1582 }
1583 if (!attrs.secure) {
1584 break;
1585 }
1586 for (i = 0; i < 32 && startvec + i < s->num_irq; i++) {
1587 s->itns[startvec + i] = (value >> i) & 1;
1588 }
1589 nvic_irq_update(s);
1590 break;
1591 }
1592 case 0xd04:
1593 if (attrs.secure || cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
1594 if (value & (1 << 31)) {
1595 armv7m_nvic_set_pending(s, ARMV7M_EXCP_NMI, false);
1596 } else if (value & (1 << 30) &&
1597 arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1598
1599 armv7m_nvic_clear_pending(s, ARMV7M_EXCP_NMI, false);
1600 }
1601 }
1602 if (value & (1 << 28)) {
1603 armv7m_nvic_set_pending(s, ARMV7M_EXCP_PENDSV, attrs.secure);
1604 } else if (value & (1 << 27)) {
1605 armv7m_nvic_clear_pending(s, ARMV7M_EXCP_PENDSV, attrs.secure);
1606 }
1607 if (value & (1 << 26)) {
1608 armv7m_nvic_set_pending(s, ARMV7M_EXCP_SYSTICK, attrs.secure);
1609 } else if (value & (1 << 25)) {
1610 armv7m_nvic_clear_pending(s, ARMV7M_EXCP_SYSTICK, attrs.secure);
1611 }
1612 break;
1613 case 0xd08:
1614 cpu->env.v7m.vecbase[attrs.secure] = value & 0xffffff80;
1615 break;
1616 case 0xd0c:
1617 if ((value >> R_V7M_AIRCR_VECTKEY_SHIFT) == 0x05fa) {
1618 if (value & R_V7M_AIRCR_SYSRESETREQ_MASK) {
1619 if (attrs.secure ||
1620 !(cpu->env.v7m.aircr & R_V7M_AIRCR_SYSRESETREQS_MASK)) {
1621 signal_sysresetreq(s);
1622 }
1623 }
1624 if (value & R_V7M_AIRCR_VECTCLRACTIVE_MASK) {
1625 qemu_log_mask(LOG_GUEST_ERROR,
1626 "Setting VECTCLRACTIVE when not in DEBUG mode "
1627 "is UNPREDICTABLE\n");
1628 }
1629 if (value & R_V7M_AIRCR_VECTRESET_MASK) {
1630
1631 qemu_log_mask(LOG_GUEST_ERROR,
1632 "Setting VECTRESET when not in DEBUG mode "
1633 "is UNPREDICTABLE\n");
1634 }
1635 if (arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1636 s->prigroup[attrs.secure] =
1637 extract32(value,
1638 R_V7M_AIRCR_PRIGROUP_SHIFT,
1639 R_V7M_AIRCR_PRIGROUP_LENGTH);
1640 }
1641
1642 if (attrs.secure) {
1643
1644 cpu->env.v7m.aircr = value &
1645 (R_V7M_AIRCR_SYSRESETREQS_MASK |
1646 R_V7M_AIRCR_BFHFNMINS_MASK |
1647 R_V7M_AIRCR_PRIS_MASK);
1648
1649
1650
1651
1652 if (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
1653 s->sec_vectors[ARMV7M_EXCP_HARD].prio = -3;
1654 s->vectors[ARMV7M_EXCP_HARD].enabled = 1;
1655 } else {
1656 s->sec_vectors[ARMV7M_EXCP_HARD].prio = -1;
1657 s->vectors[ARMV7M_EXCP_HARD].enabled = 0;
1658 }
1659 }
1660 nvic_irq_update(s);
1661 }
1662 break;
1663 case 0xd10:
1664 if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
1665 goto bad_offset;
1666 }
1667
1668
1669
1670
1671
1672 value &= ~(R_V7M_SCR_SLEEPDEEP_MASK | R_V7M_SCR_SLEEPDEEPS_MASK);
1673 cpu->env.v7m.scr[attrs.secure] = value;
1674 break;
1675 case 0xd14:
1676 {
1677 uint32_t mask;
1678
1679 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1680 goto bad_offset;
1681 }
1682
1683
1684 mask = R_V7M_CCR_STKALIGN_MASK |
1685 R_V7M_CCR_BFHFNMIGN_MASK |
1686 R_V7M_CCR_DIV_0_TRP_MASK |
1687 R_V7M_CCR_UNALIGN_TRP_MASK |
1688 R_V7M_CCR_USERSETMPEND_MASK |
1689 R_V7M_CCR_NONBASETHRDENA_MASK;
1690 if (arm_feature(&cpu->env, ARM_FEATURE_V8_1M) && attrs.secure) {
1691
1692 mask |= R_V7M_CCR_TRD_MASK;
1693 }
1694 value &= mask;
1695
1696 if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1697
1698 value |= R_V7M_CCR_NONBASETHRDENA_MASK
1699 | R_V7M_CCR_STKALIGN_MASK;
1700 }
1701 if (attrs.secure) {
1702
1703 cpu->env.v7m.ccr[M_REG_NS] =
1704 (cpu->env.v7m.ccr[M_REG_NS] & ~R_V7M_CCR_BFHFNMIGN_MASK)
1705 | (value & R_V7M_CCR_BFHFNMIGN_MASK);
1706 value &= ~R_V7M_CCR_BFHFNMIGN_MASK;
1707 } else {
1708
1709
1710
1711
1712 if (!(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
1713 value &= ~R_V7M_CCR_BFHFNMIGN_MASK;
1714 value |= cpu->env.v7m.ccr[M_REG_NS] & R_V7M_CCR_BFHFNMIGN_MASK;
1715 }
1716 }
1717
1718 cpu->env.v7m.ccr[attrs.secure] = value;
1719 break;
1720 }
1721 case 0xd24:
1722 if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
1723 goto bad_offset;
1724 }
1725 if (attrs.secure) {
1726 s->sec_vectors[ARMV7M_EXCP_MEM].active = (value & (1 << 0)) != 0;
1727
1728 s->sec_vectors[ARMV7M_EXCP_USAGE].active = (value & (1 << 3)) != 0;
1729 s->sec_vectors[ARMV7M_EXCP_SVC].active = (value & (1 << 7)) != 0;
1730 s->sec_vectors[ARMV7M_EXCP_PENDSV].active =
1731 (value & (1 << 10)) != 0;
1732 s->sec_vectors[ARMV7M_EXCP_SYSTICK].active =
1733 (value & (1 << 11)) != 0;
1734 s->sec_vectors[ARMV7M_EXCP_USAGE].pending =
1735 (value & (1 << 12)) != 0;
1736 s->sec_vectors[ARMV7M_EXCP_MEM].pending = (value & (1 << 13)) != 0;
1737 s->sec_vectors[ARMV7M_EXCP_SVC].pending = (value & (1 << 15)) != 0;
1738 s->sec_vectors[ARMV7M_EXCP_MEM].enabled = (value & (1 << 16)) != 0;
1739 s->sec_vectors[ARMV7M_EXCP_BUS].enabled = (value & (1 << 17)) != 0;
1740 s->sec_vectors[ARMV7M_EXCP_USAGE].enabled =
1741 (value & (1 << 18)) != 0;
1742 s->sec_vectors[ARMV7M_EXCP_HARD].pending = (value & (1 << 21)) != 0;
1743
1744 s->vectors[ARMV7M_EXCP_SECURE].active = (value & (1 << 4)) != 0;
1745 s->vectors[ARMV7M_EXCP_SECURE].enabled = (value & (1 << 19)) != 0;
1746 s->vectors[ARMV7M_EXCP_SECURE].pending = (value & (1 << 20)) != 0;
1747 } else {
1748 s->vectors[ARMV7M_EXCP_MEM].active = (value & (1 << 0)) != 0;
1749 if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1750
1751 s->vectors[ARMV7M_EXCP_HARD].pending = (value & (1 << 21)) != 0;
1752 }
1753 s->vectors[ARMV7M_EXCP_USAGE].active = (value & (1 << 3)) != 0;
1754 s->vectors[ARMV7M_EXCP_SVC].active = (value & (1 << 7)) != 0;
1755 s->vectors[ARMV7M_EXCP_PENDSV].active = (value & (1 << 10)) != 0;
1756 s->vectors[ARMV7M_EXCP_SYSTICK].active = (value & (1 << 11)) != 0;
1757 s->vectors[ARMV7M_EXCP_USAGE].pending = (value & (1 << 12)) != 0;
1758 s->vectors[ARMV7M_EXCP_MEM].pending = (value & (1 << 13)) != 0;
1759 s->vectors[ARMV7M_EXCP_SVC].pending = (value & (1 << 15)) != 0;
1760 s->vectors[ARMV7M_EXCP_MEM].enabled = (value & (1 << 16)) != 0;
1761 s->vectors[ARMV7M_EXCP_USAGE].enabled = (value & (1 << 18)) != 0;
1762 }
1763 if (attrs.secure || (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
1764 s->vectors[ARMV7M_EXCP_BUS].active = (value & (1 << 1)) != 0;
1765 s->vectors[ARMV7M_EXCP_BUS].pending = (value & (1 << 14)) != 0;
1766 s->vectors[ARMV7M_EXCP_BUS].enabled = (value & (1 << 17)) != 0;
1767 }
1768
1769
1770
1771 if (!attrs.secure && cpu->env.v7m.secure &&
1772 (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) &&
1773 (value & (1 << 5)) == 0) {
1774 s->vectors[ARMV7M_EXCP_NMI].active = 0;
1775 }
1776
1777
1778
1779
1780
1781
1782 if (!attrs.secure && cpu->env.v7m.secure &&
1783 (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) &&
1784 (value & (1 << 2)) == 0) {
1785 s->vectors[ARMV7M_EXCP_HARD].active = 0;
1786 }
1787
1788
1789 s->vectors[ARMV7M_EXCP_DEBUG].active = (value & (1 << 8)) != 0;
1790 nvic_irq_update(s);
1791 break;
1792 case 0xd2c:
1793 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1794 goto bad_offset;
1795 }
1796 cpu->env.v7m.hfsr &= ~value;
1797 break;
1798 case 0xd30:
1799 cpu->env.v7m.dfsr &= ~value;
1800 break;
1801 case 0xd34:
1802 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1803 goto bad_offset;
1804 }
1805 cpu->env.v7m.mmfar[attrs.secure] = value;
1806 return;
1807 case 0xd38:
1808 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
1809 goto bad_offset;
1810 }
1811 if (!attrs.secure &&
1812 !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
1813 return;
1814 }
1815 cpu->env.v7m.bfar = value;
1816 return;
1817 case 0xd3c:
1818 qemu_log_mask(LOG_UNIMP,
1819 "NVIC: Aux fault status registers unimplemented\n");
1820 break;
1821 case 0xd84:
1822 if (!arm_v7m_csselr_razwi(cpu)) {
1823 cpu->env.v7m.csselr[attrs.secure] = value & R_V7M_CSSELR_INDEX_MASK;
1824 }
1825 break;
1826 case 0xd88:
1827 if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
1828
1829 cpu->env.v7m.cpacr[attrs.secure] = value & (0xf << 20);
1830 }
1831 break;
1832 case 0xd8c:
1833 if (attrs.secure && cpu_isar_feature(aa32_vfp_simd, cpu)) {
1834
1835 cpu->env.v7m.nsacr = value & (3 << 10);
1836 }
1837 break;
1838 case 0xd90:
1839 return;
1840 case 0xd94:
1841 if ((value &
1842 (R_V7M_MPU_CTRL_HFNMIENA_MASK | R_V7M_MPU_CTRL_ENABLE_MASK))
1843 == R_V7M_MPU_CTRL_HFNMIENA_MASK) {
1844 qemu_log_mask(LOG_GUEST_ERROR, "MPU_CTRL: HFNMIENA and !ENABLE is "
1845 "UNPREDICTABLE\n");
1846 }
1847 cpu->env.v7m.mpu_ctrl[attrs.secure]
1848 = value & (R_V7M_MPU_CTRL_ENABLE_MASK |
1849 R_V7M_MPU_CTRL_HFNMIENA_MASK |
1850 R_V7M_MPU_CTRL_PRIVDEFENA_MASK);
1851 tlb_flush(CPU(cpu));
1852 break;
1853 case 0xd98:
1854 if (value >= cpu->pmsav7_dregion) {
1855 qemu_log_mask(LOG_GUEST_ERROR, "MPU region out of range %"
1856 PRIu32 "/%" PRIu32 "\n",
1857 value, cpu->pmsav7_dregion);
1858 } else {
1859 cpu->env.pmsav7.rnr[attrs.secure] = value;
1860 }
1861 break;
1862 case 0xd9c:
1863 case 0xda4:
1864 case 0xdac:
1865 case 0xdb4:
1866 {
1867 int region;
1868
1869 if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1870
1871
1872
1873
1874
1875 int aliasno = (offset - 0xd9c) / 8;
1876
1877 region = cpu->env.pmsav7.rnr[attrs.secure];
1878 if (aliasno) {
1879 region = deposit32(region, 0, 2, aliasno);
1880 }
1881 if (region >= cpu->pmsav7_dregion) {
1882 return;
1883 }
1884 cpu->env.pmsav8.rbar[attrs.secure][region] = value;
1885 tlb_flush(CPU(cpu));
1886 return;
1887 }
1888
1889 if (value & (1 << 4)) {
1890
1891
1892
1893 region = extract32(value, 0, 4);
1894 if (region >= cpu->pmsav7_dregion) {
1895 qemu_log_mask(LOG_GUEST_ERROR,
1896 "MPU region out of range %u/%" PRIu32 "\n",
1897 region, cpu->pmsav7_dregion);
1898 return;
1899 }
1900 cpu->env.pmsav7.rnr[attrs.secure] = region;
1901 } else {
1902 region = cpu->env.pmsav7.rnr[attrs.secure];
1903 }
1904
1905 if (region >= cpu->pmsav7_dregion) {
1906 return;
1907 }
1908
1909 cpu->env.pmsav7.drbar[region] = value & ~0x1f;
1910 tlb_flush(CPU(cpu));
1911 break;
1912 }
1913 case 0xda0:
1914 case 0xda8:
1915 case 0xdb0:
1916 case 0xdb8:
1917 {
1918 int region = cpu->env.pmsav7.rnr[attrs.secure];
1919
1920 if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1921
1922
1923
1924
1925 int aliasno = (offset - 0xd9c) / 8;
1926
1927 region = cpu->env.pmsav7.rnr[attrs.secure];
1928 if (aliasno) {
1929 region = deposit32(region, 0, 2, aliasno);
1930 }
1931 if (region >= cpu->pmsav7_dregion) {
1932 return;
1933 }
1934 cpu->env.pmsav8.rlar[attrs.secure][region] = value;
1935 tlb_flush(CPU(cpu));
1936 return;
1937 }
1938
1939 if (region >= cpu->pmsav7_dregion) {
1940 return;
1941 }
1942
1943 cpu->env.pmsav7.drsr[region] = value & 0xff3f;
1944 cpu->env.pmsav7.dracr[region] = (value >> 16) & 0x173f;
1945 tlb_flush(CPU(cpu));
1946 break;
1947 }
1948 case 0xdc0:
1949 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1950 goto bad_offset;
1951 }
1952 if (cpu->pmsav7_dregion) {
1953
1954 cpu->env.pmsav8.mair0[attrs.secure] = value;
1955 }
1956
1957
1958
1959 break;
1960 case 0xdc4:
1961 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1962 goto bad_offset;
1963 }
1964 if (cpu->pmsav7_dregion) {
1965
1966 cpu->env.pmsav8.mair1[attrs.secure] = value;
1967 }
1968
1969
1970
1971 break;
1972 case 0xdd0:
1973 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1974 goto bad_offset;
1975 }
1976 if (!attrs.secure) {
1977 return;
1978 }
1979 cpu->env.sau.ctrl = value & 3;
1980 break;
1981 case 0xdd4:
1982 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1983 goto bad_offset;
1984 }
1985 break;
1986 case 0xdd8:
1987 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1988 goto bad_offset;
1989 }
1990 if (!attrs.secure) {
1991 return;
1992 }
1993 if (value >= cpu->sau_sregion) {
1994 qemu_log_mask(LOG_GUEST_ERROR, "SAU region out of range %"
1995 PRIu32 "/%" PRIu32 "\n",
1996 value, cpu->sau_sregion);
1997 } else {
1998 cpu->env.sau.rnr = value;
1999 }
2000 break;
2001 case 0xddc:
2002 {
2003 int region = cpu->env.sau.rnr;
2004
2005 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
2006 goto bad_offset;
2007 }
2008 if (!attrs.secure) {
2009 return;
2010 }
2011 if (region >= cpu->sau_sregion) {
2012 return;
2013 }
2014 cpu->env.sau.rbar[region] = value & ~0x1f;
2015 tlb_flush(CPU(cpu));
2016 break;
2017 }
2018 case 0xde0:
2019 {
2020 int region = cpu->env.sau.rnr;
2021
2022 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
2023 goto bad_offset;
2024 }
2025 if (!attrs.secure) {
2026 return;
2027 }
2028 if (region >= cpu->sau_sregion) {
2029 return;
2030 }
2031 cpu->env.sau.rlar[region] = value & ~0x1c;
2032 tlb_flush(CPU(cpu));
2033 break;
2034 }
2035 case 0xde4:
2036 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
2037 goto bad_offset;
2038 }
2039 if (!attrs.secure) {
2040 return;
2041 }
2042 cpu->env.v7m.sfsr &= ~value;
2043 break;
2044 case 0xde8:
2045 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
2046 goto bad_offset;
2047 }
2048 if (!attrs.secure) {
2049 return;
2050 }
2051 cpu->env.v7m.sfsr = value;
2052 break;
2053 case 0xf00:
2054 {
2055 int excnum = (value & 0x1ff) + NVIC_FIRST_IRQ;
2056
2057 if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
2058 goto bad_offset;
2059 }
2060
2061 if (excnum < s->num_irq) {
2062 armv7m_nvic_set_pending(s, excnum, false);
2063 }
2064 break;
2065 }
2066 case 0xf04:
2067 if (!cpu_isar_feature(aa32_ras, cpu)) {
2068 goto bad_offset;
2069 }
2070
2071 break;
2072 case 0xf34:
2073 if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
2074
2075 uint32_t fpccr_s;
2076
2077 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
2078
2079 value &= (R_V7M_FPCCR_LSPACT_MASK |
2080 R_V7M_FPCCR_USER_MASK |
2081 R_V7M_FPCCR_THREAD_MASK |
2082 R_V7M_FPCCR_HFRDY_MASK |
2083 R_V7M_FPCCR_MMRDY_MASK |
2084 R_V7M_FPCCR_BFRDY_MASK |
2085 R_V7M_FPCCR_MONRDY_MASK |
2086 R_V7M_FPCCR_LSPEN_MASK |
2087 R_V7M_FPCCR_ASPEN_MASK);
2088 }
2089 value &= ~R_V7M_FPCCR_RES0_MASK;
2090
2091 if (!attrs.secure) {
2092
2093 fpccr_s = cpu->env.v7m.fpccr[M_REG_S];
2094 if (!(fpccr_s & R_V7M_FPCCR_LSPENS_MASK)) {
2095 uint32_t lspen = FIELD_EX32(value, V7M_FPCCR, LSPEN);
2096 fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, LSPEN, lspen);
2097 }
2098 if (!(fpccr_s & R_V7M_FPCCR_CLRONRETS_MASK)) {
2099 uint32_t cor = FIELD_EX32(value, V7M_FPCCR, CLRONRET);
2100 fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, CLRONRET, cor);
2101 }
2102 if ((s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
2103 uint32_t hfrdy = FIELD_EX32(value, V7M_FPCCR, HFRDY);
2104 uint32_t bfrdy = FIELD_EX32(value, V7M_FPCCR, BFRDY);
2105 fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, HFRDY, hfrdy);
2106 fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, BFRDY, bfrdy);
2107 }
2108
2109 {
2110 uint32_t monrdy = FIELD_EX32(value, V7M_FPCCR, MONRDY);
2111 fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, MONRDY, monrdy);
2112 }
2113
2114
2115
2116
2117
2118 value &= R_V7M_FPCCR_BANKED_MASK;
2119 cpu->env.v7m.fpccr[M_REG_NS] = value;
2120 } else {
2121 fpccr_s = value;
2122 }
2123 cpu->env.v7m.fpccr[M_REG_S] = fpccr_s;
2124 }
2125 break;
2126 case 0xf38:
2127 if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
2128 value &= ~7;
2129 cpu->env.v7m.fpcar[attrs.secure] = value;
2130 }
2131 break;
2132 case 0xf3c:
2133 if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
2134 uint32_t mask = FPCR_AHP | FPCR_DN | FPCR_FZ | FPCR_RMODE_MASK;
2135 if (cpu_isar_feature(any_fp16, cpu)) {
2136 mask |= FPCR_FZ16;
2137 }
2138 value &= mask;
2139 if (cpu_isar_feature(aa32_lob, cpu)) {
2140 value |= 4 << FPCR_LTPSIZE_SHIFT;
2141 }
2142 cpu->env.v7m.fpdscr[attrs.secure] = value;
2143 }
2144 break;
2145 case 0xf50:
2146 case 0xf58:
2147 case 0xf5c:
2148 case 0xf60:
2149 case 0xf64:
2150 case 0xf68:
2151 case 0xf6c:
2152 case 0xf70:
2153 case 0xf74:
2154 case 0xf78:
2155
2156 break;
2157 default:
2158 bad_offset:
2159 qemu_log_mask(LOG_GUEST_ERROR,
2160 "NVIC: Bad write offset 0x%x\n", offset);
2161 }
2162}
2163
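/*
 * Return true if unprivileged (user mode) access to the register at
 * 'offset' is permitted: only the STIR is user-accessible, and then only
 * when CCR.USERSETMPEND is set for the access's security state.
 */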
2164static bool nvic_user_access_ok(NVICState *s, hwaddr offset, MemTxAttrs attrs)
2165{
2166
2167 switch (offset) {
2168 case 0xf00:
2169
2170
2171
2172 return s->cpu->env.v7m.ccr[attrs.secure] & R_V7M_CCR_USERSETMPEND_MASK;
2173 default:
2174
2175 return false;
2176 }
2177}
2178
2179static int shpr_bank(NVICState *s, int exc, MemTxAttrs attrs)
2180{
2181
2182
2183
2184
2185
2186 switch (exc) {
2187 case ARMV7M_EXCP_MEM:
2188 case ARMV7M_EXCP_USAGE:
2189 case ARMV7M_EXCP_SVC:
2190 case ARMV7M_EXCP_PENDSV:
2191 case ARMV7M_EXCP_SYSTICK:
2192
2193 return attrs.secure;
2194 case ARMV7M_EXCP_BUS:
2195
2196 if (!attrs.secure &&
2197 !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
2198 return -1;
2199 }
2200 return M_REG_NS;
2201 case ARMV7M_EXCP_SECURE:
2202
2203 if (!attrs.secure) {
2204 return -1;
2205 }
2206 return M_REG_NS;
2207 case ARMV7M_EXCP_DEBUG:
2208
2209 return M_REG_NS;
2210 case 8 ... 10:
2211 case 13:
2212
2213 return -1;
2214 default:
2215
2216 g_assert_not_reached();
2217 }
2218}
2219
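/*
 * MMIO read handler for the NVIC/System Control Space region.  The
 * per-interrupt enable/pending/active/priority registers, the system
 * handler priority registers and the CFSR support byte and halfword
 * reads; everything else must be a 32-bit access and is handled by
 * nvic_readl().
 */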
2220static MemTxResult nvic_sysreg_read(void *opaque, hwaddr addr,
2221 uint64_t *data, unsigned size,
2222 MemTxAttrs attrs)
2223{
2224 NVICState *s = (NVICState *)opaque;
2225 uint32_t offset = addr;
2226 unsigned i, startvec, end;
2227 uint32_t val;
2228
2229 if (attrs.user && !nvic_user_access_ok(s, addr, attrs)) {
2230
2231 return MEMTX_ERROR;
2232 }
2233
2234 switch (offset) {
2235
2236 case 0x100 ... 0x13f:
2237 offset += 0x80;
2238
2239 case 0x180 ... 0x1bf:
2240 val = 0;
2241 startvec = 8 * (offset - 0x180) + NVIC_FIRST_IRQ;
2242
2243 for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
2244 if (s->vectors[startvec + i].enabled &&
2245 (attrs.secure || s->itns[startvec + i])) {
2246 val |= (1 << i);
2247 }
2248 }
2249 break;
2250 case 0x200 ... 0x23f:
2251 offset += 0x80;
2252
2253 case 0x280 ... 0x2bf:
2254 val = 0;
2255 startvec = 8 * (offset - 0x280) + NVIC_FIRST_IRQ;
2256 for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
2257 if (s->vectors[startvec + i].pending &&
2258 (attrs.secure || s->itns[startvec + i])) {
2259 val |= (1 << i);
2260 }
2261 }
2262 break;
2263 case 0x300 ... 0x33f:
2264 val = 0;
2265
2266 if (!arm_feature(&s->cpu->env, ARM_FEATURE_V7)) {
2267 break;
2268 }
2269
2270 startvec = 8 * (offset - 0x300) + NVIC_FIRST_IRQ;
2271
2272 for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
2273 if (s->vectors[startvec + i].active &&
2274 (attrs.secure || s->itns[startvec + i])) {
2275 val |= (1 << i);
2276 }
2277 }
2278 break;
2279 case 0x400 ... 0x5ef:
2280 val = 0;
2281 startvec = offset - 0x400 + NVIC_FIRST_IRQ;
2282
2283 for (i = 0; i < size && startvec + i < s->num_irq; i++) {
2284 if (attrs.secure || s->itns[startvec + i]) {
2285 val |= s->vectors[startvec + i].prio << (8 * i);
2286 }
2287 }
2288 break;
2289 case 0xd18 ... 0xd1b:
2290 if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
2291 val = 0;
2292 break;
2293 }
2294
2295 case 0xd1c ... 0xd23:
2296 val = 0;
2297 for (i = 0; i < size; i++) {
2298 unsigned hdlidx = (offset - 0xd14) + i;
2299 int sbank = shpr_bank(s, hdlidx, attrs);
2300
2301 if (sbank < 0) {
2302 continue;
2303 }
2304 val = deposit32(val, i * 8, 8, get_prio(s, hdlidx, sbank));
2305 }
2306 break;
2307 case 0xd28 ... 0xd2b:
2308 if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
2309 val = 0;
2310 break;
        }
2312
2313
2314
2315
2316
2317 val = s->cpu->env.v7m.cfsr[attrs.secure];
2318 if (!attrs.secure &&
2319 !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
2320 val &= ~R_V7M_CFSR_BFSR_MASK;
2321 } else {
2322 val |= s->cpu->env.v7m.cfsr[M_REG_NS] & R_V7M_CFSR_BFSR_MASK;
2323 }
2324 val = extract32(val, (offset - 0xd28) * 8, size * 8);
2325 break;
2326 case 0xfe0 ... 0xfff:
2327 if (offset & 3) {
2328 val = 0;
2329 } else {
2330 val = nvic_id[(offset - 0xfe0) >> 2];
2331 }
2332 break;
2333 default:
2334 if (size == 4) {
2335 val = nvic_readl(s, offset, attrs);
2336 } else {
2337 qemu_log_mask(LOG_GUEST_ERROR,
2338 "NVIC: Bad read of size %d at offset 0x%x\n",
2339 size, offset);
2340 val = 0;
2341 }
2342 }
2343
2344 trace_nvic_sysreg_read(addr, val, size);
2345 *data = val;
2346 return MEMTX_OK;
2347}
2348
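/*
 * MMIO write handler for the NVIC/System Control Space region.  The
 * per-interrupt enable/pending/priority registers, the system handler
 * priority registers and the CFSR accept byte and halfword writes;
 * everything else must be a 32-bit access and is passed to nvic_writel().
 * All successful writes finish by rebuilding the cached CPU hflags.
 */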
2349static MemTxResult nvic_sysreg_write(void *opaque, hwaddr addr,
2350 uint64_t value, unsigned size,
2351 MemTxAttrs attrs)
2352{
2353 NVICState *s = (NVICState *)opaque;
2354 uint32_t offset = addr;
2355 unsigned i, startvec, end;
2356 unsigned setval = 0;
2357
2358 trace_nvic_sysreg_write(addr, value, size);
2359
2360 if (attrs.user && !nvic_user_access_ok(s, addr, attrs)) {
2361
2362 return MEMTX_ERROR;
2363 }
2364
2365 switch (offset) {
2366 case 0x100 ... 0x13f:
2367 offset += 0x80;
2368 setval = 1;
2369
2370 case 0x180 ... 0x1bf:
2371 startvec = 8 * (offset - 0x180) + NVIC_FIRST_IRQ;
2372
2373 for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
2374 if (value & (1 << i) &&
2375 (attrs.secure || s->itns[startvec + i])) {
2376 s->vectors[startvec + i].enabled = setval;
2377 }
2378 }
2379 nvic_irq_update(s);
2380 goto exit_ok;
2381 case 0x200 ... 0x23f:
2382
2383
2384
2385 offset += 0x80;
2386 setval = 1;
2387
2388 case 0x280 ... 0x2bf:
2389 startvec = 8 * (offset - 0x280) + NVIC_FIRST_IRQ;
2390
2391 for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
2392 if (value & (1 << i) &&
2393 (attrs.secure || s->itns[startvec + i])) {
2394 s->vectors[startvec + i].pending = setval;
2395 }
2396 }
2397 nvic_irq_update(s);
2398 goto exit_ok;
2399 case 0x300 ... 0x33f:
2400 goto exit_ok;
2401 case 0x400 ... 0x5ef:
2402 startvec = (offset - 0x400) + NVIC_FIRST_IRQ;
2403
2404 for (i = 0; i < size && startvec + i < s->num_irq; i++) {
2405 if (attrs.secure || s->itns[startvec + i]) {
2406 set_prio(s, startvec + i, false, (value >> (i * 8)) & 0xff);
2407 }
2408 }
2409 nvic_irq_update(s);
2410 goto exit_ok;
2411 case 0xd18 ... 0xd1b:
2412 if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
2413 goto exit_ok;
2414 }
2415
2416 case 0xd1c ... 0xd23:
2417 for (i = 0; i < size; i++) {
2418 unsigned hdlidx = (offset - 0xd14) + i;
2419 int newprio = extract32(value, i * 8, 8);
2420 int sbank = shpr_bank(s, hdlidx, attrs);
2421
2422 if (sbank < 0) {
2423 continue;
2424 }
2425 set_prio(s, hdlidx, sbank, newprio);
2426 }
2427 nvic_irq_update(s);
2428 goto exit_ok;
2429 case 0xd28 ... 0xd2b:
2430 if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
2431 goto exit_ok;
2432 }
2433
2434
2435
2436 value <<= ((offset - 0xd28) * 8);
2437
2438 if (!attrs.secure &&
2439 !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
2440
2441 value &= ~R_V7M_CFSR_BFSR_MASK;
2442 }
2443
2444 s->cpu->env.v7m.cfsr[attrs.secure] &= ~value;
2445 if (attrs.secure) {
2446
2447
2448
2449 s->cpu->env.v7m.cfsr[M_REG_NS] &= ~(value & R_V7M_CFSR_BFSR_MASK);
2450 }
2451 goto exit_ok;
2452 }
2453 if (size == 4) {
2454 nvic_writel(s, offset, value, attrs);
2455 goto exit_ok;
2456 }
2457 qemu_log_mask(LOG_GUEST_ERROR,
2458 "NVIC: Bad write of size %d at offset 0x%x\n", size, offset);
2459
2460
2461 exit_ok:
2462
2463 arm_rebuild_hflags(&s->cpu->env);
2464 return MEMTX_OK;
2465}
2466
2467static const MemoryRegionOps nvic_sysreg_ops = {
2468 .read_with_attrs = nvic_sysreg_read,
2469 .write_with_attrs = nvic_sysreg_write,
2470 .endianness = DEVICE_NATIVE_ENDIAN,
2471};
2472
2473static int nvic_post_load(void *opaque, int version_id)
2474{
2475 NVICState *s = opaque;
2476 unsigned i;
2477 int resetprio;
2478
2479
2480 resetprio = arm_feature(&s->cpu->env, ARM_FEATURE_V8) ? -4 : -3;
2481
2482 if (s->vectors[ARMV7M_EXCP_RESET].prio != resetprio ||
2483 s->vectors[ARMV7M_EXCP_NMI].prio != -2 ||
2484 s->vectors[ARMV7M_EXCP_HARD].prio != -1) {
2485 return 1;
2486 }
2487 for (i = ARMV7M_EXCP_MEM; i < s->num_irq; i++) {
2488 if (s->vectors[i].prio & ~0xff) {
2489 return 1;
2490 }
2491 }
2492
2493 nvic_recompute_state(s);
2494
2495 return 0;
2496}
2497
2498static const VMStateDescription vmstate_VecInfo = {
2499 .name = "armv7m_nvic_info",
2500 .version_id = 1,
2501 .minimum_version_id = 1,
2502 .fields = (VMStateField[]) {
2503 VMSTATE_INT16(prio, VecInfo),
2504 VMSTATE_UINT8(enabled, VecInfo),
2505 VMSTATE_UINT8(pending, VecInfo),
2506 VMSTATE_UINT8(active, VecInfo),
2507 VMSTATE_UINT8(level, VecInfo),
2508 VMSTATE_END_OF_LIST()
2509 }
2510};
2511
2512static bool nvic_security_needed(void *opaque)
2513{
2514 NVICState *s = opaque;
2515
2516 return arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY);
2517}
2518
2519static int nvic_security_post_load(void *opaque, int version_id)
2520{
2521 NVICState *s = opaque;
2522 int i;
2523
2524
2525 if (s->sec_vectors[ARMV7M_EXCP_HARD].prio != -1
2526 && s->sec_vectors[ARMV7M_EXCP_HARD].prio != -3) {
2527
2528
2529
2530
2531 return 1;
2532 }
2533 for (i = ARMV7M_EXCP_MEM; i < ARRAY_SIZE(s->sec_vectors); i++) {
2534 if (s->sec_vectors[i].prio & ~0xff) {
2535 return 1;
2536 }
2537 }
2538 return 0;
2539}
2540
2541static const VMStateDescription vmstate_nvic_security = {
2542 .name = "armv7m_nvic/m-security",
2543 .version_id = 1,
2544 .minimum_version_id = 1,
2545 .needed = nvic_security_needed,
2546 .post_load = &nvic_security_post_load,
2547 .fields = (VMStateField[]) {
2548 VMSTATE_STRUCT_ARRAY(sec_vectors, NVICState, NVIC_INTERNAL_VECTORS, 1,
2549 vmstate_VecInfo, VecInfo),
2550 VMSTATE_UINT32(prigroup[M_REG_S], NVICState),
2551 VMSTATE_BOOL_ARRAY(itns, NVICState, NVIC_MAX_VECTORS),
2552 VMSTATE_END_OF_LIST()
2553 }
2554};
2555
2556static const VMStateDescription vmstate_nvic = {
2557 .name = "armv7m_nvic",
2558 .version_id = 4,
2559 .minimum_version_id = 4,
2560 .post_load = &nvic_post_load,
2561 .fields = (VMStateField[]) {
2562 VMSTATE_STRUCT_ARRAY(vectors, NVICState, NVIC_MAX_VECTORS, 1,
2563 vmstate_VecInfo, VecInfo),
2564 VMSTATE_UINT32(prigroup[M_REG_NS], NVICState),
2565 VMSTATE_END_OF_LIST()
2566 },
2567 .subsections = (const VMStateDescription*[]) {
2568 &vmstate_nvic_security,
2569 NULL
2570 }
2571};

static Property props_nvic[] = {
    /* Number of external IRQ lines (so excluding the 16 internal exceptions) */
    DEFINE_PROP_UINT32("num-irq", NVICState, num_irq, 64),
    DEFINE_PROP_END_OF_LIST()
};

static void armv7m_nvic_reset(DeviceState *dev)
{
    int resetprio;
    NVICState *s = NVIC(dev);

    memset(s->vectors, 0, sizeof(s->vectors));
    memset(s->sec_vectors, 0, sizeof(s->sec_vectors));
    s->prigroup[M_REG_NS] = 0;
    s->prigroup[M_REG_S] = 0;

    s->vectors[ARMV7M_EXCP_NMI].enabled = 1;
    /*
     * MEM, BUS, and USAGE are enabled through
     * the System Handler Control register
     */
    s->vectors[ARMV7M_EXCP_SVC].enabled = 1;
    s->vectors[ARMV7M_EXCP_PENDSV].enabled = 1;
    s->vectors[ARMV7M_EXCP_SYSTICK].enabled = 1;

    /* DebugMonitor is enabled via DEMCR.MON_EN */
    s->vectors[ARMV7M_EXCP_DEBUG].enabled = 0;

    resetprio = arm_feature(&s->cpu->env, ARM_FEATURE_V8) ? -4 : -3;
    s->vectors[ARMV7M_EXCP_RESET].prio = resetprio;
    s->vectors[ARMV7M_EXCP_NMI].prio = -2;
    s->vectors[ARMV7M_EXCP_HARD].prio = -1;

    if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
        s->sec_vectors[ARMV7M_EXCP_HARD].enabled = 1;
        s->sec_vectors[ARMV7M_EXCP_SVC].enabled = 1;
        s->sec_vectors[ARMV7M_EXCP_PENDSV].enabled = 1;
        s->sec_vectors[ARMV7M_EXCP_SYSTICK].enabled = 1;

        /* AIRCR.BFHFNMINS resets to 0 so Secure HF is priority -1 */
        s->sec_vectors[ARMV7M_EXCP_HARD].prio = -1;
        /* If BFHFNMINS is 0 then NS HF is disabled and can't be pended */
        s->vectors[ARMV7M_EXCP_HARD].enabled = 0;
    } else {
        s->vectors[ARMV7M_EXCP_HARD].enabled = 1;
    }

    /*
     * Strictly speaking the reset handler should be enabled, but we don't
     * simulate soft resets through the NVIC and the reset vector should
     * never be pended, so we leave it disabled to catch logic errors.
     */

    s->exception_prio = NVIC_NOEXC_PRIO;
    s->vectpending = 0;
    s->vectpending_is_s_banked = false;
    s->vectpending_prio = NVIC_NOEXC_PRIO;

    if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
        memset(s->itns, 0, sizeof(s->itns));
    } else {
        /*
         * This state is constant and not guest-accessible in a
         * non-security NVIC; we set the bits to true to avoid having to
         * do a feature bit check in the register accessors.
         */
        int i;

        for (i = NVIC_FIRST_IRQ; i < ARRAY_SIZE(s->itns); i++) {
            s->itns[i] = true;
        }
    }

    /*
     * We updated state that affects the CPU's MMU indexes and thus its
     * hflags, and we can't guarantee that we run before the CPU reset
     * function, so rebuild them here.
     */
    arm_rebuild_hflags(&s->cpu->env);
}

static void nvic_systick_trigger(void *opaque, int n, int level)
{
    NVICState *s = opaque;

    if (level) {
        /*
         * SysTick just asked us to pend its exception.
         * (This is different from an external interrupt line's behaviour.)
         * n == M_REG_NS: non-secure SysTick
         * n == M_REG_S: secure SysTick
         */
        armv7m_nvic_set_pending(s, ARMV7M_EXCP_SYSTICK, n);
    }
}

static void armv7m_nvic_realize(DeviceState *dev, Error **errp)
{
    NVICState *s = NVIC(dev);

    /* The NVIC must be connected to an M-profile CPU */
    if (!s->cpu || !arm_feature(&s->cpu->env, ARM_FEATURE_M)) {
        error_setg(errp, "The NVIC can only be used with a Cortex-M CPU");
        return;
    }

    if (s->num_irq > NVIC_MAX_IRQ) {
        error_setg(errp, "num-irq %d exceeds NVIC maximum", s->num_irq);
        return;
    }

    qdev_init_gpio_in(dev, set_irq_level, s->num_irq);

    /* include space for internal exception vectors */
    s->num_irq += NVIC_FIRST_IRQ;

    s->num_prio_bits = arm_feature(&s->cpu->env, ARM_FEATURE_V7) ? 8 : 2;

    /*
     * This device provides a single sysbus memory region which covers the
     * 4KB System Control Space register block at 0xe000e000; the owner is
     * expected to map it at that architected address.
     */
    memory_region_init_io(&s->sysregmem, OBJECT(s), &nvic_sysreg_ops, s,
                          "nvic_sysregs", 0x1000);
    sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->sysregmem);
}

2698static void armv7m_nvic_instance_init(Object *obj)
2699{
2700 DeviceState *dev = DEVICE(obj);
2701 NVICState *nvic = NVIC(obj);
2702 SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
2703
2704 sysbus_init_irq(sbd, &nvic->excpout);
2705 qdev_init_gpio_out_named(dev, &nvic->sysresetreq, "SYSRESETREQ", 1);
2706 qdev_init_gpio_in_named(dev, nvic_systick_trigger, "systick-trigger",
2707 M_REG_NUM_BANKS);
2708 qdev_init_gpio_in_named(dev, nvic_nmi_trigger, "NMI", 1);
2709}
2710
2711static void armv7m_nvic_class_init(ObjectClass *klass, void *data)
2712{
2713 DeviceClass *dc = DEVICE_CLASS(klass);
2714
2715 dc->vmsd = &vmstate_nvic;
2716 device_class_set_props(dc, props_nvic);
2717 dc->reset = armv7m_nvic_reset;
2718 dc->realize = armv7m_nvic_realize;
2719}
2720
2721static const TypeInfo armv7m_nvic_info = {
2722 .name = TYPE_NVIC,
2723 .parent = TYPE_SYS_BUS_DEVICE,
2724 .instance_init = armv7m_nvic_instance_init,
2725 .instance_size = sizeof(NVICState),
2726 .class_init = armv7m_nvic_class_init,
2727 .class_size = sizeof(SysBusDeviceClass),
2728};
2729
2730static void armv7m_nvic_register_types(void)
2731{
2732 type_register_static(&armv7m_nvic_info);
2733}
2734
2735type_init(armv7m_nvic_register_types)
2736