1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include "qemu/osdep.h"
20#include "qemu/units.h"
21#include "qemu/log.h"
22#include "qemu/main-loop.h"
23#include "cpu.h"
24#include "exec/helper-proto.h"
25#include "internals.h"
26#include "exec/exec-all.h"
27#include "exec/cpu_ldst.h"
28
29#define SIGNBIT (uint32_t)0x80000000
30#define SIGNBIT64 ((uint64_t)1 << 63)
31
32static CPUState *do_raise_exception(CPUARMState *env, uint32_t excp,
33 uint32_t syndrome, uint32_t target_el)
34{
35 CPUState *cs = env_cpu(env);
36
37 if (target_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) {
38
39
40
41
42
43
44 target_el = 2;
45 if (syn_get_ec(syndrome) == EC_ADVSIMDFPACCESSTRAP) {
46 syndrome = syn_uncategorized();
47 }
48 }
49
50 assert(!excp_is_internal(excp));
51 cs->exception_index = excp;
52 env->exception.syndrome = syndrome;
53 env->exception.target_el = target_el;
54
55 return cs;
56}
57
58void raise_exception(CPUARMState *env, uint32_t excp,
59 uint32_t syndrome, uint32_t target_el)
60{
61 CPUState *cs = do_raise_exception(env, excp, syndrome, target_el);
62 cpu_loop_exit(cs);
63}
64
65void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome,
66 uint32_t target_el, uintptr_t ra)
67{
68 CPUState *cs = do_raise_exception(env, excp, syndrome, target_el);
69 cpu_loop_exit_restore(cs, ra);
70}
71
72uint32_t HELPER(neon_tbl)(uint32_t ireg, uint32_t def, void *vn,
73 uint32_t maxindex)
74{
75 uint32_t val, shift;
76 uint64_t *table = vn;
77
78 val = 0;
79 for (shift = 0; shift < 32; shift += 8) {
80 uint32_t index = (ireg >> shift) & 0xff;
81 if (index < maxindex) {
82 uint32_t tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
83 val |= tmp << shift;
84 } else {
85 val |= def & (0xff << shift);
86 }
87 }
88 return val;
89}
90
/*
 * v8M stack-limit check: raise a STKOF fault if the proposed new stack
 * pointer value would be below the current stack limit.
 */
void HELPER(v8m_stackcheck)(CPUARMState *env, uint32_t newvalue)
{
    /*
     * The stack-limit comparison is strictly "below the limit"; a new
     * SP exactly at the limit is allowed.
     */
    if (newvalue < v7m_sp_limit(env)) {
        CPUState *cs = env_cpu(env);

        /*
         * Stack limit violations raise the STKOF exception, which is
         * taken with the PC of the faulting instruction.  We must sync
         * CPU state from the translated-code return address before
         * raising it, so GETPC() has to be used here in the top-level
         * helper frame.
         */
        cpu_restore_state(cs, GETPC(), true);
        raise_exception(env, EXCP_STKOF, 0, 1);
    }
}
109
110uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
111{
112 uint32_t res = a + b;
113 if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT))
114 env->QF = 1;
115 return res;
116}
117
118uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
119{
120 uint32_t res = a + b;
121 if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
122 env->QF = 1;
123 res = ~(((int32_t)a >> 31) ^ SIGNBIT);
124 }
125 return res;
126}
127
128uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
129{
130 uint32_t res = a - b;
131 if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
132 env->QF = 1;
133 res = ~(((int32_t)a >> 31) ^ SIGNBIT);
134 }
135 return res;
136}
137
138uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val)
139{
140 uint32_t res;
141 if (val >= 0x40000000) {
142 res = ~SIGNBIT;
143 env->QF = 1;
144 } else if (val <= (int32_t)0xc0000000) {
145 res = SIGNBIT;
146 env->QF = 1;
147 } else {
148 res = val << 1;
149 }
150 return res;
151}
152
153uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
154{
155 uint32_t res = a + b;
156 if (res < a) {
157 env->QF = 1;
158 res = ~0;
159 }
160 return res;
161}
162
163uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
164{
165 uint32_t res = a - b;
166 if (res > a) {
167 env->QF = 1;
168 res = 0;
169 }
170 return res;
171}
172
173
174static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
175{
176 int32_t top;
177 uint32_t mask;
178
179 top = val >> shift;
180 mask = (1u << shift) - 1;
181 if (top > 0) {
182 env->QF = 1;
183 return mask;
184 } else if (top < -1) {
185 env->QF = 1;
186 return ~mask;
187 }
188 return val;
189}
190
191
192static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
193{
194 uint32_t max;
195
196 max = (1u << shift) - 1;
197 if (val < 0) {
198 env->QF = 1;
199 return 0;
200 } else if (val > max) {
201 env->QF = 1;
202 return max;
203 }
204 return val;
205}
206
207
/* Signed saturation helper for the SSAT instruction. */
uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_ssat(env, x, shift);
}
212
213
214uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
215{
216 uint32_t res;
217
218 res = (uint16_t)do_ssat(env, (int16_t)x, shift);
219 res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
220 return res;
221}
222
223
/* Unsigned saturation helper for the USAT instruction. */
uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_usat(env, x, shift);
}
228
229
230uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
231{
232 uint32_t res;
233
234 res = (uint16_t)do_usat(env, (int16_t)x, shift);
235 res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
236 return res;
237}
238
/* Toggle the CPSR.E (data endianness) bit, for the SETEND instruction. */
void HELPER(setend)(CPUARMState *env)
{
    env->uncached_cpsr ^= CPSR_E;
}
243
244
245
246
247
/*
 * Check whether a WFI (@is_wfe false) or WFE (@is_wfe true) instruction
 * should be trapped to a higher exception level.  Returns the target EL
 * (1, 2 or 3) if the instruction traps, or 0 if it executes normally.
 */
static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
{
    int cur_el = arm_current_el(env);
    uint64_t mask;

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile has no WFI/WFE trap controls. */
        return 0;
    }

    /*
     * If we are currently in EL0, check whether SCTLR is set up to trap
     * WFx to EL1 (or to EL3/Monitor when Secure with an AArch32 EL3).
     * The SCTLR nTWE/nTWI bits are "don't trap" controls, hence the
     * inverted test below.
     */
    if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
        int target_el;

        mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
        if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) {
            /* Secure EL0 with an AArch32 EL3: the trap goes to EL3. */
            target_el = 3;
        } else {
            target_el = 1;
        }

        if (!(env->cp15.sctlr_el[target_el] & mask)) {
            return target_el;
        }
    }

    /*
     * From below EL2, the effective HCR_EL2.TWE/TWI bits can trap the
     * instruction to EL2.
     */
    if (cur_el < 2) {
        mask = is_wfe ? HCR_TWE : HCR_TWI;
        if (arm_hcr_el2_eff(env) & mask) {
            return 2;
        }
    }

    /* From below EL3, SCR_EL3.TWE/TWI can trap the instruction to EL3. */
    if (cur_el < 3) {
        mask = (is_wfe) ? SCR_TWE : SCR_TWI;
        if (env->cp15.scr_el3 & mask) {
            return 3;
        }
    }

    return 0;
}
298
/*
 * WFI helper: halt the CPU until an interrupt arrives, unless the
 * instruction is set up to trap to a higher EL.  @insn_len is the
 * encoding length (2 or 4 bytes), used both to wind the PC back when
 * trapping and to build the trap syndrome.
 */
void HELPER(wfi)(CPUARMState *env, uint32_t insn_len)
{
    CPUState *cs = env_cpu(env);
    int target_el = check_wfx_trap(env, false);

    if (cpu_has_work(cs)) {
        /*
         * Don't bother to go into our "low power state" if we would
         * just wake up immediately.
         */
        return;
    }

    if (target_el) {
        /*
         * Wind the PC back to the WFI itself so the trapping exception
         * is reported with the instruction's own address.
         * NOTE(review): this adjusts env->pc (the AArch64 PC field);
         * confirm the AArch32 path (regs[15]) cannot reach here.
         */
        env->pc -= insn_len;
        raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0, insn_len == 2),
                        target_el);
    }

    /* Halt and return to the main loop; an interrupt will wake us. */
    cs->exception_index = EXCP_HLT;
    cs->halted = 1;
    cpu_loop_exit(cs);
}
321
/*
 * WFE helper.  WFE is architecturally a "wait for event" hint that is
 * semantically different from YIELD, but we currently implement the two
 * identically: just give up the rest of this CPU's timeslice rather
 * than actually halting.  Trap checks for WFE are not performed here;
 * presumably they are handled at translate time — confirm against the
 * translator before relying on that.
 */
void HELPER(wfe)(CPUARMState *env)
{
    HELPER(yield)(env);
}
333
/*
 * YIELD helper: stop executing this vCPU's translated code and return
 * to the main loop so other work can run.  Unlike WFI the CPU is not
 * halted, so no wake-up event is required.
 */
void HELPER(yield)(CPUARMState *env)
{
    CPUState *cs = env_cpu(env);

    /*
     * EXCP_YIELD is a QEMU-internal exception number; the main loop
     * treats it as a request to reschedule rather than as an
     * architectural exception to deliver.
     */
    cs->exception_index = EXCP_YIELD;
    cpu_loop_exit(cs);
}
345
346
347
348
349
350
351
/*
 * Raise a QEMU-internal exception (one for which excp_is_internal() is
 * true).  Unlike raise_exception(), no guest-visible syndrome or target
 * EL state is recorded.
 */
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = env_cpu(env);

    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}
360
361
/* Raise an exception with the specified syndrome register value. */
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                     uint32_t syndrome, uint32_t target_el)
{
    raise_exception(env, excp, syndrome, target_el);
}
367
368
369
370
/*
 * Raise an EXCP_BKPT (BKPT/BRK instruction) with the given syndrome,
 * applying the debug-exception routing rules.
 */
void HELPER(exception_bkpt_insn)(CPUARMState *env, uint32_t syndrome)
{
    int debug_el = arm_debug_target_el(env);
    int cur_el = arm_current_el(env);

    /* FSR will only be used if the debug target EL is AArch32. */
    env->exception.fsr = arm_debug_exception_fsr(env);
    /*
     * FAR is UNKNOWN for a breakpoint instruction: clear vaddress so we
     * don't leak a stale value from a previous exception.
     */
    env->exception.vaddress = 0;
    /*
     * Breakpoint instructions are never silently ignored: if the normal
     * debug target EL is below the current EL, take the exception
     * locally instead of routing it "downward".
     */
    if (debug_el < cur_el) {
        debug_el = cur_el;
    }
    raise_exception(env, EXCP_BKPT, syndrome, debug_el);
}
396
/* Read the CPSR, masking out the execution-state and reserved bits. */
uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
    return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED);
}
401
/* Write @val to the CPSR fields selected by @mask, on behalf of MSR. */
void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask, CPSRWriteByInstr);
}
406
407
/* Write the CPSR for an exception return (ERET and friends). */
void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
{
    /* EL-change hooks may touch device state, so run them under the BQL. */
    qemu_mutex_lock_iothread();
    arm_call_pre_el_change_hook(env_archcpu(env));
    qemu_mutex_unlock_iothread();

    cpsr_write(env, val, CPSR_ERET_MASK, CPSRWriteExceptionReturn);

    /*
     * Generated code has already stored the new PC value, but without
     * masking its low bits, because which bits need masking depends on
     * whether we are returning to Thumb or ARM state — which is only
     * known once the CPSR has been written.  Do the masking now.
     */
    env->regs[15] &= (env->thumb ? ~1 : ~3);

    qemu_mutex_lock_iothread();
    arm_call_el_change_hook(env_archcpu(env));
    qemu_mutex_unlock_iothread();
}
427
428
429uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
430{
431 uint32_t val;
432
433 if (regno == 13) {
434 val = env->banked_r13[BANK_USRSYS];
435 } else if (regno == 14) {
436 val = env->banked_r14[BANK_USRSYS];
437 } else if (regno >= 8
438 && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
439 val = env->usr_regs[regno - 8];
440 } else {
441 val = env->regs[regno];
442 }
443 return val;
444}
445
446void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
447{
448 if (regno == 13) {
449 env->banked_r13[BANK_USRSYS] = val;
450 } else if (regno == 14) {
451 env->banked_r14[BANK_USRSYS] = val;
452 } else if (regno >= 8
453 && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
454 env->usr_regs[regno - 8] = val;
455 } else {
456 env->regs[regno] = val;
457 }
458}
459
460void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
461{
462 if ((env->uncached_cpsr & CPSR_M) == mode) {
463 env->regs[13] = val;
464 } else {
465 env->banked_r13[bank_number(mode)] = val;
466 }
467}
468
469uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
470{
471 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SYS) {
472
473
474
475 raise_exception(env, EXCP_UDEF, syn_uncategorized(),
476 exception_target_el(env));
477 }
478
479 if ((env->uncached_cpsr & CPSR_M) == mode) {
480 return env->regs[13];
481 } else {
482 return env->banked_r13[bank_number(mode)];
483 }
484}
485
static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode,
                                      uint32_t regno)
{
    /*
     * Raise an UNDEF exception for the UNPREDICTABLE cases of the
     * MSR/MRS banked-register accesses; otherwise return normally.
     * regno encoding: 16 = SPSR_<mode>, 17 = ELR_hyp, 8..14 = GP regs
     * (NOTE(review): inferred from the switches in msr_banked() /
     * mrs_banked() — confirm against the translator).
     */
    int curmode = env->uncached_cpsr & CPSR_M;

    if (regno == 17) {
        /* ELR_hyp: only accessible from Hyp or Monitor mode. */
        if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) {
            goto undef;
        }
        return;
    }

    /* Accessing your own mode's banked registers is UNPREDICTABLE. */
    if (curmode == tgtmode) {
        goto undef;
    }

    if (tgtmode == ARM_CPU_MODE_USR) {
        switch (regno) {
        case 8 ... 12:
            /* r8_usr..r12_usr only differ from the live regs in FIQ mode. */
            if (curmode != ARM_CPU_MODE_FIQ) {
                goto undef;
            }
            break;
        case 13:
            if (curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        case 14:
            if (curmode == ARM_CPU_MODE_HYP || curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        default:
            break;
        }
    }

    if (tgtmode == ARM_CPU_MODE_HYP) {
        /* SPSR_hyp / r13_hyp are only accessible from Monitor mode. */
        if (curmode != ARM_CPU_MODE_MON) {
            goto undef;
        }
    }

    return;

undef:
    raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                    exception_target_el(env));
}
543
/* Write @value to the banked register selected by @tgtmode/@regno. */
void HELPER(msr_banked)(CPUARMState *env, uint32_t value, uint32_t tgtmode,
                        uint32_t regno)
{
    /* UNDEFs on the UNPREDICTABLE access combinations and doesn't return. */
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSR_<tgtmode> */
        env->banked_spsr[bank_number(tgtmode)] = value;
        break;
    case 17: /* ELR_hyp */
        env->elr_el[2] = value;
        break;
    case 13:
        env->banked_r13[bank_number(tgtmode)] = value;
        break;
    case 14:
        /* r14 uses its own banking scheme, hence the separate helper. */
        env->banked_r14[r14_bank_number(tgtmode)] = value;
        break;
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            env->usr_regs[regno - 8] = value;
            break;
        case ARM_CPU_MODE_FIQ:
            env->fiq_regs[regno - 8] = value;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        g_assert_not_reached();
    }
}
578
/* Return the value of the banked register selected by @tgtmode/@regno. */
uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno)
{
    /* UNDEFs on the UNPREDICTABLE access combinations and doesn't return. */
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSR_<tgtmode> */
        return env->banked_spsr[bank_number(tgtmode)];
    case 17: /* ELR_hyp */
        return env->elr_el[2];
    case 13:
        return env->banked_r13[bank_number(tgtmode)];
    case 14:
        /* r14 uses its own banking scheme, hence the separate helper. */
        return env->banked_r14[r14_bank_number(tgtmode)];
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            return env->usr_regs[regno - 8];
        case ARM_CPU_MODE_FIQ:
            return env->fiq_regs[regno - 8];
        default:
            g_assert_not_reached();
        }
    default:
        g_assert_not_reached();
    }
}
605
/*
 * Check the access permissions for a system/coprocessor register
 * access.  Raises the appropriate exception if access is denied;
 * returns normally if access is OK.
 */
void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
                                 uint32_t isread)
{
    const ARMCPRegInfo *ri = rip;
    int target_el;

    /* XScale's CPAR register gates access to coprocessors 0..13. */
    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    if (!ri->accessfn) {
        return;
    }

    switch (ri->accessfn(env, ri, isread)) {
    case CP_ACCESS_OK:
        return;
    case CP_ACCESS_TRAP:
        target_el = exception_target_el(env);
        break;
    case CP_ACCESS_TRAP_EL2:
        /*
         * Requesting a trap to EL2 while in EL3 or Secure state would
         * be a bug in the register's access function.
         */
        assert(!arm_is_secure(env) && arm_current_el(env) != 3);
        target_el = 2;
        break;
    case CP_ACCESS_TRAP_EL3:
        target_el = 3;
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED:
        target_el = exception_target_el(env);
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL2:
        target_el = 2;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL3:
        target_el = 3;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_FP_EL2:
        target_el = 2;
        /*
         * Build an FP access-trap syndrome with the COND field set to
         * "always" (0xe) — NOTE(review): exact field semantics per
         * syn_fp_access_trap(); confirm in the syndrome definitions.
         */
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    case CP_ACCESS_TRAP_FP_EL3:
        target_el = 3;
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    default:
        g_assert_not_reached();
    }

    raise_exception(env, EXCP_UDEF, syndrome, target_el);
}
668
669void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
670{
671 const ARMCPRegInfo *ri = rip;
672
673 if (ri->type & ARM_CP_IO) {
674 qemu_mutex_lock_iothread();
675 ri->writefn(env, ri, value);
676 qemu_mutex_unlock_iothread();
677 } else {
678 ri->writefn(env, ri, value);
679 }
680}
681
682uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
683{
684 const ARMCPRegInfo *ri = rip;
685 uint32_t res;
686
687 if (ri->type & ARM_CP_IO) {
688 qemu_mutex_lock_iothread();
689 res = ri->readfn(env, ri);
690 qemu_mutex_unlock_iothread();
691 } else {
692 res = ri->readfn(env, ri);
693 }
694
695 return res;
696}
697
698void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
699{
700 const ARMCPRegInfo *ri = rip;
701
702 if (ri->type & ARM_CP_IO) {
703 qemu_mutex_lock_iothread();
704 ri->writefn(env, ri, value);
705 qemu_mutex_unlock_iothread();
706 } else {
707 ri->writefn(env, ri, value);
708 }
709}
710
711uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
712{
713 const ARMCPRegInfo *ri = rip;
714 uint64_t res;
715
716 if (ri->type & ARM_CP_IO) {
717 qemu_mutex_lock_iothread();
718 res = ri->readfn(env, ri);
719 qemu_mutex_unlock_iothread();
720 } else {
721 res = ri->readfn(env, ri);
722 }
723
724 return res;
725}
726
/*
 * Pre-execution checks for the HVC instruction: UNDEF it when HVC is
 * not enabled/implemented, unless it is a QEMU-internal PSCI call.
 */
void HELPER(pre_hvc)(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    int cur_el = arm_current_el(env);
    /* FIXME: use the actual secure state — hard-wired false here. */
    bool secure = false;
    bool undef;

    if (arm_is_psci_call(cpu, EXCP_HVC)) {
        /*
         * This looks like a valid PSCI call via the HVC conduit;
         * suppress the UNDEF checks so it is delivered as EXCP_HVC.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* No EL2: HVC always UNDEFs. */
        undef = true;
    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* With EL3, HVC is enabled by SCR_EL3.HCE. */
        undef = !(env->cp15.scr_el3 & SCR_HCE);
    } else {
        /* Without EL3, HCR_EL2.HCD disables HVC. */
        undef = env->cp15.hcr_el2 & HCR_HCD;
    }

    /*
     * HVC from Secure state is UNDEF on AArch32, and UNDEF from
     * Secure EL1 on AArch64.  NOTE(review): this branch is currently
     * dead because 'secure' is hard-wired false above.
     */
    if (secure && (!is_a64(env) || cur_el == 1)) {
        undef = true;
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}
766
/*
 * Pre-execution checks for the SMC instruction: decide between UNDEF,
 * a hypervisor trap (HCR.TSC), or letting the SMC execute, taking the
 * QEMU-internal PSCI-via-SMC conduit into account.
 */
void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
{
    ARMCPU *cpu = env_archcpu(env);
    int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool smd_flag = env->cp15.scr_el3 & SCR_SMD;

    /*
     * SCR_EL3.SMD disables SMC.  On AArch64 it applies in both security
     * states; on AArch32 it only applies in Non-secure state (a
     * Secure-state SMC ignores SMD).
     */
    bool smd = arm_feature(env, ARM_FEATURE_AARCH64) ? smd_flag
                                                     : smd_flag && !secure;

    if (!arm_feature(env, ARM_FEATURE_EL3) &&
        cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /*
         * Without EL3, SMC traditionally UNDEFs (and cannot be trapped
         * by HCR.TSC) — but QEMU's PSCI emulation needs SMC to work
         * when SMC is the PSCI conduit, so only UNDEF here when it is
         * not.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if (cur_el == 1 && (arm_hcr_el2_eff(env) & HCR_TSC)) {
        /*
         * HCR_EL2.TSC traps EL1 execution of SMC to EL2; per the order
         * of checks here this takes priority over the SMD/no-EL3 UNDEF
         * cases below.
         */
        raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
    }

    /*
     * Other than QEMU-internal PSCI calls, SMC UNDEFs when disabled by
     * SMD or when there is no EL3 to receive the call.
     */
    if (!arm_is_psci_call(cpu, EXCP_SMC) &&
        (smd || !arm_feature(env, ARM_FEATURE_EL3))) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}
847
848
849
850
851
852
853
854uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
855{
856 int shift = i & 0xff;
857 if (shift >= 32) {
858 if (shift == 32)
859 env->CF = x & 1;
860 else
861 env->CF = 0;
862 return 0;
863 } else if (shift != 0) {
864 env->CF = (x >> (32 - shift)) & 1;
865 return x << shift;
866 }
867 return x;
868}
869
870uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
871{
872 int shift = i & 0xff;
873 if (shift >= 32) {
874 if (shift == 32)
875 env->CF = (x >> 31) & 1;
876 else
877 env->CF = 0;
878 return 0;
879 } else if (shift != 0) {
880 env->CF = (x >> (shift - 1)) & 1;
881 return x >> shift;
882 }
883 return x;
884}
885
886uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
887{
888 int shift = i & 0xff;
889 if (shift >= 32) {
890 env->CF = (x >> 31) & 1;
891 return (int32_t)x >> 31;
892 } else if (shift != 0) {
893 env->CF = (x >> (shift - 1)) & 1;
894 return (int32_t)x >> shift;
895 }
896 return x;
897}
898
899uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
900{
901 int shift1, shift;
902 shift1 = i & 0xff;
903 shift = shift1 & 0x1f;
904 if (shift == 0) {
905 if (shift1 != 0)
906 env->CF = (x >> 31) & 1;
907 return x;
908 } else {
909 env->CF = (x >> (shift - 1)) & 1;
910 return ((uint32_t)x >> shift) | (x << (32 - shift));
911 }
912}
913
void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
{
    /*
     * Implement DC ZVA, which zeroes a fixed-length block of memory
     * (4 << DCZID.BS bytes), aligned down to the block size.  We do not
     * implement the architecturally mandated alignment fault for use on
     * Device memory, matching QEMU's usual lack of memory-attribute
     * handling.
     */
    ARMCPU *cpu = env_archcpu(env);
    uint64_t blocklen = 4 << cpu->dcz_blocksize;
    uint64_t vaddr = vaddr_in & ~(blocklen - 1);

#ifndef CONFIG_USER_ONLY
    {
        /*
         * The block may span several target pages.  hostaddr[] is
         * statically sized for the worst case (2KiB block at the
         * minimum page size); the assert below guards configurations
         * that would need more entries.
         */
        int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
        void *hostaddr[DIV_ROUND_UP(2 * KiB, 1 << TARGET_PAGE_BITS_MIN)];
        int try, i;
        unsigned mmu_idx = cpu_mmu_index(env, false);
        TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);

        assert(maxidx <= ARRAY_SIZE(hostaddr));

        for (try = 0; try < 2; try++) {
            /* Fast path: find direct host pointers for every page. */
            for (i = 0; i < maxidx; i++) {
                hostaddr[i] = tlb_vaddr_to_host(env,
                                                vaddr + TARGET_PAGE_SIZE * i,
                                                1, mmu_idx);
                if (!hostaddr[i]) {
                    break;
                }
            }
            if (i == maxidx) {
                /*
                 * Everything is in the TLB and writable, so memset the
                 * host memory directly: full pages first, then the tail
                 * of the block on the last page.
                 */
                for (i = 0; i < maxidx - 1; i++) {
                    memset(hostaddr[i], 0, TARGET_PAGE_SIZE);
                }
                memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE));
                return;
            }
            /*
             * Some page missed: try a byte store to populate the TLB.
             * This may raise a fault if the memory isn't writable, in
             * which case we longjmp out; we store to the original
             * vaddr_in so any fault reports the right address.
             */
            helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETPC());
            /* Probe the remaining pages of the block the same way. */
            for (i = 0; i < maxidx; i++) {
                uint64_t va = vaddr + TARGET_PAGE_SIZE * i;
                if (va != (vaddr_in & TARGET_PAGE_MASK)) {
                    helper_ret_stb_mmu(env, va, 0, oi, GETPC());
                }
            }
        }

        /*
         * Slow path: the probes succeeded but the fast path still
         * failed twice (e.g. I/O memory, or pages with cached
         * translations).  Fall back to a simple series of byte writes,
         * which is always architecturally correct.
         */
        for (i = 0; i < blocklen; i++) {
            helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETPC());
        }
    }
#else
    /* User-mode: guest addresses map directly to host memory. */
    memset(g2h(vaddr), 0, blocklen);
#endif
}
1005