/*
 * ARM v8.5-MemTag Operations
 *
 * Copyright (c) 2020 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see
 * <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/ram_addr.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"


/*
 * Advance from @tag to the @offset'th following tag that is not in
 * @exclude, wrapping modulo 16.  For @offset == 0, return the first
 * non-excluded tag at or after @tag.  If all tags are excluded, return 0.
 */
static int choose_nonexcluded_tag(int tag, int offset, uint16_t exclude)
{
    if (exclude == 0xffff) {
        return 0;
    }
    if (offset == 0) {
        while (exclude & (1 << tag)) {
            tag = (tag + 1) & 15;
        }
    } else {
        do {
            do {
                tag = (tag + 1) & 15;
            } while (exclude & (1 << tag));
        } while (--offset > 0);
    }
    return tag;
}
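
/*
 * Illustrative example of the walk above: with exclude = 0x0060 (tags 5
 * and 6 excluded), tag = 5 and offset = 2, the first step skips 6 and
 * lands on 7, the second step lands on 8, so the result is 8.
 */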

/**
 * allocation_tag_mem:
 * @env: the cpu environment
 * @ptr_mmu_idx: the addressing regime to use for the virtual address
 * @ptr: the virtual address for which to look up tag memory
 * @ptr_access: the access to use for the virtual address
 * @ptr_size: the number of bytes in the normal memory access
 * @tag_access: the access to use for the tag memory
 * @tag_size: the number of bytes in the tag memory access
 * @ra: the return address for exception handling
 *
 * Our tag memory is formatted as a sequence of little-endian nibbles.
 * That is, the byte at (addr >> (LOG2_TAG_GRANULE + 1)) contains two
 * tags, with the tag at [3:0] for the lower addr and the tag at [7:4]
 * for the higher addr.
 *
 * Here, resolve the physical address from the virtual address, and return
 * a pointer to the corresponding tag byte.  Exit with exception if the
 * virtual address is not accessible for @ptr_access.
 *
 * The @ptr_size and @tag_size values may not have an obvious relation
 * due to the alignment of @ptr, and the number of tag checks required.
 *
 * If there is no tag storage corresponding to @ptr, return NULL.
 */
static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
                                   uint64_t ptr, MMUAccessType ptr_access,
                                   int ptr_size, MMUAccessType tag_access,
                                   int tag_size, uintptr_t ra)
{
#ifdef CONFIG_USER_ONLY
    uint64_t clean_ptr = useronly_clean_ptr(ptr);
    int flags = page_get_flags(clean_ptr);
    uint8_t *tags;
    uintptr_t index;

    if (!(flags & (ptr_access == MMU_DATA_STORE ? PAGE_WRITE_ORG : PAGE_READ))) {
        /* SIGSEGV */
        arm_cpu_tlb_fill(env_cpu(env), ptr, ptr_size, ptr_access,
                         ptr_mmu_idx, false, ra);
        g_assert_not_reached();
    }

    /* Require both MAP_ANONYMOUS and PROT_MTE for the page. */
    if (!(flags & PAGE_ANON) || !(flags & PAGE_MTE)) {
        return NULL;
    }

    tags = page_get_target_data(clean_ptr);
    if (tags == NULL) {
        size_t alloc_size = TARGET_PAGE_SIZE >> (LOG2_TAG_GRANULE + 1);
        tags = page_alloc_target_data(clean_ptr, alloc_size);
        assert(tags != NULL);
    }

    index = extract32(ptr, LOG2_TAG_GRANULE + 1,
                      TARGET_PAGE_BITS - LOG2_TAG_GRANULE - 1);
    return tags + index;
#else
    uintptr_t index;
    CPUIOTLBEntry *iotlbentry;
    int in_page, flags;
    ram_addr_t ptr_ra;
    hwaddr ptr_paddr, tag_paddr, xlat;
    MemoryRegion *mr;
    ARMASIdx tag_asi;
    AddressSpace *tag_as;
    void *host;

    /*
     * Probe the first byte of the virtual address.  This raises an
     * exception for inaccessible pages, and resolves the virtual address
     * into the softmmu tlb.
     *
     * When RA == 0, this is for mte_probe1.  The page is expected to be
     * valid.  Indicate to probe_access_flags no-fault, then assert that
     * we received a valid page.
     */
    flags = probe_access_flags(env, ptr, ptr_access, ptr_mmu_idx,
                               ra == 0, &host, ra);
    assert(!(flags & TLB_INVALID_MASK));

    /*
     * Find the iotlbentry for ptr.  This *must* be present in the TLB
     * because we just found the mapping.
     * TODO: Perhaps there should be a cputlb helper that returns a
     * matching tlb entry + iotlb entry.
     */
    index = tlb_index(env, ptr_mmu_idx, ptr);
# ifdef CONFIG_DEBUG_TCG
    {
        CPUTLBEntry *entry = tlb_entry(env, ptr_mmu_idx, ptr);
        target_ulong comparator = (ptr_access == MMU_DATA_LOAD
                                   ? entry->addr_read
                                   : tlb_addr_write(entry));
        g_assert(tlb_hit(comparator, ptr));
    }
# endif
    iotlbentry = &env_tlb(env)->d[ptr_mmu_idx].iotlb[index];

    /* If the virtual page MemAttr != Tagged, access unchecked. */
    if (!arm_tlb_mte_tagged(&iotlbentry->attrs)) {
        return NULL;
    }

    /*
     * If not backed by host ram, there is no tag storage: access unchecked.
     * This is probably a guest os bug, so print a message.
     */
    if (unlikely(flags & TLB_MMIO)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Page @ 0x%" PRIx64 " indicates Tagged Normal memory "
                      "but is not backed by host ram\n", ptr);
        return NULL;
    }

    /*
     * The Normal memory access can extend to the next page.  E.g. a single
     * 8-byte access to the last byte of a page will check only the last
     * tag on the first page.
     * Any page access exception has priority over tag check exception.
     */
    in_page = -(ptr | TARGET_PAGE_MASK);
    if (unlikely(ptr_size > in_page)) {
        void *ignore;
        flags |= probe_access_flags(env, ptr + in_page, ptr_access,
                                    ptr_mmu_idx, ra == 0, &ignore, ra);
        assert(!(flags & TLB_INVALID_MASK));
    }

    /* Any debug exception has priority over a tag check exception. */
    if (unlikely(flags & TLB_WATCHPOINT)) {
        int wp = ptr_access == MMU_DATA_LOAD ? BP_MEM_READ : BP_MEM_WRITE;
        assert(ra != 0);
        cpu_check_watchpoint(env_cpu(env), ptr, ptr_size,
                             iotlbentry->attrs, wp, ra);
    }

    /*
     * Find the physical address within the normal mem space.
     * The memory region lookup must succeed because TLB_MMIO was
     * not set in the cputlb lookup above.
     */
    mr = memory_region_from_host(host, &ptr_ra);
    tcg_debug_assert(mr != NULL);
    tcg_debug_assert(memory_region_is_ram(mr));
    ptr_paddr = ptr_ra;
    do {
        ptr_paddr += mr->addr;
        mr = mr->container;
    } while (mr);

    /* Convert to the physical address in tag space. */
    tag_paddr = ptr_paddr >> (LOG2_TAG_GRANULE + 1);

    /* Look up the address in tag space. */
    tag_asi = iotlbentry->attrs.secure ? ARMASIdx_TagS : ARMASIdx_TagNS;
    tag_as = cpu_get_address_space(env_cpu(env), tag_asi);
    mr = address_space_translate(tag_as, tag_paddr, &xlat, NULL,
                                 tag_access == MMU_DATA_STORE,
                                 iotlbentry->attrs);

    /*
     * Note that @mr will never be NULL.  If there is nothing in the address
     * space at @tag_paddr, the translation will return the unallocated memory
     * region.  For our purposes, the result must be ram.
     */
    if (unlikely(!memory_region_is_ram(mr))) {
        /* ??? Failure is a board configuration error. */
        qemu_log_mask(LOG_UNIMP,
                      "Tag Memory @ 0x%" HWADDR_PRIx " not found for "
                      "Normal Memory @ 0x%" HWADDR_PRIx "\n",
                      tag_paddr, ptr_paddr);
        return NULL;
    }

    /*
     * Ensure the tag memory is dirty on write, for migration.
     * Tag memory can never contain code or display memory (vga).
     */
    if (tag_access == MMU_DATA_STORE) {
        ram_addr_t tag_ra = memory_region_get_ram_addr(mr) + xlat;
        cpu_physical_memory_set_dirty_flag(tag_ra, DIRTY_MEMORY_MIGRATION);
    }

    return memory_region_get_ram_ptr(mr) + xlat;
#endif
}
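
/*
 * Worked example of the tag layout (illustrative): with TAG_GRANULE = 16,
 * one tag byte covers 32 bytes of normal memory.  For the address 0x1010,
 * the covering tag byte is at index (0x1010 >> 5) within the tag storage,
 * and since bit 4 of the address is set, the tag is the high nibble [7:4]
 * of that byte; address 0x1000 would select the low nibble [3:0].
 */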

uint64_t HELPER(irg)(CPUARMState *env, uint64_t rn, uint64_t rm)
{
    uint16_t exclude = extract32(rm | env->cp15.gcr_el1, 0, 16);
    int rrnd = extract32(env->cp15.gcr_el1, 16, 1);
    int start = extract32(env->cp15.rgsr_el1, 0, 4);
    int seed = extract32(env->cp15.rgsr_el1, 8, 16);
    int offset, i, rtag;

    /*
     * Our IMPDEF choice for GCR_EL1.RRND==1 is to continue to use the
     * deterministic algorithm.  Except that with RRND==1 the kernel is
     * not required to have set RGSR_EL1.SEED != 0, which is required for
     * the deterministic algorithm to function.  So we force a non-zero
     * SEED for that case.
     */
    if (unlikely(seed == 0) && rrnd) {
        do {
            Error *err = NULL;
            uint16_t two;

            if (qemu_guest_getrandom(&two, sizeof(two), &err) < 0) {
                /*
                 * Failed, for unknown reasons in the crypto subsystem.
                 * Best we can do is log the failure and use a constant seed.
                 */
                qemu_log_mask(LOG_UNIMP, "IRG: Crypto failure: %s\n",
                              error_get_pretty(err));
                error_free(err);
                two = 1;
            }
            seed = two;
        } while (seed == 0);
    }

    /* RandomTag */
    for (i = offset = 0; i < 4; ++i) {
        /* NextRandomTagBit */
        int top = (extract32(seed, 5, 1) ^ extract32(seed, 3, 1) ^
                   extract32(seed, 2, 1) ^ extract32(seed, 0, 1));
        seed = (top << 15) | (seed >> 1);
        offset |= top << i;
    }
    rtag = choose_nonexcluded_tag(start, offset, exclude);
    env->cp15.rgsr_el1 = rtag | (seed << 8);

    return address_with_allocation_tag(rn, rtag);
}
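
/*
 * Illustrative LFSR step for the RandomTag loop above: with seed = 0x0001,
 * the tap bits 5, 3, 2, 0 give top = 1, so seed becomes 0x8000 and bit 0
 * of offset is set; the next three steps shift in zero bits, leaving
 * offset = 1 and a final seed of 0x1000.
 */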

uint64_t HELPER(addsubg)(CPUARMState *env, uint64_t ptr,
                         int32_t offset, uint32_t tag_offset)
{
    int start_tag = allocation_tag_from_addr(ptr);
    uint16_t exclude = extract32(env->cp15.gcr_el1, 0, 16);
    int rtag = choose_nonexcluded_tag(start_tag, tag_offset, exclude);

    return address_with_allocation_tag(ptr + offset, rtag);
}

/* Extract the tag nibble of @mem selected by bit 4 of @ptr. */
static int load_tag1(uint64_t ptr, uint8_t *mem)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    return extract32(*mem, ofs, 4);
}
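
/*
 * E.g. if *mem == 0x3a, load_tag1 returns 0xa when bit 4 of @ptr is clear
 * (the even, lower granule) and 0x3 when bit 4 is set (the odd granule).
 */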

uint64_t HELPER(ldg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uint8_t *mem;
    int rtag = 0;

    /* Trap if accessing an invalid page. */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD, 1,
                             MMU_DATA_LOAD, 1, GETPC());

    /* Load if page supports tags. */
    if (mem) {
        rtag = load_tag1(ptr, mem);
    }

    return address_with_allocation_tag(xt, rtag);
}

static void check_tag_aligned(CPUARMState *env, uint64_t ptr, uintptr_t ra)
{
    if (unlikely(!QEMU_IS_ALIGNED(ptr, TAG_GRANULE))) {
        arm_cpu_do_unaligned_access(env_cpu(env), ptr, MMU_DATA_STORE,
                                    cpu_mmu_index(env, false), ra);
        g_assert_not_reached();
    }
}

/* For use in a non-parallel context, store to the given nibble. */
static void store_tag1(uint64_t ptr, uint8_t *mem, int tag)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    *mem = deposit32(*mem, ofs, 4, tag);
}

/* For use in a parallel context, atomically store to the given nibble. */
static void store_tag1_parallel(uint64_t ptr, uint8_t *mem, int tag)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    uint8_t old = qatomic_read(mem);

    while (1) {
        uint8_t new = deposit32(old, ofs, 4, tag);
        uint8_t cmp = qatomic_cmpxchg(mem, old, new);
        if (likely(cmp == old)) {
            return;
        }
        old = cmp;
    }
}
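
/*
 * Note on the compare-and-swap loop above: it retries only when the tag
 * byte was concurrently modified (typically the sibling nibble of the
 * same byte); the deposit is then recomputed against the updated value,
 * so neither store is lost.
 */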

typedef void stg_store1(uint64_t, uint8_t *, int);

static inline void do_stg(CPUARMState *env, uint64_t ptr, uint64_t xt,
                          uintptr_t ra, stg_store1 store1)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uint8_t *mem;

    check_tag_aligned(env, ptr, ra);

    /* Trap if accessing an invalid page. */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, TAG_GRANULE,
                             MMU_DATA_STORE, 1, ra);

    /* Store if page supports tags. */
    if (mem) {
        store1(ptr, mem, allocation_tag_from_addr(xt));
    }
}

void HELPER(stg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_stg(env, ptr, xt, GETPC(), store_tag1);
}

void HELPER(stg_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_stg(env, ptr, xt, GETPC(), store_tag1_parallel);
}

void HELPER(stg_stub)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();

    check_tag_aligned(env, ptr, ra);
    probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
}

static inline void do_st2g(CPUARMState *env, uint64_t ptr, uint64_t xt,
                           uintptr_t ra, stg_store1 store1)
{
    int mmu_idx = cpu_mmu_index(env, false);
    int tag = allocation_tag_from_addr(xt);
    uint8_t *mem1, *mem2;

    check_tag_aligned(env, ptr, ra);

    /*
     * Trap if accessing an invalid page(s).
     * This takes priority over !allocation_tag_access_enabled.
     */
    if (ptr & TAG_GRANULE) {
        /* Two stores unaligned mod TAG_GRANULE*2 -- modify two bytes. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  TAG_GRANULE, MMU_DATA_STORE, 1, ra);
        mem2 = allocation_tag_mem(env, mmu_idx, ptr + TAG_GRANULE,
                                  MMU_DATA_STORE, TAG_GRANULE,
                                  MMU_DATA_STORE, 1, ra);

        /* Store if page(s) support tags. */
        if (mem1) {
            store1(TAG_GRANULE, mem1, tag);
        }
        if (mem2) {
            store1(0, mem2, tag);
        }
    } else {
        /* Two stores aligned mod TAG_GRANULE*2 -- modify one byte. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  2 * TAG_GRANULE, MMU_DATA_STORE, 1, ra);
        if (mem1) {
            tag |= tag << 4;
            qatomic_set(mem1, tag);
        }
    }
}
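
/*
 * In the aligned case above, e.g. tag 0x5 is replicated to the byte 0x55,
 * covering both granules; a plain atomic store suffices even for the
 * parallel case because the whole tag byte is replaced.
 */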

void HELPER(st2g)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_st2g(env, ptr, xt, GETPC(), store_tag1);
}

void HELPER(st2g_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_st2g(env, ptr, xt, GETPC(), store_tag1_parallel);
}

void HELPER(st2g_stub)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    int in_page = -(ptr | TARGET_PAGE_MASK);

    check_tag_aligned(env, ptr, ra);

    if (likely(in_page >= 2 * TAG_GRANULE)) {
        probe_write(env, ptr, 2 * TAG_GRANULE, mmu_idx, ra);
    } else {
        probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
        probe_write(env, ptr + TAG_GRANULE, TAG_GRANULE, mmu_idx, ra);
    }
}

#define LDGM_STGM_SIZE  (4 << GMID_EL1_BS)
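
/*
 * With GMID_EL1_BS == 6, LDGM_STGM_SIZE is 4 << 6 = 256 bytes, i.e.
 * 16 tag granules, whose 16 nibbles occupy exactly the 8 bytes of tag
 * memory moved by the 64-bit loads and stores below.
 */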

uint64_t HELPER(ldgm)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    void *tag_mem;

    ptr = QEMU_ALIGN_DOWN(ptr, LDGM_STGM_SIZE);

    /* Trap if accessing an invalid page. */
    tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD,
                                 LDGM_STGM_SIZE, MMU_DATA_LOAD,
                                 LDGM_STGM_SIZE / (2 * TAG_GRANULE), ra);

    /* The tag is squashed to zero if the page does not support tags. */
    if (!tag_mem) {
        return 0;
    }

    QEMU_BUILD_BUG_ON(GMID_EL1_BS != 6);
    /*
     * We are loading 64-bits worth of tags.  The ordering of elements
     * within the word corresponds to a 64-bit little-endian operation.
     */
    return ldq_le_p(tag_mem);
}

void HELPER(stgm)(CPUARMState *env, uint64_t ptr, uint64_t val)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    void *tag_mem;

    ptr = QEMU_ALIGN_DOWN(ptr, LDGM_STGM_SIZE);

    /* Trap if accessing an invalid page. */
    tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                 LDGM_STGM_SIZE, MMU_DATA_LOAD,
                                 LDGM_STGM_SIZE / (2 * TAG_GRANULE), ra);

    /*
     * The tag store only happens if the page supports tags,
     * and if the OS has enabled access to the tags.
     */
    if (!tag_mem) {
        return;
    }

    QEMU_BUILD_BUG_ON(GMID_EL1_BS != 6);
    /*
     * We are storing 64-bits worth of tags.  The ordering of elements
     * within the word corresponds to a 64-bit little-endian operation.
     */
    stq_le_p(tag_mem, val);
}

void HELPER(stzgm_tags)(CPUARMState *env, uint64_t ptr, uint64_t val)
{
    uintptr_t ra = GETPC();
    int mmu_idx = cpu_mmu_index(env, false);
    int log2_dcz_bytes, log2_tag_bytes;
    intptr_t dcz_bytes, tag_bytes;
    uint8_t *mem;

    /*
     * In arm_cpu_realizefn, we asserted that dcz > LOG2_TAG_GRANULE+1,
     * i.e. 32 bytes, which is an unreasonably small dcz anyway,
     * to make sure that we can access one complete tag byte here.
     */
    log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
    log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
    dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
    tag_bytes = (intptr_t)1 << log2_tag_bytes;
    ptr &= -dcz_bytes;

    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, dcz_bytes,
                             MMU_DATA_STORE, tag_bytes, ra);
    if (mem) {
        int tag_pair = (val & 0xf) * 0x11;
        memset(mem, tag_pair, tag_bytes);
    }
}
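
/*
 * E.g. for a 64-byte DC block (dcz_blocksize == 4, the field being the
 * log2 of the size in words): log2_dcz_bytes = 6 and tag_bytes = 2;
 * val = 0x7 yields tag_pair 0x77, and the memset tags all four granules
 * of the block.
 */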

/* Record a tag check failure. */
static void mte_check_fail(CPUARMState *env, uint32_t desc,
                           uint64_t dirty_ptr, uintptr_t ra)
{
    int mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    ARMMMUIdx arm_mmu_idx = core_to_aa64_mmu_idx(mmu_idx);
    int el, reg_el, tcf, select, is_write, syn;
    uint64_t sctlr;

    reg_el = regime_el(env, arm_mmu_idx);
    sctlr = env->cp15.sctlr_el[reg_el];

    switch (arm_mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E20_0:
        el = 0;
        tcf = extract64(sctlr, 38, 2);
        break;
    default:
        el = reg_el;
        tcf = extract64(sctlr, 40, 2);
    }

    switch (tcf) {
    case 1:
        /*
         * Tag check fail causes a synchronous exception.
         *
         * In restore_state_to_opc, we set the exception syndrome
         * for the load or store operation.  Unwind first so we
         * may overwrite this with the syndrome for the tag check.
         */
        cpu_restore_state(env_cpu(env), ra, true);
        env->exception.vaddress = dirty_ptr;

        is_write = FIELD_EX32(desc, MTEDESC, WRITE);
        syn = syn_data_abort_no_iss(arm_current_el(env) != 0, 0, 0, 0, 0,
                                    is_write, 0x11);
        raise_exception(env, EXCP_DATA_ABORT, syn, exception_target_el(env));
        /* noreturn, but fall through to the assert anyway */

    case 0:
        /*
         * Tag check fail does not affect the PE.
         * We eliminate this case by not setting MTE_ACTIVE
         * in tb_flags, so that we never make this runtime call.
         */
        g_assert_not_reached();

    case 2:
        /* Tag check fail causes asynchronous flag set. */
        if (regime_has_2_ranges(arm_mmu_idx)) {
            select = extract64(dirty_ptr, 55, 1);
        } else {
            select = 0;
        }
        env->cp15.tfsr_el[el] |= 1 << select;
#ifdef CONFIG_USER_ONLY
        /*
         * Stand in for a timer irq, setting _TIF_MTE_ASYNC_FAULT,
         * which then sends a SIGSEGV when the thread is next scheduled.
         * This cpu will return to the main loop at the end of the TB,
         * which is rather sooner than "normal".  But the alternative
         * is waiting until the next TB.
         */
        qemu_cpu_kick(env_cpu(env));
#endif
        break;

    default:
        /* Case 3: Reserved. */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Tag check failure with SCTLR_EL%d.TCF%s "
                      "set to reserved value %d\n",
                      reg_el, el ? "" : "0", tcf);
        break;
    }
}

/*
 * Perform an MTE checked access for a single logical or atomic access.
 */
static bool mte_probe1_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
                           uintptr_t ra, int bit55)
{
    int mem_tag, mmu_idx, ptr_tag, size;
    MMUAccessType type;
    uint8_t *mem;

    ptr_tag = allocation_tag_from_addr(ptr);

    if (tcma_check(desc, bit55, ptr_tag)) {
        return true;
    }

    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD;
    size = FIELD_EX32(desc, MTEDESC, ESIZE);

    mem = allocation_tag_mem(env, mmu_idx, ptr, type, size,
                             MMU_DATA_LOAD, 1, ra);
    if (!mem) {
        return true;
    }

    mem_tag = load_tag1(ptr, mem);
    return ptr_tag == mem_tag;
}

/*
 * No-fault version of mte_check1, to be used by SVE for MemSingleNF.
 * Returns false if the access is Checked and the check failed.  This
 * is only intended to probe the tag -- the validity of the page must
 * be checked beforehand.
 */
bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    int bit55 = extract64(ptr, 55, 1);

    /* If TBI is disabled, the access is unchecked. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return true;
    }

    return mte_probe1_int(env, desc, ptr, 0, bit55);
}

uint64_t mte_check1(CPUARMState *env, uint32_t desc,
                    uint64_t ptr, uintptr_t ra)
{
    int bit55 = extract64(ptr, 55, 1);

    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return ptr;
    }

    if (unlikely(!mte_probe1_int(env, desc, ptr, ra, bit55))) {
        mte_check_fail(env, desc, ptr, ra);
    }

    return useronly_clean_ptr(ptr);
}

uint64_t HELPER(mte_check1)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    return mte_check1(env, desc, ptr, GETPC());
}

/*
 * Perform an MTE checked access for multiple logical accesses.
 */

/**
 * checkN:
 * @tag: tag memory to test
 * @odd: true to begin testing at tags at odd nibble
 * @cmp: the tag to compare against
 * @count: number of tags to test
 *
 * Return the number of successful tests.
 * Thus a return value < @count indicates a failure.
 *
 * A note about sizes: count is expected to be small.
 *
 * The most common use will be LDP/STP of two integer registers,
 * which means 16 bytes of memory touching at most 2 tags, but
 * often the access is aligned and thus just 1 tag.
 *
 * Using AdvSIMD LD/ST (multiple), one could have 4 registers of
 * 16 bytes, touching at most 5 tags.  SVE LDR/STR (vector) with the
 * default vector length is also 5 tags.
 */
static int checkN(uint8_t *mem, int odd, int cmp, int count)
{
    int n = 0, diff;

    /* Replicate the test tag and compare. */
    cmp *= 0x11;
    diff = *mem++ ^ cmp;

    if (odd) {
        goto start_odd;
    }

    while (1) {
        /* Test even tag. */
        if (unlikely((diff) & 0x0f)) {
            break;
        }
        if (++n == count) {
            break;
        }

 start_odd:
        /* Test odd tag. */
        if (unlikely((diff) & 0xf0)) {
            break;
        }
        if (++n == count) {
            break;
        }

        diff = *mem++ ^ cmp;
    }
    return n;
}
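
/*
 * E.g. checkN(mem, 0, 0xa, 4) with tag bytes { 0xaa, 0x3a }: the
 * replicated comparator is 0xaa; the even and odd nibbles of byte 0
 * match (n = 2), the even nibble of byte 1 matches (n = 3), and its odd
 * nibble 0x3 does not, so the return value 3 < 4 flags the bad granule.
 */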

uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
                    uint64_t ptr, uintptr_t ra)
{
    int mmu_idx, ptr_tag, bit55;
    uint64_t ptr_last, ptr_end, prev_page, next_page;
    uint64_t tag_first, tag_end;
    uint64_t tag_byte_first, tag_byte_end;
    uint32_t esize, total, tag_count, tag_size, n, c;
    uint8_t *mem1, *mem2;
    MMUAccessType type;

    bit55 = extract64(ptr, 55, 1);

    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return ptr;
    }

    ptr_tag = allocation_tag_from_addr(ptr);

    if (tcma_check(desc, bit55, ptr_tag)) {
        goto done;
    }

    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD;
    esize = FIELD_EX32(desc, MTEDESC, ESIZE);
    total = FIELD_EX32(desc, MTEDESC, TSIZE);

    /* Find the addr of the end of the access, and of the last element. */
    ptr_end = ptr + total;
    ptr_last = ptr_end - esize;

    /* Round the bounds to the tag granule, and compute the number of tags. */
    tag_first = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
    tag_end = QEMU_ALIGN_UP(ptr_last, TAG_GRANULE);
    tag_count = (tag_end - tag_first) / TAG_GRANULE;

    /* Round the bounds to twice the tag granule, and compute the bytes. */
    tag_byte_first = QEMU_ALIGN_DOWN(ptr, 2 * TAG_GRANULE);
    tag_byte_end = QEMU_ALIGN_UP(ptr_last, 2 * TAG_GRANULE);

    /* Locate the page boundaries. */
    prev_page = ptr & TARGET_PAGE_MASK;
    next_page = prev_page + TARGET_PAGE_SIZE;

    if (likely(tag_end - prev_page <= TARGET_PAGE_SIZE)) {
        /* Memory access stays on one page. */
        tag_size = (tag_byte_end - tag_byte_first) / (2 * TAG_GRANULE);
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, total,
                                  MMU_DATA_LOAD, tag_size, ra);
        if (!mem1) {
            goto done;
        }
        /* Perform all of the comparisons. */
        n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, tag_count);
    } else {
        /* Memory access crosses to next page. */
        tag_size = (next_page - tag_byte_first) / (2 * TAG_GRANULE);
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, next_page - ptr,
                                  MMU_DATA_LOAD, tag_size, ra);

        tag_size = (tag_byte_end - next_page) / (2 * TAG_GRANULE);
        mem2 = allocation_tag_mem(env, mmu_idx, next_page, type,
                                  ptr_end - next_page,
                                  MMU_DATA_LOAD, tag_size, ra);

        /*
         * Perform all of the comparisons.
         * Note the possible but unlikely case of the operation spanning
         * two pages that do not both have tagging enabled.
         */
        n = c = (next_page - tag_first) / TAG_GRANULE;
        if (mem1) {
            n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, c);
        }
        if (n == c) {
            if (!mem2) {
                goto done;
            }
            n += checkN(mem2, 0, ptr_tag, tag_count - c);
        }
    }

    /*
     * If we failed, we know which granule.  Compute the element that
     * is first in that granule, and signal failure on that element.
     */
    if (unlikely(n < tag_count)) {
        uint64_t fail_ofs;

        fail_ofs = tag_first + n * TAG_GRANULE - ptr;
        fail_ofs = ROUND_UP(fail_ofs, esize);
        mte_check_fail(env, desc, ptr + fail_ofs, ra);
    }

 done:
    return useronly_clean_ptr(ptr);
}
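
/*
 * Worked example of the bounds above: for four 4-byte elements at
 * ptr = 0x1008 (esize = 4, total = 16), ptr_last = 0x1014, so
 * tag_first = 0x1000 and tag_end = 0x1020 give tag_count = 2 within a
 * single tag byte (tag_size = 1).  If only the first granule matches
 * (n = 1), fail_ofs = 0x1000 + 16 - 0x1008 = 8, and the failure is
 * reported at 0x1010, the first element in the failing granule.
 */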

uint64_t HELPER(mte_checkN)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    return mte_checkN(env, desc, ptr, GETPC());
}

/*
 * Perform an MTE checked access for DC_ZVA.
 */
uint64_t HELPER(mte_check_zva)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    uintptr_t ra = GETPC();
    int log2_dcz_bytes, log2_tag_bytes;
    int mmu_idx, bit55;
    intptr_t dcz_bytes, tag_bytes, i;
    void *mem;
    uint64_t ptr_tag, mem_tag, align_ptr;

    bit55 = extract64(ptr, 55, 1);

    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return ptr;
    }

    ptr_tag = allocation_tag_from_addr(ptr);

    if (tcma_check(desc, bit55, ptr_tag)) {
        goto done;
    }

    /*
     * In arm_cpu_realizefn, we asserted that dcz > LOG2_TAG_GRANULE+1,
     * i.e. 32 bytes, which is an unreasonably small dcz anyway,
     * to make sure that we can access one complete tag byte here.
     */
    log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
    log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
    dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
    tag_bytes = (intptr_t)1 << log2_tag_bytes;
    align_ptr = ptr & -dcz_bytes;

    /*
     * Trap if accessing an invalid page.  DC_ZVA requires that we supply
     * the original pointer for an invalid page.  But watchpoints require
     * that we probe the actual space.  So do both.
     */
    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    (void) probe_write(env, ptr, 1, mmu_idx, ra);
    mem = allocation_tag_mem(env, mmu_idx, align_ptr, MMU_DATA_STORE,
                             dcz_bytes, MMU_DATA_LOAD, tag_bytes, ra);
    if (!mem) {
        goto done;
    }

    /*
     * Unlike the reasoning for checkN, DC_ZVA is always aligned, and thus
     * it is quite easy to perform all of the comparisons at once without
     * any extra masking.
     *
     * The most common zva block size is 64; some of the thunderx cpus use
     * a block size of 128.  For user-only, aarch64_max_initfn will set the
     * block size to 512.  Fill out the other cases for future-proofing.
     *
     * In order to be able to find the first miscompare later, we want the
     * tag bytes to be in little-endian order.
     */
    switch (log2_tag_bytes) {
    case 0: /* zva_blocksize 32 */
        mem_tag = *(uint8_t *)mem;
        ptr_tag *= 0x11u;
        break;
    case 1: /* zva_blocksize 64 */
        mem_tag = cpu_to_le16(*(uint16_t *)mem);
        ptr_tag *= 0x1111u;
        break;
    case 2: /* zva_blocksize 128 */
        mem_tag = cpu_to_le32(*(uint32_t *)mem);
        ptr_tag *= 0x11111111u;
        break;
    case 3: /* zva_blocksize 256 */
        mem_tag = cpu_to_le64(*(uint64_t *)mem);
        ptr_tag *= 0x1111111111111111ull;
        break;

    default: /* zva_blocksize 512, 1024, 2048 */
        ptr_tag *= 0x1111111111111111ull;
        i = 0;
        do {
            mem_tag = cpu_to_le64(*(uint64_t *)(mem + i));
            if (unlikely(mem_tag != ptr_tag)) {
                goto fail;
            }
            i += 8;
            align_ptr += 16 * TAG_GRANULE;
        } while (i < tag_bytes);
        goto done;
    }

    if (likely(mem_tag == ptr_tag)) {
        goto done;
    }

 fail:
    /* Locate the first nibble that differs. */
    i = ctz64(mem_tag ^ ptr_tag) >> 4;
    mte_check_fail(env, desc, align_ptr + i * TAG_GRANULE, ra);

 done:
    return useronly_clean_ptr(ptr);
}