/*
 * ARM v8.5-A Memory Tagging Extension (MTE) helpers.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/ram_addr.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"


/*
 * Implement the ChooseNonExcludedTag pseudocode: starting from @tag,
 * advance @offset times to the next tag whose bit is clear in @exclude
 * (@offset == 0 means the first non-excluded tag at or after @tag).
 * If all sixteen tags are excluded, the result is tag 0.
 */
static int choose_nonexcluded_tag(int tag, int offset, uint16_t exclude)
{
    if (exclude == 0xffff) {
        return 0;
    }
    if (offset == 0) {
        while (exclude & (1 << tag)) {
            tag = (tag + 1) & 15;
        }
    } else {
        do {
            do {
                tag = (tag + 1) & 15;
            } while (exclude & (1 << tag));
        } while (--offset > 0);
    }
    return tag;
}

/*
 * allocation_tag_mem:
 * @env: the cpu environment
 * @ptr_mmu_idx: the addressing regime to use for the virtual address
 * @ptr: the virtual address for which to look up tag memory
 * @ptr_access: the access to use for the virtual address
 * @ptr_size: the number of bytes in the normal memory access
 * @tag_access: the access to use for the tag memory
 * @tag_size: the number of bytes in the tag memory access
 * @ra: the return address for exception handling
 *
 * Tag memory is stored as a sequence of nibbles: the byte at
 * (paddr >> (LOG2_TAG_GRANULE + 1)) holds the tag for the lower
 * TAG_GRANULE in bits [3:0] and for the higher TAG_GRANULE in [7:4].
 *
 * Resolve the virtual address and return a host pointer to the first
 * tag byte, or NULL if the page has no tag storage.  Exit via
 * exception if the virtual address is not accessible for @ptr_access.
 */
static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
                                   uint64_t ptr, MMUAccessType ptr_access,
                                   int ptr_size, MMUAccessType tag_access,
                                   int tag_size, uintptr_t ra)
{
#ifdef CONFIG_USER_ONLY
    uint64_t clean_ptr = useronly_clean_ptr(ptr);
    int flags = page_get_flags(clean_ptr);
    uint8_t *tags;
    uintptr_t index;

    if (!(flags & (ptr_access == MMU_DATA_STORE ? PAGE_WRITE_ORG : PAGE_READ))) {
        cpu_loop_exit_sigsegv(env_cpu(env), ptr, ptr_access,
                              !(flags & PAGE_VALID), ra);
    }

    /* Require both MAP_ANONYMOUS and PROT_MTE for the page. */
    if (!(flags & PAGE_ANON) || !(flags & PAGE_MTE)) {
        return NULL;
    }

    tags = page_get_target_data(clean_ptr);
    if (tags == NULL) {
        size_t alloc_size = TARGET_PAGE_SIZE >> (LOG2_TAG_GRANULE + 1);
        tags = page_alloc_target_data(clean_ptr, alloc_size);
        assert(tags != NULL);
    }

    index = extract32(ptr, LOG2_TAG_GRANULE + 1,
                      TARGET_PAGE_BITS - LOG2_TAG_GRANULE - 1);
    return tags + index;
#else
    uintptr_t index;
    CPUIOTLBEntry *iotlbentry;
    int in_page, flags;
    ram_addr_t ptr_ra;
    hwaddr ptr_paddr, tag_paddr, xlat;
    MemoryRegion *mr;
    ARMASIdx tag_asi;
    AddressSpace *tag_as;
    void *host;

    /*
     * Probe the first byte of the virtual access.  This raises an
     * exception for inaccessible pages, and resolves the virtual
     * address into the softmmu tlb.  When @ra == 0 this is a
     * non-faulting probe, so assert that the page turned out valid.
     */
    flags = probe_access_flags(env, ptr, ptr_access, ptr_mmu_idx,
                               ra == 0, &host, ra);
    assert(!(flags & TLB_INVALID_MASK));

    /*
     * Find the iotlbentry for ptr.  This *must* be present in the TLB
     * because we just found the mapping above.
     */
    index = tlb_index(env, ptr_mmu_idx, ptr);
# ifdef CONFIG_DEBUG_TCG
    {
        CPUTLBEntry *entry = tlb_entry(env, ptr_mmu_idx, ptr);
        target_ulong comparator = (ptr_access == MMU_DATA_LOAD
                                   ? entry->addr_read
                                   : tlb_addr_write(entry));
        g_assert(tlb_hit(comparator, ptr));
    }
# endif
    iotlbentry = &env_tlb(env)->d[ptr_mmu_idx].iotlb[index];

    /* If the virtual page MemAttr != Tagged, access unchecked. */
    if (!arm_tlb_mte_tagged(&iotlbentry->attrs)) {
        return NULL;
    }

    /*
     * If not backed by host ram, there is no tag storage: access unchecked.
     * This is probably a guest os bug, so log it.
     */
    if (unlikely(flags & TLB_MMIO)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Page @ 0x%" PRIx64 " indicates Tagged Normal memory "
                      "but is not backed by host ram\n", ptr);
        return NULL;
    }

    /*
     * The normal memory access may extend onto the following page;
     * if so, probe that page as well so that any fault there is
     * raised now, before we touch tag memory.
     */
    in_page = -(ptr | TARGET_PAGE_MASK);
    if (unlikely(ptr_size > in_page)) {
        void *ignore;
        flags |= probe_access_flags(env, ptr + in_page, ptr_access,
                                    ptr_mmu_idx, ra == 0, &ignore, ra);
        assert(!(flags & TLB_INVALID_MASK));
    }

    /* Any debug exception has priority over a tag check exception. */
    if (unlikely(flags & TLB_WATCHPOINT)) {
        int wp = ptr_access == MMU_DATA_LOAD ? BP_MEM_READ : BP_MEM_WRITE;
        assert(ra != 0);
        cpu_check_watchpoint(env_cpu(env), ptr, ptr_size,
                             iotlbentry->attrs, wp, ra);
    }

    /*
     * Find the physical address within the normal mem space.
     * The memory region lookup must succeed because TLB_MMIO was
     * not set in the cputlb lookup above.
     */
    mr = memory_region_from_host(host, &ptr_ra);
    tcg_debug_assert(mr != NULL);
    tcg_debug_assert(memory_region_is_ram(mr));
    ptr_paddr = ptr_ra;
    do {
        ptr_paddr += mr->addr;
        mr = mr->container;
    } while (mr);

    /* Convert to the physical address in tag space. */
    tag_paddr = ptr_paddr >> (LOG2_TAG_GRANULE + 1);

    /* Look up the address in tag space. */
    tag_asi = iotlbentry->attrs.secure ? ARMASIdx_TagS : ARMASIdx_TagNS;
    tag_as = cpu_get_address_space(env_cpu(env), tag_asi);
    mr = address_space_translate(tag_as, tag_paddr, &xlat, NULL,
                                 tag_access == MMU_DATA_STORE,
                                 iotlbentry->attrs);

    /*
     * Note that @mr will never be NULL.  If there is nothing in the
     * address space at @tag_paddr, the translation returns the
     * unassigned memory region.  For our purposes, the result must be ram.
     */
    if (unlikely(!memory_region_is_ram(mr))) {
        /* Failure here is a board configuration error. */
        qemu_log_mask(LOG_UNIMP,
                      "Tag Memory @ 0x%" HWADDR_PRIx " not found for "
                      "Normal Memory @ 0x%" HWADDR_PRIx "\n",
                      tag_paddr, ptr_paddr);
        return NULL;
    }

    /*
     * Ensure the tag memory is dirty on write, for migration.
     * Tag memory can never contain code or display memory (vga).
     */
    if (tag_access == MMU_DATA_STORE) {
        ram_addr_t tag_ra = memory_region_get_ram_addr(mr) + xlat;
        cpu_physical_memory_set_dirty_flag(tag_ra, DIRTY_MEMORY_MIGRATION);
    }

    return memory_region_get_ram_ptr(mr) + xlat;
#endif
}

uint64_t HELPER(irg)(CPUARMState *env, uint64_t rn, uint64_t rm)
{
    uint16_t exclude = extract32(rm | env->cp15.gcr_el1, 0, 16);
    int rrnd = extract32(env->cp15.gcr_el1, 16, 1);
    int start = extract32(env->cp15.rgsr_el1, 0, 4);
    int seed = extract32(env->cp15.rgsr_el1, 8, 16);
    int offset, i, rtag;

    /*
     * Our IMPDEF choice for GCR_EL1.RRND == 1 is to continue to use the
     * deterministic RandomTag algorithm.  With RRND == 1 the OS need not
     * have seeded RGSR_EL1, so if the seed is still zero, initialize it
     * from the crypto subsystem here.
     */
    if (unlikely(seed == 0) && rrnd) {
        do {
            Error *err = NULL;
            uint16_t two;

            if (qemu_guest_getrandom(&two, sizeof(two), &err) < 0) {
                /*
                 * Failed, for unknown reasons in the crypto subsystem.
                 * Best we can do is log the failure and use a non-zero
                 * constant seed.
                 */
                qemu_log_mask(LOG_UNIMP, "IRG: Crypto failure: %s\n",
                              error_get_pretty(err));
                error_free(err);
                two = 1;
            }
            seed = two;
        } while (seed == 0);
    }

    /* RandomTag */
    for (i = offset = 0; i < 4; ++i) {
        /* NextRandomTagBit */
        int top = (extract32(seed, 5, 1) ^ extract32(seed, 3, 1) ^
                   extract32(seed, 2, 1) ^ extract32(seed, 0, 1));
        seed = (top << 15) | (seed >> 1);
        offset |= top << i;
    }
    rtag = choose_nonexcluded_tag(start, offset, exclude);
    env->cp15.rgsr_el1 = rtag | (seed << 8);

    return address_with_allocation_tag(rn, rtag);
}

uint64_t HELPER(addsubg)(CPUARMState *env, uint64_t ptr,
                         int32_t offset, uint32_t tag_offset)
{
    int start_tag = allocation_tag_from_addr(ptr);
    uint16_t exclude = extract32(env->cp15.gcr_el1, 0, 16);
    int rtag = choose_nonexcluded_tag(start_tag, tag_offset, exclude);

    return address_with_allocation_tag(ptr + offset, rtag);
}

static int load_tag1(uint64_t ptr, uint8_t *mem)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    return extract32(*mem, ofs, 4);
}

uint64_t HELPER(ldg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uint8_t *mem;
    int rtag = 0;

    /* Trap if accessing an invalid page. */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD, 1,
                             MMU_DATA_LOAD, 1, GETPC());

    /* Load if the page supports tags; otherwise the result tag is 0. */
    if (mem) {
        rtag = load_tag1(ptr, mem);
    }

    return address_with_allocation_tag(xt, rtag);
}

static void check_tag_aligned(CPUARMState *env, uint64_t ptr, uintptr_t ra)
{
    if (unlikely(!QEMU_IS_ALIGNED(ptr, TAG_GRANULE))) {
        arm_cpu_do_unaligned_access(env_cpu(env), ptr, MMU_DATA_STORE,
                                    cpu_mmu_index(env, false), ra);
        g_assert_not_reached();
    }
}

/* For use in a non-parallel context, store to the given nibble. */
static void store_tag1(uint64_t ptr, uint8_t *mem, int tag)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    *mem = deposit32(*mem, ofs, 4, tag);
}

/* For use in a parallel context, atomically store to the given nibble. */
static void store_tag1_parallel(uint64_t ptr, uint8_t *mem, int tag)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    uint8_t old = qatomic_read(mem);

    while (1) {
        uint8_t new = deposit32(old, ofs, 4, tag);
        uint8_t cmp = qatomic_cmpxchg(mem, old, new);
        if (likely(cmp == old)) {
            return;
        }
        old = cmp;
    }
}

typedef void stg_store1(uint64_t, uint8_t *, int);

static inline void do_stg(CPUARMState *env, uint64_t ptr, uint64_t xt,
                          uintptr_t ra, stg_store1 store1)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uint8_t *mem;

    check_tag_aligned(env, ptr, ra);

    /* Trap if accessing an invalid page. */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, TAG_GRANULE,
                             MMU_DATA_STORE, 1, ra);

    /* Store if the page supports tags. */
    if (mem) {
        store1(ptr, mem, allocation_tag_from_addr(xt));
    }
}

void HELPER(stg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_stg(env, ptr, xt, GETPC(), store_tag1);
}

void HELPER(stg_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_stg(env, ptr, xt, GETPC(), store_tag1_parallel);
}

void HELPER(stg_stub)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();

    check_tag_aligned(env, ptr, ra);
    probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
}

static inline void do_st2g(CPUARMState *env, uint64_t ptr, uint64_t xt,
                           uintptr_t ra, stg_store1 store1)
{
    int mmu_idx = cpu_mmu_index(env, false);
    int tag = allocation_tag_from_addr(xt);
    uint8_t *mem1, *mem2;

    check_tag_aligned(env, ptr, ra);

    /*
     * Trap if accessing an invalid page(s); this takes priority over
     * whether either page actually has tag storage.
     */
    if (ptr & TAG_GRANULE) {
        /* Two stores unaligned mod TAG_GRANULE*2 -- modify two bytes. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  TAG_GRANULE, MMU_DATA_STORE, 1, ra);
        mem2 = allocation_tag_mem(env, mmu_idx, ptr + TAG_GRANULE,
                                  MMU_DATA_STORE, TAG_GRANULE,
                                  MMU_DATA_STORE, 1, ra);

        /* Store if the page(s) support tags. */
        if (mem1) {
            store1(TAG_GRANULE, mem1, tag);
        }
        if (mem2) {
            store1(0, mem2, tag);
        }
    } else {
        /* Two stores aligned mod TAG_GRANULE*2 -- modify one byte. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  2 * TAG_GRANULE, MMU_DATA_STORE, 1, ra);
        if (mem1) {
            tag |= tag << 4;
            qatomic_set(mem1, tag);
        }
    }
}

void HELPER(st2g)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_st2g(env, ptr, xt, GETPC(), store_tag1);
}

void HELPER(st2g_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_st2g(env, ptr, xt, GETPC(), store_tag1_parallel);
}

void HELPER(st2g_stub)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    int in_page = -(ptr | TARGET_PAGE_MASK);

    check_tag_aligned(env, ptr, ra);

    if (likely(in_page >= 2 * TAG_GRANULE)) {
        probe_write(env, ptr, 2 * TAG_GRANULE, mmu_idx, ra);
    } else {
        probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
        probe_write(env, ptr + TAG_GRANULE, TAG_GRANULE, mmu_idx, ra);
    }
}

#define LDGM_STGM_SIZE (4 << GMID_EL1_BS)

uint64_t HELPER(ldgm)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    void *tag_mem;

    ptr = QEMU_ALIGN_DOWN(ptr, LDGM_STGM_SIZE);

    /* Trap if accessing an invalid page. */
    tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD,
                                 LDGM_STGM_SIZE, MMU_DATA_LOAD,
                                 LDGM_STGM_SIZE / (2 * TAG_GRANULE), ra);

    /* The tag is squashed to zero if the page does not support tags. */
    if (!tag_mem) {
        return 0;
    }

    QEMU_BUILD_BUG_ON(GMID_EL1_BS != 6);
    /*
     * We are loading 64 bits worth of tag nibbles; the ordering of the
     * elements within the word corresponds to a little-endian operation.
     */
    return ldq_le_p(tag_mem);
}

void HELPER(stgm)(CPUARMState *env, uint64_t ptr, uint64_t val)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    void *tag_mem;

    ptr = QEMU_ALIGN_DOWN(ptr, LDGM_STGM_SIZE);

    /* Trap if accessing an invalid page. */
    tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                 LDGM_STGM_SIZE, MMU_DATA_LOAD,
                                 LDGM_STGM_SIZE / (2 * TAG_GRANULE), ra);

    /* The store only happens if the page supports tags. */
    if (!tag_mem) {
        return;
    }

    QEMU_BUILD_BUG_ON(GMID_EL1_BS != 6);
    /*
     * We are storing 64 bits worth of tag nibbles; the ordering of the
     * elements within the word corresponds to a little-endian operation.
     */
    stq_le_p(tag_mem, val);
}

void HELPER(stzgm_tags)(CPUARMState *env, uint64_t ptr, uint64_t val)
{
    uintptr_t ra = GETPC();
    int mmu_idx = cpu_mmu_index(env, false);
    int log2_dcz_bytes, log2_tag_bytes;
    intptr_t dcz_bytes, tag_bytes;
    uint8_t *mem;

    /*
     * dcz_blocksize is encoded in log2(words), so +2 converts to
     * log2(bytes).  With MTE the block size is at least 2 * TAG_GRANULE
     * (32 bytes), so the block always covers whole tag bytes.
     */
    log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
    log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
    dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
    tag_bytes = (intptr_t)1 << log2_tag_bytes;
    ptr &= -dcz_bytes;

    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, dcz_bytes,
                             MMU_DATA_STORE, tag_bytes, ra);
    if (mem) {
        int tag_pair = (val & 0xf) * 0x11;
        memset(mem, tag_pair, tag_bytes);
    }
}

static void mte_sync_check_fail(CPUARMState *env, uint32_t desc,
                                uint64_t dirty_ptr, uintptr_t ra)
{
    int is_write, syn;

    env->exception.vaddress = dirty_ptr;

    is_write = FIELD_EX32(desc, MTEDESC, WRITE);
    syn = syn_data_abort_no_iss(arm_current_el(env) != 0, 0, 0, 0, 0, is_write,
                                0x11);
    raise_exception_ra(env, EXCP_DATA_ABORT, syn, exception_target_el(env), ra);
    g_assert_not_reached();
}

static void mte_async_check_fail(CPUARMState *env, uint64_t dirty_ptr,
                                 uintptr_t ra, ARMMMUIdx arm_mmu_idx, int el)
{
    int select;

    if (regime_has_2_ranges(arm_mmu_idx)) {
        select = extract64(dirty_ptr, 55, 1);
    } else {
        select = 0;
    }
    env->cp15.tfsr_el[el] |= 1 << select;
#ifdef CONFIG_USER_ONLY
    /*
     * There is no EL1 here to poll TFSR, so kick the cpu back to the
     * main loop at the end of the TB; the pending asynchronous fault
     * is then reported to the guest without waiting for the next
     * syscall.
     */
    qemu_cpu_kick(env_cpu(env));
#endif
}

/* Record a tag check failure. */
static void mte_check_fail(CPUARMState *env, uint32_t desc,
                           uint64_t dirty_ptr, uintptr_t ra)
{
    int mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    ARMMMUIdx arm_mmu_idx = core_to_aa64_mmu_idx(mmu_idx);
    int el, reg_el, tcf;
    uint64_t sctlr;

    reg_el = regime_el(env, arm_mmu_idx);
    sctlr = env->cp15.sctlr_el[reg_el];

    switch (arm_mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E20_0:
        el = 0;
        tcf = extract64(sctlr, 38, 2);   /* SCTLR_ELx.TCF0 */
        break;
    default:
        el = reg_el;
        tcf = extract64(sctlr, 40, 2);   /* SCTLR_ELx.TCF */
    }

    switch (tcf) {
    case 1:
        /* Tag check fail causes a synchronous exception. */
        mte_sync_check_fail(env, desc, dirty_ptr, ra);
        break;

    case 0:
        /*
         * Tag check fail does not affect the PE.
         * We eliminate this case by not setting MTE_ACTIVE
         * in tb_flags, so that we never make this runtime call.
         */
        g_assert_not_reached();

    case 2:
        /* Tag check fail causes asynchronous flag set. */
        mte_async_check_fail(env, dirty_ptr, ra, arm_mmu_idx, el);
        break;

    case 3:
        /*
         * Tag check fail causes asynchronous flag set for stores, or
         * a synchronous exception for loads.
         */
        if (FIELD_EX32(desc, MTEDESC, WRITE)) {
            mte_async_check_fail(env, dirty_ptr, ra, arm_mmu_idx, el);
        } else {
            mte_sync_check_fail(env, desc, dirty_ptr, ra);
        }
        break;
    }
}

/*
 * checkN:
 * @mem: tag memory to test
 * @odd: true to begin testing at the odd nibble of the first byte
 * @cmp: the tag to compare against
 * @count: number of granules to test
 *
 * Return the number of granules whose tag matched @cmp; a result less
 * than @count means the check failed at that granule.
 */
static int checkN(uint8_t *mem, int odd, int cmp, int count)
{
    int n = 0, diff;

    /* Replicate the test tag into both nibbles and compare. */
    cmp *= 0x11;
    diff = *mem++ ^ cmp;

    if (odd) {
        goto start_odd;
    }

    while (1) {
        /* Test even tag. */
        if (unlikely(diff & 0x0f)) {
            break;
        }
        if (++n == count) {
            break;
        }

 start_odd:
        /* Test odd tag. */
        if (unlikely(diff & 0xf0)) {
            break;
        }
        if (++n == count) {
            break;
        }

        diff = *mem++ ^ cmp;
    }
    return n;
}

/*
 * mte_probe_int() - helper for mte_probe and mte_check
 * @env: CPU environment
 * @desc: MTEDESC descriptor
 * @ptr: virtual address of start of access
 * @ra: host return address, or 0 for a non-faulting probe
 * @fault: return virtual address of the first check failure
 *
 * Return zero on tag check failure, filling in *fault.
 * Return negative on trivial success when TBI is disabled.
 * Return positive on success, including unchecked accesses.
 */
static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
                         uintptr_t ra, uint64_t *fault)
{
    int mmu_idx, ptr_tag, bit55;
    uint64_t ptr_last, prev_page, next_page;
    uint64_t tag_first, tag_last;
    uint64_t tag_byte_first, tag_byte_last;
    uint32_t sizem1, tag_count, tag_size, n, c;
    uint8_t *mem1, *mem2;
    MMUAccessType type;

    bit55 = extract64(ptr, 55, 1);
    *fault = ptr;

    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return -1;
    }

    ptr_tag = allocation_tag_from_addr(ptr);

    if (tcma_check(desc, bit55, ptr_tag)) {
        return 1;
    }

    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD;
    sizem1 = FIELD_EX32(desc, MTEDESC, SIZEM1);

    /* Find the addr of the end of the access. */
    ptr_last = ptr + sizem1;

    /* Round the bounds to the tag granule, and compute the number of tags. */
    tag_first = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
    tag_last = QEMU_ALIGN_DOWN(ptr_last, TAG_GRANULE);
    tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1;

    /* Round the bounds to twice the tag granule, for the tag bytes. */
    tag_byte_first = QEMU_ALIGN_DOWN(ptr, 2 * TAG_GRANULE);
    tag_byte_last = QEMU_ALIGN_DOWN(ptr_last, 2 * TAG_GRANULE);

    /* Locate the page boundaries. */
    prev_page = ptr & TARGET_PAGE_MASK;
    next_page = prev_page + TARGET_PAGE_SIZE;

    if (likely(tag_last - prev_page < TARGET_PAGE_SIZE)) {
        /* Memory access stays on one page. */
        tag_size = ((tag_byte_last - tag_byte_first) / (2 * TAG_GRANULE)) + 1;
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, sizem1 + 1,
                                  MMU_DATA_LOAD, tag_size, ra);
        if (!mem1) {
            return 1;
        }
        /* Perform all of the comparisons. */
        n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, tag_count);
    } else {
        /* Memory access crosses to next page. */
        tag_size = (next_page - tag_byte_first) / (2 * TAG_GRANULE);
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, next_page - ptr,
                                  MMU_DATA_LOAD, tag_size, ra);

        tag_size = ((tag_byte_last - next_page) / (2 * TAG_GRANULE)) + 1;
        mem2 = allocation_tag_mem(env, mmu_idx, next_page, type,
                                  ptr_last - next_page + 1,
                                  MMU_DATA_LOAD, tag_size, ra);

        /*
         * Perform all of the comparisons.
         * Note the possible but unlikely case of allocation_tag_mem
         * returning NULL for either page.
         */
        n = c = (next_page - tag_first) / TAG_GRANULE;
        if (mem1) {
            n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, c);
        }
        if (n == c) {
            if (!mem2) {
                return 1;
            }
            n += checkN(mem2, 0, ptr_tag, tag_count - c);
        }
    }

    if (likely(n == tag_count)) {
        return 1;
    }

    /*
     * If we failed, we know which granule.  For the first granule, the
     * failure address is @ptr, the first byte accessed.  Otherwise the
     * failure address is the first byte of the nth granule.
     */
    if (n > 0) {
        *fault = tag_first + n * TAG_GRANULE;
    }
    return 0;
}

uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra)
{
    uint64_t fault;
    int ret = mte_probe_int(env, desc, ptr, ra, &fault);

    if (unlikely(ret == 0)) {
        mte_check_fail(env, desc, fault, ra);
    } else if (ret < 0) {
        return ptr;
    }
    return useronly_clean_ptr(ptr);
}

uint64_t HELPER(mte_check)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    return mte_check(env, desc, ptr, GETPC());
}

/*
 * Non-faulting version of mte_check.  Returns false if the access is
 * Checked and the tag check failed; this only probes the tags, so the
 * validity of the page must have been established beforehand.
 */
bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    uint64_t fault;
    int ret = mte_probe_int(env, desc, ptr, 0, &fault);

    return ret != 0;
}

/*
 * Perform an MTE checked access for DC_ZVA.
 */
uint64_t HELPER(mte_check_zva)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    uintptr_t ra = GETPC();
    int log2_dcz_bytes, log2_tag_bytes;
    int mmu_idx, bit55;
    intptr_t dcz_bytes, tag_bytes, i;
    void *mem;
    uint64_t ptr_tag, mem_tag, align_ptr;

    bit55 = extract64(ptr, 55, 1);

    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return ptr;
    }

    ptr_tag = allocation_tag_from_addr(ptr);

    if (tcma_check(desc, bit55, ptr_tag)) {
        goto done;
    }

    /*
     * dcz_blocksize is encoded in log2(words), so +2 converts to
     * log2(bytes).  With MTE the block size is at least 2 * TAG_GRANULE,
     * so the block always covers at least one complete tag byte.
     */
    log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
    log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
    dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
    tag_bytes = (intptr_t)1 << log2_tag_bytes;
    align_ptr = ptr & -dcz_bytes;

    /*
     * Trap if accessing an invalid page: probe the original address for
     * write so that any fault reports @ptr, then look up tag storage for
     * the whole aligned block (reading the tags, hence MMU_DATA_LOAD for
     * the tag access).
     */
    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    (void) probe_write(env, ptr, 1, mmu_idx, ra);
    mem = allocation_tag_mem(env, mmu_idx, align_ptr, MMU_DATA_STORE,
                             dcz_bytes, MMU_DATA_LOAD, tag_bytes, ra);
    if (!mem) {
        goto done;
    }

    /*
     * The number of tag bytes covered by one ZVA block depends on the
     * block size: 1, 2, 4 or 8 bytes are compared with a single
     * little-endian load against the pointer tag replicated into every
     * nibble; larger blocks are compared 8 tag bytes at a time.
     */
    switch (log2_tag_bytes) {
    case 0: /* 32-byte block: one tag byte. */
        mem_tag = *(uint8_t *)mem;
        ptr_tag *= 0x11u;
        break;
    case 1: /* 64-byte block: two tag bytes. */
        mem_tag = cpu_to_le16(*(uint16_t *)mem);
        ptr_tag *= 0x1111u;
        break;
    case 2: /* 128-byte block: four tag bytes. */
        mem_tag = cpu_to_le32(*(uint32_t *)mem);
        ptr_tag *= 0x11111111u;
        break;
    case 3: /* 256-byte block: eight tag bytes. */
        mem_tag = cpu_to_le64(*(uint64_t *)mem);
        ptr_tag *= 0x1111111111111111ull;
        break;

    default: /* Larger blocks: loop over the tag bytes, 8 at a time. */
        ptr_tag *= 0x1111111111111111ull;
        i = 0;
        do {
            mem_tag = cpu_to_le64(*(uint64_t *)(mem + i));
            if (unlikely(mem_tag != ptr_tag)) {
                goto fail;
            }
            i += 8;
            align_ptr += 16 * TAG_GRANULE;
        } while (i < tag_bytes);
        goto done;
    }

    if (likely(mem_tag == ptr_tag)) {
        goto done;
    }

 fail:
    /* Locate the first nibble that differs. */
    i = ctz64(mem_tag ^ ptr_tag) >> 4;
    mte_check_fail(env, desc, align_ptr + i * TAG_GRANULE, ra);

 done:
    return useronly_clean_ptr(ptr);
}