1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20#include "qemu/osdep.h"
21#include "cpu.h"
22#include "internals.h"
23#include "exec/exec-all.h"
24#include "exec/ram_addr.h"
25#include "exec/cpu_ldst.h"
26#include "exec/helper-proto.h"
27#include "qapi/error.h"
28#include "qemu/guest-random.h"
29
30
/*
 * Choose a logical allocation tag, starting from @tag and stepping
 * forward @offset non-excluded tags, wrapping modulo 16.  A tag whose
 * bit is set in @exclude is never chosen; if all 16 tags are excluded,
 * the result is 0.
 */
static int choose_nonexcluded_tag(int tag, int offset, uint16_t exclude)
{
    if (exclude == 0xffff) {
        return 0;
    }

    if (offset == 0) {
        /* Advance to the first non-excluded tag at or after @tag. */
        while (exclude & (1 << tag)) {
            tag = (tag + 1) & 15;
        }
        return tag;
    }

    /* Step past @offset non-excluded tags, beginning after @tag. */
    while (offset-- > 0) {
        do {
            tag = (tag + 1) & 15;
        } while (exclude & (1 << tag));
    }
    return tag;
}
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
/*
 * Return a pointer to the byte of allocation-tag storage that covers
 * the TAG_GRANULE of @ptr, or NULL if the page does not implement tag
 * storage -- in which case tag reads are as zero and tag writes are
 * ignored by the callers.
 *
 * @env: the cpu context
 * @ptr_mmu_idx: the addressing regime through which @ptr is accessed
 * @ptr: the virtual address whose tags are wanted
 * @ptr_access: the access type for the data at @ptr
 * @ptr_size: the size in bytes of the data access at @ptr
 * @tag_access: the access type for the tag storage itself
 * @tag_size: the number of tag-storage bytes required
 * @ra: the host return address for unwinding; 0 means the caller
 *      requires a non-faulting (probe-style) lookup
 */
static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
                                   uint64_t ptr, MMUAccessType ptr_access,
                                   int ptr_size, MMUAccessType tag_access,
                                   int tag_size, uintptr_t ra)
{
#ifdef CONFIG_USER_ONLY
    /* Tag storage is not modelled for user-only emulation. */
    return NULL;
#else
    uintptr_t index;
    CPUIOTLBEntry *iotlbentry;
    int in_page, flags;
    ram_addr_t ptr_ra;
    hwaddr ptr_paddr, tag_paddr, xlat;
    MemoryRegion *mr;
    ARMASIdx tag_asi;
    AddressSpace *tag_as;
    void *host;

    /*
     * Probe the first byte of the virtual access.  With @ra != 0 this
     * raises the guest exception for an invalid page; with @ra == 0
     * (nonfault) it must not, hence nonfault = (ra == 0).  Either way
     * the TLB entry for @ptr is filled on return.
     */
    flags = probe_access_flags(env, ptr, ptr_access, ptr_mmu_idx,
                               ra == 0, &host, ra);
    assert(!(flags & TLB_INVALID_MASK));

    /*
     * Find the iotlbentry for @ptr.  The probe above guarantees the
     * TLB entry exists and matches; the debug build double-checks
     * that assumption before using the entry's attributes.
     */
    index = tlb_index(env, ptr_mmu_idx, ptr);
# ifdef CONFIG_DEBUG_TCG
    {
        CPUTLBEntry *entry = tlb_entry(env, ptr_mmu_idx, ptr);
        target_ulong comparator = (ptr_access == MMU_DATA_LOAD
                                   ? entry->addr_read
                                   : tlb_addr_write(entry));
        g_assert(tlb_hit(comparator, ptr));
    }
# endif
    iotlbentry = &env_tlb(env)->d[ptr_mmu_idx].iotlb[index];

    /* If the page is not marked Tagged in the TLB attrs, access unchecked. */
    if (!arm_tlb_mte_tagged(&iotlbentry->attrs)) {
        return NULL;
    }

    /*
     * A Tagged page that is not backed by host ram cannot hold tags;
     * treat this guest configuration error as untagged rather than
     * crashing on the memory_region_from_host lookup below.
     */
    if (unlikely(flags & TLB_MMIO)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Page @ 0x%" PRIx64 " indicates Tagged Normal memory "
                      "but is not backed by host ram\n", ptr);
        return NULL;
    }

    /*
     * If the data access spans onto the next page, probe that page too
     * so that any fault or watchpoint there is reported now.  in_page
     * is the number of bytes remaining on the current page.
     */
    in_page = -(ptr | TARGET_PAGE_MASK);
    if (unlikely(ptr_size > in_page)) {
        void *ignore;
        flags |= probe_access_flags(env, ptr + in_page, ptr_access,
                                    ptr_mmu_idx, ra == 0, &ignore, ra);
        assert(!(flags & TLB_INVALID_MASK));
    }

    /* Any debug exception for the data access has priority over the tag. */
    if (unlikely(flags & TLB_WATCHPOINT)) {
        int wp = ptr_access == MMU_DATA_LOAD ? BP_MEM_READ : BP_MEM_WRITE;
        assert(ra != 0);
        cpu_check_watchpoint(env_cpu(env), ptr, ptr_size,
                             iotlbentry->attrs, wp, ra);
    }

    /*
     * Convert the host pointer back to a guest physical address by
     * walking up the MemoryRegion containers, accumulating offsets.
     */
    mr = memory_region_from_host(host, &ptr_ra);
    tcg_debug_assert(mr != NULL);
    tcg_debug_assert(memory_region_is_ram(mr));
    ptr_paddr = ptr_ra;
    do {
        ptr_paddr += mr->addr;
        mr = mr->container;
    } while (mr);

    /* One tag byte covers two TAG_GRANULEs of data. */
    tag_paddr = ptr_paddr >> (LOG2_TAG_GRANULE + 1);

    /* Look up the tag storage address space for this physical address. */
    tag_asi = iotlbentry->attrs.secure ? ARMASIdx_TagS : ARMASIdx_TagNS;
    tag_as = cpu_get_address_space(env_cpu(env), tag_asi);
    mr = address_space_translate(tag_as, tag_paddr, &xlat, NULL,
                                 tag_access == MMU_DATA_STORE,
                                 iotlbentry->attrs);

    /*
     * The board may not have tag memory covering this region; in that
     * case log it (a board-configuration gap, not a guest error) and
     * behave as if the page were untagged.
     */
    if (unlikely(!memory_region_is_ram(mr))) {
        /* ??? Failure is a board configuration error. */
        qemu_log_mask(LOG_UNIMP,
                      "Tag Memory @ 0x%" HWADDR_PRIx " not found for "
                      "Normal Memory @ 0x%" HWADDR_PRIx "\n",
                      tag_paddr, ptr_paddr);
        return NULL;
    }

    /*
     * When storing tags, mark the tag RAM dirty for migration so the
     * modified tag bytes are transferred to the destination.
     */
    if (tag_access == MMU_DATA_STORE) {
        ram_addr_t tag_ra = memory_region_get_ram_addr(mr) + xlat;
        cpu_physical_memory_set_dirty_flag(tag_ra, DIRTY_MEMORY_MIGRATION);
    }

    return memory_region_get_ram_ptr(mr) + xlat;
#endif
}
213
/*
 * IRG: insert a random allocation tag into @rn, excluding the tags
 * listed in @rm and GCR_EL1.Exclude, using the LFSR seed held in
 * RGSR_EL1.SEED as the architectural RandomTag algorithm does.
 */
uint64_t HELPER(irg)(CPUARMState *env, uint64_t rn, uint64_t rm)
{
    uint16_t exclude = extract32(rm | env->cp15.gcr_el1, 0, 16);
    int rrnd = extract32(env->cp15.gcr_el1, 16, 1);
    int start = extract32(env->cp15.rgsr_el1, 0, 4);
    int seed = extract32(env->cp15.rgsr_el1, 8, 16);
    int offset, i, rtag;

    /*
     * With GCR_EL1.RRND set, (re)initialize a zero seed from the host
     * random source, then continue with the deterministic algorithm
     * below.  The seed must end up non-zero (an LFSR state of zero
     * never advances), hence the loop.
     */
    if (unlikely(seed == 0) && rrnd) {
        do {
            Error *err = NULL;
            uint16_t two;

            if (qemu_guest_getrandom(&two, sizeof(two), &err) < 0) {
                /*
                 * Failed, for unknown reasons in the crypto subsystem.
                 * Best we can do is log it and fall back to a non-zero
                 * deterministic seed.
                 */
                qemu_log_mask(LOG_UNIMP, "IRG: Crypto failure: %s\n",
                              error_get_pretty(err));
                error_free(err);
                two = 1;
            }
            seed = two;
        } while (seed == 0);
    }

    /* RandomTag: derive a 4-bit offset from four LFSR steps. */
    for (i = offset = 0; i < 4; ++i) {
        /* NextRandomTagBit: x^16 + x^13 + x^12 + x^10 feedback taps. */
        int top = (extract32(seed, 5, 1) ^ extract32(seed, 3, 1) ^
                   extract32(seed, 2, 1) ^ extract32(seed, 0, 1));
        seed = (top << 15) | (seed >> 1);
        offset |= top << i;
    }
    rtag = choose_nonexcluded_tag(start, offset, exclude);
    /* Write back the chosen tag and advanced seed to RGSR_EL1. */
    env->cp15.rgsr_el1 = rtag | (seed << 8);

    return address_with_allocation_tag(rn, rtag);
}
261
/*
 * ADDG/SUBG: add the signed @offset to @ptr and replace its logical
 * tag with the one @tag_offset non-excluded tags after the current
 * tag, per GCR_EL1.Exclude.
 */
uint64_t HELPER(addsubg)(CPUARMState *env, uint64_t ptr,
                         int32_t offset, uint32_t tag_offset)
{
    int start_tag = allocation_tag_from_addr(ptr);
    uint16_t exclude = extract32(env->cp15.gcr_el1, 0, 16);
    int rtag = choose_nonexcluded_tag(start_tag, tag_offset, exclude);

    return address_with_allocation_tag(ptr + offset, rtag);
}
271
272static int load_tag1(uint64_t ptr, uint8_t *mem)
273{
274 int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
275 return extract32(*mem, ofs, 4);
276}
277
/*
 * LDG: load the allocation tag for @ptr and insert it into @xt.
 * Pages without tag storage read as tag 0.
 */
uint64_t HELPER(ldg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uint8_t *mem;
    int rtag = 0;

    /* Trap if accessing an invalid page. */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD, 1,
                             MMU_DATA_LOAD, 1, GETPC());

    /* Load if the page supports tags. */
    if (mem) {
        rtag = load_tag1(ptr, mem);
    }

    return address_with_allocation_tag(xt, rtag);
}
295
/*
 * Raise an alignment fault (and longjmp out) if @ptr is not aligned
 * to TAG_GRANULE, as the tag store instructions require.
 */
static void check_tag_aligned(CPUARMState *env, uint64_t ptr, uintptr_t ra)
{
    if (unlikely(!QEMU_IS_ALIGNED(ptr, TAG_GRANULE))) {
        arm_cpu_do_unaligned_access(env_cpu(env), ptr, MMU_DATA_STORE,
                                    cpu_mmu_index(env, false), ra);
        /* arm_cpu_do_unaligned_access raises an exception; no return. */
        g_assert_not_reached();
    }
}
304
305
306static void store_tag1(uint64_t ptr, uint8_t *mem, int tag)
307{
308 int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
309 *mem = deposit32(*mem, ofs, 4, tag);
310}
311
312
/* For use in a parallel context, atomically store @tag into the tag byte. */
static void store_tag1_parallel(uint64_t ptr, uint8_t *mem, int tag)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    uint8_t old = qatomic_read(mem);

    /* Compare-and-swap loop: retry until no other writer intervenes. */
    while (1) {
        uint8_t new = deposit32(old, ofs, 4, tag);
        uint8_t cmp = qatomic_cmpxchg(mem, old, new);
        if (likely(cmp == old)) {
            return;
        }
        /* Lost the race; cmp holds the current value -- try again. */
        old = cmp;
    }
}
327
/* Signature shared by the serial and parallel single-tag stores above. */
typedef void stg_store1(uint64_t, uint8_t *, int);

/*
 * Common implementation of STG: store the tag of @xt to the granule
 * at @ptr, using @store1 for the actual nibble update.
 */
static inline void do_stg(CPUARMState *env, uint64_t ptr, uint64_t xt,
                          uintptr_t ra, stg_store1 store1)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uint8_t *mem;

    check_tag_aligned(env, ptr, ra);

    /* Trap if accessing an invalid page. */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, TAG_GRANULE,
                             MMU_DATA_STORE, 1, ra);

    /* Store if the page supports tags. */
    if (mem) {
        store1(ptr, mem, allocation_tag_from_addr(xt));
    }
}
347
/* STG: non-parallel store of one allocation tag. */
void HELPER(stg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_stg(env, ptr, xt, GETPC(), store_tag1);
}
352
/* STG within a parallel context: atomic store of one allocation tag. */
void HELPER(stg_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_stg(env, ptr, xt, GETPC(), store_tag1_parallel);
}
357
/*
 * STG when tag access is disabled: perform the alignment check and
 * the store permission check (probe_write) without touching tags.
 */
void HELPER(stg_stub)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();

    check_tag_aligned(env, ptr, ra);
    probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
}
366
/*
 * Common implementation of ST2G: store the tag of @xt to the two
 * adjacent granules at @ptr, using @store1 for the nibble updates.
 */
static inline void do_st2g(CPUARMState *env, uint64_t ptr, uint64_t xt,
                           uintptr_t ra, stg_store1 store1)
{
    int mmu_idx = cpu_mmu_index(env, false);
    int tag = allocation_tag_from_addr(xt);
    uint8_t *mem1, *mem2;

    check_tag_aligned(env, ptr, ra);

    /*
     * One tag byte covers two granules.  If @ptr is aligned to a tag
     * byte (bit LOG2_TAG_GRANULE clear), both granules share one byte;
     * otherwise the two granules are the high nibble of one byte and
     * the low nibble of the next, possibly on different pages.
     */
    if (ptr & TAG_GRANULE) {
        /* Trap if either of the two pages is invalid. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  TAG_GRANULE, MMU_DATA_STORE, 1, ra);
        mem2 = allocation_tag_mem(env, mmu_idx, ptr + TAG_GRANULE,
                                  MMU_DATA_STORE, TAG_GRANULE,
                                  MMU_DATA_STORE, 1, ra);

        /*
         * store1 only inspects bit LOG2_TAG_GRANULE of its first
         * argument, so pass the nibble-select bit directly: high
         * nibble for mem1, low nibble for mem2.
         */
        if (mem1) {
            store1(TAG_GRANULE, mem1, tag);
        }
        if (mem2) {
            store1(0, mem2, tag);
        }
    } else {
        /* Both granules in one tag byte: a single byte store suffices. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  2 * TAG_GRANULE, MMU_DATA_STORE, 1, ra);
        if (mem1) {
            /* Replicate the tag into both nibbles. */
            tag |= tag << 4;
            qatomic_set(mem1, tag);
        }
    }
}
405
/* ST2G: non-parallel store of two adjacent allocation tags. */
void HELPER(st2g)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_st2g(env, ptr, xt, GETPC(), store_tag1);
}
410
/* ST2G within a parallel context: atomic stores of two allocation tags. */
void HELPER(st2g_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_st2g(env, ptr, xt, GETPC(), store_tag1_parallel);
}
415
/*
 * ST2G when tag access is disabled: perform the alignment check and
 * the store permission checks without touching tags.  The two granules
 * may straddle a page boundary, in which case each page is probed
 * separately so both faults are taken correctly.
 */
void HELPER(st2g_stub)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    int in_page = -(ptr | TARGET_PAGE_MASK);

    check_tag_aligned(env, ptr, ra);

    if (likely(in_page >= 2 * TAG_GRANULE)) {
        probe_write(env, ptr, 2 * TAG_GRANULE, mmu_idx, ra);
    } else {
        probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
        probe_write(env, ptr + TAG_GRANULE, TAG_GRANULE, mmu_idx, ra);
    }
}
431
/* Data bytes covered by one LDGM/STGM operation (GMID_EL1.BS encoded). */
#define LDGM_STGM_SIZE (4 << GMID_EL1_BS)

/*
 * LDGM: load multiple allocation tags -- one 64-bit word of packed
 * tag nibbles covering LDGM_STGM_SIZE bytes of data.
 */
uint64_t HELPER(ldgm)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    void *tag_mem;

    ptr = QEMU_ALIGN_DOWN(ptr, LDGM_STGM_SIZE);

    /* Trap if accessing an invalid page. */
    tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD,
                                 LDGM_STGM_SIZE, MMU_DATA_LOAD,
                                 LDGM_STGM_SIZE / (2 * TAG_GRANULE), ra);

    /* The tag is squashed to zero if the page does not support tags. */
    if (!tag_mem) {
        return 0;
    }

    QEMU_BUILD_BUG_ON(GMID_EL1_BS != 6);

    /*
     * We are loading 64-bits worth of tag nibbles; the ordering of the
     * elements within the word corresponds to a little-endian load.
     */
    return ldq_le_p(tag_mem);
}
459
460void HELPER(stgm)(CPUARMState *env, uint64_t ptr, uint64_t val)
461{
462 int mmu_idx = cpu_mmu_index(env, false);
463 uintptr_t ra = GETPC();
464 void *tag_mem;
465
466 ptr = QEMU_ALIGN_DOWN(ptr, LDGM_STGM_SIZE);
467
468
469 tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
470 LDGM_STGM_SIZE, MMU_DATA_LOAD,
471 LDGM_STGM_SIZE / (2 * TAG_GRANULE), ra);
472
473
474
475
476
477 if (!tag_mem) {
478 return;
479 }
480
481 QEMU_BUILD_BUG_ON(GMID_EL1_BS != 6);
482
483
484
485
486 stq_le_p(tag_mem, val);
487}
488
/*
 * Tag-setting portion of DC GZVA/STZGM: set the tags for one DC ZVA
 * block to the tag of @val.  The data-zeroing part is done separately
 * by the caller.
 */
void HELPER(stzgm_tags)(CPUARMState *env, uint64_t ptr, uint64_t val)
{
    uintptr_t ra = GETPC();
    int mmu_idx = cpu_mmu_index(env, false);
    int log2_dcz_bytes, log2_tag_bytes;
    intptr_t dcz_bytes, tag_bytes;
    uint8_t *mem;

    /*
     * dcz_blocksize is encoded in words (log2); +2 converts to bytes.
     * One tag byte covers 2 * TAG_GRANULE data bytes, so the block
     * needs dcz_bytes >> (LOG2_TAG_GRANULE + 1) tag bytes.  This
     * assumes the zva block is at least one whole tag byte, i.e.
     * dcz_bytes >= 32 -- TODO confirm this is asserted at realize.
     */
    log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
    log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
    dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
    tag_bytes = (intptr_t)1 << log2_tag_bytes;
    ptr &= -dcz_bytes;

    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, dcz_bytes,
                             MMU_DATA_STORE, tag_bytes, ra);
    if (mem) {
        /* Replicate the tag into both nibbles of every tag byte. */
        int tag_pair = (val & 0xf) * 0x11;
        memset(mem, tag_pair, tag_bytes);
    }
}
515
516
/* Record a tag check failure, as directed by SCTLR_ELx.TCF/TCF0. */
static void mte_check_fail(CPUARMState *env, uint32_t desc,
                           uint64_t dirty_ptr, uintptr_t ra)
{
    int mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    ARMMMUIdx arm_mmu_idx = core_to_aa64_mmu_idx(mmu_idx);
    int el, reg_el, tcf, select, is_write, syn;
    uint64_t sctlr;

    reg_el = regime_el(env, arm_mmu_idx);
    sctlr = env->cp15.sctlr_el[reg_el];

    el = arm_current_el(env);
    if (el == 0) {
        /* SCTLR_EL1.TCF0, bits [39:38], governs EL0 accesses. */
        tcf = extract64(sctlr, 38, 2);
    } else {
        /* SCTLR_ELx.TCF, bits [41:40]. */
        tcf = extract64(sctlr, 40, 2);
    }

    switch (tcf) {
    case 1:
        /*
         * Tag check fail causes a synchronous exception.
         *
         * In restore_state_to_opc, we set the exception syndrome
         * for the load or store operation.  Unwind first so we
         * may overwrite that with the syndrome for the tag check.
         */
        cpu_restore_state(env_cpu(env), ra, true);
        env->exception.vaddress = dirty_ptr;

        is_write = FIELD_EX32(desc, MTEDESC, WRITE);
        syn = syn_data_abort_no_iss(el != 0, 0, 0, 0, 0, is_write, 0x11);
        raise_exception(env, EXCP_DATA_ABORT, syn, exception_target_el(env));
        /* raise_exception does not return. */

    case 0:
        /*
         * Tag check fail does not affect the PE: checking is disabled,
         * so this runtime call should never have been made.
         */
        g_assert_not_reached();

    case 2:
        /* Tag check fail causes an asynchronous flag set in TFSR. */
        if (regime_has_2_ranges(arm_mmu_idx)) {
            /* Bit 55 selects between the TF0 and TF1 flag bits. */
            select = extract64(dirty_ptr, 55, 1);
        } else {
            select = 0;
        }
        env->cp15.tfsr_el[el] |= 1 << select;
        break;

    default:
        /* Case 3: Reserved -- log and otherwise ignore. */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Tag check failure with SCTLR_EL%d.TCF%s "
                      "set to reserved value %d\n",
                      reg_el, el ? "" : "0", tcf);
        break;
    }
}
579
580
581
582
/*
 * Perform the tag check for a single access without reporting failure.
 * Returns true if the access passes (tags match, TCMA applies, or the
 * page has no tag storage), false on a tag mismatch.
 */
static bool mte_probe1_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
                           uintptr_t ra, int bit55)
{
    int mem_tag, mmu_idx, ptr_tag, size;
    MMUAccessType type;
    uint8_t *mem;

    ptr_tag = allocation_tag_from_addr(ptr);

    /* TCMA: access with a match-all tag is unchecked. */
    if (tcma_check(desc, bit55, ptr_tag)) {
        return true;
    }

    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD;
    size = FIELD_EX32(desc, MTEDESC, ESIZE);

    /* The tag itself is always read, regardless of the data access type. */
    mem = allocation_tag_mem(env, mmu_idx, ptr, type, size,
                             MMU_DATA_LOAD, 1, ra);
    if (!mem) {
        /* No tag storage: the access is unchecked. */
        return true;
    }

    mem_tag = load_tag1(ptr, mem);
    return ptr_tag == mem_tag;
}
609
610
611
612
613
614
615
/*
 * No-fault version of mte_check1 (for use by e.g. non-faulting loads).
 * Returns false iff the access is Checked and the tag check failed;
 * the validity of the page must have been established beforehand.
 */
bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    int bit55 = extract64(ptr, 55, 1);

    /* If TBI is disabled, the access is unchecked. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return true;
    }

    return mte_probe1_int(env, desc, ptr, 0, bit55);
}
627
/*
 * Perform an MTE checked access for a single element, reporting any
 * failure per SCTLR_ELx.TCF.  Returns the pointer with the tag bits
 * cleaned for use as the actual access address.
 */
uint64_t mte_check1(CPUARMState *env, uint32_t desc,
                    uint64_t ptr, uintptr_t ra)
{
    int bit55 = extract64(ptr, 55, 1);

    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return ptr;
    }

    if (unlikely(!mte_probe1_int(env, desc, ptr, ra, bit55))) {
        mte_check_fail(env, desc, ptr, ra);
    }

    return useronly_clean_ptr(ptr);
}
644
/* TCG helper entry point for the single-element tag check. */
uint64_t HELPER(mte_check1)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    return mte_check1(env, desc, ptr, GETPC());
}
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
/*
 * Count how many consecutive allocation tags in @mem match @cmp.
 *
 * Tag storage packs two 4-bit tags per byte: the even granule in bits
 * [3:0] and the odd granule in bits [7:4].  @odd is nonzero if the
 * first granule to check is the odd one.  Checking stops at the first
 * mismatch or after @count tags; the number that matched is returned.
 *
 * Unlike the previous goto-interleaved version, this form is bounded
 * for count <= 0 (returning 0 without reading *mem) and never reads a
 * tag byte beyond the last granule checked.
 */
static int checkN(uint8_t *mem, int odd, int cmp, int count)
{
    int n;
    int nibble = odd ? 1 : 0;   /* global nibble index of first granule */

    cmp &= 0xf;
    for (n = 0; n < count; n++, nibble++) {
        /* Select byte nibble>>1, then low or high 4 bits. */
        int tag = (mem[nibble >> 1] >> ((nibble & 1) * 4)) & 0xf;
        if (tag != cmp) {
            break;
        }
    }
    return n;
}
715
/*
 * Perform an MTE checked access for multiple elements (total bytes and
 * element size taken from @desc), reporting the first failing element
 * per SCTLR_ELx.TCF.  Returns the pointer with the tag bits cleaned
 * for use as the actual access address.
 */
uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
                    uint64_t ptr, uintptr_t ra)
{
    int mmu_idx, ptr_tag, bit55;
    uint64_t ptr_last, ptr_end, prev_page, next_page;
    uint64_t tag_first, tag_end;
    uint64_t tag_byte_first, tag_byte_end;
    uint32_t esize, total, tag_count, tag_size, n, c;
    uint8_t *mem1, *mem2;
    MMUAccessType type;

    bit55 = extract64(ptr, 55, 1);

    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return ptr;
    }

    ptr_tag = allocation_tag_from_addr(ptr);

    /* TCMA: access with a match-all tag is unchecked. */
    if (tcma_check(desc, bit55, ptr_tag)) {
        goto done;
    }

    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD;
    esize = FIELD_EX32(desc, MTEDESC, ESIZE);
    total = FIELD_EX32(desc, MTEDESC, TSIZE);

    /* Find the addr of the end of the access, and of the last element. */
    ptr_end = ptr + total;
    ptr_last = ptr_end - esize;

    /* Round the bounds to the tag granule, and compute the number of tags. */
    tag_first = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
    tag_end = QEMU_ALIGN_UP(ptr_last, TAG_GRANULE);
    tag_count = (tag_end - tag_first) / TAG_GRANULE;

    /* Round the bounds to twice the tag granule, and compute the bytes. */
    tag_byte_first = QEMU_ALIGN_DOWN(ptr, 2 * TAG_GRANULE);
    tag_byte_end = QEMU_ALIGN_UP(ptr_last, 2 * TAG_GRANULE);

    /* Locate the page boundaries. */
    prev_page = ptr & TARGET_PAGE_MASK;
    next_page = prev_page + TARGET_PAGE_SIZE;

    if (likely(tag_end - prev_page <= TARGET_PAGE_SIZE)) {
        /* Memory access stays on one page. */
        tag_size = (tag_byte_end - tag_byte_first) / (2 * TAG_GRANULE);
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, total,
                                  MMU_DATA_LOAD, tag_size, ra);
        if (!mem1) {
            /* No tag storage: the access is unchecked. */
            goto done;
        }

        /* Perform all of the comparisons. */
        n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, tag_count);
    } else {
        /* Memory access crosses to next page. */
        tag_size = (next_page - tag_byte_first) / (2 * TAG_GRANULE);
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, next_page - ptr,
                                  MMU_DATA_LOAD, tag_size, ra);

        tag_size = (tag_byte_end - next_page) / (2 * TAG_GRANULE);
        mem2 = allocation_tag_mem(env, mmu_idx, next_page, type,
                                  ptr_end - next_page,
                                  MMU_DATA_LOAD, tag_size, ra);

        /*
         * Perform all of the comparisons.
         * Note the possible but unlikely case of the operation spanning
         * two pages that do not both have tagging enabled; an untagged
         * page counts as all of its granules matching.
         */
        n = c = (next_page - tag_first) / TAG_GRANULE;
        if (mem1) {
            n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, c);
        }
        if (n == c) {
            if (!mem2) {
                goto done;
            }
            n += checkN(mem2, 0, ptr_tag, tag_count - c);
        }
    }

    /*
     * If we failed, we know which granule.  Compute the element that
     * is first in that granule, and signal failure on that element.
     */
    if (unlikely(n < tag_count)) {
        uint64_t fail_ofs;

        fail_ofs = tag_first + n * TAG_GRANULE - ptr;
        fail_ofs = ROUND_UP(fail_ofs, esize);
        mte_check_fail(env, desc, ptr + fail_ofs, ra);
    }

 done:
    return useronly_clean_ptr(ptr);
}
815
/* TCG helper entry point for the multi-element tag check. */
uint64_t HELPER(mte_checkN)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    return mte_checkN(env, desc, ptr, GETPC());
}
820
821
822
823
/*
 * Perform an MTE checked access for DC_ZVA: the whole (aligned) zva
 * block must carry the pointer's tag.  Returns the cleaned pointer.
 */
uint64_t HELPER(mte_check_zva)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    uintptr_t ra = GETPC();
    int log2_dcz_bytes, log2_tag_bytes;
    int mmu_idx, bit55;
    intptr_t dcz_bytes, tag_bytes, i;
    void *mem;
    uint64_t ptr_tag, mem_tag, align_ptr;

    bit55 = extract64(ptr, 55, 1);

    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return ptr;
    }

    ptr_tag = allocation_tag_from_addr(ptr);

    /* TCMA: access with a match-all tag is unchecked. */
    if (tcma_check(desc, bit55, ptr_tag)) {
        goto done;
    }

    /*
     * dcz_blocksize is encoded in words (log2); +2 converts to bytes.
     * One tag byte covers 2 * TAG_GRANULE data bytes.  This assumes
     * the zva block covers at least one whole tag byte, i.e.
     * dcz_bytes >= 32 -- TODO confirm this is asserted at realize.
     */
    log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
    log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
    dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
    tag_bytes = (intptr_t)1 << log2_tag_bytes;
    align_ptr = ptr & -dcz_bytes;

    /*
     * Trap if accessing an invalid page.  DC_ZVA reports faults on the
     * original pointer, but watchpoints must see the space actually
     * written (the aligned block) -- so probe the original pointer and
     * then do the full aligned-block lookup.
     */
    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    (void) probe_write(env, ptr, 1, mmu_idx, ra);
    mem = allocation_tag_mem(env, mmu_idx, align_ptr, MMU_DATA_STORE,
                             dcz_bytes, MMU_DATA_LOAD, tag_bytes, ra);
    if (!mem) {
        /* No tag storage: the access is unchecked. */
        goto done;
    }

    /*
     * Unlike the reasoning for checkN, DC_ZVA is always aligned, and thus
     * it is quite easy to perform all of the comparisons at once without
     * any extra masking.
     *
     * The most common zva block size is 64; some of the thunderx cpus use
     * a block size of 128.  For user-only, aarch64_max_initfn will set the
     * block size to 512.  Fill out the other cases for future-proofing.
     *
     * In order to be able to find the first miscompare later, we want the
     * tag bytes to be in little-endian order.
     */
    switch (log2_tag_bytes) {
    case 0:
        mem_tag = *(uint8_t *)mem;
        ptr_tag *= 0x11u;
        break;
    case 1:
        mem_tag = cpu_to_le16(*(uint16_t *)mem);
        ptr_tag *= 0x1111u;
        break;
    case 2:
        mem_tag = cpu_to_le32(*(uint32_t *)mem);
        ptr_tag *= 0x11111111u;
        break;
    case 3:
        mem_tag = cpu_to_le64(*(uint64_t *)mem);
        ptr_tag *= 0x1111111111111111ull;
        break;

    default: /* zva block larger than 512 bytes: compare 64 bits at a time */
        ptr_tag *= 0x1111111111111111ull;
        i = 0;
        do {
            mem_tag = cpu_to_le64(*(uint64_t *)(mem + i));
            if (unlikely(mem_tag != ptr_tag)) {
                /* align_ptr already points at the failing 64-bit chunk. */
                goto fail;
            }
            i += 8;
            align_ptr += 16 * TAG_GRANULE;
        } while (i < tag_bytes);
        goto done;
    }

    if (likely(mem_tag == ptr_tag)) {
        goto done;
    }

 fail:
    /* Locate the first miscomparing nibble and report that granule. */
    i = ctz64(mem_tag ^ ptr_tag) >> 4;
    mte_check_fail(env, desc, align_ptr + i * TAG_GRANULE, ra);

 done:
    return useronly_clean_ptr(ptr);
}
926