/*
 * ARM v8.5-MemTag Operations
 *
 * Copyright (c) 2020 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/ram_addr.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "hw/core/tcg-cpu-ops.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"

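/*
 * Deterministic tag selection, as in the AArch64.ChooseNonExcludedTag
 * pseudocode: step forward from @tag by @offset non-excluded tags,
 * skipping any tag whose bit is set in @exclude; @offset == 0 keeps
 * @tag itself unless it is excluded.  If all 16 tags are excluded,
 * the result is 0.
 */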
static int choose_nonexcluded_tag(int tag, int offset, uint16_t exclude)
{
    if (exclude == 0xffff) {
        return 0;
    }
    if (offset == 0) {
        while (exclude & (1 << tag)) {
            tag = (tag + 1) & 15;
        }
    } else {
        do {
            do {
                tag = (tag + 1) & 15;
            } while (exclude & (1 << tag));
        } while (--offset > 0);
    }
    return tag;
}

/**
 * allocation_tag_mem:
 * @env: the cpu environment
 * @ptr_mmu_idx: the addressing regime to use for the virtual address
 * @ptr: the virtual address for which to look up tag memory
 * @ptr_access: the access to use for the virtual address
 * @ptr_size: the number of bytes in the normal memory access
 * @tag_access: the access to use for the tag memory
 * @tag_size: the number of bytes in the tag memory access
 * @ra: the return address for exception handling
 *
 * Our tag memory is formatted as a sequence of little-endian nibbles.
 * That is, the byte at (addr >> (LOG2_TAG_GRANULE + 1)) contains two
 * tags, with the tag at [3:0] for the lower addr and the tag at [7:4]
 * for the higher addr.
 *
 * Here, resolve the physical address from the virtual address, and return
 * a pointer to the corresponding tag byte.  Exit with exception if the
 * virtual address is not accessible for @ptr_access.
 *
 * The @ptr_size and @tag_size values may not have an obvious relation
 * due to the alignment of @ptr, and the number of tag checks required.
 *
 * If there is no tag storage corresponding to @ptr, return NULL.
 */
static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
                                   uint64_t ptr, MMUAccessType ptr_access,
                                   int ptr_size, MMUAccessType tag_access,
                                   int tag_size, uintptr_t ra)
{
#ifdef CONFIG_USER_ONLY
    uint64_t clean_ptr = useronly_clean_ptr(ptr);
    int flags = page_get_flags(clean_ptr);
    uint8_t *tags;
    uintptr_t index;

    if (!(flags & (ptr_access == MMU_DATA_STORE ? PAGE_WRITE_ORG : PAGE_READ))) {
        cpu_loop_exit_sigsegv(env_cpu(env), ptr, ptr_access,
                              !(flags & PAGE_VALID), ra);
    }

    /* Require both MAP_ANONYMOUS and PROT_MTE for the page. */
    if (!(flags & PAGE_ANON) || !(flags & PAGE_MTE)) {
        return NULL;
    }

    tags = page_get_target_data(clean_ptr);

    /* Index of the tag byte within the page's tag storage. */
    index = extract32(ptr, LOG2_TAG_GRANULE + 1,
                      TARGET_PAGE_BITS - LOG2_TAG_GRANULE - 1);
    return tags + index;
#else
    CPUTLBEntryFull *full;
    MemTxAttrs attrs;
    int in_page, flags;
    hwaddr ptr_paddr, tag_paddr, xlat;
    MemoryRegion *mr;
    ARMASIdx tag_asi;
    AddressSpace *tag_as;
    void *host;

    /*
     * Probe the first byte of the virtual address.  This raises an
     * exception for inaccessible pages, and resolves the virtual address
     * into the softmmu tlb.
     *
     * When RA == 0, this is for mte_probe.  The page is expected to be
     * valid.  Indicate to probe_access_full no-fault, then assert that
     * we received a valid page.
     */
    flags = probe_access_full(env, ptr, 0, ptr_access, ptr_mmu_idx,
                              ra == 0, &host, &full, ra);
    assert(!(flags & TLB_INVALID_MASK));

    /* If the virtual page MemAttr != Tagged, access unchecked. */
    if (full->pte_attrs != 0xf0) {
        return NULL;
    }

    /*
     * If not backed by host ram, there is no tag storage: access unchecked.
     * This is probably a guest os bug though, so log it.
     */
    if (unlikely(flags & TLB_MMIO)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Page @ 0x%" PRIx64 " indicates Tagged Normal memory "
                      "but is not backed by host ram\n", ptr);
        return NULL;
    }

    /*
     * Remember these values across the second lookup below,
     * which may invalidate this pointer via tlb resize.
     */
    ptr_paddr = full->phys_addr | (ptr & ~TARGET_PAGE_MASK);
    attrs = full->attrs;
    full = NULL;

    /*
     * The Normal memory access can extend to the next page.  E.g. a single
     * 8-byte access to the last byte of a page will check only the last
     * tag on the first page.
     * Any page access exception has priority over tag check exception.
     */
    in_page = -(ptr | TARGET_PAGE_MASK);
    if (unlikely(ptr_size > in_page)) {
        flags |= probe_access_full(env, ptr + in_page, 0, ptr_access,
                                   ptr_mmu_idx, ra == 0, &host, &full, ra);
        assert(!(flags & TLB_INVALID_MASK));
    }

    /* Any debug exception has priority over a tag check exception. */
    if (unlikely(flags & TLB_WATCHPOINT)) {
        int wp = ptr_access == MMU_DATA_LOAD ? BP_MEM_READ : BP_MEM_WRITE;
        assert(ra != 0);
        cpu_check_watchpoint(env_cpu(env), ptr, ptr_size, attrs, wp, ra);
    }

    /* Convert to the physical address in tag space.  */
    tag_paddr = ptr_paddr >> (LOG2_TAG_GRANULE + 1);

    /* Look up the address in tag space. */
    tag_asi = attrs.secure ? ARMASIdx_TagS : ARMASIdx_TagNS;
    tag_as = cpu_get_address_space(env_cpu(env), tag_asi);
    mr = address_space_translate(tag_as, tag_paddr, &xlat, NULL,
                                 tag_access == MMU_DATA_STORE, attrs);

    /*
     * Note that @mr will never be NULL.  If there is nothing in the address
     * space at @tag_paddr, the translation will return the unallocated memory
     * region.  For our purposes, the result must be ram.
     */
    if (unlikely(!memory_region_is_ram(mr))) {
        /* ??? Failure is a board configuration error. */
        qemu_log_mask(LOG_UNIMP,
                      "Tag Memory @ 0x%" HWADDR_PRIx " not found for "
                      "Normal Memory @ 0x%" HWADDR_PRIx "\n",
                      tag_paddr, ptr_paddr);
        return NULL;
    }

    /*
     * Ensure the tag memory is dirty on write, for migration.
     * Tag memory can never contain code or display memory (vga).
     */
    if (tag_access == MMU_DATA_STORE) {
        ram_addr_t tag_ra = memory_region_get_ram_addr(mr) + xlat;
        cpu_physical_memory_set_dirty_flag(tag_ra, DIRTY_MEMORY_MIGRATION);
    }

    return memory_region_get_ram_ptr(mr) + xlat;
#endif
}

uint64_t HELPER(irg)(CPUARMState *env, uint64_t rn, uint64_t rm)
{
    uint16_t exclude = extract32(rm | env->cp15.gcr_el1, 0, 16);
    int rrnd = extract32(env->cp15.gcr_el1, 16, 1);
    int start = extract32(env->cp15.rgsr_el1, 0, 4);
    int seed = extract32(env->cp15.rgsr_el1, 8, 16);
    int offset, i, rtag;

    /*
     * Our IMPDEF choice for GCR_EL1.RRND==1 is to continue to use the
     * deterministic algorithm in AArch64.ChooseNonExcludedTag, but to
     * (re)initialize the LFSR seed from a true random source whenever
     * RGSR_EL1.SEED is observed to be zero.
     */
    if (unlikely(seed == 0) && rrnd) {
        do {
            Error *err = NULL;
            uint16_t two;

            if (qemu_guest_getrandom(&two, sizeof(two), &err) < 0) {
                /*
                 * Failed, for unknown reasons in the crypto subsystem.
                 * Best we can do is log the reason and use a constant seed.
                 */
                qemu_log_mask(LOG_UNIMP, "IRG: Crypto failure: %s\n",
                              error_get_pretty(err));
                error_free(err);
                two = 1;
            }
            seed = two;
        } while (seed == 0);
    }

    /* RandomTag */
    for (i = offset = 0; i < 4; ++i) {
        /* NextRandomTagBit */
        int top = (extract32(seed, 5, 1) ^ extract32(seed, 3, 1) ^
                   extract32(seed, 2, 1) ^ extract32(seed, 0, 1));
        seed = (top << 15) | (seed >> 1);
        offset |= top << i;
    }
    rtag = choose_nonexcluded_tag(start, offset, exclude);
    env->cp15.rgsr_el1 = rtag | (seed << 8);

    return address_with_allocation_tag(rn, rtag);
}

uint64_t HELPER(addsubg)(CPUARMState *env, uint64_t ptr,
                         int32_t offset, uint32_t tag_offset)
{
    int start_tag = allocation_tag_from_addr(ptr);
    uint16_t exclude = extract32(env->cp15.gcr_el1, 0, 16);
    int rtag = choose_nonexcluded_tag(start_tag, tag_offset, exclude);

    return address_with_allocation_tag(ptr + offset, rtag);
}

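/*
 * Each tag byte holds the tags for two adjacent granules; bit
 * LOG2_TAG_GRANULE of the address selects the low or high nibble.
 */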
static int load_tag1(uint64_t ptr, uint8_t *mem)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    return extract32(*mem, ofs, 4);
}

uint64_t HELPER(ldg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uint8_t *mem;
    int rtag = 0;

    /* Trap if accessing an invalid page.  */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD, 1,
                             MMU_DATA_LOAD, 1, GETPC());

    /* Load if page supports tags. */
    if (mem) {
        rtag = load_tag1(ptr, mem);
    }

    return address_with_allocation_tag(xt, rtag);
}

static void check_tag_aligned(CPUARMState *env, uint64_t ptr, uintptr_t ra)
{
    if (unlikely(!QEMU_IS_ALIGNED(ptr, TAG_GRANULE))) {
        arm_cpu_do_unaligned_access(env_cpu(env), ptr, MMU_DATA_STORE,
                                    cpu_mmu_index(env, false), ra);
        g_assert_not_reached();
    }
}

/* For use in a non-parallel context, store to the given nibble.  */
static void store_tag1(uint64_t ptr, uint8_t *mem, int tag)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    *mem = deposit32(*mem, ofs, 4, tag);
}

/* For use in a parallel context, atomically store to the given nibble.  */
static void store_tag1_parallel(uint64_t ptr, uint8_t *mem, int tag)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    uint8_t old = qatomic_read(mem);

    while (1) {
        uint8_t new = deposit32(old, ofs, 4, tag);
        uint8_t cmp = qatomic_cmpxchg(mem, old, new);
        if (likely(cmp == old)) {
            return;
        }
        old = cmp;
    }
}

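/* Common signature of store_tag1 and store_tag1_parallel. */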
typedef void stg_store1(uint64_t, uint8_t *, int);

static inline void do_stg(CPUARMState *env, uint64_t ptr, uint64_t xt,
                          uintptr_t ra, stg_store1 store1)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uint8_t *mem;

    check_tag_aligned(env, ptr, ra);

    /* Trap if accessing an invalid page.  */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, TAG_GRANULE,
                             MMU_DATA_STORE, 1, ra);

    /* Store if page supports tags. */
    if (mem) {
        store1(ptr, mem, allocation_tag_from_addr(xt));
    }
}

void HELPER(stg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_stg(env, ptr, xt, GETPC(), store_tag1);
}

void HELPER(stg_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_stg(env, ptr, xt, GETPC(), store_tag1_parallel);
}

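/*
 * Stub used when the tag store itself is disabled: perform the alignment
 * check and probe the page for writing, without touching tag memory.
 */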
void HELPER(stg_stub)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();

    check_tag_aligned(env, ptr, ra);
    probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
}

static inline void do_st2g(CPUARMState *env, uint64_t ptr, uint64_t xt,
                           uintptr_t ra, stg_store1 store1)
{
    int mmu_idx = cpu_mmu_index(env, false);
    int tag = allocation_tag_from_addr(xt);
    uint8_t *mem1, *mem2;

    check_tag_aligned(env, ptr, ra);

    /*
     * Trap if accessing an invalid page(s).
     * This takes priority over !allocation_tag_access_enabled.
     */
    if (ptr & TAG_GRANULE) {
        /* Two stores unaligned mod TAG_GRANULE*2 -- modify two bytes. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  TAG_GRANULE, MMU_DATA_STORE, 1, ra);
        mem2 = allocation_tag_mem(env, mmu_idx, ptr + TAG_GRANULE,
                                  MMU_DATA_STORE, TAG_GRANULE,
                                  MMU_DATA_STORE, 1, ra);

        /* Store if page(s) support tags. */
        if (mem1) {
            store1(TAG_GRANULE, mem1, tag);
        }
        if (mem2) {
            store1(0, mem2, tag);
        }
    } else {
        /* Store aligned mod 16, modify only one byte. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  2 * TAG_GRANULE, MMU_DATA_STORE, 1, ra);
        if (mem1) {
            tag |= tag << 4;
            qatomic_set(mem1, tag);
        }
    }
}

void HELPER(st2g)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_st2g(env, ptr, xt, GETPC(), store_tag1);
}

void HELPER(st2g_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_st2g(env, ptr, xt, GETPC(), store_tag1_parallel);
}

void HELPER(st2g_stub)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    int in_page = -(ptr | TARGET_PAGE_MASK);

    check_tag_aligned(env, ptr, ra);

    if (likely(in_page >= 2 * TAG_GRANULE)) {
        probe_write(env, ptr, 2 * TAG_GRANULE, mmu_idx, ra);
    } else {
        probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
        probe_write(env, ptr + TAG_GRANULE, TAG_GRANULE, mmu_idx, ra);
    }
}

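/*
 * QEMU advertises GMID_EL1.BS == 6, so LDGM/STGM operate on 4 << 6 = 256
 * bytes: 16 tag granules, whose tags pack into 8 bytes of tag memory.
 */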
#define LDGM_STGM_SIZE  (4 << GMID_EL1_BS)

uint64_t HELPER(ldgm)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    void *tag_mem;

    ptr = QEMU_ALIGN_DOWN(ptr, LDGM_STGM_SIZE);

    /* Trap if accessing an invalid page.  */
    tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD,
                                 LDGM_STGM_SIZE, MMU_DATA_LOAD,
                                 LDGM_STGM_SIZE / (2 * TAG_GRANULE), ra);

    /* The tag is squashed to zero if the page does not support tags. */
    if (!tag_mem) {
        return 0;
    }

    QEMU_BUILD_BUG_ON(GMID_EL1_BS != 6);
    /*
     * We are loading 64-bits worth of tags.  The ordering of elements
     * within the word corresponds to a 64-bit little-endian operation.
     */
    return ldq_le_p(tag_mem);
}

void HELPER(stgm)(CPUARMState *env, uint64_t ptr, uint64_t val)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    void *tag_mem;

    ptr = QEMU_ALIGN_DOWN(ptr, LDGM_STGM_SIZE);

    /* Trap if accessing an invalid page.  */
    tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                 LDGM_STGM_SIZE, MMU_DATA_LOAD,
                                 LDGM_STGM_SIZE / (2 * TAG_GRANULE), ra);

    /*
     * Tag store only happens if the page supports tags,
     * and if the OS has enabled access to the tags.
     */
    if (!tag_mem) {
        return;
    }

    QEMU_BUILD_BUG_ON(GMID_EL1_BS != 6);
    /*
     * We are storing 64-bits worth of tags.  The ordering of elements
     * within the word corresponds to a 64-bit little-endian operation.
     */
    stq_le_p(tag_mem, val);
}

void HELPER(stzgm_tags)(CPUARMState *env, uint64_t ptr, uint64_t val)
{
    uintptr_t ra = GETPC();
    int mmu_idx = cpu_mmu_index(env, false);
    int log2_dcz_bytes, log2_tag_bytes;
    intptr_t dcz_bytes, tag_bytes;
    uint8_t *mem;

    /*
     * In arm_cpu_realizefn, we asserted that dcz > LOG2_TAG_GRANULE+1,
     * i.e. 32 bytes, which is an unreasonably small dcz anyway, to make
     * sure that we can access one complete tag byte here.
     */
    log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
    log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
    dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
    tag_bytes = (intptr_t)1 << log2_tag_bytes;
    ptr &= -dcz_bytes;

    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, dcz_bytes,
                             MMU_DATA_STORE, tag_bytes, ra);
    if (mem) {
        int tag_pair = (val & 0xf) * 0x11;
        memset(mem, tag_pair, tag_bytes);
    }
}

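/* Record a synchronous tag check fault: data abort with DFSC 0x11. */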
static void mte_sync_check_fail(CPUARMState *env, uint32_t desc,
                                uint64_t dirty_ptr, uintptr_t ra)
{
    int is_write, syn;

    env->exception.vaddress = dirty_ptr;

    is_write = FIELD_EX32(desc, MTEDESC, WRITE);
    syn = syn_data_abort_no_iss(arm_current_el(env) != 0, 0, 0, 0, 0, is_write,
                                0x11);
    raise_exception_ra(env, EXCP_DATA_ABORT, syn, exception_target_el(env), ra);
    g_assert_not_reached();
}

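/* Record an asynchronous tag check fault in TFSR_ELx, per address range. */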
static void mte_async_check_fail(CPUARMState *env, uint64_t dirty_ptr,
                                 uintptr_t ra, ARMMMUIdx arm_mmu_idx, int el)
{
    int select;

    if (regime_has_2_ranges(arm_mmu_idx)) {
        select = extract64(dirty_ptr, 55, 1);
    } else {
        select = 0;
    }
    env->cp15.tfsr_el[el] |= 1 << select;
#ifdef CONFIG_USER_ONLY
    /*
     * Stand in for a timer irq, setting _TIF_MTE_ASYNC_FAULT,
     * which then sends a SIGSEGV when the thread is next scheduled.
     * This cpu will return to the main loop at the end of the TB,
     * which is rather sooner than "normal".  But the alternative
     * is waiting until the next syscall.
     */
    qemu_cpu_kick(env_cpu(env));
#endif
}

/* Record a tag check failure.  */
static void mte_check_fail(CPUARMState *env, uint32_t desc,
                           uint64_t dirty_ptr, uintptr_t ra)
{
    int mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    ARMMMUIdx arm_mmu_idx = core_to_aa64_mmu_idx(mmu_idx);
    int el, reg_el, tcf;
    uint64_t sctlr;

    reg_el = regime_el(env, arm_mmu_idx);
    sctlr = env->cp15.sctlr_el[reg_el];

    switch (arm_mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E20_0:
        el = 0;
        tcf = extract64(sctlr, 38, 2);  /* SCTLR_ELx.TCF0 */
        break;
    default:
        el = reg_el;
        tcf = extract64(sctlr, 40, 2);  /* SCTLR_ELx.TCF */
    }

    switch (tcf) {
    case 1:
        /* Tag check fail causes a synchronous exception. */
        mte_sync_check_fail(env, desc, dirty_ptr, ra);
        break;

    case 0:
        /*
         * Tag check fail does not affect the PE.
         * We eliminate this case by not setting MTE_ACTIVE
         * in tb_flags, so that we never make this runtime call.
         */
        g_assert_not_reached();

    case 2:
        /* Tag check fail causes asynchronous flag set.  */
        mte_async_check_fail(env, dirty_ptr, ra, arm_mmu_idx, el);
        break;

    case 3:
        /*
         * Tag check fail causes asynchronous flag set for stores, or
         * a synchronous exception for loads.
         */
        if (FIELD_EX32(desc, MTEDESC, WRITE)) {
            mte_async_check_fail(env, dirty_ptr, ra, arm_mmu_idx, el);
        } else {
            mte_sync_check_fail(env, desc, dirty_ptr, ra);
        }
        break;
    }
}

/**
 * checkN:
 * @mem: tag memory to test
 * @odd: true to begin testing at tags at odd nibble
 * @cmp: the tag to compare against
 * @count: number of tags to test
 *
 * Return the number of successful tests.
 * Thus a return value < @count indicates a failure.
 *
 * A note about sizes: a) the granule is 16 bytes, b) the valid tag
 * range is [0, 15], so two tags pack into one byte, with the tag for
 * the even granule in the low nibble and the tag for the odd granule
 * in the high nibble.
 */
static int checkN(uint8_t *mem, int odd, int cmp, int count)
{
    int n = 0, diff;

    /* Replicate the test tag and compare.  */
    cmp *= 0x11;
    diff = *mem++ ^ cmp;

    if (odd) {
        goto start_odd;
    }

    while (1) {
        /* Test even tag. */
        if (unlikely((diff) & 0x0f)) {
            break;
        }
        if (++n == count) {
            break;
        }

 start_odd:
        /* Test odd tag. */
        if (unlikely((diff) & 0xf0)) {
            break;
        }
        if (++n == count) {
            break;
        }

        diff = *mem++ ^ cmp;
    }
    return n;
}

/**
 * mte_probe_int() - helper for mte_probe and mte_check
 * @env: CPU environment
 * @desc: MTEDESC descriptor
 * @ptr: virtual address of the base of the access
 * @fault: return virtual address of the first check failure
 *
 * Internal routine for both mte_probe and mte_check.
 * Return zero on failure, filling in *fault.
 * Return negative on trivial success for tbi disabled.
 * Return positive on success with tbi enabled.
 */
static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
                         uintptr_t ra, uint64_t *fault)
{
    int mmu_idx, ptr_tag, bit55;
    uint64_t ptr_last, prev_page, next_page;
    uint64_t tag_first, tag_last;
    uint64_t tag_byte_first, tag_byte_last;
    uint32_t sizem1, tag_count, tag_size, n, c;
    uint8_t *mem1, *mem2;
    MMUAccessType type;

    bit55 = extract64(ptr, 55, 1);
    *fault = ptr;

    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return -1;
    }

    ptr_tag = allocation_tag_from_addr(ptr);

    if (tcma_check(desc, bit55, ptr_tag)) {
        return 1;
    }

    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD;
    sizem1 = FIELD_EX32(desc, MTEDESC, SIZEM1);

    /* Find the addr of the end of the access */
    ptr_last = ptr + sizem1;

    /* Round the bounds to the tag granule, and compute the number of tags. */
    tag_first = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
    tag_last = QEMU_ALIGN_DOWN(ptr_last, TAG_GRANULE);
    tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1;

    /* Round the bounds to twice the tag granule, and compute the bytes. */
    tag_byte_first = QEMU_ALIGN_DOWN(ptr, 2 * TAG_GRANULE);
    tag_byte_last = QEMU_ALIGN_DOWN(ptr_last, 2 * TAG_GRANULE);

    /* Locate the page boundaries. */
    prev_page = ptr & TARGET_PAGE_MASK;
    next_page = prev_page + TARGET_PAGE_SIZE;

    if (likely(tag_last - prev_page < TARGET_PAGE_SIZE)) {
        /* Memory access stays on one page. */
        tag_size = ((tag_byte_last - tag_byte_first) / (2 * TAG_GRANULE)) + 1;
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, sizem1 + 1,
                                  MMU_DATA_LOAD, tag_size, ra);
        if (!mem1) {
            return 1;
        }

        /* Perform all of the comparisons. */
        n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, tag_count);
    } else {
        /* Memory access crosses to next page. */
        tag_size = (next_page - tag_byte_first) / (2 * TAG_GRANULE);
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, next_page - ptr,
                                  MMU_DATA_LOAD, tag_size, ra);

        tag_size = ((tag_byte_last - next_page) / (2 * TAG_GRANULE)) + 1;
        mem2 = allocation_tag_mem(env, mmu_idx, next_page, type,
                                  ptr_last - next_page + 1,
                                  MMU_DATA_LOAD, tag_size, ra);

        /*
         * Perform all of the comparisons.
         * Note the possible but unlikely case of the operation spanning
         * two pages that do not both have tagging enabled.
         */
        n = c = (next_page - tag_first) / TAG_GRANULE;
        if (mem1) {
            n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, c);
        }
        if (n == c) {
            if (!mem2) {
                return 1;
            }
            n += checkN(mem2, 0, ptr_tag, tag_count - c);
        }
    }

    if (likely(n == tag_count)) {
        return 1;
    }

    /*
     * If we failed, we know which granule.  For the first granule, the
     * failure address is @ptr, the first byte accessed.  Otherwise the
     * failure address is the first byte of the nth granule.
     */
    if (n > 0) {
        *fault = tag_first + n * TAG_GRANULE;
    }
    return 0;
}

uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra)
{
    uint64_t fault;
    int ret = mte_probe_int(env, desc, ptr, ra, &fault);

    if (unlikely(ret == 0)) {
        mte_check_fail(env, desc, fault, ra);
    } else if (ret < 0) {
        return ptr;
    }
    return useronly_clean_ptr(ptr);
}

uint64_t HELPER(mte_check)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    return mte_check(env, desc, ptr, GETPC());
}

/*
 * No-fault version of mte_check, to be used by SVE for MemSingleNF.
 * Returns false if the access is Checked and the check failed.  This
 * is only intended to probe the tag -- the validity of the page must
 * be checked beforehand.
 */
bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    uint64_t fault;
    int ret = mte_probe_int(env, desc, ptr, 0, &fault);

    return ret != 0;
}

/*
 * Perform an MTE checked access for DC_ZVA.
 */
uint64_t HELPER(mte_check_zva)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    uintptr_t ra = GETPC();
    int log2_dcz_bytes, log2_tag_bytes;
    int mmu_idx, bit55;
    intptr_t dcz_bytes, tag_bytes, i;
    void *mem;
    uint64_t ptr_tag, mem_tag, align_ptr;

    bit55 = extract64(ptr, 55, 1);

    /* If TBI is disabled, the access is unchecked. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return ptr;
    }

    ptr_tag = allocation_tag_from_addr(ptr);

    if (tcma_check(desc, bit55, ptr_tag)) {
        goto done;
    }

    /*
     * In arm_cpu_realizefn, we asserted that dcz > LOG2_TAG_GRANULE+1,
     * i.e. 32 bytes, which is an unreasonably small dcz anyway, to make
     * sure that we can access one complete tag byte here.
     */
    log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
    log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
    dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
    tag_bytes = (intptr_t)1 << log2_tag_bytes;
    align_ptr = ptr & -dcz_bytes;

    /*
     * Trap if accessing an invalid page.  DC_ZVA requires that we supply
     * the original pointer for an invalid page.  But watchpoints require
     * that we probe the actual space.  So do both.
     */
    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    (void) probe_write(env, ptr, 1, mmu_idx, ra);
    mem = allocation_tag_mem(env, mmu_idx, align_ptr, MMU_DATA_STORE,
                             dcz_bytes, MMU_DATA_LOAD, tag_bytes, ra);
    if (!mem) {
        goto done;
    }

    /*
     * Unlike the reasoning for checkN, DC_ZVA is always aligned, and thus
     * it is quite easy to perform all of the comparisons at once without
     * any extra masking.
     *
     * The most common zva block size is 64; some of the thunderx cpus use
     * a block size of 128.  For user-only, aarch64_max_initfn will set the
     * block size to 512.  Fill out the other cases for future-proofing.
     *
     * In order to be able to find the first miscompare later, we want the
     * tag bytes to be in little-endian order.
     */
    switch (log2_tag_bytes) {
    case 0: /* zva_blocksize 32 */
        mem_tag = *(uint8_t *)mem;
        ptr_tag *= 0x11u;
        break;
    case 1: /* zva_blocksize 64 */
        mem_tag = cpu_to_le16(*(uint16_t *)mem);
        ptr_tag *= 0x1111u;
        break;
    case 2: /* zva_blocksize 128 */
        mem_tag = cpu_to_le32(*(uint32_t *)mem);
        ptr_tag *= 0x11111111u;
        break;
    case 3: /* zva_blocksize 256 */
        mem_tag = cpu_to_le64(*(uint64_t *)mem);
        ptr_tag *= 0x1111111111111111ull;
        break;

    default: /* zva_blocksize 512, 1024, 2048 */
        ptr_tag *= 0x1111111111111111ull;
        i = 0;
        do {
            mem_tag = cpu_to_le64(*(uint64_t *)(mem + i));
            if (unlikely(mem_tag != ptr_tag)) {
                goto fail;
            }
            i += 8;
            align_ptr += 16 * TAG_GRANULE;
        } while (i < tag_bytes);
        goto done;
    }

    if (likely(mem_tag == ptr_tag)) {
        goto done;
    }

 fail:
    /* Locate the first nibble that differs. */
    i = ctz64(mem_tag ^ ptr_tag) >> 4;
    mte_check_fail(env, desc, align_ptr + i * TAG_GRANULE, ra);

 done:
    return useronly_clean_ptr(ptr);
}