1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20#include "qemu/osdep.h"
21#include "qemu/log.h"
22#include "cpu.h"
23#include "internals.h"
24#include "exec/exec-all.h"
25#include "exec/ram_addr.h"
26#include "exec/cpu_ldst.h"
27#include "exec/helper-proto.h"
28#include "qapi/error.h"
29#include "qemu/guest-random.h"
30
31
/*
 * Select a tag that is not in @exclude, starting the search at @tag and
 * stepping forward @offset non-excluded tags (ChooseNonExcludedTag).
 * If every tag is excluded, the architected result is 0.
 */
static int choose_nonexcluded_tag(int tag, int offset, uint16_t exclude)
{
    /* All sixteen tags excluded: result is defined to be 0. */
    if (exclude == 0xffff) {
        return 0;
    }

    if (offset == 0) {
        /* Step forward only while the current tag is excluded. */
        while (exclude & (1u << tag)) {
            tag = (tag + 1) % 16;
        }
        return tag;
    }

    /* Advance through the next @offset non-excluded tags. */
    for (; offset > 0; offset--) {
        do {
            tag = (tag + 1) % 16;
        } while (exclude & (1u << tag));
    }
    return tag;
}
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
77 uint64_t ptr, MMUAccessType ptr_access,
78 int ptr_size, MMUAccessType tag_access,
79 int tag_size, uintptr_t ra)
80{
81#ifdef CONFIG_USER_ONLY
82 uint64_t clean_ptr = useronly_clean_ptr(ptr);
83 int flags = page_get_flags(clean_ptr);
84 uint8_t *tags;
85 uintptr_t index;
86
87 if (!(flags & (ptr_access == MMU_DATA_STORE ? PAGE_WRITE_ORG : PAGE_READ))) {
88 cpu_loop_exit_sigsegv(env_cpu(env), ptr, ptr_access,
89 !(flags & PAGE_VALID), ra);
90 }
91
92
93 if (!(flags & PAGE_ANON) || !(flags & PAGE_MTE)) {
94 return NULL;
95 }
96
97 tags = page_get_target_data(clean_ptr);
98
99 index = extract32(ptr, LOG2_TAG_GRANULE + 1,
100 TARGET_PAGE_BITS - LOG2_TAG_GRANULE - 1);
101 return tags + index;
102#else
103 CPUTLBEntryFull *full;
104 MemTxAttrs attrs;
105 int in_page, flags;
106 hwaddr ptr_paddr, tag_paddr, xlat;
107 MemoryRegion *mr;
108 ARMASIdx tag_asi;
109 AddressSpace *tag_as;
110 void *host;
111
112
113
114
115
116
117
118
119
120
121 flags = probe_access_full(env, ptr, ptr_access, ptr_mmu_idx,
122 ra == 0, &host, &full, ra);
123 assert(!(flags & TLB_INVALID_MASK));
124
125
126 if (full->pte_attrs != 0xf0) {
127 return NULL;
128 }
129
130
131
132
133
134 if (unlikely(flags & TLB_MMIO)) {
135 qemu_log_mask(LOG_GUEST_ERROR,
136 "Page @ 0x%" PRIx64 " indicates Tagged Normal memory "
137 "but is not backed by host ram\n", ptr);
138 return NULL;
139 }
140
141
142
143
144
145 ptr_paddr = full->phys_addr;
146 attrs = full->attrs;
147 full = NULL;
148
149
150
151
152
153
154
155 in_page = -(ptr | TARGET_PAGE_MASK);
156 if (unlikely(ptr_size > in_page)) {
157 flags |= probe_access_full(env, ptr + in_page, ptr_access,
158 ptr_mmu_idx, ra == 0, &host, &full, ra);
159 assert(!(flags & TLB_INVALID_MASK));
160 }
161
162
163 if (unlikely(flags & TLB_WATCHPOINT)) {
164 int wp = ptr_access == MMU_DATA_LOAD ? BP_MEM_READ : BP_MEM_WRITE;
165 assert(ra != 0);
166 cpu_check_watchpoint(env_cpu(env), ptr, ptr_size, attrs, wp, ra);
167 }
168
169
170 tag_paddr = ptr_paddr >> (LOG2_TAG_GRANULE + 1);
171
172
173 tag_asi = attrs.secure ? ARMASIdx_TagS : ARMASIdx_TagNS;
174 tag_as = cpu_get_address_space(env_cpu(env), tag_asi);
175 mr = address_space_translate(tag_as, tag_paddr, &xlat, NULL,
176 tag_access == MMU_DATA_STORE, attrs);
177
178
179
180
181
182
183 if (unlikely(!memory_region_is_ram(mr))) {
184
185 qemu_log_mask(LOG_UNIMP,
186 "Tag Memory @ 0x%" HWADDR_PRIx " not found for "
187 "Normal Memory @ 0x%" HWADDR_PRIx "\n",
188 tag_paddr, ptr_paddr);
189 return NULL;
190 }
191
192
193
194
195
196 if (tag_access == MMU_DATA_STORE) {
197 ram_addr_t tag_ra = memory_region_get_ram_addr(mr) + xlat;
198 cpu_physical_memory_set_dirty_flag(tag_ra, DIRTY_MEMORY_MIGRATION);
199 }
200
201 return memory_region_get_ram_ptr(mr) + xlat;
202#endif
203}
204
/*
 * IRG: insert a random allocation tag into rn, excluding the tags in
 * rm and GCR_EL1.Exclude, using the LFSR state held in RGSR_EL1.
 */
uint64_t HELPER(irg)(CPUARMState *env, uint64_t rn, uint64_t rm)
{
    uint16_t exclude = extract32(rm | env->cp15.gcr_el1, 0, 16);
    int rrnd = extract32(env->cp15.gcr_el1, 16, 1);
    int start = extract32(env->cp15.rgsr_el1, 0, 4);
    int seed = extract32(env->cp15.rgsr_el1, 8, 16);
    int offset, i, rtag;

    /*
     * Our IMPDEF choice for GCR_EL1.RRND==1 is to continue to use the
     * deterministic algorithm of the pseudocode, but with the seed
     * chosen non-deterministically: initialize RGSR_EL1.SEED from the
     * crypto RNG when it is still zero.
     */
    if (unlikely(seed == 0) && rrnd) {
        do {
            Error *err = NULL;
            uint16_t two;

            if (qemu_guest_getrandom(&two, sizeof(two), &err) < 0) {
                /*
                 * Failed, for unknown reasons in the crypto subsystem.
                 * Best we can do is log the reason and use a
                 * non-zero deterministic seed.
                 */
                qemu_log_mask(LOG_UNIMP, "IRG: Crypto failure: %s\n",
                              error_get_pretty(err));
                error_free(err);
                two = 1;
            }
            seed = two;
        } while (seed == 0);
    }

    /* RandomTag: step the 16-bit LFSR four times to derive the offset. */
    for (i = offset = 0; i < 4; ++i) {
        /* NextRandomTagBit: feedback taps at bits 5, 3, 2, 0. */
        int top = (extract32(seed, 5, 1) ^ extract32(seed, 3, 1) ^
                   extract32(seed, 2, 1) ^ extract32(seed, 0, 1));
        seed = (top << 15) | (seed >> 1);
        offset |= top << i;
    }
    rtag = choose_nonexcluded_tag(start, offset, exclude);
    /* Write back both the chosen tag and the advanced seed.  */
    env->cp15.rgsr_el1 = rtag | (seed << 8);

    return address_with_allocation_tag(rn, rtag);
}
252
253uint64_t HELPER(addsubg)(CPUARMState *env, uint64_t ptr,
254 int32_t offset, uint32_t tag_offset)
255{
256 int start_tag = allocation_tag_from_addr(ptr);
257 uint16_t exclude = extract32(env->cp15.gcr_el1, 0, 16);
258 int rtag = choose_nonexcluded_tag(start_tag, tag_offset, exclude);
259
260 return address_with_allocation_tag(ptr + offset, rtag);
261}
262
263static int load_tag1(uint64_t ptr, uint8_t *mem)
264{
265 int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
266 return extract32(*mem, ofs, 4);
267}
268
/* LDG: load the allocation tag for ptr and insert it into xt.  */
uint64_t HELPER(ldg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uint8_t *mem;
    int rtag = 0;

    /* Trap if accessing an invalid page.  */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD, 1,
                             MMU_DATA_LOAD, 1, GETPC());

    /* Load if the page supports tags; otherwise the tag reads as zero. */
    if (mem) {
        rtag = load_tag1(ptr, mem);
    }

    return address_with_allocation_tag(xt, rtag);
}
286
/*
 * Tag-store operations require the address to be aligned to the tag
 * granule; raise an unaligned-access fault otherwise (does not return).
 */
static void check_tag_aligned(CPUARMState *env, uint64_t ptr, uintptr_t ra)
{
    if (unlikely(!QEMU_IS_ALIGNED(ptr, TAG_GRANULE))) {
        arm_cpu_do_unaligned_access(env_cpu(env), ptr, MMU_DATA_STORE,
                                    cpu_mmu_index(env, false), ra);
        g_assert_not_reached();
    }
}
295
296
297static void store_tag1(uint64_t ptr, uint8_t *mem, int tag)
298{
299 int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
300 *mem = deposit32(*mem, ofs, 4, tag);
301}
302
303
/*
 * As store_tag1, but atomic with respect to a concurrent update of the
 * other nibble in the same byte (for the *_parallel helpers).
 */
static void store_tag1_parallel(uint64_t ptr, uint8_t *mem, int tag)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    uint8_t old = qatomic_read(mem);

    while (1) {
        uint8_t new = deposit32(old, ofs, 4, tag);
        uint8_t cmp = qatomic_cmpxchg(mem, old, new);
        if (likely(cmp == old)) {
            return;
        }
        /* The byte changed concurrently; retry with the observed value. */
        old = cmp;
    }
}
318
319typedef void stg_store1(uint64_t, uint8_t *, int);
320
/*
 * Common implementation of STG: store the allocation tag from @xt for
 * the single granule at @ptr.  @store1 selects the serial or the atomic
 * nibble-store primitive.
 */
static inline void do_stg(CPUARMState *env, uint64_t ptr, uint64_t xt,
                          uintptr_t ra, stg_store1 store1)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uint8_t *mem;

    check_tag_aligned(env, ptr, ra);

    /* Trap if accessing an invalid page.  */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, TAG_GRANULE,
                             MMU_DATA_STORE, 1, ra);

    /* Store if the page supports tags; otherwise the store is a no-op. */
    if (mem) {
        store1(ptr, mem, allocation_tag_from_addr(xt));
    }
}
338
/* STG, non-parallel context: plain read-modify-write of the tag nibble. */
void HELPER(stg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_stg(env, ptr, xt, GETPC(), store_tag1);
}
343
/* STG, parallel context: cmpxchg-based atomic nibble update. */
void HELPER(stg_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_stg(env, ptr, xt, GETPC(), store_tag1_parallel);
}
348
/*
 * STG when tag storage is not being updated: perform only the alignment
 * check and the write probe of the normal-memory granule, so that any
 * access fault is still raised.
 */
void HELPER(stg_stub)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();

    check_tag_aligned(env, ptr, ra);
    probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
}
357
/*
 * Common implementation of ST2G: store the allocation tag from @xt for
 * two consecutive granules starting at @ptr.
 */
static inline void do_st2g(CPUARMState *env, uint64_t ptr, uint64_t xt,
                           uintptr_t ra, stg_store1 store1)
{
    int mmu_idx = cpu_mmu_index(env, false);
    int tag = allocation_tag_from_addr(xt);
    uint8_t *mem1, *mem2;

    check_tag_aligned(env, ptr, ra);

    /*
     * Trap if accessing an invalid page(s).
     * Probe both granules before storing either, so that a page fault
     * takes priority over any partial tag update.
     */
    if (ptr & TAG_GRANULE) {
        /* Two stores unaligned mod TAG_GRANULE*2 -- modify two bytes. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  TAG_GRANULE, MMU_DATA_STORE, 1, ra);
        mem2 = allocation_tag_mem(env, mmu_idx, ptr + TAG_GRANULE,
                                  MMU_DATA_STORE, TAG_GRANULE,
                                  MMU_DATA_STORE, 1, ra);

        /*
         * Store if page(s) support tags.  The first granule is the odd
         * (high) nibble of its tag byte and the second the even (low)
         * nibble, so pass synthetic addresses that encode just the
         * granule-parity bit for store1's nibble selection.
         */
        if (mem1) {
            store1(TAG_GRANULE, mem1, tag);
        }
        if (mem2) {
            store1(0, mem2, tag);
        }
    } else {
        /* Both granules share one tag byte -- write the whole byte. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  2 * TAG_GRANULE, MMU_DATA_STORE, 1, ra);
        if (mem1) {
            /* Replicate the tag into both nibbles. */
            tag |= tag << 4;
            qatomic_set(mem1, tag);
        }
    }
}
396
/* ST2G, non-parallel context. */
void HELPER(st2g)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_st2g(env, ptr, xt, GETPC(), store_tag1);
}
401
/* ST2G, parallel context: atomic nibble updates. */
void HELPER(st2g_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_st2g(env, ptr, xt, GETPC(), store_tag1_parallel);
}
406
/*
 * ST2G when tag storage is not being updated: perform only the access
 * checks.  The two granules may straddle a page boundary, in which case
 * each page must be probed separately so either can fault.
 */
void HELPER(st2g_stub)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    int in_page = -(ptr | TARGET_PAGE_MASK);

    check_tag_aligned(env, ptr, ra);

    if (likely(in_page >= 2 * TAG_GRANULE)) {
        probe_write(env, ptr, 2 * TAG_GRANULE, mmu_idx, ra);
    } else {
        probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
        probe_write(env, ptr + TAG_GRANULE, TAG_GRANULE, mmu_idx, ra);
    }
}
422
423#define LDGM_STGM_SIZE (4 << GMID_EL1_BS)
424
/* LDGM: bulk-load the tags covering an LDGM_STGM_SIZE block into Xt. */
uint64_t HELPER(ldgm)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    void *tag_mem;

    ptr = QEMU_ALIGN_DOWN(ptr, LDGM_STGM_SIZE);

    /* Trap if accessing an invalid page.  */
    tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD,
                                 LDGM_STGM_SIZE, MMU_DATA_LOAD,
                                 LDGM_STGM_SIZE / (2 * TAG_GRANULE), ra);

    /* The tag is squashed to zero if the page does not support tags.  */
    if (!tag_mem) {
        return 0;
    }

    QEMU_BUILD_BUG_ON(GMID_EL1_BS != 6);
    /*
     * We are loading 64-bits worth of tags.  The ordering of nibbles
     * within the word corresponds to a 64-bit little-endian operation.
     */
    return ldq_le_p(tag_mem);
}
450
/* STGM: bulk-store the tags in val over an LDGM_STGM_SIZE block. */
void HELPER(stgm)(CPUARMState *env, uint64_t ptr, uint64_t val)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    void *tag_mem;

    ptr = QEMU_ALIGN_DOWN(ptr, LDGM_STGM_SIZE);

    /* Trap if accessing an invalid page.  */
    tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                 LDGM_STGM_SIZE, MMU_DATA_LOAD,
                                 LDGM_STGM_SIZE / (2 * TAG_GRANULE), ra);

    /*
     * Tag store only happens if the page supports tags,
     * and if the OS has enabled access to the tags.
     */
    if (!tag_mem) {
        return;
    }

    QEMU_BUILD_BUG_ON(GMID_EL1_BS != 6);
    /*
     * We are storing 64-bits worth of tags.  The ordering of nibbles
     * within the word corresponds to a 64-bit little-endian operation.
     */
    stq_le_p(tag_mem, val);
}
479
/*
 * STZGM (tags portion): set the tag for every granule in one DC ZVA
 * block to the tag in val.  The data-zeroing part is done elsewhere.
 */
void HELPER(stzgm_tags)(CPUARMState *env, uint64_t ptr, uint64_t val)
{
    uintptr_t ra = GETPC();
    int mmu_idx = cpu_mmu_index(env, false);
    int log2_dcz_bytes, log2_tag_bytes;
    intptr_t dcz_bytes, tag_bytes;
    uint8_t *mem;

    /*
     * dcz_blocksize is log2(words); +2 converts to log2(bytes).
     * This assumes the DC ZVA block covers at least one whole tag
     * byte (>= 32 bytes) -- presumably asserted at cpu realize time.
     */
    log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
    log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
    dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
    tag_bytes = (intptr_t)1 << log2_tag_bytes;
    ptr &= -dcz_bytes;

    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, dcz_bytes,
                             MMU_DATA_STORE, tag_bytes, ra);
    if (mem) {
        /* Replicate the tag into both nibbles of each tag byte. */
        int tag_pair = (val & 0xf) * 0x11;
        memset(mem, tag_pair, tag_bytes);
    }
}
506
/*
 * Report a synchronous tag check fault: raise a data abort with
 * FSC 0x11 (synchronous tag check fault).  Does not return.
 */
static void mte_sync_check_fail(CPUARMState *env, uint32_t desc,
                                uint64_t dirty_ptr, uintptr_t ra)
{
    int is_write, syn;

    /* Record the dirty (still-tagged) faulting address for FAR_ELx. */
    env->exception.vaddress = dirty_ptr;

    is_write = FIELD_EX32(desc, MTEDESC, WRITE);
    syn = syn_data_abort_no_iss(arm_current_el(env) != 0, 0, 0, 0, 0, is_write,
                                0x11);
    raise_exception_ra(env, EXCP_DATA_ABORT, syn, exception_target_el(env), ra);
    g_assert_not_reached();
}
520
/*
 * Report an asynchronous tag check fault: accumulate into TFSR_ELx.
 * Bit 55 of the pointer selects TF0 vs TF1 for two-range regimes.
 */
static void mte_async_check_fail(CPUARMState *env, uint64_t dirty_ptr,
                                 uintptr_t ra, ARMMMUIdx arm_mmu_idx, int el)
{
    int select;

    if (regime_has_2_ranges(arm_mmu_idx)) {
        select = extract64(dirty_ptr, 55, 1);
    } else {
        select = 0;
    }
    env->cp15.tfsr_el[el] |= 1 << select;
#ifdef CONFIG_USER_ONLY
    /*
     * Stand in for a timer irq, setting _TIF_MTE_ASYNC_FAULT,
     * which then sends a SIGSEGV when the thread is next scheduled.
     * This cpu will return to the main loop at the end of the TB,
     * which is rather sooner than "normal".  But the alternative
     * is waiting until the next syscall.
     */
    qemu_cpu_kick(env_cpu(env));
#endif
}
543
544
/* Record a tag check failure per the SCTLR_ELx.TCF/TCF0 configuration. */
static void mte_check_fail(CPUARMState *env, uint32_t desc,
                           uint64_t dirty_ptr, uintptr_t ra)
{
    int mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    ARMMMUIdx arm_mmu_idx = core_to_aa64_mmu_idx(mmu_idx);
    int el, reg_el, tcf;
    uint64_t sctlr;

    reg_el = regime_el(env, arm_mmu_idx);
    sctlr = env->cp15.sctlr_el[reg_el];

    switch (arm_mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E20_0:
        el = 0;
        tcf = extract64(sctlr, 38, 2);   /* SCTLR_ELx.TCF0 (EL0 accesses) */
        break;
    default:
        el = reg_el;
        tcf = extract64(sctlr, 40, 2);   /* SCTLR_ELx.TCF */
    }

    switch (tcf) {
    case 1:
        /* Tag check fail causes a synchronous exception. */
        mte_sync_check_fail(env, desc, dirty_ptr, ra);
        break;

    case 0:
        /*
         * Tag check fail does not affect the PE.
         * We eliminate this case by not setting MTE_ACTIVE
         * in tb_flags, so that we never make this runtime call.
         */
        g_assert_not_reached();

    case 2:
        /* Tag check fail causes asynchronous flag set.  */
        mte_async_check_fail(env, dirty_ptr, ra, arm_mmu_idx, el);
        break;

    case 3:
        /*
         * Tag check fail causes asynchronous flag set for stores, or
         * a synchronous exception for loads.
         */
        if (FIELD_EX32(desc, MTEDESC, WRITE)) {
            mte_async_check_fail(env, dirty_ptr, ra, arm_mmu_idx, el);
        } else {
            mte_sync_check_fail(env, desc, dirty_ptr, ra);
        }
        break;
    }
}
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
/**
 * checkN:
 * @mem: tag memory to test
 * @odd: nonzero to begin testing at the odd (high) nibble
 * @cmp: the tag to compare against
 * @count: number of tags to test
 *
 * Return the number of successful tests; thus a return value
 * less than @count indicates a tag mismatch at that position.
 */
static int checkN(uint8_t *mem, int odd, int cmp, int count)
{
    int start = odd ? 1 : 0;
    int n;

    /* Replicate the tag into both nibbles for byte-wide comparison. */
    cmp *= 0x11;

    for (n = 0; n < count; n++) {
        /* Absolute nibble position of the n'th tag within @mem. */
        int pos = start + n;
        int diff = mem[pos >> 1] ^ cmp;

        /* Even positions occupy bits [3:0]; odd positions bits [7:4]. */
        if (diff & ((pos & 1) ? 0xf0 : 0x0f)) {
            break;
        }
    }
    return n;
}
661
662
663
664
665
666
667
668
669
670
671
672
673
/**
 * mte_probe_int() - helper for mte_probe and mte_check
 * @env: CPU environment
 * @desc: MTEDESC descriptor
 * @ptr: virtual address of the base of the access
 * @ra: the return address for exception handling, or 0 for non-faulting
 * @fault: return virtual address of the first check failure
 *
 * Internal routine for both mte_probe and mte_check.
 * Return zero on failure, filling in *fault.
 * Return negative on trivial success for tbi disabled.
 * Return positive on success with tbi enabled.
 */
static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
                         uintptr_t ra, uint64_t *fault)
{
    int mmu_idx, ptr_tag, bit55;
    uint64_t ptr_last, prev_page, next_page;
    uint64_t tag_first, tag_last;
    uint64_t tag_byte_first, tag_byte_last;
    uint32_t sizem1, tag_count, tag_size, n, c;
    uint8_t *mem1, *mem2;
    MMUAccessType type;

    bit55 = extract64(ptr, 55, 1);
    *fault = ptr;

    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return -1;
    }

    ptr_tag = allocation_tag_from_addr(ptr);

    /* Access is unchecked when the tag falls under the TCMA rules. */
    if (tcma_check(desc, bit55, ptr_tag)) {
        return 1;
    }

    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD;
    sizem1 = FIELD_EX32(desc, MTEDESC, SIZEM1);

    /* Find the addr of the end of the access. */
    ptr_last = ptr + sizem1;

    /* Round the bounds to the tag granule, and compute the number of tags. */
    tag_first = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
    tag_last = QEMU_ALIGN_DOWN(ptr_last, TAG_GRANULE);
    tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1;

    /* Round the bounds to twice the tag granule: one tag byte per pair. */
    tag_byte_first = QEMU_ALIGN_DOWN(ptr, 2 * TAG_GRANULE);
    tag_byte_last = QEMU_ALIGN_DOWN(ptr_last, 2 * TAG_GRANULE);

    /* Locate the page boundaries. */
    prev_page = ptr & TARGET_PAGE_MASK;
    next_page = prev_page + TARGET_PAGE_SIZE;

    if (likely(tag_last - prev_page < TARGET_PAGE_SIZE)) {
        /* Memory access stays on one page. */
        tag_size = ((tag_byte_last - tag_byte_first) / (2 * TAG_GRANULE)) + 1;
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, sizem1 + 1,
                                  MMU_DATA_LOAD, tag_size, ra);
        if (!mem1) {
            /* Untagged page: access is unchecked. */
            return 1;
        }
        /* Perform all of the comparisons. */
        n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, tag_count);
    } else {
        /* Memory access crosses to next page. */
        tag_size = (next_page - tag_byte_first) / (2 * TAG_GRANULE);
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, next_page - ptr,
                                  MMU_DATA_LOAD, tag_size, ra);

        tag_size = ((tag_byte_last - next_page) / (2 * TAG_GRANULE)) + 1;
        mem2 = allocation_tag_mem(env, mmu_idx, next_page, type,
                                  ptr_last - next_page + 1,
                                  MMU_DATA_LOAD, tag_size, ra);

        /*
         * Perform all of the comparisons.
         * Note the possible but unlikely case of the operation spanning
         * two pages that do not both have tagging enabled.
         */
        n = c = (next_page - tag_first) / TAG_GRANULE;
        if (mem1) {
            n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, c);
        }
        if (n == c) {
            if (!mem2) {
                return 1;
            }
            n += checkN(mem2, 0, ptr_tag, tag_count - c);
        }
    }

    if (likely(n == tag_count)) {
        return 1;
    }

    /*
     * If we failed, we know which granule.  For the first granule, the
     * failure address is @ptr, the first byte accessed.  Otherwise the
     * failure address is the first byte of the nth granule.
     */
    if (n > 0) {
        *fault = tag_first + n * TAG_GRANULE;
    }
    return 0;
}
771
/*
 * Perform an MTE checked access, taking the configured failure action
 * on tag mismatch, and return the pointer cleaned for use as an address.
 */
uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra)
{
    uint64_t fault;
    int ret = mte_probe_int(env, desc, ptr, ra, &fault);

    if (unlikely(ret == 0)) {
        /* Tag mismatch: sync abort or async flag set, per SCTLR.TCF. */
        mte_check_fail(env, desc, fault, ra);
    } else if (ret < 0) {
        /* TBI disabled: the pointer is used unchecked and unmodified. */
        return ptr;
    }
    return useronly_clean_ptr(ptr);
}
784
/* Entry point from generated code; supplies the host return address. */
uint64_t HELPER(mte_check)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    return mte_check(env, desc, ptr, GETPC());
}
789
790
791
792
793
794
795
796bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr)
797{
798 uint64_t fault;
799 int ret = mte_probe_int(env, desc, ptr, 0, &fault);
800
801 return ret != 0;
802}
803
804
805
806
/*
 * Perform an MTE checked access for DC_ZVA: every granule within the
 * ZVA block must match the pointer's tag.
 */
uint64_t HELPER(mte_check_zva)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    uintptr_t ra = GETPC();
    int log2_dcz_bytes, log2_tag_bytes;
    int mmu_idx, bit55;
    intptr_t dcz_bytes, tag_bytes, i;
    void *mem;
    uint64_t ptr_tag, mem_tag, align_ptr;

    bit55 = extract64(ptr, 55, 1);

    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return ptr;
    }

    ptr_tag = allocation_tag_from_addr(ptr);

    /* Access is unchecked when the tag falls under the TCMA rules. */
    if (tcma_check(desc, bit55, ptr_tag)) {
        goto done;
    }

    /*
     * dcz_blocksize is log2(words); +2 converts to log2(bytes).
     * The block is assumed to cover at least one whole tag byte
     * (>= 32 bytes) -- presumably asserted at cpu realize time.
     */
    log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
    log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
    dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
    tag_bytes = (intptr_t)1 << log2_tag_bytes;
    align_ptr = ptr & -dcz_bytes;

    /*
     * Trap if accessing an invalid page.  DC_ZVA requires that we supply
     * the original pointer for an invalid page.  But watchpoints require
     * that we probe the actual space.  So do both.
     */
    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    (void) probe_write(env, ptr, 1, mmu_idx, ra);
    mem = allocation_tag_mem(env, mmu_idx, align_ptr, MMU_DATA_STORE,
                             dcz_bytes, MMU_DATA_LOAD, tag_bytes, ra);
    if (!mem) {
        goto done;
    }

    /*
     * The block is aligned, so all of its tag bytes can be compared at
     * once against the replicated pointer tag.  The tag bytes are read
     * in little-endian order so that the first miscompare can be located
     * from the low bits of the xor below.
     */
    switch (log2_tag_bytes) {
    case 0: /* zva block 32 bytes: 1 tag byte */
        mem_tag = *(uint8_t *)mem;
        ptr_tag *= 0x11u;
        break;
    case 1: /* zva block 64 bytes: 2 tag bytes */
        mem_tag = cpu_to_le16(*(uint16_t *)mem);
        ptr_tag *= 0x1111u;
        break;
    case 2: /* zva block 128 bytes: 4 tag bytes */
        mem_tag = cpu_to_le32(*(uint32_t *)mem);
        ptr_tag *= 0x11111111u;
        break;
    case 3: /* zva block 256 bytes: 8 tag bytes */
        mem_tag = cpu_to_le64(*(uint64_t *)mem);
        ptr_tag *= 0x1111111111111111ull;
        break;

    default: /* zva block 512+ bytes: compare 8 tag bytes at a time */
        ptr_tag *= 0x1111111111111111ull;
        i = 0;
        do {
            mem_tag = cpu_to_le64(*(uint64_t *)(mem + i));
            if (unlikely(mem_tag != ptr_tag)) {
                goto fail;
            }
            i += 8;
            align_ptr += 16 * TAG_GRANULE;
        } while (i < tag_bytes);
        goto done;
    }

    if (likely(mem_tag == ptr_tag)) {
        goto done;
    }

 fail:
    /* Locate the first nibble that differs. */
    i = ctz64(mem_tag ^ ptr_tag) >> 4;
    mte_check_fail(env, desc, align_ptr + i * TAG_GRANULE, ra);

 done:
    return useronly_clean_ptr(ptr);
}
909