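/*
 * Simple interface for 128-bit atomic operations.
 *
 * See docs/devel/atomics.rst for discussion about the guarantees each
 * atomic primitive is meant to provide.
 */
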
#ifndef QEMU_ATOMIC128_H
#define QEMU_ATOMIC128_H

#include "qemu/int128.h"
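
/*
 * Pick an implementation of the 16-byte atomic operations below,
 * depending on what the host compiler and architecture provide.
 * HAVE_CMPXCHG128 says whether atomic16_cmpxchg is usable, and
 * HAVE_ATOMIC128 says whether atomic16_read and atomic16_set are usable.
 * When a macro is 0, the corresponding function is only declared, with
 * QEMU_ERROR, so any call that is not optimized away fails to build;
 * callers are expected to test the macro and let dead code elimination
 * remove the unreachable call.
 *
 * The Int128Alias union converts between the Int128 type used by callers
 * and the __int128_t type that the compiler builtins operate on.
 */
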
#if defined(CONFIG_ATOMIC128)
static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
{
    Int128Alias r, c, n;

    c.s = cmp;
    n.s = new;
    r.i = qatomic_cmpxchg__nocheck((__int128_t *)ptr, c.i, n.i);
    return r.s;
}
# define HAVE_CMPXCHG128 1
#elif defined(CONFIG_CMPXCHG128)
static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
{
    Int128Alias r, c, n;

    c.s = cmp;
    n.s = new;
    r.i = __sync_val_compare_and_swap_16((__int128_t *)ptr, c.i, n.i);
    return r.s;
}
# define HAVE_CMPXCHG128 1
#elif defined(__aarch64__)
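/*
 * The host compiler does not provide 128-bit atomic builtins here, so
 * implement cmpxchg by hand with a load/store-exclusive (LDAXP/STLXP) loop.
 */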
static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
{
    uint64_t cmpl = int128_getlo(cmp), cmph = int128_gethi(cmp);
    uint64_t newl = int128_getlo(new), newh = int128_gethi(new);
    uint64_t oldl, oldh;
    uint32_t tmp;

    asm("0: ldaxp %[oldl], %[oldh], %[mem]\n\t"
        "cmp %[oldl], %[cmpl]\n\t"
        "ccmp %[oldh], %[cmph], #0, eq\n\t"
        "b.ne 1f\n\t"
        "stlxp %w[tmp], %[newl], %[newh], %[mem]\n\t"
        "cbnz %w[tmp], 0b\n"
        "1:"
        : [mem] "+m"(*ptr), [tmp] "=&r"(tmp),
          [oldl] "=&r"(oldl), [oldh] "=&r"(oldh)
        : [cmpl] "r"(cmpl), [cmph] "r"(cmph),
          [newl] "r"(newl), [newh] "r"(newh)
        : "memory", "cc");

    return int128_make128(oldl, oldh);
}
# define HAVE_CMPXCHG128 1
#else
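/* Fallback definition that must be optimized away, or error.  */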
Int128 QEMU_ERROR("unsupported atomic")
    atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new);
# define HAVE_CMPXCHG128 0
#endif
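
/*
 * 16-byte atomic read and set.  When only cmpxchg is available, they are
 * synthesized from atomic16_cmpxchg below (system-mode only).
 */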
#if defined(CONFIG_ATOMIC128)
static inline Int128 atomic16_read(Int128 *ptr)
{
    Int128Alias r;

    r.i = qatomic_read__nocheck((__int128_t *)ptr);
    return r.s;
}

static inline void atomic16_set(Int128 *ptr, Int128 val)
{
    Int128Alias v;

    v.s = val;
    qatomic_set__nocheck((__int128_t *)ptr, v.i);
}

# define HAVE_ATOMIC128 1
#elif !defined(CONFIG_USER_ONLY) && defined(__aarch64__)
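/* We can do better than cmpxchg for AArch64.  */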
static inline Int128 atomic16_read(Int128 *ptr)
{
    uint64_t l, h;
    uint32_t tmp;
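
    /* The load must be paired with the store to guarantee not tearing.  */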
125 asm("0: ldxp %[l], %[h], %[mem]\n\t"
126 "stxp %w[tmp], %[l], %[h], %[mem]\n\t"
127 "cbnz %w[tmp], 0b"
128 : [mem] "+m"(*ptr), [tmp] "=r"(tmp), [l] "=r"(l), [h] "=r"(h));
129
130 return int128_make128(l, h);
131}
132
static inline void atomic16_set(Int128 *ptr, Int128 val)
{
    uint64_t l = int128_getlo(val), h = int128_gethi(val);
    uint64_t t1, t2;
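
    /* Load into temporaries to acquire the exclusive access lock.  */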
139 asm("0: ldxp %[t1], %[t2], %[mem]\n\t"
140 "stxp %w[t1], %[l], %[h], %[mem]\n\t"
141 "cbnz %w[t1], 0b"
142 : [mem] "+m"(*ptr), [t1] "=&r"(t1), [t2] "=&r"(t2)
143 : [l] "r"(l), [h] "r"(h));
144}
145
146# define HAVE_ATOMIC128 1
147#elif !defined(CONFIG_USER_ONLY) && HAVE_CMPXCHG128
static inline Int128 atomic16_read(Int128 *ptr)
{
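    /* Maybe replace 0 with 0, returning the old value.  */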
    Int128 z = int128_make64(0);
    return atomic16_cmpxchg(ptr, z, z);
}

static inline void atomic16_set(Int128 *ptr, Int128 val)
{
    Int128 old = *ptr, cmp;
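    /* Retry the cmpxchg until it succeeds in installing val.  */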
    do {
        cmp = old;
        old = atomic16_cmpxchg(ptr, cmp, val);
    } while (int128_ne(old, cmp));
}

# define HAVE_ATOMIC128 1
#else
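/* Fallback definitions that must be optimized away, or error.  */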
Int128 QEMU_ERROR("unsupported atomic") atomic16_read(Int128 *ptr);
void QEMU_ERROR("unsupported atomic") atomic16_set(Int128 *ptr, Int128 val);
# define HAVE_ATOMIC128 0
#endif

#endif /* QEMU_ATOMIC128_H */