/*
 * Flush the host cpu caches.
 */

#include "qemu/osdep.h"
#include "qemu/cacheflush.h"
#include "qemu/cacheinfo.h"
#include "qemu/bitops.h"
#include "qemu/host-utils.h"
#include "qemu/atomic.h"


int qemu_icache_linesize = 0;
int qemu_icache_linesize_log;
int qemu_dcache_linesize = 0;
int qemu_dcache_linesize_log;


/*
 * Operating system specific cache line size probing.
 */

#if defined(_WIN32)
/*
 * Probe the level-1 cache line sizes via the Win32 logical processor
 * information API.  On any failure *isize/*dsize are left untouched so
 * that later probes or fallback_cache_info() can fill them in.
 */
static void sys_cache_info(int *isize, int *dsize)
{
    SYSTEM_LOGICAL_PROCESSOR_INFORMATION *buf;
    DWORD size = 0;
    BOOL success;
    size_t i, n;

    /*
     * First call with a zero-length buffer to learn the required size.
     * The expected outcome is failure with ERROR_INSUFFICIENT_BUFFER
     * and SIZE filled in; any other result (including an unexpected
     * success with no buffer) means we cannot probe, so bail out.
     */
    success = GetLogicalProcessorInformation(0, &size);
    if (success || GetLastError() != ERROR_INSUFFICIENT_BUFFER) {
        return;
    }

    /* Round SIZE down to a whole number of entries before re-querying. */
    n = size / sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION);
    size = n * sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION);
    buf = g_new0(SYSTEM_LOGICAL_PROCESSOR_INFORMATION, n);
    if (!GetLogicalProcessorInformation(buf, &size)) {
        goto fail;
    }

    /* Scan every reported relationship for level-1 cache descriptors. */
    for (i = 0; i < n; i++) {
        if (buf[i].Relationship == RelationCache
            && buf[i].Cache.Level == 1) {
            switch (buf[i].Cache.Type) {
            case CacheUnified:
                *isize = *dsize = buf[i].Cache.LineSize;
                break;
            case CacheInstruction:
                *isize = buf[i].Cache.LineSize;
                break;
            case CacheData:
                *dsize = buf[i].Cache.LineSize;
                break;
            default:
                break;
            }
        }
    }
 fail:
    g_free(buf);
}

#elif defined(CONFIG_DARWIN)
# include <sys/sysctl.h>
/*
 * Probe the cache line size on Darwin.  hw.cachelinesize reports a
 * single value, used for both the instruction and data caches.
 * On sysctl failure the outputs are left untouched for the fallback.
 */
static void sys_cache_info(int *isize, int *dsize)
{
    long line_size;
    size_t len = sizeof(line_size);

    if (sysctlbyname("hw.cachelinesize", &line_size, &len, NULL, 0) != 0) {
        return;
    }
    *isize = *dsize = line_size;
}
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
# include <sys/sysctl.h>
/*
 * Probe the cache line size on FreeBSD.  machdep.cacheline_size reports
 * a single value, used for both the instruction and data caches.
 * On sysctl failure the outputs are left untouched for the fallback.
 */
static void sys_cache_info(int *isize, int *dsize)
{
    int line_size;
    size_t len = sizeof(line_size);

    if (sysctlbyname("machdep.cacheline_size", &line_size, &len, NULL, 0) != 0) {
        return;
    }
    *isize = *dsize = line_size;
}
#else
/* POSIX fallback: query sysconf() where the level-1 keys exist. */
/*
 * Probe the level-1 cache line sizes via sysconf().  Each output is
 * written only when the corresponding query succeeds with a positive
 * value; otherwise it is left for arch_cache_info()/fallback to fill.
 */
static void sys_cache_info(int *isize, int *dsize)
{
# ifdef _SC_LEVEL1_ICACHE_LINESIZE
    long icache_line = sysconf(_SC_LEVEL1_ICACHE_LINESIZE);

    if (icache_line > 0) {
        *isize = (int) icache_line;
    }
# endif
# ifdef _SC_LEVEL1_DCACHE_LINESIZE
    long dcache_line = sysconf(_SC_LEVEL1_DCACHE_LINESIZE);

    if (dcache_line > 0) {
        *dsize = (int) dcache_line;
    }
# endif
}
#endif

/*
 * Architecture (+ OS) specific cache line size detection.
 */

#if defined(__powerpc__)
static bool have_coherent_icache;
#endif

#if defined(__aarch64__) && !defined(CONFIG_DARWIN)
/* CTR_EL0 saved at startup for use by flush_idcache_range() below. */
static uint64_t save_ctr_el0;
/*
 * Read the cache line sizes from the CTR_EL0 system register.  The whole
 * register is also saved in save_ctr_el0 because flush_idcache_range()
 * needs its IDC/DIC bits as well as the line sizes.
 */
static void arch_cache_info(int *isize, int *dsize)
{
    uint64_t ctr;

    asm volatile("mrs\t%0, ctr_el0" : "=r"(ctr));
    save_ctr_el0 = ctr;

    /* Only fill in sizes the OS probe could not determine. */
    if (*isize == 0 || *dsize == 0) {
        if (*isize == 0) {
            /* Bits [3:0]: I-cache line size encoded as 4 << n bytes. */
            *isize = 4 << (ctr & 0xf);
        }
        if (*dsize == 0) {
            /* Bits [19:16]: D-cache line size encoded as 4 << n bytes. */
            *dsize = 4 << ((ctr >> 16) & 0xf);
        }
    }
}

#elif defined(_ARCH_PPC) && defined(__linux__)
# include "elf.h"

/*
 * Fill in cache line sizes from the kernel's ELF auxiliary vector, and
 * record whether the icache snoops stores.  When ICACHE_SNOOP is set,
 * flush_idcache_range() below can use a single icbi/isync sequence
 * instead of walking every cache line.
 */
static void arch_cache_info(int *isize, int *dsize)
{
    /* Only fill in sizes the OS probe could not determine. */
    if (*isize == 0) {
        *isize = qemu_getauxval(AT_ICACHEBSIZE);
    }
    if (*dsize == 0) {
        *dsize = qemu_getauxval(AT_DCACHEBSIZE);
    }
    have_coherent_icache = qemu_getauxval(AT_HWCAP) & PPC_FEATURE_ICACHE_SNOOP;
}

#else
167static void arch_cache_info(int *isize, int *dsize) { }
#endif

/*
 * Combine the OS and architecture results, providing defaults for
 * anything still unknown.
 */
/*
 * Ensure both line sizes end up non-zero: mirror a known size onto an
 * unknown one, and fall back to a conservative per-arch default when
 * neither probe produced anything.
 */
static void fallback_cache_info(int *isize, int *dsize)
{
    if (*isize == 0 && *dsize == 0) {
#if defined(_ARCH_PPC)
        /*
         * Conservative choice: the 970 has 128-byte cache lines, while
         * embedded parts go as low as 16 bytes.
         */
        *isize = *dsize = 16;
#else
        /* Common line size on modern x86/arm hosts. */
        *isize = *dsize = 64;
#endif
    } else if (*isize == 0) {
        *isize = *dsize;
    } else if (*dsize == 0) {
        *dsize = *isize;
    }
}
200static void __attribute__((constructor)) init_cache_info(void)
201{
202 int isize = 0, dsize = 0;
203
204 sys_cache_info(&isize, &dsize);
205 arch_cache_info(&isize, &dsize);
206 fallback_cache_info(&isize, &dsize);
207
208 assert((isize & (isize - 1)) == 0);
209 assert((dsize & (dsize - 1)) == 0);
210
211 qemu_icache_linesize = isize;
212 qemu_icache_linesize_log = ctz32(isize);
213 qemu_dcache_linesize = dsize;
214 qemu_dcache_linesize_log = ctz32(dsize);
215
216 qatomic64_init();
217}

/*
 * Architecture (+ OS) specific cache flushing mechanisms.
 */

#if defined(__i386__) || defined(__x86_64__) || defined(__s390__)

/* No out-of-line flush_idcache_range is needed on these hosts. */

#elif defined(__aarch64__)

#ifdef CONFIG_DARWIN
/* Apple does not expose CTR_EL0, so we must use system interfaces. */
extern void sys_icache_invalidate(void *start, size_t len);
extern void sys_dcache_flush(void *start, size_t len);
/*
 * Flush via the Darwin system interfaces: clean the data cache through
 * the writable alias, then invalidate the icache for the executable one.
 */
void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len)
{
    void *write_alias = (void *)rw;
    void *exec_alias = (void *)rx;

    sys_dcache_flush(write_alias, len);
    sys_icache_invalidate(exec_alias, len);
}
#else

/*
 * Make code written through the RW alias visible to instruction fetch
 * through the RX alias, using the line sizes and CTR_EL0 value captured
 * by arch_cache_info() at startup.
 */
void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len)
{
    const unsigned CTR_IDC = 1u << 28;
    const unsigned CTR_DIC = 1u << 29;
    const uint64_t ctr_el0 = save_ctr_el0;
    const uintptr_t icache_lsize = qemu_icache_linesize;
    const uintptr_t dcache_lsize = qemu_dcache_linesize;
    uintptr_t p;

    /*
     * If CTR_EL0.IDC is set, data cache cleaning is not required for
     * instruction/data coherence; otherwise clean every dcache line
     * covering [rw, rw+len) through the writable alias.
     */
    if (!(ctr_el0 & CTR_IDC)) {
        for (p = rw & -dcache_lsize; p < rw + len; p += dcache_lsize) {
            asm volatile("dc\tcvau, %0" : : "r" (p) : "memory");
        }
        /* Make the cleans visible before the icache invalidation. */
        asm volatile("dsb\tish" : : : "memory");
    }

    /*
     * If CTR_EL0.DIC is set, icache invalidation is not required;
     * otherwise invalidate every icache line covering [rx, rx+len)
     * through the executable alias.
     */
    if (!(ctr_el0 & CTR_DIC)) {
        for (p = rx & -icache_lsize; p < rx + len; p += icache_lsize) {
            asm volatile("ic\tivau, %0" : : "r"(p) : "memory");
        }
        asm volatile ("dsb\tish" : : : "memory");
    }

    /* Discard any instructions already fetched on this cpu. */
    asm volatile("isb" : : : "memory");
}
#endif

#elif defined(__mips__)

#ifdef __OpenBSD__
#include <machine/sysarch.h>
#else
#include <sys/cachectl.h>
#endif

/*
 * Flush via the system cacheflush() call.  When the writable alias
 * differs from the executable one, the data cache is written back
 * through the rw mapping first.
 */
void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len)
{
    if (rx != rw) {
        cacheflush((void *)rw, len, DCACHE);
    }
    cacheflush((void *)rx, len, ICACHE);
}

#elif defined(__powerpc__)

/*
 * Make code written through the RW alias visible to instruction fetch
 * through the RX alias, line by line with dcbst/icbi, or with a single
 * icbi when the icache snoops stores (see arch_cache_info above).
 */
void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len)
{
    uintptr_t p, b, e;
    size_t dsize, isize;

    /*
     * With ICACHE_SNOOP the hardware keeps the icache coherent with
     * stores; one icbi bracketed by sync/isync suffices for the whole
     * range regardless of len.
     */
    if (have_coherent_icache) {
        asm volatile ("sync\n\t"
                      "icbi 0,%0\n\t"
                      "isync"
                      : : "r"(rx) : "memory");
        return;
    }

    dsize = qemu_dcache_linesize;
    isize = qemu_icache_linesize;

    /* Write back every dcache line covering [rw, rw+len). */
    b = rw & ~(dsize - 1);
    e = (rw + len + dsize - 1) & ~(dsize - 1);
    for (p = b; p < e; p += dsize) {
        asm volatile ("dcbst 0,%0" : : "r"(p) : "memory");
    }
    /* Order the stores before the icache invalidation. */
    asm volatile ("sync" : : : "memory");

    /* Invalidate every icache line covering [rx, rx+len). */
    b = rx & ~(isize - 1);
    e = (rx + len + isize - 1) & ~(isize - 1);
    for (p = b; p < e; p += isize) {
        asm volatile ("icbi 0,%0" : : "r"(p) : "memory");
    }
    asm volatile ("sync" : : : "memory");
    /* Discard any prefetched instructions on this cpu. */
    asm volatile ("isync" : : : "memory");
}

#elif defined(__sparc__)

/*
 * Flush the icache with the FLUSH instruction over doubleword-aligned
 * addresses covering [rx, rx+len).  Note that only the executable alias
 * is flushed here; no separate data-cache step is performed for rw.
 */
void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len)
{
    uintptr_t p, end = (rx + len + 7) & -8;
    for (p = rx & -8; p < end; p += 8) {
        __asm__ __volatile__("flush\t%0" : : "r" (p));
    }
}

#else

/*
 * Generic fallback: let the compiler's builtin flush both mappings.
 * The writable alias is cleared first when it differs from the
 * executable one.
 */
void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len)
{
    char *write_alias = (char *)rw;
    char *exec_alias = (char *)rx;

    if (write_alias != exec_alias) {
        __builtin___clear_cache(write_alias, write_alias + len);
    }
    __builtin___clear_cache(exec_alias, exec_alias + len);
}

#endif