qemu/util/cacheflush.c
/*
 * Flush the host cpu caches.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/cacheflush.h"
#include "qemu/bitops.h"


#if defined(__i386__) || defined(__x86_64__) || defined(__s390__)

/*
 * Caches are coherent on these hosts and do not require flushing;
 * the symbol is defined inline in the cacheflush.h header.
 */
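
/*
 * For reference, a minimal sketch of the inline no-op those hosts get
 * (an assumption about the exact header contents, which may differ):
 *
 *     static inline void flush_idcache_range(uintptr_t rx, uintptr_t rw,
 *                                            size_t len)
 *     {
 *         // icache is coherent; nothing to do
 *     }
 */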
#elif defined(__aarch64__)

#ifdef CONFIG_DARWIN
/*
 * Apple does not expose CTR_EL0, so we must use system interfaces.
 * These are declared in <libkern/OSCacheControl.h>.
 */
extern void sys_icache_invalidate(void *start, size_t len);
extern void sys_dcache_flush(void *start, size_t len);

void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len)
{
    /*
     * Clean data through the writable alias first, then invalidate
     * the instruction cache through the executable alias.
     */
    sys_dcache_flush((void *)rw, len);
    sys_icache_invalidate((void *)rx, len);
}
#else

/*
 * TODO: unify this with cacheinfo.c.
 * We save the whole contents of CTR_EL0 so that we have not just the
 * cache line sizes, but also the IDC and DIC bits.
 */
static uint64_t save_ctr_el0;
static void __attribute__((constructor)) init_ctr_el0(void)
{
    asm volatile("mrs\t%0, ctr_el0" : "=r"(save_ctr_el0));
}
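
/*
 * For reference, the CTR_EL0 fields used below (Arm ARM):
 *   IminLine,  bits [3:0]    log2 of the icache line size, in words
 *   DminLine,  bits [19:16]  log2 of the dcache line size, in words
 *   IDC,       bit  [28]     dcache clean to PoU not needed for coherence
 *   DIC,       bit  [29]     icache invalidate to PoU not needed
 * A line size in bytes is therefore 4 << N; e.g. IminLine == 4 gives
 * an instruction cache line of 4 << 4 == 64 bytes.
 */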

/*
 * This is a copy of gcc's __aarch64_sync_cache_range, modified
 * to fit this three-operand interface.
 */
void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len)
{
    const unsigned CTR_IDC = 1u << 28;
    const unsigned CTR_DIC = 1u << 29;
    const uint64_t ctr_el0 = save_ctr_el0;
    const uintptr_t icache_lsize = 4 << extract64(ctr_el0, 0, 4);
    const uintptr_t dcache_lsize = 4 << extract64(ctr_el0, 16, 4);
    uintptr_t p;

    /*
     * If CTR_EL0.IDC is set, data cache clean to the Point of Unification
     * is not required for instruction to data coherence.
     */
    if (!(ctr_el0 & CTR_IDC)) {
        /*
         * Loop over the address range, cleaning one cache line at a time.
         * The data cache must be cleaned to the Point of Unification first,
         * so that the instruction cache fetches the updated data.
         */
        for (p = rw & -dcache_lsize; p < rw + len; p += dcache_lsize) {
            asm volatile("dc\tcvau, %0" : : "r" (p) : "memory");
        }
        asm volatile("dsb\tish" : : : "memory");
    }

    /*
     * If CTR_EL0.DIC is set, instruction cache invalidation to the Point
     * of Unification is not required for instruction to data coherence.
     */
    if (!(ctr_el0 & CTR_DIC)) {
        for (p = rx & -icache_lsize; p < rx + len; p += icache_lsize) {
            asm volatile("ic\tivau, %0" : : "r"(p) : "memory");
        }
        asm volatile("dsb\tish" : : : "memory");
    }

    /* Synchronize the instruction stream with the updated cache state. */
    asm volatile("isb" : : : "memory");
}
#endif /* CONFIG_DARWIN */

#elif defined(__mips__)

#ifdef __OpenBSD__
#include <machine/sysarch.h>
#else
#include <sys/cachectl.h>
#endif

void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len)
{
    if (rx != rw) {
        /* Write back dirty data through the writable alias first. */
        cacheflush((void *)rw, len, DCACHE);
    }
    cacheflush((void *)rx, len, ICACHE);
}
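
/*
 * For reference, the interface used above is, on Linux,
 *     int cacheflush(void *addr, int nbytes, int cache);
 * with cache being ICACHE, DCACHE, or BCACHE for both; see the Linux
 * cacheflush(2) man page.  OpenBSD provides the same call via
 * <machine/sysarch.h>.
 */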

#elif defined(__powerpc__)

void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len)
{
    uintptr_t p, b, e;
    size_t dsize = qemu_dcache_linesize;
    size_t isize = qemu_icache_linesize;

    /* Write back all dirty data cache lines covering the buffer. */
    b = rw & ~(dsize - 1);
    e = (rw + len + dsize - 1) & ~(dsize - 1);
    for (p = b; p < e; p += dsize) {
        asm volatile ("dcbst 0,%0" : : "r"(p) : "memory");
    }
    /* Wait for the writebacks to complete. */
    asm volatile ("sync" : : : "memory");

    /* Invalidate the instruction cache lines covering the buffer. */
    b = rx & ~(isize - 1);
    e = (rx + len + isize - 1) & ~(isize - 1);
    for (p = b; p < e; p += isize) {
        asm volatile ("icbi 0,%0" : : "r"(p) : "memory");
    }
    /* Wait for the invalidations, then discard prefetched instructions. */
    asm volatile ("sync" : : : "memory");
    asm volatile ("isync" : : : "memory");
}
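
/*
 * qemu_dcache_linesize and qemu_icache_linesize are probed at startup
 * elsewhere in this tree (util/cacheinfo.c); this is the duplication
 * the aarch64 TODO above refers to.
 */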

#elif defined(__sparc__)

void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len)
{
    /*
     * No additional data flush to the RW virtual address is required.
     * The FLUSH instruction operates on (at least) a doubleword, hence
     * the 8-byte alignment and stride.
     */
    uintptr_t p, end = (rx + len + 7) & -8;
    for (p = rx & -8; p < end; p += 8) {
        __asm__ __volatile__("flush\t%0" : : "r" (p));
    }
}

#else

/* Fallback for all other hosts: use the compiler builtin. */
void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len)
{
    if (rw != rx) {
        __builtin___clear_cache((char *)rw, (char *)rw + len);
    }
    __builtin___clear_cache((char *)rx, (char *)rx + len);
}

#endif
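
/*
 * A minimal usage sketch (hypothetical caller; in QEMU the real caller
 * is the TCG code generator, which may map its code buffer at separate
 * read-execute and read-write addresses):
 *
 *     memcpy((void *)rw, code, len);     // emit via the writable alias
 *     flush_idcache_range(rx, rw, len);  // make it visible to fetch
 *     ((void (*)(void))rx)();            // execute via the exec alias
 */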