qemu/util/bufferiszero.c
/*
 * Simple C functions to supplement the C library
 *
 * Copyright (c) 2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/cutils.h"
#include "qemu/bswap.h"

static bool
buffer_zero_int(const void *buf, size_t len)
{
    if (unlikely(len < 8)) {
        /* For a very small buffer, simply accumulate all the bytes.  */
        const unsigned char *p = buf;
        const unsigned char *e = buf + len;
        unsigned char t = 0;

        do {
            t |= *p++;
        } while (p < e);

        return t == 0;
    } else {
        /* Otherwise, use the unaligned memory access functions to
           handle the beginning and end of the buffer, with a couple
           of loops handling the middle aligned section.  */
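        /* Worked example (addresses arbitrary, for illustration):
           with buf == 0x1003 and len == 26, the unaligned head load
           covers 0x1003..0x100a, p rounds down to 0x1008 and e to
           0x1018, so the word loops below cover 0x1008..0x1017, and
           the final unaligned load covers 0x1015..0x101c.  Every byte
           in [buf, buf + len) is read at least once, some twice.  */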
        uint64_t t = ldq_he_p(buf);
        const uint64_t *p = (uint64_t *)(((uintptr_t)buf + 8) & -8);
        const uint64_t *e = (uint64_t *)(((uintptr_t)buf + len) & -8);

        for (; p + 8 <= e; p += 8) {
            __builtin_prefetch(p + 8);
            if (t) {
                return false;
            }
            t = p[0] | p[1] | p[2] | p[3] | p[4] | p[5] | p[6] | p[7];
        }
        while (p < e) {
            t |= *p++;
        }
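        /* The final unaligned load may overlap bytes already accumulated
           above; OR-ing them in a second time is harmless.  */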
        t |= ldq_he_p(buf + len - 8);

        return t == 0;
    }
}

#if defined(CONFIG_AVX2_OPT) || defined(__SSE2__)
/* Do not use push_options pragmas unnecessarily, because clang
 * does not support them.
 */
#ifdef CONFIG_AVX2_OPT
#pragma GCC push_options
#pragma GCC target("sse2")
#endif
#include <emmintrin.h>

/* Note that each of these vectorized functions requires len >= 64.  */
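/* They all share one shape: an unaligned vector load covers the head of
   the buffer, a software-pipelined loop folds the aligned middle in
   4-vector blocks (testing the previous block's accumulator while the
   next block's loads are in flight), and trailing loads cover the tail,
   possibly overlapping bytes that were already accumulated.  */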

static bool
buffer_zero_sse2(const void *buf, size_t len)
{
    __m128i t = _mm_loadu_si128(buf);
    __m128i *p = (__m128i *)(((uintptr_t)buf + 5 * 16) & -16);
    __m128i *e = (__m128i *)(((uintptr_t)buf + len) & -16);
    __m128i zero = _mm_setzero_si128();

    /* Loop over 16-byte aligned blocks of 64.  */
    while (likely(p <= e)) {
        __builtin_prefetch(p);
        t = _mm_cmpeq_epi8(t, zero);
        if (unlikely(_mm_movemask_epi8(t) != 0xFFFF)) {
            return false;
        }
        t = p[-4] | p[-3] | p[-2] | p[-1];
        p += 4;
    }

    /* Finish the aligned tail.  */
    t |= e[-3];
    t |= e[-2];
    t |= e[-1];

    /* Finish the unaligned tail.  */
    t |= _mm_loadu_si128(buf + len - 16);

    return _mm_movemask_epi8(_mm_cmpeq_epi8(t, zero)) == 0xFFFF;
}
#ifdef CONFIG_AVX2_OPT
#pragma GCC pop_options
#endif

#ifdef CONFIG_AVX2_OPT
/* Note that due to restrictions/bugs wrt __builtin functions in gcc <= 4.8,
 * the includes have to be within the corresponding push_options region, and
 * therefore the regions themselves have to be ordered with increasing ISA.
 */
#pragma GCC push_options
#pragma GCC target("sse4")
#include <smmintrin.h>

static bool
buffer_zero_sse4(const void *buf, size_t len)
{
    __m128i t = _mm_loadu_si128(buf);
    __m128i *p = (__m128i *)(((uintptr_t)buf + 5 * 16) & -16);
    __m128i *e = (__m128i *)(((uintptr_t)buf + len) & -16);

    /* Loop over 16-byte aligned blocks of 64.  */
    while (likely(p <= e)) {
        __builtin_prefetch(p);
        if (unlikely(!_mm_testz_si128(t, t))) {
            return false;
        }
        t = p[-4] | p[-3] | p[-2] | p[-1];
        p += 4;
    }

    /* Finish the aligned tail.  */
    t |= e[-3];
    t |= e[-2];
    t |= e[-1];

    /* Finish the unaligned tail.  */
    t |= _mm_loadu_si128(buf + len - 16);

    return _mm_testz_si128(t, t);
}

#pragma GCC pop_options
#pragma GCC push_options
#pragma GCC target("avx2")
#include <immintrin.h>

static bool
buffer_zero_avx2(const void *buf, size_t len)
{
    /* Begin with an unaligned head of 32 bytes.  */
    __m256i t = _mm256_loadu_si256(buf);
    __m256i *p = (__m256i *)(((uintptr_t)buf + 5 * 32) & -32);
    __m256i *e = (__m256i *)(((uintptr_t)buf + len) & -32);

    if (likely(p <= e)) {
        /* Loop over 32-byte aligned blocks of 128.  */
        do {
            __builtin_prefetch(p);
            if (unlikely(!_mm256_testz_si256(t, t))) {
                return false;
            }
            t = p[-4] | p[-3] | p[-2] | p[-1];
            p += 4;
        } while (p <= e);
    } else {
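        /* No full aligned 4-vector block fits (the buffer is shorter
           than roughly 160 bytes).  The head load covered bytes 0..31;
           cover bytes 32..63 here, and let the unaligned tail loads
           below cover the remainder.  */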
        t |= _mm256_loadu_si256(buf + 32);
        if (len <= 128) {
            goto last2;
        }
    }

    /* Finish the last block of 128 unaligned.  */
    t |= _mm256_loadu_si256(buf + len - 4 * 32);
    t |= _mm256_loadu_si256(buf + len - 3 * 32);
 last2:
    t |= _mm256_loadu_si256(buf + len - 2 * 32);
    t |= _mm256_loadu_si256(buf + len - 1 * 32);

    return _mm256_testz_si256(t, t);
}
#pragma GCC pop_options
#endif /* CONFIG_AVX2_OPT */

/* Note that for test_buffer_is_zero_next_accel, the most preferred
 * ISA must have the least significant bit.
 */
#define CACHE_AVX2    1
#define CACHE_SSE4    2
#define CACHE_SSE2    4
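/* For example, on a CPU with all three features, cpuid_cache starts as
   CACHE_AVX2 | CACHE_SSE4 | CACHE_SSE2 == 7 and init_accel selects
   buffer_zero_avx2; each call to test_buffer_is_zero_next_accel then
   clears the lowest set bit, stepping 7 -> 6 (buffer_zero_sse4)
   -> 4 (buffer_zero_sse2) -> 0 (buffer_zero_int).  */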

/* Make sure that these variables are appropriately initialized when
 * SSE2 is enabled on the compiler command-line, but the compiler is
 * too old to support CONFIG_AVX2_OPT.
 */
#ifdef CONFIG_AVX2_OPT
# define INIT_CACHE 0
# define INIT_ACCEL buffer_zero_int
#else
# ifndef __SSE2__
#  error "ISA selection confusion"
# endif
# define INIT_CACHE CACHE_SSE2
# define INIT_ACCEL buffer_zero_sse2
#endif

static unsigned cpuid_cache = INIT_CACHE;
static bool (*buffer_accel)(const void *, size_t) = INIT_ACCEL;

static void init_accel(unsigned cache)
{
    bool (*fn)(const void *, size_t) = buffer_zero_int;
    if (cache & CACHE_SSE2) {
        fn = buffer_zero_sse2;
    }
#ifdef CONFIG_AVX2_OPT
    if (cache & CACHE_SSE4) {
        fn = buffer_zero_sse4;
    }
    if (cache & CACHE_AVX2) {
        fn = buffer_zero_avx2;
    }
#endif
    buffer_accel = fn;
}

#ifdef CONFIG_AVX2_OPT
#include "qemu/cpuid.h"

static void __attribute__((constructor)) init_cpuid_cache(void)
{
    int max = __get_cpuid_max(0, NULL);
    int a, b, c, d;
    unsigned cache = 0;

    if (max >= 1) {
        __cpuid(1, a, b, c, d);
        if (d & bit_SSE2) {
            cache |= CACHE_SSE2;
        }
        if (c & bit_SSE4_1) {
            cache |= CACHE_SSE4;
        }

        /* We must check that AVX is not just available, but usable.  */
        if ((c & bit_OSXSAVE) && (c & bit_AVX) && max >= 7) {
            int bv;
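            /* xgetbv with ecx == 0 reads XCR0; bit 1 is OS-managed XMM
               state and bit 2 is OS-managed YMM state, so (bv & 6) == 6
               means the OS preserves the full AVX register file across
               context switches.  */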
            __asm("xgetbv" : "=a"(bv), "=d"(d) : "c"(0));
            __cpuid_count(7, 0, a, b, c, d);
            if ((bv & 6) == 6 && (b & bit_AVX2)) {
                cache |= CACHE_AVX2;
            }
        }
    }
    cpuid_cache = cache;
    init_accel(cache);
}
#endif /* CONFIG_AVX2_OPT */

bool test_buffer_is_zero_next_accel(void)
{
    /* If no bits are set, we just tested buffer_zero_int, and there
       are no more acceleration options to test.  */
    if (cpuid_cache == 0) {
        return false;
    }
    /* Disable the accelerator we used before and select a new one.  */
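    /* x &= x - 1 clears the lowest set bit, which by the CACHE_*
       encoding is the most preferred ISA still marked available.  */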
    cpuid_cache &= cpuid_cache - 1;
    init_accel(cpuid_cache);
    return true;
}

static bool select_accel_fn(const void *buf, size_t len)
{
    if (likely(len >= 64)) {
        return buffer_accel(buf, len);
    }
    return buffer_zero_int(buf, len);
}

#else
#define select_accel_fn  buffer_zero_int
bool test_buffer_is_zero_next_accel(void)
{
    return false;
}
#endif

/*
 * Checks if a buffer is all zeroes
 */
bool buffer_is_zero(const void *buf, size_t len)
{
    if (unlikely(len == 0)) {
        return true;
    }

    /* Fetch the beginning of the buffer while we select the accelerator.  */
    __builtin_prefetch(buf);

    /* Use an optimized zero check if possible.  Note that this also
       includes a check for an unrolled loop over 64-bit integers.  */
    return select_accel_fn(buf, len);
}
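
/*
 * Usage sketch (illustrative only): a typical caller tests whole pages
 * and skips transferring the all-zero ones.  The page size here is an
 * assumption for the example, not something this file defines.
 *
 *     uint8_t page[4096];
 *     ...
 *     if (buffer_is_zero(page, sizeof(page))) {
 *         // all zeroes: record a "zero page" marker instead of the data
 *     }
 */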