qemu/include/qemu/xxhash.h
/*
 * xxHash - Fast Hash algorithm
 * Copyright (C) 2012-2016, Yann Collet
 *
 * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * + Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * + Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You can contact the author at :
 * - xxHash source repository : https://github.com/Cyan4973/xxHash
 */

#ifndef QEMU_XXHASH_H
#define QEMU_XXHASH_H

#include "qemu/bitops.h"

#define PRIME32_1   2654435761U
#define PRIME32_2   2246822519U
#define PRIME32_3   3266489917U
#define PRIME32_4    668265263U
#define PRIME32_5    374761393U

#define QEMU_XXHASH_SEED 1

/*
 * xxhash32, customized for input variables that are not guaranteed to be
 * contiguous in memory.
 */
static inline uint32_t
qemu_xxhash7(uint64_t ab, uint64_t cd, uint32_t e, uint32_t f, uint32_t g)
{
    uint32_t v1 = QEMU_XXHASH_SEED + PRIME32_1 + PRIME32_2;
    uint32_t v2 = QEMU_XXHASH_SEED + PRIME32_2;
    uint32_t v3 = QEMU_XXHASH_SEED + 0;
    uint32_t v4 = QEMU_XXHASH_SEED - PRIME32_1;
    uint32_t a = ab;
    uint32_t b = ab >> 32;
    uint32_t c = cd;
    uint32_t d = cd >> 32;
    uint32_t h32;

    v1 += a * PRIME32_2;
    v1 = rol32(v1, 13);
    v1 *= PRIME32_1;

    v2 += b * PRIME32_2;
    v2 = rol32(v2, 13);
    v2 *= PRIME32_1;

    v3 += c * PRIME32_2;
    v3 = rol32(v3, 13);
    v3 *= PRIME32_1;

    v4 += d * PRIME32_2;
    v4 = rol32(v4, 13);
    v4 *= PRIME32_1;

    h32 = rol32(v1, 1) + rol32(v2, 7) + rol32(v3, 12) + rol32(v4, 18);
    h32 += 28;   /* total input is 2 * 8 + 3 * 4 == 28 bytes */

    h32 += e * PRIME32_3;
    h32  = rol32(h32, 17) * PRIME32_4;

    h32 += f * PRIME32_3;
    h32  = rol32(h32, 17) * PRIME32_4;

    h32 += g * PRIME32_3;
    h32  = rol32(h32, 17) * PRIME32_4;

    h32 ^= h32 >> 15;
    h32 *= PRIME32_2;
    h32 ^= h32 >> 13;
    h32 *= PRIME32_3;
    h32 ^= h32 >> 16;

    return h32;
}

static inline uint32_t qemu_xxhash2(uint64_t ab)
{
    return qemu_xxhash7(ab, 0, 0, 0, 0);
}

static inline uint32_t qemu_xxhash4(uint64_t ab, uint64_t cd)
{
    return qemu_xxhash7(ab, cd, 0, 0, 0);
}

static inline uint32_t qemu_xxhash5(uint64_t ab, uint64_t cd, uint32_t e)
{
    return qemu_xxhash7(ab, cd, e, 0, 0);
}

static inline uint32_t qemu_xxhash6(uint64_t ab, uint64_t cd, uint32_t e,
                                    uint32_t f)
{
    return qemu_xxhash7(ab, cd, e, f, 0);
}
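
/*
 * Usage sketch (illustrative only; the struct and function below are
 * hypothetical, not part of this header): the helpers above are meant
 * for hashing a small, fixed set of values that are not contiguous in
 * memory, e.g. the fields of a lookup key:
 *
 *     struct example_key {
 *         uint64_t addr;
 *         uint64_t base;
 *         uint32_t flags;
 *     };
 *
 *     static inline uint32_t example_key_hash(const struct example_key *k)
 *     {
 *         return qemu_xxhash5(k->addr, k->base, k->flags);
 *     }
 *
 * All of the narrower qemu_xxhashN() helpers are thin wrappers around
 * qemu_xxhash7() with the unused inputs set to zero.
 */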

/*
 * Component parts of the XXH64 algorithm from
 * https://github.com/Cyan4973/xxHash/blob/v0.8.0/xxhash.h
 *
 * The complete algorithm looks like
 *
 *  i = 0;
 *  if (len >= 32) {
 *      v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
 *      v2 = seed + XXH_PRIME64_2;
 *      v3 = seed + 0;
 *      v4 = seed - XXH_PRIME64_1;
 *      do {
 *          v1 = XXH64_round(v1, get64bits(input + i));
 *          v2 = XXH64_round(v2, get64bits(input + i + 8));
 *          v3 = XXH64_round(v3, get64bits(input + i + 16));
 *          v4 = XXH64_round(v4, get64bits(input + i + 24));
 *      } while ((i += 32) <= len);
 *      h64 = XXH64_mergerounds(v1, v2, v3, v4);
 *  } else {
 *      h64 = seed + XXH_PRIME64_5;
 *  }
 *  h64 += len;
 *
 *  for (; i + 8 <= len; i += 8) {
 *      h64 ^= XXH64_round(0, get64bits(input + i));
 *      h64 = rol64(h64, 27) * XXH_PRIME64_1 + XXH_PRIME64_4;
 *  }
 *  for (; i + 4 <= len; i += 4) {
 *      h64 ^= get32bits(input + i) * XXH_PRIME64_1;
 *      h64 = rol64(h64, 23) * XXH_PRIME64_2 + XXH_PRIME64_3;
 *  }
 *  for (; i < len; i += 1) {
 *      h64 ^= get8bits(input + i) * XXH_PRIME64_5;
 *      h64 = rol64(h64, 11) * XXH_PRIME64_1;
 *  }
 *
 *  return XXH64_avalanche(h64);
 *
 * Exposing the pieces instead allows for simplified usage when
 * the length is a known constant and the inputs are in registers.
 */
#define XXH_PRIME64_1   0x9E3779B185EBCA87ULL
#define XXH_PRIME64_2   0xC2B2AE3D27D4EB4FULL
#define XXH_PRIME64_3   0x165667B19E3779F9ULL
#define XXH_PRIME64_4   0x85EBCA77C2B2AE63ULL
#define XXH_PRIME64_5   0x27D4EB2F165667C5ULL

static inline uint64_t XXH64_round(uint64_t acc, uint64_t input)
{
    return rol64(acc + input * XXH_PRIME64_2, 31) * XXH_PRIME64_1;
}

static inline uint64_t XXH64_mergeround(uint64_t acc, uint64_t val)
{
    return (acc ^ XXH64_round(0, val)) * XXH_PRIME64_1 + XXH_PRIME64_4;
}

static inline uint64_t XXH64_mergerounds(uint64_t v1, uint64_t v2,
                                         uint64_t v3, uint64_t v4)
{
    uint64_t h64;

    h64 = rol64(v1, 1) + rol64(v2, 7) + rol64(v3, 12) + rol64(v4, 18);
    h64 = XXH64_mergeround(h64, v1);
    h64 = XXH64_mergeround(h64, v2);
    h64 = XXH64_mergeround(h64, v3);
    h64 = XXH64_mergeround(h64, v4);

    return h64;
}

static inline uint64_t XXH64_avalanche(uint64_t h64)
{
    h64 ^= h64 >> 33;
    h64 *= XXH_PRIME64_2;
    h64 ^= h64 >> 29;
    h64 *= XXH_PRIME64_3;
    h64 ^= h64 >> 32;
    return h64;
}

static inline uint64_t qemu_xxhash64_4(uint64_t a, uint64_t b,
                                       uint64_t c, uint64_t d)
{
    uint64_t v1 = QEMU_XXHASH_SEED + XXH_PRIME64_1 + XXH_PRIME64_2;
    uint64_t v2 = QEMU_XXHASH_SEED + XXH_PRIME64_2;
    uint64_t v3 = QEMU_XXHASH_SEED + 0;
    uint64_t v4 = QEMU_XXHASH_SEED - XXH_PRIME64_1;

    v1 = XXH64_round(v1, a);
    v2 = XXH64_round(v2, b);
    v3 = XXH64_round(v3, c);
    v4 = XXH64_round(v4, d);

    return XXH64_avalanche(XXH64_mergerounds(v1, v2, v3, v4));
}
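
/*
 * Illustrative sketch (assumed usage, not part of this header's API):
 * the exposed pieces can also follow the short-input ("len < 32") path
 * of the algorithm outlined above.  A hypothetical fixed-length hash of
 * two 64-bit words (len == 16) would be:
 *
 *     static inline uint64_t example_xxhash64_2(uint64_t a, uint64_t b)
 *     {
 *         uint64_t h64 = QEMU_XXHASH_SEED + XXH_PRIME64_5 + 16;
 *
 *         h64 ^= XXH64_round(0, a);
 *         h64 = rol64(h64, 27) * XXH_PRIME64_1 + XXH_PRIME64_4;
 *         h64 ^= XXH64_round(0, b);
 *         h64 = rol64(h64, 27) * XXH_PRIME64_1 + XXH_PRIME64_4;
 *
 *         return XXH64_avalanche(h64);
 *     }
 */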

#endif /* QEMU_XXHASH_H */