qemu/util/host-utils.c
/*
 * Utility compute operations used by translated code.
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2007 Aurelien Jarno
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/host-utils.h"

#ifndef CONFIG_INT128
/* Long integer helpers */
static inline void mul64(uint64_t *plow, uint64_t *phigh,
                         uint64_t a, uint64_t b)
{
    typedef union {
        uint64_t ll;
        struct {
#ifdef HOST_WORDS_BIGENDIAN
            uint32_t high, low;
#else
            uint32_t low, high;
#endif
        } l;
    } LL;
    LL rl, rm, rn, rh, a0, b0;
    uint64_t c;

    a0.ll = a;
    b0.ll = b;

    rl.ll = (uint64_t)a0.l.low * b0.l.low;
    rm.ll = (uint64_t)a0.l.low * b0.l.high;
    rn.ll = (uint64_t)a0.l.high * b0.l.low;
    rh.ll = (uint64_t)a0.l.high * b0.l.high;

    c = (uint64_t)rl.l.high + rm.l.low + rn.l.low;
    rl.l.high = c;
    c >>= 32;
    c = c + rm.l.high + rn.l.high + rh.l.low;
    rh.l.low = c;
    rh.l.high += (uint32_t)(c >> 32);

    *plow = rl.ll;
    *phigh = rh.ll;
}

/* Unsigned 64x64 -> 128 multiplication */
void mulu64 (uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    mul64(plow, phigh, a, b);
}
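
/*
 * Illustrative example (values invented for this sketch): mul64() splits
 * each operand into 32-bit halves, a = a1*2^32 + a0 and b = b1*2^32 + b0,
 * so that
 *
 *     a * b = a1*b1*2^64 + (a1*b0 + a0*b1)*2^32 + a0*b0
 *
 * and each 2^32 column is summed in a 64-bit accumulator so no carry is
 * lost. For instance:
 *
 *     uint64_t lo, hi;
 *     mulu64(&lo, &hi, 0x0000000100000002ULL, 0x0000000300000004ULL);
 *     // hi == 0x3, lo == 0x0000000a00000008
 *
 * since (2^32 + 2) * (3*2^32 + 4) = 3*2^64 + 10*2^32 + 8.
 */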

/* Signed 64x64 -> 128 multiplication */
void muls64 (uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b)
{
    uint64_t rh;

    mul64(plow, &rh, a, b);

    /* Adjust for signs.  */
    if (b < 0) {
        rh -= a;
    }
    if (a < 0) {
        rh -= b;
    }
    *phigh = rh;
}
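
/*
 * Why the adjustment works (sketch): reinterpreted as unsigned, a negative
 * a contributes a + 2^64 and a negative b contributes b + 2^64, so
 *
 *     a_u * b_u = a*b + 2^64*(b*[a<0] + a*[b<0]) + 2^128*[a<0]*[b<0]
 *
 * Modulo 2^128 the low 64 bits are unchanged, and the signed high word is
 * recovered by subtracting a when b < 0 and b when a < 0. Illustrative
 * call (operands invented for this example):
 *
 *     uint64_t lo, hi;
 *     muls64(&lo, &hi, -1, 2);
 *     // hi == 0xffffffffffffffff, lo == 0xfffffffffffffffe  (-2 as 128-bit)
 */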

/*
 * Unsigned 128-by-64 division.
 * Returns quotient via plow and phigh.
 * Also returns the remainder via the function return value.
 */
uint64_t divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
{
    uint64_t dhi = *phigh;
    uint64_t dlo = *plow;
    uint64_t rem, dhighest;
    int sh;

    if (divisor == 0 || dhi == 0) {
        *plow  = dlo / divisor;
        *phigh = 0;
        return dlo % divisor;
    } else {
        sh = clz64(divisor);

        if (dhi < divisor) {
            if (sh != 0) {
                /* normalize the divisor, shifting the dividend accordingly */
                divisor <<= sh;
                dhi = (dhi << sh) | (dlo >> (64 - sh));
                dlo <<= sh;
            }

            *phigh = 0;
            *plow = udiv_qrnnd(&rem, dhi, dlo, divisor);
        } else {
            if (sh != 0) {
                /* normalize the divisor, shifting the dividend accordingly */
                divisor <<= sh;
                dhighest = dhi >> (64 - sh);
                dhi = (dhi << sh) | (dlo >> (64 - sh));
                dlo <<= sh;

                *phigh = udiv_qrnnd(&dhi, dhighest, dhi, divisor);
            } else {
                /*
                 * dhi >= divisor
                 * Since the MSB of divisor is set (sh == 0),
                 * (dhi - divisor) < divisor
                 *
                 * Thus, the high part of the quotient is 1, and we can
                 * calculate the low part with a single call to udiv_qrnnd
                 * after subtracting divisor from dhi
                 */
                dhi -= divisor;
                *phigh = 1;
            }

            *plow = udiv_qrnnd(&rem, dhi, dlo, divisor);
        }

        /*
         * since the dividend/divisor might have been normalized,
         * the remainder might also have to be shifted back
         */
        return rem >> sh;
    }
}
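
/*
 * Illustrative use (inputs invented for this sketch): dividing the 128-bit
 * value 2^64 (phigh = 1, plow = 0) by 3 takes the dhi < divisor path, where
 * normalization shifts divisor and dividend left by clz64(3) == 62 bits so
 * that udiv_qrnnd() sees a divisor with its MSB set:
 *
 *     uint64_t lo = 0, hi = 1;
 *     uint64_t r = divu128(&lo, &hi, 3);
 *     // hi == 0, lo == 0x5555555555555555, r == 1
 *
 * The raw remainder from udiv_qrnnd() is 2^62 and is shifted back right by
 * sh == 62 before being returned.
 */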

/*
 * Signed 128-by-64 division.
 * Returns quotient via plow and phigh.
 * Also returns the remainder via the function return value.
 */
int64_t divs128(uint64_t *plow, int64_t *phigh, int64_t divisor)
{
    bool neg_quotient = false, neg_remainder = false;
    uint64_t unsig_hi = *phigh, unsig_lo = *plow;
    uint64_t rem;

    if (*phigh < 0) {
        neg_quotient = !neg_quotient;
        neg_remainder = !neg_remainder;

        if (unsig_lo == 0) {
            unsig_hi = -unsig_hi;
        } else {
            unsig_hi = ~unsig_hi;
            unsig_lo = -unsig_lo;
        }
    }

    if (divisor < 0) {
        neg_quotient = !neg_quotient;

        divisor = -divisor;
    }

    rem = divu128(&unsig_lo, &unsig_hi, (uint64_t)divisor);

    if (neg_quotient) {
        if (unsig_lo == 0) {
            *phigh = -unsig_hi;
            *plow = 0;
        } else {
            *phigh = ~unsig_hi;
            *plow = -unsig_lo;
        }
    } else {
        *phigh = unsig_hi;
        *plow = unsig_lo;
    }

    if (neg_remainder) {
        return -rem;
    } else {
        return rem;
    }
}
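
/*
 * Notes on the sign handling (sketch): a 128-bit two's complement value
 * hi:lo is negated as ~(hi:lo) + 1; the +1 carries into the high word only
 * when lo == 0, which is why the negation is (-hi : 0) in that case and
 * (~hi : -lo) otherwise. The quotient is negated when exactly one operand
 * is negative, and the remainder keeps the sign of the dividend.
 * Illustrative call (operands invented for this example), computing -7 / 2:
 *
 *     uint64_t lo = (uint64_t)-7;
 *     int64_t hi = -1;
 *     int64_t r = divs128(&lo, &hi, 2);
 *     // hi == -1, lo == 0xfffffffffffffffd (quotient -3), r == -1
 */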
#endif

/**
 * urshift - 128-bit Unsigned Right Shift.
 * @plow: in/out - lower 64-bit integer.
 * @phigh: in/out - higher 64-bit integer.
 * @shift: in - bits to shift, between 0 and 127.
 *
 * Result is zero-extended and stored in plow/phigh, which are
 * input/output variables. Shift values outside the range are
 * reduced modulo 128; the caller is responsible for validating
 * the shift range and the plow/phigh pointers.
 */
void urshift(uint64_t *plow, uint64_t *phigh, int32_t shift)
{
    shift &= 127;
    if (shift == 0) {
        return;
    }

    uint64_t h = *phigh >> (shift & 63);
    if (shift >= 64) {
        *plow = h;
        *phigh = 0;
    } else {
        *plow = (*plow >> (shift & 63)) | (*phigh << (64 - (shift & 63)));
        *phigh = h;
    }
}
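
/*
 * Example (shift amounts invented for this sketch): the "shift & 63"
 * masking keeps every individual C shift below 64 bits (avoiding undefined
 * behaviour) while still producing the full 128-bit result:
 *
 *     uint64_t lo = 0, hi = 1;
 *     urshift(&lo, &hi, 1);
 *     // lo == 0x8000000000000000, hi == 0
 *
 * For shift >= 64, (shift & 63) equals shift - 64, so the high word
 * shifted by that amount becomes the new low word directly.
 */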

/**
 * ulshift - 128-bit Unsigned Left Shift.
 * @plow: in/out - lower 64-bit integer.
 * @phigh: in/out - higher 64-bit integer.
 * @shift: in - bits to shift, between 0 and 127.
 * @overflow: out - true if any 1-bit is shifted out.
 *
 * Result is zero-extended and stored in plow/phigh, which are
 * input/output variables. Shift values outside the range are
 * reduced modulo 128; the caller is responsible for validating
 * the shift range and the plow/phigh pointers.
 */
void ulshift(uint64_t *plow, uint64_t *phigh, int32_t shift, bool *overflow)
{
    uint64_t low = *plow;
    uint64_t high = *phigh;

    shift &= 127;
    if (shift == 0) {
        return;
    }

    /* check if any bit will be shifted out */
    urshift(&low, &high, 128 - shift);
    if (low | high) {
        *overflow = true;
    }

    if (shift >= 64) {
        *phigh = *plow << (shift & 63);
        *plow = 0;
    } else {
        *phigh = (*plow >> (64 - (shift & 63))) | (*phigh << (shift & 63));
        *plow = *plow << shift;
    }
}
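
/*
 * Example of the overflow check (operands invented for this sketch): a left
 * shift by "shift" discards the top "shift" bits, and urshift(&low, &high,
 * 128 - shift) moves exactly those bits to the bottom, so any nonzero
 * result means a set bit is lost. Note that *overflow is only ever set,
 * never cleared, so the caller initialises it:
 *
 *     bool ovf = false;
 *     uint64_t lo = 0, hi = 0x8000000000000000ULL;
 *     ulshift(&lo, &hi, 1, &ovf);
 *     // lo == 0, hi == 0, ovf == true
 */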