qemu/include/qemu/host-utils.h
/*
 * Utility compute operations used by translated code.
 *
 * Copyright (c) 2007 Thiemo Seufer
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Portions of this work are licensed under the terms of the GNU GPL,
 * version 2 or later. See the COPYING file in the top-level directory.
 */

#ifndef HOST_UTILS_H
#define HOST_UTILS_H

#include "qemu/bswap.h"
#include "qemu/int128.h"

#ifdef CONFIG_INT128
static inline void mulu64(uint64_t *plow, uint64_t *phigh,
                          uint64_t a, uint64_t b)
{
    __uint128_t r = (__uint128_t)a * b;
    *plow = r;
    *phigh = r >> 64;
}

static inline void muls64(uint64_t *plow, uint64_t *phigh,
                          int64_t a, int64_t b)
{
    __int128_t r = (__int128_t)a * b;
    *plow = r;
    *phigh = r >> 64;
}

/* compute with 96 bit intermediate result: (a*b)/c */
static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
{
    return (__int128_t)a * b / c;
}

static inline uint64_t divu128(uint64_t *plow, uint64_t *phigh,
                               uint64_t divisor)
{
    __uint128_t dividend = ((__uint128_t)*phigh << 64) | *plow;
    __uint128_t result = dividend / divisor;

    *plow = result;
    *phigh = result >> 64;
    return dividend % divisor;
}

static inline int64_t divs128(uint64_t *plow, int64_t *phigh,
                              int64_t divisor)
{
    __int128_t dividend = ((__int128_t)*phigh << 64) | *plow;
    __int128_t result = dividend / divisor;

    *plow = result;
    *phigh = result >> 64;
    return dividend % divisor;
}
#else
void muls64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b);
void mulu64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b);
uint64_t divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor);
int64_t divs128(uint64_t *plow, int64_t *phigh, int64_t divisor);

static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
{
    union {
        uint64_t ll;
        struct {
#if HOST_BIG_ENDIAN
            uint32_t high, low;
#else
            uint32_t low, high;
#endif
        } l;
    } u, res;
    uint64_t rl, rh;

    u.ll = a;
    rl = (uint64_t)u.l.low * (uint64_t)b;
    rh = (uint64_t)u.l.high * (uint64_t)b;
    rh += (rl >> 32);
    res.l.high = rh / c;
    res.l.low = (((rh % c) << 32) + (rl & 0xffffffff)) / c;
    return res.ll;
}
#endif
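
/*
 * Usage sketch (illustrative only, not part of this header): a full
 * 64x64 -> 128 bit multiply followed by a 128/64 divide, e.g. to scale
 * a value by num/den without losing the intermediate precision.  The
 * names value/num/den are hypothetical.
 *
 *     uint64_t lo, hi, rem;
 *     mulu64(&lo, &hi, value, num);    // hi:lo = value * num
 *     rem = divu128(&lo, &hi, den);    // hi:lo = (value * num) / den
 *     // lo holds the low 64 bits of the quotient, rem the remainder
 */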

/**
 * clz32 - count leading zeros in a 32-bit value.
 * @val: The value to search
 *
 * Returns 32 if the value is zero.  Note that the GCC builtin is
 * undefined if the value is zero.
 */
static inline int clz32(uint32_t val)
{
    return val ? __builtin_clz(val) : 32;
}

/**
 * clo32 - count leading ones in a 32-bit value.
 * @val: The value to search
 *
 * Returns 32 if the value is -1.
 */
static inline int clo32(uint32_t val)
{
    return clz32(~val);
}

/**
 * clz64 - count leading zeros in a 64-bit value.
 * @val: The value to search
 *
 * Returns 64 if the value is zero.  Note that the GCC builtin is
 * undefined if the value is zero.
 */
static inline int clz64(uint64_t val)
{
    return val ? __builtin_clzll(val) : 64;
}

/**
 * clo64 - count leading ones in a 64-bit value.
 * @val: The value to search
 *
 * Returns 64 if the value is -1.
 */
static inline int clo64(uint64_t val)
{
    return clz64(~val);
}

/**
 * ctz32 - count trailing zeros in a 32-bit value.
 * @val: The value to search
 *
 * Returns 32 if the value is zero.  Note that the GCC builtin is
 * undefined if the value is zero.
 */
static inline int ctz32(uint32_t val)
{
    return val ? __builtin_ctz(val) : 32;
}

/**
 * cto32 - count trailing ones in a 32-bit value.
 * @val: The value to search
 *
 * Returns 32 if the value is -1.
 */
static inline int cto32(uint32_t val)
{
    return ctz32(~val);
}

/**
 * ctz64 - count trailing zeros in a 64-bit value.
 * @val: The value to search
 *
 * Returns 64 if the value is zero.  Note that the GCC builtin is
 * undefined if the value is zero.
 */
static inline int ctz64(uint64_t val)
{
    return val ? __builtin_ctzll(val) : 64;
}

/**
 * cto64 - count trailing ones in a 64-bit value.
 * @val: The value to search
 *
 * Returns 64 if the value is -1.
 */
static inline int cto64(uint64_t val)
{
    return ctz64(~val);
}
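
/*
 * Worked examples (illustrative): for a non-zero @val, 31 - clz32(val)
 * and 63 - clz64(val) give the bit index of the most significant set
 * bit, while ctz32()/ctz64() give the index of the least significant
 * set bit:
 *
 *     clz32(0x00010000) == 15      so the MSB index is 31 - 15 == 16
 *     ctz32(0x00010000) == 16
 *     clz32(0) == ctz32(0) == 32   defined here, unlike the raw builtins
 */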

/**
 * clrsb32 - count leading redundant sign bits in a 32-bit value.
 * @val: The value to search
 *
 * Returns the number of bits following the sign bit that are equal to it.
 * No special cases; output range is [0-31].
 */
static inline int clrsb32(uint32_t val)
{
#if __has_builtin(__builtin_clrsb) || !defined(__clang__)
    return __builtin_clrsb(val);
#else
    return clz32(val ^ ((int32_t)val >> 1)) - 1;
#endif
}

/**
 * clrsb64 - count leading redundant sign bits in a 64-bit value.
 * @val: The value to search
 *
 * Returns the number of bits following the sign bit that are equal to it.
 * No special cases; output range is [0-63].
 */
static inline int clrsb64(uint64_t val)
{
#if __has_builtin(__builtin_clrsbll) || !defined(__clang__)
    return __builtin_clrsbll(val);
#else
    return clz64(val ^ ((int64_t)val >> 1)) - 1;
#endif
}
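
/*
 * Worked examples (illustrative):
 *
 *     clrsb32(0)  == 31    every remaining bit equals the sign bit
 *     clrsb32(-1) == 31
 *     clrsb32(1)  == 30    thirty copies of the sign bit, then bit 0
 *     clrsb32(-2) == 30
 */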

/**
 * ctpop8 - count the population of one bits in an 8-bit value.
 * @val: The value to search
 */
static inline int ctpop8(uint8_t val)
{
    return __builtin_popcount(val);
}

/**
 * ctpop16 - count the population of one bits in a 16-bit value.
 * @val: The value to search
 */
static inline int ctpop16(uint16_t val)
{
    return __builtin_popcount(val);
}

/**
 * ctpop32 - count the population of one bits in a 32-bit value.
 * @val: The value to search
 */
static inline int ctpop32(uint32_t val)
{
    return __builtin_popcount(val);
}

/**
 * ctpop64 - count the population of one bits in a 64-bit value.
 * @val: The value to search
 */
static inline int ctpop64(uint64_t val)
{
    return __builtin_popcountll(val);
}

/**
 * revbit8 - reverse the bits in an 8-bit value.
 * @x: The value to modify.
 */
static inline uint8_t revbit8(uint8_t x)
{
#if __has_builtin(__builtin_bitreverse8)
    return __builtin_bitreverse8(x);
#else
    /* Assign the correct nibble position.  */
    x = ((x & 0xf0) >> 4)
      | ((x & 0x0f) << 4);
    /* Assign the correct bit position.  */
    x = ((x & 0x88) >> 3)
      | ((x & 0x44) >> 1)
      | ((x & 0x22) << 1)
      | ((x & 0x11) << 3);
    return x;
#endif
}
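
/*
 * Worked trace of the fallback (illustrative), for x = 0x35 (0b00110101):
 *
 *     nibble swap:  0x35 -> 0x53  (0b01010011)
 *     bit swap:     0x53 -> 0xac  (0b10101100)
 *
 * 0xac is 0x35 read back to front, as expected.  The wider revbit16/32/64
 * fallbacks below use the same swap-within-halves idea, preceded by a
 * byte swap.
 */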

/**
 * revbit16 - reverse the bits in a 16-bit value.
 * @x: The value to modify.
 */
static inline uint16_t revbit16(uint16_t x)
{
#if __has_builtin(__builtin_bitreverse16)
    return __builtin_bitreverse16(x);
#else
    /* Assign the correct byte position.  */
    x = bswap16(x);
    /* Assign the correct nibble position.  */
    x = ((x & 0xf0f0) >> 4)
      | ((x & 0x0f0f) << 4);
    /* Assign the correct bit position.  */
    x = ((x & 0x8888) >> 3)
      | ((x & 0x4444) >> 1)
      | ((x & 0x2222) << 1)
      | ((x & 0x1111) << 3);
    return x;
#endif
}

/**
 * revbit32 - reverse the bits in a 32-bit value.
 * @x: The value to modify.
 */
static inline uint32_t revbit32(uint32_t x)
{
#if __has_builtin(__builtin_bitreverse32)
    return __builtin_bitreverse32(x);
#else
    /* Assign the correct byte position.  */
    x = bswap32(x);
    /* Assign the correct nibble position.  */
    x = ((x & 0xf0f0f0f0u) >> 4)
      | ((x & 0x0f0f0f0fu) << 4);
    /* Assign the correct bit position.  */
    x = ((x & 0x88888888u) >> 3)
      | ((x & 0x44444444u) >> 1)
      | ((x & 0x22222222u) << 1)
      | ((x & 0x11111111u) << 3);
    return x;
#endif
}

/**
 * revbit64 - reverse the bits in a 64-bit value.
 * @x: The value to modify.
 */
static inline uint64_t revbit64(uint64_t x)
{
#if __has_builtin(__builtin_bitreverse64)
    return __builtin_bitreverse64(x);
#else
    /* Assign the correct byte position.  */
    x = bswap64(x);
    /* Assign the correct nibble position.  */
    x = ((x & 0xf0f0f0f0f0f0f0f0ull) >> 4)
      | ((x & 0x0f0f0f0f0f0f0f0full) << 4);
    /* Assign the correct bit position.  */
    x = ((x & 0x8888888888888888ull) >> 3)
      | ((x & 0x4444444444444444ull) >> 1)
      | ((x & 0x2222222222222222ull) << 1)
      | ((x & 0x1111111111111111ull) << 3);
    return x;
#endif
}

/**
 * Return the absolute value of a 64-bit integer as an unsigned 64-bit value
 */
static inline uint64_t uabs64(int64_t v)
{
    /* Negate in unsigned arithmetic so that v == INT64_MIN is well defined. */
    return v < 0 ? -(uint64_t)v : v;
}

/**
 * sadd32_overflow - addition with overflow indication
 * @x, @y: addends
 * @ret: Output for sum
 *
 * Computes *@ret = @x + @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool sadd32_overflow(int32_t x, int32_t y, int32_t *ret)
{
    return __builtin_add_overflow(x, y, ret);
}

/**
 * sadd64_overflow - addition with overflow indication
 * @x, @y: addends
 * @ret: Output for sum
 *
 * Computes *@ret = @x + @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool sadd64_overflow(int64_t x, int64_t y, int64_t *ret)
{
    return __builtin_add_overflow(x, y, ret);
}

/**
 * uadd32_overflow - addition with overflow indication
 * @x, @y: addends
 * @ret: Output for sum
 *
 * Computes *@ret = @x + @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool uadd32_overflow(uint32_t x, uint32_t y, uint32_t *ret)
{
    return __builtin_add_overflow(x, y, ret);
}

/**
 * uadd64_overflow - addition with overflow indication
 * @x, @y: addends
 * @ret: Output for sum
 *
 * Computes *@ret = @x + @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool uadd64_overflow(uint64_t x, uint64_t y, uint64_t *ret)
{
    return __builtin_add_overflow(x, y, ret);
}

/**
 * ssub32_overflow - subtraction with overflow indication
 * @x: Minuend
 * @y: Subtrahend
 * @ret: Output for difference
 *
 * Computes *@ret = @x - @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool ssub32_overflow(int32_t x, int32_t y, int32_t *ret)
{
    return __builtin_sub_overflow(x, y, ret);
}

/**
 * ssub64_overflow - subtraction with overflow indication
 * @x: Minuend
 * @y: Subtrahend
 * @ret: Output for difference
 *
 * Computes *@ret = @x - @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool ssub64_overflow(int64_t x, int64_t y, int64_t *ret)
{
    return __builtin_sub_overflow(x, y, ret);
}

/**
 * usub32_overflow - subtraction with overflow indication
 * @x: Minuend
 * @y: Subtrahend
 * @ret: Output for difference
 *
 * Computes *@ret = @x - @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool usub32_overflow(uint32_t x, uint32_t y, uint32_t *ret)
{
    return __builtin_sub_overflow(x, y, ret);
}

/**
 * usub64_overflow - subtraction with overflow indication
 * @x: Minuend
 * @y: Subtrahend
 * @ret: Output for difference
 *
 * Computes *@ret = @x - @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool usub64_overflow(uint64_t x, uint64_t y, uint64_t *ret)
{
    return __builtin_sub_overflow(x, y, ret);
}

/**
 * smul32_overflow - multiplication with overflow indication
 * @x, @y: Input multipliers
 * @ret: Output for product
 *
 * Computes *@ret = @x * @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool smul32_overflow(int32_t x, int32_t y, int32_t *ret)
{
    return __builtin_mul_overflow(x, y, ret);
}

/**
 * smul64_overflow - multiplication with overflow indication
 * @x, @y: Input multipliers
 * @ret: Output for product
 *
 * Computes *@ret = @x * @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool smul64_overflow(int64_t x, int64_t y, int64_t *ret)
{
    return __builtin_mul_overflow(x, y, ret);
}

/**
 * umul32_overflow - multiplication with overflow indication
 * @x, @y: Input multipliers
 * @ret: Output for product
 *
 * Computes *@ret = @x * @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool umul32_overflow(uint32_t x, uint32_t y, uint32_t *ret)
{
    return __builtin_mul_overflow(x, y, ret);
}

/**
 * umul64_overflow - multiplication with overflow indication
 * @x, @y: Input multipliers
 * @ret: Output for product
 *
 * Computes *@ret = @x * @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool umul64_overflow(uint64_t x, uint64_t y, uint64_t *ret)
{
    return __builtin_mul_overflow(x, y, ret);
}
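
/*
 * Usage sketch (illustrative only): the *_overflow helpers make saturating
 * arithmetic straightforward.  sat_add_i32 below is a hypothetical helper,
 * not part of this header.
 *
 *     static inline int32_t sat_add_i32(int32_t a, int32_t b)
 *     {
 *         int32_t r;
 *         if (sadd32_overflow(a, b, &r)) {
 *             // overflow requires both operands to have the same sign
 *             return b < 0 ? INT32_MIN : INT32_MAX;
 *         }
 *         return r;
 *     }
 */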

/*
 * Unsigned 128x64 multiplication.
 * The 128-bit multiplicand is passed in *plow/*phigh, and the low 128 bits
 * of the product are stored back into *plow/*phigh.
 * Returns true if the product was truncated to 128 bits, false otherwise.
 */
static inline bool mulu128(uint64_t *plow, uint64_t *phigh, uint64_t factor)
{
#if defined(CONFIG_INT128)
    bool res;
    __uint128_t r;
    __uint128_t f = ((__uint128_t)*phigh << 64) | *plow;
    res = __builtin_mul_overflow(f, factor, &r);

    *plow = r;
    *phigh = r >> 64;

    return res;
#else
    uint64_t dhi = *phigh;
    uint64_t dlo = *plow;
    uint64_t ahi;
    uint64_t blo, bhi;

    if (dhi == 0) {
        mulu64(plow, phigh, dlo, factor);
        return false;
    }

    mulu64(plow, &ahi, dlo, factor);
    mulu64(&blo, &bhi, dhi, factor);

    return uadd64_overflow(ahi, blo, phigh) || bhi != 0;
#endif
}
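
/*
 * The non-CONFIG_INT128 path above follows directly from splitting the
 * multiplicand into 64-bit halves:
 *
 *     (dhi * 2^64 + dlo) * factor
 *         = (dhi * factor) * 2^64 + (dlo * factor)
 *
 * The low 64 bits of the result come only from dlo * factor; the next 64
 * bits are the high half of dlo * factor plus the low half of dhi * factor.
 * Anything beyond that (a carry out of that addition, or a non-zero high
 * half of dhi * factor) means the product did not fit in 128 bits.
 */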

/**
 * uadd64_carry - addition with carry-in and carry-out
 * @x, @y: addends
 * @pcarry: in-out carry value
 *
 * Computes @x + @y + *@pcarry, placing the carry-out back
 * into *@pcarry and returning the 64-bit sum.
 */
static inline uint64_t uadd64_carry(uint64_t x, uint64_t y, bool *pcarry)
{
#if __has_builtin(__builtin_addcll)
    unsigned long long c = *pcarry;
    x = __builtin_addcll(x, y, c, &c);
    *pcarry = c & 1;
    return x;
#else
    bool c = *pcarry;
    /* This is clang's internal expansion of __builtin_addc. */
    c = uadd64_overflow(x, c, &x);
    c |= uadd64_overflow(x, y, &x);
    *pcarry = c;
    return x;
#endif
}

/**
 * usub64_borrow - subtraction with borrow-in and borrow-out
 * @x: Minuend
 * @y: Subtrahend
 * @pborrow: in-out borrow value
 *
 * Computes @x - @y - *@pborrow, placing the borrow-out back
 * into *@pborrow and returning the 64-bit difference.
 */
static inline uint64_t usub64_borrow(uint64_t x, uint64_t y, bool *pborrow)
{
#if __has_builtin(__builtin_subcll)
    unsigned long long b = *pborrow;
    x = __builtin_subcll(x, y, b, &b);
    *pborrow = b & 1;
    return x;
#else
    bool b = *pborrow;
    b = usub64_overflow(x, b, &x);
    b |= usub64_overflow(x, y, &x);
    *pborrow = b;
    return x;
#endif
}
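
/*
 * Usage sketch (illustrative only): uadd64_carry()/usub64_borrow() chain
 * naturally across arrays of 64-bit limbs.  add_n below is a hypothetical
 * helper, not part of this header; limbs are little-endian (limb 0 is the
 * least significant).
 *
 *     static inline bool add_n(uint64_t *dst, const uint64_t *a,
 *                              const uint64_t *b, size_t n)
 *     {
 *         bool carry = false;
 *         for (size_t i = 0; i < n; i++) {
 *             dst[i] = uadd64_carry(a[i], b[i], &carry);
 *         }
 *         return carry;   // carry out of the most significant limb
 *     }
 */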

/* Host type specific sizes of these routines.  */

#if ULONG_MAX == UINT32_MAX
# define clzl   clz32
# define ctzl   ctz32
# define clol   clo32
# define ctol   cto32
# define ctpopl ctpop32
# define revbitl revbit32
#elif ULONG_MAX == UINT64_MAX
# define clzl   clz64
# define ctzl   ctz64
# define clol   clo64
# define ctol   cto64
# define ctpopl ctpop64
# define revbitl revbit64
#else
# error Unknown sizeof long
#endif

static inline bool is_power_of_2(uint64_t value)
{
    if (!value) {
        return false;
    }

    return !(value & (value - 1));
}

/**
 * Return @value rounded down to the nearest power of two or zero.
 */
static inline uint64_t pow2floor(uint64_t value)
{
    if (!value) {
        /* Avoid undefined shift by 64 */
        return 0;
    }
    return 0x8000000000000000ull >> clz64(value);
}

/*
 * Return @value rounded up to the nearest power of two modulo 2^64.
 * This is *zero* for @value > 2^63, so be careful.
 */
static inline uint64_t pow2ceil(uint64_t value)
{
    int n = clz64(value - 1);

    if (!n) {
        /*
         * @value - 1 has no leading zeroes, thus @value - 1 >= 2^63
         * Therefore, either @value == 0 or @value > 2^63.
         * If it's 0, return 1, else return 0.
         */
        return !value;
    }
    return 0x8000000000000000ull >> (n - 1);
}
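
/*
 * Worked examples (illustrative):
 *
 *     is_power_of_2(8)  == true    8 & 7 == 0
 *     is_power_of_2(12) == false   12 & 11 == 8
 *     pow2floor(5) == 4            pow2floor(8) == 8
 *     pow2ceil(5)  == 8            pow2ceil(8)  == 8
 *     pow2ceil(0)  == 1            pow2ceil((1ull << 63) + 1) == 0
 */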

static inline uint32_t pow2roundup32(uint32_t x)
{
    x |= (x >> 1);
    x |= (x >> 2);
    x |= (x >> 4);
    x |= (x >> 8);
    x |= (x >> 16);
    return x + 1;
}
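
/*
 * Note (illustrative): pow2roundup32() smears the most significant set bit
 * of @x into every lower position and then adds one, so it returns the
 * smallest power of two *strictly greater* than @x:
 *
 *     pow2roundup32(0x13) == 0x20     0x13 -> 0x1f -> 0x20
 *     pow2roundup32(8)    == 16       unlike pow2ceil(8) == 8
 *     pow2roundup32(0)    == 1
 */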

/**
 * urshift - 128-bit Unsigned Right Shift.
 * @plow: in/out - lower 64-bit integer.
 * @phigh: in/out - higher 64-bit integer.
 * @shift: in - bits to shift, between 0 and 127.
 *
 * Result is zero-extended and stored in plow/phigh, which are
 * input/output variables.  Shift values outside the range are reduced
 * modulo 128; in other words, the caller is responsible for validating
 * both the shift amount and the plow/phigh pointers.
 */
void urshift(uint64_t *plow, uint64_t *phigh, int32_t shift);

/**
 * ulshift - 128-bit Unsigned Left Shift.
 * @plow: in/out - lower 64-bit integer.
 * @phigh: in/out - higher 64-bit integer.
 * @shift: in - bits to shift, between 0 and 127.
 * @overflow: out - true if any 1-bit is shifted out.
 *
 * Result is zero-extended and stored in plow/phigh, which are
 * input/output variables.  Shift values outside the range are reduced
 * modulo 128; in other words, the caller is responsible for validating
 * both the shift amount and the plow/phigh pointers.
 */
void ulshift(uint64_t *plow, uint64_t *phigh, int32_t shift, bool *overflow);
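
/*
 * Usage sketch (illustrative only): shifting a 128-bit value kept as two
 * 64-bit halves.  lo/hi are hypothetical variables.
 *
 *     uint64_t lo = ..., hi = ...;
 *     bool ovf = false;
 *
 *     urshift(&lo, &hi, 12);           // hi:lo >>= 12, zero-filled
 *     ulshift(&lo, &hi, 12, &ovf);     // hi:lo <<= 12, ovf reports lost bits
 */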

/* From the GNU Multi Precision Library - longlong.h __udiv_qrnnd
 * (https://gmplib.org/repo/gmp/file/tip/longlong.h)
 *
 * Licensed under the GPLv2/LGPLv3
 */
static inline uint64_t udiv_qrnnd(uint64_t *r, uint64_t n1,
                                  uint64_t n0, uint64_t d)
{
#if defined(__x86_64__)
    uint64_t q;
    asm("divq %4" : "=a"(q), "=d"(*r) : "0"(n0), "1"(n1), "rm"(d));
    return q;
#elif defined(__s390x__) && !defined(__clang__)
    /* Need to use a TImode type to get an even register pair for DLGR.  */
    unsigned __int128 n = (unsigned __int128)n1 << 64 | n0;
    asm("dlgr %0, %1" : "+r"(n) : "r"(d));
    *r = n >> 64;
    return n;
#elif defined(_ARCH_PPC64) && defined(_ARCH_PWR7)
    /* From Power ISA 2.06, programming note for divdeu.  */
    uint64_t q1, q2, Q, r1, r2, R;
    asm("divdeu %0,%2,%4; divdu %1,%3,%4"
        : "=&r"(q1), "=r"(q2)
        : "r"(n1), "r"(n0), "r"(d));
    r1 = -(q1 * d);         /* low part of (n1<<64) - (q1 * d) */
    r2 = n0 - (q2 * d);
    Q = q1 + q2;
    R = r1 + r2;
    if (R >= d || R < r2) { /* overflow implies R > d */
        Q += 1;
        R -= d;
    }
    *r = R;
    return Q;
#else
    uint64_t d0, d1, q0, q1, r1, r0, m;

    d0 = (uint32_t)d;
    d1 = d >> 32;

    r1 = n1 % d1;
    q1 = n1 / d1;
    m = q1 * d0;
    r1 = (r1 << 32) | (n0 >> 32);
    if (r1 < m) {
        q1 -= 1;
        r1 += d;
        if (r1 >= d) {
            if (r1 < m) {
                q1 -= 1;
                r1 += d;
            }
        }
    }
    r1 -= m;

    r0 = r1 % d1;
    q0 = r1 / d1;
    m = q0 * d0;
    r0 = (r0 << 32) | (uint32_t)n0;
    if (r0 < m) {
        q0 -= 1;
        r0 += d;
        if (r0 >= d) {
            if (r0 < m) {
                q0 -= 1;
                r0 += d;
            }
        }
    }
    r0 -= m;

    *r = r0;
    return (q1 << 32) | q0;
#endif
}
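
/*
 * Usage note (illustrative, stating assumptions): udiv_qrnnd() divides the
 * 128-bit value n1:n0 by @d, returning the quotient and storing the
 * remainder in *@r.  As with GMP's __udiv_qrnnd, the caller is expected to
 * ensure n1 < d so that the quotient fits in 64 bits; the generic fallback
 * also divides by (d >> 32), so it assumes a normalized divisor (top bit
 * of @d set).
 *
 *     uint64_t rem;
 *     uint64_t quot = udiv_qrnnd(&rem, hi, lo, d);   // hi:lo / d, hi < d
 */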

Int128 divu256(Int128 *plow, Int128 *phigh, Int128 divisor);
Int128 divs256(Int128 *plow, Int128 *phigh, Int128 divisor);
#endif