1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30#ifndef HOST_UTILS_H
31#define HOST_UTILS_H
32
33#include "qemu/bswap.h"
34#include "qemu/int128.h"
35
36#ifdef CONFIG_INT128
/*
 * mulu64 - full 64x64 -> 128 bit unsigned multiply.
 * The low half of the product is stored in *plow, the high half in *phigh.
 */
static inline void mulu64(uint64_t *plow, uint64_t *phigh,
                          uint64_t a, uint64_t b)
{
    __uint128_t product = (__uint128_t)a * (__uint128_t)b;

    *phigh = (uint64_t)(product >> 64);
    *plow = (uint64_t)product;
}
44
/*
 * muls64 - full 64x64 -> 128 bit signed multiply.
 * The low half of the product is stored in *plow, the high half in *phigh.
 */
static inline void muls64(uint64_t *plow, uint64_t *phigh,
                          int64_t a, int64_t b)
{
    __int128_t product = (__int128_t)a * (__int128_t)b;

    *phigh = (uint64_t)(product >> 64);
    *plow = (uint64_t)product;
}
52
53
/*
 * muldiv64 - compute (a * b) / c with a 128-bit intermediate product,
 * so the 64x32 multiply cannot lose its high bits.  All operands are
 * non-negative, so unsigned 128-bit arithmetic is exact here.
 */
static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
{
    __uint128_t product = (__uint128_t)a * b;
    return (uint64_t)(product / c);
}
58
/*
 * divu128 - unsigned 128/64 division.
 * Divides the 128-bit value held in {*phigh:*plow} by @divisor,
 * writes the 128-bit quotient back through the same pointers, and
 * returns the remainder.  @divisor must be non-zero.
 */
static inline uint64_t divu128(uint64_t *plow, uint64_t *phigh,
                               uint64_t divisor)
{
    __uint128_t n = ((__uint128_t)*phigh << 64) | *plow;
    __uint128_t q = n / divisor;
    uint64_t rem = (uint64_t)(n % divisor);

    *phigh = (uint64_t)(q >> 64);
    *plow = (uint64_t)q;
    return rem;
}
69
/*
 * divs128 - signed 128/64 division.
 * Divides the 128-bit value held in {*phigh:*plow} by @divisor,
 * writes the quotient back through the same pointers, and returns
 * the remainder (truncated toward zero, per C semantics).
 *
 * The dividend is assembled through __uint128_t: left-shifting a
 * negative signed value is undefined behavior in C, so the old
 * ((__int128_t)*phigh << 64) form was UB for negative high words.
 */
static inline int64_t divs128(uint64_t *plow, int64_t *phigh,
                              int64_t divisor)
{
    __int128_t dividend =
        (__int128_t)(((__uint128_t)(uint64_t)*phigh << 64) | *plow);
    __int128_t result = dividend / divisor;

    *plow = (uint64_t)result;
    /* Arithmetic right shift of the signed quotient (GCC/Clang defined). */
    *phigh = (int64_t)(result >> 64);
    return (int64_t)(dividend % divisor);
}
80#else
81void muls64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b);
82void mulu64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b);
83uint64_t divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor);
84int64_t divs128(uint64_t *plow, int64_t *phigh, int64_t divisor);
85
/*
 * muldiv64 - compute (a * b) / c without losing the high bits of the
 * 64x32 -> 96 bit intermediate product, using only 64-bit arithmetic.
 *
 * NOTE(review): the high-limb division truncates to 32 bits, so this
 * assumes the true quotient fits in 64 bits; confirm against callers.
 */
static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
{
    /* View a 64-bit value as two 32-bit limbs in host endianness. */
    union {
        uint64_t ll;
        struct {
#if HOST_BIG_ENDIAN
            uint32_t high, low;
#else
            uint32_t low, high;
#endif
        } l;
    } u, res;
    uint64_t rl, rh;

    u.ll = a;
    /* Partial products: a_lo * b and a_hi * b. */
    rl = (uint64_t)u.l.low * (uint64_t)b;
    rh = (uint64_t)u.l.high * (uint64_t)b;
    rh += (rl >> 32);   /* fold the carry from the low product */
    /* Long division, one 32-bit quotient digit at a time. */
    res.l.high = rh / c;
    res.l.low = (((rh % c) << 32) + (rl & 0xffffffff)) / c;
    return res.ll;
}
108#endif
109
110
111
112
113
114
115
116
/*
 * clz32 - count leading zeros in a 32-bit value.
 * Returns 32 for zero input (__builtin_clz is undefined at 0).
 */
static inline int clz32(uint32_t val)
{
    if (val == 0) {
        return 32;
    }
    return __builtin_clz(val);
}
121
122
123
124
125
126
127
/*
 * clo32 - count leading ones in a 32-bit value.
 * Equivalent to counting leading zeros of the complement;
 * returns 32 when all bits are set.
 */
static inline int clo32(uint32_t val)
{
    uint32_t inverted = ~val;

    return inverted ? __builtin_clz(inverted) : 32;
}
132
133
134
135
136
137
138
139
/*
 * clz64 - count leading zeros in a 64-bit value.
 * Returns 64 for zero input (__builtin_clzll is undefined at 0).
 */
static inline int clz64(uint64_t val)
{
    if (val == 0) {
        return 64;
    }
    return __builtin_clzll(val);
}
144
145
146
147
148
149
150
/*
 * clo64 - count leading ones in a 64-bit value.
 * Returns 64 when all bits are set.
 */
static inline int clo64(uint64_t val)
{
    uint64_t inverted = ~val;

    return inverted ? __builtin_clzll(inverted) : 64;
}
155
156
157
158
159
160
161
162
/*
 * ctz32 - count trailing zeros in a 32-bit value.
 * Returns 32 for zero input (__builtin_ctz is undefined at 0).
 */
static inline int ctz32(uint32_t val)
{
    if (val == 0) {
        return 32;
    }
    return __builtin_ctz(val);
}
167
168
169
170
171
172
173
/*
 * cto32 - count trailing ones in a 32-bit value.
 * Returns 32 when all bits are set.
 */
static inline int cto32(uint32_t val)
{
    uint32_t inverted = ~val;

    return inverted ? __builtin_ctz(inverted) : 32;
}
178
179
180
181
182
183
184
185
/*
 * ctz64 - count trailing zeros in a 64-bit value.
 * Returns 64 for zero input (__builtin_ctzll is undefined at 0).
 */
static inline int ctz64(uint64_t val)
{
    if (val == 0) {
        return 64;
    }
    return __builtin_ctzll(val);
}
190
191
192
193
194
195
196
/*
 * cto64 - count trailing ones in a 64-bit value.
 * Returns 64 when all bits are set.
 */
static inline int cto64(uint64_t val)
{
    uint64_t inverted = ~val;

    return inverted ? __builtin_ctzll(inverted) : 64;
}
201
202
203
204
205
206
207
208
/*
 * clrsb32 - count leading redundant sign bits: the number of bits
 * following the most significant bit that are identical to it.
 * Result is in the range 0..31.
 */
static inline int clrsb32(uint32_t val)
{
#if __has_builtin(__builtin_clrsb) || !defined(__clang__)
    return __builtin_clrsb(val);
#else
    /* XOR with the sign-propagated shift marks the first bit that
     * differs from the sign; clz of that, minus the sign bit itself. */
    return clz32(val ^ ((int32_t)val >> 1)) - 1;
#endif
}
217
218
219
220
221
222
223
224
/*
 * clrsb64 - count leading redundant sign bits: the number of bits
 * following the most significant bit that are identical to it.
 * Result is in the range 0..63.
 */
static inline int clrsb64(uint64_t val)
{
#if __has_builtin(__builtin_clrsbll) || !defined(__clang__)
    return __builtin_clrsbll(val);
#else
    /* XOR with the sign-propagated shift marks the first bit that
     * differs from the sign; clz of that, minus the sign bit itself. */
    return clz64(val ^ ((int64_t)val >> 1)) - 1;
#endif
}
233
234
235
236
237
/*
 * ctpop8 - population count (number of set bits) of an 8-bit value.
 */
static inline int ctpop8(uint8_t val)
{
    unsigned int widened = val;

    return __builtin_popcount(widened);
}
242
243
244
245
246
/*
 * ctpop16 - population count (number of set bits) of a 16-bit value.
 */
static inline int ctpop16(uint16_t val)
{
    unsigned int widened = val;

    return __builtin_popcount(widened);
}
251
252
253
254
255
/*
 * ctpop32 - population count (number of set bits) of a 32-bit value.
 */
static inline int ctpop32(uint32_t val)
{
    int bits_set = __builtin_popcount(val);

    return bits_set;
}
260
261
262
263
264
/*
 * ctpop64 - population count (number of set bits) of a 64-bit value.
 */
static inline int ctpop64(uint64_t val)
{
    int bits_set = __builtin_popcountll(val);

    return bits_set;
}
269
270
271
272
273
/*
 * revbit8 - reverse the order of the 8 bits in @x.
 */
static inline uint8_t revbit8(uint8_t x)
{
#if __has_builtin(__builtin_bitreverse8)
    return __builtin_bitreverse8(x);
#else
    /* Shift each source bit, lowest first, into the result from the
     * bottom, so source bit i lands at position 7 - i. */
    uint8_t out = 0;
    int i;

    for (i = 0; i < 8; i++) {
        out = (uint8_t)((out << 1) | ((x >> i) & 1));
    }
    return out;
#endif
}
290
291
292
293
294
/*
 * revbit16 - reverse the order of the 16 bits in @x.
 */
static inline uint16_t revbit16(uint16_t x)
{
#if __has_builtin(__builtin_bitreverse16)
    return __builtin_bitreverse16(x);
#else
    /* Swap the two bytes (bswap16 is from qemu/bswap.h). */
    x = bswap16(x);
    /* Swap the nibbles within each byte. */
    x = ((x & 0xf0f0) >> 4)
      | ((x & 0x0f0f) << 4);
    /* Swap the bits within each nibble. */
    x = ((x & 0x8888) >> 3)
      | ((x & 0x4444) >> 1)
      | ((x & 0x2222) << 1)
      | ((x & 0x1111) << 3);
    return x;
#endif
}
313
314
315
316
317
/*
 * revbit32 - reverse the order of the 32 bits in @x.
 */
static inline uint32_t revbit32(uint32_t x)
{
#if __has_builtin(__builtin_bitreverse32)
    return __builtin_bitreverse32(x);
#else
    /* Swap the four bytes (bswap32 is from qemu/bswap.h). */
    x = bswap32(x);
    /* Swap the nibbles within each byte. */
    x = ((x & 0xf0f0f0f0u) >> 4)
      | ((x & 0x0f0f0f0fu) << 4);
    /* Swap the bits within each nibble. */
    x = ((x & 0x88888888u) >> 3)
      | ((x & 0x44444444u) >> 1)
      | ((x & 0x22222222u) << 1)
      | ((x & 0x11111111u) << 3);
    return x;
#endif
}
336
337
338
339
340
/*
 * revbit64 - reverse the order of the 64 bits in @x.
 */
static inline uint64_t revbit64(uint64_t x)
{
#if __has_builtin(__builtin_bitreverse64)
    return __builtin_bitreverse64(x);
#else
    /* Swap the eight bytes (bswap64 is from qemu/bswap.h). */
    x = bswap64(x);
    /* Swap the nibbles within each byte. */
    x = ((x & 0xf0f0f0f0f0f0f0f0ull) >> 4)
      | ((x & 0x0f0f0f0f0f0f0f0full) << 4);
    /* Swap the bits within each nibble. */
    x = ((x & 0x8888888888888888ull) >> 3)
      | ((x & 0x4444444444444444ull) >> 1)
      | ((x & 0x2222222222222222ull) << 1)
      | ((x & 0x1111111111111111ull) << 3);
    return x;
#endif
}
359
360
361
362
/*
 * uabs64 - absolute value of a signed 64-bit integer as unsigned.
 *
 * The negation is performed in uint64_t: negating INT64_MIN in
 * int64_t is signed-overflow undefined behavior, while the unsigned
 * negation wraps to the correct magnitude 2^63.
 */
static inline uint64_t uabs64(int64_t v)
{
    return v < 0 ? -(uint64_t)v : (uint64_t)v;
}
367
368
369
370
371
372
373
374
375
/*
 * sadd32_overflow - signed 32-bit addition with overflow check.
 * The wrapped sum is always stored in *ret; returns true iff the
 * mathematical result did not fit in int32_t.
 */
static inline bool sadd32_overflow(int32_t x, int32_t y, int32_t *ret)
{
    bool overflowed = __builtin_add_overflow(x, y, ret);

    return overflowed;
}
380
381
382
383
384
385
386
387
388
/*
 * sadd64_overflow - signed 64-bit addition with overflow check.
 * The wrapped sum is always stored in *ret; returns true iff the
 * mathematical result did not fit in int64_t.
 */
static inline bool sadd64_overflow(int64_t x, int64_t y, int64_t *ret)
{
    bool overflowed = __builtin_add_overflow(x, y, ret);

    return overflowed;
}
393
394
395
396
397
398
399
400
401
/*
 * uadd32_overflow - unsigned 32-bit addition with carry-out check.
 * The wrapped sum is always stored in *ret; returns true iff the
 * addition carried out of bit 31.
 */
static inline bool uadd32_overflow(uint32_t x, uint32_t y, uint32_t *ret)
{
    bool carried = __builtin_add_overflow(x, y, ret);

    return carried;
}
406
407
408
409
410
411
412
413
414
/*
 * uadd64_overflow - unsigned 64-bit addition with carry-out check.
 * The wrapped sum is always stored in *ret; returns true iff the
 * addition carried out of bit 63.
 */
static inline bool uadd64_overflow(uint64_t x, uint64_t y, uint64_t *ret)
{
    bool carried = __builtin_add_overflow(x, y, ret);

    return carried;
}
419
420
421
422
423
424
425
426
427
428
/*
 * ssub32_overflow - signed 32-bit subtraction with overflow check.
 * The wrapped difference is always stored in *ret; returns true iff
 * the mathematical result did not fit in int32_t.
 */
static inline bool ssub32_overflow(int32_t x, int32_t y, int32_t *ret)
{
    bool overflowed = __builtin_sub_overflow(x, y, ret);

    return overflowed;
}
433
434
435
436
437
438
439
440
441
442
/*
 * ssub64_overflow - signed 64-bit subtraction with overflow check.
 * The wrapped difference is always stored in *ret; returns true iff
 * the mathematical result did not fit in int64_t.
 */
static inline bool ssub64_overflow(int64_t x, int64_t y, int64_t *ret)
{
    bool overflowed = __builtin_sub_overflow(x, y, ret);

    return overflowed;
}
447
448
449
450
451
452
453
454
455
456
/*
 * usub32_overflow - unsigned 32-bit subtraction with borrow check.
 * The wrapped difference is always stored in *ret; returns true iff
 * the subtraction borrowed (x < y).
 */
static inline bool usub32_overflow(uint32_t x, uint32_t y, uint32_t *ret)
{
    bool borrowed = __builtin_sub_overflow(x, y, ret);

    return borrowed;
}
461
462
463
464
465
466
467
468
469
470
/*
 * usub64_overflow - unsigned 64-bit subtraction with borrow check.
 * The wrapped difference is always stored in *ret; returns true iff
 * the subtraction borrowed (x < y).
 */
static inline bool usub64_overflow(uint64_t x, uint64_t y, uint64_t *ret)
{
    bool borrowed = __builtin_sub_overflow(x, y, ret);

    return borrowed;
}
475
476
477
478
479
480
481
482
483
/*
 * smul32_overflow - signed 32-bit multiplication with overflow check.
 * The wrapped product is always stored in *ret; returns true iff the
 * mathematical result did not fit in int32_t.
 */
static inline bool smul32_overflow(int32_t x, int32_t y, int32_t *ret)
{
    bool overflowed = __builtin_mul_overflow(x, y, ret);

    return overflowed;
}
488
489
490
491
492
493
494
495
496
/*
 * smul64_overflow - signed 64-bit multiplication with overflow check.
 * The wrapped product is always stored in *ret; returns true iff the
 * mathematical result did not fit in int64_t.
 */
static inline bool smul64_overflow(int64_t x, int64_t y, int64_t *ret)
{
    bool overflowed = __builtin_mul_overflow(x, y, ret);

    return overflowed;
}
501
502
503
504
505
506
507
508
509
/*
 * umul32_overflow - unsigned 32-bit multiplication with overflow check.
 * The wrapped product is always stored in *ret; returns true iff the
 * mathematical result did not fit in uint32_t.
 */
static inline bool umul32_overflow(uint32_t x, uint32_t y, uint32_t *ret)
{
    bool overflowed = __builtin_mul_overflow(x, y, ret);

    return overflowed;
}
514
515
516
517
518
519
520
521
522
/*
 * umul64_overflow - unsigned 64-bit multiplication with overflow check.
 * The wrapped product is always stored in *ret; returns true iff the
 * mathematical result did not fit in uint64_t.
 */
static inline bool umul64_overflow(uint64_t x, uint64_t y, uint64_t *ret)
{
    bool overflowed = __builtin_mul_overflow(x, y, ret);

    return overflowed;
}
527
528
529
530
531
532
/*
 * mulu128 - multiply the 128-bit value in {*phigh:*plow} by @factor
 * in place.  Returns true iff the product overflowed 128 bits.
 */
static inline bool mulu128(uint64_t *plow, uint64_t *phigh, uint64_t factor)
{
#if defined(CONFIG_INT128)
    bool res;
    __uint128_t r;
    __uint128_t f = ((__uint128_t)*phigh << 64) | *plow;
    res = __builtin_mul_overflow(f, factor, &r);

    *plow = r;
    *phigh = r >> 64;

    return res;
#else
    uint64_t dhi = *phigh;
    uint64_t dlo = *plow;
    uint64_t ahi;
    uint64_t blo, bhi;

    /* With a zero high limb a single 64x64 -> 128 multiply suffices
     * and cannot overflow 128 bits. */
    if (dhi == 0) {
        mulu64(plow, phigh, dlo, factor);
        return false;
    }

    /* product = dlo*factor + ((dhi*factor) << 64); overflow iff the
     * high partial product exceeds 64 bits or the cross sum carries. */
    mulu64(plow, &ahi, dlo, factor);
    mulu64(&blo, &bhi, dhi, factor);

    return uadd64_overflow(ahi, blo, phigh) || bhi != 0;
#endif
}
562
563
564
565
566
567
568
569
570
/*
 * uadd64_carry - 64-bit addition with carry in and carry out.
 * Returns x + y + *pcarry; the outgoing carry is stored back in
 * *pcarry.
 */
static inline uint64_t uadd64_carry(uint64_t x, uint64_t y, bool *pcarry)
{
#if __has_builtin(__builtin_addcll)
    unsigned long long c = *pcarry;
    x = __builtin_addcll(x, y, c, &c);
    *pcarry = c & 1;
    return x;
#else
    bool c = *pcarry;
    /* Chain two adds; at most one of the two can produce a carry. */
    c = uadd64_overflow(x, c, &x);
    c |= uadd64_overflow(x, y, &x);
    *pcarry = c;
    return x;
#endif
}
587
588
589
590
591
592
593
594
595
/*
 * usub64_borrow - 64-bit subtraction with borrow in and borrow out.
 * Returns x - y - *pborrow; the outgoing borrow is stored back in
 * *pborrow.
 */
static inline uint64_t usub64_borrow(uint64_t x, uint64_t y, bool *pborrow)
{
#if __has_builtin(__builtin_subcll)
    unsigned long long b = *pborrow;
    x = __builtin_subcll(x, y, b, &b);
    *pborrow = b & 1;
    return x;
#else
    bool b = *pborrow;
    /* Chain two subtractions; at most one of the two can borrow. */
    b = usub64_overflow(x, b, &x);
    b |= usub64_overflow(x, y, &x);
    *pborrow = b;
    return x;
#endif
}
611
612
613
/*
 * Aliases operating at the width of the host 'unsigned long',
 * selected via ULONG_MAX at preprocessing time.
 */
#if ULONG_MAX == UINT32_MAX
# define clzl clz32
# define ctzl ctz32
# define clol clo32
# define ctol cto32
# define ctpopl ctpop32
# define revbitl revbit32
#elif ULONG_MAX == UINT64_MAX
# define clzl clz64
# define ctzl ctz64
# define clol clo64
# define ctol cto64
# define ctpopl ctpop64
# define revbitl revbit64
#else
# error Unknown sizeof long
#endif
631
/*
 * is_power_of_2 - true iff @value has exactly one bit set.
 * Zero is not a power of two.
 */
static inline bool is_power_of_2(uint64_t value)
{
    return value != 0 && (value & (value - 1)) == 0;
}
640
641
642
643
/*
 * pow2floor - largest power of two less than or equal to @value.
 * Returns 0 for a zero input, since no such power exists.
 */
static inline uint64_t pow2floor(uint64_t value)
{
    if (value == 0) {
        return 0;
    }
    /* Keep only the most significant set bit. */
    return 0x8000000000000000ull >> __builtin_clzll(value);
}
652
653
654
655
656
/*
 * pow2ceil - smallest power of two greater than or equal to @value.
 * Returns 1 for a zero input, and 0 (wrapped) when the true answer,
 * 2^64, does not fit in uint64_t.
 */
static inline uint64_t pow2ceil(uint64_t value)
{
    uint64_t below = value - 1;
    int leading = below ? __builtin_clzll(below) : 64;

    if (leading == 0) {
        /* Either value == 0 (below wrapped to all-ones) -> answer 1,
         * or value > 2^63 -> the answer 2^64 wraps to 0. */
        return !value;
    }
    return 0x8000000000000000ull >> (leading - 1);
}
671
/*
 * pow2roundup32 - smallest power of two strictly greater than @x
 * when @x is already a power of two, otherwise the next power of two
 * above it (result wraps to 0 for x >= 0x80000000).
 */
static inline uint32_t pow2roundup32(uint32_t x)
{
    unsigned shift;

    /* Smear the highest set bit into every lower position... */
    for (shift = 1; shift < 32; shift <<= 1) {
        x |= x >> shift;
    }
    /* ...then the increment carries up to the next power of two. */
    return x + 1;
}
681
682
683
684
685
686
687
688
689
690
691
692
693void urshift(uint64_t *plow, uint64_t *phigh, int32_t shift);
694
695
696
697
698
699
700
701
702
703
704
705
706
707void ulshift(uint64_t *plow, uint64_t *phigh, int32_t shift, bool *overflow);
708
709
710
711
712
713
/*
 * udiv_qrnnd - divide the 128-bit value n1:n0 by @d.  Returns the
 * 64-bit quotient and stores the remainder in *r.
 *
 * NOTE(review): the generic fallback is the classic two-digit long
 * division (cf. Hacker's Delight divlu); it appears to assume the
 * divisor is normalized (most significant bit of @d set, so d1 != 0)
 * and that the quotient fits in 64 bits (n1 < d) -- confirm against
 * callers before relying on other inputs.
 */
static inline uint64_t udiv_qrnnd(uint64_t *r, uint64_t n1,
                                  uint64_t n0, uint64_t d)
{
#if defined(__x86_64__)
    /* Hardware 128/64 divide: divq takes the dividend in rdx:rax. */
    uint64_t q;
    asm("divq %4" : "=a"(q), "=d"(*r) : "0"(n0), "1"(n1), "rm"(d));
    return q;
#elif defined(__s390x__) && !defined(__clang__)
    /* DLGR needs an even/odd register pair; a TImode operand gets one. */
    unsigned __int128 n = (unsigned __int128)n1 << 64 | n0;
    asm("dlgr %0, %1" : "+r"(n) : "r"(d));
    *r = n >> 64;
    return n;
#elif defined(_ARCH_PPC64) && defined(_ARCH_PWR7)
    /* Combine divide-extended and plain divide, then correct the
     * quotient/remainder pair (Power ISA divdeu programming note). */
    uint64_t q1, q2, Q, r1, r2, R;
    asm("divdeu %0,%2,%4; divdu %1,%3,%4"
        : "=&r"(q1), "=r"(q2)
        : "r"(n1), "r"(n0), "r"(d));
    r1 = -(q1 * d);          /* low 64 bits of (n1 << 64) mod d */
    r2 = n0 - (q2 * d);      /* n0 mod d */
    Q = q1 + q2;
    R = r1 + r2;
    if (R >= d || R < r2) {  /* R < r2 detects wraparound of the sum */
        Q += 1;
        R -= d;
    }
    *r = R;
    return Q;
#else
    uint64_t d0, d1, q0, q1, r1, r0, m;

    /* Split the divisor into 32-bit halves. */
    d0 = (uint32_t)d;
    d1 = d >> 32;

    /* First quotient digit: divide the top 96 bits by d, estimating
     * with d1 and correcting the estimate at most twice. */
    r1 = n1 % d1;
    q1 = n1 / d1;
    m = q1 * d0;
    r1 = (r1 << 32) | (n0 >> 32);
    if (r1 < m) {
        q1 -= 1;
        r1 += d;
        if (r1 >= d) {       /* r1 + d did not wrap: retry once */
            if (r1 < m) {
                q1 -= 1;
                r1 += d;
            }
        }
    }
    r1 -= m;

    /* Second quotient digit: same scheme for the low 32 bits. */
    r0 = r1 % d1;
    q0 = r1 / d1;
    m = q0 * d0;
    r0 = (r0 << 32) | (uint32_t)n0;
    if (r0 < m) {
        q0 -= 1;
        r0 += d;
        if (r0 >= d) {       /* r0 + d did not wrap: retry once */
            if (r0 < m) {
                q0 -= 1;
                r0 += d;
            }
        }
    }
    r0 -= m;

    *r = r0;
    return (q1 << 32) | q0;
#endif
}
785
786Int128 divu256(Int128 *plow, Int128 *phigh, Int128 divisor);
787Int128 divs256(Int128 *plow, Int128 *phigh, Int128 divisor);
788#endif
789