linux/include/linux/math64.h
#ifndef _LINUX_MATH64_H
#define _LINUX_MATH64_H

#include <linux/types.h>
#include <asm/div64.h>

#if BITS_PER_LONG == 64

#define div64_long(x, y) div64_s64((x), (y))
#define div64_ul(x, y)   div64_u64((x), (y))

/**
 * div_u64_rem - unsigned 64bit divide with 32bit divisor and remainder
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 * @remainder: pointer to unsigned 32bit remainder
 *
 * 32bit archs commonly provide their own optimized version of this,
 * as a native 64bit divide is expensive there.
 */
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

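/*
 * Usage sketch (illustrative): split a nanosecond count into whole
 * seconds plus leftover nanoseconds. NSEC_PER_SEC comes from
 * <linux/time.h>; 'ns' stands for an assumed caller-supplied u64.
 *
 *	u32 rem;
 *	u64 secs = div_u64_rem(ns, NSEC_PER_SEC, &rem);
 */
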
/**
 * div_s64_rem - signed 64bit divide with 32bit divisor and remainder
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 * @remainder: pointer to signed 32bit remainder
 */
static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

/**
 * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 * @remainder: pointer to unsigned 64bit remainder
 */
static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 */
static inline u64 div64_u64(u64 dividend, u64 divisor)
{
	return dividend / divisor;
}

/**
 * div64_s64 - signed 64bit divide with 64bit divisor
 * @dividend: signed 64bit dividend
 * @divisor: signed 64bit divisor
 */
static inline s64 div64_s64(s64 dividend, s64 divisor)
{
	return dividend / divisor;
}

#elif BITS_PER_LONG == 32

#define div64_long(x, y) div_s64((x), (y))
#define div64_ul(x, y)   div_u64((x), (y))

#ifndef div_u64_rem
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = do_div(dividend, divisor);
	return dividend;
}
#endif

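/*
 * A note on do_div() from <asm/div64.h>: it is a macro, not a function.
 * It divides its u64 first argument in place and evaluates to the 32bit
 * remainder, which is why the wrapper above can return 'dividend'
 * directly. A minimal usage sketch:
 *
 *	u64 n = bytes;			// assumed caller-supplied value
 *	u32 rem = do_div(n, 512);	// n now holds bytes / 512
 */
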
#ifndef div_s64_rem
extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
#endif

#ifndef div64_u64_rem
extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
#endif

#ifndef div64_u64
extern u64 div64_u64(u64 dividend, u64 divisor);
#endif

#ifndef div64_s64
extern s64 div64_s64(s64 dividend, s64 divisor);
#endif

#endif /* BITS_PER_LONG */

/**
 * div_u64 - unsigned 64bit divide with 32bit divisor
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 *
 * This is the most common 64bit divide and should be used if possible,
 * as many 32bit archs can optimize this variant better than a full
 * 64bit divide.
 */
#ifndef div_u64
static inline u64 div_u64(u64 dividend, u32 divisor)
{
	u32 remainder;
	return div_u64_rem(dividend, divisor, &remainder);
}
#endif

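/*
 * Usage sketch (illustrative): convert nanoseconds to microseconds.
 * 'ns' stands for an assumed caller-supplied u64; the divisor fits in
 * 32 bits, so div_u64() is preferred over the full div64_u64().
 *
 *	u64 us = div_u64(ns, 1000);
 */
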
/**
 * div_s64 - signed 64bit divide with 32bit divisor
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 *
 * Like C99 division, the quotient is truncated towards zero, so the
 * discarded remainder has the same sign as @dividend.
 */
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
{
	s32 remainder;
	return div_s64_rem(dividend, divisor, &remainder);
}
#endif

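/*
 * iter_div_u64_rem() and the inline __iter_div_u64_rem() below divide by
 * repeated subtraction instead of a hardware divide, so they only make
 * sense when the quotient is known to be small (usually 0 or 1). A
 * typical use is timespec normalization after adding a small number of
 * nanoseconds, e.g. timespec_add_ns() in <linux/time.h>.
 */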
u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);

static __always_inline u32
__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	u32 ret = 0;

	while (dividend >= divisor) {
		/*
		 * The following asm() prevents the compiler from
		 * optimising this loop into a modulo operation.
		 */
		asm("" : "+rm"(dividend));

		dividend -= divisor;
		ret++;
	}

	*remainder = dividend;

	return ret;
}

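/*
 * mul_u64_u32_shr() computes (a * mul) >> shift without truncating the
 * intermediate 96bit product to 64 bits. Two implementations follow: a
 * native 128bit multiply where the arch supports one, and a generic
 * fallback working on the 32bit halves of 'a'.
 */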
#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)

#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
	return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u32_shr */

#else

#ifndef mul_u64_u32_shr
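/*
 * Generic fallback: write a = (ah << 32) + al, so that
 *
 *	(a * mul) >> shift = ((al * mul) >> shift)
 *			   + ((ah * mul) << (32 - shift))
 *
 * Both partial products fit in 64 bits. The identity (and the code
 * below) assumes shift <= 32, so that ah * mul * 2^32 is a multiple
 * of 2^shift and the high half contributes exactly.
 */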
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
	u32 ah, al;
	u64 ret;

	al = a;
	ah = a >> 32;

	ret = ((u64)al * mul) >> shift;
	if (ah)
		ret += ((u64)ah * mul) << (32 - shift);

	return ret;
}
#endif /* mul_u64_u32_shr */

#endif

#endif /* _LINUX_MATH64_H */