linux/include/linux/bitops.h
#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H
#include <asm/types.h>

#ifdef  __KERNEL__
#define BIT(nr)                 (1UL << (nr))
#define BIT_ULL(nr)             (1ULL << (nr))
#define BIT_MASK(nr)            (1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)            ((nr) / BITS_PER_LONG)
#define BIT_ULL_MASK(nr)        (1ULL << ((nr) % BITS_PER_LONG_LONG))
#define BIT_ULL_WORD(nr)        ((nr) / BITS_PER_LONG_LONG)
#define BITS_PER_BYTE           8
#define BITS_TO_LONGS(nr)       DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
#endif

/*
 * Create a contiguous bitmask starting at bit position @l and ending at
 * position @h. For example,
 * GENMASK_ULL(39, 21) gives us the 64-bit vector 0x000000ffffe00000.
 */
#define GENMASK(h, l) \
        (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

#define GENMASK_ULL(h, l) \
        (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
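
/*
 * Illustrative expansions, worked out from the definitions above (not part
 * of the original header): GENMASK(7, 4) == 0xf0UL on both 32- and 64-bit
 * builds, GENMASK(63, 0) == ~0UL on a 64-bit build, and
 * GENMASK_ULL(31, 0) == 0xffffffffULL.
 */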

extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);

/*
 * Include this here because some architectures need generic_ffs/fls in
 * scope
 */
#include <asm/bitops.h>

#define for_each_set_bit(bit, addr, size) \
        for ((bit) = find_first_bit((addr), (size));            \
             (bit) < (size);                                    \
             (bit) = find_next_bit((addr), (size), (bit) + 1))

/* same as for_each_set_bit() but uses @bit as the value to start with */
#define for_each_set_bit_from(bit, addr, size) \
        for ((bit) = find_next_bit((addr), (size), (bit));      \
             (bit) < (size);                                    \
             (bit) = find_next_bit((addr), (size), (bit) + 1))

#define for_each_clear_bit(bit, addr, size) \
        for ((bit) = find_first_zero_bit((addr), (size));       \
             (bit) < (size);                                    \
             (bit) = find_next_zero_bit((addr), (size), (bit) + 1))

/* same as for_each_clear_bit() but uses @bit as the value to start with */
#define for_each_clear_bit_from(bit, addr, size) \
        for ((bit) = find_next_zero_bit((addr), (size), (bit)); \
             (bit) < (size);                                    \
             (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
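
/*
 * Usage sketch (illustrative only, not from the original header).  The
 * bitmap helpers DECLARE_BITMAP(), bitmap_zero() and set_bit() come from
 * other kernel headers and are assumed to be available:
 *
 *      DECLARE_BITMAP(map, 64);
 *      unsigned int bit;
 *
 *      bitmap_zero(map, 64);
 *      set_bit(3, map);
 *      set_bit(42, map);
 *      for_each_set_bit(bit, map, 64)
 *              pr_info("bit %u is set\n", bit);
 *
 * This prints bit 3 and then bit 42.
 */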

static inline int get_bitmask_order(unsigned int count)
{
        int order;

        order = fls(count);
        return order;   /* We could be slightly more clever with -1 here... */
}

static inline int get_count_order(unsigned int count)
{
        int order;

        order = fls(count) - 1;
        if (count & (count - 1))
                order++;
        return order;
}
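
/*
 * Illustrative values, worked out from the code above (not part of the
 * original header): get_count_order() behaves like a ceiling log2 for
 * non-zero counts, e.g. get_count_order(4) == 2 and get_count_order(5) == 3,
 * whereas get_bitmask_order(4) == 3 and get_bitmask_order(5) == 3, i.e. fls().
 */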

static __always_inline unsigned long hweight_long(unsigned long w)
{
        return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
}
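
/*
 * Illustrative value (not part of the original header): hweight_long() is a
 * population count over the whole long, so hweight_long(0xf0UL) == 4.
 */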

/**
 * rol64 - rotate a 64-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 rol64(__u64 word, unsigned int shift)
{
        return (word << shift) | (word >> (64 - shift));
}

/**
 * ror64 - rotate a 64-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 ror64(__u64 word, unsigned int shift)
{
        return (word >> shift) | (word << (64 - shift));
}

/**
 * rol32 - rotate a 32-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 rol32(__u32 word, unsigned int shift)
{
        return (word << shift) | (word >> ((-shift) & 31));
}

/**
 * ror32 - rotate a 32-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 ror32(__u32 word, unsigned int shift)
{
        return (word >> shift) | (word << (32 - shift));
}
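
/*
 * Illustrative values (not part of the original header):
 * rol32(0x80000001, 1) == 0x00000003 and ror32(0x00000003, 1) == 0x80000001.
 * Note that only rol32() masks the shift with ((-shift) & 31), so a shift of
 * 0 is well defined there; the other rotate helpers shift by the full word
 * width when shift == 0, which C leaves undefined.
 */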

/**
 * rol16 - rotate a 16-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 rol16(__u16 word, unsigned int shift)
{
        return (word << shift) | (word >> (16 - shift));
}

/**
 * ror16 - rotate a 16-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 ror16(__u16 word, unsigned int shift)
{
        return (word >> shift) | (word << (16 - shift));
}

/**
 * rol8 - rotate an 8-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 rol8(__u8 word, unsigned int shift)
{
        return (word << shift) | (word >> (8 - shift));
}

/**
 * ror8 - rotate an 8-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 ror8(__u8 word, unsigned int shift)
{
        return (word >> shift) | (word << (8 - shift));
}

/**
 * sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit
 * @value: value to sign extend
 * @index: 0 based bit index (0<=index<32) to sign bit
 *
 * This is safe to use for 16- and 8-bit types as well.
 */
static inline __s32 sign_extend32(__u32 value, int index)
{
        __u8 shift = 31 - index;
        return (__s32)(value << shift) >> shift;
}

/**
 * sign_extend64 - sign extend a 64-bit value using specified bit as sign-bit
 * @value: value to sign extend
 * @index: 0 based bit index (0<=index<64) to sign bit
 */
static inline __s64 sign_extend64(__u64 value, int index)
{
        __u8 shift = 63 - index;
        return (__s64)(value << shift) >> shift;
}
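
/*
 * Illustrative values (not part of the original header):
 * sign_extend32(0x80, 7) == -128 and sign_extend32(0x7f, 7) == 127,
 * i.e. bit 7 is treated as the sign bit of an 8-bit quantity.
 */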

static inline unsigned fls_long(unsigned long l)
{
        if (sizeof(l) == 4)
                return fls(l);
        return fls64(l);
}

/**
 * __ffs64 - find first set bit in a 64 bit word
 * @word: The 64 bit word
 *
 * On 64 bit arches this is a synonym for __ffs.
 * The result is not defined if no bits are set, so check that @word
 * is non-zero before calling this.
 */
static inline unsigned long __ffs64(u64 word)
{
#if BITS_PER_LONG == 32
        if (((u32)word) == 0UL)
                return __ffs((u32)(word >> 32)) + 32;
#elif BITS_PER_LONG != 64
#error BITS_PER_LONG not 32 or 64
#endif
        return __ffs((unsigned long)word);
}
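
/*
 * Illustrative values (not part of the original header):
 * __ffs64(0x8ULL) == 3 and __ffs64(1ULL << 40) == 40, on both 32-bit and
 * 64-bit builds.
 */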

#ifdef __KERNEL__

#ifndef set_mask_bits
#define set_mask_bits(ptr, _mask, _bits)        \
({                                                              \
        const typeof(*ptr) mask = (_mask), bits = (_bits);      \
        typeof(*ptr) old, new;                                  \
                                                                \
        do {                                                    \
                old = ACCESS_ONCE(*ptr);                        \
                new = (old & ~mask) | bits;                     \
        } while (cmpxchg(ptr, old, new) != old);                \
                                                                \
        new;                                                    \
})
#endif
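
/*
 * Usage sketch (illustrative only, not from the original header): atomically
 * replace the low nibble of a word while leaving the other bits untouched.
 * The variable name is made up for the example:
 *
 *      unsigned long flags = 0xab;
 *
 *      set_mask_bits(&flags, 0xfUL, 0x5UL);
 *
 * Afterwards flags == 0xa5.
 */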

#ifndef find_last_bit
/**
 * find_last_bit - find the last set bit in a memory region
 * @addr: The address to start the search at
 * @size: The number of bits to search
 *
 * Returns the bit number of the last set bit, or @size if no bits are set.
 */
extern unsigned long find_last_bit(const unsigned long *addr,
                                   unsigned long size);
#endif

#endif /* __KERNEL__ */
#endif