linux/include/linux/bitops.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H
#include <asm/types.h>
#include <linux/bits.h>

#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
#define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_TYPE(long))

extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);

/*
 * Include this here because some architectures need generic_ffs/fls in
 * scope
 */
#include <asm/bitops.h>

#define for_each_set_bit(bit, addr, size) \
	for ((bit) = find_first_bit((addr), (size));		\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

/* same as for_each_set_bit() but use bit as value to start with */
#define for_each_set_bit_from(bit, addr, size) \
	for ((bit) = find_next_bit((addr), (size), (bit));	\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

#define for_each_clear_bit(bit, addr, size) \
	for ((bit) = find_first_zero_bit((addr), (size));	\
	     (bit) < (size);					\
	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))

/* same as for_each_clear_bit() but use bit as value to start with */
#define for_each_clear_bit_from(bit, addr, size) \
	for ((bit) = find_next_zero_bit((addr), (size), (bit));	\
	     (bit) < (size);					\
	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))

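/*
 * Illustrative sketch (not part of this header): walking the set bits of a
 * small bitmap with for_each_set_bit().  DECLARE_BITMAP() comes from
 * <linux/types.h> and bitmap_zero() from <linux/bitmap.h>; the names "map"
 * and "bit" are hypothetical.
 *
 *	DECLARE_BITMAP(map, 64);
 *	unsigned int bit;
 *
 *	bitmap_zero(map, 64);
 *	__set_bit(3, map);
 *	__set_bit(42, map);
 *	for_each_set_bit(bit, map, 64)
 *		pr_info("bit %u is set\n", bit);	// prints 3, then 42
 */
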
static inline int get_bitmask_order(unsigned int count)
{
	int order;

	order = fls(count);
	return order;	/* We could be slightly more clever with -1 here... */
}

static __always_inline unsigned long hweight_long(unsigned long w)
{
	return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
}

/**
 * rol64 - rotate a 64-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 rol64(__u64 word, unsigned int shift)
{
	return (word << (shift & 63)) | (word >> ((-shift) & 63));
}

/**
 * ror64 - rotate a 64-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 ror64(__u64 word, unsigned int shift)
{
	return (word >> (shift & 63)) | (word << ((-shift) & 63));
}

/**
 * rol32 - rotate a 32-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 rol32(__u32 word, unsigned int shift)
{
	return (word << (shift & 31)) | (word >> ((-shift) & 31));
}

/**
 * ror32 - rotate a 32-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 ror32(__u32 word, unsigned int shift)
{
	return (word >> (shift & 31)) | (word << ((-shift) & 31));
}

/**
 * rol16 - rotate a 16-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 rol16(__u16 word, unsigned int shift)
{
	return (word << (shift & 15)) | (word >> ((-shift) & 15));
}

/**
 * ror16 - rotate a 16-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 ror16(__u16 word, unsigned int shift)
{
	return (word >> (shift & 15)) | (word << ((-shift) & 15));
}

/**
 * rol8 - rotate an 8-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 rol8(__u8 word, unsigned int shift)
{
	return (word << (shift & 7)) | (word >> ((-shift) & 7));
}

/**
 * ror8 - rotate an 8-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 ror8(__u8 word, unsigned int shift)
{
	return (word >> (shift & 7)) | (word << ((-shift) & 7));
}

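/*
 * Illustrative sketch (not part of this header): the rotate helpers wrap
 * bits around the word instead of discarding them, and the "& 63", "& 31",
 * "& 15", "& 7" masking keeps the shift count inside the word width.
 * For example:
 *
 *	rol32(0x80000001, 1) == 0x00000003	(the top bit wraps to bit 0)
 *	ror32(0x00000003, 1) == 0x80000001
 *	ror8(0x01, 1)        == 0x80
 */
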
/**
 * sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit
 * @value: value to sign extend
 * @index: 0 based bit index (0<=index<32) to sign bit
 *
 * This is safe to use for 16- and 8-bit types as well.
 */
static inline __s32 sign_extend32(__u32 value, int index)
{
	__u8 shift = 31 - index;
	return (__s32)(value << shift) >> shift;
}

/**
 * sign_extend64 - sign extend a 64-bit value using specified bit as sign-bit
 * @value: value to sign extend
 * @index: 0 based bit index (0<=index<64) to sign bit
 */
static inline __s64 sign_extend64(__u64 value, int index)
{
	__u8 shift = 63 - index;
	return (__s64)(value << shift) >> shift;
}

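/*
 * Illustrative sketch (not part of this header): sign_extend32() treats
 * @index as the sign bit and propagates it upwards, e.g. when widening a
 * narrow field read from hardware:
 *
 *	sign_extend32(0x80, 7)   == -128	(bit 7 set, so the value is negative)
 *	sign_extend32(0x7f, 7)   == 127
 *	sign_extend64(0x800, 11) == -2048
 */
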
static inline unsigned fls_long(unsigned long l)
{
	if (sizeof(l) == 4)
		return fls(l);
	return fls64(l);
}

static inline int get_count_order(unsigned int count)
{
	int order;

	order = fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}

/**
 * get_count_order_long - get order after rounding @l up to power of 2
 * @l: parameter
 *
 * It is the same as get_count_order() but with a long type parameter.
 */
static inline int get_count_order_long(unsigned long l)
{
	if (l == 0UL)
		return -1;
	else if (l & (l - 1UL))
		return (int)fls_long(l);
	else
		return (int)fls_long(l) - 1;
}

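/*
 * Illustrative sketch (not part of this header): both helpers return the
 * order of the next power of two at or above the argument, e.g. when
 * sizing a hash table:
 *
 *	get_count_order(16)       == 4	(16 is already a power of two)
 *	get_count_order(17)       == 5	(rounded up to 32)
 *	get_count_order_long(0UL) == -1	(0 is handled explicitly in the long variant)
 */
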
/**
 * __ffs64 - find first set bit in a 64 bit word
 * @word: The 64 bit word
 *
 * On 64-bit architectures this is a synonym for __ffs().
 * The result is not defined if no bits are set, so check that @word
 * is non-zero before calling this.
 */
static inline unsigned long __ffs64(u64 word)
{
#if BITS_PER_LONG == 32
	if (((u32)word) == 0UL)
		return __ffs((u32)(word >> 32)) + 32;
#elif BITS_PER_LONG != 64
#error BITS_PER_LONG not 32 or 64
#endif
	return __ffs((unsigned long)word);
}

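/*
 * Illustrative sketch (not part of this header): __ffs64() returns the
 * zero-based index of the lowest set bit, and on 32-bit kernels falls back
 * to scanning the upper half when the low 32 bits are all zero:
 *
 *	__ffs64(0x0000000000000008ULL) == 3
 *	__ffs64(0x0000000100000000ULL) == 32
 */
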
/**
 * assign_bit - Assign value to a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 * @value: the value to assign
 */
static __always_inline void assign_bit(long nr, volatile unsigned long *addr,
				       bool value)
{
	if (value)
		set_bit(nr, addr);
	else
		clear_bit(nr, addr);
}

static __always_inline void __assign_bit(long nr, volatile unsigned long *addr,
					 bool value)
{
	if (value)
		__set_bit(nr, addr);
	else
		__clear_bit(nr, addr);
}

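/*
 * Illustrative sketch (not part of this header): assign_bit() collapses the
 * usual "set or clear depending on a flag" pattern into one call.  The
 * names "flags" and "enable" are hypothetical; the non-atomic
 * __assign_bit() is only safe when the update is otherwise serialized.
 *
 *	if (enable)
 *		set_bit(0, flags);
 *	else
 *		clear_bit(0, flags);
 *
 *	// becomes:
 *	assign_bit(0, flags, enable);
 */
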
#ifdef __KERNEL__

#ifndef set_mask_bits
#define set_mask_bits(ptr, mask, bits)	\
({								\
	const typeof(*(ptr)) mask__ = (mask), bits__ = (bits);	\
	typeof(*(ptr)) old__, new__;				\
								\
	do {							\
		old__ = READ_ONCE(*(ptr));			\
		new__ = (old__ & ~mask__) | bits__;		\
	} while (cmpxchg(ptr, old__, new__) != old__);		\
								\
	old__;							\
})
#endif

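/*
 * Illustrative sketch (not part of this header): set_mask_bits() atomically
 * replaces the bits covered by @mask with @bits and yields the old word,
 * retrying the cmpxchg() until no other CPU has modified *ptr in between.
 * The variables "state" and "old" and the field layout are hypothetical.
 *
 *	unsigned long state, old;
 *
 *	// atomically set bits 0-3 of state to 0x5, leaving the rest untouched
 *	old = set_mask_bits(&state, 0xfUL, 0x5UL);
 */
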
#ifndef bit_clear_unless
#define bit_clear_unless(ptr, clear, test)	\
({								\
	const typeof(*(ptr)) clear__ = (clear), test__ = (test);\
	typeof(*(ptr)) old__, new__;				\
								\
	do {							\
		old__ = READ_ONCE(*(ptr));			\
		new__ = old__ & ~clear__;			\
	} while (!(old__ & test__) &&				\
		 cmpxchg(ptr, old__, new__) != old__);		\
								\
	!(old__ & test__);					\
})
#endif

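/*
 * Illustrative sketch (not part of this header): bit_clear_unless() clears
 * the @clear bits only if none of the @test bits are set, and evaluates to
 * true when the clear actually happened.  The flag names below are
 * hypothetical; BIT() comes from <linux/bits.h>.
 *
 *	#define MY_BUSY		BIT(0)
 *	#define MY_SHUTDOWN	BIT(1)
 *
 *	// drop the busy flag unless a shutdown has been requested
 *	if (bit_clear_unless(&flags, MY_BUSY, MY_SHUTDOWN))
 *		pr_debug("busy flag cleared\n");
 */
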
#ifndef find_last_bit
/**
 * find_last_bit - find the last set bit in a memory region
 * @addr: The address to start the search at
 * @size: The number of bits to search
 *
 * Returns the bit number of the last set bit, or size.
 */
extern unsigned long find_last_bit(const unsigned long *addr,
				   unsigned long size);
#endif

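/*
 * Illustrative sketch (not part of this header): find_last_bit() returns
 * the highest set bit, or @size when the bitmap is empty.  The name "map"
 * is hypothetical.
 *
 *	DECLARE_BITMAP(map, 128);
 *	unsigned long last;
 *
 *	last = find_last_bit(map, 128);
 *	if (last == 128)
 *		pr_debug("no bits set\n");
 */
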

#endif /* __KERNEL__ */
#endif