/* linux/include/linux/bitops.h */
   1/* SPDX-License-Identifier: GPL-2.0 */
   2#ifndef _LINUX_BITOPS_H
   3#define _LINUX_BITOPS_H
   4#include <asm/types.h>
   5#include <linux/bits.h>
   6
/* Set bits in the first 'n' bytes when loaded from memory */
#ifdef __LITTLE_ENDIAN
/* Little endian: the first n bytes are the n least-significant bytes. */
#  define aligned_byte_mask(n) ((1UL << 8*(n))-1)
#else
/* Big endian: the first n bytes are the n most-significant bytes. */
#  define aligned_byte_mask(n) (~0xffUL << (BITS_PER_LONG - 8 - 8*(n)))
#endif

/* Number of longs needed to hold a bitmap of 'nr' bits. */
#define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
  15
  16extern unsigned int __sw_hweight8(unsigned int w);
  17extern unsigned int __sw_hweight16(unsigned int w);
  18extern unsigned int __sw_hweight32(unsigned int w);
  19extern unsigned long __sw_hweight64(__u64 w);
  20
  21/*
  22 * Include this here because some architectures need generic_ffs/fls in
  23 * scope
  24 */
  25#include <asm/bitops.h>
  26
/* Iterate @bit over every set bit in the bitmap @addr of @size bits. */
#define for_each_set_bit(bit, addr, size) \
	for ((bit) = find_first_bit((addr), (size));		\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

/* same as for_each_set_bit() but use bit as value to start with */
#define for_each_set_bit_from(bit, addr, size) \
	for ((bit) = find_next_bit((addr), (size), (bit));	\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

/* Iterate @bit over every clear bit in the bitmap @addr of @size bits. */
#define for_each_clear_bit(bit, addr, size) \
	for ((bit) = find_first_zero_bit((addr), (size));	\
	     (bit) < (size);					\
	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))

/* same as for_each_clear_bit() but use bit as value to start with */
#define for_each_clear_bit_from(bit, addr, size) \
	for ((bit) = find_next_zero_bit((addr), (size), (bit)); \
	     (bit) < (size);					\
	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
  48
/**
 * for_each_set_clump8 - iterate over bitmap for each 8-bit clump with set bits
 * @start: bit offset to start search and to store the current iteration offset
 * @clump: location to store copy of current 8-bit clump
 * @bits: bitmap address to base the search on
 * @size: bitmap size in number of bits
 *
 * Iteration advances in 8-bit steps; each pass copies the current clump
 * into @clump before the loop body runs.
 */
#define for_each_set_clump8(start, clump, bits, size) \
	for ((start) = find_first_clump8(&(clump), (bits), (size)); \
	     (start) < (size); \
	     (start) = find_next_clump8(&(clump), (bits), (size), (start) + 8))
  60
  61static inline int get_bitmask_order(unsigned int count)
  62{
  63        int order;
  64
  65        order = fls(count);
  66        return order;   /* We could be slightly more clever with -1 here... */
  67}
  68
  69static __always_inline unsigned long hweight_long(unsigned long w)
  70{
  71        return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
  72}
  73
  74/**
  75 * rol64 - rotate a 64-bit value left
  76 * @word: value to rotate
  77 * @shift: bits to roll
  78 */
  79static inline __u64 rol64(__u64 word, unsigned int shift)
  80{
  81        return (word << shift) | (word >> (64 - shift));
  82}
  83
  84/**
  85 * ror64 - rotate a 64-bit value right
  86 * @word: value to rotate
  87 * @shift: bits to roll
  88 */
  89static inline __u64 ror64(__u64 word, unsigned int shift)
  90{
  91        return (word >> shift) | (word << (64 - shift));
  92}
  93
  94/**
  95 * rol32 - rotate a 32-bit value left
  96 * @word: value to rotate
  97 * @shift: bits to roll
  98 */
  99static inline __u32 rol32(__u32 word, unsigned int shift)
 100{
 101        return (word << shift) | (word >> ((-shift) & 31));
 102}
 103
 104/**
 105 * ror32 - rotate a 32-bit value right
 106 * @word: value to rotate
 107 * @shift: bits to roll
 108 */
 109static inline __u32 ror32(__u32 word, unsigned int shift)
 110{
 111        return (word >> shift) | (word << (32 - shift));
 112}
 113
 114/**
 115 * rol16 - rotate a 16-bit value left
 116 * @word: value to rotate
 117 * @shift: bits to roll
 118 */
 119static inline __u16 rol16(__u16 word, unsigned int shift)
 120{
 121        return (word << shift) | (word >> (16 - shift));
 122}
 123
 124/**
 125 * ror16 - rotate a 16-bit value right
 126 * @word: value to rotate
 127 * @shift: bits to roll
 128 */
 129static inline __u16 ror16(__u16 word, unsigned int shift)
 130{
 131        return (word >> shift) | (word << (16 - shift));
 132}
 133
 134/**
 135 * rol8 - rotate an 8-bit value left
 136 * @word: value to rotate
 137 * @shift: bits to roll
 138 */
 139static inline __u8 rol8(__u8 word, unsigned int shift)
 140{
 141        return (word << shift) | (word >> (8 - shift));
 142}
 143
 144/**
 145 * ror8 - rotate an 8-bit value right
 146 * @word: value to rotate
 147 * @shift: bits to roll
 148 */
 149static inline __u8 ror8(__u8 word, unsigned int shift)
 150{
 151        return (word >> shift) | (word << (8 - shift));
 152}
 153
 154/**
 155 * sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit
 156 * @value: value to sign extend
 157 * @index: 0 based bit index (0<=index<32) to sign bit
 158 *
 159 * This is safe to use for 16- and 8-bit types as well.
 160 */
 161static inline __s32 sign_extend32(__u32 value, int index)
 162{
 163        __u8 shift = 31 - index;
 164        return (__s32)(value << shift) >> shift;
 165}
 166
 167/**
 168 * sign_extend64 - sign extend a 64-bit value using specified bit as sign-bit
 169 * @value: value to sign extend
 170 * @index: 0 based bit index (0<=index<64) to sign bit
 171 */
 172static inline __s64 sign_extend64(__u64 value, int index)
 173{
 174        __u8 shift = 63 - index;
 175        return (__s64)(value << shift) >> shift;
 176}
 177
 178static inline unsigned fls_long(unsigned long l)
 179{
 180        if (sizeof(l) == 4)
 181                return fls(l);
 182        return fls64(l);
 183}
 184
 185static inline int get_count_order(unsigned int count)
 186{
 187        int order;
 188
 189        order = fls(count) - 1;
 190        if (count & (count - 1))
 191                order++;
 192        return order;
 193}
 194
 195/**
 196 * get_count_order_long - get order after rounding @l up to power of 2
 197 * @l: parameter
 198 *
 199 * it is same as get_count_order() but with long type parameter
 200 */
 201static inline int get_count_order_long(unsigned long l)
 202{
 203        if (l == 0UL)
 204                return -1;
 205        else if (l & (l - 1UL))
 206                return (int)fls_long(l);
 207        else
 208                return (int)fls_long(l) - 1;
 209}
 210
/**
 * __ffs64 - find first set bit in a 64 bit word
 * @word: The 64 bit word
 *
 * On 64 bit arches this is a synonym for __ffs
 * The result is not defined if no bits are set, so check that @word
 * is non-zero before calling this.
 */
static inline unsigned long __ffs64(u64 word)
{
#if BITS_PER_LONG == 32
	/* Low half empty: the first set bit must be in the high 32 bits. */
	if (((u32)word) == 0UL)
		return __ffs((u32)(word >> 32)) + 32;
#elif BITS_PER_LONG != 64
#error BITS_PER_LONG not 32 or 64
#endif
	return __ffs((unsigned long)word);
}
 229
 230/**
 231 * assign_bit - Assign value to a bit in memory
 232 * @nr: the bit to set
 233 * @addr: the address to start counting from
 234 * @value: the value to assign
 235 */
 236static __always_inline void assign_bit(long nr, volatile unsigned long *addr,
 237                                       bool value)
 238{
 239        if (value)
 240                set_bit(nr, addr);
 241        else
 242                clear_bit(nr, addr);
 243}
 244
 245static __always_inline void __assign_bit(long nr, volatile unsigned long *addr,
 246                                         bool value)
 247{
 248        if (value)
 249                __set_bit(nr, addr);
 250        else
 251                __clear_bit(nr, addr);
 252}
 253
 254#ifdef __KERNEL__
 255
#ifndef set_mask_bits
/*
 * set_mask_bits - atomically clear the @mask bits of *@ptr and set @bits.
 *
 * Implemented as a cmpxchg() retry loop: recompute and retry whenever a
 * concurrent writer changes *@ptr between the read and the exchange.
 * Evaluates to the new value of *@ptr.
 */
#define set_mask_bits(ptr, mask, bits)	\
({								\
	const typeof(*(ptr)) mask__ = (mask), bits__ = (bits);	\
	typeof(*(ptr)) old__, new__;				\
								\
	do {							\
		old__ = READ_ONCE(*(ptr));			\
		new__ = (old__ & ~mask__) | bits__;		\
	} while (cmpxchg(ptr, old__, new__) != old__);		\
								\
	new__;							\
})
#endif
 270
#ifndef bit_clear_unless
/*
 * bit_clear_unless - atomically clear the @clear bits of *@ptr, unless
 * any of the @test bits are set, in which case *@ptr is left untouched.
 *
 * cmpxchg() retry loop; the loop exits immediately (without writing)
 * as soon as a @test bit is observed set.  Evaluates to true if the
 * bits were cleared, false if a @test bit blocked the update.
 */
#define bit_clear_unless(ptr, clear, test)	\
({								\
	const typeof(*(ptr)) clear__ = (clear), test__ = (test);\
	typeof(*(ptr)) old__, new__;				\
								\
	do {							\
		old__ = READ_ONCE(*(ptr));			\
		new__ = old__ & ~clear__;			\
	} while (!(old__ & test__) &&				\
		 cmpxchg(ptr, old__, new__) != old__);		\
								\
	!(old__ & test__);					\
})
#endif
 286
 287#ifndef find_last_bit
 288/**
 289 * find_last_bit - find the last set bit in a memory region
 290 * @addr: The address to start the search at
 291 * @size: The number of bits to search
 292 *
 293 * Returns the bit number of the last set bit, or size.
 294 */
 295extern unsigned long find_last_bit(const unsigned long *addr,
 296                                   unsigned long size);
 297#endif
 298
 299#endif /* __KERNEL__ */
 300#endif
 301