/* linux/include/asm-x86/bitops_64.h */
#ifndef _X86_64_BITOPS_H
#define _X86_64_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <asm/alternative.h>

#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
/* "+m" is the correct constraint (the asm both reads and writes *addr),
   but older gcc (before 4.1) can reject it here, so fall back to the
   technically wrong "=m" to avoid compilation errors. */
#define ADDR "=m" (*(volatile long *) addr)
#else
#define ADDR "+m" (*(volatile long *) addr)
#endif

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile void *addr)
{
        __asm__ __volatile__( LOCK_PREFIX
                "btsl %1,%0"
                :ADDR
                :"dIr" (nr) : "memory");
}
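
/*
 * Usage sketch (illustrative only; the bitmap below is hypothetical):
 * atomically mark entry @nr busy in a caller-owned bitmap.
 *
 *	static unsigned long busy_map[BITS_TO_LONGS(128)];
 *
 *	set_bit(nr, busy_map);
 *
 * Because the operation is a locked btsl, no external locking is needed
 * even when other CPUs set or clear other bits in the same word.
 */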

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(int nr, volatile void *addr)
{
        __asm__ __volatile__(
                "btsl %1,%0"
                :ADDR
                :"dIr" (nr) : "memory");
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(int nr, volatile void *addr)
{
        __asm__ __volatile__( LOCK_PREFIX
                "btrl %1,%0"
                :ADDR
                :"dIr" (nr));
}

/**
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
        barrier();
        clear_bit(nr, addr);
}

static inline void __clear_bit(int nr, volatile void *addr)
{
        __asm__ __volatile__(
                "btrl %1,%0"
                :ADDR
                :"dIr" (nr));
}

/**
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit_unlock() is non-atomic and implies release semantics before
 * the memory operation. It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 *
 * No CPU memory barrier is required here, because x86 cannot reorder stores
 * past older loads; the barrier() below is only a compiler barrier. Same
 * principle as spin_unlock.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
        barrier();
        __clear_bit(nr, addr);
}

#define smp_mb__before_clear_bit()      barrier()
#define smp_mb__after_clear_bit()       barrier()
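
/*
 * Example of the barrier pairing described above (illustrative sketch;
 * the structure and bit name are hypothetical): order prior stores
 * before the flag clears, so a waiter that sees the bit clear also
 * sees the data.
 *
 *	desc->status = DONE;
 *	smp_mb__before_clear_bit();
 *	clear_bit(DESC_BUSY_BIT, &desc->flags);
 */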

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(int nr, volatile void *addr)
{
        __asm__ __volatile__(
                "btcl %1,%0"
                :ADDR
                :"dIr" (nr));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile void *addr)
{
        __asm__ __volatile__( LOCK_PREFIX
                "btcl %1,%0"
                :ADDR
                :"dIr" (nr));
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(int nr, volatile void *addr)
{
        int oldbit;

        __asm__ __volatile__( LOCK_PREFIX
                "btsl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),ADDR
                :"dIr" (nr) : "memory");
        return oldbit;
}

/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit on x86.
 */
static inline int test_and_set_bit_lock(int nr, volatile void *addr)
{
        return test_and_set_bit(nr, addr);
}
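
/*
 * Example bit-lock pattern (illustrative sketch; MY_LOCK_BIT and word
 * are hypothetical): acquire with test_and_set_bit_lock(), which
 * returns the old bit value, and release with clear_bit_unlock().
 *
 *	while (test_and_set_bit_lock(MY_LOCK_BIT, &word))
 *		cpu_relax();
 *	... critical section ...
 *	clear_bit_unlock(MY_LOCK_BIT, &word);
 */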

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(int nr, volatile void *addr)
{
        int oldbit;

        __asm__(
                "btsl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),ADDR
                :"dIr" (nr));
        return oldbit;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(int nr, volatile void *addr)
{
        int oldbit;

        __asm__ __volatile__( LOCK_PREFIX
                "btrl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),ADDR
                :"dIr" (nr) : "memory");
        return oldbit;
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_clear_bit(int nr, volatile void *addr)
{
        int oldbit;

        __asm__(
                "btrl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),ADDR
                :"dIr" (nr));
        return oldbit;
}

/* WARNING: non-atomic and it can be reordered! */
static inline int __test_and_change_bit(int nr, volatile void *addr)
{
        int oldbit;

        __asm__ __volatile__(
                "btcl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),ADDR
                :"dIr" (nr) : "memory");
        return oldbit;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile void *addr)
{
        int oldbit;

        __asm__ __volatile__( LOCK_PREFIX
                "btcl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),ADDR
                :"dIr" (nr) : "memory");
        return oldbit;
}

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static int test_bit(int nr, const volatile void *addr);
#endif

static inline int constant_test_bit(int nr, const volatile void *addr)
{
        return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static inline int variable_test_bit(int nr, volatile const void *addr)
{
        int oldbit;

        __asm__ __volatile__(
                "btl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit)
                :"m" (*(volatile long *)addr),"dIr" (nr));
        return oldbit;
}

#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 constant_test_bit((nr),(addr)) : \
 variable_test_bit((nr),(addr)))
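
/*
 * The dispatch above picks an implementation per call site: a
 * compile-time-constant @nr becomes a plain C load and mask, a runtime
 * @nr uses the btl instruction.  For example (bitmap hypothetical):
 *
 *	unsigned long map[2] = { 0x10UL, 0UL };
 *
 *	test_bit(4, map);	constant nr -> constant_test_bit(), yields 1
 *	test_bit(n, map);	variable nr -> variable_test_bit()
 */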

#undef ADDR

extern long find_first_zero_bit(const unsigned long *addr, unsigned long size);
extern long find_next_zero_bit(const unsigned long *addr, long size, long offset);
extern long find_first_bit(const unsigned long *addr, unsigned long size);
extern long find_next_bit(const unsigned long *addr, long size, long offset);

/* Return the index of the first set bit in val, or max when no bit is set. */
static inline long __scanbit(unsigned long val, unsigned long max)
{
        asm("bsfq %1,%0 ; cmovz %2,%0" : "=&r" (val) : "r" (val), "r" (max));
        return val;
}
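
/*
 * Worked examples (assuming max == 64): bsfq writes the index of the
 * lowest set bit; when val is 0 it sets ZF and cmovz substitutes max.
 *
 *	__scanbit(0x8UL, 64) == 3
 *	__scanbit(0x1UL, 64) == 0
 *	__scanbit(0x0UL, 64) == 64
 */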

#define find_first_bit(addr,size) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
  (__scanbit(*(unsigned long *)addr,(size))) : \
  find_first_bit(addr,size)))

#define find_next_bit(addr,size,off) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
  ((off) + (__scanbit((*(unsigned long *)addr) >> (off),(size)-(off)))) : \
  find_next_bit(addr,size,off)))

#define find_first_zero_bit(addr,size) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
  (__scanbit(~*(unsigned long *)addr,(size))) : \
  find_first_zero_bit(addr,size)))

#define find_next_zero_bit(addr,size,off) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
  ((off)+(__scanbit(~(((*(unsigned long *)addr)) >> (off)),(size)-(off)))) : \
  find_next_zero_bit(addr,size,off)))
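
/*
 * Typical use of the find_* family (illustrative sketch; map, nbits and
 * handle() are hypothetical): visit every set bit of a bitmap.
 *
 *	long i;
 *
 *	for (i = find_first_bit(map, nbits); i < nbits;
 *	     i = find_next_bit(map, nbits, i + 1))
 *		handle(i);
 */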

/*
 * Find a string of zero bits in a bitmap. Returns -1 when not found.
 */
extern unsigned long
find_next_zero_string(unsigned long *bitmap, long start, long nbits, int len);

static inline void set_bit_string(unsigned long *bitmap, unsigned long i,
                                  int len)
{
        unsigned long end = i + len;
        while (i < end) {
                __set_bit(i, bitmap);
                i++;
        }
}

static inline void __clear_bit_string(unsigned long *bitmap, unsigned long i,
                                      int len)
{
        unsigned long end = i + len;
        while (i < end) {
                __clear_bit(i, bitmap);
                i++;
        }
}
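
/*
 * Together with find_next_zero_string() these form a simple range
 * allocator over a bitmap (illustrative sketch; no locking shown):
 *
 *	unsigned long i = find_next_zero_string(bitmap, 0, nbits, len);
 *	if (i != -1UL)
 *		set_bit_string(bitmap, i, len);
 *	...
 *	__clear_bit_string(bitmap, i, len);	free the run again
 */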

/**
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
        __asm__("bsfq %1,%0"
                :"=r" (word)
                :"r" (~word));
        return word;
}

/**
 * __ffs - find first set bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
        __asm__("bsfq %1,%0"
                :"=r" (word)
                :"rm" (word));
        return word;
}

/**
 * __fls - find last set bit in word.
 * @word: The word to search
 *
 * Undefined if no bit is set, so code should check against 0 first.
 */
static inline unsigned long __fls(unsigned long word)
{
        __asm__("bsrq %1,%0"
                :"=r" (word)
                :"rm" (word));
        return word;
}
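
/*
 * Worked values for the three word scanners above (indices are 0-based):
 *
 *	ffz(0x0fUL)   == 4	lowest clear bit
 *	__ffs(0x18UL) == 3	lowest set bit
 *	__fls(0x18UL) == 4	highest set bit
 *
 * ffz(~0UL), __ffs(0) and __fls(0) are undefined; callers must check.
 */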

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>

/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int x)
{
        int r;

        __asm__("bsfl %1,%0\n\t"
                "cmovzl %2,%0"
                : "=&r" (r) : "rm" (x), "r" (-1));
        return r+1;
}
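
/*
 * The "=&r" early clobber keeps gcc from giving %0 and the -1 input the
 * same register, since bsfl writes %0 before cmovzl reads %2.  Example
 * values (1-based, zero-safe, matching libc ffs()):
 *
 *	ffs(0)    == 0
 *	ffs(1)    == 1
 *	ffs(0x18) == 4
 */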

/**
 * fls64 - find last bit set in 64 bit word
 * @x: the word to search
 *
 * This is defined the same way as fls.
 */
static inline int fls64(__u64 x)
{
        if (x == 0)
                return 0;
        return __fls(x) + 1;
}

/**
 * fls - find last bit set
 * @x: the word to search
 *
 * This is defined the same way as ffs.
 */
static inline int fls(int x)
{
        int r;

        __asm__("bsrl %1,%0\n\t"
                "cmovzl %2,%0"
                : "=&r" (r) : "rm" (x), "rm" (-1));
        return r+1;
}
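
/*
 * Example values for the 1-based "last set bit" variants:
 *
 *	fls(0)            == 0
 *	fls(0x18)         == 5
 *	fls64(0)          == 0
 *	fls64(1ULL << 40) == 41
 */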

#define ARCH_HAS_FAST_MULTIPLIER 1

#include <asm-generic/bitops/hweight.h>

#endif /* __KERNEL__ */

#ifdef __KERNEL__

#include <asm-generic/bitops/ext2-non-atomic.h>

#define ext2_set_bit_atomic(lock,nr,addr) \
                test_and_set_bit((nr),(unsigned long *)(addr))
#define ext2_clear_bit_atomic(lock,nr,addr) \
                test_and_clear_bit((nr),(unsigned long *)(addr))

#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#endif /* _X86_64_BITOPS_H */