linux/arch/x86/include/asm/bitops.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_BITOPS_H
#define _ASM_X86_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 *
 * Note: inlines with more than a single statement should be marked
 * __always_inline to avoid problems with older gcc's inlining heuristics.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/alternative.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>

#if BITS_PER_LONG == 32
# define _BITOPS_LONG_SHIFT 5
#elif BITS_PER_LONG == 64
# define _BITOPS_LONG_SHIFT 6
#else
# error "Unexpected BITS_PER_LONG"
#endif

#define BIT_64(n)			(U64_C(1) << (n))

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
/* Technically wrong, but this avoids compilation errors on some gcc
   versions. */
#define BITOP_ADDR(x) "=m" (*(volatile long *) (x))
#else
#define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
#endif

#define ADDR				BITOP_ADDR(addr)

/*
 * We do the locked ops that don't return the old value as
 * a mask operation on a byte.
 */
#define IS_IMMEDIATE(nr)		(__builtin_constant_p(nr))
#define CONST_MASK_ADDR(nr, addr)	BITOP_ADDR((void *)(addr) + ((nr)>>3))
#define CONST_MASK(nr)			(1 << ((nr) & 7))

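/*
 * Worked example of the byte-wise addressing above: for nr == 10,
 * CONST_MASK_ADDR(10, addr) names the byte at (addr + 1), since 10 >> 3
 * == 1, and CONST_MASK(10) is 1 << (10 & 7) == 0x04, so the locked "orb"
 * below sets bit 10 of the bitmap by OR-ing 0x04 into byte 1.
 */
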
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __always_inline void
set_bit(long nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "orb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)CONST_MASK(nr))
			: "memory");
	} else {
		asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0"
			: BITOP_ADDR(addr) : "Ir" (nr) : "memory");
	}
}
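
#if 0	/* Usage sketch only; "my_flags" is a hypothetical bitmap, not compiled. */
static unsigned long my_flags[2];

static void set_bit_example(void)
{
	set_bit(0, my_flags);		/* atomic: lock orb / lock bts */
	set_bit(70, my_flags);		/* bit 6 of my_flags[1] on 64-bit */
	__set_bit(1, my_flags);		/* non-atomic variant, see below */
}
#endif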

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
{
	asm volatile(__ASM_SIZE(bts) " %1,%0" : ADDR : "Ir" (nr) : "memory");
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static __always_inline void
clear_bit(long nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "andb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)~CONST_MASK(nr)));
	} else {
		asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0"
			: BITOP_ADDR(addr)
			: "Ir" (nr));
	}
}

/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and implies release semantics before the memory
 * operation. It can be used for an unlock.
 */
static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
{
	barrier();
	clear_bit(nr, addr);
}

static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
{
	asm volatile(__ASM_SIZE(btr) " %1,%0" : ADDR : "Ir" (nr));
}

static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
{
	bool negative;
	asm volatile(LOCK_PREFIX "andb %2,%1"
		CC_SET(s)
		: CC_OUT(s) (negative), ADDR
		: "ir" ((char) ~(1 << nr)) : "memory");
	return negative;
}
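
/*
 * Worked example for the helper above: with nr == 0 and the addressed
 * byte holding 0x81, the locked "andb" with ~0x01 leaves 0x80, the sign
 * flag of the byte result is set, and the function returns true.
 */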

// Let everybody know we have it
#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit() is non-atomic and implies release semantics before the memory
 * operation. It can be used for an unlock if no other CPUs can concurrently
 * modify other bits in the word.
 *
 * No memory barrier is required here, because x86 cannot reorder stores past
 * older loads. Same principle as spin_unlock.
 */
static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
{
	barrier();
	__clear_bit(nr, addr);
}

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
{
	asm volatile(__ASM_SIZE(btc) " %1,%0" : ADDR : "Ir" (nr));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __always_inline void change_bit(long nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "xorb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)CONST_MASK(nr)));
	} else {
		asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0"
			: BITOP_ADDR(addr)
			: "Ir" (nr));
	}
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts), *addr, c, "Ir", nr);
}

/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit on x86.
 */
static __always_inline bool
test_and_set_bit_lock(long nr, volatile unsigned long *addr)
{
	return test_and_set_bit(nr, addr);
}
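
#if 0	/* Usage sketch only; MY_LOCK_BIT and my_lock_word are hypothetical, not compiled. */
#define MY_LOCK_BIT	0
static unsigned long my_lock_word;

static bool try_lock_example(void)
{
	if (test_and_set_bit_lock(MY_LOCK_BIT, &my_lock_word))
		return false;		/* bit was already set: lock held elsewhere */
	/* ... critical section ... */
	clear_bit_unlock(MY_LOCK_BIT, &my_lock_word);
	return true;
}
#endif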

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
{
	bool oldbit;

	asm(__ASM_SIZE(bts) " %2,%1"
	    CC_SET(c)
	    : CC_OUT(c) (oldbit), ADDR
	    : "Ir" (nr));
	return oldbit;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btr), *addr, c, "Ir", nr);
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 *
 * Note: the operation is performed atomically with respect to
 * the local CPU, but not other CPUs. Portable code should not
 * rely on this behaviour.
 * KVM relies on this behaviour on x86 for modifying memory that is also
 * accessed from a hypervisor on the same CPU if running in a VM: don't change
 * this without also updating arch/x86/kernel/kvm.c
 */
static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
{
	bool oldbit;

	asm volatile(__ASM_SIZE(btr) " %2,%1"
		     CC_SET(c)
		     : CC_OUT(c) (oldbit), ADDR
		     : "Ir" (nr));
	return oldbit;
}

/* WARNING: non atomic and it can be reordered! */
static __always_inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
{
	bool oldbit;

	asm volatile(__ASM_SIZE(btc) " %2,%1"
		     CC_SET(c)
		     : CC_OUT(c) (oldbit), ADDR
		     : "Ir" (nr) : "memory");

	return oldbit;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), *addr, c, "Ir", nr);
}

static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
{
	return ((1UL << (nr & (BITS_PER_LONG-1))) &
		(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
}
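
/*
 * Worked example for constant_test_bit(): on a 64-bit kernel, nr == 70
 * tests bit (70 & 63) == 6 of addr[70 >> 6] == addr[1], matching the
 * "bit 0 is the LSB of addr" layout described at the top of this file.
 */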

static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr)
{
	bool oldbit;

	asm volatile(__ASM_SIZE(bt) " %2,%1"
		     CC_SET(c)
		     : CC_OUT(c) (oldbit)
		     : "m" (*(unsigned long *)addr), "Ir" (nr));

	return oldbit;
}

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static bool test_bit(int nr, const volatile unsigned long *addr);
#endif

#define test_bit(nr, addr)			\
	(__builtin_constant_p((nr))		\
	 ? constant_test_bit((nr), (addr))	\
	 : variable_test_bit((nr), (addr)))

/**
 * __ffs - find first set bit in word
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __always_inline unsigned long __ffs(unsigned long word)
{
	asm("rep; bsf %1,%0"
		: "=r" (word)
		: "rm" (word));
	return word;
}
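/* Example: __ffs(0x18) == 3, since 0x18 == 0b11000 and bit 3 is the lowest set bit. */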

/**
 * ffz - find first zero bit in word
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __always_inline unsigned long ffz(unsigned long word)
{
	asm("rep; bsf %1,%0"
		: "=r" (word)
		: "r" (~word));
	return word;
}
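/* Example: ffz(0x17) == 3, since 0x17 == 0b10111 and bit 3 is the lowest clear bit. */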

/*
 * __fls: find last set bit in word
 * @word: The word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static __always_inline unsigned long __fls(unsigned long word)
{
	asm("bsr %1,%0"
	    : "=r" (word)
	    : "rm" (word));
	return word;
}
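/* Example: __fls(0x18) == 4, the zero-based index of the highest set bit. */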

#undef ADDR

#ifdef __KERNEL__
/**
 * ffs - find first set bit in word
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, therefore differs in spirit from the other bitops.
 *
 * ffs(value) returns 0 if value is 0 or the position of the first
 * set bit if value is nonzero. The first (least significant) bit
 * is at position 1.
 */
static __always_inline int ffs(int x)
{
	int r;

#ifdef CONFIG_X86_64
	/*
	 * AMD64 says BSFL won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says its
	 * value is written to set it to the same as before, except that the
	 * top 32 bits will be cleared.
	 *
	 * We cannot do this on 32 bits because at the very least some
	 * 486 CPUs did not behave this way.
	 */
	asm("bsfl %1,%0"
	    : "=r" (r)
	    : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
	asm("bsfl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "r" (-1));
#else
	asm("bsfl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	return r + 1;
}
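
/*
 * Example: ffs(0) == 0 and ffs(0x18) == 4; unlike __ffs() above, the
 * result is 1-based, matching the libc ffs() convention.
 */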

/**
 * fls - find last set bit in word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffs, but returns the position of the most significant set bit.
 *
 * fls(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 32.
 */
static __always_inline int fls(int x)
{
	int r;

#ifdef CONFIG_X86_64
	/*
	 * AMD64 says BSRL won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says its
	 * value is written to set it to the same as before, except that the
	 * top 32 bits will be cleared.
	 *
	 * We cannot do this on 32 bits because at the very least some
	 * 486 CPUs did not behave this way.
	 */
	asm("bsrl %1,%0"
	    : "=r" (r)
	    : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
	asm("bsrl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "rm" (-1));
#else
	asm("bsrl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	return r + 1;
}
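
/*
 * Example: fls(0) == 0 and fls(0x18) == 5; the result is 1-based, so the
 * most significant bit of a nonzero 32-bit value maps to position 32.
 */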

/**
 * fls64 - find last set bit in a 64-bit word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffsll, but returns the position of the most significant set bit.
 *
 * fls64(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 64.
 */
#ifdef CONFIG_X86_64
static __always_inline int fls64(__u64 x)
{
	int bitpos = -1;
	/*
	 * AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says its
	 * value is written to set it to the same as before.
	 */
	asm("bsrq %1,%q0"
	    : "+r" (bitpos)
	    : "rm" (x));
	return bitpos + 1;
}
#else
#include <asm-generic/bitops/fls64.h>
#endif
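/* Example: fls64(0) == 0 and fls64(1ULL << 40) == 41, using the same 1-based convention as fls(). */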

#include <asm-generic/bitops/find.h>

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>

#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* __KERNEL__ */
#endif /* _ASM_X86_BITOPS_H */