linux/arch/ia64/include/asm/bitops.h
#ifndef _ASM_IA64_BITOPS_H
#define _ASM_IA64_BITOPS_H

/*
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's ia64
 * O(1) scheduler patch
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/intrinsics.h>
#include <asm/barrier.h>

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 *
 * The address must be (at least) "long" aligned.
 * Note that there are drivers (e.g., eepro100) which use these operations to
 * operate on hw-defined data-structures, so we can't easily change these
 * operations to force a bigger alignment.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */
static __inline__ void
set_bit (int nr, volatile void *addr)
{
        __u32 bit, old, new;
        volatile __u32 *m;
        CMPXCHG_BUGCHECK_DECL

        m = (volatile __u32 *) addr + (nr >> 5);
        bit = 1 << (nr & 31);
        do {
                CMPXCHG_BUGCHECK(m);
                old = *m;
                new = old | bit;
        } while (cmpxchg_acq(m, old, new) != old);
}
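
/*
 * Illustrative usage sketch, not part of the original header: atomically
 * mark slot @nr busy in a caller-owned bitmap.  The function and variable
 * names below are hypothetical.
 */
static __inline__ void
example_mark_busy (int nr, volatile unsigned long *bitmap)
{
        /* nr may exceed 31: e.g., bit 40 lands in the second 32-bit word. */
        set_bit(nr, bitmap);
}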

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void
__set_bit (int nr, volatile void *addr)
{
        *((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void
clear_bit (int nr, volatile void *addr)
{
        __u32 mask, old, new;
        volatile __u32 *m;
        CMPXCHG_BUGCHECK_DECL

        m = (volatile __u32 *) addr + (nr >> 5);
        mask = ~(1 << (nr & 31));
        do {
                CMPXCHG_BUGCHECK(m);
                old = *m;
                new = old & mask;
        } while (cmpxchg_acq(m, old, new) != old);
}
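
/*
 * Illustrative sketch, not part of the original header: the barrier
 * pattern the comment above describes, for callers that use clear_bit()
 * to publish completion of earlier stores.  Names are hypothetical.
 */
static __inline__ void
example_publish_done (int nr, volatile unsigned long *bitmap)
{
        smp_mb__before_atomic();        /* order prior stores before the clear */
        clear_bit(nr, bitmap);
}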

/**
 * clear_bit_unlock - Clears a bit in memory with release
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and may not be reordered.  It does
 * contain a memory barrier suitable for unlock type operations.
 */
static __inline__ void
clear_bit_unlock (int nr, volatile void *addr)
{
        __u32 mask, old, new;
        volatile __u32 *m;
        CMPXCHG_BUGCHECK_DECL

        m = (volatile __u32 *) addr + (nr >> 5);
        mask = ~(1 << (nr & 31));
        do {
                CMPXCHG_BUGCHECK(m);
                old = *m;
                new = old & mask;
        } while (cmpxchg_rel(m, old, new) != old);
}

/**
 * __clear_bit_unlock - Non-atomically clears a bit in memory with release
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * Similarly to clear_bit_unlock, the implementation uses a store
 * with release semantics. See also arch_spin_unlock().
 */
static __inline__ void
__clear_bit_unlock(int nr, void *addr)
{
        __u32 * const m = (__u32 *) addr + (nr >> 5);
        __u32 const new = *m & ~(1 << (nr & 31));

        ia64_st4_rel_nta(m, new);
}

/**
 * __clear_bit - Clears a bit in memory (non-atomic version)
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * Unlike clear_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void
__clear_bit (int nr, volatile void *addr)
{
        *((__u32 *) addr + (nr >> 5)) &= ~(1 << (nr & 31));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to toggle
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
change_bit (int nr, volatile void *addr)
{
        __u32 bit, old, new;
        volatile __u32 *m;
        CMPXCHG_BUGCHECK_DECL

        m = (volatile __u32 *) addr + (nr >> 5);
        bit = (1 << (nr & 31));
        do {
                CMPXCHG_BUGCHECK(m);
                old = *m;
                new = old ^ bit;
        } while (cmpxchg_acq(m, old, new) != old);
}

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void
__change_bit (int nr, volatile void *addr)
{
        *((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31));
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies the acquisition side of the memory barrier.
 */
static __inline__ int
test_and_set_bit (int nr, volatile void *addr)
{
        __u32 bit, old, new;
        volatile __u32 *m;
        CMPXCHG_BUGCHECK_DECL

        m = (volatile __u32 *) addr + (nr >> 5);
        bit = 1 << (nr & 31);
        do {
                CMPXCHG_BUGCHECK(m);
                old = *m;
                new = old | bit;
        } while (cmpxchg_acq(m, old, new) != old);
        return (old & bit) != 0;
}
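
/*
 * Illustrative sketch, not part of the original header: a bit-level
 * trylock built on test_and_set_bit(), relying on its acquire semantics.
 * Names are hypothetical.
 */
static __inline__ int
example_trylock (volatile unsigned long *lock)
{
        /* Returns 1 when the bit was previously clear, i.e. we own it. */
        return !test_and_set_bit(0, lock);
}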

/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit() on ia64.
 */
#define test_and_set_bit_lock test_and_set_bit

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int
__test_and_set_bit (int nr, volatile void *addr)
{
        __u32 *p = (__u32 *) addr + (nr >> 5);
        __u32 m = 1 << (nr & 31);
        int oldbitset = (*p & m) != 0;

        *p |= m;
        return oldbitset;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies the acquisition side of the memory barrier.
 */
static __inline__ int
test_and_clear_bit (int nr, volatile void *addr)
{
        __u32 mask, old, new;
        volatile __u32 *m;
        CMPXCHG_BUGCHECK_DECL

        m = (volatile __u32 *) addr + (nr >> 5);
        mask = ~(1 << (nr & 31));
        do {
                CMPXCHG_BUGCHECK(m);
                old = *m;
                new = old & mask;
        } while (cmpxchg_acq(m, old, new) != old);
        return (old & ~mask) != 0;
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int
__test_and_clear_bit(int nr, volatile void * addr)
{
        __u32 *p = (__u32 *) addr + (nr >> 5);
        __u32 m = 1 << (nr & 31);
        int oldbitset = (*p & m) != 0;

        *p &= ~m;
        return oldbitset;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies the acquisition side of the memory barrier.
 */
static __inline__ int
test_and_change_bit (int nr, volatile void *addr)
{
        __u32 bit, old, new;
        volatile __u32 *m;
        CMPXCHG_BUGCHECK_DECL

        m = (volatile __u32 *) addr + (nr >> 5);
        bit = (1 << (nr & 31));
        do {
                CMPXCHG_BUGCHECK(m);
                old = *m;
                new = old ^ bit;
        } while (cmpxchg_acq(m, old, new) != old);
        return (old & bit) != 0;
}

/**
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 */
static __inline__ int
__test_and_change_bit (int nr, void *addr)
{
        __u32 old, bit = (1 << (nr & 31));
        __u32 *m = (__u32 *) addr + (nr >> 5);

        old = *m;
        *m = old ^ bit;
        return (old & bit) != 0;
}

/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static __inline__ int
test_bit (int nr, const volatile void *addr)
{
        return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
}
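
/*
 * Illustrative sketch, not part of the original header: polling a status
 * bit with test_bit().  Names are hypothetical.
 */
static __inline__ int
example_is_ready (const volatile unsigned long *status)
{
        return test_bit(0, status);
}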

/**
 * ffz - find the first zero bit in a long word
 * @x: The long word to find the bit in
 *
 * Returns the bit-number (0..63) of the first (least significant) zero bit.
 * Undefined if no zero exists, so code should check against ~0UL first...
 */
static inline unsigned long
ffz (unsigned long x)
{
        unsigned long result;

        result = ia64_popcnt(x & (~x - 1));
        return result;
}
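
/*
 * Worked example (illustrative): for x = 0xb7 = 1011 0111, the first zero
 * bit is bit 3.  ~x ends in 0100 1000, so ~x - 1 ends in 0100 0111, and
 * x & (~x - 1) = 0000 0111: a mask with one bit set per position below the
 * first zero.  ia64_popcnt() of that mask yields 3.
 */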

/**
 * __ffs - find the first (least significant) set bit in a word.
 * @x: The word to search
 *
 * Undefined if no bit is set, so code should check against 0 first.
 */
static __inline__ unsigned long
__ffs (unsigned long x)
{
        unsigned long result;

        result = ia64_popcnt((x-1) & ~x);
        return result;
}
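
/*
 * Worked example (illustrative): for x = 0x58 = 0101 1000, the lowest set
 * bit is bit 3.  x - 1 = 0101 0111 and ~x ends in 1010 0111, so
 * (x - 1) & ~x = 0000 0111, the mask of bits below the lowest set bit;
 * ia64_popcnt() of that mask yields 3.
 */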

#ifdef __KERNEL__

/*
 * Return bit number of last (most-significant) bit set.  Undefined
 * for x==0.  Bits are numbered from 0..63 (e.g., ia64_fls(9) == 3).
 */
static inline unsigned long
ia64_fls (unsigned long x)
{
        long double d = x;
        long exp;

        exp = ia64_getf_exp(d);
        return exp - 0xffff;
}
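
/*
 * Worked example (illustrative): converting x to long double normalizes
 * it, and getf.exp extracts the biased exponent.  For x = 9 = 1.001 * 2^3
 * the exponent field holds 0xffff + 3 (0xffff is the register-format
 * bias), so subtracting the bias returns 3.
 */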

/*
 * Find the last (most significant) bit set.  Returns 0 for x==0 and
 * bits are numbered from 1..32 (e.g., fls(9) == 4).
 */
static inline int
fls (int t)
{
        unsigned long x = t & 0xffffffffu;

        if (!x)
                return 0;
        x |= x >> 1;
        x |= x >> 2;
        x |= x >> 4;
        x |= x >> 8;
        x |= x >> 16;
        return ia64_popcnt(x);
}
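
/*
 * Worked example (illustrative): the shift/OR cascade smears the most
 * significant set bit into every lower position, so 9 = 0000 1001 becomes
 * 0000 1111; ia64_popcnt() of the smeared value (4) is the 1-based index
 * of the highest set bit.
 */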

/*
 * Find the last (most significant) bit set.  Undefined for x==0.
 * Bits are numbered from 0..63 (e.g., __fls(9) == 3).
 */
static inline unsigned long
__fls (unsigned long x)
{
        x |= x >> 1;
        x |= x >> 2;
        x |= x >> 4;
        x |= x >> 8;
        x |= x >> 16;
        x |= x >> 32;
        return ia64_popcnt(x) - 1;
}

#include <asm-generic/bitops/fls64.h>

#include <asm-generic/bitops/builtin-ffs.h>

/*
 * hweightN: returns the Hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */
static __inline__ unsigned long __arch_hweight64(unsigned long x)
{
        unsigned long result;
        result = ia64_popcnt(x);
        return result;
}

#define __arch_hweight32(x) ((unsigned int) __arch_hweight64((x) & 0xfffffffful))
#define __arch_hweight16(x) ((unsigned int) __arch_hweight64((x) & 0xfffful))
#define __arch_hweight8(x)  ((unsigned int) __arch_hweight64((x) & 0xfful))
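
/*
 * Worked example (illustrative): __arch_hweight32(0xf0f0) masks the
 * argument to 32 bits and counts the set bits with popcnt, returning 8;
 * the masking in each macro keeps the popcnt argument within the
 * requested width.
 */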

#include <asm-generic/bitops/const_hweight.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#include <asm-generic/bitops/sched.h>

#endif /* __KERNEL__ */

#endif /* _ASM_IA64_BITOPS_H */