uboot/arch/x86/include/asm/bitops.h
#ifndef _I386_BITOPS_H
#define _I386_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */
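
/*
 * For instance (an illustrative sketch, not part of this header): with a
 * caller-defined two-word bitmap,
 *
 *	unsigned long bitmap[2] = { 0, 0 };
 *
 * bit numbers 0..31 address bitmap[0] and 32..63 address bitmap[1], so
 * set_bit(37, bitmap) sets bit 5 of bitmap[1].
 */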

#ifdef CONFIG_SMP
#define LOCK_PREFIX "lock ; "
#else
#define LOCK_PREFIX ""
#endif

#define ADDR (*(volatile long *) addr)

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btsl %1,%0"
		:"+m" (ADDR)
		:"Ir" (nr));
}
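
/*
 * Usage sketch (illustrative only; "pending" is a caller-defined word,
 * not part of this header):
 *
 *	static volatile unsigned long pending;
 *
 *	set_bit(3, &pending);
 *
 * On SMP builds the LOCK_PREFIX makes the read-modify-write of the
 * containing word atomic with respect to other processors.
 */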

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
	__asm__(
		"btsl %1,%0"
		:"+m" (ADDR)
		:"Ir" (nr));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btrl %1,%0"
		:"+m" (ADDR)
		:"Ir" (nr));
}
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
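
/*
 * Release sketch (illustrative only; LOCK_BIT and lock_word are
 * hypothetical caller-defined names): when a bit is used as a lock,
 * order the critical section's stores before dropping it, as the
 * comment above prescribes.
 *
 *	smp_mb__before_clear_bit();
 *	clear_bit(LOCK_BIT, &lock_word);
 */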

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__(
		"btcl %1,%0"
		:"+m" (ADDR)
		:"Ir" (nr));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to toggle
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btcl %1,%0"
		:"+m" (ADDR)
		:"Ir" (nr));
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}
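
/*
 * Acquire sketch (illustrative only; LOCK_BIT and lock_word are
 * hypothetical caller-defined names): since the old value comes back
 * atomically, a bit can serve as a simple lock.
 *
 *	while (test_and_set_bit(LOCK_BIT, &lock_word))
 *		;
 *
 * Exactly one racing caller sees the bit's 0 -> 1 transition and wins;
 * the others spin until it is cleared again.
 */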

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__(
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"Ir" (nr));
	return oldbit;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__(
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"Ir" (nr));
	return oldbit;
}

/* WARNING: non-atomic and it can be reordered! */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btcl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to toggle
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btcl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static int test_bit(int nr, const volatile void * addr);
#endif

static __inline__ int constant_test_bit(int nr, const volatile void * addr)
{
	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int variable_test_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit)
		:"m" (ADDR),"Ir" (nr));
	return oldbit;
}

#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 constant_test_bit((nr),(addr)) : \
 variable_test_bit((nr),(addr)))
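
/*
 * The macro dispatches at compile time (illustrative; "bitmap" and "i"
 * are hypothetical): test_bit(7, bitmap) has a constant @nr, so
 * __builtin_constant_p() is true and the pure-C constant_test_bit() is
 * used; test_bit(i, bitmap) with a variable @nr goes through the
 * btl-based variable_test_bit().
 */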

/**
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static __inline__ int find_first_zero_bit(void * addr, unsigned size)
{
	int d0, d1, d2;
	int res;

	if (!size)
		return 0;
	/* This looks at memory. Mark it volatile to tell gcc not to move it around */
	__asm__ __volatile__(
		"movl $-1,%%eax\n\t"
		"xorl %%edx,%%edx\n\t"
		"repe; scasl\n\t"
		"je 1f\n\t"
		"xorl -4(%%edi),%%eax\n\t"
		"subl $4,%%edi\n\t"
		"bsfl %%eax,%%edx\n"
		"1:\tsubl %%ebx,%%edi\n\t"
		"shll $3,%%edi\n\t"
		"addl %%edi,%%edx"
		:"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
		:"1" ((size + 31) >> 5), "2" (addr), "b" (addr));
	return res;
}
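
/*
 * Allocation sketch (illustrative only; bitmap and NUM_SLOTS are
 * hypothetical): a common pattern is to find and claim a free slot.
 *
 *	int slot = find_first_zero_bit(bitmap, NUM_SLOTS);
 *	if (slot < NUM_SLOTS)
 *		__set_bit(slot, bitmap);
 *
 * Note the find/claim pair is not atomic as written; callers that can
 * race need a lock around it.
 */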

/**
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size to search
 * @offset: The bit number to start searching at
 */
static __inline__ int find_next_zero_bit(void * addr, int size, int offset)
{
	unsigned long * p = ((unsigned long *) addr) + (offset >> 5);
	int set = 0, bit = offset & 31, res;

	if (bit) {
		/*
		 * Look for zero in the first word
		 */
		__asm__("bsfl %1,%0\n\t"
			"jne 1f\n\t"
			"movl $32, %0\n"
			"1:"
			: "=r" (set)
			: "r" (~(*p >> bit)));
		if (set < (32 - bit))
			return set + offset;
		set = 32 - bit;
		p++;
	}
	/*
	 * No zero yet, search remaining full words for a zero
	 */
	res = find_first_zero_bit(p, size - 32 * (p - (unsigned long *) addr));
	return (offset + set + res);
}
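
/*
 * Iteration sketch (illustrative only; bitmap, NUM_BITS and
 * handle_free() are hypothetical): walk every zero bit in a bitmap.
 *
 *	int bit;
 *
 *	for (bit = find_first_zero_bit(bitmap, NUM_BITS);
 *	     bit < NUM_BITS;
 *	     bit = find_next_zero_bit(bitmap, NUM_BITS, bit + 1))
 *		handle_free(bit);
 */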

/**
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
	__asm__("bsfl %1,%0"
		:"=r" (word)
		:"r" (~word));
	return word;
}
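
/*
 * For example, ffz(0x0000ffff) is 16: bits 0..15 are set and bit 16 is
 * the first zero. Per the note above, guard against an all-ones word
 * (illustrative, hypothetical variable names):
 *
 *	if (word != ~0UL)
 *		bit = ffz(word);
 */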

#ifdef __KERNEL__

/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static __inline__ int ffs(int x)
{
	int r;

	__asm__("bsfl %1,%0\n\t"
		"jnz 1f\n\t"
		"movl $-1,%0\n"
		"1:" : "=r" (r) : "g" (x));
	return r+1;
}
#define PLATFORM_FFS
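
/*
 * Unlike ffz(), ffs() is 1-based and returns 0 when no bit is set,
 * matching the libc convention: ffs(0) == 0, ffs(1) == 1 and
 * ffs(0x8000) == 16.
 */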

static inline int __ilog2(unsigned int x)
{
	return generic_fls(x) - 1;
}
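
/*
 * __ilog2() returns the index of the most-significant set bit, e.g.
 * __ilog2(1) == 0 and __ilog2(4096) == 12; for a power of two this is
 * exactly log2(x).
 */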

/**
 * hweightN - returns the hamming weight of an N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)

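/*
 * For example, hweight8(0xf0) is 4 and hweight32(0xffffffff) is 32; the
 * generic_hweight*() helpers are assumed to be provided by U-Boot's
 * generic bitops support.
 */
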
#endif /* __KERNEL__ */

#ifdef __KERNEL__

#define ext2_set_bit                 __test_and_set_bit
#define ext2_clear_bit               __test_and_clear_bit
#define ext2_test_bit                test_bit
#define ext2_find_first_zero_bit     find_first_zero_bit
#define ext2_find_next_zero_bit      find_next_zero_bit

/* Bitmap functions for the minix filesystem.  */
#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) __set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */

#endif /* _I386_BITOPS_H */