uboot/arch/x86/include/asm/bitops.h
#ifndef _I386_BITOPS_H
#define _I386_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. The test_and_* operations return 0 if the
 * bit was clear before the operation and != 0 if it was set.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>

#ifdef CONFIG_SMP
#define LOCK_PREFIX "lock ; "
#else
#define LOCK_PREFIX ""
#endif

#define ADDR (*(volatile long *) addr)

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__( LOCK_PREFIX
                "btsl %1,%0"
                :"=m" (ADDR)
                :"Ir" (nr));
}

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
        __asm__(
                "btsl %1,%0"
                :"=m" (ADDR)
                :"Ir" (nr));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__( LOCK_PREFIX
                "btrl %1,%0"
                :"=m" (ADDR)
                :"Ir" (nr));
}
#define smp_mb__before_clear_bit()      barrier()
#define smp_mb__after_clear_bit()       barrier()
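
/*
 * Usage sketch (illustrative only; pending_flags is a hypothetical bitmap,
 * not part of this header).  set_bit() marks flag 3, clear_bit() clears it
 * again, and change_bit() toggles flag 0, each atomically with respect to
 * other CPUs when CONFIG_SMP adds the lock prefix:
 *
 *        static unsigned long pending_flags;
 *
 *        set_bit(3, &pending_flags);
 *        clear_bit(3, &pending_flags);
 *        change_bit(0, &pending_flags);
 */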
  83
  84/**
  85 * __change_bit - Toggle a bit in memory
  86 * @nr: the bit to set
  87 * @addr: the address to start counting from
  88 *
  89 * Unlike change_bit(), this function is non-atomic and may be reordered.
  90 * If it's called on the same region of memory simultaneously, the effect
  91 * may be that only one operation succeeds.
  92 */
  93static __inline__ void __change_bit(int nr, volatile void * addr)
  94{
  95        __asm__ __volatile__(
  96                "btcl %1,%0"
  97                :"=m" (ADDR)
  98                :"Ir" (nr));
  99}
 100
 101/**
 102 * change_bit - Toggle a bit in memory
 103 * @nr: Bit to clear
 104 * @addr: Address to start counting from
 105 *
 106 * change_bit() is atomic and may not be reordered.
 107 * Note that @nr may be almost arbitrarily large; this function is not
 108 * restricted to acting on a single-word quantity.
 109 */
 110static __inline__ void change_bit(int nr, volatile void * addr)
 111{
 112        __asm__ __volatile__( LOCK_PREFIX
 113                "btcl %1,%0"
 114                :"=m" (ADDR)
 115                :"Ir" (nr));
 116}
 117
 118/**
 119 * test_and_set_bit - Set a bit and return its old value
 120 * @nr: Bit to set
 121 * @addr: Address to count from
 122 *
 123 * This operation is atomic and cannot be reordered.
 124 * It also implies a memory barrier.
 125 */
 126static __inline__ int test_and_set_bit(int nr, volatile void * addr)
 127{
 128        int oldbit;
 129
 130        __asm__ __volatile__( LOCK_PREFIX
 131                "btsl %2,%1\n\tsbbl %0,%0"
 132                :"=r" (oldbit),"=m" (ADDR)
 133                :"Ir" (nr) : "memory");
 134        return oldbit;
 135}
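
/*
 * Usage sketch (illustrative only; init_done and do_one_time_init() are
 * hypothetical, not part of this header).  Because test_and_set_bit()
 * atomically returns the previous value, only the first caller to see 0
 * performs the one-time initialisation:
 *
 *        static unsigned long init_done;
 *
 *        if (!test_and_set_bit(0, &init_done))
 *                do_one_time_init();
 */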

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
        int oldbit;

        __asm__(
                "btsl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),"=m" (ADDR)
                :"Ir" (nr));
        return oldbit;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
        int oldbit;

        __asm__ __volatile__( LOCK_PREFIX
                "btrl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),"=m" (ADDR)
                :"Ir" (nr) : "memory");
        return oldbit;
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
        int oldbit;

        __asm__(
                "btrl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),"=m" (ADDR)
                :"Ir" (nr));
        return oldbit;
}

/* WARNING: non-atomic and it can be reordered! */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
        int oldbit;

        __asm__ __volatile__(
                "btcl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),"=m" (ADDR)
                :"Ir" (nr) : "memory");
        return oldbit;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
        int oldbit;

        __asm__ __volatile__( LOCK_PREFIX
                "btcl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),"=m" (ADDR)
                :"Ir" (nr) : "memory");
        return oldbit;
}

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static int test_bit(int nr, const volatile void * addr);
#endif

static __inline__ int constant_test_bit(int nr, const volatile void * addr)
{
        return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int variable_test_bit(int nr, volatile void * addr)
{
        int oldbit;

        __asm__ __volatile__(
                "btl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit)
                :"m" (ADDR),"Ir" (nr));
        return oldbit;
}

#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 constant_test_bit((nr),(addr)) : \
 variable_test_bit((nr),(addr)))

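/*
 * Usage sketch (illustrative only; status, next_bit_to_poll() and
 * handle_event() are hypothetical, not part of this header).
 * test_bit(0, &status) uses a compile-time constant and expands to the
 * plain-C constant_test_bit(), which the compiler can fold; with a runtime
 * bit number it falls back to the btl-based variable_test_bit():
 *
 *        static unsigned long status;
 *        int nr = next_bit_to_poll();
 *
 *        if (test_bit(0, &status) || test_bit(nr, &status))
 *                handle_event();
 */
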
/**
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static __inline__ int find_first_zero_bit(void * addr, unsigned size)
{
        int d0, d1, d2;
        int res;

        if (!size)
                return 0;
        /* This looks at memory. Mark it volatile to tell gcc not to move it around */
        __asm__ __volatile__(
                "movl $-1,%%eax\n\t"
                "xorl %%edx,%%edx\n\t"
                "repe; scasl\n\t"
                "je 1f\n\t"
                "xorl -4(%%edi),%%eax\n\t"
                "subl $4,%%edi\n\t"
                "bsfl %%eax,%%edx\n"
                "1:\tsubl %%ebx,%%edi\n\t"
                "shll $3,%%edi\n\t"
                "addl %%edi,%%edx"
                :"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
                :"1" ((size + 31) >> 5), "2" (addr), "b" (addr));
        return res;
}

/**
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size to search
 * @offset: The bit number to start searching at
 */
static __inline__ int find_next_zero_bit (void * addr, int size, int offset)
{
        unsigned long * p = ((unsigned long *) addr) + (offset >> 5);
        int set = 0, bit = offset & 31, res;

        if (bit) {
                /*
                 * Look for a zero bit in the first word
                 */
                __asm__("bsfl %1,%0\n\t"
                        "jne 1f\n\t"
                        "movl $32, %0\n"
                        "1:"
                        : "=r" (set)
                        : "r" (~(*p >> bit)));
                if (set < (32 - bit))
                        return set + offset;
                set = 32 - bit;
                p++;
        }
        /*
         * No zero yet, search remaining full words for a zero
         */
        res = find_first_zero_bit (p, size - 32 * (p - (unsigned long *) addr));
        return (offset + set + res);
}
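
/*
 * Usage sketch (illustrative only; inode_map is a hypothetical 128-bit map,
 * not part of this header).  A simple free-slot allocator finds the first
 * clear bit and claims it; find_next_zero_bit() continues the scan from a
 * given bit offset:
 *
 *        static unsigned long inode_map[4];
 *
 *        int slot = find_first_zero_bit(inode_map, 128);
 *        if (slot < 128)
 *                __set_bit(slot, inode_map);
 *
 *        int next = find_next_zero_bit(inode_map, 128, slot + 1);
 */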

/**
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
        __asm__("bsfl %1,%0"
                :"=r" (word)
                :"r" (~word));
        return word;
}
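
/*
 * Usage sketch (illustrative only; mask and free_slot are hypothetical).
 * When a word is used as an allocation mask, the first zero bit is the
 * first free entry; ffz(0xffff00ffUL) == 8, for example.  The result is
 * undefined for ~0UL, so check for a full mask first:
 *
 *        if (mask != ~0UL)
 *                free_slot = ffz(mask);
 */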

#ifdef __KERNEL__

/**
 * __ffs - find first set bit in word
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
        __asm__("rep; bsf %1,%0"
                : "=r" (word)
                : "rm" (word));
        return word;
}

/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static __inline__ int ffs(int x)
{
        int r;

        __asm__("bsfl %1,%0\n\t"
                "jnz 1f\n\t"
                "movl $-1,%0\n"
                "1:" : "=r" (r) : "rm" (x));

        return r+1;
}
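
/*
 * Usage sketch (illustrative only).  ffs() is 1-based and returns 0 for a
 * zero argument, matching the libc convention, while __ffs() is 0-based and
 * must not be called with 0:
 *
 *        ffs(0)      == 0
 *        ffs(1)      == 1
 *        ffs(0x40)   == 7
 *        __ffs(0x40) == 6
 */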

#define PLATFORM_FFS

static inline int __ilog2(unsigned int x)
{
        return generic_fls(x) - 1;
}

/**
 * hweightN - returns the hamming weight of an N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
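
/*
 * Usage sketch (illustrative only).  The hweight helpers simply count the
 * set bits in a value, e.g. hweight8(0xf0) == 4 and hweight32(0x80000001)
 * == 2; a typical use is counting enabled channels in a hardware enable
 * mask.
 */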

#endif /* __KERNEL__ */

#ifdef __KERNEL__

#define ext2_set_bit                 __test_and_set_bit
#define ext2_clear_bit               __test_and_clear_bit
#define ext2_test_bit                test_bit
#define ext2_find_first_zero_bit     find_first_zero_bit
#define ext2_find_next_zero_bit      find_next_zero_bit

/* Bitmap functions for the minix filesystem.  */
#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) __set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */

#endif /* _I386_BITOPS_H */