uboot/include/asm-ppc/bitops.h
/*
 * bitops.h: Bit string operations on the ppc
 */

#ifndef _PPC_BITOPS_H
#define _PPC_BITOPS_H

#include <linux/config.h>
#include <asm/byteorder.h>

extern void set_bit(int nr, volatile void *addr);
extern void clear_bit(int nr, volatile void *addr);
extern void change_bit(int nr, volatile void *addr);
extern int test_and_set_bit(int nr, volatile void *addr);
extern int test_and_clear_bit(int nr, volatile void *addr);
extern int test_and_change_bit(int nr, volatile void *addr);

/*
 * Arguably these bit operations don't imply any memory barrier or
 * SMP ordering, but in fact a lot of drivers expect them to imply
 * both, since they do on x86 cpus.
 */
#ifdef CONFIG_SMP
#define SMP_WMB         "eieio\n"
#define SMP_MB          "\nsync"
#else
#define SMP_WMB
#define SMP_MB
#endif /* CONFIG_SMP */
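/*
 * eieio acts as a write barrier ahead of the atomic sequence and
 * sync as a full barrier after it; on non-SMP builds both expand
 * to nothing.
 */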

#define __INLINE_BITOPS 1

#if __INLINE_BITOPS
/*
 * These used to be if'd out here because using : "cc" as a constraint
 * resulted in errors from egcs.  Things may be OK with gcc-2.95.
 */
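/*
 * All of the atomic operations below use the PowerPC
 * load-reserve/store-conditional pattern: lwarx loads the word and
 * establishes a reservation, the mask is applied in a register, and
 * stwcx. stores back only if the reservation still holds; otherwise
 * bne branches back and the sequence retries.
 */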
extern __inline__ void set_bit(int nr, volatile void *addr)
{
        unsigned long old;
        unsigned long mask = 1 << (nr & 0x1f);
        unsigned long *p = ((unsigned long *)addr) + (nr >> 5);

        __asm__ __volatile__(SMP_WMB "\
1:      lwarx   %0,0,%3\n\
        or      %0,%0,%2\n\
        stwcx.  %0,0,%3\n\
        bne     1b"
        SMP_MB
        : "=&r" (old), "=m" (*p)
        : "r" (mask), "r" (p), "m" (*p)
        : "cc" );
}

extern __inline__ void clear_bit(int nr, volatile void *addr)
{
        unsigned long old;
        unsigned long mask = 1 << (nr & 0x1f);
        unsigned long *p = ((unsigned long *)addr) + (nr >> 5);

        __asm__ __volatile__(SMP_WMB "\
1:      lwarx   %0,0,%3\n\
        andc    %0,%0,%2\n\
        stwcx.  %0,0,%3\n\
        bne     1b"
        SMP_MB
        : "=&r" (old), "=m" (*p)
        : "r" (mask), "r" (p), "m" (*p)
        : "cc");
}

extern __inline__ void change_bit(int nr, volatile void *addr)
{
        unsigned long old;
        unsigned long mask = 1 << (nr & 0x1f);
        unsigned long *p = ((unsigned long *)addr) + (nr >> 5);

        __asm__ __volatile__(SMP_WMB "\
1:      lwarx   %0,0,%3\n\
        xor     %0,%0,%2\n\
        stwcx.  %0,0,%3\n\
        bne     1b"
        SMP_MB
        : "=&r" (old), "=m" (*p)
        : "r" (mask), "r" (p), "m" (*p)
        : "cc");
}

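/*
 * The test_and_* variants use the same retry loop, but compute the
 * new value into a second register so the old value can be tested
 * and the previous state of the bit returned.
 */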
extern __inline__ int test_and_set_bit(int nr, volatile void *addr)
{
        unsigned int old, t;
        unsigned int mask = 1 << (nr & 0x1f);
        volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);

        __asm__ __volatile__(SMP_WMB "\
1:      lwarx   %0,0,%4\n\
        or      %1,%0,%3\n\
        stwcx.  %1,0,%4\n\
        bne     1b"
        SMP_MB
        : "=&r" (old), "=&r" (t), "=m" (*p)
        : "r" (mask), "r" (p), "m" (*p)
        : "cc");

        return (old & mask) != 0;
}

extern __inline__ int test_and_clear_bit(int nr, volatile void *addr)
{
        unsigned int old, t;
        unsigned int mask = 1 << (nr & 0x1f);
        volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);

        __asm__ __volatile__(SMP_WMB "\
1:      lwarx   %0,0,%4\n\
        andc    %1,%0,%3\n\
        stwcx.  %1,0,%4\n\
        bne     1b"
        SMP_MB
        : "=&r" (old), "=&r" (t), "=m" (*p)
        : "r" (mask), "r" (p), "m" (*p)
        : "cc");

        return (old & mask) != 0;
}

extern __inline__ int test_and_change_bit(int nr, volatile void *addr)
{
        unsigned int old, t;
        unsigned int mask = 1 << (nr & 0x1f);
        volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);

        __asm__ __volatile__(SMP_WMB "\
1:      lwarx   %0,0,%4\n\
        xor     %1,%0,%3\n\
        stwcx.  %1,0,%4\n\
        bne     1b"
        SMP_MB
        : "=&r" (old), "=&r" (t), "=m" (*p)
        : "r" (mask), "r" (p), "m" (*p)
        : "cc");

        return (old & mask) != 0;
}
#endif /* __INLINE_BITOPS */

extern __inline__ int test_bit(int nr, __const__ volatile void *addr)
{
        __const__ unsigned int *p = (__const__ unsigned int *) addr;

        return ((p[nr >> 5] >> (nr & 0x1f)) & 1) != 0;
}

/* Return the bit position of the most significant 1 bit in a word */
/* - the result is undefined when x == 0 */
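/* - example: __ilog2(0x00008000) == 15, since cntlzw returns 16 */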
extern __inline__ int __ilog2(unsigned int x)
{
        int lz;

        asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x));
        return 31 - lz;
}

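/*
 * ffz: find first (least significant) zero bit.  Complements x,
 * isolates the lowest set bit with (x & -x) and takes its log2,
 * e.g. ffz(0x0000000f) == 4; returns 32 when there is no zero bit.
 */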
extern __inline__ int ffz(unsigned int x)
{
        if ((x = ~x) == 0)
                return 32;
        return __ilog2(x & -x);
}

/*
 * fls: find last (most-significant) bit set.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 *
 * On powerpc, __ilog2(0) happens to return -1 (cntlzw of 0 is 32),
 * so fls(0) comes out as 0 here, but relying on __ilog2(0) is not
 * safe in general.
 */
static __inline__ int fls(unsigned int x)
{
        return __ilog2(x) + 1;
}

/**
 * fls64 - find last set bit in a 64-bit word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffsll, but returns the position of the most significant set bit.
 *
 * fls64(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 64.
 */
#if BITS_PER_LONG == 32
static inline int fls64(__u64 x)
{
        __u32 h = x >> 32;
        if (h)
                return fls(h) + 32;
        return fls(x);
}
#elif BITS_PER_LONG == 64
static inline int fls64(__u64 x)
{
        if (x == 0)
                return 0;
        return __ilog2(x) + 1;
}
#else
#error BITS_PER_LONG not 32 or 64
#endif
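/* Example: fls64(1) == 1, fls64(0x100000000ULL) == 33, fls64(0) == 0. */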

static inline int __ilog2_u64(u64 n)
{
        return fls64(n) - 1;
}

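/*
 * ffs64: 64-bit counterpart of ffs below; isolates the lowest set
 * bit with (x & -x) and uses __ilog2_u64() (i.e. fls64() - 1), so
 * ffs64(0) == 0.
 */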
static inline int ffs64(u64 x)
{
        return __ilog2_u64(x & -x) + 1ull;
}

#ifdef __KERNEL__

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
extern __inline__ int ffs(int x)
{
        return __ilog2(x & -x) + 1;
}

/*
 * hweightN: returns the Hamming weight (i.e. the number
 * of bits set) of an N-bit word
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)

#endif /* __KERNEL__ */

/*
 * This implementation of find_{first,next}_zero_bit was stolen from
 * Linus' asm-alpha/bitops.h.
 */
#define find_first_zero_bit(addr, size) \
        find_next_zero_bit((addr), (size), 0)

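/*
 * Scan word by word: force the bits below 'offset' in the first
 * partial word to one so they are skipped, walk whole words until
 * one is not all ones, then force the bits beyond 'size' in the
 * last word to one before handing the word to ffz().
 */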
extern __inline__ unsigned long find_next_zero_bit(void * addr,
        unsigned long size, unsigned long offset)
{
        unsigned int * p = ((unsigned int *) addr) + (offset >> 5);
        unsigned int result = offset & ~31UL;
        unsigned int tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if (offset) {
                tmp = *p++;
                tmp |= ~0UL >> (32-offset);
                if (size < 32)
                        goto found_first;
                if (tmp != ~0U)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while (size >= 32) {
                if ((tmp = *p++) != ~0U)
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = *p;
found_first:
        tmp |= ~0UL << size;
found_middle:
        return result + ffz(tmp);
}


#define _EXT2_HAVE_ASM_BITOPS_

#ifdef __KERNEL__
/*
 * test_and_{set,clear}_bit guarantee atomicity without
 * disabling interrupts.
 */
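/*
 * The ^ 0x18 converts ext2's little-endian bit numbering to the
 * big-endian word layout used above: flipping bits 3 and 4 of the
 * bit number (0x18 == 24) swaps the byte index within the 32-bit
 * word (0<->3, 1<->2).
 */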
#define ext2_set_bit(nr, addr)          test_and_set_bit((nr) ^ 0x18, addr)
#define ext2_clear_bit(nr, addr)        test_and_clear_bit((nr) ^ 0x18, addr)

#else
extern __inline__ int ext2_set_bit(int nr, void * addr)
{
        int             mask;
        unsigned char   *ADDR = (unsigned char *) addr;
        int oldbit;

        ADDR += nr >> 3;
        mask = 1 << (nr & 0x07);
        oldbit = (*ADDR & mask) ? 1 : 0;
        *ADDR |= mask;
        return oldbit;
}

extern __inline__ int ext2_clear_bit(int nr, void * addr)
{
        int             mask;
        unsigned char   *ADDR = (unsigned char *) addr;
        int oldbit;

        ADDR += nr >> 3;
        mask = 1 << (nr & 0x07);
        oldbit = (*ADDR & mask) ? 1 : 0;
        *ADDR = *ADDR & ~mask;
        return oldbit;
}
#endif  /* __KERNEL__ */

extern __inline__ int ext2_test_bit(int nr, __const__ void * addr)
{
        __const__ unsigned char *ADDR = (__const__ unsigned char *) addr;

        return (ADDR[nr >> 3] >> (nr & 7)) & 1;
}

/*
 * This implementation of ext2_find_{first,next}_zero_bit was stolen from
 * Linus' asm-alpha/bitops.h and modified for a big-endian machine.
 */

#define ext2_find_first_zero_bit(addr, size) \
        ext2_find_next_zero_bit((addr), (size), 0)

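/*
 * Same algorithm as find_next_zero_bit() above, except each word is
 * converted with cpu_to_le32p() first, since ext2 bitmaps are
 * little-endian on disk and this is a big-endian machine.
 */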
static __inline__ unsigned long ext2_find_next_zero_bit(void *addr,
        unsigned long size, unsigned long offset)
{
        unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
        unsigned int result = offset & ~31UL;
        unsigned int tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if (offset) {
                tmp = cpu_to_le32p(p++);
                tmp |= ~0UL >> (32-offset);
                if (size < 32)
                        goto found_first;
                if (tmp != ~0U)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while (size >= 32) {
                if ((tmp = cpu_to_le32p(p++)) != ~0U)
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = cpu_to_le32p(p);
found_first:
        tmp |= ~0U << size;
found_middle:
        return result + ffz(tmp);
}

/* Bitmap functions for the minix filesystem.  */
#define minix_test_and_set_bit(nr,addr) ext2_set_bit(nr,addr)
#define minix_set_bit(nr,addr) ((void)ext2_set_bit(nr,addr))
#define minix_test_and_clear_bit(nr,addr) ext2_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) ext2_test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) ext2_find_first_zero_bit(addr,size)

#endif /* _PPC_BITOPS_H */