/*
 * bitops.h: Bit string operations on the ppc
 */

#ifndef _PPC_BITOPS_H
#define _PPC_BITOPS_H

#include <linux/config.h>
#include <asm/byteorder.h>

extern void set_bit(int nr, volatile void *addr);
extern void clear_bit(int nr, volatile void *addr);
extern void change_bit(int nr, volatile void *addr);
extern int test_and_set_bit(int nr, volatile void *addr);
extern int test_and_clear_bit(int nr, volatile void *addr);
extern int test_and_change_bit(int nr, volatile void *addr);

/*
 * Arguably these bit operations don't imply any memory barrier or
 * SMP ordering, but in fact a lot of drivers expect them to imply
 * both, since they do on x86 cpus.
 */
#ifdef CONFIG_SMP
#define SMP_WMB		"eieio\n"
#define SMP_MB		"\nsync"
#else
#define SMP_WMB
#define SMP_MB
#endif /* CONFIG_SMP */
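
/*
 * eieio ("enforce in-order execution of I/O") is the lighter write
 * barrier, ordering earlier stores ahead of later ones; sync is the
 * full barrier, completing all preceding storage accesses before any
 * later ones.  On non-SMP builds both macros expand to nothing, so
 * the operations below remain atomic but imply no ordering.
 */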

#define __INLINE_BITOPS	1

#if __INLINE_BITOPS
/*
 * These used to be if'd out here because using : "cc" as a constraint
 * resulted in errors from egcs.  Things may be OK with gcc-2.95.
 */
extern __inline__ void set_bit(int nr, volatile void * addr)
{
	unsigned long old;
	unsigned long mask = 1UL << (nr & 0x1f);
	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);

	__asm__ __volatile__(SMP_WMB "\
1:	lwarx	%0,0,%3\n\
	or	%0,%0,%2\n\
	stwcx.	%0,0,%3\n\
	bne	1b"
	SMP_MB
	: "=&r" (old), "=m" (*p)
	: "r" (mask), "r" (p), "m" (*p)
	: "cc" );
}
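
/*
 * The lwarx/stwcx. pair above is the standard PowerPC atomic
 * read-modify-write: lwarx loads the word and sets a reservation on
 * it, stwcx. stores back only if the reservation is still intact, and
 * bne retries if another processor touched the word in between.  As a
 * rough C sketch of the sequence (illustrative only; the point of the
 * asm is that the update is atomic):
 *
 *	do {
 *		old = *p;		load and reserve	(lwarx)
 *		old |= mask;		set the bit		(or)
 *	} while (store to *p fails);	store conditional	(stwcx./bne)
 */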

extern __inline__ void clear_bit(int nr, volatile void *addr)
{
	unsigned long old;
	unsigned long mask = 1UL << (nr & 0x1f);
	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);

	__asm__ __volatile__(SMP_WMB "\
1:	lwarx	%0,0,%3\n\
	andc	%0,%0,%2\n\
	stwcx.	%0,0,%3\n\
	bne	1b"
	SMP_MB
	: "=&r" (old), "=m" (*p)
	: "r" (mask), "r" (p), "m" (*p)
	: "cc");
}

extern __inline__ void change_bit(int nr, volatile void *addr)
{
	unsigned long old;
	unsigned long mask = 1UL << (nr & 0x1f);
	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);

	__asm__ __volatile__(SMP_WMB "\
1:	lwarx	%0,0,%3\n\
	xor	%0,%0,%2\n\
	stwcx.	%0,0,%3\n\
	bne	1b"
	SMP_MB
	: "=&r" (old), "=m" (*p)
	: "r" (mask), "r" (p), "m" (*p)
	: "cc");
}

extern __inline__ int test_and_set_bit(int nr, volatile void *addr)
{
	unsigned int old, t;
	unsigned int mask = 1U << (nr & 0x1f);
	volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);

	__asm__ __volatile__(SMP_WMB "\
1:	lwarx	%0,0,%4\n\
	or	%1,%0,%3\n\
	stwcx.	%1,0,%4\n\
	bne	1b"
	SMP_MB
	: "=&r" (old), "=&r" (t), "=m" (*p)
	: "r" (mask), "r" (p), "m" (*p)
	: "cc");

	return (old & mask) != 0;
}
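
/*
 * Since test_and_set_bit() atomically returns the bit's previous
 * value, it can act as a crude lock; minimal sketch (the "busy" word
 * here is illustrative, not part of this header):
 *
 *	static unsigned int busy;
 *
 *	while (test_and_set_bit(0, &busy))
 *		;		spin until we are the ones who set it
 *	...critical section...
 *	clear_bit(0, &busy);
 */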

extern __inline__ int test_and_clear_bit(int nr, volatile void *addr)
{
	unsigned int old, t;
	unsigned int mask = 1U << (nr & 0x1f);
	volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);

	__asm__ __volatile__(SMP_WMB "\
1:	lwarx	%0,0,%4\n\
	andc	%1,%0,%3\n\
	stwcx.	%1,0,%4\n\
	bne	1b"
	SMP_MB
	: "=&r" (old), "=&r" (t), "=m" (*p)
	: "r" (mask), "r" (p), "m" (*p)
	: "cc");

	return (old & mask) != 0;
}

extern __inline__ int test_and_change_bit(int nr, volatile void *addr)
{
	unsigned int old, t;
	unsigned int mask = 1U << (nr & 0x1f);
	volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);

	__asm__ __volatile__(SMP_WMB "\
1:	lwarx	%0,0,%4\n\
	xor	%1,%0,%3\n\
	stwcx.	%1,0,%4\n\
	bne	1b"
	SMP_MB
	: "=&r" (old), "=&r" (t), "=m" (*p)
	: "r" (mask), "r" (p), "m" (*p)
	: "cc");

	return (old & mask) != 0;
}
#endif /* __INLINE_BITOPS */

extern __inline__ int test_bit(int nr, __const__ volatile void *addr)
{
	__const__ unsigned int *p = (__const__ unsigned int *) addr;

	return ((p[nr >> 5] >> (nr & 0x1f)) & 1) != 0;
}
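
/*
 * Note that test_bit() is a plain, non-atomic load: it takes no
 * reservation and implies no barrier, so a concurrent set_bit() or
 * clear_bit() may be observed either before or after it.
 */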

/* Return the bit position of the most significant 1 bit in a word */
/* - the result is undefined when x == 0 */
extern __inline__ int __ilog2(unsigned int x)
{
	int lz;

	asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x));
	return 31 - lz;
}
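
/*
 * cntlzw counts leading zeros and yields 32 for a zero input, so on
 * this cpu __ilog2(0) works out to 31 - 32 = -1 (the fls() comment
 * below relies on that).  Worked example: __ilog2(0x40000000) has one
 * leading zero, giving 31 - 1 = 30.
 */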

extern __inline__ int ffz(unsigned int x)
{
	if ((x = ~x) == 0)
		return 32;
	return __ilog2(x & -x);
}
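
/*
 * ffz() inverts x so the first zero bit becomes the first one bit,
 * then isolates it with the two's-complement trick x & -x.  Worked
 * example: ffz(0x0000ffff) inverts to 0xffff0000, x & -x leaves
 * 0x00010000, and __ilog2 of that is 16 -- the first zero bit.
 */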

/*
 * fls: find last (most-significant) bit set.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 *
 * On powerpc, __ilog2(0) returns -1, but this is not safe in general
 */
static __inline__ int fls(unsigned int x)
{
	return __ilog2(x) + 1;
}
#define PLATFORM_FLS

/**
 * fls64 - find last set bit in a 64-bit word
 * @x: the word to search
 *
 * This is defined in a similar way to the libc and compiler builtin
 * ffsll, but returns the position of the most significant set bit.
 *
 * fls64(value) returns 0 if value is 0, or the position of the last
 * set bit if value is nonzero.  The last (most significant) bit is
 * at position 64.
 */
#if BITS_PER_LONG == 32
static inline int fls64(__u64 x)
{
	__u32 h = x >> 32;
	if (h)
		return fls(h) + 32;
	return fls(x);
}
#elif BITS_PER_LONG == 64
static inline int fls64(__u64 x)
{
	if (x == 0)
		return 0;
	return __ilog2(x) + 1;
}
#else
#error BITS_PER_LONG not 32 or 64
#endif
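
/*
 * Worked example on the 32-bit path: fls64(0x100000000ULL) has high
 * word h = 1, so the result is fls(1) + 32 = 33.  Note that the
 * 64-bit branch hands a __u64 to __ilog2(), whose unsigned int
 * argument would truncate it; on this 32-bit port that branch is
 * never compiled in.
 */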

static inline int __ilog2_u64(u64 n)
{
	return fls64(n) - 1;
}

static inline int ffs64(u64 x)
{
	return __ilog2_u64(x & -x) + 1;
}
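
/*
 * ffs64() isolates the lowest set bit and turns its index into a
 * 1-based position: ffs64(0x8000000000000000ULL) = 63 + 1 = 64, and
 * ffs64(0) = 0, since x & -x is then 0 and __ilog2_u64(0) is -1.
 */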

#ifdef __KERNEL__

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
extern __inline__ int ffs(int x)
{
	return __ilog2(x & -x) + 1;
}
#define PLATFORM_FFS
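
/*
 * Worked example: ffs(0x18) isolates the lowest set bit, 0x08, and
 * __ilog2(0x08) + 1 = 4.  ffs(0) takes the __ilog2(0) = -1 path and
 * returns 0, matching the libc convention.
 */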

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
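
/* For example, hweight32(0xf0f0f0f0) = 16 and hweight8(0x13) = 3. */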

#endif /* __KERNEL__ */

/*
 * This implementation of find_{first,next}_zero_bit was stolen from
 * Linus' asm-alpha/bitops.h.
 */
#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)

extern __inline__ unsigned long find_next_zero_bit(void * addr,
	unsigned long size, unsigned long offset)
{
	unsigned int * p = ((unsigned int *) addr) + (offset >> 5);
	unsigned int result = offset & ~31UL;
	unsigned int tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *p++;
		tmp |= ~0UL >> (32-offset);
		if (size < 32)
			goto found_first;
		if (tmp != ~0U)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size >= 32) {
		if ((tmp = *p++) != ~0U)
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;
found_first:
	tmp |= ~0UL << size;
found_middle:
	return result + ffz(tmp);
}
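
/*
 * The scan forces every bit that must be ignored to 1 and lets ffz()
 * find the first remaining zero: bits below the starting offset are
 * or'ed in up front, bits past the end are or'ed in at found_first,
 * and whole words are skipped while they equal ~0U.  For example, on
 * the two-word bitmap { 0xffffffff, 0xfffffff0 } a search from offset
 * 0 skips word 0 and returns 32 + ffz(0xfffffff0) = 32.
 */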

#define _EXT2_HAVE_ASM_BITOPS_

#ifdef __KERNEL__
/*
 * test_and_{set,clear}_bit guarantee atomicity without
 * disabling interrupts.
 */
#define ext2_set_bit(nr, addr)		test_and_set_bit((nr) ^ 0x18, addr)
#define ext2_clear_bit(nr, addr)	test_and_clear_bit((nr) ^ 0x18, addr)
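
/*
 * Ext2 bitmaps are little-endian, while the word-based bitops above
 * operate on big-endian 32-bit words, so the bit number is remapped
 * with "nr ^ 0x18" (0x18 = 24): the xor flips the two byte-select
 * bits of nr, swapping byte 0 with byte 3 and byte 1 with byte 2
 * within each word.  E.g. ext2 bit 0 lives in the lowest-addressed
 * byte, which a big-endian load places at word bits 24-31, and
 * 0 ^ 0x18 = 24.
 */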

#else
extern __inline__ int ext2_set_bit(int nr, void * addr)
{
	int		mask;
	unsigned char	*ADDR = (unsigned char *) addr;
	int oldbit;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	oldbit = (*ADDR & mask) ? 1 : 0;
	*ADDR |= mask;
	return oldbit;
}

extern __inline__ int ext2_clear_bit(int nr, void * addr)
{
	int		mask;
	unsigned char	*ADDR = (unsigned char *) addr;
	int oldbit;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	oldbit = (*ADDR & mask) ? 1 : 0;
	*ADDR = *ADDR & ~mask;
	return oldbit;
}
#endif	/* __KERNEL__ */

extern __inline__ int ext2_test_bit(int nr, __const__ void * addr)
{
	__const__ unsigned char *ADDR = (__const__ unsigned char *) addr;

	return (ADDR[nr >> 3] >> (nr & 7)) & 1;
}

/*
 * This implementation of ext2_find_{first,next}_zero_bit was stolen from
 * Linus' asm-alpha/bitops.h and modified for a big-endian machine.
 */

#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long ext2_find_next_zero_bit(void *addr,
	unsigned long size, unsigned long offset)
{
	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
	unsigned int result = offset & ~31UL;
	unsigned int tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = cpu_to_le32p(p++);
		tmp |= ~0UL >> (32-offset);
		if (size < 32)
			goto found_first;
		if (tmp != ~0U)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size >= 32) {
		if ((tmp = cpu_to_le32p(p++)) != ~0U)
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = cpu_to_le32p(p);
found_first:
	tmp |= ~0U << size;
found_middle:
	return result + ffz(tmp);
}
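
/*
 * This is the same scan as find_next_zero_bit() above, except that
 * each word is passed through cpu_to_le32p() (a byte swap on this
 * big-endian cpu) so the little-endian ext2 bitmap is searched in its
 * on-disk bit order.
 */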

/* Bitmap functions for the minix filesystem.  */
#define minix_test_and_set_bit(nr,addr) ext2_set_bit(nr,addr)
#define minix_set_bit(nr,addr) ((void)ext2_set_bit(nr,addr))
#define minix_test_and_clear_bit(nr,addr) ext2_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) ext2_test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) ext2_find_first_zero_bit(addr,size)

#endif /* _PPC_BITOPS_H */