linux/arch/parisc/include/asm/bitops.h
#ifndef _PARISC_BITOPS_H
#define _PARISC_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/types.h>          /* for BITS_PER_LONG/SHIFT_PER_LONG */
#include <asm/byteorder.h>
#include <asm/atomic.h>

/*
 * HP-PARISC specific bit operations.
 * For a detailed description of the functions, please refer
 * to include/asm-i386/bitops.h or the kerneldoc.
 */

#define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))
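
/*
 * Worked example (illustrative, not from the original source): on 64-bit
 * parisc, BITS_PER_LONG == 64 and SHIFT_PER_LONG == 6, so bit number 70
 * decomposes into
 *
 *      word offset:  70 >> SHIFT_PER_LONG == 1
 *      bit-in-word:  CHOP_SHIFTCOUNT(70)  == (70 & 63) == 6
 *
 * i.e. the mask 1UL << 6 applied to the second unsigned long of the bitmap.
 */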


#define smp_mb__before_clear_bit()      smp_mb()
#define smp_mb__after_clear_bit()       smp_mb()

/* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion
 * on use of volatile and __*_bit() (set/clear/change):
 *      *_bit() want use of volatile.
 *      __*_bit() are "relaxed" and don't use spinlock or volatile.
 */
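
/*
 * A hedged usage sketch (hypothetical caller, not part of this header):
 * the *_bit() operations below are safe on concurrently modified
 * bitmaps, while the __*_bit() variants pulled in from asm-generic
 * require external serialisation, such as a lock the caller holds:
 *
 *      static DEFINE_SPINLOCK(my_lock);        // hypothetical lock
 *      static unsigned long my_flags[1];       // hypothetical bitmap
 *
 *      set_bit(3, my_flags);                   // atomic, no caller locking
 *
 *      spin_lock(&my_lock);
 *      __set_bit(4, my_flags);                 // non-atomic, lock held
 *      spin_unlock(&my_lock);
 */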

static __inline__ void set_bit(int nr, volatile unsigned long * addr)
{
        unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
        unsigned long flags;

        addr += (nr >> SHIFT_PER_LONG);
        _atomic_spin_lock_irqsave(addr, flags);
        *addr |= mask;
        _atomic_spin_unlock_irqrestore(addr, flags);
}

static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
{
        unsigned long mask = ~(1UL << CHOP_SHIFTCOUNT(nr));
        unsigned long flags;

        addr += (nr >> SHIFT_PER_LONG);
        _atomic_spin_lock_irqsave(addr, flags);
        *addr &= mask;
        _atomic_spin_unlock_irqrestore(addr, flags);
}

static __inline__ void change_bit(int nr, volatile unsigned long * addr)
{
        unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
        unsigned long flags;

        addr += (nr >> SHIFT_PER_LONG);
        _atomic_spin_lock_irqsave(addr, flags);
        *addr ^= mask;
        _atomic_spin_unlock_irqrestore(addr, flags);
}

static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
{
        unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
        unsigned long old;
        unsigned long flags;
        int set;

        addr += (nr >> SHIFT_PER_LONG);
        _atomic_spin_lock_irqsave(addr, flags);
        old = *addr;
        set = (old & mask) ? 1 : 0;
        if (!set)
                *addr = old | mask;
        _atomic_spin_unlock_irqrestore(addr, flags);

        return set;
}
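
/*
 * A common usage pattern (hedged sketch; the names here are
 * hypothetical): because test_and_set_bit() atomically returns the old
 * value, it can serve as a one-shot guard:
 *
 *      static unsigned long init_done;         // hypothetical flag word
 *
 *      if (!test_and_set_bit(0, &init_done))
 *              run_one_time_init();            // hypothetical; first caller only
 */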

static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
{
        unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
        unsigned long old;
        unsigned long flags;
        int set;

        addr += (nr >> SHIFT_PER_LONG);
        _atomic_spin_lock_irqsave(addr, flags);
        old = *addr;
        set = (old & mask) ? 1 : 0;
        if (set)
                *addr = old & ~mask;
        _atomic_spin_unlock_irqrestore(addr, flags);

        return set;
}

static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
{
        unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
        unsigned long oldbit;
        unsigned long flags;

        addr += (nr >> SHIFT_PER_LONG);
        _atomic_spin_lock_irqsave(addr, flags);
        oldbit = *addr;
        *addr = oldbit ^ mask;
        _atomic_spin_unlock_irqrestore(addr, flags);

        return (oldbit & mask) ? 1 : 0;
}

#include <asm-generic/bitops/non-atomic.h>

#ifdef __KERNEL__

/**
 * __ffs - find first bit in word. returns 0 to "BITS_PER_LONG-1".
 * @word: The word to search
 *
 * __ffs() return is undefined if no bit is set.
 *
 * 32-bit fast __ffs by LaMont Jones "lamont At hp com".
 * 64-bit enhancement by Grant Grundler "grundler At parisc-linux org".
 * (with help from willy/jejb to get the semantics right)
 *
 * This algorithm avoids branches by making use of nullification.
 * One side effect of the "extr" instructions is that they can set the
 * PSW[N] (nullify next insn) bit; how it gets set is determined by the
 * "condition" field (e.g. "<>" or "TR" below) in the extr* insn.
 * Only the 1st and one of either the 2nd or 3rd insn will get executed.
 * Each set of 3 insns executes in 2 cycles on PA8x00, vs 16 or so
 * cycles for each mispredicted branch.
 */

static __inline__ unsigned long __ffs(unsigned long x)
{
        unsigned long ret;

        __asm__(
#ifdef CONFIG_64BIT
                " ldi       63,%1\n"
                " extrd,u,*<>  %0,63,32,%%r0\n"
                " extrd,u,*TR  %0,31,32,%0\n"   /* move top 32-bits down */
                " addi    -32,%1,%1\n"
#else
                " ldi       31,%1\n"
#endif
                " extru,<>  %0,31,16,%%r0\n"
                " extru,TR  %0,15,16,%0\n"      /* xxxx0000 -> 0000xxxx */
                " addi    -16,%1,%1\n"
                " extru,<>  %0,31,8,%%r0\n"
                " extru,TR  %0,23,8,%0\n"       /* 0000xx00 -> 000000xx */
                " addi    -8,%1,%1\n"
                " extru,<>  %0,31,4,%%r0\n"
                " extru,TR  %0,27,4,%0\n"       /* 000000x0 -> 0000000x */
                " addi    -4,%1,%1\n"
                " extru,<>  %0,31,2,%%r0\n"
                " extru,TR  %0,29,2,%0\n"       /* 0000000y, 1100b -> 0011b */
                " addi    -2,%1,%1\n"
                " extru,=  %0,31,1,%%r0\n"      /* check last bit */
                " addi    -1,%1,%1\n"
                        : "+r" (x), "=r" (ret) );
        return ret;
}
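
/*
 * For reference, a portable C sketch of the binary search the asm above
 * performs (an illustrative addition, not part of the original header;
 * the kernel uses the asm version). Each if/else pair mirrors one
 * extr/extr/addi triple: when the low half is non-zero the candidate
 * index shrinks, otherwise the upper half is shifted down and searched.
 */
static __inline__ unsigned long __ffs_portable_sketch(unsigned long x)
{
        unsigned long ret = BITS_PER_LONG - 1;

#ifdef CONFIG_64BIT
        if (x & 0xffffffffUL)
                ret -= 32;      /* first set bit is in the low word */
        else
                x >>= 32;       /* search the high word instead */
#endif
        if (x & 0xffffUL)
                ret -= 16;
        else
                x >>= 16;
        if (x & 0xffUL)
                ret -= 8;
        else
                x >>= 8;
        if (x & 0xfUL)
                ret -= 4;
        else
                x >>= 4;
        if (x & 0x3UL)
                ret -= 2;
        else
                x >>= 2;
        if (x & 0x1UL)
                ret -= 1;

        return ret;     /* undefined for x == 0, like __ffs() itself */
}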

#include <asm-generic/bitops/ffz.h>

/*
 * ffs: find first bit set. returns 1 to BITS_PER_LONG or 0 (if none set)
 * This is defined the same way as the libc and compiler builtin
 * ffs routines, therefore differs in spirit from the above ffz (man ffs).
 */
static __inline__ int ffs(int x)
{
        return x ? (__ffs((unsigned long)x) + 1) : 0;
}

/*
 * fls: find last (most significant) bit set.
 * fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */

static __inline__ int fls(int x)
{
        int ret;
        if (!x)
                return 0;

        __asm__(
        "       ldi             1,%1\n"
        "       extru,<>        %0,15,16,%%r0\n"
        "       zdep,TR         %0,15,16,%0\n"          /* xxxx0000 */
        "       addi            16,%1,%1\n"
        "       extru,<>        %0,7,8,%%r0\n"
        "       zdep,TR         %0,23,24,%0\n"          /* xx000000 */
        "       addi            8,%1,%1\n"
        "       extru,<>        %0,3,4,%%r0\n"
        "       zdep,TR         %0,27,28,%0\n"          /* x0000000 */
        "       addi            4,%1,%1\n"
        "       extru,<>        %0,1,2,%%r0\n"
        "       zdep,TR         %0,29,30,%0\n"          /* y0000000 (y&3 = 0) */
        "       addi            2,%1,%1\n"
        "       extru,=         %0,0,1,%%r0\n"
        "       addi            1,%1,%1\n"              /* if y & 8, add 1 */
                : "+r" (x), "=r" (ret) );

        return ret;
}
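
/*
 * Likewise, a portable C sketch of the fls() asm above (an illustrative
 * addition, not part of the original header). It is the mirror image of
 * __ffs(): the zdep instructions shift the value left instead of right,
 * and the count grows whenever the upper half is already non-zero.
 */
static __inline__ int fls_portable_sketch(unsigned int x)
{
        int ret = 1;

        if (!x)
                return 0;
        if (x & 0xffff0000u)
                ret += 16;      /* last set bit is in the high half */
        else
                x <<= 16;       /* shift the low half up and keep looking */
        if (x & 0xff000000u)
                ret += 8;
        else
                x <<= 8;
        if (x & 0xf0000000u)
                ret += 4;
        else
                x <<= 4;
        if (x & 0xc0000000u)
                ret += 2;
        else
                x <<= 2;
        if (x & 0x80000000u)
                ret += 1;

        return ret;
}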

#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/sched.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/ext2-non-atomic.h>

/* '<< 3' converts the byte offset to a bit offset (8 bits per byte) */
#define LE_BYTE_ADDR ((sizeof(unsigned long) - 1) << 3)

#define ext2_set_bit_atomic(l,nr,addr) \
                test_and_set_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
#define ext2_clear_bit_atomic(l,nr,addr) \
                test_and_clear_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
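
/*
 * Worked example (illustrative): ext2 bitmaps are little-endian byte
 * streams, while parisc is big-endian. On 64-bit, LE_BYTE_ADDR == 56
 * (binary 111000), so the XOR complements the byte index within the
 * long while leaving the bit-within-byte bits untouched: ext2 bit 0
 * (bit 0 of the first byte on disk) maps to 0 ^ 56 == 56, i.e. bit 0
 * of the most significant byte of the big-endian long, which is
 * exactly the first byte in memory.
 */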

#endif  /* __KERNEL__ */

#include <asm-generic/bitops/minix-le.h>

#endif /* _PARISC_BITOPS_H */