linux/arch/parisc/include/asm/bitops.h
#ifndef _PARISC_BITOPS_H
#define _PARISC_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/types.h>          /* for BITS_PER_LONG/SHIFT_PER_LONG */
#include <asm/byteorder.h>
#include <asm/barrier.h>
#include <linux/atomic.h>

/*
 * HP-PARISC specific bit operations.
 * For a detailed description of the functions, please refer
 * to include/asm-i386/bitops.h or kerneldoc.
 */

#define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))
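
/* Worked example (illustrative): with BITS_PER_LONG == 64 and
 * SHIFT_PER_LONG == 6, bit number 70 decomposes as
 *
 *      70 >> SHIFT_PER_LONG == 1              (the second long in the array)
 *      CHOP_SHIFTCOUNT(70)  == 70 & 63 == 6   (bit 6 within that long)
 *
 * so the operations below touch bit 6 of addr[1].
 */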

/* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion
 * on the use of volatile and __*_bit() (set/clear/change):
 *      the *_bit() forms want use of volatile;
 *      the __*_bit() forms are "relaxed" and use neither spinlocks nor volatile.
 */
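
/* A minimal usage sketch (hypothetical caller, not part of this header):
 * the locked forms below are safe against concurrent updates of the
 * same word, while the __*_bit() forms pulled in from
 * asm-generic/bitops/non-atomic.h assume the caller serializes access:
 *
 *      static unsigned long flags_word;        (hypothetical storage)
 *
 *      set_bit(0, &flags_word);        atomic: lock, OR in mask, unlock
 *      __set_bit(1, &flags_word);      plain read-modify-write
 */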

static __inline__ void set_bit(int nr, volatile unsigned long * addr)
{
        unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
        unsigned long flags;

        addr += (nr >> SHIFT_PER_LONG);
        _atomic_spin_lock_irqsave(addr, flags);
        *addr |= mask;
        _atomic_spin_unlock_irqrestore(addr, flags);
}

static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
{
        unsigned long mask = ~(1UL << CHOP_SHIFTCOUNT(nr));
        unsigned long flags;

        addr += (nr >> SHIFT_PER_LONG);
        _atomic_spin_lock_irqsave(addr, flags);
        *addr &= mask;
        _atomic_spin_unlock_irqrestore(addr, flags);
}

static __inline__ void change_bit(int nr, volatile unsigned long * addr)
{
        unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
        unsigned long flags;

        addr += (nr >> SHIFT_PER_LONG);
        _atomic_spin_lock_irqsave(addr, flags);
        *addr ^= mask;
        _atomic_spin_unlock_irqrestore(addr, flags);
}

static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
{
        unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
        unsigned long old;
        unsigned long flags;
        int set;

        addr += (nr >> SHIFT_PER_LONG);
        _atomic_spin_lock_irqsave(addr, flags);
        old = *addr;
        set = (old & mask) ? 1 : 0;
        if (!set)
                *addr = old | mask;
        _atomic_spin_unlock_irqrestore(addr, flags);

        return set;
}

static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
{
        unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
        unsigned long old;
        unsigned long flags;
        int set;

        addr += (nr >> SHIFT_PER_LONG);
        _atomic_spin_lock_irqsave(addr, flags);
        old = *addr;
        set = (old & mask) ? 1 : 0;
        if (set)
                *addr = old & ~mask;
        _atomic_spin_unlock_irqrestore(addr, flags);

        return set;
}

static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
{
        unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
        unsigned long oldbit;
        unsigned long flags;

        addr += (nr >> SHIFT_PER_LONG);
        _atomic_spin_lock_irqsave(addr, flags);
        oldbit = *addr;
        *addr = oldbit ^ mask;
        _atomic_spin_unlock_irqrestore(addr, flags);

        return (oldbit & mask) ? 1 : 0;
}
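
/* Usage sketch (illustrative names): the test_and_*_bit() forms return
 * the previous value of the bit, so test_and_set_bit() can serve as a
 * tiny try-lock:
 *
 *      if (!test_and_set_bit(MY_BUSY_BIT, &my_flags))
 *              do_work();      the bit was clear and we claimed it
 *      else
 *              busy();         someone else already owned the bit
 */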

#include <asm-generic/bitops/non-atomic.h>

#ifdef __KERNEL__

/**
 * __ffs - find first set bit in word. returns 0 to "BITS_PER_LONG-1".
 * @x: The word to search
 *
 * __ffs() return is undefined if no bit is set.
 *
 * 32-bit fast __ffs by LaMont Jones "lamont At hp com".
 * 64-bit enhancement by Grant Grundler "grundler At parisc-linux org".
 * (with help from willy/jejb to get the semantics right)
 *
 * This algorithm avoids branches by making use of nullification.
 * One side effect of the "extr" instructions is that they set the PSW[N]
 * (nullify next insn) bit; whether it gets set is determined by the
 * "condition" field (e.g. "<>" or "TR" below) in the extr* insn.
 * Only the 1st and one of either the 2nd or 3rd insn will get executed.
 * Each set of 3 insns executes in 2 cycles on PA8x00, vs 16 or so
 * cycles for each mispredicted branch.
 */

static __inline__ unsigned long __ffs(unsigned long x)
{
        unsigned long ret;

        __asm__(
#ifdef CONFIG_64BIT
                " ldi       63,%1\n"
                " extrd,u,*<>  %0,63,32,%%r0\n"
                " extrd,u,*TR  %0,31,32,%0\n"   /* move top 32-bits down */
                " addi    -32,%1,%1\n"
#else
                " ldi       31,%1\n"
#endif
                " extru,<>  %0,31,16,%%r0\n"
                " extru,TR  %0,15,16,%0\n"      /* xxxx0000 -> 0000xxxx */
                " addi    -16,%1,%1\n"
                " extru,<>  %0,31,8,%%r0\n"
                " extru,TR  %0,23,8,%0\n"       /* 0000xx00 -> 000000xx */
                " addi    -8,%1,%1\n"
                " extru,<>  %0,31,4,%%r0\n"
                " extru,TR  %0,27,4,%0\n"       /* 000000x0 -> 0000000x */
                " addi    -4,%1,%1\n"
                " extru,<>  %0,31,2,%%r0\n"
                " extru,TR  %0,29,2,%0\n"       /* 0000000y, 1100b -> 0011b */
                " addi    -2,%1,%1\n"
                " extru,=  %0,31,1,%%r0\n"      /* check last bit */
                " addi    -1,%1,%1\n"
                        : "+r" (x), "=r" (ret) );
        return ret;
}
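
/* Worked example (illustrative): __ffs(0x100) on 32-bit, where the
 * lowest set bit is bit 8.  ret starts at 31; each stage either shifts
 * the upper half down (low half all zero) or subtracts the stage width
 * (low half contains a set bit):
 *
 *      low 16 bits nonzero:    ret = 31 - 16 = 15
 *      low  8 bits zero:       x >>= 8 (x becomes 0x1), ret unchanged
 *      low  4 bits nonzero:    ret = 15 - 4 = 11
 *      low  2 bits nonzero:    ret = 11 - 2 = 9
 *      bit 0 set:              ret = 9 - 1 = 8
 *
 * so __ffs(0x100) == 8.
 */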

#include <asm-generic/bitops/ffz.h>

/*
 * ffs: find first bit set; returns 1 to 32, or 0 if no bit is set.
 * This is defined the same way as the libc and compiler builtin
 * ffs routines, and therefore differs in spirit from the above ffz (man ffs).
 */
static __inline__ int ffs(int x)
{
        return x ? (__ffs((unsigned long)x) + 1) : 0;
}
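
/* Example values (illustrative), contrasting ffs() with __ffs():
 *
 *      ffs(0)     == 0         (no bit set; __ffs(0) is undefined)
 *      ffs(1)     == 1         (__ffs(1) == 0)
 *      ffs(0x100) == 9         (__ffs(0x100) == 8)
 *
 * i.e. ffs() is 1-based like the libc routine, __ffs() is 0-based.
 */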

/*
 * fls: find last (most significant) bit set.
 * fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */

static __inline__ int fls(int x)
{
        int ret;
        if (!x)
                return 0;

        __asm__(
        "       ldi             1,%1\n"
        "       extru,<>        %0,15,16,%%r0\n"
        "       zdep,TR         %0,15,16,%0\n"          /* xxxx0000 */
        "       addi            16,%1,%1\n"
        "       extru,<>        %0,7,8,%%r0\n"
        "       zdep,TR         %0,23,24,%0\n"          /* xx000000 */
        "       addi            8,%1,%1\n"
        "       extru,<>        %0,3,4,%%r0\n"
        "       zdep,TR         %0,27,28,%0\n"          /* x0000000 */
        "       addi            4,%1,%1\n"
        "       extru,<>        %0,1,2,%%r0\n"
        "       zdep,TR         %0,29,30,%0\n"          /* y0000000 (y&3 = 0) */
        "       addi            2,%1,%1\n"
        "       extru,=         %0,0,1,%%r0\n"
        "       addi            1,%1,%1\n"              /* if y & 8, add 1 */
                : "+r" (x), "=r" (ret) );

        return ret;
}
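
/* Example values (illustrative):
 *
 *      fls(0)          == 0
 *      fls(1)          == 1
 *      fls(0x100)      == 9
 *      fls(0x80000000) == 32
 *
 * A common use is rounding up to a power of two: for x > 1,
 * 1UL << fls(x - 1) is the smallest power of two >= x.
 */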

#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/sched.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif  /* __KERNEL__ */

#endif /* _PARISC_BITOPS_H */