uboot/arch/microblaze/include/asm/bitops.h
#ifndef _MICROBLAZE_BITOPS_H
#define _MICROBLAZE_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <linux/config.h>
#include <asm/byteorder.h>      /* swab32 */
#include <asm/system.h>         /* save_flags */

#ifdef __KERNEL__
/*
 * Function prototypes to keep gcc -Wall happy
 */

/*
 * The __ functions are not atomic: they do not disable interrupts, so
 * they may only be used when the caller guarantees exclusive access to
 * the bitmap.
 */

extern void set_bit(int nr, volatile void * addr);
extern void __set_bit(int nr, volatile void * addr);

extern void clear_bit(int nr, volatile void * addr);
#define __clear_bit(nr, addr) clear_bit(nr, addr)
#define PLATFORM__CLEAR_BIT

extern void change_bit(int nr, volatile void * addr);
extern void __change_bit(int nr, volatile void * addr);
extern int test_and_set_bit(int nr, volatile void * addr);
extern int __test_and_set_bit(int nr, volatile void * addr);
extern int test_and_clear_bit(int nr, volatile void * addr);
extern int __test_and_clear_bit(int nr, volatile void * addr);
extern int test_and_change_bit(int nr, volatile void * addr);
extern int __test_and_change_bit(int nr, volatile void * addr);
extern int __constant_test_bit(int nr, const volatile void * addr);
extern int __test_bit(int nr, volatile void * addr);
extern int find_first_zero_bit(void * addr, unsigned size);
extern int find_next_zero_bit (void * addr, int size, int offset);

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first.
 */
extern __inline__ unsigned long ffz(unsigned long word)
{
        unsigned long result = 0;

        while(word & 1) {
                result++;
                word >>= 1;
        }
        return result;
}
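
/*
 * For example, ffz(0x0000000f) returns 4: bits 0-3 are set, so bit 4 is
 * the first zero. Remember to check word != ~0UL before calling ffz().
 */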
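/*
 * The "atomic" variants below gain their atomicity by disabling
 * interrupts around the read-modify-write sequence with
 * save_flags_cli()/restore_flags() from <asm/system.h>; this is
 * sufficient on a uniprocessor system.
 */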
extern __inline__ void set_bit(int nr, volatile void * addr)
{
        int     * a = (int *) addr;
        int     mask;
        unsigned long flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        save_flags_cli(flags);
        *a |= mask;
        restore_flags(flags);
}

extern __inline__ void __set_bit(int nr, volatile void * addr)
{
        int     * a = (int *) addr;
        int     mask;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        *a |= mask;
}
#define PLATFORM__SET_BIT

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()      barrier()
#define smp_mb__after_clear_bit()       barrier()

extern __inline__ void clear_bit(int nr, volatile void * addr)
{
        int     * a = (int *) addr;
        int     mask;
        unsigned long flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        save_flags_cli(flags);
        *a &= ~mask;
        restore_flags(flags);
}

extern __inline__ void change_bit(int nr, volatile void * addr)
{
        int mask;
        unsigned long flags;
        unsigned long *ADDR = (unsigned long *) addr;

        ADDR += nr >> 5;
        mask = 1 << (nr & 31);
        save_flags_cli(flags);
        *ADDR ^= mask;
        restore_flags(flags);
}

extern __inline__ void __change_bit(int nr, volatile void * addr)
{
        int mask;
        unsigned long *ADDR = (unsigned long *) addr;

        ADDR += nr >> 5;
        mask = 1 << (nr & 31);
        *ADDR ^= mask;
}

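/*
 * The test_and_*_bit() helpers return the previous value of the bit
 * (non-zero if it was set) and then update it; the __ variants do the
 * same without disabling interrupts.
 */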
extern __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
        int     mask, retval;
        volatile unsigned int *a = (volatile unsigned int *) addr;
        unsigned long flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        save_flags_cli(flags);
        retval = (mask & *a) != 0;
        *a |= mask;
        restore_flags(flags);

        return retval;
}

extern __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
        int     mask, retval;
        volatile unsigned int *a = (volatile unsigned int *) addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        retval = (mask & *a) != 0;
        *a |= mask;
        return retval;
}

extern __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
        int     mask, retval;
        volatile unsigned int *a = (volatile unsigned int *) addr;
        unsigned long flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        save_flags_cli(flags);
        retval = (mask & *a) != 0;
        *a &= ~mask;
        restore_flags(flags);

        return retval;
}

extern __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
        int     mask, retval;
        volatile unsigned int *a = (volatile unsigned int *) addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        retval = (mask & *a) != 0;
        *a &= ~mask;
        return retval;
}

extern __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
        int     mask, retval;
        volatile unsigned int *a = (volatile unsigned int *) addr;
        unsigned long flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        save_flags_cli(flags);
        retval = (mask & *a) != 0;
        *a ^= mask;
        restore_flags(flags);

        return retval;
}

extern __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
        int     mask, retval;
        volatile unsigned int *a = (volatile unsigned int *) addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        retval = (mask & *a) != 0;
        *a ^= mask;
        return retval;
}

/*
 * This routine doesn't need to be atomic.
 */
extern __inline__ int __constant_test_bit(int nr, const volatile void * addr)
{
        return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

extern __inline__ int __test_bit(int nr, volatile void * addr)
{
        int     * a = (int *) addr;
        int     mask;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        return ((mask & *a) != 0);
}

#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)))
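
/*
 * test_bit() resolves to __constant_test_bit() when 'nr' is a
 * compile-time constant, so the word index and bit mask fold into
 * constants; otherwise it falls back to __test_bit().
 */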

#define find_first_zero_bit(addr, size) \
        find_next_zero_bit((addr), (size), 0)

extern __inline__ int find_next_zero_bit (void * addr, int size, int offset)
{
        unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
        unsigned long result = offset & ~31UL;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if (offset) {
                tmp = *(p++);
                tmp |= ~0UL >> (32-offset);
                if (size < 32)
                        goto found_first;
                if (~tmp)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while (size & ~31UL) {
                if (~(tmp = *(p++)))
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        tmp |= ~0UL << size;    /* set the unused bits above 'size' so ffz() skips them */
found_middle:
        return result + ffz(tmp);
}
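
/*
 * Illustrative example (not part of the original source): given
 *
 *   unsigned long map[2] = { 0xffffffff, 0x0000ffff };
 *
 * find_first_zero_bit(map, 64) returns 48, because word 0 and the low
 * 16 bits of word 1 are all set. When no zero bit exists below 'size',
 * the return value is >= size.
 */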

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of an N-bit word
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
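
/*
 * For example, hweight32(0xf0f0f0f0) evaluates to 16. The
 * generic_hweightN() helpers are the architecture-independent
 * implementations provided by the common headers.
 */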
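
/*
 * The ext2_*_bit() helpers operate on little-endian bitmaps as stored
 * in ext2 on-disk metadata: bit 'nr' lives in byte (nr >> 3) at bit
 * position (nr & 7), independent of the CPU's native bit numbering.
 * For example, ext2_test_bit(9, buf) tests bit 1 of byte buf[1].
 */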
extern __inline__ int ext2_set_bit(int nr, volatile void * addr)
{
        int             mask, retval;
        unsigned long   flags;
        volatile unsigned char  *ADDR = (unsigned char *) addr;

        ADDR += nr >> 3;
        mask = 1 << (nr & 0x07);
        save_flags_cli(flags);
        retval = (mask & *ADDR) != 0;
        *ADDR |= mask;
        restore_flags(flags);
        return retval;
}

extern __inline__ int ext2_clear_bit(int nr, volatile void * addr)
{
        int             mask, retval;
        unsigned long   flags;
        volatile unsigned char  *ADDR = (unsigned char *) addr;

        ADDR += nr >> 3;
        mask = 1 << (nr & 0x07);
        save_flags_cli(flags);
        retval = (mask & *ADDR) != 0;
        *ADDR &= ~mask;
        restore_flags(flags);
        return retval;
}

extern __inline__ int ext2_test_bit(int nr, const volatile void * addr)
{
        int                     mask;
        const volatile unsigned char    *ADDR = (const unsigned char *) addr;

        ADDR += nr >> 3;
        mask = 1 << (nr & 0x07);
        return ((mask & *ADDR) != 0);
}

#define ext2_find_first_zero_bit(addr, size) \
        ext2_find_next_zero_bit((addr), (size), 0)

extern __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
        unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
        unsigned long result = offset & ~31UL;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if(offset) {
                /* tmp holds the raw little-endian value, so applying the
                 * offset mask to it directly would touch the wrong bits.
                 * We could keep a byte-swapped value in tmp instead, like
                 * this:
                 *
                 * tmp = __swab32(*(p++));
                 * tmp |= ~0UL >> (32-offset);
                 *
                 * but that would decrease performance, so we swap the
                 * mask instead:
                 */
                tmp = *(p++);
                tmp |= __swab32(~0UL >> (32-offset));
                if(size < 32)
                        goto found_first;
                if(~tmp)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while(size & ~31UL) {
                if(~(tmp = *(p++)))
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if(!size)
                return result;
        tmp = *p;

found_first:
        /* tmp is little endian, so we would have to swab the shift mask,
         * see above. But since tmp must be swabbed for ffz() anyway, we
         * might as well do it here.
         */
        return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
        return result + ffz(__swab32(tmp));
}
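
/*
 * Illustrative example (not part of the original source): if the first
 * two bytes of the bitmap are 0xff and 0x3f, ext2 bits 0-13 are set and
 * bit 14 is the first clear one, so ext2_find_first_zero_bit(buf, 16)
 * returns 14.
 */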

/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */

#endif /* _MICROBLAZE_BITOPS_H */