uboot/arch/mips/include/asm/bitops.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 1999, 2000  Ralf Baechle (ralf@gnu.org)
 * Copyright (c) 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/types.h>
#include <asm/byteorder.h>		/* sigh ... */

#ifdef __KERNEL__

#include <asm/sgidefs.h>
#include <asm/system.h>
#include <linux/config.h>

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

/*
 * Only disable interrupts for kernel mode code, so that usermode code
 * that dares to use kernel include files stays alive.
 */
#define __bi_flags unsigned long flags
#define __bi_cli() __cli()
#define __bi_save_flags(x) __save_flags(x)
#define __bi_save_and_cli(x) __save_and_cli(x)
#define __bi_restore_flags(x) __restore_flags(x)
#else
#define __bi_flags
#define __bi_cli()
#define __bi_save_flags(x)
#define __bi_save_and_cli(x)
#define __bi_restore_flags(x)
#endif /* __KERNEL__ */

#ifdef CONFIG_CPU_HAS_LLSC

#include <asm/mipsregs.h>

/*
 * These functions, for MIPS ISA level 2 and above, use ll/sc and are
 * therefore interrupt and SMP safe.
 */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
set_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# set_bit\n\t"
		"or\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & 0x1f)), "m" (*m));
}
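
/*
 * Example (editor's illustration, not part of the original header): the
 * word index is nr >> 5 and the bit within that word is nr & 0x1f, so
 * with a hypothetical two-word bitmap:
 *
 *	static unsigned long bitmap[2];
 *
 *	set_bit(5, bitmap);	sets bit 5 of bitmap[0], i.e. mask 0x20
 *	set_bit(37, bitmap);	sets bit 5 of bitmap[1], since 37 >> 5 == 1
 *
 * Both stores retry the ll/sc sequence until the sc succeeds, so they
 * are safe against concurrent updates of the same word.
 */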

/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

	*m |= 1UL << (nr & 31);
}
#define PLATFORM__SET_BIT

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void
clear_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# clear_bit\n\t"
		"and\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & 0x1f))), "m" (*m));
}
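
/*
 * Sketch of the barrier usage described above (editor's illustration,
 * assuming a hypothetical word 'lock_word' whose bit 0 is a lock flag):
 *
 *	...write the data protected by the lock...
 *	smp_mb__before_clear_bit();	order prior stores first
 *	clear_bit(0, &lock_word);	atomically release the bit
 */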

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
change_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# change_bit\n\t"
		"xor\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & 0x1f)), "m" (*m));
}

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

	*m ^= 1UL << (nr & 31);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_set_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_set_bit\n"
		"1:\tll\t%0, %1\n\t"
		"or\t%2, %0, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}
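
/*
 * Illustrative lock-style use (editor's sketch, not original code): spin
 * until the old value of the bit was 0, i.e. until this CPU is the one
 * that set it.  'lock_word' is a hypothetical unsigned long.
 *
 *	while (test_and_set_bit(0, &lock_word))
 *		;	bit was already set, keep spinning
 *	...critical section...
 *	smp_mb__before_clear_bit();
 *	clear_bit(0, &lock_word);
 */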

/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_clear_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_clear_bit\n"
		"1:\tll\t%0, %1\n\t"
		"or\t%2, %0, %3\n\t"
		"xor\t%2, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}

/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;

	return retval;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_change_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_change_bit\n"
		"1:\tll\t%0, %1\n\t"
		"xor\t%2, %0, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;

	return retval;
}

#else /* MIPS I */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
	int mask;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	*a |= mask;
	__bi_restore_flags(flags);
}
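
/*
 * What the __bi_* macros above expand to in a kernel build (editor's
 * sketch, per the definitions at the top of this file):
 *
 *	unsigned long flags;
 *	__save_and_cli(flags);	save IRQ state and disable interrupts
 *	*a |= mask;		plain RMW, now safe from interrupts
 *	__restore_flags(flags);
 *
 * In a non-kernel build they expand to nothing, leaving a plain
 * non-atomic read-modify-write.
 */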

/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
	int mask;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a |= mask;
}

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
	int mask;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	*a &= ~mask;
	__bi_restore_flags(flags);
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void * addr)
{
	int mask;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	*a ^= mask;
	__bi_restore_flags(flags);
}

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

	*m ^= 1UL << (nr & 31);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	retval = (mask & *a) != 0;
	*a |= mask;
	__bi_restore_flags(flags);

	return retval;
}

/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	__bi_restore_flags(flags);

	return retval;
}

/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;

	return retval;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	retval = (mask & *a) != 0;
	*a ^= mask;
	__bi_restore_flags(flags);

	return retval;
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;

	return retval;
}

#undef __bi_flags
#undef __bi_cli
#undef __bi_save_flags
#undef __bi_save_and_cli
#undef __bi_restore_flags

#endif /* MIPS I */

/*
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static __inline__ int test_bit(int nr, volatile void *addr)
{
	return ((1UL << (nr & 31)) & (((const unsigned int *) addr)[nr >> 5])) != 0;
}
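
/*
 * Example (editor's illustration): test_bit(37, addr) reads word
 * ((unsigned int *) addr)[1] (37 >> 5 == 1) and tests the mask
 * 1UL << (37 & 31) == 0x20, returning nonzero iff that bit is set.
 */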

#ifndef __MIPSEB__

/* Little endian versions. */

/*
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the first zero bit, not the number of the byte
 * containing it.
 */
static __inline__ int find_first_zero_bit (void *addr, unsigned size)
{
	unsigned long dummy;
	int res;

	if (!size)
		return 0;

	__asm__ (".set\tnoreorder\n\t"
		".set\tnoat\n"
		"1:\tsubu\t$1,%6,%0\n\t"
		"blez\t$1,2f\n\t"
		"lw\t$1,(%5)\n\t"
		"addiu\t%5,4\n\t"
#if (_MIPS_ISA == _MIPS_ISA_MIPS2 ) || (_MIPS_ISA == _MIPS_ISA_MIPS3 ) || \
    (_MIPS_ISA == _MIPS_ISA_MIPS4 ) || (_MIPS_ISA == _MIPS_ISA_MIPS5 ) || \
    (_MIPS_ISA == _MIPS_ISA_MIPS32) || (_MIPS_ISA == _MIPS_ISA_MIPS64)
		"beql\t%1,$1,1b\n\t"
		"addiu\t%0,32\n\t"
#else
		"addiu\t%0,32\n\t"
		"beq\t%1,$1,1b\n\t"
		"nop\n\t"
		"subu\t%0,32\n\t"
#endif
#ifdef __MIPSEB__
#error "Fix this for big endian"
#endif /* __MIPSEB__ */
		"li\t%1,1\n"
		"1:\tand\t%2,$1,%1\n\t"
		"beqz\t%2,2f\n\t"
		"sll\t%1,%1,1\n\t"
		"bnez\t%1,1b\n\t"
		"add\t%0,%0,1\n\t"
		".set\tat\n\t"
		".set\treorder\n"
		"2:"
		: "=r" (res), "=r" (dummy), "=r" (addr)
		: "0" ((signed int) 0), "1" ((unsigned int) 0xffffffff),
		  "2" (addr), "r" (size)
		: "$1");

	return res;
}
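
/*
 * Rough C equivalent of the asm above (editor's sketch, for reading
 * only; the real code keeps everything in registers):
 *
 *	int res = 0;
 *	unsigned int w, m, *p = addr;
 *
 *	while (res < size && (w = *p++) == 0xffffffff)
 *		res += 32;			skip all-ones words
 *	if (res < size)
 *		for (m = 1; m != 0 && (w & m); m <<= 1)
 *			res++;			locate the zero bit
 *	return res;
 */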

/*
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size to search
 * @offset: The bit number to start searching at
 */
static __inline__ int find_next_zero_bit (void * addr, int size, int offset)
{
	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
	int set = 0, bit = offset & 31, res;
	unsigned long dummy;

	if (bit) {
		/*
		 * Look for a zero in the first word
		 */
#ifdef __MIPSEB__
#error "Fix this for big endian byte order"
#endif
		__asm__(".set\tnoreorder\n\t"
			".set\tnoat\n"
			"1:\tand\t$1,%4,%1\n\t"
			"beqz\t$1,1f\n\t"
			"sll\t%1,%1,1\n\t"
			"bnez\t%1,1b\n\t"
			"addiu\t%0,1\n\t"
			".set\tat\n\t"
			".set\treorder\n"
			"1:"
			: "=r" (set), "=r" (dummy)
			: "0" (0), "1" (1 << bit), "r" (*p)
			: "$1");
		if (set < (32 - bit))
			return set + offset;
		set = 32 - bit;
		p++;
	}
	/*
	 * No zero yet, search the remaining full words for a zero
	 */
	res = find_first_zero_bit(p, size - 32 * (p - (unsigned int *) addr));
	return offset + set + res;
}

#endif /* !(__MIPSEB__) */

/*
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
	unsigned int __res;
	unsigned int mask = 1;

	__asm__ (
		".set\tnoreorder\n\t"
		".set\tnoat\n\t"
		"move\t%0,$0\n"
		"1:\tand\t$1,%2,%1\n\t"
		"beqz\t$1,2f\n\t"
		"sll\t%1,1\n\t"
		"bnez\t%1,1b\n\t"
		"addiu\t%0,1\n\t"
		".set\tat\n\t"
		".set\treorder\n"
		"2:\n\t"
		: "=&r" (__res), "=r" (mask)
		: "r" (word), "1" (mask)
		: "$1");

	return __res;
}
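
/*
 * Example values (editor's illustration): ffz(0x00000000) == 0,
 * ffz(0x00000003) == 2 and ffz(0xfffffffe) == 0.  ffz(~0UL) is
 * undefined, which is why callers must check against ~0UL first.
 */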

#ifdef __KERNEL__

/*
 * hweightN - returns the hamming weight of a N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)

#endif /* __KERNEL__ */
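
/*
 * Example (editor's illustration): hweight8(0xf0) == 4 and
 * hweight32(0x80000001) == 2.  The generic_hweightN() helpers are the
 * portable bit-counting routines shared by all architectures.
 */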

#ifdef __MIPSEB__
/*
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size to search
 * @offset: The bit number to start searching at
 */
static __inline__ int find_next_zero_bit(void *addr, int size, int offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (32-offset);
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;
found_middle:
	return result + ffz(tmp);
}
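
/*
 * Illustrative loop over every clear bit (editor's sketch, assuming a
 * hypothetical bitmap[] covering 'nbits' bits):
 *
 *	for (i = find_first_zero_bit(bitmap, nbits);
 *	     i < nbits;
 *	     i = find_next_zero_bit(bitmap, nbits, i + 1))
 *		...bit i is currently clear...
 */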

/* Linus sez that gcc can optimize the following correctly, we'll see if this
 * holds here as it does for the Alpha.
 */

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/*
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the first zero bit, not the number of the byte
 * containing it.
 */
static int find_first_zero_bit (void *addr, unsigned size);
#endif

#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)

#endif /* (__MIPSEB__) */

/* Now for the ext2 filesystem bit operations and helper routines. */

#ifdef __MIPSEB__
static __inline__ int ext2_set_bit(int nr, void * addr)
{
	int mask, retval, flags;
	unsigned char *ADDR = (unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	save_and_cli(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR |= mask;
	restore_flags(flags);
	return retval;
}

static __inline__ int ext2_clear_bit(int nr, void * addr)
{
	int mask, retval, flags;
	unsigned char *ADDR = (unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	save_and_cli(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR &= ~mask;
	restore_flags(flags);
	return retval;
}

static __inline__ int ext2_test_bit(int nr, const void * addr)
{
	int mask;
	const unsigned char *ADDR = (const unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	return ((mask & *ADDR) != 0);
}

#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		/* We hold the little endian value in tmp, but then the
		 * shift below would be wrong.  We could keep a big endian
		 * value in tmp instead, like this:
		 *
		 * tmp = __swab32(*(p++));
		 * tmp |= ~0UL >> (32-offset);
		 *
		 * but this would decrease performance, so we change the
		 * shift instead:
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	/* tmp is little endian, so we would have to swab the shift,
	 * see above.  But then we have to swab tmp below for ffz, so
	 * we might as well do this here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
	return result + ffz(__swab32(tmp));
}
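
/*
 * Why the swab (editor's note): ext2 stores bitmaps little endian, so
 * bit 0 lives in the lowest-addressed byte.  For the on-disk bytes
 * 01 00 00 00, a big endian load yields 0x01000000; __swab32() turns
 * that back into 0x00000001, so ffz() numbers the bits as ext2
 * expects (here returning 1, the first clear bit).
 */
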
#else /* !(__MIPSEB__) */

/* Native ext2 byte ordering, just collapse using defines. */
#define ext2_set_bit(nr, addr) test_and_set_bit((nr), (addr))
#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr), (addr))
#define ext2_test_bit(nr, addr) test_bit((nr), (addr))
#define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr), (size))
#define ext2_find_next_zero_bit(addr, size, offset) \
		find_next_zero_bit((addr), (size), (offset))

#endif /* !(__MIPSEB__) */

/*
 * Bitmap functions for the minix filesystem.
 * FIXME: These assume that Minix uses the native byte/bit order,
 * which severely limits the Minix filesystem's value for data exchange.
 */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

#endif /* _ASM_BITOPS_H */