uboot/arch/mips/include/asm/bitops.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 1999, 2000  Ralf Baechle (ralf@gnu.org)
 * Copyright (c) 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/types.h>
#include <asm/byteorder.h>              /* sigh ... */

#ifdef __KERNEL__

#include <asm/sgidefs.h>
#include <asm/system.h>

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()      barrier()
#define smp_mb__after_clear_bit()       barrier()

/*
 * Only disable interrupts for kernel-mode code, so that user-mode code
 * that dares to include kernel headers stays alive.
 */
#define __bi_flags unsigned long flags
#define __bi_cli() __cli()
#define __bi_save_flags(x) __save_flags(x)
#define __bi_save_and_cli(x) __save_and_cli(x)
#define __bi_restore_flags(x) __restore_flags(x)
#else
#define __bi_flags
#define __bi_cli()
#define __bi_save_flags(x)
#define __bi_save_and_cli(x)
#define __bi_restore_flags(x)
#endif /* __KERNEL__ */
#ifdef CONFIG_CPU_HAS_LLSC

#include <asm/mipsregs.h>

/*
 * These functions for MIPS ISA level 2 and above are interrupt and SMP
 * safe, yet interrupt friendly: they rely on ll/sc rather than on
 * disabling interrupts.
 */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
set_bit(int nr, volatile void *addr)
{
        unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
        unsigned long temp;

        __asm__ __volatile__(
                "1:\tll\t%0, %1\t\t# set_bit\n\t"
                "or\t%0, %2\n\t"
                "sc\t%0, %1\n\t"
                "beqz\t%0, 1b"
                : "=&r" (temp), "=m" (*m)
                : "ir" (1UL << (nr & 0x1f)), "m" (*m));
}
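
/*
 * Illustrative usage (the `pending' bitmap is hypothetical, not part of
 * this header):
 *
 *      static volatile unsigned long pending[2];
 *
 *      set_bit(0, pending);    atomically sets bit 0 of pending[0]
 *      set_bit(37, pending);   atomically sets bit 5 of pending[1]
 *
 * @nr is split into a word index (nr >> 5) and a bit offset (nr & 0x1f),
 * which is why bit numbers beyond 31 simply address later words.
 */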

/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
        unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

        *m |= 1UL << (nr & 31);
}
#define PLATFORM__SET_BIT

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void
clear_bit(int nr, volatile void *addr)
{
        unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
        unsigned long temp;

        __asm__ __volatile__(
                "1:\tll\t%0, %1\t\t# clear_bit\n\t"
                "and\t%0, %2\n\t"
                "sc\t%0, %1\n\t"
                "beqz\t%0, 1b\n\t"
                : "=&r" (temp), "=m" (*m)
                : "ir" (~(1UL << (nr & 0x1f))), "m" (*m));
}
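
/*
 * A minimal unlock-style sketch (LOCK_BIT and lock_word are hypothetical
 * caller-owned names):
 *
 *      smp_mb__before_clear_bit();
 *      clear_bit(LOCK_BIT, &lock_word);
 *
 * The barrier keeps stores to the protected data from being reordered
 * past the clearing of the lock bit by the compiler.
 */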

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to toggle
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
change_bit(int nr, volatile void *addr)
{
        unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
        unsigned long temp;

        __asm__ __volatile__(
                "1:\tll\t%0, %1\t\t# change_bit\n\t"
                "xor\t%0, %2\n\t"
                "sc\t%0, %1\n\t"
                "beqz\t%0, 1b"
                : "=&r" (temp), "=m" (*m)
                : "ir" (1UL << (nr & 0x1f)), "m" (*m));
}

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
        unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

        *m ^= 1UL << (nr & 31);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_set_bit(int nr, volatile void *addr)
{
        unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
        unsigned long temp, res;

        __asm__ __volatile__(
                ".set\tnoreorder\t\t# test_and_set_bit\n"
                "1:\tll\t%0, %1\n\t"
                "or\t%2, %0, %3\n\t"
                "sc\t%2, %1\n\t"
                "beqz\t%2, 1b\n\t"
                " and\t%2, %0, %3\n\t"
                ".set\treorder"
                : "=&r" (temp), "=m" (*m), "=&r" (res)
                : "r" (1UL << (nr & 0x1f)), "m" (*m)
                : "memory");

        return res != 0;
}
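
/*
 * test_and_set_bit() can serve as the acquire half of a simple bit lock.
 * Sketch only; LOCK_BIT and lock_word are hypothetical:
 *
 *      while (test_and_set_bit(LOCK_BIT, &lock_word))
 *              ;       old value was 1: the bit is already held
 *      ...critical section...
 *      smp_mb__before_clear_bit();
 *      clear_bit(LOCK_BIT, &lock_word);
 */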

/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
        int mask, retval;
        volatile int *a = addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        retval = (mask & *a) != 0;
        *a |= mask;

        return retval;
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_clear_bit(int nr, volatile void *addr)
{
        unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
        unsigned long temp, res;

        __asm__ __volatile__(
                ".set\tnoreorder\t\t# test_and_clear_bit\n"
                "1:\tll\t%0, %1\n\t"
                "or\t%2, %0, %3\n\t"
                "xor\t%2, %3\n\t"
                "sc\t%2, %1\n\t"
                "beqz\t%2, 1b\n\t"
                " and\t%2, %0, %3\n\t"
                ".set\treorder"
                : "=&r" (temp), "=m" (*m), "=&r" (res)
                : "r" (1UL << (nr & 0x1f)), "m" (*m)
                : "memory");

        return res != 0;
}

/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
        int     mask, retval;
        volatile int    *a = addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        retval = (mask & *a) != 0;
        *a &= ~mask;

        return retval;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_change_bit(int nr, volatile void *addr)
{
        unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
        unsigned long temp, res;

        __asm__ __volatile__(
                ".set\tnoreorder\t\t# test_and_change_bit\n"
                "1:\tll\t%0, %1\n\t"
                "xor\t%2, %0, %3\n\t"
                "sc\t%2, %1\n\t"
                "beqz\t%2, 1b\n\t"
                " and\t%2, %0, %3\n\t"
                ".set\treorder"
                : "=&r" (temp), "=m" (*m), "=&r" (res)
                : "r" (1UL << (nr & 0x1f)), "m" (*m)
                : "memory");

        return res != 0;
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
        int     mask, retval;
        volatile int    *a = addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        retval = (mask & *a) != 0;
        *a ^= mask;

        return retval;
}

#else /* MIPS I */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
        int     mask;
        volatile int    *a = addr;
        __bi_flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        __bi_save_and_cli(flags);
        *a |= mask;
        __bi_restore_flags(flags);
}

/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
        int     mask;
        volatile int    *a = addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        *a |= mask;
}

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
        int     mask;
        volatile int    *a = addr;
        __bi_flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        __bi_save_and_cli(flags);
        *a &= ~mask;
        __bi_restore_flags(flags);
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to toggle
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void * addr)
{
        int     mask;
        volatile int    *a = addr;
        __bi_flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        __bi_save_and_cli(flags);
        *a ^= mask;
        __bi_restore_flags(flags);
}

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
        unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

        *m ^= 1UL << (nr & 31);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
        int     mask, retval;
        volatile int    *a = addr;
        __bi_flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        __bi_save_and_cli(flags);
        retval = (mask & *a) != 0;
        *a |= mask;
        __bi_restore_flags(flags);

        return retval;
}

/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
        int     mask, retval;
        volatile int    *a = addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        retval = (mask & *a) != 0;
        *a |= mask;

        return retval;
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
        int     mask, retval;
        volatile int    *a = addr;
        __bi_flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        __bi_save_and_cli(flags);
        retval = (mask & *a) != 0;
        *a &= ~mask;
        __bi_restore_flags(flags);

        return retval;
}

/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
        int     mask, retval;
        volatile int    *a = addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        retval = (mask & *a) != 0;
        *a &= ~mask;

        return retval;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
        int     mask, retval;
        volatile int    *a = addr;
        __bi_flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        __bi_save_and_cli(flags);
        retval = (mask & *a) != 0;
        *a ^= mask;
        __bi_restore_flags(flags);

        return retval;
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
        int     mask, retval;
        volatile int    *a = addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        retval = (mask & *a) != 0;
        *a ^= mask;

        return retval;
}

#undef __bi_flags
#undef __bi_cli
#undef __bi_save_flags
#undef __bi_save_and_cli
#undef __bi_restore_flags

#endif /* MIPS I */

/*
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static __inline__ int test_bit(int nr, const volatile void *addr)
{
        return ((1UL << (nr & 31)) & (((const unsigned int *) addr)[nr >> 5])) != 0;
}
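
/*
 * Illustrative example (pending is hypothetical): with pending[0] set
 * to 0x00000009,
 *
 *      test_bit(0, pending) returns 1
 *      test_bit(1, pending) returns 0
 *      test_bit(3, pending) returns 1
 *
 * The read is a plain load; pair a test with one of the atomic
 * test_and_*() operations when the result must be tied to an update.
 */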

#ifndef __MIPSEB__

/* Little endian versions. */

/*
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static __inline__ int find_first_zero_bit (void *addr, unsigned size)
{
        unsigned long dummy;
        int res;

        if (!size)
                return 0;

        __asm__ (".set\tnoreorder\n\t"
                ".set\tnoat\n"
                "1:\tsubu\t$1,%6,%0\n\t"
                "blez\t$1,2f\n\t"
                "lw\t$1,(%5)\n\t"
                "addiu\t%5,4\n\t"
#if (_MIPS_ISA == _MIPS_ISA_MIPS2 ) || (_MIPS_ISA == _MIPS_ISA_MIPS3 ) || \
    (_MIPS_ISA == _MIPS_ISA_MIPS4 ) || (_MIPS_ISA == _MIPS_ISA_MIPS5 ) || \
    (_MIPS_ISA == _MIPS_ISA_MIPS32) || (_MIPS_ISA == _MIPS_ISA_MIPS64)
                "beql\t%1,$1,1b\n\t"
                "addiu\t%0,32\n\t"
#else
                "addiu\t%0,32\n\t"
                "beq\t%1,$1,1b\n\t"
                "nop\n\t"
                "subu\t%0,32\n\t"
#endif
#ifdef __MIPSEB__
#error "Fix this for big endian"
#endif /* __MIPSEB__ */
                "li\t%1,1\n"
                "1:\tand\t%2,$1,%1\n\t"
                "beqz\t%2,2f\n\t"
                "sll\t%1,%1,1\n\t"
                "bnez\t%1,1b\n\t"
                "add\t%0,%0,1\n\t"
                ".set\tat\n\t"
                ".set\treorder\n"
                "2:"
                : "=r" (res), "=r" (dummy), "=r" (addr)
                : "0" ((signed int) 0), "1" ((unsigned int) 0xffffffff),
                  "2" (addr), "r" (size)
                : "$1");

        return res;
}
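
/*
 * Illustrative example (map is hypothetical):
 *
 *      unsigned long map[1] = { 0x0000000f };
 *
 *      find_first_zero_bit(map, 32) yields 4; a result of at least
 *      @size means that no zero bit was found.
 */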

/*
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size to search
 * @offset: The bit number to start searching at
 */
static __inline__ int find_next_zero_bit (void * addr, int size, int offset)
{
        unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
        int set = 0, bit = offset & 31, res;
        unsigned long dummy;

        if (bit) {
                /*
                 * Look for a zero in the first word
                 */
#ifdef __MIPSEB__
#error "Fix this for big endian byte order"
#endif
                __asm__(".set\tnoreorder\n\t"
                        ".set\tnoat\n"
                        "1:\tand\t$1,%4,%1\n\t"
                        "beqz\t$1,1f\n\t"
                        "sll\t%1,%1,1\n\t"
                        "bnez\t%1,1b\n\t"
                        "addiu\t%0,1\n\t"
                        ".set\tat\n\t"
                        ".set\treorder\n"
                        "1:"
                        : "=r" (set), "=r" (dummy)
                        : "0" (0), "1" (1 << bit), "r" (*p)
                        : "$1");
                if (set < (32 - bit))
                        return set + offset;
                set = 32 - bit;
                p++;
        }
        /*
         * No zero yet, search the remaining full words for a zero
         */
        res = find_first_zero_bit(p, size - 32 * (p - (unsigned int *) addr));
        return offset + set + res;
}
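
/*
 * Typical scan over a bitmap for free slots (map and max are
 * hypothetical caller-owned names):
 *
 *      for (bit = find_first_zero_bit(map, max); bit < max;
 *           bit = find_next_zero_bit(map, max, bit + 1)) {
 *              ...bit is the index of a zero bit...
 *      }
 */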

#endif /* !(__MIPSEB__) */

/*
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
        unsigned int    __res;
        unsigned int    mask = 1;

        __asm__ (
                ".set\tnoreorder\n\t"
                ".set\tnoat\n\t"
                "move\t%0,$0\n"
                "1:\tand\t$1,%2,%1\n\t"
                "beqz\t$1,2f\n\t"
                "sll\t%1,1\n\t"
                "bnez\t%1,1b\n\t"
                "addiu\t%0,1\n\t"
                ".set\tat\n\t"
                ".set\treorder\n"
                "2:\n\t"
                : "=&r" (__res), "=r" (mask)
                : "r" (word), "1" (mask)
                : "$1");

        return __res;
}
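
/*
 * Illustrative values:
 *
 *      ffz(0x00000000) is 0
 *      ffz(0x0000ffff) is 16
 *      ffz(~0UL) is undefined; callers must test against ~0UL first
 */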

#ifdef __KERNEL__

/*
 * hweightN - returns the hamming weight of an N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
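
/*
 * Illustrative values:
 *
 *      hweight32(0xf0f0f0f0) is 16
 *      hweight16(0x00ff) is 8
 *      hweight8(0x81) is 2
 */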

#endif /* __KERNEL__ */

#ifdef __MIPSEB__
/*
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size to search
 * @offset: The bit number to start searching at
 */
static __inline__ int find_next_zero_bit(void *addr, int size, int offset)
{
        unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
        unsigned long result = offset & ~31UL;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if (offset) {
                tmp = *(p++);
                tmp |= ~0UL >> (32-offset);
                if (size < 32)
                        goto found_first;
                if (~tmp)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while (size & ~31UL) {
                if (~(tmp = *(p++)))
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        tmp |= ~0UL << size;
found_middle:
        return result + ffz(tmp);
}

/* Linus sez that gcc can optimize the following correctly, we'll see if this
 * holds here as it does for the ALPHA.
 */

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/*
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static int find_first_zero_bit (void *addr, unsigned size);
#endif

#define find_first_zero_bit(addr, size) \
        find_next_zero_bit((addr), (size), 0)

#endif /* (__MIPSEB__) */

/* Now for the ext2 filesystem bit operations and helper routines. */

#ifdef __MIPSEB__
static __inline__ int ext2_set_bit(int nr, void * addr)
{
        int             mask, retval, flags;
        unsigned char   *ADDR = (unsigned char *) addr;

        ADDR += nr >> 3;
        mask = 1 << (nr & 0x07);
        save_and_cli(flags);
        retval = (mask & *ADDR) != 0;
        *ADDR |= mask;
        restore_flags(flags);
        return retval;
}

static __inline__ int ext2_clear_bit(int nr, void * addr)
{
        int             mask, retval, flags;
        unsigned char   *ADDR = (unsigned char *) addr;

        ADDR += nr >> 3;
        mask = 1 << (nr & 0x07);
        save_and_cli(flags);
        retval = (mask & *ADDR) != 0;
        *ADDR &= ~mask;
        restore_flags(flags);
        return retval;
}

static __inline__ int ext2_test_bit(int nr, const void * addr)
{
        int                     mask;
        const unsigned char     *ADDR = (const unsigned char *) addr;

        ADDR += nr >> 3;
        mask = 1 << (nr & 0x07);
        return ((mask & *ADDR) != 0);
}
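
/*
 * The helpers above address bits in the little-endian byte order that
 * on-disk ext2 bitmaps use: bit 0 is the least significant bit of byte
 * 0, bit 8 the least significant bit of byte 1, and so on. Illustrative
 * example (map is hypothetical):
 *
 *      unsigned char map[2] = { 0, 0 };
 *
 *      ext2_set_bit(9, map);   sets bit 1 of map[1], so map[1] == 0x02
 */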

#define ext2_find_first_zero_bit(addr, size) \
        ext2_find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
        unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
        unsigned long result = offset & ~31UL;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if (offset) {
                /* We hold the little endian value in tmp, but the plain
                 * shift would be wrong for it. We could keep a big endian
                 * value in tmp instead, like this:
                 *
                 * tmp = __swab32(*(p++));
                 * tmp |= ~0UL >> (32-offset);
                 *
                 * but this would decrease performance, so we swab the
                 * shifted mask instead:
                 */
                tmp = *(p++);
                tmp |= __swab32(~0UL >> (32-offset));
                if (size < 32)
                        goto found_first;
                if (~tmp)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while (size & ~31UL) {
                if (~(tmp = *(p++)))
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        /* tmp is little endian, so we would have to swab the shift,
         * see above. But then we have to swab tmp below for ffz, so
         * we might as well do this here.
         */
        return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
        return result + ffz(__swab32(tmp));
}
#else /* !(__MIPSEB__) */

/* Native ext2 byte ordering, just collapse using defines. */
#define ext2_set_bit(nr, addr) test_and_set_bit((nr), (addr))
#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr), (addr))
#define ext2_test_bit(nr, addr) test_bit((nr), (addr))
#define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr), (size))
#define ext2_find_next_zero_bit(addr, size, offset) \
                find_next_zero_bit((addr), (size), (offset))

#endif /* !(__MIPSEB__) */

/*
 * Bitmap functions for the minix filesystem.
 * FIXME: These assume that Minix uses the native byte/bit order.
 * This severely limits the Minix filesystem's usefulness for data exchange.
 */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

#endif /* _ASM_BITOPS_H */