linux/arch/s390/include/asm/bitops.h
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/bitops.h"
 *    Copyright (C) 1992, Linus Torvalds
 *
 */

#ifndef _S390_BITOPS_H
#define _S390_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>

/*
 * 32 bit bitops format:
 * bit 0 is the LSB of *addr; bit 31 is the MSB of *addr;
 * bit 32 is the LSB of *(addr+4). That combined with the
 * big endian byte order on S390 gives the following bit
 * order in memory:
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10 \
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 * The reason for this bit ordering is the fact that
 * in the architecture independent code bit operations
 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
 * with operations of the form "set_bit(bitnr, flags)".
 *
 * 64 bit bitops format:
 * bit 0 is the LSB of *addr; bit 63 is the MSB of *addr;
 * bit 64 is the LSB of *(addr+8). That combined with the
 * big endian byte order on S390 gives the following bit
 * order in memory:
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    7f 7e 7d 7c 7b 7a 79 78 77 76 75 74 73 72 71 70
 *    6f 6e 6d 6c 6b 6a 69 68 67 66 65 64 63 62 61 60
 *    5f 5e 5d 5c 5b 5a 59 58 57 56 55 54 53 52 51 50
 *    4f 4e 4d 4c 4b 4a 49 48 47 46 45 44 43 42 41 40
 * The reason for this bit ordering is the fact that
 * in the architecture independent code bit operations
 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
 * with operations of the form "set_bit(bitnr, flags)".
 */
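
/*
 * For illustration: with this layout bit nr lives in the byte at
 * offset ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3) from the start of the
 * bitmap (__BITOPS_WORDSIZE is defined below). E.g. on 64 bit, for
 * nr = 0:
 *
 *        unsigned long flags = 0;
 *        flags |= 1UL << 0;        // equivalent: set_bit(0, &flags)
 *        // big endian memory image: 00 00 00 00 00 00 00 01
 *        // byte offset of bit 0:    (0 ^ 56) >> 3 == 7
 *
 * Both forms end up touching the same byte, which is why the two
 * styles can be intermixed.
 */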

/* bitmap tables from arch/s390/kernel/bitmap.c */
extern const char _oi_bitmap[];
extern const char _ni_bitmap[];
extern const char _zb_findmap[];
extern const char _sb_findmap[];

#ifndef CONFIG_64BIT

#define __BITOPS_ALIGN          3
#define __BITOPS_WORDSIZE       32
#define __BITOPS_OR             "or"
#define __BITOPS_AND            "nr"
#define __BITOPS_XOR            "xr"

#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \
        asm volatile(                                           \
                "       l       %0,%2\n"                        \
                "0:     lr      %1,%0\n"                        \
                __op_string "   %1,%3\n"                        \
                "       cs      %0,%1,%2\n"                     \
                "       jl      0b"                             \
                : "=&d" (__old), "=&d" (__new),                 \
                  "=Q" (*(unsigned long *) __addr)              \
                : "d" (__val), "Q" (*(unsigned long *) __addr)  \
                : "cc");

#else /* CONFIG_64BIT */

#define __BITOPS_ALIGN          7
#define __BITOPS_WORDSIZE       64
#define __BITOPS_OR             "ogr"
#define __BITOPS_AND            "ngr"
#define __BITOPS_XOR            "xgr"

#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \
        asm volatile(                                           \
                "       lg      %0,%2\n"                        \
                "0:     lgr     %1,%0\n"                        \
                __op_string "   %1,%3\n"                        \
                "       csg     %0,%1,%2\n"                     \
                "       jl      0b"                             \
                : "=&d" (__old), "=&d" (__new),                 \
                  "=Q" (*(unsigned long *) __addr)              \
                : "d" (__val), "Q" (*(unsigned long *) __addr)  \
                : "cc");

#endif /* CONFIG_64BIT */

#define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE)
#define __BITOPS_BARRIER() asm volatile("" : : : "memory")

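/*
 * Rough C sketch of what __BITOPS_LOOP expands to (illustrative only,
 * using GCC's __sync_val_compare_and_swap builtin in place of the
 * inline CS/CSG assembly):
 *
 *        unsigned long old, new, prev;
 *
 *        old = *addr;
 *        for (;;) {
 *                new = old | val;        // __op_string: OR, AND or XOR
 *                prev = __sync_val_compare_and_swap(addr, old, new);
 *                if (prev == old)        // store happened atomically
 *                        break;
 *                old = prev;             // *addr changed under us, retry
 *        }
 *
 * CS/CSG reloads the current memory value into old on failure, so the
 * loop retries until the read-modify-write completes atomically.
 */
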
#ifdef CONFIG_SMP
/*
 * SMP safe set_bit routine based on compare and swap (CS)
 */
static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr, old, new, mask;

        addr = (unsigned long) ptr;
        /* calculate address for CS */
        addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
        /* make OR mask */
        mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
        /* Do the atomic update. */
        __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
}

/*
 * SMP safe clear_bit routine based on compare and swap (CS)
 */
static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr, old, new, mask;

        addr = (unsigned long) ptr;
        /* calculate address for CS */
        addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
        /* make AND mask */
        mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
        /* Do the atomic update. */
        __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
}

/*
 * SMP safe change_bit routine based on compare and swap (CS)
 */
static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr, old, new, mask;

        addr = (unsigned long) ptr;
        /* calculate address for CS */
        addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
        /* make XOR mask */
        mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
        /* Do the atomic update. */
        __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
}

/*
 * SMP safe test_and_set_bit routine based on compare and swap (CS)
 */
static inline int
test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr, old, new, mask;

        addr = (unsigned long) ptr;
        /* calculate address for CS */
        addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
        /* make OR/test mask */
        mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
        /* Do the atomic update. */
        __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
        __BITOPS_BARRIER();
        return (old & mask) != 0;
}

/*
 * SMP safe test_and_clear_bit routine based on compare and swap (CS)
 */
static inline int
test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr, old, new, mask;

        addr = (unsigned long) ptr;
        /* calculate address for CS */
        addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
        /* make AND/test mask */
        mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
        /* Do the atomic update. */
        __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
        __BITOPS_BARRIER();
        return (old ^ new) != 0;
}

/*
 * SMP safe test_and_change_bit routine based on compare and swap (CS)
 */
static inline int
test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr, old, new, mask;

        addr = (unsigned long) ptr;
        /* calculate address for CS */
        addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
        /* make XOR/test mask */
        mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
        /* Do the atomic update. */
        __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
        __BITOPS_BARRIER();
        return (old & mask) != 0;
}
#endif /* CONFIG_SMP */

/*
 * fast, non-SMP set_bit routine
 */
static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr;

        addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
        asm volatile(
                "       oc      %O0(1,%R0),%1"
                : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
}

static inline void
__constant_set_bit(const unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr;

        addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
        *(unsigned char *) addr |= 1 << (nr & 7);
}

#define set_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_set_bit((nr),(addr)) : \
 __set_bit((nr),(addr)) )

/*
 * fast, non-SMP clear_bit routine
 */
static inline void
__clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr;

        addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
        asm volatile(
                "       nc      %O0(1,%R0),%1"
                : "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc" );
}

static inline void
__constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr;

        addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
        *(unsigned char *) addr &= ~(1 << (nr & 7));
}

#define clear_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_clear_bit((nr),(addr)) : \
 __clear_bit((nr),(addr)) )

/*
 * fast, non-SMP change_bit routine
 */
static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr;

        addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
        asm volatile(
                "       xc      %O0(1,%R0),%1"
                : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
}

static inline void
__constant_change_bit(const unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr;

        addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
        *(unsigned char *) addr ^= 1 << (nr & 7);
}

#define change_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_change_bit((nr),(addr)) : \
 __change_bit((nr),(addr)) )

/*
 * fast, non-SMP test_and_set_bit routine
 */
static inline int
test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr;
        unsigned char ch;

        addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
        ch = *(unsigned char *) addr;
        asm volatile(
                "       oc      %O0(1,%R0),%1"
                : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
                : "cc", "memory");
        return (ch >> (nr & 7)) & 1;
}
#define __test_and_set_bit(X,Y)         test_and_set_bit_simple(X,Y)

/*
 * fast, non-SMP test_and_clear_bit routine
 */
static inline int
test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr;
        unsigned char ch;

        addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
        ch = *(unsigned char *) addr;
        asm volatile(
                "       nc      %O0(1,%R0),%1"
                : "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7])
                : "cc", "memory");
        return (ch >> (nr & 7)) & 1;
}
#define __test_and_clear_bit(X,Y)       test_and_clear_bit_simple(X,Y)

/*
 * fast, non-SMP test_and_change_bit routine
 */
static inline int
test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr;
        unsigned char ch;

        addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
        ch = *(unsigned char *) addr;
        asm volatile(
                "       xc      %O0(1,%R0),%1"
                : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
                : "cc", "memory");
        return (ch >> (nr & 7)) & 1;
}
#define __test_and_change_bit(X,Y)      test_and_change_bit_simple(X,Y)

#ifdef CONFIG_SMP
#define set_bit             set_bit_cs
#define clear_bit           clear_bit_cs
#define change_bit          change_bit_cs
#define test_and_set_bit    test_and_set_bit_cs
#define test_and_clear_bit  test_and_clear_bit_cs
#define test_and_change_bit test_and_change_bit_cs
#else
#define set_bit             set_bit_simple
#define clear_bit           clear_bit_simple
#define change_bit          change_bit_simple
#define test_and_set_bit    test_and_set_bit_simple
#define test_and_clear_bit  test_and_clear_bit_simple
#define test_and_change_bit test_and_change_bit_simple
#endif

/*
 * This routine doesn't need to be atomic.
 */
static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr)
{
        unsigned long addr;
        unsigned char ch;

        addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
        ch = *(volatile unsigned char *) addr;
        return (ch >> (nr & 7)) & 1;
}

static inline int
__constant_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
        return (((volatile char *) addr)
                [(nr ^ (__BITOPS_WORDSIZE - 8)) >> 3] & (1 << (nr & 7))) != 0;
}

#define test_bit(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)) )
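
/*
 * For illustration: test_bit() is a plain, non-atomic read, e.g.
 *
 *        unsigned long flags = 0;
 *
 *        set_bit(1, &flags);
 *        if (test_bit(1, &flags))        // true, bit 1 is now set
 *                ...;
 */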

/*
 * Optimized find bit helper functions.
 */

/**
 * __ffz_word_loop - find byte offset of first long != -1UL
 * @addr: pointer to array of unsigned long
 * @size: size of the array in bits
 */
static inline unsigned long __ffz_word_loop(const unsigned long *addr,
                                            unsigned long size)
{
        typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
        unsigned long bytes = 0;

        asm volatile(
#ifndef CONFIG_64BIT
                "       ahi     %1,-1\n"
                "       sra     %1,5\n"
                "       jz      1f\n"
                "0:     c       %2,0(%0,%3)\n"
                "       jne     1f\n"
                "       la      %0,4(%0)\n"
                "       brct    %1,0b\n"
                "1:\n"
#else
                "       aghi    %1,-1\n"
                "       srag    %1,%1,6\n"
                "       jz      1f\n"
                "0:     cg      %2,0(%0,%3)\n"
                "       jne     1f\n"
                "       la      %0,8(%0)\n"
                "       brct    %1,0b\n"
                "1:\n"
#endif
                : "+&a" (bytes), "+&d" (size)
                : "d" (-1UL), "a" (addr), "m" (*(addrtype *) addr)
                : "cc" );
        return bytes;
}

/**
 * __ffs_word_loop - find byte offset of first long != 0UL
 * @addr: pointer to array of unsigned long
 * @size: size of the array in bits
 */
static inline unsigned long __ffs_word_loop(const unsigned long *addr,
                                            unsigned long size)
{
        typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
        unsigned long bytes = 0;

        asm volatile(
#ifndef CONFIG_64BIT
                "       ahi     %1,-1\n"
                "       sra     %1,5\n"
                "       jz      1f\n"
                "0:     c       %2,0(%0,%3)\n"
                "       jne     1f\n"
                "       la      %0,4(%0)\n"
                "       brct    %1,0b\n"
                "1:\n"
#else
                "       aghi    %1,-1\n"
                "       srag    %1,%1,6\n"
                "       jz      1f\n"
                "0:     cg      %2,0(%0,%3)\n"
                "       jne     1f\n"
                "       la      %0,8(%0)\n"
                "       brct    %1,0b\n"
                "1:\n"
#endif
                : "+&a" (bytes), "+&a" (size)
                : "d" (0UL), "a" (addr), "m" (*(addrtype *) addr)
                : "cc" );
        return bytes;
}

/**
 * __ffz_word - add number of the first unset bit
 * @nr: base value the bit number is added to
 * @word: the word that is searched for unset bits
 */
static inline unsigned long __ffz_word(unsigned long nr, unsigned long word)
{
#ifdef CONFIG_64BIT
        if ((word & 0xffffffff) == 0xffffffff) {
                word >>= 32;
                nr += 32;
        }
#endif
        if ((word & 0xffff) == 0xffff) {
                word >>= 16;
                nr += 16;
        }
        if ((word & 0xff) == 0xff) {
                word >>= 8;
                nr += 8;
        }
        return nr + _zb_findmap[(unsigned char) word];
}
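
/*
 * For illustration: __ffz_word(0, 0x00000000000000ffUL) on 64 bit:
 * the 32 bit and 16 bit tests fail, the low byte is 0xff, so word is
 * shifted right by 8 and nr becomes 8; _zb_findmap[0x00] is 0, hence
 * the result is bit number 8, the first zero bit.
 */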

/**
 * __ffs_word - add number of the first set bit
 * @nr: base value the bit number is added to
 * @word: the word that is searched for set bits
 */
static inline unsigned long __ffs_word(unsigned long nr, unsigned long word)
{
#ifdef CONFIG_64BIT
        if ((word & 0xffffffff) == 0) {
                word >>= 32;
                nr += 32;
        }
#endif
        if ((word & 0xffff) == 0) {
                word >>= 16;
                nr += 16;
        }
        if ((word & 0xff) == 0) {
                word >>= 8;
                nr += 8;
        }
        return nr + _sb_findmap[(unsigned char) word];
}
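
/*
 * For illustration: __ffs_word(0, 0x0000000000010000UL) on 64 bit:
 * the low 32 bits are non-zero, the low 16 bits are zero, so word is
 * shifted right by 16 and nr becomes 16; _sb_findmap[0x01] is 0,
 * hence the result is bit number 16, the first set bit.
 */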

/**
 * __load_ulong_be - load big endian unsigned long
 * @p: pointer to array of unsigned long
 * @offset: byte offset of source value in the array
 */
static inline unsigned long __load_ulong_be(const unsigned long *p,
                                            unsigned long offset)
{
        p = (unsigned long *)((unsigned long) p + offset);
        return *p;
}

/**
 * __load_ulong_le - load little endian unsigned long
 * @p: pointer to array of unsigned long
 * @offset: byte offset of source value in the array
 */
static inline unsigned long __load_ulong_le(const unsigned long *p,
                                            unsigned long offset)
{
        unsigned long word;

        p = (unsigned long *)((unsigned long) p + offset);
#ifndef CONFIG_64BIT
        asm volatile(
                "       ic      %0,%O1(%R1)\n"
                "       icm     %0,2,%O1+1(%R1)\n"
                "       icm     %0,4,%O1+2(%R1)\n"
                "       icm     %0,8,%O1+3(%R1)"
                : "=&d" (word) : "Q" (*p) : "cc");
#else
        asm volatile(
                "       lrvg    %0,%1"
                : "=d" (word) : "m" (*p) );
#endif
        return word;
}
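
/*
 * For illustration: on 64 bit __load_ulong_le uses LRVG (load
 * reversed), so a memory image of 01 02 03 04 05 06 07 08 is read as
 * 0x0807060504030201, i.e. the value a little endian machine would
 * load from the same eight bytes.
 */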

/*
 * The various find bit functions.
 */

/**
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
        return __ffz_word(0, word);
}

/**
 * __ffs - find first set bit in word.
 * @word: The word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
        return __ffs_word(0, word);
}

/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, therefore it differs in spirit from the above ffz (man ffs):
 * bits are numbered starting at 1, and 0 means no bit is set.
 */
static inline int ffs(int x)
{
        if (!x)
                return 0;
        return __ffs_word(1, x);
}
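
/*
 * For illustration: __ffs(0x10) == 4 and ffz(0x0f) == 4, both
 * numbering bits from 0, while ffs(0x10) == 5 because of the
 * one-based libc convention, and ffs(0) == 0.
 */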

/**
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static inline unsigned long find_first_zero_bit(const unsigned long *addr,
                                                unsigned long size)
{
        unsigned long bytes, bits;

        if (!size)
                return 0;
        bytes = __ffz_word_loop(addr, size);
        bits = __ffz_word(bytes*8, __load_ulong_be(addr, bytes));
        return (bits < size) ? bits : size;
}
#define find_first_zero_bit find_first_zero_bit

/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the first set bit, not the number of the byte
 * containing a bit.
 */
static inline unsigned long find_first_bit(const unsigned long *addr,
                                           unsigned long size)
{
        unsigned long bytes, bits;

        if (!size)
                return 0;
        bytes = __ffs_word_loop(addr, size);
        bits = __ffs_word(bytes*8, __load_ulong_be(addr, bytes));
        return (bits < size) ? bits : size;
}
#define find_first_bit find_first_bit

/**
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size to search
 * @offset: The bit number to start searching at
 */
static inline int find_next_zero_bit(const unsigned long *addr,
                                     unsigned long size,
                                     unsigned long offset)
{
        const unsigned long *p;
        unsigned long bit, set;

        if (offset >= size)
                return size;
        bit = offset & (__BITOPS_WORDSIZE - 1);
        offset -= bit;
        size -= offset;
        p = addr + offset / __BITOPS_WORDSIZE;
        if (bit) {
                /*
                 * __ffz_word returns __BITOPS_WORDSIZE
                 * if no zero bit is present in the word.
                 */
                set = __ffz_word(bit, *p >> bit);
                if (set >= size)
                        return size + offset;
                if (set < __BITOPS_WORDSIZE)
                        return set + offset;
                offset += __BITOPS_WORDSIZE;
                size -= __BITOPS_WORDSIZE;
                p++;
        }
        return offset + find_first_zero_bit(p, size);
}
#define find_next_zero_bit find_next_zero_bit

/**
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size to search
 * @offset: The bit number to start searching at
 */
static inline int find_next_bit(const unsigned long *addr,
                                unsigned long size,
                                unsigned long offset)
{
        const unsigned long *p;
        unsigned long bit, set;

        if (offset >= size)
                return size;
        bit = offset & (__BITOPS_WORDSIZE - 1);
        offset -= bit;
        size -= offset;
        p = addr + offset / __BITOPS_WORDSIZE;
        if (bit) {
                /*
                 * __ffs_word returns __BITOPS_WORDSIZE
                 * if no set bit is present in the word.
                 */
                set = __ffs_word(0, *p & (~0UL << bit));
                if (set >= size)
                        return size + offset;
                if (set < __BITOPS_WORDSIZE)
                        return set + offset;
                offset += __BITOPS_WORDSIZE;
                size -= __BITOPS_WORDSIZE;
                p++;
        }
        return offset + find_first_bit(p, size);
}
#define find_next_bit find_next_bit
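
/*
 * Typical use, sketched for illustration; this matches what the
 * generic for_each_set_bit() helper expands to:
 *
 *        unsigned long i;
 *
 *        for (i = find_first_bit(bitmap, size);
 *             i < size;
 *             i = find_next_bit(bitmap, size, i + 1)) {
 *                // bit i is set
 *        }
 */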

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static inline int sched_find_first_bit(unsigned long *b)
{
        return find_first_bit(b, 140);
}

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>

#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>

/*
 * ATTENTION: Intel byte ordering convention for ext2 and minix!
 * bit 0 is the LSB of addr; bit 31 is the MSB of addr;
 * bit 32 is the LSB of (addr+4).
 * That combined with the little endian byte order of Intel gives the
 * following bit order in memory:
 *    07 06 05 04 03 02 01 00 15 14 13 12 11 10 09 08 \
 *    23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
 */
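
/*
 * For illustration: in this little endian convention bit nr lives in
 * byte (nr >> 3) under mask (1 << (nr & 7)), so bit 0 is the LSB of
 * the very first byte. On big endian s390 the find_*_le helpers below
 * emulate this by loading each word byte-reversed via __load_ulong_le
 * before searching it.
 */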

static inline int find_first_zero_bit_le(void *vaddr, unsigned int size)
{
        unsigned long bytes, bits;

        if (!size)
                return 0;
        bytes = __ffz_word_loop(vaddr, size);
        bits = __ffz_word(bytes*8, __load_ulong_le(vaddr, bytes));
        return (bits < size) ? bits : size;
}
#define find_first_zero_bit_le find_first_zero_bit_le

static inline int find_next_zero_bit_le(void *vaddr, unsigned long size,
                                        unsigned long offset)
{
        unsigned long *addr = vaddr, *p;
        unsigned long bit, set;

        if (offset >= size)
                return size;
        bit = offset & (__BITOPS_WORDSIZE - 1);
        offset -= bit;
        size -= offset;
        p = addr + offset / __BITOPS_WORDSIZE;
        if (bit) {
                /*
                 * __ffz_word returns __BITOPS_WORDSIZE
                 * if no zero bit is present in the word.
                 */
                set = __ffz_word(bit, __load_ulong_le(p, 0) >> bit);
                if (set >= size)
                        return size + offset;
                if (set < __BITOPS_WORDSIZE)
                        return set + offset;
                offset += __BITOPS_WORDSIZE;
                size -= __BITOPS_WORDSIZE;
                p++;
        }
        return offset + find_first_zero_bit_le(p, size);
}
#define find_next_zero_bit_le find_next_zero_bit_le

static inline unsigned long find_first_bit_le(void *vaddr, unsigned long size)
{
        unsigned long bytes, bits;

        if (!size)
                return 0;
        bytes = __ffs_word_loop(vaddr, size);
        bits = __ffs_word(bytes*8, __load_ulong_le(vaddr, bytes));
        return (bits < size) ? bits : size;
}
#define find_first_bit_le find_first_bit_le

static inline int find_next_bit_le(void *vaddr, unsigned long size,
                                   unsigned long offset)
{
        unsigned long *addr = vaddr, *p;
        unsigned long bit, set;

        if (offset >= size)
                return size;
        bit = offset & (__BITOPS_WORDSIZE - 1);
        offset -= bit;
        size -= offset;
        p = addr + offset / __BITOPS_WORDSIZE;
        if (bit) {
                /*
                 * __ffs_word returns __BITOPS_WORDSIZE
                 * if no set bit is present in the word.
                 */
                set = __ffs_word(0, __load_ulong_le(p, 0) & (~0UL << bit));
                if (set >= size)
                        return size + offset;
                if (set < __BITOPS_WORDSIZE)
                        return set + offset;
                offset += __BITOPS_WORDSIZE;
                size -= __BITOPS_WORDSIZE;
                p++;
        }
        return offset + find_first_bit_le(p, size);
}
#define find_next_bit_le find_next_bit_le

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* _S390_BITOPS_H */