linux/arch/m68k/include/asm/bitops.h
#ifndef _M68K_BITOPS_H
#define _M68K_BITOPS_H
/*
 * Copyright 1992, Linus Torvalds.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of this archive
 * for more details.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>

/*
 *	Bit access functions vary across the ColdFire and 68k families.
 *	So we will break them out here, and then macro in the ones we want.
 *
 *	ColdFire - supports standard bset/bclr/bchg with register operand only
 *	68000    - supports standard bset/bclr/bchg with memory operand
 *	>= 68020 - also supports the bfset/bfclr/bfchg instructions
 *
 *	Although it is possible to use only the bset/bclr/bchg with register
 *	operands on all platforms, you end up with larger generated code.
 *	So we use the best form possible on a given platform.
 */
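
/*
 * Note on the addressing used below: as in test_bit(), bit "nr" is the
 * (nr & 31)-th least significant bit of the longword vaddr[nr >> 5].  The
 * m68k is big-endian, so the byte holding that bit lives (nr ^ 31) / 8
 * bytes from vaddr and the bit within that byte is (nr & 7); that is what
 * the bset/bclr/bchg forms operate on.  The bfset/bfclr/bfchg bit-field
 * forms number bits from the most significant end of the longword instead,
 * hence the (nr ^ 31) conversion passed to them.
 */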

static inline void bset_reg_set_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bset %1,(%0)"
		:
		: "a" (p), "di" (nr & 7)
		: "memory");
}

static inline void bset_mem_set_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bset %1,%0"
		: "+m" (*p)
		: "di" (nr & 7));
}

static inline void bfset_mem_set_bit(int nr, volatile unsigned long *vaddr)
{
	__asm__ __volatile__ ("bfset %1{%0:#1}"
		:
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
}

#if defined(CONFIG_COLDFIRE)
#define set_bit(nr, vaddr)	bset_reg_set_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define set_bit(nr, vaddr)	bset_mem_set_bit(nr, vaddr)
#else
#define set_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
				bset_mem_set_bit(nr, vaddr) : \
				bfset_mem_set_bit(nr, vaddr))
#endif
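
/*
 * The __builtin_constant_p() test here (and in the equivalent selections
 * below) picks the form that should generate the better code: with a
 * constant bit number the byte-address arithmetic of the plain bset form
 * folds away at compile time, while a run-time bit number is handled by a
 * single bfset without any separate address calculation.
 */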

#define __set_bit(nr, vaddr)	set_bit(nr, vaddr)


/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

static inline void bclr_reg_clear_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bclr %1,(%0)"
		:
		: "a" (p), "di" (nr & 7)
		: "memory");
}

static inline void bclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bclr %1,%0"
		: "+m" (*p)
		: "di" (nr & 7));
}

static inline void bfclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
{
	__asm__ __volatile__ ("bfclr %1{%0:#1}"
		:
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
}

#if defined(CONFIG_COLDFIRE)
#define clear_bit(nr, vaddr)	bclr_reg_clear_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define clear_bit(nr, vaddr)	bclr_mem_clear_bit(nr, vaddr)
#else
#define clear_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
				bclr_mem_clear_bit(nr, vaddr) : \
				bfclr_mem_clear_bit(nr, vaddr))
#endif

#define __clear_bit(nr, vaddr)	clear_bit(nr, vaddr)


static inline void bchg_reg_change_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bchg %1,(%0)"
		:
		: "a" (p), "di" (nr & 7)
		: "memory");
}

static inline void bchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bchg %1,%0"
		: "+m" (*p)
		: "di" (nr & 7));
}

static inline void bfchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
{
	__asm__ __volatile__ ("bfchg %1{%0:#1}"
		:
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
}

#if defined(CONFIG_COLDFIRE)
#define change_bit(nr, vaddr)	bchg_reg_change_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define change_bit(nr, vaddr)	bchg_mem_change_bit(nr, vaddr)
#else
#define change_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
				bchg_mem_change_bit(nr, vaddr) : \
				bfchg_mem_change_bit(nr, vaddr))
#endif

#define __change_bit(nr, vaddr)	change_bit(nr, vaddr)


static inline int test_bit(int nr, const unsigned long *vaddr)
{
	return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
}
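
/*
 * Example: test_bit(35, map) inspects map[1] (35 >> 5 == 1) and tests its
 * bit 3 (35 & 31 == 3).
 */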


static inline int bset_reg_test_and_set_bit(int nr,
					    volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bset %2,(%1); sne %0"
		: "=d" (retval)
		: "a" (p), "di" (nr & 7)
		: "memory");
	return retval;
}
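
/*
 * As in the other test_and_* helpers below, bset/bclr/bchg test the
 * addressed bit before modifying it (Z is set in the CCR if the bit was
 * zero), and the trailing "sne" turns that into the return value: retval
 * ends up non-zero exactly when the bit was already set.
 */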

static inline int bset_mem_test_and_set_bit(int nr,
					    volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bset %2,%1; sne %0"
		: "=d" (retval), "+m" (*p)
		: "di" (nr & 7));
	return retval;
}

static inline int bfset_mem_test_and_set_bit(int nr,
					     volatile unsigned long *vaddr)
{
	char retval;

	__asm__ __volatile__ ("bfset %2{%1:#1}; sne %0"
		: "=d" (retval)
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
	return retval;
}

#if defined(CONFIG_COLDFIRE)
#define test_and_set_bit(nr, vaddr)	bset_reg_test_and_set_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define test_and_set_bit(nr, vaddr)	bset_mem_test_and_set_bit(nr, vaddr)
#else
#define test_and_set_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
					bset_mem_test_and_set_bit(nr, vaddr) : \
					bfset_mem_test_and_set_bit(nr, vaddr))
#endif

#define __test_and_set_bit(nr, vaddr)	test_and_set_bit(nr, vaddr)


static inline int bclr_reg_test_and_clear_bit(int nr,
					      volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bclr %2,(%1); sne %0"
		: "=d" (retval)
		: "a" (p), "di" (nr & 7)
		: "memory");
	return retval;
}

static inline int bclr_mem_test_and_clear_bit(int nr,
					      volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bclr %2,%1; sne %0"
		: "=d" (retval), "+m" (*p)
		: "di" (nr & 7));
	return retval;
}

static inline int bfclr_mem_test_and_clear_bit(int nr,
					       volatile unsigned long *vaddr)
{
	char retval;

	__asm__ __volatile__ ("bfclr %2{%1:#1}; sne %0"
		: "=d" (retval)
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
	return retval;
}

#if defined(CONFIG_COLDFIRE)
#define test_and_clear_bit(nr, vaddr)	bclr_reg_test_and_clear_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define test_and_clear_bit(nr, vaddr)	bclr_mem_test_and_clear_bit(nr, vaddr)
#else
#define test_and_clear_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
					bclr_mem_test_and_clear_bit(nr, vaddr) : \
					bfclr_mem_test_and_clear_bit(nr, vaddr))
#endif

#define __test_and_clear_bit(nr, vaddr) test_and_clear_bit(nr, vaddr)


static inline int bchg_reg_test_and_change_bit(int nr,
					       volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bchg %2,(%1); sne %0"
		: "=d" (retval)
		: "a" (p), "di" (nr & 7)
		: "memory");
	return retval;
}

static inline int bchg_mem_test_and_change_bit(int nr,
					       volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bchg %2,%1; sne %0"
		: "=d" (retval), "+m" (*p)
		: "di" (nr & 7));
	return retval;
}

static inline int bfchg_mem_test_and_change_bit(int nr,
						volatile unsigned long *vaddr)
{
	char retval;

	__asm__ __volatile__ ("bfchg %2{%1:#1}; sne %0"
		: "=d" (retval)
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
	return retval;
}

#if defined(CONFIG_COLDFIRE)
#define test_and_change_bit(nr, vaddr)	bchg_reg_test_and_change_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define test_and_change_bit(nr, vaddr)	bchg_mem_test_and_change_bit(nr, vaddr)
#else
#define test_and_change_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
					bchg_mem_test_and_change_bit(nr, vaddr) : \
					bfchg_mem_test_and_change_bit(nr, vaddr))
#endif

#define __test_and_change_bit(nr, vaddr) test_and_change_bit(nr, vaddr)


/*
 *	The true 68020 and more advanced processors support the "bfffo"
 *	instruction for finding bits. ColdFire and simple 68000 parts
 *	(including CPU32) do not support this. They simply use the generic
 *	functions.
 */
#if defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/ffz.h>
#else

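/*
 * These open-coded search functions rely on bfffo, which returns the offset
 * of the first set bit counting from the most significant end of the field
 * (or the field width, 32 here, when no bit is set).  Isolating the lowest
 * set bit with "num & -num" and XOR-ing the reported offset with 31 turns
 * that into the usual bit number counted from the least significant end.
 */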
static inline int find_first_zero_bit(const unsigned long *vaddr,
				      unsigned size)
{
	const unsigned long *p = vaddr;
	int res = 32;
	unsigned int words;
	unsigned long num;

	if (!size)
		return 0;

	words = (size + 31) >> 5;
	while (!(num = ~*p++)) {
		if (!--words)
			goto out;
	}

	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
			      : "=d" (res) : "d" (num & -num));
	res ^= 31;
out:
	res += ((long)p - (long)vaddr - 4) * 8;
	return res < size ? res : size;
}
#define find_first_zero_bit find_first_zero_bit

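/*
 * find_next_zero_bit() first checks the remainder of the longword that
 * "offset" points into, masking off the bits below offset, and then falls
 * back to find_first_zero_bit() for the rest of the bitmap.  find_next_bit()
 * below does the same for set bits.
 */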
static inline int find_next_zero_bit(const unsigned long *vaddr, int size,
				     int offset)
{
	const unsigned long *p = vaddr + (offset >> 5);
	int bit = offset & 31UL, res;

	if (offset >= size)
		return size;

	if (bit) {
		unsigned long num = ~*p++ & (~0UL << bit);
		offset -= bit;

		/* Look for zero in first longword */
		__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
				      : "=d" (res) : "d" (num & -num));
		if (res < 32) {
			offset += res ^ 31;
			return offset < size ? offset : size;
		}
		offset += 32;

		if (offset >= size)
			return size;
	}
	/* No zero yet, search remaining full bytes for a zero */
	return offset + find_first_zero_bit(p, size - offset);
}
#define find_next_zero_bit find_next_zero_bit

static inline int find_first_bit(const unsigned long *vaddr, unsigned size)
{
	const unsigned long *p = vaddr;
	int res = 32;
	unsigned int words;
	unsigned long num;

	if (!size)
		return 0;

	words = (size + 31) >> 5;
	while (!(num = *p++)) {
		if (!--words)
			goto out;
	}

	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
			      : "=d" (res) : "d" (num & -num));
	res ^= 31;
out:
	res += ((long)p - (long)vaddr - 4) * 8;
	return res < size ? res : size;
}
#define find_first_bit find_first_bit

static inline int find_next_bit(const unsigned long *vaddr, int size,
				int offset)
{
	const unsigned long *p = vaddr + (offset >> 5);
	int bit = offset & 31UL, res;

	if (offset >= size)
		return size;

	if (bit) {
		unsigned long num = *p++ & (~0UL << bit);
		offset -= bit;

		/* Look for one in first longword */
		__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
				      : "=d" (res) : "d" (num & -num));
		if (res < 32) {
			offset += res ^ 31;
			return offset < size ? offset : size;
		}
		offset += 32;

		if (offset >= size)
			return size;
	}
	/* No one yet, search remaining full bytes for a one */
	return offset + find_first_bit(p, size - offset);
}
#define find_next_bit find_next_bit

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	int res;

	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
			      : "=d" (res) : "d" (~word & -~word));
	return res ^ 31;
}
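
/* For example, ffz(0xff) == 8, since bit 8 is the lowest clear bit. */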

#endif

#ifdef __KERNEL__

#if defined(CONFIG_CPU_HAS_NO_BITFIELDS)

/*
 *	The newer ColdFire family members support a "bitrev" instruction
 *	and we can use that to implement a fast ffs. Older ColdFire parts
 *	and normal 68000 parts don't have anything special, so we use the
 *	generic functions for those.
 */
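/*
 * The trick: bitrev reverses the 32-bit word and ff1 then reports the
 * offset of the first set bit counted from the most significant end, which
 * for the reversed value is just the index of the lowest set bit in the
 * original, i.e. what __ffs() should return.
 */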
#if (defined(__mcfisaaplus__) || defined(__mcfisac__)) && \
	!defined(CONFIG_M68000) && !defined(CONFIG_MCPU32)
static inline int __ffs(int x)
{
	__asm__ __volatile__ ("bitrev %0; ff1 %0"
		: "=d" (x)
		: "0" (x));
	return x;
}

static inline int ffs(int x)
{
	if (!x)
		return 0;
	return __ffs(x) + 1;
}

#else
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/__ffs.h>
#endif

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>

#else

/*
 *	ffs: find first bit set. This is defined the same way as
 *	the libc and compiler builtin ffs routines, therefore
 *	differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int x)
{
	int cnt;

	__asm__ ("bfffo %1{#0:#0},%0"
		: "=d" (cnt)
		: "dm" (x & -x));
	return 32 - cnt;
}
#define __ffs(x) (ffs(x) - 1)
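
/*
 * Here "x & -x" isolates the lowest set bit and bfffo reports its offset
 * from the most significant end, so 32 - cnt is the 1-based index ffs()
 * must return (and 0 for x == 0).  For example, ffs(0x10) == 5, so
 * __ffs(0x10) == 4.
 */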

/*
 *	fls: find last bit set.
 */
static inline int fls(int x)
{
	int cnt;

	__asm__ ("bfffo %1{#0,#0},%0"
		: "=d" (cnt)
		: "dm" (x));
	return 32 - cnt;
}
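
/* For example, fls(0x10) == 5 and fls(0) == 0 (bfffo yields 32 for 0). */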

static inline int __fls(int x)
{
	return fls(x) - 1;
}

#endif

#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
#endif /* __KERNEL__ */

#endif /* _M68K_BITOPS_H */