linux/arch/hexagon/include/asm/bitops.h
/*
 * Bit operations for the Hexagon architecture
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/compiler.h>
#include <asm/byteorder.h>
#include <asm/atomic.h>

#ifdef __KERNEL__

#define smp_mb__before_clear_bit()      barrier()
#define smp_mb__after_clear_bit()       barrier()

/*
 * The offset calculations for these are based on BITS_PER_LONG == 32:
 * the word index is nr >> 5 (32 bits per long), scaled to a byte
 * offset by << 2 (4 bytes per access), and the bit within the word
 * is nr & 0x0000001F.
 *
 * Typically, R10 is clobbered for the address, R11 for the bit number,
 * and R12 as a temporary.
 */
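
/*
 * Worked example (illustrative, not part of the build): for nr = 37,
 * the word index is 37 >> 5 = 1, so the address advances by 1 * 4
 * bytes, and the bit within that word is 37 & 0x1f = 5.
 */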

/**
 * test_and_clear_bit - clear a bit and return its old value
 * @nr:  bit number to clear
 * @addr:  pointer to memory
 */
static inline int test_and_clear_bit(int nr, volatile void *addr)
{
        int oldval;

        __asm__ __volatile__ (
        "       {R10 = %1; R11 = asr(%2,#5); }\n"
        "       {R10 += asl(R11,#2); R11 = and(%2,#0x1f)}\n"
        "1:     R12 = memw_locked(R10);\n"
        "       { P0 = tstbit(R12,R11); R12 = clrbit(R12,R11); }\n"
        "       memw_locked(R10,P1) = R12;\n"
        "       {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
        : "=&r" (oldval)
        : "r" (addr), "r" (nr)
        : "r10", "r11", "r12", "p0", "p1", "memory"
        );

        return oldval;
}

/**
 * test_and_set_bit - set a bit and return its old value
 * @nr:  bit number to set
 * @addr:  pointer to memory
 */
static inline int test_and_set_bit(int nr, volatile void *addr)
{
        int oldval;

        __asm__ __volatile__ (
        "       {R10 = %1; R11 = asr(%2,#5); }\n"
        "       {R10 += asl(R11,#2); R11 = and(%2,#0x1f)}\n"
        "1:     R12 = memw_locked(R10);\n"
        "       { P0 = tstbit(R12,R11); R12 = setbit(R12,R11); }\n"
        "       memw_locked(R10,P1) = R12;\n"
        "       {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
        : "=&r" (oldval)
        : "r" (addr), "r" (nr)
        : "r10", "r11", "r12", "p0", "p1", "memory"
        );

        return oldval;
}
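
/*
 * Usage sketch (illustrative only; try_grab_flag is a hypothetical
 * caller, not part of this header): test_and_set_bit returns the old
 * value, so a zero return means this caller won the race to set it.
 */
#if 0
static inline int try_grab_flag(volatile unsigned long *flags)
{
        /* Bit 0 was clear: we set it atomically and now own the flag. */
        return test_and_set_bit(0, flags) == 0;
}
#endif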

/**
 * test_and_change_bit - toggle a bit and return its old value
 * @nr:  bit number to toggle
 * @addr:  pointer to memory
 */
static inline int test_and_change_bit(int nr, volatile void *addr)
{
        int oldval;

        __asm__ __volatile__ (
        "       {R10 = %1; R11 = asr(%2,#5); }\n"
        "       {R10 += asl(R11,#2); R11 = and(%2,#0x1f)}\n"
        "1:     R12 = memw_locked(R10);\n"
        "       { P0 = tstbit(R12,R11); R12 = togglebit(R12,R11); }\n"
        "       memw_locked(R10,P1) = R12;\n"
        "       {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
        : "=&r" (oldval)
        : "r" (addr), "r" (nr)
        : "r10", "r11", "r12", "p0", "p1", "memory"
        );

        return oldval;
}

/*
 * Atomic, but doesn't care about the return value.
 * Rewrite later to save a cycle or two.
 */

static inline void clear_bit(int nr, volatile void *addr)
{
        test_and_clear_bit(nr, addr);
}

static inline void set_bit(int nr, volatile void *addr)
{
        test_and_set_bit(nr, addr);
}

static inline void change_bit(int nr, volatile void *addr)
{
        test_and_change_bit(nr, addr);
}

/*
 * These would be allowed to be non-atomic; the generic flavors in
 * asm-generic/bitops/non-atomic.h implement them that way.  However,
 * on this architecture an ordinary store does not invalidate another
 * core's LL/SC reservation, so mixing plain read-modify-write stores
 * with the locked sequences above could lose updates.  These therefore
 * have to stay atomic, particularly for users like slab_lock and
 * slab_unlock.
 */
static inline void __clear_bit(int nr, volatile unsigned long *addr)
{
        test_and_clear_bit(nr, addr);
}

static inline void __set_bit(int nr, volatile unsigned long *addr)
{
        test_and_set_bit(nr, addr);
}

static inline void __change_bit(int nr, volatile unsigned long *addr)
{
        test_and_change_bit(nr, addr);
}

/* The interface allows these to be non-atomic; here they are atomic, per the note above. */
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
        return test_and_clear_bit(nr, addr);
}

static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
        return test_and_set_bit(nr, addr);
}

static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
{
        return test_and_change_bit(nr, addr);
}

static inline int __test_bit(int nr, const volatile unsigned long *addr)
{
        int retval;

        asm volatile(
        "{P0 = tstbit(%1,%2); if (P0.new) %0 = #1; if (!P0.new) %0 = #0;}\n"
        : "=&r" (retval)
        : "r" (addr[BIT_WORD(nr)]), "r" (nr % BITS_PER_LONG)
        : "p0"
        );

        return retval;
}

#define test_bit(nr, addr) __test_bit(nr, addr)
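
/*
 * Example (illustrative): test_bit(37, bitmap) reads bitmap[1] and
 * tests bit 5 of that word, per the offset math described above.
 */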

/*
 * ffz - find first zero in word.
 * @x: the word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline long ffz(int x)
{
        int r;

        asm("%0 = ct1(%1);\n"
                : "=&r" (r)
                : "r" (x));
        return r;
}
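
/*
 * Example (illustrative): ct1 counts trailing ones, so
 * ffz(0x0000000f) == 4, the index of the lowest clear bit.
 */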

/*
 * fls - find last (most-significant) bit set
 * @x: the word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline long fls(int x)
{
        int r;

        asm("{ %0 = cl0(%1);}\n"
                "%0 = sub(#32,%0);\n"
                : "=&r" (r)
                : "r" (x)
                : "p0");

        return r;
}
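
/*
 * Example (illustrative): cl0 counts leading zeros, so
 * fls(0x00000010) == 32 - 27 == 5.
 */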

/*
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, and therefore differs in spirit from ffz above (see man ffs).
 */
static inline long ffs(int x)
{
        int r;

        asm("{ P0 = cmp.eq(%1,#0); %0 = ct0(%1);}\n"
                "{ if P0 %0 = #0; if !P0 %0 = add(%0,#1);}\n"
                : "=&r" (r)
                : "r" (x)
                : "p0");

        return r;
}
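
/*
 * Example (illustrative): ct0 counts trailing zeros, so
 * ffs(0x00000010) == 4 + 1 == 5, and the P0 mux makes ffs(0) == 0.
 */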

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 *
 * BITS_PER_LONG assumed to be 32; numbering starts at 0 (unlike ffs,
 * which starts at 1).
 */
static inline unsigned long __ffs(unsigned long word)
{
        int num;

        asm("%0 = ct0(%1);\n"
                : "=&r" (num)
                : "r" (word));

        return num;
}
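
/*
 * Example (illustrative): __ffs(0x00000010) == 4; note the zero-based
 * result, one less than ffs() for the same input.
 */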

/*
 * __fls - find last (most-significant) set bit in a long word
 * @word: the word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 * BITS_PER_LONG assumed to be 32.
 */
static inline unsigned long __fls(unsigned long word)
{
        int num;

        asm("%0 = cl0(%1);\n"
                "%0 = sub(#31,%0);\n"
                : "=&r" (num)
                : "r" (word));

        return num;
}
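
/*
 * Example (illustrative): __fls(0x80000000) == 31 - 0 == 31, and
 * __fls(1) == 31 - 31 == 0.
 */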

#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/find.h>

#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>

#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* __KERNEL__ */
#endif