linux/arch/mn10300/include/asm/atomic.h
/* MN10300 Atomic counter operations
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <asm/irqflags.h>
#include <asm/cmpxchg.h>

#ifndef CONFIG_SMP
#include <asm-generic/atomic.h>
#else

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 */

#define ATOMIC_INIT(i)  { (i) }

#ifdef __KERNEL__

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_read(v)  (ACCESS_ONCE((v)->counter))

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_set(v, i) (((v)->counter) = (i))
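
/*
 * Illustrative use (not part of this API): a counter is declared with
 * ATOMIC_INIT and then touched only through the accessors in this
 * file, e.g.:
 *
 *      static atomic_t nr_widgets = ATOMIC_INIT(0);
 *
 *      atomic_set(&nr_widgets, 5);
 *      count = atomic_read(&nr_widgets);
 *
 * nr_widgets and count are hypothetical names used only for this
 * sketch.
 */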

/**
 * atomic_add_return - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns the result.
 * Note that the guaranteed useful range of an atomic_t is only 24 bits.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
        int retval;
#ifdef CONFIG_SMP
        int status;
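
        /*
         * SMP path: use the CPU's atomic operation unit.  As used
         * here, storing the target address to _AAR latches it and
         * makes the current value of that word readable through
         * _ADR; storing to _ADR writes the new value back; and _ASR
         * then reads back non-zero if another CPU disturbed the
         * update, in which case the whole read/modify/write sequence
         * is retried - an LL/SC-style protocol.
         */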
        asm volatile(
                "1:     mov     %4,(_AAR,%3)    \n"
                "       mov     (_ADR,%3),%1    \n"
                "       add     %5,%1           \n"
                "       mov     %1,(_ADR,%3)    \n"
                "       mov     (_ADR,%3),%0    \n"     /* flush */
                "       mov     (_ASR,%3),%0    \n"
                "       or      %0,%0           \n"
                "       bne     1b              \n"
                : "=&r"(status), "=&r"(retval), "=m"(v->counter)
                : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
                : "memory", "cc");

#else
        unsigned long flags;

        flags = arch_local_cli_save();
        retval = v->counter;
        retval += i;
        v->counter = retval;
        arch_local_irq_restore(flags);
#endif
        return retval;
}

/**
 * atomic_sub_return - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns the result.
 * Note that the guaranteed useful range of an atomic_t is only 24 bits.
 */
static inline int atomic_sub_return(int i, atomic_t *v)
{
        int retval;
#ifdef CONFIG_SMP
        int status;
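
        /* Same _AAR/_ADR/_ASR retry protocol as atomic_add_return(),
         * but subtracting. */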
        asm volatile(
                "1:     mov     %4,(_AAR,%3)    \n"
                "       mov     (_ADR,%3),%1    \n"
                "       sub     %5,%1           \n"
                "       mov     %1,(_ADR,%3)    \n"
                "       mov     (_ADR,%3),%0    \n"     /* flush */
                "       mov     (_ASR,%3),%0    \n"
                "       or      %0,%0           \n"
                "       bne     1b              \n"
                : "=&r"(status), "=&r"(retval), "=m"(v->counter)
                : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
                : "memory", "cc");

#else
        unsigned long flags;
        flags = arch_local_cli_save();
        retval = v->counter;
        retval -= i;
        v->counter = retval;
        arch_local_irq_restore(flags);
#endif
        return retval;
}

static inline int atomic_add_negative(int i, atomic_t *v)
{
        return atomic_add_return(i, v) < 0;
}

static inline void atomic_add(int i, atomic_t *v)
{
        atomic_add_return(i, v);
}

static inline void atomic_sub(int i, atomic_t *v)
{
        atomic_sub_return(i, v);
}

static inline void atomic_inc(atomic_t *v)
{
        atomic_add_return(1, v);
}

static inline void atomic_dec(atomic_t *v)
{
        atomic_sub_return(1, v);
}

#define atomic_dec_return(v)            atomic_sub_return(1, (v))
#define atomic_inc_return(v)            atomic_add_return(1, (v))

#define atomic_sub_and_test(i, v)       (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)          (atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)          (atomic_add_return(1, (v)) == 0)
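
/*
 * Illustrative use of the test variants: dropping a reference and
 * freeing the object once the count reaches zero, e.g.:
 *
 *      if (atomic_dec_and_test(&obj->refcnt))
 *              free_widget(obj);
 *
 * obj, refcnt and free_widget() are hypothetical names for this
 * sketch only.
 */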

#define __atomic_add_unless(v, a, u)                            \
({                                                              \
        int c, old;                                             \
        c = atomic_read(v);                                     \
        while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
                c = old;                                        \
        c;                                                      \
})
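
/*
 * Note: __atomic_add_unless() returns the value @v held before any
 * addition, so the add happened iff the return value differs from @u.
 * The generic <linux/atomic.h> wrappers build atomic_add_unless() (and
 * helpers such as atomic_inc_not_zero()) on top of this by making
 * exactly that comparison.
 */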

#define atomic_xchg(ptr, v)             (xchg(&(ptr)->counter, (v)))
#define atomic_cmpxchg(v, old, new)     (cmpxchg(&((v)->counter), (old), (new)))

/**
 * atomic_clear_mask - Atomically clear bits in memory
 * @mask: Mask of the bits to be cleared
 * @addr: pointer to word in memory
 *
 * Atomically clears the bits set in @mask from the memory word specified.
 */
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
#ifdef CONFIG_SMP
        int status;
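
        /* _AAR/_ADR/_ASR retry loop as in atomic_add_return(), AND-ing
         * in ~mask rather than adding. */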
        asm volatile(
                "1:     mov     %3,(_AAR,%2)    \n"
                "       mov     (_ADR,%2),%0    \n"
                "       and     %4,%0           \n"
                "       mov     %0,(_ADR,%2)    \n"
                "       mov     (_ADR,%2),%0    \n"     /* flush */
                "       mov     (_ASR,%2),%0    \n"
                "       or      %0,%0           \n"
                "       bne     1b              \n"
                : "=&r"(status), "=m"(*addr)
                : "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(~mask)
                : "memory", "cc");
#else
        unsigned long flags;

        mask = ~mask;
        flags = arch_local_cli_save();
        *addr &= mask;
        arch_local_irq_restore(flags);
#endif
}

/**
 * atomic_set_mask - Atomically set bits in memory
 * @mask: Mask of the bits to be set
 * @addr: pointer to word in memory
 *
 * Atomically sets the bits set in @mask in the memory word specified.
 */
static inline void atomic_set_mask(unsigned long mask, unsigned long *addr)
{
#ifdef CONFIG_SMP
        int status;
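
        /* As atomic_clear_mask(), but OR-ing the mask in. */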
        asm volatile(
                "1:     mov     %3,(_AAR,%2)    \n"
                "       mov     (_ADR,%2),%0    \n"
                "       or      %4,%0           \n"
                "       mov     %0,(_ADR,%2)    \n"
                "       mov     (_ADR,%2),%0    \n"     /* flush */
                "       mov     (_ASR,%2),%0    \n"
                "       or      %0,%0           \n"
                "       bne     1b              \n"
                : "=&r"(status), "=m"(*addr)
                : "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(mask)
                : "memory", "cc");
#else
        unsigned long flags;

        flags = arch_local_cli_save();
        *addr |= mask;
        arch_local_irq_restore(flags);
#endif
}

/*
 * Atomic operations should already be serializing on MN10300, so
 * compiler barriers suffice here.
 */
#define smp_mb__before_atomic_dec()     barrier()
#define smp_mb__after_atomic_dec()      barrier()
#define smp_mb__before_atomic_inc()     barrier()
#define smp_mb__after_atomic_inc()      barrier()

#endif /* __KERNEL__ */
#endif /* CONFIG_SMP */
#endif /* _ASM_ATOMIC_H */