linux/arch/arc/include/asm/cmpxchg.h
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_ARC_CMPXCHG_H
#define __ASM_ARC_CMPXCHG_H

#include <linux/types.h>
#include <asm/smp.h>

#ifdef CONFIG_ARC_HAS_LLSC

/*
 * LLOCK/SCOND based compare-and-exchange: returns the value that was in
 * memory at @ptr before the (attempted) update.
 */
static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
{
        unsigned long prev;

        __asm__ __volatile__(
        "1:     llock   %0, [%1]        \n"
        "       brne    %0, %2, 2f      \n"
        "       scond   %3, [%1]        \n"
        "       bnz     1b              \n"
        "2:                             \n"
        : "=&r"(prev)
        : "r"(ptr), "ir"(expected),
          "r"(new) /* can't be "ir". scond can't take limm for "b" */
        : "cc");

        return prev;
}
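
/*
 * In C-like pseudo code (illustrative only), the sequence above is roughly:
 *
 *      do {
 *              prev = *ptr;                            // llock
 *              if (prev != expected)
 *                      break;                          // brne: bail, return prev
 *      } while (!store_conditional(ptr, new));         // scond + bnz: retry
 *
 * where store_conditional() is a hypothetical helper standing in for the
 * SCOND instruction (it fails if the reservation taken by LLOCK was lost).
 */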

#else

/* No native CMPXCHG: emulate it under the arch-wide atomic_ops_lock */
static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
{
        unsigned long flags;
        unsigned long prev;
        volatile unsigned long *p = ptr;

        atomic_ops_lock(flags);
        prev = *p;
        if (prev == expected)
                *p = new;
        atomic_ops_unlock(flags);
        return prev;
}
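
/*
 * For reference, a simplified sketch of what the locking helpers amount to
 * (the real definitions live in <asm/smp.h>; the lock name below is
 * illustrative only):
 *
 *      // SMP && !LLSC: one arch-wide lock, taken with IRQs off
 *      #define atomic_ops_lock(flags)    spin_lock_irqsave(&arc_atomic_lock, flags)
 *      #define atomic_ops_unlock(flags)  spin_unlock_irqrestore(&arc_atomic_lock, flags)
 *
 *      // UP: disabling IRQs on the one CPU is enough
 *      #define atomic_ops_lock(flags)    local_irq_save(flags)
 *      #define atomic_ops_unlock(flags)  local_irq_restore(flags)
 */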

#endif /* CONFIG_ARC_HAS_LLSC */

#define cmpxchg(ptr, o, n) ((typeof(*(ptr)))__cmpxchg((ptr), \
                                (unsigned long)(o), (unsigned long)(n)))

/*
 * When CMPXCHG is not supported natively, ARC cmpxchg() falls back on
 * atomic_ops_lock (UP/SMP) just to guarantee its semantics.
 * atomic_cmpxchg() needs to use the same locks as its other atomic siblings,
 * which also happen to be atomic_ops_lock.
 *
 * Thus, despite being semantically different, the implementation of
 * atomic_cmpxchg() is the same as that of cmpxchg().
 */
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
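
/*
 * Typical (illustrative, not part of this header) usage: an optimistic
 * retry loop built on atomic_cmpxchg(), in the style of atomic_add_unless().
 *
 *      static inline int example_add_unless(atomic_t *v, int a, int u)
 *      {
 *              int c, old;
 *
 *              c = atomic_read(v);
 *              while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
 *                      c = old;        // lost the race, retry with fresh value
 *              return c != u;
 *      }
 */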

/*
 * xchg (reg with memory) based on "Native atomic" EX insn
 */
static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
                                   int size)
{
        /* link error for any size other than 32-bit, the only width EX handles */
        extern unsigned long __xchg_bad_pointer(void);

        switch (size) {
        case 4:
                __asm__ __volatile__(
                "       ex  %0, [%1]    \n"
                : "+r"(val)
                : "r"(ptr)
                : "memory");

                return val;
        }
        return __xchg_bad_pointer();
}

#define _xchg(ptr, with) ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), \
                                                 sizeof(*(ptr))))
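
/*
 * Illustrative sketch (hypothetical, not part of this header): a trivial
 * test-and-set style lock built on the EX-backed _xchg(); EX atomically
 * swaps a register with memory, so whoever reads back 0 owns the lock.
 *
 *      static inline void example_lock(volatile unsigned int *lock)
 *      {
 *              while (_xchg(lock, 1))          // returns the prior value
 *                      cpu_relax();            // spin until we observe 0
 *      }
 *
 *      static inline void example_unlock(volatile unsigned int *lock)
 *      {
 *              *lock = 0;
 *      }
 */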

/*
 * On ARC700, the EX insn is inherently atomic, so by default a "vanilla"
 * xchg() needs no extra locking. However there's a quirk.
 * ARC lacks a native CMPXCHG, so cmpxchg() is emulated (see above) using
 * external locking - incidentally it "reuses" the same atomic_ops_lock used
 * by the atomic APIs.
 * Now, the llist code uses cmpxchg() and xchg() on the same data, so xchg()
 * needs to abide by the same serializing rules, and thus ends up using
 * atomic_ops_lock as well.
 *
 * This however only matters when the system is SMP *and* lacks LLSC:
 *   if (UP or LLSC)
 *      xchg doesn't need serialization
 *   else <==> !(UP or LLSC) <==> (!UP and !LLSC) <==> (SMP and !LLSC)
 *      xchg needs serialization
 */

#if !defined(CONFIG_ARC_HAS_LLSC) && defined(CONFIG_SMP)

#define xchg(ptr, with)                 \
({                                      \
        unsigned long flags;            \
        typeof(*(ptr)) old_val;         \
                                        \
        atomic_ops_lock(flags);         \
        old_val = _xchg(ptr, with);     \
        atomic_ops_unlock(flags);       \
        old_val;                        \
})

#else

#define xchg(ptr, with)  _xchg(ptr, with)

#endif
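
/*
 * Illustrative example (hypothetical, not part of this header) of why
 * xchg() and cmpxchg() on the same location must share serialization:
 * an llist-style list where producers cmpxchg() nodes onto the head and
 * a consumer detaches the whole chain with xchg().
 *
 *      struct enode { struct enode *next; };
 *
 *      static inline void example_push(struct enode **head, struct enode *n)
 *      {
 *              struct enode *first;
 *
 *              do {
 *                      first = *head;
 *                      n->next = first;
 *              } while (cmpxchg(head, first, n) != first);
 *      }
 *
 *      static inline struct enode *example_pop_all(struct enode **head)
 *      {
 *              return xchg(head, NULL);        // detach the entire list
 *      }
 */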

/*
 * "atomic" variant of xchg()
 * REQ: It needs to follow the same serialization rules as other atomic_xxx()
 * Since xchg() doesn't always do that, it would seem that the following
 * definition is incorrect. But here's the rationale:
 *   SMP : Even xchg() takes the atomic_ops_lock, so OK.
 *   LLSC: atomic_ops_lock is not relevant at all (even if SMP, since LLSC
 *         is natively "SMP safe", no serialization is required).
 *   UP  : other atomics disable IRQs, so there is no way an atomic_xchg()
 *         from a different context could clobber them. atomic_xchg() itself
 *         would be 1 insn, so it can't be clobbered by others. Thus no
 *         serialization is required when atomic_xchg is involved.
 */
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
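
/*
 * Illustrative usage (hypothetical, not part of this header): consume a
 * "pending" counter in one shot, so exactly one caller observes the
 * non-zero value.
 *
 *      static inline int example_test_and_clear(atomic_t *pending)
 *      {
 *              return atomic_xchg(pending, 0); // returns the prior value
 *      }
 */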

#endif /* __ASM_ARC_CMPXCHG_H */