linux/arch/hexagon/include/asm/cmpxchg.h
/*
 * xchg/cmpxchg operations for the Hexagon architecture
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#ifndef _ASM_CMPXCHG_H
#define _ASM_CMPXCHG_H

/*
 * __xchg - atomically exchange a register and a memory location
 * @x: value to swap
 * @ptr: pointer to memory
 * @size: size of the value
 *
 * Only 4 bytes are supported currently.
 *
 * Note: there was an erratum for V2 about .new's and memw_locked.
 *
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
                                   int size)
{
        unsigned long retval;

        /*  Can't seem to use printk or panic here, so just stop  */
        if (size != 4) do { asm volatile("brkpt;\n"); } while (1);

        __asm__ __volatile__ (
        "1:     %0 = memw_locked(%1);\n"    /*  load into retval */
        "       memw_locked(%1,P0) = %2;\n" /*  store into memory */
        "       if (!P0) jump 1b;\n"
        : "=&r" (retval)
        : "r" (ptr), "r" (x)
        : "memory", "p0"
        );
        return retval;
}

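/*
 * For reference only: the locked load/store sequence above implements a
 * plain word-sized atomic exchange.  The sketch below expresses the same
 * operation with the GCC/Clang __atomic builtin; the function name is
 * hypothetical, and the builtin's relaxed ordering is only an approximation
 * of the asm version (which also acts as a compiler barrier).
 */
static inline unsigned long __xchg_builtin_sketch(unsigned long x,
                                                  volatile unsigned long *ptr)
{
        /* Atomically store x into *ptr and return the previous contents. */
        return __atomic_exchange_n(ptr, x, __ATOMIC_RELAXED);
}
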
/*
 * Atomically swap the contents of a register with memory.  Should be atomic
 * between multiple CPUs and within interrupts on the same CPU.
 */
#define xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), \
        sizeof(*(ptr))))

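/*
 * Usage sketch (illustrative only, not part of this header): xchg() can be
 * used to claim a one-word flag, with the returned value telling the caller
 * what was there before the swap.  The helper name and flag convention are
 * hypothetical.
 */
static inline int xchg_try_claim_sketch(volatile int *flag)
{
        /* Swap in 1; a returned 0 means nobody had claimed the flag yet. */
        return xchg(flag, 1) == 0;
}
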
/*
 *  See rt-mutex-design.txt: cmpxchg(ptr, old, new) compares *ptr with old
 *  and stores new only if they match; the value previously read from *ptr
 *  is returned either way.  On this arch it currently looks just like
 *  atomic_cmpxchg, plus a bunch of variable casting.
 */

#define cmpxchg(ptr, old, new)                                  \
({                                                              \
        __typeof__(ptr) __ptr = (ptr);                          \
        __typeof__(*(ptr)) __old = (old);                       \
        __typeof__(*(ptr)) __new = (new);                       \
        __typeof__(*(ptr)) __oldval = 0;                        \
                                                                \
        asm volatile(                                           \
                "1:     %0 = memw_locked(%1);\n"                \
                "       { P0 = cmp.eq(%0,%2);\n"                \
                "         if (!P0.new) jump:nt 2f; }\n"         \
                "       memw_locked(%1,p0) = %3;\n"             \
                "       if (!P0) jump 1b;\n"                    \
                "2:\n"                                          \
                : "=&r" (__oldval)                              \
                : "r" (__ptr), "r" (__old), "r" (__new)         \
                : "memory", "p0"                                \
        );                                                      \
        __oldval;                                               \
})

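/*
 * Usage sketch (illustrative only, not provided by this header): the usual
 * way to build a read-modify-write out of cmpxchg() is a retry loop that
 * re-reads the current value whenever another CPU won the race.  The
 * increment helper below and its name are hypothetical; the kernel's real
 * atomic ops live in asm/atomic.h.
 */
static inline int cmpxchg_inc_sketch(volatile int *counter)
{
        int old, seen;

        do {
                old = *counter;
                /* cmpxchg() returns what it found; a mismatch means we raced. */
                seen = cmpxchg(counter, old, old + 1);
        } while (seen != old);

        return old + 1;
}
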
#endif /* _ASM_CMPXCHG_H */