linux/arch/powerpc/include/asm/cmpxchg.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_CMPXCHG_H_
#define _ASM_POWERPC_CMPXCHG_H_

#ifdef __KERNEL__
#include <linux/compiler.h>
#include <asm/synch.h>
#include <linux/bug.h>

#ifdef __BIG_ENDIAN
#define BITOFF_CAL(size, off)   ((sizeof(u32) - size - off) * BITS_PER_BYTE)
#else
#define BITOFF_CAL(size, off)   (off * BITS_PER_BYTE)
#endif

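/*
 * Not all supported CPUs have byte/halfword reservation instructions, so
 * the u8 and u16 xchg/cmpxchg variants below are emulated with a full-word
 * lwarx/stwcx. on the aligned u32 containing the value: BITOFF_CAL() gives
 * the bit offset of the sub-word inside that word, and
 * (u32)(type)-1 << bitoff builds a mask selecting only its bits.
 *
 * Worked example (little-endian): for a u16 at an address with
 * addr % 4 == 2, off = 2, bitoff = 16 and prev_mask = 0xffff0000, so only
 * the upper halfword of the containing word is replaced.
 */
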
#define XCHG_GEN(type, sfx, cl)                         \
static inline u32 __xchg_##type##sfx(volatile void *p, u32 val) \
{                                                               \
        unsigned int prev, prev_mask, tmp, bitoff, off;         \
                                                                \
        off = (unsigned long)p % sizeof(u32);                   \
        bitoff = BITOFF_CAL(sizeof(type), off);                 \
        p -= off;                                               \
        val <<= bitoff;                                         \
        prev_mask = (u32)(type)-1 << bitoff;                    \
                                                                \
        __asm__ __volatile__(                                   \
"1:     lwarx   %0,0,%3\n"                                      \
"       andc    %1,%0,%5\n"                                     \
"       or      %1,%1,%4\n"                                     \
"       stwcx.  %1,0,%3\n"                                      \
"       bne-    1b\n"                                           \
        : "=&r" (prev), "=&r" (tmp), "+m" (*(u32*)p)            \
        : "r" (p), "r" (val), "r" (prev_mask)                   \
        : "cc", cl);                                            \
                                                                \
        return prev >> bitoff;                                  \
}

#define CMPXCHG_GEN(type, sfx, br, br2, cl)                     \
static inline                                                   \
u32 __cmpxchg_##type##sfx(volatile void *p, u32 old, u32 new)   \
{                                                               \
        unsigned int prev, prev_mask, tmp, bitoff, off;         \
                                                                \
        off = (unsigned long)p % sizeof(u32);                   \
        bitoff = BITOFF_CAL(sizeof(type), off);                 \
        p -= off;                                               \
        old <<= bitoff;                                         \
        new <<= bitoff;                                         \
        prev_mask = (u32)(type)-1 << bitoff;                    \
                                                                \
        __asm__ __volatile__(                                   \
        br                                                      \
"1:     lwarx   %0,0,%3\n"                                      \
"       and     %1,%0,%6\n"                                     \
"       cmpw    0,%1,%4\n"                                      \
"       bne-    2f\n"                                           \
"       andc    %1,%0,%6\n"                                     \
"       or      %1,%1,%5\n"                                     \
"       stwcx.  %1,0,%3\n"                                      \
"       bne-    1b\n"                                           \
        br2                                                     \
        "\n"                                                    \
"2:"                                                            \
        : "=&r" (prev), "=&r" (tmp), "+m" (*(u32*)p)            \
        : "r" (p), "r" (old), "r" (new), "r" (prev_mask)        \
        : "cc", cl);                                            \
                                                                \
        return prev >> bitoff;                                  \
}

/*
 * Atomic exchange
 *
 * Changes the memory location '*p' to be val and returns
 * the previous value stored there.
 */
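
/*
 * Illustrative usage sketch (the 'flag' variable and its values are
 * hypothetical, not part of this header):
 *
 *      unsigned int flag = 0;
 *      unsigned int old;
 *
 *      old = xchg_local(&flag, 1);
 *      if (old == 0)
 *              ...flag was clear and we were the one to set it...
 *
 * Only the _local and _relaxed flavours are provided here; neither emits
 * the PPC_ATOMIC_ENTRY/EXIT_BARRIER sequences used by the fully ordered
 * cmpxchg() further down.
 */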

XCHG_GEN(u8, _local, "memory");
XCHG_GEN(u8, _relaxed, "cc");
XCHG_GEN(u16, _local, "memory");
XCHG_GEN(u16, _relaxed, "cc");

static __always_inline unsigned long
__xchg_u32_local(volatile void *p, unsigned long val)
{
        unsigned long prev;

        __asm__ __volatile__(
"1:     lwarx   %0,0,%2\n"
"       stwcx.  %3,0,%2\n"
"       bne-    1b"
        : "=&r" (prev), "+m" (*(volatile unsigned int *)p)
        : "r" (p), "r" (val)
        : "cc", "memory");

        return prev;
}

static __always_inline unsigned long
__xchg_u32_relaxed(u32 *p, unsigned long val)
{
        unsigned long prev;

        __asm__ __volatile__(
"1:     lwarx   %0,0,%2\n"
"       stwcx.  %3,0,%2\n"
"       bne-    1b"
        : "=&r" (prev), "+m" (*p)
        : "r" (p), "r" (val)
        : "cc");

        return prev;
}

#ifdef CONFIG_PPC64
static __always_inline unsigned long
__xchg_u64_local(volatile void *p, unsigned long val)
{
        unsigned long prev;

        __asm__ __volatile__(
"1:     ldarx   %0,0,%2\n"
"       stdcx.  %3,0,%2\n"
"       bne-    1b"
        : "=&r" (prev), "+m" (*(volatile unsigned long *)p)
        : "r" (p), "r" (val)
        : "cc", "memory");

        return prev;
}

static __always_inline unsigned long
__xchg_u64_relaxed(u64 *p, unsigned long val)
{
        unsigned long prev;

        __asm__ __volatile__(
"1:     ldarx   %0,0,%2\n"
"       stdcx.  %3,0,%2\n"
"       bne-    1b"
        : "=&r" (prev), "+m" (*p)
        : "r" (p), "r" (val)
        : "cc");

        return prev;
}
#endif

static __always_inline unsigned long
__xchg_local(void *ptr, unsigned long x, unsigned int size)
{
        switch (size) {
        case 1:
                return __xchg_u8_local(ptr, x);
        case 2:
                return __xchg_u16_local(ptr, x);
        case 4:
                return __xchg_u32_local(ptr, x);
#ifdef CONFIG_PPC64
        case 8:
                return __xchg_u64_local(ptr, x);
#endif
        }
        BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg_local");
        return x;
}

static __always_inline unsigned long
__xchg_relaxed(void *ptr, unsigned long x, unsigned int size)
{
        switch (size) {
        case 1:
                return __xchg_u8_relaxed(ptr, x);
        case 2:
                return __xchg_u16_relaxed(ptr, x);
        case 4:
                return __xchg_u32_relaxed(ptr, x);
#ifdef CONFIG_PPC64
        case 8:
                return __xchg_u64_relaxed(ptr, x);
#endif
        }
        BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg_relaxed");
        return x;
}
#define xchg_local(ptr, x)                                                   \
  ({                                                                         \
     __typeof__(*(ptr)) _x_ = (x);                                           \
     (__typeof__(*(ptr))) __xchg_local((ptr),                                \
                (unsigned long)_x_, sizeof(*(ptr)));                         \
  })

#define xchg_relaxed(ptr, x)                                            \
({                                                                      \
        __typeof__(*(ptr)) _x_ = (x);                                   \
        (__typeof__(*(ptr))) __xchg_relaxed((ptr),                      \
                        (unsigned long)_x_, sizeof(*(ptr)));            \
})

/*
 * Compare and exchange - if *p == old, set it to new,
 * and return the old value of *p.
 */
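
/*
 * Illustrative usage sketch (the 'owner' variable and its values are
 * hypothetical, not part of this header):
 *
 *      unsigned int owner = 0;
 *      unsigned int prev;
 *
 *      prev = cmpxchg(&owner, 0, 1);
 *      if (prev == 0)
 *              ...the store happened and owner is now 1...
 *      else
 *              ...someone beat us to it and owner is unchanged...
 *
 * The return value is always the value *p held before the operation,
 * whether or not the exchange succeeded.
 */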

CMPXCHG_GEN(u8, , PPC_ATOMIC_ENTRY_BARRIER, PPC_ATOMIC_EXIT_BARRIER, "memory");
CMPXCHG_GEN(u8, _local, , , "memory");
CMPXCHG_GEN(u8, _acquire, , PPC_ACQUIRE_BARRIER, "memory");
CMPXCHG_GEN(u8, _relaxed, , , "cc");
CMPXCHG_GEN(u16, , PPC_ATOMIC_ENTRY_BARRIER, PPC_ATOMIC_EXIT_BARRIER, "memory");
CMPXCHG_GEN(u16, _local, , , "memory");
CMPXCHG_GEN(u16, _acquire, , PPC_ACQUIRE_BARRIER, "memory");
CMPXCHG_GEN(u16, _relaxed, , , "cc");

static __always_inline unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
{
        unsigned int prev;

        __asm__ __volatile__ (
        PPC_ATOMIC_ENTRY_BARRIER
"1:     lwarx   %0,0,%2         # __cmpxchg_u32\n"
"       cmpw    0,%0,%3\n"
"       bne-    2f\n"
"       stwcx.  %4,0,%2\n"
"       bne-    1b"
        PPC_ATOMIC_EXIT_BARRIER
        "\n"
"2:"
        : "=&r" (prev), "+m" (*p)
        : "r" (p), "r" (old), "r" (new)
        : "cc", "memory");

        return prev;
}

static __always_inline unsigned long
__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
                        unsigned long new)
{
        unsigned int prev;

        __asm__ __volatile__ (
"1:     lwarx   %0,0,%2         # __cmpxchg_u32_local\n"
"       cmpw    0,%0,%3\n"
"       bne-    2f\n"
"       stwcx.  %4,0,%2\n"
"       bne-    1b\n"
"2:"
        : "=&r" (prev), "+m" (*p)
        : "r" (p), "r" (old), "r" (new)
        : "cc", "memory");

        return prev;
}

static __always_inline unsigned long
__cmpxchg_u32_relaxed(u32 *p, unsigned long old, unsigned long new)
{
        unsigned long prev;

        __asm__ __volatile__ (
"1:     lwarx   %0,0,%2         # __cmpxchg_u32_relaxed\n"
"       cmpw    0,%0,%3\n"
"       bne-    2f\n"
"       stwcx.  %4,0,%2\n"
"       bne-    1b\n"
"2:"
        : "=&r" (prev), "+m" (*p)
        : "r" (p), "r" (old), "r" (new)
        : "cc");

        return prev;
}

/*
 * The cmpxchg family provides no ordering guarantee when the compare
 * part fails, so we can avoid superfluous barriers by implementing
 * cmpxchg() and cmpxchg_acquire() in assembly.  We don't do the same
 * for cmpxchg_release(), however, because that would put a barrier in
 * the middle of an ll/sc loop, which is probably a bad idea; for
 * example, it might make the conditional store more likely to fail.
 */
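
/*
 * Illustrative sketch of why the barrier is only needed on the success
 * path (the variables here are hypothetical):
 *
 *      if (cmpxchg_acquire(&lock, 0, 1) == 0) {
 *              ...success: the PPC_ACQUIRE_BARRIER emitted after the
 *              stwcx. orders this access after the lock acquisition...
 *              val = data;
 *      }
 *
 * On compare failure the "bne- 2f" branch jumps past the barrier, so a
 * failed operation provides no ordering, as described above.
 */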
static __always_inline unsigned long
__cmpxchg_u32_acquire(u32 *p, unsigned long old, unsigned long new)
{
        unsigned long prev;

        __asm__ __volatile__ (
"1:     lwarx   %0,0,%2         # __cmpxchg_u32_acquire\n"
"       cmpw    0,%0,%3\n"
"       bne-    2f\n"
"       stwcx.  %4,0,%2\n"
"       bne-    1b\n"
        PPC_ACQUIRE_BARRIER
        "\n"
"2:"
        : "=&r" (prev), "+m" (*p)
        : "r" (p), "r" (old), "r" (new)
        : "cc", "memory");

        return prev;
}

#ifdef CONFIG_PPC64
static __always_inline unsigned long
__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
{
        unsigned long prev;

        __asm__ __volatile__ (
        PPC_ATOMIC_ENTRY_BARRIER
"1:     ldarx   %0,0,%2         # __cmpxchg_u64\n"
"       cmpd    0,%0,%3\n"
"       bne-    2f\n"
"       stdcx.  %4,0,%2\n"
"       bne-    1b"
        PPC_ATOMIC_EXIT_BARRIER
        "\n"
"2:"
        : "=&r" (prev), "+m" (*p)
        : "r" (p), "r" (old), "r" (new)
        : "cc", "memory");

        return prev;
}

static __always_inline unsigned long
__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
                        unsigned long new)
{
        unsigned long prev;

        __asm__ __volatile__ (
"1:     ldarx   %0,0,%2         # __cmpxchg_u64_local\n"
"       cmpd    0,%0,%3\n"
"       bne-    2f\n"
"       stdcx.  %4,0,%2\n"
"       bne-    1b\n"
"2:"
        : "=&r" (prev), "+m" (*p)
        : "r" (p), "r" (old), "r" (new)
        : "cc", "memory");

        return prev;
}

static __always_inline unsigned long
__cmpxchg_u64_relaxed(u64 *p, unsigned long old, unsigned long new)
{
        unsigned long prev;

        __asm__ __volatile__ (
"1:     ldarx   %0,0,%2         # __cmpxchg_u64_relaxed\n"
"       cmpd    0,%0,%3\n"
"       bne-    2f\n"
"       stdcx.  %4,0,%2\n"
"       bne-    1b\n"
"2:"
        : "=&r" (prev), "+m" (*p)
        : "r" (p), "r" (old), "r" (new)
        : "cc");

        return prev;
}

static __always_inline unsigned long
__cmpxchg_u64_acquire(u64 *p, unsigned long old, unsigned long new)
{
        unsigned long prev;

        __asm__ __volatile__ (
"1:     ldarx   %0,0,%2         # __cmpxchg_u64_acquire\n"
"       cmpd    0,%0,%3\n"
"       bne-    2f\n"
"       stdcx.  %4,0,%2\n"
"       bne-    1b\n"
        PPC_ACQUIRE_BARRIER
        "\n"
"2:"
        : "=&r" (prev), "+m" (*p)
        : "r" (p), "r" (old), "r" (new)
        : "cc", "memory");

        return prev;
}
#endif

static __always_inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
          unsigned int size)
{
        switch (size) {
        case 1:
                return __cmpxchg_u8(ptr, old, new);
        case 2:
                return __cmpxchg_u16(ptr, old, new);
        case 4:
                return __cmpxchg_u32(ptr, old, new);
#ifdef CONFIG_PPC64
        case 8:
                return __cmpxchg_u64(ptr, old, new);
#endif
        }
        BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg");
        return old;
}

static __always_inline unsigned long
__cmpxchg_local(void *ptr, unsigned long old, unsigned long new,
          unsigned int size)
{
        switch (size) {
        case 1:
                return __cmpxchg_u8_local(ptr, old, new);
        case 2:
                return __cmpxchg_u16_local(ptr, old, new);
        case 4:
                return __cmpxchg_u32_local(ptr, old, new);
#ifdef CONFIG_PPC64
        case 8:
                return __cmpxchg_u64_local(ptr, old, new);
#endif
        }
        BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_local");
        return old;
}

static __always_inline unsigned long
__cmpxchg_relaxed(void *ptr, unsigned long old, unsigned long new,
                  unsigned int size)
{
        switch (size) {
        case 1:
                return __cmpxchg_u8_relaxed(ptr, old, new);
        case 2:
                return __cmpxchg_u16_relaxed(ptr, old, new);
        case 4:
                return __cmpxchg_u32_relaxed(ptr, old, new);
#ifdef CONFIG_PPC64
        case 8:
                return __cmpxchg_u64_relaxed(ptr, old, new);
#endif
        }
        BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_relaxed");
        return old;
}

static __always_inline unsigned long
__cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
                  unsigned int size)
{
        switch (size) {
        case 1:
                return __cmpxchg_u8_acquire(ptr, old, new);
        case 2:
                return __cmpxchg_u16_acquire(ptr, old, new);
        case 4:
                return __cmpxchg_u32_acquire(ptr, old, new);
#ifdef CONFIG_PPC64
        case 8:
                return __cmpxchg_u64_acquire(ptr, old, new);
#endif
        }
        BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_acquire");
        return old;
}
#define cmpxchg(ptr, o, n)                                               \
  ({                                                                     \
     __typeof__(*(ptr)) _o_ = (o);                                       \
     __typeof__(*(ptr)) _n_ = (n);                                       \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,           \
                                    (unsigned long)_n_, sizeof(*(ptr))); \
  })

#define cmpxchg_local(ptr, o, n)                                         \
  ({                                                                     \
     __typeof__(*(ptr)) _o_ = (o);                                       \
     __typeof__(*(ptr)) _n_ = (n);                                       \
     (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_,     \
                                    (unsigned long)_n_, sizeof(*(ptr))); \
  })

#define cmpxchg_relaxed(ptr, o, n)                                      \
({                                                                      \
        __typeof__(*(ptr)) _o_ = (o);                                   \
        __typeof__(*(ptr)) _n_ = (n);                                   \
        (__typeof__(*(ptr))) __cmpxchg_relaxed((ptr),                   \
                        (unsigned long)_o_, (unsigned long)_n_,         \
                        sizeof(*(ptr)));                                \
})

#define cmpxchg_acquire(ptr, o, n)                                      \
({                                                                      \
        __typeof__(*(ptr)) _o_ = (o);                                   \
        __typeof__(*(ptr)) _n_ = (n);                                   \
        (__typeof__(*(ptr))) __cmpxchg_acquire((ptr),                   \
                        (unsigned long)_o_, (unsigned long)_n_,         \
                        sizeof(*(ptr)));                                \
})
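
/*
 * 64-bit variants: on CONFIG_PPC64 the cmpxchg64*() macros below simply
 * forward to the 8-byte cases above after a build-time size check.
 * 32-bit builds only get cmpxchg64_local(), provided by the generic
 * implementation in <asm-generic/cmpxchg-local.h>.
 */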
#ifdef CONFIG_PPC64
#define cmpxchg64(ptr, o, n)                                            \
  ({                                                                    \
        BUILD_BUG_ON(sizeof(*(ptr)) != 8);                              \
        cmpxchg((ptr), (o), (n));                                       \
  })
#define cmpxchg64_local(ptr, o, n)                                      \
  ({                                                                    \
        BUILD_BUG_ON(sizeof(*(ptr)) != 8);                              \
        cmpxchg_local((ptr), (o), (n));                                 \
  })
#define cmpxchg64_relaxed(ptr, o, n)                                    \
({                                                                      \
        BUILD_BUG_ON(sizeof(*(ptr)) != 8);                              \
        cmpxchg_relaxed((ptr), (o), (n));                               \
})
#define cmpxchg64_acquire(ptr, o, n)                                    \
({                                                                      \
        BUILD_BUG_ON(sizeof(*(ptr)) != 8);                              \
        cmpxchg_acquire((ptr), (o), (n));                               \
})
#else
#include <asm-generic/cmpxchg-local.h>
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_CMPXCHG_H_ */