linux/arch/s390/lib/spinlock.c
// SPDX-License-Identifier: GPL-2.0
/*
 *    Out of line spinlock code.
 *
 *    Copyright IBM Corp. 2004, 2006
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <asm/alternative.h>
#include <asm/io.h>

int spin_retry = -1;

static int __init spin_retry_init(void)
{
        if (spin_retry < 0)
                spin_retry = 1000;
        return 0;
}
early_initcall(spin_retry_init);

/**
 * spin_retry= parameter
 */
static int __init spin_retry_setup(char *str)
{
        spin_retry = simple_strtoul(str, &str, 0);
        return 1;
}
__setup("spin_retry=", spin_retry_setup);

struct spin_wait {
        struct spin_wait *next, *prev;
        int node_id;
} __aligned(32);

static DEFINE_PER_CPU_ALIGNED(struct spin_wait, spin_wait[4]);

#define _Q_LOCK_CPU_OFFSET      0
#define _Q_LOCK_STEAL_OFFSET    16
#define _Q_TAIL_IDX_OFFSET      18
#define _Q_TAIL_CPU_OFFSET      20

#define _Q_LOCK_CPU_MASK        0x0000ffff
#define _Q_LOCK_STEAL_ADD       0x00010000
#define _Q_LOCK_STEAL_MASK      0x00030000
#define _Q_TAIL_IDX_MASK        0x000c0000
#define _Q_TAIL_CPU_MASK        0xfff00000

#define _Q_LOCK_MASK            (_Q_LOCK_CPU_MASK | _Q_LOCK_STEAL_MASK)
#define _Q_TAIL_MASK            (_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)
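
/*
 * Layout of the 32-bit lock word (see the masks above):
 *
 *   bits  0-15: CPU number of the lock owner, plus one (0 == free)
 *   bits 16-17: lock-steal counter
 *   bits 18-19: per-CPU wait node index (0-3) of the queue tail
 *   bits 20-31: CPU number of the queue tail, plus one (0 == no waiters)
 *
 * Example: cpu 5 holding the lock with one steal recorded and the wait
 * node (cpu 1, index 1) as queue tail gives the lock word
 * (2 << 20) | (1 << 18) | (1 << 16) | 6 = 0x00250006.
 */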

void arch_spin_lock_setup(int cpu)
{
        struct spin_wait *node;
        int ix;

        node = per_cpu_ptr(&spin_wait[0], cpu);
        for (ix = 0; ix < 4; ix++, node++) {
                memset(node, 0, sizeof(*node));
                node->node_id = ((cpu + 1) << _Q_TAIL_CPU_OFFSET) +
                        (ix << _Q_TAIL_IDX_OFFSET);
        }
}

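/*
 * NIAI ("next instruction access intent") tells the CPU how the next
 * instruction will access the storage operand, so a contended lock word
 * does not have to be fetched exclusively just to be read. The
 * ALTERNATIVE patches the hint in only when facility 49 is installed.
 */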
static inline int arch_load_niai4(int *lock)
{
        int owner;

        asm volatile(
                ALTERNATIVE("", ".long 0xb2fa0040", 49) /* NIAI 4 */
                "       l       %0,%1\n"
                : "=d" (owner) : "Q" (*lock) : "memory");
        return owner;
}

static inline int arch_cmpxchg_niai8(int *lock, int old, int new)
{
        int expected = old;

        asm volatile(
                ALTERNATIVE("", ".long 0xb2fa0080", 49) /* NIAI 8 */
                "       cs      %0,%3,%1\n"
                : "=d" (old), "=Q" (*lock)
                : "0" (old), "d" (new), "Q" (*lock)
                : "cc", "memory");
        return expected == old;
}

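/*
 * Map the tail bits of a lock word back to the per-CPU wait node.
 * Example: a lock word of 0x00260007 has tail cpu bits 2 and tail
 * index 1, i.e. wait node 1 of cpu 1.
 */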
static inline struct spin_wait *arch_spin_decode_tail(int lock)
{
        int ix, cpu;

        ix = (lock & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;
        cpu = (lock & _Q_TAIL_CPU_MASK) >> _Q_TAIL_CPU_OFFSET;
        return per_cpu_ptr(&spin_wait[ix], cpu - 1);
}

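/*
 * Pick a CPU worth yielding to: the current lock owner if there is one,
 * otherwise the waiter at the head of the queue, found by walking the
 * 'prev' pointers. Returns the CPU number plus one, or 0 for none.
 */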
static inline int arch_spin_yield_target(int lock, struct spin_wait *node)
{
        if (lock & _Q_LOCK_CPU_MASK)
                return lock & _Q_LOCK_CPU_MASK;
        if (node == NULL || node->prev == NULL)
                return 0;       /* 0 -> no target cpu */
        while (node->prev)
                node = node->prev;
        return node->node_id >> _Q_TAIL_CPU_OFFSET;
}

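/*
 * MCS-style queued slow path: each waiting CPU enqueues one of its
 * per-CPU wait nodes and spins on its own 'prev' pointer instead of on
 * the lock word, keeping the contended cache line mostly CPU-local
 * until the waiter reaches the head of the queue.
 */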
static inline void arch_spin_lock_queued(arch_spinlock_t *lp)
{
        struct spin_wait *node, *next;
        int lockval, ix, node_id, tail_id, old, new, owner, count;

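        /*
         * spinlock_index counts the nesting depth of queued acquisitions
         * on this CPU (decremented again at 'out:' below) and selects one
         * of the four per-CPU wait nodes.
         */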
        ix = S390_lowcore.spinlock_index++;
        barrier();
        lockval = SPINLOCK_LOCKVAL;     /* cpu + 1 */
        node = this_cpu_ptr(&spin_wait[ix]);
        node->prev = node->next = NULL;
        node_id = node->node_id;

        /* Enqueue the node for this CPU in the spinlock wait queue */
        while (1) {
                old = READ_ONCE(lp->lock);
                if ((old & _Q_LOCK_CPU_MASK) == 0 &&
                    (old & _Q_LOCK_STEAL_MASK) != _Q_LOCK_STEAL_MASK) {
                        /*
                         * The lock is free but there may be waiters.
                         * With no waiters simply take the lock; if there
                         * are waiters, try to steal the lock. The lock
                         * may be stolen three times before the next
                         * queued waiter will get the lock.
                         */
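                        /*
                         * Example: old = 0x00260000 (tail set, steal
                         * count 2) gives new = 0x00270000 | lockval;
                         * the count reaches _Q_LOCK_STEAL_MASK, so the
                         * check above blocks a fourth steal.
                         */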
                        new = (old ? (old + _Q_LOCK_STEAL_ADD) : 0) | lockval;
                        if (__atomic_cmpxchg_bool(&lp->lock, old, new))
                                /* Got the lock */
                                goto out;
                        /* lock passing in progress */
                        continue;
                }
                /* Make the node of this CPU the new tail. */
                new = node_id | (old & _Q_LOCK_MASK);
                if (__atomic_cmpxchg_bool(&lp->lock, old, new))
                        break;
        }
        /* Set the 'next' pointer of the tail node in the queue */
        tail_id = old & _Q_TAIL_MASK;
        if (tail_id != 0) {
                node->prev = arch_spin_decode_tail(tail_id);
                WRITE_ONCE(node->prev->next, node);
        }

        /* Pass the virtual CPU to the lock holder if it is not running */
        owner = arch_spin_yield_target(old, node);
        if (owner && arch_vcpu_is_preempted(owner - 1))
                smp_yield_cpu(owner - 1);

        /* Spin on the CPU local node->prev pointer */
        if (tail_id != 0) {
                count = spin_retry;
                while (READ_ONCE(node->prev) != NULL) {
                        if (count-- >= 0)
                                continue;
                        count = spin_retry;
                        /* Query running state of lock holder again. */
                        owner = arch_spin_yield_target(old, node);
                        if (owner && arch_vcpu_is_preempted(owner - 1))
                                smp_yield_cpu(owner - 1);
                }
        }

        /* Spin on the lock value in the spinlock_t */
        count = spin_retry;
        while (1) {
                old = READ_ONCE(lp->lock);
                owner = old & _Q_LOCK_CPU_MASK;
                if (!owner) {
                        tail_id = old & _Q_TAIL_MASK;
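                        /* Clear the tail bits if this node is the last waiter */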
                        new = ((tail_id != node_id) ? tail_id : 0) | lockval;
                        if (__atomic_cmpxchg_bool(&lp->lock, old, new))
                                /* Got the lock */
                                break;
                        continue;
                }
                if (count-- >= 0)
                        continue;
                count = spin_retry;
                if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(owner - 1))
                        smp_yield_cpu(owner - 1);
        }

        /* Pass lock_spin job to next CPU in the queue */
        if (node_id && tail_id != node_id) {
                /* Wait until the next CPU has set up the 'next' pointer */
                while ((next = READ_ONCE(node->next)) == NULL)
                        ;
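                /* Clearing 'prev' releases the next waiter from its spin */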
                next->prev = NULL;
        }

 out:
        S390_lowcore.spinlock_index--;
}

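/*
 * Classic test-and-set slow path: spin on the lock word itself with NIAI
 * cache hints and retry the compare-and-swap, yielding to a preempted
 * lock holder every spin_retry iterations.
 */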
static inline void arch_spin_lock_classic(arch_spinlock_t *lp)
{
        int lockval, old, new, owner, count;

        lockval = SPINLOCK_LOCKVAL;     /* cpu + 1 */

        /* Pass the virtual CPU to the lock holder if it is not running */
        owner = arch_spin_yield_target(READ_ONCE(lp->lock), NULL);
        if (owner && arch_vcpu_is_preempted(owner - 1))
                smp_yield_cpu(owner - 1);

        count = spin_retry;
        while (1) {
                old = arch_load_niai4(&lp->lock);
                owner = old & _Q_LOCK_CPU_MASK;
                /* Try to get the lock if it is free. */
                if (!owner) {
                        new = (old & _Q_TAIL_MASK) | lockval;
                        if (arch_cmpxchg_niai8(&lp->lock, old, new)) {
                                /* Got the lock */
                                return;
                        }
                        continue;
                }
                if (count-- >= 0)
                        continue;
                count = spin_retry;
                if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(owner - 1))
                        smp_yield_cpu(owner - 1);
        }
}

void arch_spin_lock_wait(arch_spinlock_t *lp)
{
        /* Queued spinlocks on dedicated CPUs, classic + niai otherwise */
        if (test_cpu_flag(CIF_DEDICATED_CPU))
                arch_spin_lock_queued(lp);
        else
                arch_spin_lock_classic(lp);
}
EXPORT_SYMBOL(arch_spin_lock_wait);

int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
        int cpu = SPINLOCK_LOCKVAL;
        int owner, count;

        for (count = spin_retry; count > 0; count--) {
                owner = READ_ONCE(lp->lock);
                /* Try to get the lock if it is free. */
                if (!owner) {
                        if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
                                return 1;
                }
        }
        return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);

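/*
 * The rwlock 'cnts' word as used below: bits 0-15 count the readers,
 * bit 16 (0x10000) is set while a writer holds the lock, and the bits
 * above (0x20000 per writer) count the waiting writers.
 */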
void arch_read_lock_wait(arch_rwlock_t *rw)
{
        if (unlikely(in_interrupt())) {
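                /*
                 * Interrupt context: do not queue on rw->wait, keep the
                 * reader count raised in the fastpath and only wait for
                 * a current writer to leave.
                 */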
                while (READ_ONCE(rw->cnts) & 0x10000)
                        barrier();
                return;
        }

        /* Remove this reader again to allow recursive read locking */
        __atomic_add_const(-1, &rw->cnts);
        /* Put the reader into the wait queue */
        arch_spin_lock(&rw->wait);
        /* Now add this reader to the count value again */
        __atomic_add_const(1, &rw->cnts);
        /* Loop until the writer is done */
        while (READ_ONCE(rw->cnts) & 0x10000)
                barrier();
        arch_spin_unlock(&rw->wait);
}
EXPORT_SYMBOL(arch_read_lock_wait);

void arch_write_lock_wait(arch_rwlock_t *rw)
{
        int old;

        /* Add this CPU to the write waiters */
        __atomic_add(0x20000, &rw->cnts);

        /* Put the writer into the wait queue */
        arch_spin_lock(&rw->wait);

        while (1) {
                old = READ_ONCE(rw->cnts);
                if ((old & 0x1ffff) == 0 &&
                    __atomic_cmpxchg_bool(&rw->cnts, old, old | 0x10000))
                        /* Got the lock */
                        break;
                barrier();
        }

        arch_spin_unlock(&rw->wait);
}
EXPORT_SYMBOL(arch_write_lock_wait);

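/* Directed yield to the lock owner, unless it is known to be running */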
void arch_spin_relax(arch_spinlock_t *lp)
{
        int cpu;

        cpu = READ_ONCE(lp->lock) & _Q_LOCK_CPU_MASK;
        if (!cpu)
                return;
        if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(cpu - 1))
                return;
        smp_yield_cpu(cpu - 1);
}
EXPORT_SYMBOL(arch_spin_relax);