linux/arch/s390/lib/spinlock.c
/*
 *  arch/s390/lib/spinlock.c
 *    Out of line spinlock code.
 *
 *    Copyright (C) IBM Corp. 2004, 2006
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <asm/io.h>

/* Number of lock acquisition attempts before the cpu is yielded. */
int spin_retry = 1000;

/*
 * Parse the spin_retry= kernel command line parameter, e.g. booting
 * with "spin_retry=2000" doubles the default retry count.
 */
static int __init spin_retry_setup(char *str)
{
        spin_retry = simple_strtoul(str, &str, 0);
        return 1;
}
__setup("spin_retry=", spin_retry_setup);

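/*
 * Give the remainder of the current time slice back to the hypervisor
 * with diagnose 0x44, if the machine supports it.
 */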
static inline void _raw_yield(void)
{
        if (MACHINE_HAS_DIAG44)
                asm volatile("diag 0,0,0x44");
}

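/*
 * Yield the time slice directly to the vcpu running the target cpu
 * with diagnose 0x9c; fall back to an undirected yield if diagnose
 * 0x9c is not available.
 */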
static inline void _raw_yield_cpu(int cpu)
{
        if (MACHINE_HAS_DIAG9C)
                asm volatile("diag %0,0,0x9c"
                             : : "d" (cpu_logical_map(cpu)));
        else
                _raw_yield();
}

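/**
 * arch_spin_lock_wait - out of line spinlock slow path
 * @lp: pointer to the spinlock
 *
 * Spin until owner_cpu can be set from 0 to the bitwise complement of
 * the current cpu number; the complement is stored so that cpu 0 still
 * produces a non-zero owner value. While the owning vcpu is known to
 * be scheduled, retry up to spin_retry times; otherwise (except on
 * LPAR) hand the time slice to the owner with a directed yield before
 * trying again.
 */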
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
        int count = spin_retry;
        unsigned int cpu = ~smp_processor_id();
        unsigned int owner;

        while (1) {
                owner = lp->owner_cpu;
                if (!owner || smp_vcpu_scheduled(~owner)) {
                        for (count = spin_retry; count > 0; count--) {
                                if (arch_spin_is_locked(lp))
                                        continue;
                                if (_raw_compare_and_swap(&lp->owner_cpu, 0,
                                                          cpu) == 0)
                                        return;
                        }
                        if (MACHINE_IS_LPAR)
                                continue;
                }
                owner = lp->owner_cpu;
                if (owner)
                        _raw_yield_cpu(~owner);
                if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
                        return;
        }
}
EXPORT_SYMBOL(arch_spin_lock_wait);

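/**
 * arch_spin_lock_wait_flags - spinlock slow path with interrupts re-enabled
 * @lp: pointer to the spinlock
 * @flags: saved interrupt state to restore while waiting
 *
 * Same strategy as arch_spin_lock_wait(), but interrupts are restored
 * from @flags while spinning and disabled again around every
 * compare-and-swap attempt, so the lock is always taken with
 * interrupts off.
 */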
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
        int count = spin_retry;
        unsigned int cpu = ~smp_processor_id();
        unsigned int owner;

        local_irq_restore(flags);
        while (1) {
                owner = lp->owner_cpu;
                if (!owner || smp_vcpu_scheduled(~owner)) {
                        for (count = spin_retry; count > 0; count--) {
                                if (arch_spin_is_locked(lp))
                                        continue;
                                local_irq_disable();
                                if (_raw_compare_and_swap(&lp->owner_cpu, 0,
                                                          cpu) == 0)
                                        return;
                                local_irq_restore(flags);
                        }
                        if (MACHINE_IS_LPAR)
                                continue;
                }
                owner = lp->owner_cpu;
                if (owner)
                        _raw_yield_cpu(~owner);
                local_irq_disable();
                if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
                        return;
                local_irq_restore(flags);
        }
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);

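/**
 * arch_spin_trylock_retry - retry a spinlock trylock spin_retry times
 * @lp: pointer to the spinlock
 *
 * Returns 1 if the lock was acquired, 0 if all attempts failed.
 */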
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
        unsigned int cpu = ~smp_processor_id();
        int count;

        for (count = spin_retry; count > 0; count--) {
                if (arch_spin_is_locked(lp))
                        continue;
                if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);

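/*
 * If the lock is owned, hand the time slice to the owning vcpu when
 * running virtualized (z/VM or KVM) or when the owner is not currently
 * scheduled.
 */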
void arch_spin_relax(arch_spinlock_t *lock)
{
        unsigned int cpu = lock->owner_cpu;
        if (cpu != 0) {
                if (MACHINE_IS_VM || MACHINE_IS_KVM ||
                    !smp_vcpu_scheduled(~cpu))
                        _raw_yield_cpu(~cpu);
        }
}
EXPORT_SYMBOL(arch_spin_relax);

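/**
 * _raw_read_lock_wait - out of line rwlock read-lock slow path
 * @rw: pointer to the rwlock
 *
 * Spin until the write bit (0x80000000) is clear and the reader count
 * in the lower 31 bits can be incremented with compare-and-swap,
 * yielding the time slice every spin_retry iterations.
 */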
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
        unsigned int old;
        int count = spin_retry;

        while (1) {
                if (count-- <= 0) {
                        _raw_yield();
                        count = spin_retry;
                }
                if (!arch_read_can_lock(rw))
                        continue;
                old = rw->lock & 0x7fffffffU;
                if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
                        return;
        }
}
EXPORT_SYMBOL(_raw_read_lock_wait);

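/**
 * _raw_read_lock_wait_flags - read-lock slow path with interrupts re-enabled
 * @rw: pointer to the rwlock
 * @flags: saved interrupt state to restore while waiting
 *
 * Like _raw_read_lock_wait(), but interrupts are restored from @flags
 * while waiting and disabled before the compare-and-swap, so the lock
 * is taken with interrupts off. Note that, unlike the spinlock
 * variant, interrupts stay disabled once the first compare-and-swap
 * has been attempted.
 */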
void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
        unsigned int old;
        int count = spin_retry;

        local_irq_restore(flags);
        while (1) {
                if (count-- <= 0) {
                        _raw_yield();
                        count = spin_retry;
                }
                if (!arch_read_can_lock(rw))
                        continue;
                old = rw->lock & 0x7fffffffU;
                local_irq_disable();
                if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
                        return;
        }
}
EXPORT_SYMBOL(_raw_read_lock_wait_flags);

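/**
 * _raw_read_trylock_retry - retry a read trylock spin_retry times
 * @rw: pointer to the rwlock
 *
 * Returns 1 if the read lock was acquired, 0 if all attempts failed.
 */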
int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
        unsigned int old;
        int count = spin_retry;

        while (count-- > 0) {
                if (!arch_read_can_lock(rw))
                        continue;
                old = rw->lock & 0x7fffffffU;
                if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);

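/**
 * _raw_write_lock_wait - out of line rwlock write-lock slow path
 * @rw: pointer to the rwlock
 *
 * Spin until the lock word can be changed from 0 (unlocked, no
 * readers) to 0x80000000 (write-locked), yielding the time slice
 * every spin_retry iterations.
 */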
void _raw_write_lock_wait(arch_rwlock_t *rw)
{
        int count = spin_retry;

        while (1) {
                if (count-- <= 0) {
                        _raw_yield();
                        count = spin_retry;
                }
                if (!arch_write_can_lock(rw))
                        continue;
                if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
                        return;
        }
}
EXPORT_SYMBOL(_raw_write_lock_wait);

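/**
 * _raw_write_lock_wait_flags - write-lock slow path with interrupts re-enabled
 * @rw: pointer to the rwlock
 * @flags: saved interrupt state to restore while waiting
 *
 * Like _raw_write_lock_wait(), but interrupts are restored from @flags
 * while waiting and disabled before the compare-and-swap; as in the
 * read-lock variant, they stay disabled once the first attempt has
 * been made.
 */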
void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
        int count = spin_retry;

        local_irq_restore(flags);
        while (1) {
                if (count-- <= 0) {
                        _raw_yield();
                        count = spin_retry;
                }
                if (!arch_write_can_lock(rw))
                        continue;
                local_irq_disable();
                if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
                        return;
        }
}
EXPORT_SYMBOL(_raw_write_lock_wait_flags);

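/**
 * _raw_write_trylock_retry - retry a write trylock spin_retry times
 * @rw: pointer to the rwlock
 *
 * Returns 1 if the write lock was acquired, 0 if all attempts failed.
 */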
int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
        int count = spin_retry;

        while (count-- > 0) {
                if (!arch_write_can_lock(rw))
                        continue;
                if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);