linux/arch/powerpc/lib/locks.c
/*
 * Spin and read/write lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *   Rework to support virtual processors
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/stringify.h>
#include <linux/smp.h>

/* waiting for a spinlock... */
#if defined(CONFIG_PPC_SPLPAR)
#include <asm/hvcall.h>
#include <asm/smp.h>
void __spin_yield(arch_spinlock_t *lock)
{
	unsigned int lock_value, holder_cpu, yield_count;

	lock_value = lock->slock;
	if (lock_value == 0)
		return;
	/* The lock word carries the holder's logical CPU id in its low 16 bits. */
	holder_cpu = lock_value & 0xffff;
	BUG_ON(holder_cpu >= NR_CPUS);
	/*
	 * The hypervisor bumps yield_count each time the virtual CPU is
	 * preempted and again when it is dispatched, so an even value
	 * means the holder is currently running and there is no point
	 * in conferring.
	 */
	yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count);
	if ((yield_count & 1) == 0)
		return;		/* virtual cpu is currently running */
	rmb();
	if (lock->slock != lock_value)
		return;		/* something has changed */
	/*
	 * Confer our remaining time slice to the preempted holder.
	 * Passing yield_count lets the hypervisor discard the request
	 * if the holder has been dispatched in the meantime.
	 */
	plpar_hcall_norets(H_CONFER,
		get_hard_smp_processor_id(holder_cpu), yield_count);
}
EXPORT_SYMBOL_GPL(__spin_yield);
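
/*
 * For context: a minimal sketch of the spin-lock slow path that drives
 * __spin_yield(), paraphrased from arch_spin_lock() in
 * arch/powerpc/include/asm/spinlock.h (not a verbatim copy; consult the
 * header for the real code).  The idea: spin at low SMT thread priority,
 * and on shared-processor LPARs confer our cycles to the lock holder
 * rather than burning them.
 *
 *	static inline void arch_spin_lock(arch_spinlock_t *lock)
 *	{
 *		while (1) {
 *			if (likely(__arch_spin_trylock(lock) == 0))
 *				break;			// got the lock
 *			do {
 *				HMT_low();		// lower thread priority
 *				if (SHARED_PROCESSOR)
 *					__spin_yield(lock);
 *			} while (unlikely(lock->slock != 0));
 *			HMT_medium();		// restore thread priority
 *		}
 *	}
 */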

/*
 * Waiting for a read lock or a write lock on a rwlock...
 * This turns out to be the same for read and write locks, since
 * we only know the holder if it is write-locked.
 */
void __rw_yield(arch_rwlock_t *rw)
{
	int lock_value;
	unsigned int holder_cpu, yield_count;

	lock_value = rw->lock;
	if (lock_value >= 0)
		return;		/* no write lock at present */
	/* Write-locked: the writer's logical CPU id is in the low 16 bits. */
	holder_cpu = lock_value & 0xffff;
	BUG_ON(holder_cpu >= NR_CPUS);
	yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count);
	if ((yield_count & 1) == 0)
		return;		/* virtual cpu is currently running */
	rmb();
	if (rw->lock != lock_value)
		return;		/* something has changed */
	/* Confer our remaining time slice to the preempted writer. */
	plpar_hcall_norets(H_CONFER,
		get_hard_smp_processor_id(holder_cpu), yield_count);
}
#endif
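
/*
 * Note on the lock-word encoding __rw_yield() relies on (as this author
 * reads the powerpc arch_rwlock_t): lock > 0 means read-held by that
 * many readers (no single holder to confer to), lock == 0 means free,
 * and lock < 0 means write-locked with the writer's logical CPU id in
 * the low 16 bits.  The read/write slow paths in asm/spinlock.h loop
 * the same way as the sketch above, calling __rw_yield() instead.
 */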

void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	/* Order prior accesses against the lock-word reads below. */
	smp_mb();

	while (lock->slock) {
		HMT_low();	/* drop SMT thread priority while polling */
		if (SHARED_PROCESSOR)
			__spin_yield(lock);
	}
	HMT_medium();

	/* Order the final unlocked observation against later accesses. */
	smp_mb();
}

EXPORT_SYMBOL(arch_spin_unlock_wait);
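
/*
 * Hedged usage sketch (not part of this file): generic code reaches this
 * function through the spin_unlock_wait() wrapper.  It lets a caller wait
 * until any current holder has released the lock without ever acquiring
 * it, e.g. before freeing the object that contains the lock.  The names
 * struct session and session_teardown() below are hypothetical, for
 * illustration only.
 *
 *	struct session {
 *		spinlock_t lock;
 *		...
 *	};
 *
 *	static void session_teardown(struct session *s)
 *	{
 *		spin_unlock_wait(&s->lock);	// let a racing holder finish
 *		kfree(s);
 *	}
 */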