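/*
 * Spinlock-based emulation of atomic exchange and compare-and-exchange,
 * intended for configurations without native atomic read-modify-write
 * instructions.  Each helper takes a spinlock picked from __atomic_hash
 * by hashing the address being updated (via _atomic_spin_lock_irqsave()),
 * so updates to independent variables rarely contend for the same lock.
 */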
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>

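/*
 * On SMP the emulated operations are serialized by a small hash table of
 * spinlocks, indexed by the address being updated.  UP builds omit the
 * table and rely on the interrupt disabling done in the helpers below.
 */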
#ifdef CONFIG_SMP
arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
	[0 ... (ATOMIC_HASH_SIZE-1)] = __ARCH_SPIN_LOCK_UNLOCKED
};
#endif

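/*
 * 64-bit exchange: under the hashed lock, store x into *ptr and return the
 * previous value.  Only built on 64-bit kernels.
 */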
#ifdef CONFIG_64BIT
unsigned long __xchg64(unsigned long x, unsigned long *ptr)
{
	unsigned long temp, flags;

	_atomic_spin_lock_irqsave(ptr, flags);
	temp = *ptr;
	*ptr = x;
	_atomic_spin_unlock_irqrestore(ptr, flags);
	return temp;
}
#endif

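/*
 * 32-bit exchange: the old value is read into a signed long (and is thus
 * sign-extended) before being returned as an unsigned long.
 */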
unsigned long __xchg32(int x, int *ptr)
{
	unsigned long flags;
	long temp;

	_atomic_spin_lock_irqsave(ptr, flags);
	temp = (long) *ptr;
	*ptr = x;
	_atomic_spin_unlock_irqrestore(ptr, flags);
	return (unsigned long)temp;
}

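/*
 * 8-bit exchange, same pattern as __xchg32: the old byte is sign-extended
 * through 'long' and returned as an unsigned long.
 */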
unsigned long __xchg8(char x, char *ptr)
{
	unsigned long flags;
	long temp;

	_atomic_spin_lock_irqsave(ptr, flags);
	temp = (long) *ptr;
	*ptr = x;
	_atomic_spin_unlock_irqrestore(ptr, flags);
	return (unsigned long)temp;
}

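/*
 * 64-bit compare-and-exchange: store new only if *ptr still equals old,
 * and in either case return the value that was found.  64-bit kernels only.
 */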
#ifdef CONFIG_64BIT
unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new)
{
	unsigned long flags;
	unsigned long prev;

	_atomic_spin_lock_irqsave(ptr, flags);
	if ((prev = *ptr) == old)
		*ptr = new;
	_atomic_spin_unlock_irqrestore(ptr, flags);
	return prev;
}
#endif

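/*
 * 32-bit compare-and-exchange, same contract as __cmpxchg_u64 but on an
 * unsigned int; the previous value is returned widened to unsigned long.
 */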
unsigned long __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsigned int new)
{
	unsigned long flags;
	unsigned int prev;

	_atomic_spin_lock_irqsave(ptr, flags);
	if ((prev = *ptr) == old)
		*ptr = new;
	_atomic_spin_unlock_irqrestore(ptr, flags);
	return (unsigned long)prev;
}