/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/spinlock_types.h>
#include <asm/processor.h>

/*
 * Spinlock implementation.
 *
 * The old value is read exclusively and the new one, if unlocked, is written
 * exclusively. In case of failure, the loop is restarted.
 *
 * The memory barriers are implicit with the load-acquire (ldaxr) and
 * store-release (stlr) instructions.
 *
 * Unlocked value: 0
 * Locked value: 1
 */
#define arch_spin_is_locked(x)		((x)->lock != 0)
#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int tmp;

	asm volatile(
	"	sevl\n"
	"1:	wfe\n"
	"2:	ldaxr	%w0, %1\n"
	"	cbnz	%w0, 1b\n"
	"	stxr	%w0, %w2, %1\n"
	"	cbnz	%w0, 2b\n"
	: "=&r" (tmp), "+Q" (lock->lock)
	: "r" (1)
	: "cc", "memory");
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int tmp;

	asm volatile(
	"	ldaxr	%w0, %1\n"
	"	cbnz	%w0, 1f\n"
	"	stxr	%w0, %w2, %1\n"
	"1:\n"
	: "=&r" (tmp), "+Q" (lock->lock)
	: "r" (1)
	: "cc", "memory");

	return !tmp;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	asm volatile(
	"	stlr	%w1, %0\n"
	: "=Q" (lock->lock) : "r" (0) : "memory");
}
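
/*
 * Usage sketch (illustration only, not part of this header): how a caller
 * is expected to drive these primitives, assuming the usual { 0 } unlocked
 * initialiser __ARCH_SPIN_LOCK_UNLOCKED from <asm/spinlock_types.h>:
 *
 *	arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
 *
 *	arch_spin_lock(&lock);		// wfe until lock.lock reads 0
 *	// critical section: accesses cannot move before the ldaxr
 *	arch_spin_unlock(&lock);	// stlr of 0 publishes prior stores
 *
 *	if (arch_spin_trylock(&lock))	// non-blocking attempt, 1 on success
 *		arch_spin_unlock(&lock);
 */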

/*
 * Write lock implementation.
 *
 * Write locks set bit 31. Unlocking is done by writing 0, since at that
 * point the lock is exclusively held by the unlocking CPU.
 *
 * The memory barriers are implicit with the load-acquire (ldaxr) and
 * store-release (stlr) instructions.
 */
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	asm volatile(
	"	sevl\n"
	"1:	wfe\n"
	"2:	ldaxr	%w0, %1\n"
	"	cbnz	%w0, 1b\n"
	"	stxr	%w0, %w2, %1\n"
	"	cbnz	%w0, 2b\n"
	: "=&r" (tmp), "+Q" (rw->lock)
	: "r" (0x80000000)
	: "cc", "memory");
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	asm volatile(
	"	ldaxr	%w0, %1\n"
	"	cbnz	%w0, 1f\n"
	"	stxr	%w0, %w2, %1\n"
	"1:\n"
	: "=&r" (tmp), "+Q" (rw->lock)
	: "r" (0x80000000)
	: "cc", "memory");

	return !tmp;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	asm volatile(
	"	stlr	%w1, %0\n"
	: "=Q" (rw->lock) : "r" (0) : "memory");
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)		((x)->lock == 0)
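
/*
 * Writer usage sketch (illustration only; __ARCH_RW_LOCK_UNLOCKED is
 * assumed to be the { 0 } initialiser from <asm/spinlock_types.h>):
 *
 *	arch_rwlock_t rw = __ARCH_RW_LOCK_UNLOCKED;
 *
 *	arch_write_lock(&rw);	// waits for the whole word to be 0, then
 *				// claims it by storing 0x80000000
 *	// writer-only critical section
 *	arch_write_unlock(&rw);	// store-release of 0 drops the lock
 */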

/*
 * Read lock implementation.
 *
 * It exclusively loads the lock value, increments it and stores the new
 * value back if positive and the CPU still exclusively owns the location.
 * If the value is negative, a writer holds the lock and the loop restarts.
 *
 * During unlocking there may be multiple active read locks but no write
 * lock.
 *
 * The memory barriers are implicit with the load-acquire (ldaxr) and
 * store-release (stlxr) instructions.
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(
	"	sevl\n"
	"1:	wfe\n"
	"2:	ldaxr	%w0, %2\n"
	"	add	%w0, %w0, #1\n"
	"	tbnz	%w0, #31, 1b\n"
	"	stxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 2b\n"
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "cc", "memory");
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(
	"1:	ldxr	%w0, %2\n"
	"	sub	%w0, %w0, #1\n"
	"	stlxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "cc", "memory");
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2 = 1;

	asm volatile(
	"	ldaxr	%w0, %2\n"
	"	add	%w0, %w0, #1\n"
	"	tbnz	%w0, #31, 1f\n"
	"	stxr	%w1, %w0, %2\n"
	"1:\n"
	: "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock)
	:
	: "cc", "memory");

	return !tmp2;
}
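
/*
 * Reader usage sketch (illustration only): the low 31 bits count active
 * readers, so read-side sections nest across CPUs:
 *
 *	arch_rwlock_t rw = __ARCH_RW_LOCK_UNLOCKED;
 *
 *	arch_read_lock(&rw);	// increments the count unless bit 31 is set
 *	// read-side critical section; other readers may run concurrently
 *	arch_read_unlock(&rw);	// decrements with store-release (stlxr)
 *
 * arch_read_trylock() returns 1 only if the increment was committed; it may
 * fail spuriously if the exclusive store is disturbed.
 */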

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)		((x)->lock < 0x80000000)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif	/* __ASM_SPINLOCK_H */