/*
 * Queue read/write lock
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#ifndef __ASM_GENERIC_QRWLOCK_H
#define __ASM_GENERIC_QRWLOCK_H

#include <linux/atomic.h>
#include <asm/barrier.h>
#include <asm/processor.h>

#include <asm-generic/qrwlock_types.h>

/*
 * Writer states & reader shift and bias.
 *
 *       | +0 | +1 | +2 | +3 |
 *   ----+----+----+----+----+
 *    LE | 78 | 56 | 34 | 12 | 0x12345678
 *   ----+----+----+----+----+
 *       | wr |      rd      |
 *       +----+----+----+----+
 *
 *   ----+----+----+----+----+
 *    BE | 12 | 34 | 56 | 78 | 0x12345678
 *   ----+----+----+----+----+
 *       |      rd      | wr |
 *       +----+----+----+----+
 */
#define	_QW_WAITING	1		/* A writer is waiting	   */
#define	_QW_LOCKED	0xff		/* A writer holds the lock */
#define	_QW_WMASK	0xff		/* Writer mask		   */
#define	_QR_SHIFT	8		/* Reader count shift	   */
#define _QR_BIAS	(1U << _QR_SHIFT)
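
/*
 * Example ->cnts values (illustrative): 0x000 is unlocked, 0x100
 * (_QR_BIAS) is one active reader, 0x200 is two readers, and 0x0ff
 * (_QW_LOCKED) is a writer holding the lock. A reader's speculative
 * fastpath increment can transiently produce mixed values such as
 * 0x1ff while a writer owns the lock.
 */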

/*
 * External function declarations
 */
extern void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts);
extern void queued_write_lock_slowpath(struct qrwlock *lock);
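
/*
 * Both slowpaths live outside this header (in the kernel they are in
 * kernel/locking/qrwlock.c); contended lockers queue there on the
 * qrwlock's wait_lock.
 */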

/**
 * queued_read_can_lock - would read_trylock() succeed?
 * @lock: Pointer to queue rwlock structure
 */
static inline int queued_read_can_lock(struct qrwlock *lock)
{
	return !(atomic_read(&lock->cnts) & _QW_WMASK);
}

/**
 * queued_write_can_lock - would write_trylock() succeed?
 * @lock: Pointer to queue rwlock structure
 */
static inline int queued_write_can_lock(struct qrwlock *lock)
{
	return !atomic_read(&lock->cnts);
}

/**
 * queued_read_trylock - try to acquire read lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static inline int queued_read_trylock(struct qrwlock *lock)
{
	u32 cnts;

	cnts = atomic_read(&lock->cnts);
	if (likely(!(cnts & _QW_WMASK))) {
		/* Speculatively add a reader, then recheck for a writer. */
		cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
		if (likely(!(cnts & _QW_WMASK)))
			return 1;
		/* A writer slipped in; back the reader count out. */
		atomic_sub(_QR_BIAS, &lock->cnts);
	}
	return 0;
}

/**
 * queued_write_trylock - try to acquire write lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static inline int queued_write_trylock(struct qrwlock *lock)
{
	u32 cnts;

	cnts = atomic_read(&lock->cnts);
	if (unlikely(cnts))
		return 0;

	/* cnts is 0 here, so a successful cmpxchg installs _QW_LOCKED. */
	return likely(atomic_cmpxchg_acquire(&lock->cnts,
					     cnts, cnts | _QW_LOCKED) == cnts);
}
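
/*
 * A minimal caller sketch (illustrative; kernel code would normally go
 * through the read_lock()/write_lock() wrappers rather than call these
 * functions directly):
 *
 *	if (queued_write_trylock(&lock)) {
 *		... update the protected data exclusively ...
 *		queued_write_unlock(&lock);
 *	}
 */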

/**
 * queued_read_lock - acquire read lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 */
static inline void queued_read_lock(struct qrwlock *lock)
{
	u32 cnts;

	cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
	if (likely(!(cnts & _QW_WMASK)))
		return;

	/* The slowpath will decrement the reader count, if necessary. */
	queued_read_lock_slowpath(lock, cnts);
}

/**
 * queued_write_lock - acquire write lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 */
static inline void queued_write_lock(struct qrwlock *lock)
{
	/* Optimize for the uncontended case: grab the lock in one cmpxchg. */
	if (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0)
		return;

	queued_write_lock_slowpath(lock);
}

/**
 * queued_read_unlock - release read lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 */
static inline void queued_read_unlock(struct qrwlock *lock)
{
	/*
	 * Atomically decrement the reader count.
	 */
	(void)atomic_sub_return_release(_QR_BIAS, &lock->cnts);
}

/**
 * __qrwlock_write_byte - retrieve the write byte address of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 * Return: the write byte address of a lock
 */
static inline u8 *__qrwlock_write_byte(struct qrwlock *lock)
{
	return (u8 *)lock + 3 * IS_BUILTIN(CONFIG_CPU_BIG_ENDIAN);
}
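
/*
 * Concretely, IS_BUILTIN(CONFIG_CPU_BIG_ENDIAN) evaluates to 1 on
 * big-endian builds and 0 otherwise, so the returned pointer targets
 * byte offset 3 on big-endian and offset 0 on little-endian. In both
 * cases that is the least significant byte of ->cnts, i.e. the
 * _QW_WMASK writer byte shown in the layout diagram above.
 */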

/**
 * queued_write_unlock - release write lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 */
static inline void queued_write_unlock(struct qrwlock *lock)
{
	smp_store_release(__qrwlock_write_byte(lock), 0);
}

/*
 * Remapping rwlock architecture specific functions to the corresponding
 * queue rwlock functions.
 */
#define arch_read_can_lock(l)	queued_read_can_lock(l)
#define arch_write_can_lock(l)	queued_write_can_lock(l)
#define arch_read_lock(l)	queued_read_lock(l)
#define arch_write_lock(l)	queued_write_lock(l)
#define arch_read_trylock(l)	queued_read_trylock(l)
#define arch_write_trylock(l)	queued_write_trylock(l)
#define arch_read_unlock(l)	queued_read_unlock(l)
#define arch_write_unlock(l)	queued_write_unlock(l)
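
/*
 * Usage sketch (illustrative; assumes the qrwlock definitions from
 * <asm-generic/qrwlock_types.h>, where __ARCH_RW_LOCK_UNLOCKED
 * initializes an unlocked qrwlock):
 *
 *	static struct qrwlock demo = __ARCH_RW_LOCK_UNLOCKED;
 *
 *	queued_read_lock(&demo);	(shared: many readers at once)
 *	... read the protected data ...
 *	queued_read_unlock(&demo);
 *
 *	queued_write_lock(&demo);	(exclusive: blocks all others)
 *	... modify the protected data ...
 *	queued_write_unlock(&demo);
 */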

#endif /* __ASM_GENERIC_QRWLOCK_H */