1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#ifndef __ASM_GENERIC_QRWLOCK_H
19#define __ASM_GENERIC_QRWLOCK_H
20
21#include <linux/atomic.h>
22#include <asm/barrier.h>
23#include <asm/processor.h>
24
25#include <asm-generic/qrwlock_types.h>
26
27
28
29
30#define _QW_WAITING 1
31#define _QW_LOCKED 0xff
32#define _QW_WMASK 0xff
33#define _QR_SHIFT 8
34#define _QR_BIAS (1U << _QR_SHIFT)
35
36
37
38
39extern void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts);
40extern void queued_write_lock_slowpath(struct qrwlock *lock);
41
42
43
44
45
46static inline int queued_read_can_lock(struct qrwlock *lock)
47{
48 return !(atomic_read(&lock->cnts) & _QW_WMASK);
49}
50
51
52
53
54
55static inline int queued_write_can_lock(struct qrwlock *lock)
56{
57 return !atomic_read(&lock->cnts);
58}
59
60
61
62
63
64
/**
 * queued_read_trylock - try to acquire read lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 * Return: 1 if lock acquired, 0 if failed
 *
 * Optimistically bumps the reader count, then re-checks for a writer.
 * If a writer appeared between the initial read and the increment, the
 * bias is backed out and the attempt fails.
 */
static inline int queued_read_trylock(struct qrwlock *lock)
{
	u32 cnts;

	/* Cheap non-atomic peek first: bail out without dirtying the line. */
	cnts = atomic_read(&lock->cnts);
	if (likely(!(cnts & _QW_WMASK))) {
		/* Acquire ordering so the critical section cannot leak above. */
		cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
		if (likely(!(cnts & _QW_WMASK)))
			return 1;
		/* A writer slipped in; undo our reader bias and give up. */
		atomic_sub(_QR_BIAS, &lock->cnts);
	}
	return 0;
}
78
79
80
81
82
83
/**
 * queued_write_trylock - try to acquire write lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 * Return: 1 if lock acquired, 0 if failed
 *
 * Succeeds only when the lock word is completely idle (no readers and
 * no writer); otherwise fails immediately without queueing.
 */
static inline int queued_write_trylock(struct qrwlock *lock)
{
	u32 cnts;

	cnts = atomic_read(&lock->cnts);
	/* Any reader count or writer byte means we cannot take it now. */
	if (unlikely(cnts))
		return 0;

	/* cnts is 0 here, so this swings 0 -> _QW_LOCKED with acquire order. */
	return likely(atomic_cmpxchg_acquire(&lock->cnts,
	              cnts, cnts | _QW_LOCKED) == cnts);
}
95
96
97
98
/**
 * queued_read_lock - acquire read lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 *
 * Fastpath: unconditionally add the reader bias; if no writer is
 * present or waiting, the lock is held. Otherwise fall into the
 * queued slowpath with the observed counter value.
 */
static inline void queued_read_lock(struct qrwlock *lock)
{
	u32 cnts;

	cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
	if (likely(!(cnts & _QW_WMASK)))
		return;

	/* The slowpath will decrement the reader count, if necessary. */
	queued_read_lock_slowpath(lock, cnts);
}
110
111
112
113
114
/**
 * queued_write_lock - acquire write lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 *
 * Fastpath: a single cmpxchg from fully-idle (0) to _QW_LOCKED.
 * Any contention (readers or another writer) diverts to the slowpath.
 */
static inline void queued_write_lock(struct qrwlock *lock)
{
	/* Optimize for the unfair lock case where the fair flag is 0. */
	if (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0)
		return;

	queued_write_lock_slowpath(lock);
}
123
124
125
126
127
/**
 * queued_read_unlock - release read lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 */
static inline void queued_read_unlock(struct qrwlock *lock)
{
	/*
	 * Atomically decrement the reader count with release ordering so
	 * that everything done inside the critical section is visible
	 * before the lock appears released.
	 */
	(void)atomic_sub_return_release(_QR_BIAS, &lock->cnts);
}
135
136
137
138
139
/**
 * queued_write_unlock - release write lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 *
 * Clears the writer byte (_QW_LOCKED == 0xff) with a single byte-sized
 * release store, leaving any queued-reader bits untouched.
 *
 * NOTE(review): the (u8 *) cast assumes the writer byte is the one at
 * the lowest address of lock->cnts — i.e. it depends on the endianness
 * and layout defined in qrwlock_types.h; verify against that header.
 */
static inline void queued_write_unlock(struct qrwlock *lock)
{
	smp_store_release((u8 *)&lock->cnts, 0);
}
144
145
146
147
148
149#define arch_read_can_lock(l) queued_read_can_lock(l)
150#define arch_write_can_lock(l) queued_write_can_lock(l)
151#define arch_read_lock(l) queued_read_lock(l)
152#define arch_write_lock(l) queued_write_lock(l)
153#define arch_read_trylock(l) queued_read_trylock(l)
154#define arch_write_trylock(l) queued_write_trylock(l)
155#define arch_read_unlock(l) queued_read_unlock(l)
156#define arch_write_unlock(l) queued_write_unlock(l)
157
158#endif
159