#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/osq_lock.h>
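
/*
 * An MCS-like queue lock used for optimistic spinning by sleeping lock
 * implementations (mutex, rwsem). A single spin node per CPU suffices
 * because these locks are never taken from interrupt context and
 * preemption stays disabled while spinning, so each CPU can be queued
 * on at most one OSQ at a time.
 */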
static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node);
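
/*
 * The lock tail uses 0 (OSQ_UNLOCKED_VAL) to mean "no CPU", so a CPU is
 * stored as its number plus one.
 */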
static inline int encode_cpu(int cpu_nr)
{
	return cpu_nr + 1;
}

static inline int node_cpu(struct optimistic_spin_node *node)
{
	return node->cpu - 1;
}

static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
{
	int cpu_nr = encoded_cpu_val - 1;

	return per_cpu_ptr(&osq_node, cpu_nr);
}
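
/*
 * Get a stable @node->next pointer for unlock() or unqueue() purposes.
 * Returns NULL if we were last in the queue and moved @lock's tail back
 * to @prev instead.
 */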
static inline struct optimistic_spin_node *
osq_wait_next(struct optimistic_spin_queue *lock,
	      struct optimistic_spin_node *node,
	      struct optimistic_spin_node *prev)
{
	struct optimistic_spin_node *next = NULL;
	int curr = encode_cpu(smp_processor_id());
	int old;
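
	/*
	 * If there is a prev node in the queue, 'old' is that node's CPU
	 * number; otherwise it is OSQ_UNLOCKED_VAL, since if we are the
	 * last in the queue the tail should become empty again.
	 */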
	old = prev ? prev->cpu : OSQ_UNLOCKED_VAL;

	for (;;) {
		if (atomic_read(&lock->tail) == curr &&
		    atomic_cmpxchg_acquire(&lock->tail, curr, old) == curr) {
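			/*
			 * We were the last queued: @lock->tail now points
			 * back at @prev, which will observe it and complete
			 * its own unlock()/unqueue().
			 */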
			break;
		}
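
		/*
		 * We must xchg() the @node->next value rather than just read
		 * it: if we left it in place, a concurrent unlock()/unqueue()
		 * from @node->next could complete its Step-A and still think
		 * its @prev (i.e. us) is valid. If that racing unqueue wins,
		 * we keep spinning until either @lock's tail points at us
		 * again (its Step-B) or it installs a new @node->next
		 * (its Step-C).
		 */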
		if (node->next) {
			next = xchg(&node->next, NULL);
			if (next)
				break;
		}

		cpu_relax();
	}

	return next;
}

bool osq_lock(struct optimistic_spin_queue *lock)
{
	struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
	struct optimistic_spin_node *prev, *next;
	int curr = encode_cpu(smp_processor_id());
	int old;

	node->locked = 0;
	node->next = NULL;
	node->cpu = curr;
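
	/*
	 * The tail update needs both ACQUIRE (to pair with the RELEASE in
	 * the uncontended osq_unlock() fast path) and RELEASE semantics
	 * (to publish the node initialisation above before the node becomes
	 * reachable); atomic_xchg() provides full ordering, which covers
	 * both.
	 */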
	old = atomic_xchg(&lock->tail, curr);
	if (old == OSQ_UNLOCKED_VAL)
		return true;

	prev = decode_cpu(old);
	node->prev = prev;
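
	/*
	 * Order the @node->prev store above against the @prev->next store
	 * below. The unqueue path's Step-C on another CPU writes the same
	 * ->prev field (next->prev = prev), so these stores must reach
	 * memory in program order or the list can be corrupted.
	 */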
	smp_wmb();

	WRITE_ONCE(prev->next, node);
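
	/*
	 * Normally @prev would be untouchable after the store above, since
	 * the owner could release the lock and reuse its node at any time.
	 * Because the nodes are static per-cpu storage, however, they are
	 * guaranteed to stay valid, which is what lets the unqueue path
	 * below cmpxchg() on @prev to undo the queueing.
	 *
	 * Spin until the lock is handed to us (@node->locked) or we decide
	 * to bail out and unqueue ourselves.
	 */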
	while (!READ_ONCE(node->locked)) {
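		/*
		 * If we need to reschedule, bail out so the caller can
		 * block. Also bail if the CPU holding @prev has been
		 * preempted (vcpu_is_preempted()), since spinning on a
		 * preempted vCPU is wasted work.
		 */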
		if (need_resched() || vcpu_is_preempted(node_cpu(node->prev)))
			goto unqueue;

		cpu_relax();
	}
	return true;

unqueue:
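	/*
	 * Step - A -- stabilize @prev
	 *
	 * Undo our @prev->next assignment; this makes @prev's own
	 * unlock()/unqueue() wait for a next pointer, since @lock's tail
	 * points at us or at someone behind us.
	 */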
	for (;;) {
		if (prev->next == node &&
		    cmpxchg(&prev->next, node, NULL) == node)
			break;
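
		/*
		 * The cmpxchg() can only fail if we raced with an unlock(),
		 * in which case we should shortly observe @node->locked
		 * becoming true.
		 */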
		if (smp_load_acquire(&node->locked))
			return true;

		cpu_relax();
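
		/*
		 * Or we raced with a concurrent unqueue()'s Step-B, in which
		 * case its Step-C will have written us a new @node->prev
		 * pointer; reload it and retry.
		 */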
		prev = READ_ONCE(node->prev);
	}
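
	/*
	 * Step - B -- stabilize @next
	 *
	 * Similar to unlock(): wait for @node->next to appear, or move
	 * @lock's tail from @node back to @prev.
	 */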
	next = osq_wait_next(lock, node, prev);
	if (!next)
		return false;
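
	/*
	 * Step - C -- unlink
	 *
	 * @prev is stable because it is still waiting for a new @prev->next
	 * pointer, and @next is stable because our @node->next pointer is
	 * NULL so it will spin in its own Step-A.
	 */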
	WRITE_ONCE(next->prev, prev);
	WRITE_ONCE(prev->next, next);

	return false;
}

void osq_unlock(struct optimistic_spin_queue *lock)
{
	struct optimistic_spin_node *node, *next;
	int curr = encode_cpu(smp_processor_id());
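
	/*
	 * Fast path for the uncontended case: the tail is still us, so just
	 * clear it.
	 */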
	if (likely(atomic_cmpxchg_release(&lock->tail, curr,
					  OSQ_UNLOCKED_VAL) == curr))
		return;
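
	/*
	 * Second most likely case: a successor has already linked itself in
	 * behind us, so hand the lock straight to it.
	 */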
	node = this_cpu_ptr(&osq_node);
	next = xchg(&node->next, NULL);
	if (next) {
		WRITE_ONCE(next->locked, 1);
		return;
	}

	next = osq_wait_next(lock, node, NULL);
	if (next)
		WRITE_ONCE(next->locked, 1);
}