#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/osq_lock.h>
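
/*
 * An MCS-like queued lock, tailored for the optimistic-spinning paths of
 * sleeping lock implementations (mutex, rwsem).
 *
 * Using a single node per CPU is safe because sleeping locks are never
 * taken from interrupt context and preemption stays disabled while we
 * spin, so a CPU can be queued on at most one osq at a time.
 */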
static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node);
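
/*
 * The value 0 (OSQ_UNLOCKED_VAL) means "no CPU", so a CPU is encoded as
 * its number plus one.
 */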
static inline int encode_cpu(int cpu_nr)
{
	return cpu_nr + 1;
}

static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
{
	int cpu_nr = encoded_cpu_val - 1;

	return per_cpu_ptr(&osq_node, cpu_nr);
}
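
/*
 * Get a stable @node->next pointer, either for unlock() or for the unqueue
 * path. Returns NULL when we were the last queued CPU and the lock tail was
 * moved back to @prev (or to unlocked) instead.
 */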
static inline struct optimistic_spin_node *
osq_wait_next(struct optimistic_spin_queue *lock,
	      struct optimistic_spin_node *node,
	      struct optimistic_spin_node *prev)
{
	struct optimistic_spin_node *next = NULL;
	int curr = encode_cpu(smp_processor_id());
	int old;
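
	/*
	 * If there is a prev node in the queue, 'old' is that node's encoded
	 * CPU number; otherwise it is OSQ_UNLOCKED_VAL, because removing the
	 * last queued CPU must leave the queue empty.
	 */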
	old = prev ? prev->cpu : OSQ_UNLOCKED_VAL;

	for (;;) {
		if (atomic_read(&lock->tail) == curr &&
		    atomic_cmpxchg_acquire(&lock->tail, curr, old) == curr) {
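			/*
			 * We were the last queued CPU and moved @lock back;
			 * @prev (if any) will now observe @lock and complete
			 * its own unlock()/unqueue().
			 */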
			break;
		}
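
		/*
		 * We must xchg() @node->next rather than just read it: if we
		 * left the pointer in place, a concurrent unlock()/unqueue()
		 * on the @node->next CPU could complete its Step A and still
		 * think its @prev (us) is valid.
		 *
		 * If that concurrent unlock()/unqueue() wins the race, we wait
		 * either for @lock to point at us (its Step B) or for a new
		 * @node->next from its Step C.
		 */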
		if (node->next) {
			next = xchg(&node->next, NULL);
			if (next)
				break;
		}

		cpu_relax_lowlatency();
	}

	return next;
}
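
/*
 * Returns true once this CPU owns the OSQ and may keep spinning on the parent
 * lock; returns false if we gave up because of need_resched() and unqueued
 * ourselves so the caller can block.
 */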
bool osq_lock(struct optimistic_spin_queue *lock)
{
	struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
	struct optimistic_spin_node *prev, *next;
	int curr = encode_cpu(smp_processor_id());
	int old;

	node->locked = 0;
	node->next = NULL;
	node->cpu = curr;
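
	/*
	 * We need both ACQUIRE (pairs with the RELEASE in the uncontended
	 * unlock() path) and RELEASE (publishes the node fields initialised
	 * above) semantics when updating the lock tail; atomic_xchg() is
	 * fully ordered and provides both.
	 */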
	old = atomic_xchg(&lock->tail, curr);
	if (old == OSQ_UNLOCKED_VAL)
		return true;

	prev = decode_cpu(old);
	node->prev = prev;
	WRITE_ONCE(prev->next, node);
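
	/*
	 * Normally @prev would be off limits after the store above, because
	 * its owner may unlock at any moment. However, the nodes live in
	 * static per-CPU storage and are therefore guaranteed to exist, which
	 * is what makes the cmpxchg()-based unqueue below safe.
	 */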
	while (!READ_ONCE(node->locked)) {
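		/*
		 * If we need to reschedule, bail out of the queue so the
		 * caller can block instead of spinning.
		 */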
		if (need_resched())
			goto unqueue;

		cpu_relax_lowlatency();
	}
	return true;

unqueue:
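	/*
	 * Step A -- stabilize @prev
	 *
	 * Undo our @prev->next assignment; this makes @prev's own
	 * unlock()/unqueue() wait for a next pointer, since the lock tail
	 * points at us (or at someone queued behind us).
	 */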
	for (;;) {
		if (prev->next == node &&
		    cmpxchg(&prev->next, node, NULL) == node)
			break;
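
		/*
		 * The cmpxchg() can only fail because we raced with an
		 * unlock(), in which case we should soon observe
		 * @node->locked becoming true.
		 */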
		if (smp_load_acquire(&node->locked))
			return true;

		cpu_relax_lowlatency();
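
		/*
		 * Or we raced with a concurrent unqueue()'s Step B, in which
		 * case its Step C will write us a new @node->prev pointer;
		 * reload it and retry.
		 */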
		prev = READ_ONCE(node->prev);
	}
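
	/*
	 * Step B -- stabilize @next
	 *
	 * As in unlock(): wait for @node->next to appear, or move the lock
	 * tail from @node back to @prev.
	 */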
	next = osq_wait_next(lock, node, prev);
	if (!next)
		return false;
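
	/*
	 * Step C -- unlink
	 *
	 * @prev is stable because it is still waiting for a new @prev->next
	 * pointer; @next is stable because our @node->next is NULL, so it
	 * will wait in its own Step A.
	 */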
	WRITE_ONCE(next->prev, prev);
	WRITE_ONCE(prev->next, next);

	return false;
}

void osq_unlock(struct optimistic_spin_queue *lock)
{
	struct optimistic_spin_node *node, *next;
	int curr = encode_cpu(smp_processor_id());
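
	/*
	 * Fast path for the uncontended case: we are still the queue tail,
	 * so simply mark the lock unlocked.
	 */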
	if (likely(atomic_cmpxchg_release(&lock->tail, curr,
					  OSQ_UNLOCKED_VAL) == curr))
		return;
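
	/*
	 * Second most likely case: we already have a queued successor;
	 * hand the lock to it.
	 */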
	node = this_cpu_ptr(&osq_node);
	next = xchg(&node->next, NULL);
	if (next) {
		WRITE_ONCE(next->locked, 1);
		return;
	}

	next = osq_wait_next(lock, node, NULL);
	if (next)
		WRITE_ONCE(next->locked, 1);
}
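
/*
 * Illustrative sketch only, not part of the original file: roughly how a
 * sleeping lock's optimistic-spin slowpath might drive the OSQ. The type
 * struct example_lock and the example_*() helpers are hypothetical stand-ins
 * for a real user such as mutex or rwsem; the block is compiled out.
 */
#if 0
struct example_lock {
	atomic_t			owner;	/* 0 == unlocked (hypothetical) */
	struct optimistic_spin_queue	osq;
};

static inline bool example_trylock(struct example_lock *lock)
{
	/* Hypothetical trylock: acquire the owner word if it is free. */
	return atomic_cmpxchg_acquire(&lock->owner, 0, 1) == 0;
}

static bool example_optimistic_spin(struct example_lock *lock)
{
	bool acquired = false;

	preempt_disable();

	/* Only one CPU spins on the owner at a time; the rest queue in the OSQ. */
	if (!osq_lock(&lock->osq))
		goto out;

	while (!need_resched()) {
		if (example_trylock(lock)) {
			acquired = true;
			break;
		}
		cpu_relax_lowlatency();
	}

	osq_unlock(&lock->osq);
out:
	preempt_enable();
	return acquired;
}
#endif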