#include <drm/ttm/ttm_lock.h>
#include <drm/ttm/ttm_module.h>
#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/sched/signal.h>
#include <linux/module.h>

#define TTM_WRITE_LOCK_PENDING    (1 << 0)
#define TTM_VT_LOCK_PENDING       (1 << 1)
#define TTM_SUSPEND_LOCK_PENDING  (1 << 2)
#define TTM_VT_LOCK               (1 << 3)
#define TTM_SUSPEND_LOCK          (1 << 4)

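/*
 * Lock state: lock->rw counts readers (> 0), is -1 when write-locked and
 * 0 when idle.  lock->flags holds the "pending" and "held" bits for the
 * exclusive write, VT and suspend modes; any non-zero flag keeps new
 * readers out.  Blocking acquirers sleep on lock->queue and are woken
 * with wake_up_all() whenever the state may have changed in their favour.
 */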
void ttm_lock_init(struct ttm_lock *lock)
{
	spin_lock_init(&lock->lock);
	init_waitqueue_head(&lock->queue);
	lock->rw = 0;
	lock->flags = 0;
	lock->kill_takers = false;
	lock->signal = SIGKILL;
}
EXPORT_SYMBOL(ttm_lock_init);

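/*
 * Drop a read lock.  Waiters are only woken when the last reader leaves,
 * since that is the only transition that can unblock a pending exclusive
 * acquirer.
 */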
void ttm_read_unlock(struct ttm_lock *lock)
{
	spin_lock(&lock->lock);
	if (--lock->rw == 0)
		wake_up_all(&lock->queue);
	spin_unlock(&lock->lock);
}
EXPORT_SYMBOL(ttm_read_unlock);

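/*
 * Wait condition for ttm_read_lock(): take a read lock only if the lock
 * is not write-held (rw >= 0) and no exclusive mode is held or pending
 * (flags == 0).  If kill_takers is set, the caller is sent lock->signal
 * instead and the attempt fails.
 */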
static bool __ttm_read_lock(struct ttm_lock *lock)
{
	bool locked = false;

	spin_lock(&lock->lock);
	if (unlikely(lock->kill_takers)) {
		send_sig(lock->signal, current, 0);
		spin_unlock(&lock->lock);
		return false;
	}
	if (lock->rw >= 0 && lock->flags == 0) {
		++lock->rw;
		locked = true;
	}
	spin_unlock(&lock->lock);
	return locked;
}

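/*
 * Take a read lock, sleeping until __ttm_read_lock() succeeds.  Returns 0,
 * or -ERESTARTSYS if @interruptible is true and a signal arrives first.
 */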
int ttm_read_lock(struct ttm_lock *lock, bool interruptible)
{
	int ret = 0;

	if (interruptible)
		ret = wait_event_interruptible(lock->queue,
					       __ttm_read_lock(lock));
	else
		wait_event(lock->queue, __ttm_read_lock(lock));
	return ret;
}
EXPORT_SYMBOL(ttm_read_lock);

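/*
 * Wait condition for ttm_read_trylock().  *locked reports whether the read
 * lock was taken.  The return value tells the caller whether to stop
 * waiting: we only keep blocking while an exclusive mode is held or
 * pending (flags != 0); if the lock is merely write-held we give up so
 * that ttm_read_trylock() can return -EBUSY instead of sleeping.
 */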
static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked)
{
	bool block = true;

	*locked = false;

	spin_lock(&lock->lock);
	if (unlikely(lock->kill_takers)) {
		send_sig(lock->signal, current, 0);
		spin_unlock(&lock->lock);
		return false;
	}
	if (lock->rw >= 0 && lock->flags == 0) {
		++lock->rw;
		block = false;
		*locked = true;
	} else if (lock->flags == 0) {
		block = false;
	}
	spin_unlock(&lock->lock);

	return !block;
}

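/*
 * Try to take a read lock.  Returns 0 on success, -EBUSY if the lock is
 * write-held, or -ERESTARTSYS if @interruptible is true and the wait for a
 * pending exclusive mode to clear is interrupted by a signal.
 */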
int ttm_read_trylock(struct ttm_lock *lock, bool interruptible)
{
	int ret = 0;
	bool locked;

	if (interruptible)
		ret = wait_event_interruptible
			(lock->queue, __ttm_read_trylock(lock, &locked));
	else
		wait_event(lock->queue, __ttm_read_trylock(lock, &locked));

	if (unlikely(ret != 0)) {
		BUG_ON(locked);
		return ret;
	}

	return (locked) ? 0 : -EBUSY;
}
EXPORT_SYMBOL(ttm_read_trylock);

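/*
 * Drop the write lock and wake everybody up: readers as well as any
 * pending exclusive acquirer may now be able to proceed.
 */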
void ttm_write_unlock(struct ttm_lock *lock)
{
	spin_lock(&lock->lock);
	lock->rw = 0;
	wake_up_all(&lock->queue);
	spin_unlock(&lock->lock);
}
EXPORT_SYMBOL(ttm_write_unlock);

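/*
 * Wait condition for ttm_write_lock(): take the write lock (rw = -1) only
 * when there are no readers or writer and no exclusive mode other than our
 * own pending bit is set.  Otherwise set TTM_WRITE_LOCK_PENDING, which
 * keeps new readers out while we wait.
 */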
static bool __ttm_write_lock(struct ttm_lock *lock)
{
	bool locked = false;

	spin_lock(&lock->lock);
	if (unlikely(lock->kill_takers)) {
		send_sig(lock->signal, current, 0);
		spin_unlock(&lock->lock);
		return false;
	}
	if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) {
		lock->rw = -1;
		lock->flags &= ~TTM_WRITE_LOCK_PENDING;
		locked = true;
	} else {
		lock->flags |= TTM_WRITE_LOCK_PENDING;
	}
	spin_unlock(&lock->lock);
	return locked;
}

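/*
 * Take the write lock, sleeping until __ttm_write_lock() succeeds.  If an
 * interruptible wait is aborted by a signal, the pending bit we may have
 * set must be cleared and the other waiters woken, or readers would stay
 * blocked indefinitely.
 */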
int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
{
	int ret = 0;

	if (interruptible) {
		ret = wait_event_interruptible(lock->queue,
					       __ttm_write_lock(lock));
		if (unlikely(ret != 0)) {
			spin_lock(&lock->lock);
			lock->flags &= ~TTM_WRITE_LOCK_PENDING;
			wake_up_all(&lock->queue);
			spin_unlock(&lock->lock);
		}
	} else
		wait_event(lock->queue, __ttm_write_lock(lock));

	return ret;
}
EXPORT_SYMBOL(ttm_write_lock);

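/*
 * Release the VT lock, returning -EINVAL if it was not actually held.
 * Waking all waiters lets blocked readers and writers back in.
 */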
static int __ttm_vt_unlock(struct ttm_lock *lock)
{
	int ret = 0;

	spin_lock(&lock->lock);
	if (unlikely(!(lock->flags & TTM_VT_LOCK)))
		ret = -EINVAL;
	lock->flags &= ~TTM_VT_LOCK;
	wake_up_all(&lock->queue);
	spin_unlock(&lock->lock);

	return ret;
}

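/*
 * Destructor for the base object registered by ttm_vt_lock().  It runs
 * when the last reference to the object is dropped, either via
 * ttm_vt_unlock() or when the holding client's object file is torn down,
 * and releases the VT lock so a dead client cannot leave it held.
 */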
static void ttm_vt_lock_remove(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct ttm_lock *lock = container_of(base, struct ttm_lock, base);
	int ret;

	*p_base = NULL;
	ret = __ttm_vt_unlock(lock);
	BUG_ON(ret != 0);
}

static bool __ttm_vt_lock(struct ttm_lock *lock)
{
	bool locked = false;

	spin_lock(&lock->lock);
	if (lock->rw == 0) {
		lock->flags &= ~TTM_VT_LOCK_PENDING;
		lock->flags |= TTM_VT_LOCK;
		locked = true;
	} else {
		lock->flags |= TTM_VT_LOCK_PENDING;
	}
	spin_unlock(&lock->lock);
	return locked;
}

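/*
 * Take the exclusive VT lock on behalf of @tfile and register a base
 * object whose destructor (ttm_vt_lock_remove) releases the lock again.
 * If the base object cannot be set up, the lock is dropped and the error
 * is returned.
 */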
int ttm_vt_lock(struct ttm_lock *lock,
		bool interruptible,
		struct ttm_object_file *tfile)
{
	int ret = 0;

	if (interruptible) {
		ret = wait_event_interruptible(lock->queue,
					       __ttm_vt_lock(lock));
		if (unlikely(ret != 0)) {
			spin_lock(&lock->lock);
			lock->flags &= ~TTM_VT_LOCK_PENDING;
			wake_up_all(&lock->queue);
			spin_unlock(&lock->lock);
			return ret;
		}
	} else
		wait_event(lock->queue, __ttm_vt_lock(lock));

	/*
	 * Add a base object, the destructor of which will
	 * make sure the lock is released if the client dies
	 * while holding it.
	 */

	ret = ttm_base_object_init(tfile, &lock->base, false,
				   ttm_lock_type, &ttm_vt_lock_remove, NULL);
	if (ret)
		(void)__ttm_vt_unlock(lock);
	else
		lock->vt_holder = tfile;

	return ret;
}
EXPORT_SYMBOL(ttm_vt_lock);

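/*
 * Release the VT lock by dropping the holder's reference to the base
 * object; the actual unlock happens in ttm_vt_lock_remove() when the
 * object is destroyed.
 */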
int ttm_vt_unlock(struct ttm_lock *lock)
{
	return ttm_ref_object_base_unref(lock->vt_holder,
					 lock->base.hash.key, TTM_REF_USAGE);
}
EXPORT_SYMBOL(ttm_vt_unlock);

void ttm_suspend_unlock(struct ttm_lock *lock)
{
	spin_lock(&lock->lock);
	lock->flags &= ~TTM_SUSPEND_LOCK;
	wake_up_all(&lock->queue);
	spin_unlock(&lock->lock);
}
EXPORT_SYMBOL(ttm_suspend_unlock);

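/*
 * The suspend lock mirrors the VT lock: ttm_suspend_lock() waits
 * (uninterruptibly) until there are no readers or writer, marking the
 * lock TTM_SUSPEND_LOCK_PENDING in the meantime to keep new readers out.
 */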
static bool __ttm_suspend_lock(struct ttm_lock *lock)
{
	bool locked = false;

	spin_lock(&lock->lock);
	if (lock->rw == 0) {
		lock->flags &= ~TTM_SUSPEND_LOCK_PENDING;
		lock->flags |= TTM_SUSPEND_LOCK;
		locked = true;
	} else {
		lock->flags |= TTM_SUSPEND_LOCK_PENDING;
	}
	spin_unlock(&lock->lock);
	return locked;
}

void ttm_suspend_lock(struct ttm_lock *lock)
{
	wait_event(lock->queue, __ttm_suspend_lock(lock));
}
EXPORT_SYMBOL(ttm_suspend_lock);