1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21#include <linux/slab.h>
22#include <linux/export.h>
23#include <linux/atomic.h>
24#include <linux/fence.h>
25
26#define CREATE_TRACE_POINTS
27#include <trace/events/fence.h>
28
29EXPORT_TRACEPOINT_SYMBOL(fence_annotate_wait_on);
30EXPORT_TRACEPOINT_SYMBOL(fence_emit);
31
32
33
34
35
36
37
38static atomic64_t fence_context_counter = ATOMIC64_INIT(0);
39
40
41
42
43
44
45
46
47u64 fence_context_alloc(unsigned num)
48{
49 BUG_ON(!num);
50 return atomic64_add_return(num, &fence_context_counter) - num;
51}
52EXPORT_SYMBOL(fence_context_alloc);
53
54
55
56
57
58
59
60
61
62
63
64
65
/**
 * fence_signal_locked - signal completion of a fence
 * @fence: the fence to signal
 *
 * Mark the fence as signaled and run every callback queued on
 * fence->cb_list.  Unlike fence_signal() this variant takes no lock
 * itself, so the caller is expected to already hold fence->lock (note the
 * unprotected cb_list walk below, which fence_signal() performs under the
 * lock).
 *
 * Returns 0 if the fence was signaled here, -EINVAL if @fence is NULL or
 * was already signaled.  The callback list is drained even in the
 * already-signaled case (see comment below).
 */
int fence_signal_locked(struct fence *fence)
{
	struct fence_cb *cur, *tmp;
	int ret = 0;

	if (WARN_ON(!fence))
		return -EINVAL;

	/*
	 * Record the signaling time once; the barrier orders the timestamp
	 * write before the SIGNALED bit set below becomes visible.
	 */
	if (!ktime_to_ns(fence->timestamp)) {
		fence->timestamp = ktime_get();
		smp_mb__before_atomic();
	}

	if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		ret = -EINVAL;

		/*
		 * We might have raced with an unlocked fence_signal() that
		 * set the bit but has not emptied cb_list yet — still run
		 * through all callbacks below.
		 */
	} else
		trace_fence_signaled(fence);

	list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
		list_del_init(&cur->node);
		cur->func(fence, cur);
	}
	return ret;
}
EXPORT_SYMBOL(fence_signal_locked);
96
97
98
99
100
101
102
103
104
105
106
/**
 * fence_signal - signal completion of a fence
 * @fence: the fence to signal
 *
 * Mark the fence as signaled and run all callbacks added with
 * fence_add_callback().  Can be called multiple times, but the fence only
 * transitions from unsignaled to signaled once, so only the first call is
 * effective.  Takes fence->lock internally around the callback walk; use
 * fence_signal_locked() if the lock is already held.
 *
 * Returns 0 on success, -EINVAL if @fence is NULL or was already signaled.
 */
int fence_signal(struct fence *fence)
{
	unsigned long flags;

	if (!fence)
		return -EINVAL;

	/*
	 * Record the signaling time once; the barrier orders the timestamp
	 * write before the SIGNALED bit set below.
	 * NOTE(review): this write happens outside fence->lock, so two
	 * concurrent signalers could both observe a zero timestamp and race
	 * on the store — confirm callers serialize signaling of one fence.
	 */
	if (!ktime_to_ns(fence->timestamp)) {
		fence->timestamp = ktime_get();
		smp_mb__before_atomic();
	}

	if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return -EINVAL;

	trace_fence_signaled(fence);

	/*
	 * fence_add_callback() sets ENABLE_SIGNAL before queueing, so the
	 * (locked) list walk is only needed once that bit is set.
	 */
	if (test_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
		struct fence_cb *cur, *tmp;

		spin_lock_irqsave(fence->lock, flags);
		list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
			list_del_init(&cur->node);
			cur->func(fence, cur);
		}
		spin_unlock_irqrestore(fence->lock, flags);
	}
	return 0;
}
EXPORT_SYMBOL(fence_signal);
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154signed long
155fence_wait_timeout(struct fence *fence, bool intr, signed long timeout)
156{
157 signed long ret;
158
159 if (WARN_ON(timeout < 0))
160 return -EINVAL;
161
162 if (timeout == 0)
163 return fence_is_signaled(fence);
164
165 trace_fence_wait_start(fence);
166 ret = fence->ops->wait(fence, intr, timeout);
167 trace_fence_wait_end(fence);
168 return ret;
169}
170EXPORT_SYMBOL(fence_wait_timeout);
171
172void fence_release(struct kref *kref)
173{
174 struct fence *fence =
175 container_of(kref, struct fence, refcount);
176
177 trace_fence_destroy(fence);
178
179 BUG_ON(!list_empty(&fence->cb_list));
180
181 if (fence->ops->release)
182 fence->ops->release(fence);
183 else
184 fence_free(fence);
185}
186EXPORT_SYMBOL(fence_release);
187
/**
 * fence_free - default release function for fences
 * @fence: fence to free
 *
 * Frees the fence with kfree_rcu(), i.e. after an RCU grace period, so
 * concurrent RCU readers can still safely dereference it.
 */
void fence_free(struct fence *fence)
{
	kfree_rcu(fence, rcu);
}
EXPORT_SYMBOL(fence_free);
193
194
195
196
197
198
199
200
201void fence_enable_sw_signaling(struct fence *fence)
202{
203 unsigned long flags;
204
205 if (!test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) &&
206 !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
207 trace_fence_enable_signal(fence);
208
209 spin_lock_irqsave(fence->lock, flags);
210
211 if (!fence->ops->enable_signaling(fence))
212 fence_signal_locked(fence);
213
214 spin_unlock_irqrestore(fence->lock, flags);
215 }
216}
217EXPORT_SYMBOL(fence_enable_sw_signaling);
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
/**
 * fence_add_callback - add a callback to be called when the fence
 * is signaled
 * @fence: [in] the fence to wait on
 * @cb: [in] the callback to register
 * @func: [in] the function to call
 *
 * @cb is initialized here; no initialization by the caller is required.
 * On success @func will be called (with @fence and @cb as arguments) when
 * the fence signals.  On failure @cb->node is left initialized-empty so a
 * later fence_remove_callback() harmlessly returns false.
 *
 * Returns 0 on success, -EINVAL if @fence or @func is NULL, or -ENOENT if
 * the fence is already signaled (in which case @func is NOT called).
 */
int fence_add_callback(struct fence *fence, struct fence_cb *cb,
		       fence_func_t func)
{
	unsigned long flags;
	int ret = 0;
	bool was_set;

	if (WARN_ON(!fence || !func))
		return -EINVAL;

	/* Fast path: already signaled, no lock needed. */
	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		INIT_LIST_HEAD(&cb->node);
		return -ENOENT;
	}

	spin_lock_irqsave(fence->lock, flags);

	was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);

	/* Re-check under the lock: we may have raced with a signaler. */
	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		ret = -ENOENT;
	else if (!was_set) {
		/* First enabler: ask the driver to start signaling.  A
		 * false return means the fence is already done. */
		trace_fence_enable_signal(fence);

		if (!fence->ops->enable_signaling(fence)) {
			fence_signal_locked(fence);
			ret = -ENOENT;
		}
	}

	if (!ret) {
		cb->func = func;
		list_add_tail(&cb->node, &fence->cb_list);
	} else
		INIT_LIST_HEAD(&cb->node);
	spin_unlock_irqrestore(fence->lock, flags);

	return ret;
}
EXPORT_SYMBOL(fence_add_callback);
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298bool
299fence_remove_callback(struct fence *fence, struct fence_cb *cb)
300{
301 unsigned long flags;
302 bool ret;
303
304 spin_lock_irqsave(fence->lock, flags);
305
306 ret = !list_empty(&cb->node);
307 if (ret)
308 list_del_init(&cb->node);
309
310 spin_unlock_irqrestore(fence->lock, flags);
311
312 return ret;
313}
314EXPORT_SYMBOL(fence_remove_callback);
315
/* Book-keeping for one task parked in fence_default_wait(). */
struct default_wait_cb {
	struct fence_cb base;	/* callback hooked onto fence->cb_list */
	struct task_struct *task;	/* task to wake when the fence signals */
};

/* Fence callback: wake the task that queued this default_wait_cb. */
static void
fence_default_wait_cb(struct fence *fence, struct fence_cb *cb)
{
	struct default_wait_cb *wait =
		container_of(cb, struct default_wait_cb, base);

	wake_up_state(wait->task, TASK_NORMAL);
}
329
330
331
332
333
334
335
336
337
338
339
/**
 * fence_default_wait - default sleep until the fence gets signaled
 * or until timeout elapses
 * @fence: [in] the fence to wait on
 * @intr: [in] if true, do an interruptible wait
 * @timeout: [in] timeout value in jiffies
 *
 * Queues a wake-up callback on the fence and sleeps until it fires or the
 * timeout runs out.
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
 * remaining timeout in jiffies on success (@timeout itself if the fence
 * was already signaled on entry).
 */
signed long
fence_default_wait(struct fence *fence, bool intr, signed long timeout)
{
	struct default_wait_cb cb;
	unsigned long flags;
	signed long ret = timeout;
	bool was_set;

	/* Fast path: nothing to wait for. */
	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return timeout;

	spin_lock_irqsave(fence->lock, flags);

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);

	/* May have signaled between the unlocked check and taking the lock. */
	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		goto out;

	if (!was_set) {
		trace_fence_enable_signal(fence);

		/* A false return from the driver means "already signaled". */
		if (!fence->ops->enable_signaling(fence)) {
			fence_signal_locked(fence);
			goto out;
		}
	}

	/* Park ourselves on the callback list so the signaler wakes us. */
	cb.base.func = fence_default_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &fence->cb_list);

	while (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) {
		/* Set state before dropping the lock so a wake-up between
		 * unlock and schedule_timeout() is not lost. */
		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(fence->lock, flags);

		ret = schedule_timeout(ret);

		spin_lock_irqsave(fence->lock, flags);
		if (ret > 0 && intr && signal_pending(current))
			ret = -ERESTARTSYS;
	}

	/* Our callback may already have fired and dequeued itself. */
	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);
	__set_current_state(TASK_RUNNING);

out:
	spin_unlock_irqrestore(fence->lock, flags);
	return ret;
}
EXPORT_SYMBOL(fence_default_wait);
399
400static bool
401fence_test_signaled_any(struct fence **fences, uint32_t count)
402{
403 int i;
404
405 for (i = 0; i < count; ++i) {
406 struct fence *fence = fences[i];
407 if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
408 return true;
409 }
410 return false;
411}
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
/**
 * fence_wait_any_timeout - sleep until any fence gets signaled
 * or until timeout elapses
 * @fences: [in] array of fences to wait on
 * @count: [in] number of fences to wait on
 * @intr: [in] if true, do an interruptible wait
 * @timeout: [in] timeout value in jiffies
 *
 * Waits for the first fence in the array to signal.  The caller must hold
 * a reference on every fence in the array for the duration of the call,
 * otherwise a fence could be freed while we still have a callback queued
 * on it.
 *
 * Returns -EINVAL on bad arguments or when a fence uses a custom wait
 * implementation, -ENOMEM on allocation failure, -ERESTARTSYS if
 * interrupted, 0 if the wait timed out, or the remaining timeout in
 * jiffies on success.
 */
signed long
fence_wait_any_timeout(struct fence **fences, uint32_t count,
		       bool intr, signed long timeout)
{
	struct default_wait_cb *cb;
	signed long ret = timeout;
	unsigned i;

	if (WARN_ON(!fences || !count || timeout < 0))
		return -EINVAL;

	/* Zero timeout: non-blocking poll of each fence. */
	if (timeout == 0) {
		for (i = 0; i < count; ++i)
			if (fence_is_signaled(fences[i]))
				return 1;

		return 0;
	}

	cb = kcalloc(count, sizeof(struct default_wait_cb), GFP_KERNEL);
	if (cb == NULL) {
		ret = -ENOMEM;
		goto err_free_cb;	/* kfree(NULL) is a no-op */
	}

	/* Queue a wake-up callback on every fence. */
	for (i = 0; i < count; ++i) {
		struct fence *fence = fences[i];

		/* Only fences using the default wait path are supported. */
		if (fence->ops->wait != fence_default_wait) {
			ret = -EINVAL;
			goto fence_rm_cb;
		}

		cb[i].task = current;
		if (fence_add_callback(fence, &cb[i].base,
				       fence_default_wait_cb)) {
			/* This fence is already signaled; ret is still the
			 * (positive) remaining timeout. */
			goto fence_rm_cb;
		}
	}

	while (ret > 0) {
		/* Set state before the signaled check so a wake-up racing
		 * with the check is not lost across schedule_timeout(). */
		if (intr)
			set_current_state(TASK_INTERRUPTIBLE);
		else
			set_current_state(TASK_UNINTERRUPTIBLE);

		if (fence_test_signaled_any(fences, count))
			break;

		ret = schedule_timeout(ret);

		if (ret > 0 && intr && signal_pending(current))
			ret = -ERESTARTSYS;
	}

	__set_current_state(TASK_RUNNING);

fence_rm_cb:
	/* Unwind only the callbacks actually added: indices 0..i-1. */
	while (i-- > 0)
		fence_remove_callback(fences[i], &cb[i].base);

err_free_cb:
	kfree(cb);

	return ret;
}
EXPORT_SYMBOL(fence_wait_any_timeout);
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514void
515fence_init(struct fence *fence, const struct fence_ops *ops,
516 spinlock_t *lock, u64 context, unsigned seqno)
517{
518 BUG_ON(!lock);
519 BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
520 !ops->get_driver_name || !ops->get_timeline_name);
521
522 kref_init(&fence->refcount);
523 fence->ops = ops;
524 INIT_LIST_HEAD(&fence->cb_list);
525 fence->lock = lock;
526 fence->context = context;
527 fence->seqno = seqno;
528 fence->flags = 0UL;
529
530 trace_fence_init(fence);
531}
532EXPORT_SYMBOL(fence_init);
533