/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016-2018 Intel Corporation
 */

#include "i915_drv.h"

#include "i915_active.h"
#include "i915_syncmap.h"
#include "intel_gt.h"
#include "intel_ring.h"
#include "intel_timeline.h"

#define ptr_set_bit(ptr, bit) ((typeof(ptr))((unsigned long)(ptr) | BIT(bit)))
#define ptr_test_bit(ptr, bit) ((unsigned long)(ptr) & BIT(bit))

#define CACHELINE_BITS 6
#define CACHELINE_FREE CACHELINE_BITS

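/*
 * A page of seqno space is shared between timelines: each HWSP page is
 * carved into 64-byte cachelines, with free_bitmap tracking which of the
 * 64 cachelines are still available for allocation.
 */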
struct intel_timeline_hwsp {
	struct intel_gt *gt;
	struct intel_gt_timelines *gt_timelines;
	struct list_head free_link;
	struct i915_vma *vma;
	u64 free_bitmap;
};

static struct i915_vma *__hwsp_alloc(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma))
		i915_gem_object_put(obj);

	return vma;
}

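/*
 * Allocate one cacheline from an HWSP page with space available, creating
 * a new page (and its tracking structure) if the freelist is empty.
 */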
static struct i915_vma *
hwsp_alloc(struct intel_timeline *timeline, unsigned int *cacheline)
{
	struct intel_gt_timelines *gt = &timeline->gt->timelines;
	struct intel_timeline_hwsp *hwsp;

	BUILD_BUG_ON(BITS_PER_TYPE(u64) * CACHELINE_BYTES > PAGE_SIZE);

	spin_lock_irq(&gt->hwsp_lock);

	/* hwsp_free_list only contains HWSP that have available cachelines */
	hwsp = list_first_entry_or_null(&gt->hwsp_free_list,
					typeof(*hwsp), free_link);
	if (!hwsp) {
		struct i915_vma *vma;

		spin_unlock_irq(&gt->hwsp_lock);

		hwsp = kmalloc(sizeof(*hwsp), GFP_KERNEL);
		if (!hwsp)
			return ERR_PTR(-ENOMEM);

		vma = __hwsp_alloc(timeline->gt);
		if (IS_ERR(vma)) {
			kfree(hwsp);
			return vma;
		}

		GT_TRACE(timeline->gt, "new HWSP allocated\n");

		vma->private = hwsp;
		hwsp->gt = timeline->gt;
		hwsp->vma = vma;
		hwsp->free_bitmap = ~0ull;
		hwsp->gt_timelines = gt;

		spin_lock_irq(&gt->hwsp_lock);
		list_add(&hwsp->free_link, &gt->hwsp_free_list);
	}

	GEM_BUG_ON(!hwsp->free_bitmap);
	*cacheline = __ffs64(hwsp->free_bitmap);
	hwsp->free_bitmap &= ~BIT_ULL(*cacheline);
	if (!hwsp->free_bitmap)
		list_del(&hwsp->free_link);

	spin_unlock_irq(&gt->hwsp_lock);

	GEM_BUG_ON(hwsp->vma->private != hwsp);
	return hwsp->vma;
}

static void __idle_hwsp_free(struct intel_timeline_hwsp *hwsp, int cacheline)
{
	struct intel_gt_timelines *gt = hwsp->gt_timelines;
	unsigned long flags;

	spin_lock_irqsave(&gt->hwsp_lock, flags);

	/* As a cacheline becomes available, publish the HWSP on the freelist */
	if (!hwsp->free_bitmap)
		list_add_tail(&hwsp->free_link, &gt->hwsp_free_list);

	GEM_BUG_ON(cacheline >= BITS_PER_TYPE(hwsp->free_bitmap));
	hwsp->free_bitmap |= BIT_ULL(cacheline);

	/* And if no one is left using it, give the page back to the system */
	if (hwsp->free_bitmap == ~0ull) {
		i915_vma_put(hwsp->vma);
		list_del(&hwsp->free_link);
		kfree(hwsp);
	}

	spin_unlock_irqrestore(&gt->hwsp_lock, flags);
}

static void __rcu_cacheline_free(struct rcu_head *rcu)
{
	struct intel_timeline_cacheline *cl =
		container_of(rcu, typeof(*cl), rcu);

	i915_active_fini(&cl->active);
	kfree(cl);
}

static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
{
	GEM_BUG_ON(!i915_active_is_idle(&cl->active));

	i915_gem_object_unpin_map(cl->hwsp->vma->obj);
	i915_vma_put(cl->hwsp->vma);
	__idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));

	call_rcu(&cl->rcu, __rcu_cacheline_free);
}

__i915_active_call
static void __cacheline_retire(struct i915_active *active)
{
	struct intel_timeline_cacheline *cl =
		container_of(active, typeof(*cl), active);

	i915_vma_unpin(cl->hwsp->vma);
	if (ptr_test_bit(cl->vaddr, CACHELINE_FREE))
		__idle_cacheline_free(cl);
}

static int __cacheline_active(struct i915_active *active)
{
	struct intel_timeline_cacheline *cl =
		container_of(active, typeof(*cl), active);

	__i915_vma_pin(cl->hwsp->vma);
	return 0;
}

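/*
 * Wrap one cacheline of an HWSP page in a tracking structure: keep a
 * writeback map of the whole page pinned, and pack the cacheline index
 * into the low bits of the vaddr (see CACHELINE_BITS).
 */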
static struct intel_timeline_cacheline *
cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline)
{
	struct intel_timeline_cacheline *cl;
	void *vaddr;

	GEM_BUG_ON(cacheline >= BIT(CACHELINE_BITS));

	cl = kmalloc(sizeof(*cl), GFP_KERNEL);
	if (!cl)
		return ERR_PTR(-ENOMEM);

	vaddr = i915_gem_object_pin_map(hwsp->vma->obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		kfree(cl);
		return ERR_CAST(vaddr);
	}

	i915_vma_get(hwsp->vma);
	cl->hwsp = hwsp;
	cl->vaddr = page_pack_bits(vaddr, cacheline);

	i915_active_init(&cl->active, __cacheline_active, __cacheline_retire);

	return cl;
}

static void cacheline_acquire(struct intel_timeline_cacheline *cl)
{
	if (cl)
		i915_active_acquire(&cl->active);
}

static void cacheline_release(struct intel_timeline_cacheline *cl)
{
	if (cl)
		i915_active_release(&cl->active);
}

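/*
 * If the cacheline is still busy on the GPU, mark it with CACHELINE_FREE
 * and let retirement free it; otherwise release it immediately.
 */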
static void cacheline_free(struct intel_timeline_cacheline *cl)
{
	if (!i915_active_acquire_if_busy(&cl->active)) {
		__idle_cacheline_free(cl);
		return;
	}

	GEM_BUG_ON(ptr_test_bit(cl->vaddr, CACHELINE_FREE));
	cl->vaddr = ptr_set_bit(cl->vaddr, CACHELINE_FREE);

	i915_active_release(&cl->active);
}

static int intel_timeline_init(struct intel_timeline *timeline,
			       struct intel_gt *gt,
			       struct i915_vma *hwsp)
{
	void *vaddr;

	kref_init(&timeline->kref);
	atomic_set(&timeline->pin_count, 0);

	timeline->gt = gt;

	timeline->has_initial_breadcrumb = !hwsp;
	timeline->hwsp_cacheline = NULL;

	if (!hwsp) {
		struct intel_timeline_cacheline *cl;
		unsigned int cacheline;

		hwsp = hwsp_alloc(timeline, &cacheline);
		if (IS_ERR(hwsp))
			return PTR_ERR(hwsp);

		cl = cacheline_alloc(hwsp->private, cacheline);
		if (IS_ERR(cl)) {
			__idle_hwsp_free(hwsp->private, cacheline);
			return PTR_ERR(cl);
		}

		timeline->hwsp_cacheline = cl;
		timeline->hwsp_offset = cacheline * CACHELINE_BYTES;

		vaddr = page_mask_bits(cl->vaddr);
	} else {
		timeline->hwsp_offset = I915_GEM_HWS_SEQNO_ADDR;

		vaddr = i915_gem_object_pin_map(hwsp->obj, I915_MAP_WB);
		if (IS_ERR(vaddr))
			return PTR_ERR(vaddr);
	}

	timeline->hwsp_seqno =
		memset(vaddr + timeline->hwsp_offset, 0, CACHELINE_BYTES);

	timeline->hwsp_ggtt = i915_vma_get(hwsp);
	GEM_BUG_ON(timeline->hwsp_offset >= hwsp->size);

	timeline->fence_context = dma_fence_context_alloc(1);

	mutex_init(&timeline->mutex);

	INIT_ACTIVE_FENCE(&timeline->last_request);
	INIT_LIST_HEAD(&timeline->requests);

	i915_syncmap_init(&timeline->sync);

	return 0;
}

void intel_gt_init_timelines(struct intel_gt *gt)
{
	struct intel_gt_timelines *timelines = &gt->timelines;

	spin_lock_init(&timelines->lock);
	INIT_LIST_HEAD(&timelines->active_list);

	spin_lock_init(&timelines->hwsp_lock);
	INIT_LIST_HEAD(&timelines->hwsp_free_list);
}

static void intel_timeline_fini(struct intel_timeline *timeline)
{
	GEM_BUG_ON(atomic_read(&timeline->pin_count));
	GEM_BUG_ON(!list_empty(&timeline->requests));
	GEM_BUG_ON(timeline->retire);

	if (timeline->hwsp_cacheline)
		cacheline_free(timeline->hwsp_cacheline);
	else
		i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj);

	i915_vma_put(timeline->hwsp_ggtt);
}

struct intel_timeline *
intel_timeline_create(struct intel_gt *gt, struct i915_vma *global_hwsp)
{
	struct intel_timeline *timeline;
	int err;

	timeline = kzalloc(sizeof(*timeline), GFP_KERNEL);
	if (!timeline)
		return ERR_PTR(-ENOMEM);

	err = intel_timeline_init(timeline, gt, global_hwsp);
	if (err) {
		kfree(timeline);
		return ERR_PTR(err);
	}

	return timeline;
}

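/*
 * Pin the backing HWSP for use. The atomics below tolerate concurrent
 * pinners: only the first pin takes the GGTT pin and cacheline reference,
 * and a racing loser drops its redundant references again.
 */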
int intel_timeline_pin(struct intel_timeline *tl)
{
	int err;

	if (atomic_add_unless(&tl->pin_count, 1, 0))
		return 0;

	err = i915_ggtt_pin(tl->hwsp_ggtt, 0, PIN_HIGH);
	if (err)
		return err;

	tl->hwsp_offset =
		i915_ggtt_offset(tl->hwsp_ggtt) +
		offset_in_page(tl->hwsp_offset);
	GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n",
		 tl->fence_context, tl->hwsp_offset);

	cacheline_acquire(tl->hwsp_cacheline);
	if (atomic_fetch_inc(&tl->pin_count)) {
		cacheline_release(tl->hwsp_cacheline);
		__i915_vma_unpin(tl->hwsp_ggtt);
	}

	return 0;
}

void intel_timeline_reset_seqno(const struct intel_timeline *tl)
{
	/* Must be pinned to be writable, and no requests in flight. */
	GEM_BUG_ON(!atomic_read(&tl->pin_count));
	WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno);
}

void intel_timeline_enter(struct intel_timeline *tl)
{
	struct intel_gt_timelines *timelines = &tl->gt->timelines;

	/*
	 * Pretend we are serialised by the timeline->mutex.
	 *
	 * While generally true, there are a few exceptions to the rule
	 * for the engine->kernel_context being used to manage power
	 * transitions. As the engine_park may be called from under any
	 * timeline, it uses the power mutex as a global serialisation
	 * lock to prevent any other request entering its timeline.
	 *
	 * The rule is generally:
	 *	engine->mutex,
	 *	    before engine->timeline->mutex
	 *
	 * However, retirement does not know along which engine it is
	 * retiring and so cannot partake in the engine-pm barrier; there
	 * we use tl->active_count to pin the timeline in the active_list
	 * while the locks are dropped. As that runs outside the engine-pm
	 * barrier, tl->active_count must be manipulated atomically.
	 */
	lockdep_assert_held(&tl->mutex);

	if (atomic_add_unless(&tl->active_count, 1, 0))
		return;

	spin_lock(&timelines->lock);
	if (!atomic_fetch_inc(&tl->active_count)) {
		/*
		 * The HWSP is volatile, and may have been lost while inactive,
		 * e.g. across suspend/resume. Be paranoid, and ensure that
		 * the HWSP value matches our seqno so we don't proclaim
		 * the next request as already complete.
		 */
		intel_timeline_reset_seqno(tl);
		list_add_tail(&tl->link, &timelines->active_list);
	}
	spin_unlock(&timelines->lock);
}

void intel_timeline_exit(struct intel_timeline *tl)
{
	struct intel_gt_timelines *timelines = &tl->gt->timelines;

	/* See intel_timeline_enter() */
	lockdep_assert_held(&tl->mutex);

	GEM_BUG_ON(!atomic_read(&tl->active_count));
	if (atomic_add_unless(&tl->active_count, -1, 1))
		return;

	spin_lock(&timelines->lock);
	if (atomic_dec_and_test(&tl->active_count))
		list_del(&tl->link);
	spin_unlock(&timelines->lock);

	/*
	 * Since this timeline is idle, all barriers upon which we were waiting
	 * must also be complete and so we can discard the last used barriers
	 * without loss of information.
	 */
	i915_syncmap_free(&tl->sync);
}

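/*
 * A timeline with an initial breadcrumb consumes two seqno per request
 * (one to mark the start, one the end), keeping tl->seqno even, as the
 * GEM_BUG_ON below asserts; other timelines advance by one.
 */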
static u32 timeline_advance(struct intel_timeline *tl)
{
	GEM_BUG_ON(!atomic_read(&tl->pin_count));
	GEM_BUG_ON(tl->seqno & tl->has_initial_breadcrumb);

	return tl->seqno += 1 + tl->has_initial_breadcrumb;
}

static void timeline_rollback(struct intel_timeline *tl)
{
	tl->seqno -= 1 + tl->has_initial_breadcrumb;
}

static noinline int
__intel_timeline_get_seqno(struct intel_timeline *tl,
			   struct i915_request *rq,
			   u32 *seqno)
{
	struct intel_timeline_cacheline *cl;
	unsigned int cacheline;
	struct i915_vma *vma;
	void *vaddr;
	int err;

	might_lock(&tl->gt->ggtt->vm.mutex);
	GT_TRACE(tl->gt, "timeline:%llx wrapped\n", tl->fence_context);

	/*
	 * If there is an outstanding GPU reference to this cacheline,
	 * such as it being sampled by a HW semaphore on another timeline,
	 * we cannot wraparound our seqno value (the HW semaphore does
	 * a strict greater-than-or-equals compare, not i915_seqno_passed).
	 * So if the cacheline is still busy, we must detach ourselves
	 * from it and leave it inflight alongside its users.
	 *
	 * However, if nobody is watching and we can guarantee that nobody
	 * will, we could simply reuse the same cacheline. That seems
	 * unlikely to be worth the win, as beyond the current request, at
	 * worst we read back an invalid seqno and discard it.
	 *
	 * So instead we detach from the old cacheline and allocate a
	 * fresh one for the wrapped seqno.
	 */
	vma = hwsp_alloc(tl, &cacheline);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_rollback;
	}

	err = i915_ggtt_pin(vma, 0, PIN_HIGH);
	if (err) {
		__idle_hwsp_free(vma->private, cacheline);
		goto err_rollback;
	}

	cl = cacheline_alloc(vma->private, cacheline);
	if (IS_ERR(cl)) {
		err = PTR_ERR(cl);
		__idle_hwsp_free(vma->private, cacheline);
		goto err_unpin;
	}
	GEM_BUG_ON(cl->hwsp->vma != vma);

	/*
	 * Attach the old cacheline to the current request, so that we only
	 * free it after the current request is retired, which ensures that
	 * all writes into the cacheline from previous requests are complete.
	 */
	err = i915_active_ref(&tl->hwsp_cacheline->active, tl, &rq->fence);
	if (err)
		goto err_cacheline;

	cacheline_release(tl->hwsp_cacheline); /* ownership now in cl */
	cacheline_free(tl->hwsp_cacheline);

	i915_vma_unpin(tl->hwsp_ggtt); /* binding kept alive by old cacheline */
	i915_vma_put(tl->hwsp_ggtt);

	tl->hwsp_ggtt = i915_vma_get(vma);

	vaddr = page_mask_bits(cl->vaddr);
	tl->hwsp_offset = cacheline * CACHELINE_BYTES;
	tl->hwsp_seqno =
		memset(vaddr + tl->hwsp_offset, 0, CACHELINE_BYTES);

	tl->hwsp_offset += i915_ggtt_offset(vma);
	GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n",
		 tl->fence_context, tl->hwsp_offset);

	cacheline_acquire(cl);
	tl->hwsp_cacheline = cl;

	*seqno = timeline_advance(tl);
	GEM_BUG_ON(i915_seqno_passed(*tl->hwsp_seqno, *seqno));
	return 0;

err_cacheline:
	cacheline_free(cl);
err_unpin:
	i915_vma_unpin(vma);
err_rollback:
	timeline_rollback(tl);
	return err;
}

int intel_timeline_get_seqno(struct intel_timeline *tl,
			     struct i915_request *rq,
			     u32 *seqno)
{
	*seqno = timeline_advance(tl);

	/* Replace the HWSP on wraparound for HW semaphores */
	if (unlikely(!*seqno && tl->hwsp_cacheline))
		return __intel_timeline_get_seqno(tl, rq, seqno);

	return 0;
}

static int cacheline_ref(struct intel_timeline_cacheline *cl,
			 struct i915_request *rq)
{
	return i915_active_add_request(&cl->active, rq);
}

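/*
 * Report the GGTT address at which from's seqno is written, keeping the
 * cacheline alive for as long as to may need to read it. Returns a
 * positive value if from has already completed and the address is no
 * longer meaningful.
 */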
int intel_timeline_read_hwsp(struct i915_request *from,
			     struct i915_request *to,
			     u32 *hwsp)
{
	struct intel_timeline_cacheline *cl;
	int err;

	GEM_BUG_ON(!rcu_access_pointer(from->hwsp_cacheline));

	rcu_read_lock();
	cl = rcu_dereference(from->hwsp_cacheline);
	if (i915_request_completed(from)) /* confirm cacheline is valid */
		goto unlock;
	if (unlikely(!i915_active_acquire_if_busy(&cl->active)))
		goto unlock; /* seqno wrapped and completed! */
	if (unlikely(i915_request_completed(from)))
		goto release;
	rcu_read_unlock();

	err = cacheline_ref(cl, to);
	if (err)
		goto out;

	*hwsp = i915_ggtt_offset(cl->hwsp->vma) +
		ptr_unmask_bits(cl->vaddr, CACHELINE_BITS) * CACHELINE_BYTES;

out:
	i915_active_release(&cl->active);
	return err;

release:
	i915_active_release(&cl->active);
unlock:
	rcu_read_unlock();
	return 1;
}

void intel_timeline_unpin(struct intel_timeline *tl)
{
	GEM_BUG_ON(!atomic_read(&tl->pin_count));
	if (!atomic_dec_and_test(&tl->pin_count))
		return;

	cacheline_release(tl->hwsp_cacheline);

	__i915_vma_unpin(tl->hwsp_ggtt);
}

void __intel_timeline_free(struct kref *kref)
{
	struct intel_timeline *timeline =
		container_of(kref, typeof(*timeline), kref);

	intel_timeline_fini(timeline);
	kfree_rcu(timeline, rcu);
}

void intel_gt_fini_timelines(struct intel_gt *gt)
{
	struct intel_gt_timelines *timelines = &gt->timelines;

	GEM_BUG_ON(!list_empty(&timelines->active_list));
	GEM_BUG_ON(!list_empty(&timelines->hwsp_free_list));
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "gt/selftests/mock_timeline.c"
#include "gt/selftest_timeline.c"
#endif