// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014 Intel Corporation
 */

#include <linux/circ_buf.h>

#include "gem/i915_gem_context.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_lrc_reg.h"
#include "gt/intel_ring.h"

#include "intel_guc_submission.h"

#include "i915_drv.h"
#include "i915_trace.h"

/**
 * DOC: GuC-based command submission
 *
 * GuC stage descriptor:
 * During initialization, the driver allocates a static pool of stage
 * descriptors and shares them with the GuC. Currently we use only one
 * descriptor, at index 0. It tells the firmware where to find the process
 * descriptor and the workqueue, which is all it needs for the proxy style
 * of submission used here: rather than describing our HW contexts up
 * front, the context descriptor is packed into each work item.
 *
 * Work items:
 * The workqueue is a circular buffer of work items. Each item carries the
 * target engine, the context descriptor and the new ring tail, which is
 * enough for the firmware to submit the request on our behalf. Only the
 * in-order item type (WQ_TYPE_INORDER) is used by this legacy submission
 * path. See guc_add_request() and guc_wq_item_append().
 */

static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
	return rb_entry(rb, struct i915_priolist, node);
}

static struct guc_stage_desc *__get_stage_desc(struct intel_guc *guc, u32 id)
{
	struct guc_stage_desc *base = guc->stage_desc_pool_vaddr;

	return &base[id];
}

static int guc_workqueue_create(struct intel_guc *guc)
{
	return intel_guc_allocate_and_map_vma(guc, GUC_WQ_SIZE, &guc->workqueue,
					      &guc->workqueue_vaddr);
}

static void guc_workqueue_destroy(struct intel_guc *guc)
{
	i915_vma_unpin_and_release(&guc->workqueue, I915_VMA_RELEASE_MAP);
}

/*
 * The process descriptor is shared with the GuC firmware: it holds the
 * head/tail pointers and the status of the workqueue for this client.
 */
static int guc_proc_desc_create(struct intel_guc *guc)
{
	const u32 size = PAGE_ALIGN(sizeof(struct guc_process_desc));

	return intel_guc_allocate_and_map_vma(guc, size, &guc->proc_desc,
					      &guc->proc_desc_vaddr);
}

static void guc_proc_desc_destroy(struct intel_guc *guc)
{
	i915_vma_unpin_and_release(&guc->proc_desc, I915_VMA_RELEASE_MAP);
}

static void guc_proc_desc_init(struct intel_guc *guc)
{
	struct guc_process_desc *desc;

	desc = memset(guc->proc_desc_vaddr, 0, sizeof(*desc));

	/*
	 * The doorbell and workqueue base addresses are only meaningful for
	 * user-mode clients, which set them to pointers in their own address
	 * space. The kernel-mode client leaves them zeroed; the firmware
	 * does not rely on them for this style of proxy submission.
	 */
	desc->wq_base_addr = 0;
	desc->db_base_addr = 0;

	desc->wq_size_bytes = GUC_WQ_SIZE;
	desc->wq_status = WQ_STATUS_ACTIVE;
	desc->priority = GUC_CLIENT_PRIORITY_KMD_NORMAL;
}

static void guc_proc_desc_fini(struct intel_guc *guc)
{
	memset(guc->proc_desc_vaddr, 0, sizeof(struct guc_process_desc));
}

static int guc_stage_desc_pool_create(struct intel_guc *guc)
{
	u32 size = PAGE_ALIGN(sizeof(struct guc_stage_desc) *
			      GUC_MAX_STAGE_DESCRIPTORS);

	return intel_guc_allocate_and_map_vma(guc, size, &guc->stage_desc_pool,
					      &guc->stage_desc_pool_vaddr);
}

static void guc_stage_desc_pool_destroy(struct intel_guc *guc)
{
	i915_vma_unpin_and_release(&guc->stage_desc_pool, I915_VMA_RELEASE_MAP);
}

/*
 * Initialise/clear the stage descriptor shared with the GuC firmware.
 *
 * This descriptor tells the GuC where (in GGTT space) to find the important
 * data structures related to work submission (process descriptor, write
 * queue, etc).
 */
static void guc_stage_desc_init(struct intel_guc *guc)
{
	struct guc_stage_desc *desc;

	/* we only use 1 stage desc, so hardcode it to 0 */
	desc = __get_stage_desc(guc, 0);
	memset(desc, 0, sizeof(*desc));

	desc->attribute = GUC_STAGE_DESC_ATTR_ACTIVE |
			  GUC_STAGE_DESC_ATTR_KERNEL;

	desc->stage_id = 0;
	desc->priority = GUC_CLIENT_PRIORITY_KMD_NORMAL;

	desc->process_desc = intel_guc_ggtt_offset(guc, guc->proc_desc);
	desc->wq_addr = intel_guc_ggtt_offset(guc, guc->workqueue);
	desc->wq_size = GUC_WQ_SIZE;
}

static void guc_stage_desc_fini(struct intel_guc *guc)
{
	struct guc_stage_desc *desc;

	desc = __get_stage_desc(guc, 0);
	memset(desc, 0, sizeof(*desc));
}

static void guc_wq_item_append(struct intel_guc *guc,
			       u32 target_engine, u32 context_desc,
			       u32 ring_tail, u32 fence_id)
{
	/* wqi_len is in DWords, and does not include the one-word header */
	const size_t wqi_size = sizeof(struct guc_wq_item);
	const u32 wqi_len = wqi_size / sizeof(u32) - 1;
	struct guc_process_desc *desc = guc->proc_desc_vaddr;
	struct guc_wq_item *wqi;
	u32 wq_off;

	lockdep_assert_held(&guc->wq_lock);

	/*
	 * The workqueue item is 4 DWords and the workqueue buffer is a whole
	 * number of pages, so an item can neither cross a page boundary nor
	 * wrap around the end of the buffer. This keeps the fill-in below a
	 * simple structure write.
	 */
	BUILD_BUG_ON(wqi_size != 16);

	/* We expect the WQ to be active if we're appending items to it */
	GEM_BUG_ON(desc->wq_status != WQ_STATUS_ACTIVE);

	/* Free space is guaranteed. */
	wq_off = READ_ONCE(desc->tail);
	GEM_BUG_ON(CIRC_SPACE(wq_off, READ_ONCE(desc->head),
			      GUC_WQ_SIZE) < wqi_size);
	GEM_BUG_ON(wq_off & (wqi_size - 1));

	wqi = guc->workqueue_vaddr + wq_off;

	/* Now fill in the 4-word work queue item */
	wqi->header = WQ_TYPE_INORDER |
		      (wqi_len << WQ_LEN_SHIFT) |
		      (target_engine << WQ_TARGET_SHIFT) |
		      WQ_NO_WCFLUSH_WAIT;
	wqi->context_desc = context_desc;
	wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT;
	GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX);
	wqi->fence_id = fence_id;

	/* Make the update visible to GuC */
	WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1));
}
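
/*
 * For illustration (assuming 4KiB pages and a two-page GUC_WQ_SIZE, i.e.
 * 8192 bytes): a tail of 8176 advances to (8176 + 16) & 8191 == 0 and so
 * wraps cleanly to the start of the buffer. Both the buffer size and the
 * item size are powers of two, which is why guc_wq_item_append() can use
 * a mask rather than a modulo, and why a 16-byte item can never straddle
 * the end of the buffer.
 */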

static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	u32 ctx_desc = rq->context->lrc.ccid;
	u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);

	guc_wq_item_append(guc, engine->guc_id, ctx_desc,
			   ring_tail, rq->fence.seqno);
}
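
/*
 * Note the division by sizeof(u64) above: the ring tail placed into the
 * work item is expressed in qword units rather than bytes, hence also the
 * WQ_RING_TAIL_MAX sanity check in guc_wq_item_append().
 */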

/*
 * When we're doing submissions using the regular execlists backend, writing
 * to ELSP from the CPU side is enough to make sure that writes to ringbuffer
 * pages pinned in the mappable aperture portion of the GGTT are visible to
 * the command streamer. Writes done by the GuC on our behalf do not
 * guarantee such ordering, so to ensure the flush we issue a posting read.
 */
static void flush_ggtt_writes(struct i915_vma *vma)
{
	if (i915_vma_is_map_and_fenceable(vma))
		intel_uncore_posting_read_fw(vma->vm->gt->uncore,
					     GUC_STATUS);
}

static void guc_submit(struct intel_engine_cs *engine,
		       struct i915_request **out,
		       struct i915_request **end)
{
	struct intel_guc *guc = &engine->gt->uc.guc;

	spin_lock(&guc->wq_lock);

	do {
		struct i915_request *rq = *out++;

		flush_ggtt_writes(rq->ring->vma);
		guc_add_request(guc, rq);
	} while (out != end);

	spin_unlock(&guc->wq_lock);
}

static inline int rq_prio(const struct i915_request *rq)
{
	return rq->sched.attr.priority | __NO_PREEMPTION;
}
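
/*
 * rq_prio() folds the __NO_PREEMPTION hint into the effective priority:
 * this legacy GuC backend has no mechanism for preempting a request once
 * its work item has been written into the queue.
 */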

static struct i915_request *schedule_in(struct i915_request *rq, int idx)
{
	trace_i915_request_in(rq, idx);

	/*
	 * Currently we are not tracking the rq->context being inflight
	 * (ce->inflight = rq->engine). It is only used by the execlists
	 * backend at the moment; a similar counting strategy would be
	 * required if we generalise the inflight tracking.
	 */

	__intel_gt_pm_get(rq->engine->gt);
	return i915_request_get(rq);
}

static void schedule_out(struct i915_request *rq)
{
	trace_i915_request_out(rq);

	intel_gt_pm_put_async(rq->engine->gt);
	i915_request_put(rq);
}

static void __guc_dequeue(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_request **first = execlists->inflight;
	struct i915_request ** const last_port = first + execlists->port_mask;
	struct i915_request *last = first[0];
	struct i915_request **port;
	bool submit = false;
	struct rb_node *rb;

	lockdep_assert_held(&engine->active.lock);

	if (last) {
		if (*++first)
			return;

		last = NULL;
	}

	/*
	 * We write directly into the execlists->inflight queue and don't use
	 * the execlists->pending queue, as we don't have a distinct switch
	 * event.
	 */
	port = first;
	while ((rb = rb_first_cached(&execlists->queue))) {
		struct i915_priolist *p = to_priolist(rb);
		struct i915_request *rq, *rn;
		int i;

		priolist_for_each_request_consume(rq, rn, p, i) {
			if (last && rq->context != last->context) {
				if (port == last_port)
					goto done;

				*port = schedule_in(last,
						    port - execlists->inflight);
				port++;
			}

			list_del_init(&rq->sched.link);
			__i915_request_submit(rq);
			submit = true;
			last = rq;
		}

		rb_erase_cached(&p->node, &execlists->queue);
		i915_priolist_free(p);
	}
done:
	execlists->queue_priority_hint =
		rb ? to_priolist(rb)->priority : INT_MIN;
	if (submit) {
		*port = schedule_in(last, port - execlists->inflight);
		*++port = NULL;
		guc_submit(engine, first, port);
	}
	execlists->active = execlists->inflight;
}
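
/*
 * A sketch of the dequeue above: requests are consumed from the priority
 * tree in order, and consecutive requests belonging to the same context
 * share a single inflight "port". Only when the context changes is the
 * previous run claimed into a port via schedule_in(); once every port is
 * occupied (port == last_port) dequeuing stops. The populated range
 * [first, port) is then handed to guc_submit() under the wq_lock.
 */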

static void guc_submission_tasklet(unsigned long data)
{
	struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_request **port, *rq;
	unsigned long flags;

	spin_lock_irqsave(&engine->active.lock, flags);

	for (port = execlists->inflight; (rq = *port); port++) {
		if (!i915_request_completed(rq))
			break;

		schedule_out(rq);
	}
	if (port != execlists->inflight) {
		int idx = port - execlists->inflight;
		int rem = ARRAY_SIZE(execlists->inflight) - idx;
		memmove(execlists->inflight, port, rem * sizeof(*port));
	}

	__guc_dequeue(engine);

	spin_unlock_irqrestore(&engine->active.lock, flags);
}
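
/*
 * The tasklet thus does the retire/submit dance in one pass: completed
 * requests are popped from the head of the inflight array, the remainder
 * is compacted down with memmove() so the array stays ordered and
 * NULL-terminated, and __guc_dequeue() then refills any freed ports.
 */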

static void guc_reset_prepare(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;

	ENGINE_TRACE(engine, "\n");

	/*
	 * Prevent request submission to the hardware until we have
	 * completed the reset. If a request is completed by one engine,
	 * it may then queue a request to a second via its
	 * execlists->tasklet *just* as we are resetting the first and
	 * also writing the ELSP. Turning off the execlists->tasklet
	 * until the reset is over prevents the race.
	 */
	__tasklet_disable_sync_once(&execlists->tasklet);
}

static void
cancel_port_requests(struct intel_engine_execlists * const execlists)
{
	struct i915_request * const *port, *rq;

	/* Note we are only using the inflight and not the pending queue */
	for (port = execlists->active; (rq = *port); port++)
		schedule_out(rq);
	execlists->active =
		memset(execlists->inflight, 0, sizeof(execlists->inflight));
}

static void guc_reset_rewind(struct intel_engine_cs *engine, bool stalled)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_request *rq;
	unsigned long flags;

	spin_lock_irqsave(&engine->active.lock, flags);

	cancel_port_requests(execlists);

	/* Push back any incomplete requests for replay after the reset. */
	rq = execlists_unwind_incomplete_requests(execlists);
	if (!rq)
		goto out_unlock;

	if (!i915_request_started(rq))
		stalled = false;

	__i915_request_reset(rq, stalled);
	intel_lr_context_reset(engine, rq->context, rq->head, stalled);

out_unlock:
	spin_unlock_irqrestore(&engine->active.lock, flags);
}

static void guc_reset_cancel(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_request *rq, *rn;
	struct rb_node *rb;
	unsigned long flags;

	ENGINE_TRACE(engine, "\n");

	/*
	 * Before we call engine->cancel_requests(), we should have exclusive
	 * access to the submission state. This is arranged for us by the
	 * caller disabling the interrupt generation, the tasklet and other
	 * threads that may then access the same state, giving us a free hand
	 * to reset state. However, we still need to let lockdep be aware that
	 * we know this state may be accessed in hardirq context, so we
	 * disable the irq around this manipulation and we want to keep
	 * the spinlock focused on its duties and not accidentally conflate
	 * coverage to the submission's irq state. (Similarly, although we
	 * shouldn't need to disable irq around the manipulation of the
	 * submission's irq state, we also wish to remind ourselves that
	 * it is irq state.)
	 */
	spin_lock_irqsave(&engine->active.lock, flags);

	/* Cancel the requests on the HW and clear the ELSP tracker. */
	cancel_port_requests(execlists);

	/* Mark all executing requests as skipped. */
	list_for_each_entry(rq, &engine->active.requests, sched.link) {
		i915_request_set_error_once(rq, -EIO);
		i915_request_mark_complete(rq);
	}

	/* Flush the queued requests to the timeline list (for retiring). */
	while ((rb = rb_first_cached(&execlists->queue))) {
		struct i915_priolist *p = to_priolist(rb);
		int i;

		priolist_for_each_request_consume(rq, rn, p, i) {
			list_del_init(&rq->sched.link);
			__i915_request_submit(rq);
			dma_fence_set_error(&rq->fence, -EIO);
			i915_request_mark_complete(rq);
		}

		rb_erase_cached(&p->node, &execlists->queue);
		i915_priolist_free(p);
	}

	/* Remaining _unready_ requests will be nop'ed when submitted */

	execlists->queue_priority_hint = INT_MIN;
	execlists->queue = RB_ROOT_CACHED;

	spin_unlock_irqrestore(&engine->active.lock, flags);
}

static void guc_reset_finish(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;

	if (__tasklet_enable(&execlists->tasklet))
		/* And kick in case we missed a new request submission. */
		tasklet_hi_schedule(&execlists->tasklet);

	ENGINE_TRACE(engine, "depth->%d\n",
		     atomic_read(&execlists->tasklet.count));
}

/*
 * Everything below here is concerned with setup & teardown, and is
 * therefore not part of the somewhat time-critical batch-submission
 * path of guc_submit() above.
 */
int intel_guc_submission_init(struct intel_guc *guc)
{
	int ret;

	if (guc->stage_desc_pool)
		return 0;

	ret = guc_stage_desc_pool_create(guc);
	if (ret)
		return ret;

	/*
	 * Keep static analysers happy, let them know that we allocated the
	 * vma after testing that it didn't exist earlier.
	 */
	GEM_BUG_ON(!guc->stage_desc_pool);

	ret = guc_workqueue_create(guc);
	if (ret)
		goto err_pool;

	ret = guc_proc_desc_create(guc);
	if (ret)
		goto err_workqueue;

	spin_lock_init(&guc->wq_lock);

	return 0;

err_workqueue:
	guc_workqueue_destroy(guc);
err_pool:
	guc_stage_desc_pool_destroy(guc);
	return ret;
}

void intel_guc_submission_fini(struct intel_guc *guc)
{
	if (guc->stage_desc_pool) {
		guc_proc_desc_destroy(guc);
		guc_workqueue_destroy(guc);
		guc_stage_desc_pool_destroy(guc);
	}
}

static void guc_interrupts_capture(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	u32 irqs = GT_CONTEXT_SWITCH_INTERRUPT;
	u32 dmask = irqs << 16 | irqs;

	GEM_BUG_ON(INTEL_GEN(gt->i915) < 11);

	/* Don't handle the ctx switch interrupt in GuC submission mode */
	intel_uncore_rmw(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask, 0);
	intel_uncore_rmw(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask, 0);
}
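
/*
 * On Gen11+ each of these enable registers packs two engine classes, one
 * per 16-bit half, which is why the same mask is replicated into both
 * halves (irqs << 16 | irqs) before being cleared above and restored in
 * guc_interrupts_release() below.
 */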

static void guc_interrupts_release(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	u32 irqs = GT_CONTEXT_SWITCH_INTERRUPT;
	u32 dmask = irqs << 16 | irqs;

	GEM_BUG_ON(INTEL_GEN(gt->i915) < 11);

	/* Handle ctx switch interrupts again */
	intel_uncore_rmw(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0, dmask);
	intel_uncore_rmw(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0, dmask);
}

static void guc_set_default_submission(struct intel_engine_cs *engine)
{
	/*
	 * We inherit a bunch of functions from execlists that we'd like
	 * to keep using:
	 *
	 *    engine->submit_request = execlists_submit_request;
	 *    engine->schedule = i915_schedule;
	 *    engine->execlists.tasklet.func = execlists_submission_tasklet;
	 *
	 * But we need to override the actual submission backend in order
	 * to talk to the GuC.
	 */
	intel_execlists_set_default_submission(engine);

	engine->execlists.tasklet.func = guc_submission_tasklet;

	/* do not use execlists park/unpark */
	engine->park = engine->unpark = NULL;

	engine->reset.prepare = guc_reset_prepare;
	engine->reset.rewind = guc_reset_rewind;
	engine->reset.cancel = guc_reset_cancel;
	engine->reset.finish = guc_reset_finish;

	engine->flags &= ~I915_ENGINE_SUPPORTS_STATS;
	engine->flags |= I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;

	/*
	 * For the breadcrumb irq to work we need the interrupts to stay
	 * enabled. However, on all platforms on which we'll have support
	 * for GuC submission we don't allow disabling the interrupts at
	 * runtime, so we're always safe with the current flow.
	 */
	GEM_BUG_ON(engine->irq_enable || engine->irq_disable);
}

void intel_guc_submission_enable(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * We're using GuC work items for submitting work through GuC. Since
	 * we're coalescing multiple requests from a single context into a
	 * single work item prior to assigning it to an execlist port, we can
	 * never have more work items than the total number of ports (for all
	 * engines). The GuC firmware controls the HEAD of the work queue,
	 * and it is guaranteed that it will remove the work item from the
	 * queue before our request is completed.
	 */
	BUILD_BUG_ON(ARRAY_SIZE(engine->execlists.inflight) *
		     sizeof(struct guc_wq_item) *
		     I915_NUM_ENGINES > GUC_WQ_SIZE);

	guc_proc_desc_init(guc);
	guc_stage_desc_init(guc);

	/* Take over from manual control of ELSP (execlists) */
	guc_interrupts_capture(gt);

	for_each_engine(engine, gt, id) {
		engine->set_default_submission = guc_set_default_submission;
		engine->set_default_submission(engine);
	}
}

void intel_guc_submission_disable(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	GEM_BUG_ON(gt->awake); /* GT should be parked first */

	/* Note: By the time we're here, GuC may have already been reset */

	guc_interrupts_release(gt);

	guc_stage_desc_fini(guc);
	guc_proc_desc_fini(guc);
}

static bool __guc_submission_selected(struct intel_guc *guc)
{
	if (!intel_guc_submission_is_supported(guc))
		return false;

	return i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION;
}

void intel_guc_submission_init_early(struct intel_guc *guc)
{
	guc->submission_selected = __guc_submission_selected(guc);
}

bool intel_engine_in_guc_submission_mode(const struct intel_engine_cs *engine)
{
	return engine->set_default_submission == guc_set_default_submission;
}