1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24#include <linux/firmware.h>
25#include <linux/circ_buf.h>
26#include "i915_drv.h"
27#include "intel_guc.h"
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70static inline bool host2guc_action_response(struct drm_i915_private *dev_priv,
71 u32 *status)
72{
73 u32 val = I915_READ(SOFT_SCRATCH(0));
74 *status = val;
75 return GUC2HOST_IS_RESPONSE(val);
76}
77
/*
 * host2guc_action() - send an action request to the GuC firmware.
 * @guc: the GuC
 * @data: action opcode in data[0], followed by its parameters
 * @len: number of valid words in @data (1..15)
 *
 * The request words are written into the SOFT_SCRATCH registers, then the
 * GuC is interrupted and we wait for it to write a response back into
 * SOFT_SCRATCH(0).  Returns 0 on success, -ETIMEDOUT if the GuC never
 * responded, or -EIO if it responded with a failure status.
 */
static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	u32 status;
	int i;
	int ret;

	/* Only SOFT_SCRATCH 0..14 carry the request; 15 holds response data */
	if (WARN_ON(len < 1 || len > 15))
		return -EINVAL;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* Bookkeeping for debugfs/stats */
	dev_priv->guc.action_count += 1;
	dev_priv->guc.action_cmd = data[0];

	for (i = 0; i < len; i++)
		I915_WRITE(SOFT_SCRATCH(i), data[i]);

	/* Flush the request words before ringing the GuC's interrupt */
	POSTING_READ(SOFT_SCRATCH(i - 1));

	I915_WRITE(HOST2GUC_INTERRUPT, HOST2GUC_TRIGGER);

	/*
	 * Fast path: busy-poll for up to 10us; if the GuC hasn't replied by
	 * then, fall back to a sleeping wait of up to 10ms.
	 */
	ret = wait_for_us(host2guc_action_response(dev_priv, &status), 10);
	if (ret)
		ret = wait_for(host2guc_action_response(dev_priv, &status), 10);
	if (status != GUC2HOST_STATUS_SUCCESS) {
		/*
		 * Either the GuC explicitly returned an error (ret == 0 but
		 * bad status) or we timed out.  Map the former to -EIO;
		 * keep -ETIMEDOUT as-is.
		 */
		if (ret != -ETIMEDOUT)
			ret = -EIO;

		DRM_WARN("Action 0x%X failed; ret=%d status=0x%08X response=0x%08X\n",
			 data[0], ret, status, I915_READ(SOFT_SCRATCH(15)));

		dev_priv->guc.action_fail += 1;
		dev_priv->guc.action_err = ret;
	}
	dev_priv->guc.action_status = status;

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}
129
130
131
132
133
134static int host2guc_allocate_doorbell(struct intel_guc *guc,
135 struct i915_guc_client *client)
136{
137 u32 data[2];
138
139 data[0] = HOST2GUC_ACTION_ALLOCATE_DOORBELL;
140 data[1] = client->ctx_index;
141
142 return host2guc_action(guc, data, 2);
143}
144
145static int host2guc_release_doorbell(struct intel_guc *guc,
146 struct i915_guc_client *client)
147{
148 u32 data[2];
149
150 data[0] = HOST2GUC_ACTION_DEALLOCATE_DOORBELL;
151 data[1] = client->ctx_index;
152
153 return host2guc_action(guc, data, 2);
154}
155
156static int host2guc_sample_forcewake(struct intel_guc *guc,
157 struct i915_guc_client *client)
158{
159 struct drm_i915_private *dev_priv = guc_to_i915(guc);
160 u32 data[2];
161
162 data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
163
164 if (!intel_enable_rc6() || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
165 data[1] = 0;
166 else
167
168 data[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;
169
170 return host2guc_action(guc, data, ARRAY_SIZE(data));
171}
172
173
174
175
176
177
178
179
/*
 * guc_update_doorbell_id() - (re)assign the doorbell used by @client.
 *
 * Tears down the currently-active doorbell (if any), updates the client's
 * slot in the shared context-descriptor pool with @new_id, then activates
 * the new doorbell via a H2G action.  Passing GUC_INVALID_DOORBELL_ID
 * performs teardown only.  Returns 0 or a negative error code.
 */
static int guc_update_doorbell_id(struct intel_guc *guc,
				  struct i915_guc_client *client,
				  u16 new_id)
{
	struct sg_table *sg = guc->ctx_pool_vma->pages;
	void *doorbell_bitmap = guc->doorbell_bitmap;
	struct guc_doorbell_info *doorbell;
	struct guc_context_desc desc;
	size_t len;

	doorbell = client->client_base + client->doorbell_offset;

	if (client->doorbell_id != GUC_INVALID_DOORBELL_ID &&
	    test_bit(client->doorbell_id, doorbell_bitmap)) {
		/* Mark it disabled in memory before asking the GuC to
		 * release it, then return the id to the free pool. */
		doorbell->db_status = GUC_DOORBELL_DISABLED;
		(void)host2guc_release_doorbell(guc, client);
		__clear_bit(client->doorbell_id, doorbell_bitmap);
	}

	/* Read-modify-write the context descriptor's db_id in the pool */
	len = sg_pcopy_to_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
				 sizeof(desc) * client->ctx_index);
	if (len != sizeof(desc))
		return -EFAULT;
	desc.db_id = new_id;
	len = sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
				   sizeof(desc) * client->ctx_index);
	if (len != sizeof(desc))
		return -EFAULT;

	client->doorbell_id = new_id;
	if (new_id == GUC_INVALID_DOORBELL_ID)
		return 0;

	/* Activate the new doorbell: reserve the id, reset its state and
	 * let the GuC connect it up. */
	__set_bit(new_id, doorbell_bitmap);
	doorbell->cookie = 0;
	doorbell->db_status = GUC_DOORBELL_ENABLED;
	return host2guc_allocate_doorbell(guc, client);
}
221
/* Activate @client's doorbell: an update from "no doorbell" to @db_id */
static int guc_init_doorbell(struct intel_guc *guc,
			     struct i915_guc_client *client,
			     uint16_t db_id)
{
	return guc_update_doorbell_id(guc, client, db_id);
}
228
static void guc_disable_doorbell(struct intel_guc *guc,
				 struct i915_guc_client *client)
{
	/* Release the client's doorbell.  The return value is deliberately
	 * ignored: this is called on teardown paths (guc_client_free())
	 * where no recovery is possible. */
	(void)guc_update_doorbell_id(guc, client, GUC_INVALID_DOORBELL_ID);
}
237
238static uint16_t
239select_doorbell_register(struct intel_guc *guc, uint32_t priority)
240{
241
242
243
244
245
246
247
248 const bool hi_pri = (priority <= GUC_CTX_PRIORITY_HIGH);
249 const uint16_t half = GUC_MAX_DOORBELLS / 2;
250 const uint16_t start = hi_pri ? half : 0;
251 const uint16_t end = start + half;
252 uint16_t id;
253
254 id = find_next_zero_bit(guc->doorbell_bitmap, end, start);
255 if (id == end)
256 id = GUC_INVALID_DOORBELL_ID;
257
258 DRM_DEBUG_DRIVER("assigned %s priority doorbell id 0x%x\n",
259 hi_pri ? "high" : "normal", id);
260
261 return id;
262}
263
264
265
266
267
268
269
270
271static uint32_t select_doorbell_cacheline(struct intel_guc *guc)
272{
273 const uint32_t cacheline_size = cache_line_size();
274 uint32_t offset;
275
276
277 offset = offset_in_page(guc->db_cacheline);
278
279
280 guc->db_cacheline += cacheline_size;
281
282 DRM_DEBUG_DRIVER("selected doorbell cacheline 0x%x, next 0x%x, linesize %u\n",
283 offset, guc->db_cacheline, cacheline_size);
284
285 return offset;
286}
287
288
289
290
291static void guc_proc_desc_init(struct intel_guc *guc,
292 struct i915_guc_client *client)
293{
294 struct guc_process_desc *desc;
295
296 desc = client->client_base + client->proc_desc_offset;
297
298 memset(desc, 0, sizeof(*desc));
299
300
301
302
303
304
305
306 desc->wq_base_addr = 0;
307 desc->db_base_addr = 0;
308
309 desc->context_id = client->ctx_index;
310 desc->wq_size_bytes = client->wq_size;
311 desc->wq_status = WQ_STATUS_ACTIVE;
312 desc->priority = client->priority;
313}
314
315
316
317
318
319
320
321
322
/*
 * guc_ctx_desc_init() - build the GuC context descriptor for @client and
 * copy it into the client's slot in the shared context-descriptor pool.
 * Fills in one execlist-context entry per engine the client uses.
 */
static void guc_ctx_desc_init(struct intel_guc *guc,
			      struct i915_guc_client *client)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx = client->owner;
	struct guc_context_desc desc;
	struct sg_table *sg;
	unsigned int tmp;
	u32 gfx_addr;

	memset(&desc, 0, sizeof(desc));

	desc.attribute = GUC_CTX_DESC_ATTR_ACTIVE | GUC_CTX_DESC_ATTR_KERNEL;
	desc.context_id = client->ctx_index;
	desc.priority = client->priority;
	desc.db_id = client->doorbell_id;

	for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
		struct intel_context *ce = &ctx->engine[engine->id];
		uint32_t guc_engine_id = engine->guc_id;
		struct guc_execlist_context *lrc = &desc.lrc[guc_engine_id];

		/*
		 * An engine whose logical-ring state has not yet been
		 * allocated cannot be described; stop here, so only the
		 * engines populated so far appear in engines_used.
		 */
		if (!ce->state)
			break;

		lrc->context_desc = lower_32_bits(ce->lrc_desc);

		/* GGTT address of the engine's register-state page */
		lrc->ring_lcra =
			i915_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
		lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
				  (guc_engine_id << GUC_ELC_ENGINE_OFFSET);

		lrc->ring_begin = i915_ggtt_offset(ce->ring->vma);
		lrc->ring_end = lrc->ring_begin + ce->ring->size - 1;
		lrc->ring_next_free_location = lrc->ring_begin;
		lrc->ring_current_tail_pointer_value = 0;

		desc.engines_used |= (1 << guc_engine_id);
	}

	DRM_DEBUG_DRIVER("Host engines 0x%x => GuC engines used 0x%x\n",
			 client->engines, desc.engines_used);
	WARN_ON(desc.engines_used == 0);

	/*
	 * Describe the doorbell three ways: DMA (physical) address, CPU
	 * virtual address, and GGTT offset as seen by the GuC ("uk").
	 */
	gfx_addr = i915_ggtt_offset(client->vma);
	desc.db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
				client->doorbell_offset;
	desc.db_trigger_cpu = (uintptr_t)client->client_base +
				client->doorbell_offset;
	desc.db_trigger_uk = gfx_addr + client->doorbell_offset;
	desc.process_desc = gfx_addr + client->proc_desc_offset;
	desc.wq_addr = gfx_addr + client->wq_offset;
	desc.wq_size = client->wq_size;

	/* Opaque private field; stores the client pointer for our own use */
	desc.desc_private = (uintptr_t)client;

	/* Write the finished descriptor into this client's pool slot */
	sg = guc->ctx_pool_vma->pages;
	sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
			     sizeof(desc) * client->ctx_index);
}
401
402static void guc_ctx_desc_fini(struct intel_guc *guc,
403 struct i915_guc_client *client)
404{
405 struct guc_context_desc desc;
406 struct sg_table *sg;
407
408 memset(&desc, 0, sizeof(desc));
409
410 sg = guc->ctx_pool_vma->pages;
411 sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
412 sizeof(desc) * client->ctx_index);
413}
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431int i915_guc_wq_reserve(struct drm_i915_gem_request *request)
432{
433 const size_t wqi_size = sizeof(struct guc_wq_item);
434 struct i915_guc_client *gc = request->i915->guc.execbuf_client;
435 struct guc_process_desc *desc = gc->client_base + gc->proc_desc_offset;
436 u32 freespace;
437 int ret;
438
439 spin_lock(&gc->wq_lock);
440 freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
441 freespace -= gc->wq_rsvd;
442 if (likely(freespace >= wqi_size)) {
443 gc->wq_rsvd += wqi_size;
444 ret = 0;
445 } else {
446 gc->no_wq_space++;
447 ret = -EAGAIN;
448 }
449 spin_unlock(&gc->wq_lock);
450
451 return ret;
452}
453
/* Return the workqueue space reserved for @request by i915_guc_wq_reserve() */
void i915_guc_wq_unreserve(struct drm_i915_gem_request *request)
{
	const size_t wqi_size = sizeof(struct guc_wq_item);
	struct i915_guc_client *gc = request->i915->guc.execbuf_client;

	/* The reservation being returned must still be outstanding */
	GEM_BUG_ON(READ_ONCE(gc->wq_rsvd) < wqi_size);

	spin_lock(&gc->wq_lock);
	gc->wq_rsvd -= wqi_size;
	spin_unlock(&gc->wq_lock);
}
465
466
/*
 * guc_wq_item_append() - write one work-queue item for @rq into the
 * client's WQ ring.  The caller must have reserved space beforehand via
 * i915_guc_wq_reserve() and must hold gc->wq_lock (see i915_guc_submit()).
 */
static void guc_wq_item_append(struct i915_guc_client *gc,
			       struct drm_i915_gem_request *rq)
{
	/* wqi_len is in DWords and excludes the one-DWord header */
	const size_t wqi_size = sizeof(struct guc_wq_item);
	const u32 wqi_len = wqi_size/sizeof(u32) - 1;
	struct intel_engine_cs *engine = rq->engine;
	struct guc_process_desc *desc;
	struct guc_wq_item *wqi;
	void *base;
	u32 freespace, tail, wq_off, wq_page;

	desc = gc->client_base + gc->proc_desc_offset;

	/* Space must already have been reserved; only sanity-check here */
	freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
	GEM_BUG_ON(freespace < wqi_size);

	/* The ring tail is passed to the GuC in QWords, not bytes */
	tail = rq->tail;
	GEM_BUG_ON(tail & 7);
	tail >>= 3;
	GEM_BUG_ON(tail > WQ_RING_TAIL_MAX);

	/*
	 * An item never crosses a page boundary: it is exactly 16 bytes
	 * (BUILD_BUG below) and the WQ offset stays 16-byte aligned.
	 */
	BUILD_BUG_ON(wqi_size != 16);
	GEM_BUG_ON(gc->wq_rsvd < wqi_size);

	/* Consume a reserved slot and advance the wrapping tail */
	wq_off = gc->wq_tail;
	GEM_BUG_ON(wq_off & (wqi_size - 1));
	gc->wq_tail += wqi_size;
	gc->wq_tail &= gc->wq_size - 1;
	gc->wq_rsvd -= wqi_size;

	/* The WQ starts in the page after the doorbell page */
	wq_page = (wq_off + GUC_DB_SIZE) >> PAGE_SHIFT;
	wq_off &= PAGE_SIZE - 1;
	base = kmap_atomic(i915_gem_object_get_page(gc->vma->obj, wq_page));
	wqi = (struct guc_wq_item *)((char *)base + wq_off);

	/* Header: in-order submission targeting this engine */
	wqi->header = WQ_TYPE_INORDER |
			(wqi_len << WQ_LEN_SHIFT) |
			(engine->guc_id << WQ_TARGET_SHIFT) |
			WQ_NO_WCFLUSH_WAIT;

	/* The GuC wants only the low 32 bits of the context descriptor */
	wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, engine);

	wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT;
	wqi->fence_id = rq->fence.seqno;

	kunmap_atomic(base);
}
528
/*
 * guc_ring_doorbell() - notify the GuC of new work in the client's WQ.
 *
 * Publishes the updated tail in the process descriptor, then rings the
 * doorbell by advancing its cookie with a 64-bit cmpxchg so a concurrent
 * GuC-side update is detected.  On a cookie mismatch we retry once with
 * the freshly observed cookie.  Returns 0 on success, -EAGAIN otherwise.
 */
static int guc_ring_doorbell(struct i915_guc_client *gc)
{
	struct guc_process_desc *desc;
	union guc_doorbell_qw db_cmp, db_exc, db_ret;
	union guc_doorbell_qw *db;
	int attempt = 2, ret = -EAGAIN;

	desc = gc->client_base + gc->proc_desc_offset;

	/* Publish the new tail before the doorbell write below */
	desc->tail = gc->wq_tail;

	/* Expected current doorbell state */
	db_cmp.db_status = GUC_DOORBELL_ENABLED;
	db_cmp.cookie = gc->cookie;

	/* Replacement value: cookie + 1, skipping 0 which is reserved */
	db_exc.db_status = GUC_DOORBELL_ENABLED;
	db_exc.cookie = gc->cookie + 1;
	if (db_exc.cookie == 0)
		db_exc.cookie = 1;

	/* Pointer to the doorbell within the client's kmap'd page */
	db = gc->client_base + gc->doorbell_offset;

	while (attempt--) {
		/* Ring the doorbell: swap in the new cookie atomically */
		db_ret.value_qw = atomic64_cmpxchg((atomic64_t *)db,
			db_cmp.value_qw, db_exc.value_qw);

		if (db_ret.value_qw == db_cmp.value_qw) {
			/* Success: remember the cookie we installed */
			gc->cookie = db_exc.cookie;
			ret = 0;
			break;
		}

		/* Doorbell was disabled underneath us: give up (-EAGAIN) */
		if (db_ret.db_status == GUC_DOORBELL_DISABLED)
			break;

		DRM_WARN("Cookie mismatch. Expected %d, found %d\n",
			 db_cmp.cookie, db_ret.cookie);

		/* Retry using the cookie value we actually observed */
		db_cmp.cookie = db_ret.cookie;
		db_exc.cookie = db_ret.cookie + 1;
		if (db_exc.cookie == 0)
			db_exc.cookie = 1;
	}

	return ret;
}
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602static void i915_guc_submit(struct drm_i915_gem_request *rq)
603{
604 unsigned int engine_id = rq->engine->id;
605 struct intel_guc *guc = &rq->i915->guc;
606 struct i915_guc_client *client = guc->execbuf_client;
607 int b_ret;
608
609 spin_lock(&client->wq_lock);
610 guc_wq_item_append(client, rq);
611 b_ret = guc_ring_doorbell(client);
612
613 client->submissions[engine_id] += 1;
614 client->retcode = b_ret;
615 if (b_ret)
616 client->b_fail += 1;
617
618 guc->submissions[engine_id] += 1;
619 guc->last_seqno[engine_id] = rq->fence.seqno;
620 spin_unlock(&client->wq_lock);
621}
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
/*
 * guc_allocate_vma() - allocate a GEM object of @size bytes and pin it
 * in the global GTT, biased above GUC_WOPCM_TOP so the GuC can reach it.
 * Returns the pinned vma or an ERR_PTR on failure.
 */
static struct i915_vma *guc_allocate_vma(struct intel_guc *guc, u32 size)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int ret;

	obj = i915_gem_object_create(&dev_priv->drm, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
	if (IS_ERR(vma))
		goto err;

	ret = i915_vma_pin(vma, 0, PAGE_SIZE,
			   PIN_GLOBAL | PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err;
	}

	/* Invalidate the GuC's TLB so it sees the new GTT mapping */
	I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);

	return vma;

err:
	/* Drop the object; vma already holds the ERR_PTR we return */
	i915_gem_object_put(obj);
	return vma;
}
673
/*
 * guc_client_free() - release everything guc_client_alloc() set up.
 * Each step is guarded so a partially-constructed client (from an error
 * path in guc_client_alloc()) can be freed safely too.
 */
static void
guc_client_free(struct drm_i915_private *dev_priv,
		struct i915_guc_client *client)
{
	struct intel_guc *guc = &dev_priv->guc;

	if (!client)
		return;

	if (client->client_base) {
		/* Disable the doorbell before unmapping the page it lives in */
		guc_disable_doorbell(guc, client);

		kunmap(kmap_to_page(client->client_base));
	}

	i915_vma_unpin_and_release(&client->vma);

	if (client->ctx_index != GUC_INVALID_CTX_ID) {
		/* Clear the pool slot, then return the id to the allocator */
		guc_ctx_desc_fini(guc, client);
		ida_simple_remove(&guc->ctx_ids, client->ctx_index);
	}

	kfree(client);
}
707
708
709static bool guc_doorbell_check(struct intel_guc *guc, uint16_t db_id)
710{
711 struct drm_i915_private *dev_priv = guc_to_i915(guc);
712 i915_reg_t drbreg = GEN8_DRBREGL(db_id);
713 uint32_t value = I915_READ(drbreg);
714 bool enabled = (value & GUC_DOORBELL_ENABLED) != 0;
715 bool expected = test_bit(db_id, guc->doorbell_bitmap);
716
717 if (enabled == expected)
718 return true;
719
720 DRM_DEBUG_DRIVER("Doorbell %d (reg 0x%x) 0x%x, should be %s\n",
721 db_id, drbreg.reg, value,
722 expected ? "active" : "inactive");
723
724 return false;
725}
726
727
728
729
730
/*
 * guc_init_doorbell_hw() - reconcile doorbell hardware state with our
 * bitmap.  Borrows the execbuf client to cycle every doorbell register
 * whose state disagrees with the bitmap, then restores the client's
 * original doorbell and re-verifies everything.
 */
static void guc_init_doorbell_hw(struct intel_guc *guc)
{
	struct i915_guc_client *client = guc->execbuf_client;
	uint16_t db_id;
	int i, err;

	/* Save the client's current doorbell so it can be restored below */
	db_id = client->doorbell_id;

	for (i = 0; i < GUC_MAX_DOORBELLS; ++i) {
		/* Skip doorbells already in the expected state */
		if (guc_doorbell_check(guc, i))
			continue;

		/* Cycling the id through the client forces a HW update */
		err = guc_update_doorbell_id(guc, client, i);
		if (err)
			DRM_DEBUG_DRIVER("Doorbell %d update failed, err %d\n",
					i, err);
	}

	/* Restore the client's original doorbell selection */
	err = guc_update_doorbell_id(guc, client, db_id);
	if (err)
		DRM_WARN("Failed to restore doorbell to %d, err %d\n",
			 db_id, err);

	/* Read back & verify all doorbell registers */
	for (i = 0; i < GUC_MAX_DOORBELLS; ++i)
		(void)guc_doorbell_check(guc, i);
}
761
762
763
764
765
766
767
768
769
770
771
772
773
774
/*
 * guc_client_alloc() - construct the per-client state needed to submit
 * work to the GuC: a context-pool slot, a pinned doorbell/workqueue
 * allocation, a doorbell register and the associated descriptors.
 * Returns NULL on any failure; partial construction is unwound via
 * guc_client_free().
 */
static struct i915_guc_client *
guc_client_alloc(struct drm_i915_private *dev_priv,
		 uint32_t engines,
		 uint32_t priority,
		 struct i915_gem_context *ctx)
{
	struct i915_guc_client *client;
	struct intel_guc *guc = &dev_priv->guc;
	struct i915_vma *vma;
	uint16_t db_id;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return NULL;

	client->owner = ctx;
	client->guc = guc;
	client->engines = engines;
	client->priority = priority;
	client->doorbell_id = GUC_INVALID_DOORBELL_ID;

	/* A negative ida_simple_get() error becomes a huge value after the
	 * uint32_t cast, so the range check also catches allocation failure */
	client->ctx_index = (uint32_t)ida_simple_get(&guc->ctx_ids, 0,
		GUC_MAX_GPU_CONTEXTS, GFP_KERNEL);
	if (client->ctx_index >= GUC_MAX_GPU_CONTEXTS) {
		client->ctx_index = GUC_INVALID_CTX_ID;
		goto err;
	}

	/* First GUC_DB_SIZE bytes hold doorbell + proc desc; WQ follows */
	vma = guc_allocate_vma(guc, GUC_DB_SIZE + GUC_WQ_SIZE);
	if (IS_ERR(vma))
		goto err;

	/* Keep just the first page permanently kmap'd */
	client->vma = vma;
	client->client_base = kmap(i915_vma_first_page(vma));

	spin_lock_init(&client->wq_lock);
	client->wq_offset = GUC_DB_SIZE;
	client->wq_size = GUC_WQ_SIZE;

	db_id = select_doorbell_register(guc, client->priority);
	if (db_id == GUC_INVALID_DOORBELL_ID)
		/* No free doorbell in this priority band */
		goto err;

	client->doorbell_offset = select_doorbell_cacheline(guc);

	/*
	 * The doorbell needs only one cacheline, so the process descriptor
	 * shares its page: use whichever half the doorbell is not in.
	 */
	if (client->doorbell_offset >= (GUC_DB_SIZE / 2))
		client->proc_desc_offset = 0;
	else
		client->proc_desc_offset = (GUC_DB_SIZE / 2);

	guc_proc_desc_init(guc, client);
	guc_ctx_desc_init(guc, client);
	if (guc_init_doorbell(guc, client, db_id))
		goto err;

	DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: ctx_index %u\n",
		priority, client, client->engines, client->ctx_index);
	DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%x\n",
		client->doorbell_id, client->doorbell_offset);

	return client;

err:
	guc_client_free(dev_priv, client);
	return NULL;
}
849
/*
 * guc_log_create() - allocate (at most once) the GuC log buffer and
 * compute guc->log_flags, which encodes its GGTT page offset plus the
 * section-size layout bits handed to the firmware.
 */
static void guc_log_create(struct intel_guc *guc)
{
	struct i915_vma *vma;
	unsigned long offset;
	uint32_t size, flags;

	if (i915.guc_log_level < GUC_LOG_VERBOSITY_MIN)
		return;

	/* Clamp the requested verbosity to what the GuC supports */
	if (i915.guc_log_level > GUC_LOG_VERBOSITY_MAX)
		i915.guc_log_level = GUC_LOG_VERBOSITY_MAX;

	/* DPC, ISR and crash-dump sections, each preceded by one extra
	 * page (presumably a per-section header — layout per GuC f/w) */
	size = (1 + GUC_LOG_DPC_PAGES + 1 +
		GUC_LOG_ISR_PAGES + 1 +
		GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT;

	vma = guc->log_vma;
	if (!vma) {
		vma = guc_allocate_vma(guc, size);
		if (IS_ERR(vma)) {
			/* Allocation failed: run with logging disabled */
			i915.guc_log_level = -1;
			return;
		}

		guc->log_vma = vma;
	}

	/* Section sizes are expressed to the firmware in pages */
	flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL |
		(GUC_LOG_DPC_PAGES << GUC_LOG_DPC_SHIFT) |
		(GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
		(GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);

	offset = i915_ggtt_offset(vma) >> PAGE_SHIFT;
	guc->log_flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
}
889
890static void guc_policies_init(struct guc_policies *policies)
891{
892 struct guc_policy *policy;
893 u32 p, i;
894
895 policies->dpc_promote_time = 500000;
896 policies->max_num_work_items = POLICY_MAX_NUM_WI;
897
898 for (p = 0; p < GUC_CTX_PRIORITY_NUM; p++) {
899 for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) {
900 policy = &policies->policy[p][i];
901
902 policy->execution_quantum = 1000000;
903 policy->preemption_time = 500000;
904 policy->fault_time = 250000;
905 policy->policy_flags = 0;
906 }
907 }
908
909 policies->is_valid = 1;
910}
911
/*
 * guc_addon_create() - build the GuC Additional Data Struct (ADS).
 * One pinned allocation holds, in order: the ADS header, the scheduling
 * policies, the MMIO reg-state block, and S3 save space.  The header is
 * filled with the GGTT offsets of the later sections.
 */
static void guc_addon_create(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct i915_vma *vma;
	struct guc_ads *ads;
	struct guc_policies *policies;
	struct guc_mmio_reg_state *reg_state;
	struct intel_engine_cs *engine;
	struct page *page;
	u32 size;

	/* Total size of the sections laid out back-to-back (see above) */
	size = sizeof(struct guc_ads) + sizeof(struct guc_policies) +
		sizeof(struct guc_mmio_reg_state) +
		GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE;

	vma = guc->ads_vma;
	if (!vma) {
		vma = guc_allocate_vma(guc, PAGE_ALIGN(size));
		if (IS_ERR(vma))
			return;

		guc->ads_vma = vma;
	}

	page = i915_vma_first_page(vma);
	ads = kmap(page);

	/*
	 * The "golden context" address given to the GuC: here the RCS
	 * engine's status page GGTT offset is used.
	 */
	engine = &dev_priv->engine[RCS];
	ads->golden_context_lrca = engine->status_page.ggtt_offset;

	/* Per-engine logical-ring context sizes */
	for_each_engine(engine, dev_priv)
		ads->eng_state_size[engine->guc_id] = intel_lr_context_size(engine);

	/* Policies follow the ADS header directly */
	policies = (void *)ads + sizeof(struct guc_ads);
	guc_policies_init(policies);

	ads->scheduler_policies =
		i915_ggtt_offset(vma) + sizeof(struct guc_ads);

	/* MMIO reg state follows the policies */
	reg_state = (void *)policies + sizeof(struct guc_policies);

	for_each_engine(engine, dev_priv) {
		reg_state->mmio_white_list[engine->guc_id].mmio_start =
			engine->mmio_base + GUC_MMIO_WHITE_LIST_START;

		/* Nothing to be saved or restored for now. */
		reg_state->mmio_white_list[engine->guc_id].count = 0;
	}

	ads->reg_state_addr = ads->scheduler_policies +
			sizeof(struct guc_policies);

	ads->reg_state_buffer = ads->reg_state_addr +
			sizeof(struct guc_mmio_reg_state);

	kunmap(page);
}
979
980
981
982
983
984int i915_guc_submission_init(struct drm_i915_private *dev_priv)
985{
986 const size_t ctxsize = sizeof(struct guc_context_desc);
987 const size_t poolsize = GUC_MAX_GPU_CONTEXTS * ctxsize;
988 const size_t gemsize = round_up(poolsize, PAGE_SIZE);
989 struct intel_guc *guc = &dev_priv->guc;
990 struct i915_vma *vma;
991
992
993 bitmap_clear(guc->doorbell_bitmap, 0, GUC_MAX_DOORBELLS);
994 i915_guc_submission_disable(dev_priv);
995
996 if (!i915.enable_guc_submission)
997 return 0;
998
999 if (guc->ctx_pool_vma)
1000 return 0;
1001
1002 vma = guc_allocate_vma(guc, gemsize);
1003 if (IS_ERR(vma))
1004 return PTR_ERR(vma);
1005
1006 guc->ctx_pool_vma = vma;
1007 ida_init(&guc->ctx_ids);
1008 guc_log_create(guc);
1009 guc_addon_create(guc);
1010
1011 return 0;
1012}
1013
/*
 * i915_guc_submission_enable() - switch request submission over to the
 * GuC.  Creates the execbuf client, syncs doorbell hardware state, then
 * redirects every engine's submit_request and replays requests already
 * on the engine lists so the GuC knows about them.
 */
int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;
	struct i915_guc_client *client;
	struct intel_engine_cs *engine;
	struct drm_i915_gem_request *request;

	/* The single client used for execbuf submission on all engines */
	client = guc_client_alloc(dev_priv,
				  INTEL_INFO(dev_priv)->ring_mask,
				  GUC_CTX_PRIORITY_KMD_NORMAL,
				  dev_priv->kernel_context);
	if (!client) {
		DRM_ERROR("Failed to create normal GuC client!\n");
		return -ENOMEM;
	}

	guc->execbuf_client = client;
	host2guc_sample_forcewake(guc, client);
	guc_init_doorbell_hw(guc);

	/* Take over submission from execlists on every engine */
	for_each_engine(engine, dev_priv) {
		engine->submit_request = i915_guc_submit;

		/* Replay outstanding requests: reserve WQ space for each,
		 * and submit now those whose fences have already fired. */
		list_for_each_entry(request, &engine->request_list, link) {
			client->wq_rsvd += sizeof(struct guc_wq_item);
			if (i915_sw_fence_done(&request->submit))
				i915_guc_submit(request);
		}
	}

	return 0;
}
1049
1050void i915_guc_submission_disable(struct drm_i915_private *dev_priv)
1051{
1052 struct intel_guc *guc = &dev_priv->guc;
1053
1054 if (!guc->execbuf_client)
1055 return;
1056
1057
1058 intel_execlists_enable_submission(dev_priv);
1059
1060 guc_client_free(dev_priv, guc->execbuf_client);
1061 guc->execbuf_client = NULL;
1062}
1063
/* Release everything allocated by i915_guc_submission_init() */
void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;

	i915_vma_unpin_and_release(&guc->ads_vma);
	i915_vma_unpin_and_release(&guc->log_vma);

	/* The ida only exists if the context pool was ever allocated */
	if (guc->ctx_pool_vma)
		ida_destroy(&guc->ctx_ids);
	i915_vma_unpin_and_release(&guc->ctx_pool_vma);
}
1075
1076
1077
1078
1079
1080int intel_guc_suspend(struct drm_device *dev)
1081{
1082 struct drm_i915_private *dev_priv = to_i915(dev);
1083 struct intel_guc *guc = &dev_priv->guc;
1084 struct i915_gem_context *ctx;
1085 u32 data[3];
1086
1087 if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS)
1088 return 0;
1089
1090 ctx = dev_priv->kernel_context;
1091
1092 data[0] = HOST2GUC_ACTION_ENTER_S_STATE;
1093
1094 data[1] = GUC_POWER_D1;
1095
1096 data[2] = i915_ggtt_offset(ctx->engine[RCS].state);
1097
1098 return host2guc_action(guc, data, ARRAY_SIZE(data));
1099}
1100
1101
1102
1103
1104
1105
1106int intel_guc_resume(struct drm_device *dev)
1107{
1108 struct drm_i915_private *dev_priv = to_i915(dev);
1109 struct intel_guc *guc = &dev_priv->guc;
1110 struct i915_gem_context *ctx;
1111 u32 data[3];
1112
1113 if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS)
1114 return 0;
1115
1116 ctx = dev_priv->kernel_context;
1117
1118 data[0] = HOST2GUC_ACTION_EXIT_S_STATE;
1119 data[1] = GUC_POWER_D0;
1120
1121 data[2] = i915_ggtt_offset(ctx->engine[RCS].state);
1122
1123 return host2guc_action(guc, data, ARRAY_SIZE(data));
1124}
1125