// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#include "gem/i915_gem_lmem.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_slpc.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"

/**
 * DOC: GuC
 *
 * The GuC is a microcontroller inside the GT HW, introduced in gen9. The GuC
 * is designed to offload some of the functionality usually performed by the
 * host driver; currently the main operations it can take care of are:
 *
 * - Authentication of the HuC, which is required to fully enable HuC usage.
 * - Low latency graphics context scheduling (a.k.a. GuC submission).
 * - GT Power management.
 *
 * The enable_guc module parameter can be used to select which of those
 * operations to enable within GuC. Note that not all the operations are
 * supported on all gen9+ platforms.
 *
 * Enabling the GuC is not mandatory and therefore the firmware is only loaded
 * if at least one of the operations is selected. However, not loading the GuC
 * might result in the loss of some features that do require the GuC
 * (currently just the HuC, but more are expected to land in the future).
 */

void intel_guc_notify(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	/*
	 * On Gen11+, the value written to the register is passed as a payload
	 * to the FW. However, the FW currently treats all values the same way
	 * (H2G interrupt), so we can just write the value that the HW expects
	 * on older gens.
	 */
	intel_uncore_write(gt->uncore, guc->notify_reg, GUC_SEND_TRIGGER);
}

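/* Scratch registers used for the MMIO-based host-to-GuC message protocol. */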
static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
{
	GEM_BUG_ON(!guc->send_regs.base);
	GEM_BUG_ON(!guc->send_regs.count);
	GEM_BUG_ON(i >= guc->send_regs.count);

	return _MMIO(guc->send_regs.base + 4 * i);
}

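/*
 * Cache the forcewake domains covering the whole send register bank so the
 * MMIO send path can grab them with a single forcewake get/put.
 */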
void intel_guc_init_send_regs(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	enum forcewake_domains fw_domains = 0;
	unsigned int i;

	GEM_BUG_ON(!guc->send_regs.base);
	GEM_BUG_ON(!guc->send_regs.count);

	for (i = 0; i < guc->send_regs.count; i++) {
		fw_domains |= intel_uncore_forcewake_for_reg(gt->uncore,
						guc_send_reg(guc, i),
						FW_REG_READ | FW_REG_WRITE);
	}
	guc->send_regs.fw_domains = fw_domains;
}

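/*
 * On gen9 the GuC-to-host interrupt arrives through the GT PM interrupt
 * registers (shared with RPS events), hence the gen6 PM IRQ helpers below.
 */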
static void gen9_reset_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	assert_rpm_wakelock_held(&gt->i915->runtime_pm);

	spin_lock_irq(&gt->irq_lock);
	gen6_gt_pm_reset_iir(gt, gt->pm_guc_events);
	spin_unlock_irq(&gt->irq_lock);
}

static void gen9_enable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	assert_rpm_wakelock_held(&gt->i915->runtime_pm);

	spin_lock_irq(&gt->irq_lock);
	WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) &
		     gt->pm_guc_events);
	gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
	spin_unlock_irq(&gt->irq_lock);
}

static void gen9_disable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	assert_rpm_wakelock_held(&gt->i915->runtime_pm);

	spin_lock_irq(&gt->irq_lock);

	gen6_gt_pm_disable_irq(gt, gt->pm_guc_events);

	spin_unlock_irq(&gt->irq_lock);
	intel_synchronize_irq(gt->i915);

	gen9_reset_guc_interrupts(guc);
}

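/*
 * Gen11+ routes GuC interrupts through a dedicated vector with its own
 * enable/mask registers, programmed directly below.
 */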
static void gen11_reset_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	spin_lock_irq(&gt->irq_lock);
	gen11_gt_reset_one_iir(gt, 0, GEN11_GUC);
	spin_unlock_irq(&gt->irq_lock);
}

static void gen11_enable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);

	spin_lock_irq(&gt->irq_lock);
	WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC));
	intel_uncore_write(gt->uncore,
			   GEN11_GUC_SG_INTR_ENABLE, events);
	intel_uncore_write(gt->uncore,
			   GEN11_GUC_SG_INTR_MASK, ~events);
	spin_unlock_irq(&gt->irq_lock);
}

static void gen11_disable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	spin_lock_irq(&gt->irq_lock);

	intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0);
	intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0);

	spin_unlock_irq(&gt->irq_lock);
	intel_synchronize_irq(gt->i915);

	gen11_reset_guc_interrupts(guc);
}

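/*
 * One-time early setup: select the per-platform notify register, interrupt
 * callbacks and scratch register bank used for MMIO messages.
 */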
void intel_guc_init_early(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;

	intel_uc_fw_init_early(&guc->fw, INTEL_UC_FW_TYPE_GUC);
	intel_guc_ct_init_early(&guc->ct);
	intel_guc_log_init_early(&guc->log);
	intel_guc_submission_init_early(guc);
	intel_guc_slpc_init_early(&guc->slpc);
	intel_guc_rc_init_early(guc);

	mutex_init(&guc->send_mutex);
	spin_lock_init(&guc->irq_lock);
	if (GRAPHICS_VER(i915) >= 11) {
		guc->notify_reg = GEN11_GUC_HOST_INTERRUPT;
		guc->interrupts.reset = gen11_reset_guc_interrupts;
		guc->interrupts.enable = gen11_enable_guc_interrupts;
		guc->interrupts.disable = gen11_disable_guc_interrupts;
		guc->send_regs.base =
			i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0));
		guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT;
	} else {
		guc->notify_reg = GUC_SEND_INTERRUPT;
		guc->interrupts.reset = gen9_reset_guc_interrupts;
		guc->interrupts.enable = gen9_enable_guc_interrupts;
		guc->interrupts.disable = gen9_disable_guc_interrupts;
		guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
		guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
		BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
	}
}

void intel_guc_init_late(struct intel_guc *guc)
{
	intel_guc_ads_init_late(guc);
}

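/*
 * The guc_ctl_*() helpers below assemble the boot parameter dwords handed
 * to the firmware through the SOFT_SCRATCH registers before it is loaded
 * (see intel_guc_write_params()).
 */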
static u32 guc_ctl_debug_flags(struct intel_guc *guc)
{
	u32 level = intel_guc_log_get_level(&guc->log);
	u32 flags = 0;

	if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
		flags |= GUC_LOG_DISABLED;
	else
		flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
			 GUC_LOG_VERBOSITY_SHIFT;

	return flags;
}

static u32 guc_ctl_feature_flags(struct intel_guc *guc)
{
	u32 flags = 0;

	if (!intel_guc_submission_is_used(guc))
		flags |= GUC_CTL_DISABLE_SCHEDULER;

	if (intel_guc_slpc_is_used(guc))
		flags |= GUC_CTL_ENABLE_SLPC;

	return flags;
}

static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
{
	u32 offset = intel_guc_ggtt_offset(guc, guc->log.vma) >> PAGE_SHIFT;
	u32 flags;

	#if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0)
	#define UNIT SZ_1M
	#define FLAG GUC_LOG_ALLOC_IN_MEGABYTE
	#else
	#define UNIT SZ_4K
	#define FLAG 0
	#endif

	BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, UNIT));
	BUILD_BUG_ON(!DEBUG_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(DEBUG_BUFFER_SIZE, UNIT));

	BUILD_BUG_ON((CRASH_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
	BUILD_BUG_ON((DEBUG_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_DEBUG_MASK >> GUC_LOG_DEBUG_SHIFT));

	flags = GUC_LOG_VALID |
		GUC_LOG_NOTIFY_ON_HALF_FULL |
		FLAG |
		((CRASH_BUFFER_SIZE / UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
		((DEBUG_BUFFER_SIZE / UNIT - 1) << GUC_LOG_DEBUG_SHIFT) |
		(offset << GUC_LOG_BUF_ADDR_SHIFT);

	#undef UNIT
	#undef FLAG

	return flags;
}

static u32 guc_ctl_ads_flags(struct intel_guc *guc)
{
	u32 ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT;
	u32 flags = ads << GUC_ADS_ADDR_SHIFT;

	return flags;
}

/*
 * Initialise the GuC parameter block before starting the firmware
 * transfer. These parameters are read by the firmware on startup
 * and cannot be changed thereafter.
 */
static void guc_init_params(struct intel_guc *guc)
{
	u32 *params = guc->params;
	int i;

	BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));

	params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
	params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
	params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
	params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]);
}

/*
 * Write the GuC parameter block to the scratch registers, from where the
 * firmware reads it on startup.
 */
void intel_guc_write_params(struct intel_guc *guc)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	int i;

	/*
	 * All SOFT_SCRATCH registers are in FORCEWAKE_GT domain and
	 * they are power context saved so it's ok to release forcewake
	 * when we are done here and take it again at xfer time.
	 */
	intel_uncore_forcewake_get(uncore, FORCEWAKE_GT);

	intel_uncore_write(uncore, SOFT_SCRATCH(0), 0);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		intel_uncore_write(uncore, SOFT_SCRATCH(1 + i), guc->params[i]);

	intel_uncore_forcewake_put(uncore, FORCEWAKE_GT);
}

int intel_guc_init(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	int ret;

	ret = intel_uc_fw_init(&guc->fw);
	if (ret)
		goto out;

	ret = intel_guc_log_create(&guc->log);
	if (ret)
		goto err_fw;

	ret = intel_guc_ads_create(guc);
	if (ret)
		goto err_log;
	GEM_BUG_ON(!guc->ads_vma);

	ret = intel_guc_ct_init(&guc->ct);
	if (ret)
		goto err_ads;

	if (intel_guc_submission_is_used(guc)) {
		/*
		 * This is stuff we need to have available at fw load time
		 * if we are planning to enable submission later
		 */
		ret = intel_guc_submission_init(guc);
		if (ret)
			goto err_ct;
	}

	if (intel_guc_slpc_is_used(guc)) {
		ret = intel_guc_slpc_init(&guc->slpc);
		if (ret)
			goto err_submission;
	}

	/* now that everything is perma-pinned, initialize the parameters */
	guc_init_params(guc);

	/* We need to notify the guc whenever we change the GGTT */
	i915_ggtt_enable_guc(gt->ggtt);

	intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_LOADABLE);

	return 0;

err_submission:
	intel_guc_submission_fini(guc);
err_ct:
	intel_guc_ct_fini(&guc->ct);
err_ads:
	intel_guc_ads_destroy(guc);
err_log:
	intel_guc_log_destroy(&guc->log);
err_fw:
	intel_uc_fw_fini(&guc->fw);
out:
	i915_probe_error(gt->i915, "failed with %d\n", ret);
	return ret;
}

void intel_guc_fini(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	if (!intel_uc_fw_is_loadable(&guc->fw))
		return;

	i915_ggtt_disable_guc(gt->ggtt);

	if (intel_guc_slpc_is_used(guc))
		intel_guc_slpc_fini(&guc->slpc);

	if (intel_guc_submission_is_used(guc))
		intel_guc_submission_fini(guc);

	intel_guc_ct_fini(&guc->ct);

	intel_guc_ads_destroy(guc);
	intel_guc_log_destroy(&guc->log);
	intel_uc_fw_fini(&guc->fw);
}

/*
 * This function implements the MMIO based host to GuC interface.
 */
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *request, u32 len,
			u32 *response_buf, u32 response_buf_size)
{
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	u32 header;
	int i;
	int ret;

	GEM_BUG_ON(!len);
	GEM_BUG_ON(len > guc->send_regs.count);

	GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, request[0]) != GUC_HXG_ORIGIN_HOST);
	GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, request[0]) != GUC_HXG_TYPE_REQUEST);

	mutex_lock(&guc->send_mutex);
	intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains);

retry:
	for (i = 0; i < len; i++)
		intel_uncore_write(uncore, guc_send_reg(guc, i), request[i]);

	intel_uncore_posting_read(uncore, guc_send_reg(guc, i - 1));

	intel_guc_notify(guc);

	/*
	 * No GuC command should ever take longer than 10ms.
	 * Fast commands should still complete in 10us.
	 */
	ret = __intel_wait_for_register_fw(uncore,
					   guc_send_reg(guc, 0),
					   GUC_HXG_MSG_0_ORIGIN,
					   FIELD_PREP(GUC_HXG_MSG_0_ORIGIN,
						      GUC_HXG_ORIGIN_GUC),
					   10, 10, &header);
	if (unlikely(ret)) {
timeout:
		drm_err(&i915->drm, "mmio request %#x: no reply %x\n",
			request[0], header);
		goto out;
	}

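	/*
	 * A BUSY reply means the GuC has accepted the request but not yet
	 * produced the final response; poll the header until it changes.
	 */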
	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == GUC_HXG_TYPE_NO_RESPONSE_BUSY) {
#define done ({ header = intel_uncore_read(uncore, guc_send_reg(guc, 0)); \
		FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) != GUC_HXG_ORIGIN_GUC || \
		FIELD_GET(GUC_HXG_MSG_0_TYPE, header) != GUC_HXG_TYPE_NO_RESPONSE_BUSY; })

		ret = wait_for(done, 1000);
		if (unlikely(ret))
			goto timeout;
		if (unlikely(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) !=
				       GUC_HXG_ORIGIN_GUC))
			goto proto;
#undef done
	}

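	/* A RETRY reply asks the host to resend the very same request. */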
	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
		u32 reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, header);

		drm_dbg(&i915->drm, "mmio request %#x: retrying, reason %u\n",
			request[0], reason);
		goto retry;
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == GUC_HXG_TYPE_RESPONSE_FAILURE) {
		u32 hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, header);
		u32 error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, header);

		drm_err(&i915->drm, "mmio request %#x: failure %x/%u\n",
			request[0], error, hint);
		ret = -ENXIO;
		goto out;
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) != GUC_HXG_TYPE_RESPONSE_SUCCESS) {
proto:
		drm_err(&i915->drm, "mmio request %#x: unexpected reply %#x\n",
			request[0], header);
		ret = -EPROTO;
		goto out;
	}

	if (response_buf) {
		int count = min(response_buf_size, guc->send_regs.count);

		GEM_BUG_ON(!count);

		response_buf[0] = header;

		for (i = 1; i < count; i++)
			response_buf[i] = intel_uncore_read(uncore,
							    guc_send_reg(guc, i));

		/* Use number of copied dwords as our return value */
		ret = count;
	} else {
		/* Use data from the GuC response as our return value */
		ret = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, header);
	}

out:
	intel_uncore_forcewake_put(uncore, guc->send_regs.fw_domains);
	mutex_unlock(&guc->send_mutex);

	return ret;
}

int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
				       const u32 *payload, u32 len)
{
	u32 msg;

	if (unlikely(!len))
		return -EPROTO;

	/* Make sure to handle only enabled messages */
	msg = payload[0] & guc->msg_enabled_mask;

	if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
		   INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED))
		intel_guc_log_handle_flush_event(&guc->log);

	return 0;
}

/**
 * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode
 * @guc: intel_guc structure
 * @rsa_offset: rsa offset w.r.t ggtt base of huc vma
 *
 * Triggers a HuC firmware authentication request to the GuC via the
 * INTEL_GUC_ACTION_AUTHENTICATE_HUC interface. This function is invoked by
 * intel_huc_auth() based on HuC authentication status.
 *
 * Return:	non-zero code on error
 */
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
{
	u32 action[] = {
		INTEL_GUC_ACTION_AUTHENTICATE_HUC,
		rsa_offset
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * intel_guc_suspend() - notify GuC entering suspend state
 * @guc:	the guc
 */
int intel_guc_suspend(struct intel_guc *guc)
{
	int ret;
	u32 action[] = {
		INTEL_GUC_ACTION_RESET_CLIENT,
	};

	if (!intel_guc_is_ready(guc))
		return 0;

	if (intel_guc_submission_is_used(guc)) {
		/*
		 * This H2G MMIO command tears down the GuC in two steps. First
		 * it will generate a G2H CTB for every active context
		 * indicating a reset. In practice the i915 shouldn't ever get
		 * a G2H as suspend should only be called when the GPU is idle.
		 * Next, it tears down the CTBs and this H2G MMIO command
		 * completes.
		 *
		 * Don't abort on a failure code from the GuC. Keep going and
		 * do the clean up in sanitize() and re-initialisation on
		 * resume and hopefully the error here won't be problematic.
		 */
		ret = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
		if (ret)
			DRM_ERROR("GuC suspend: RESET_CLIENT action failed with error %d!\n", ret);
	}

	/* Signal that the GuC isn't running. */
	intel_guc_sanitize(guc);

	return 0;
}

/**
 * intel_guc_resume() - notify GuC resuming from suspend state
 * @guc:	the guc
 */
int intel_guc_resume(struct intel_guc *guc)
{
	/*
	 * NB: This function can still be called even if GuC submission is
	 * disabled, e.g. if GuC is enabled for HuC authentication only. Thus,
	 * if any code is added here, it must support doing nothing if
	 * submission is disabled (as per intel_guc_suspend).
	 */
	return 0;
}


/**
 * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
 * @guc:	the guc
 * @size:	size of area to allocate (both virtual space and memory)
 *
 * This is a wrapper to create an object for use with the GuC. In order to
 * use it inside the GuC, an object needs to be pinned lifetime, so we allocate
 * both some backing storage and a range inside the Global GTT. We must pin
 * it in the GGTT somewhere other than [0, GUC ggtt_pin_bias) because that
 * range is reserved inside GuC.
 *
 * Return:	A i915_vma if successful, otherwise an ERR_PTR.
 */
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u64 flags;
	int ret;

	if (HAS_LMEM(gt->i915))
		obj = i915_gem_object_create_lmem(gt->i915, size,
						  I915_BO_ALLOC_CPU_CLEAR |
						  I915_BO_ALLOC_CONTIGUOUS |
						  I915_BO_ALLOC_PM_EARLY);
	else
		obj = i915_gem_object_create_shmem(gt->i915, size);

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma))
		goto err;

	flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
	ret = i915_ggtt_pin(vma, NULL, 0, flags);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err;
	}

	return i915_vma_make_unshrinkable(vma);

err:
	i915_gem_object_put(obj);
	return vma;
}

/**
 * intel_guc_allocate_and_map_vma() - Allocate and map VMA for GuC usage
 * @guc:	the guc
 * @size:	size of area to allocate (both virtual space and memory)
 * @out_vma:	return variable for the allocated vma pointer
 * @out_vaddr:	return variable for the obtained mapping
 *
 * This wrapper calls intel_guc_allocate_vma() and then maps the allocated
 * object into the kernel address space with a CPU-coherent mapping type.
 *
 * Return:	0 if successful, a negative errno code otherwise.
 */
int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
				   struct i915_vma **out_vma, void **out_vaddr)
{
	struct i915_vma *vma;
	void *vaddr;

	vma = intel_guc_allocate_vma(guc, size);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
						 i915_coherent_map_type(guc_to_gt(guc)->i915,
									vma->obj, true));
	if (IS_ERR(vaddr)) {
		i915_vma_unpin_and_release(&vma, 0);
		return PTR_ERR(vaddr);
	}

	*out_vma = vma;
	*out_vaddr = vaddr;

	return 0;
}
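
/*
 * Callers typically release the mapping and the pin together at teardown
 * with i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP).
 */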

/**
 * intel_guc_load_status - dump information about GuC load status
 * @guc: the GuC
 * @p: the &drm_printer
 *
 * Pretty printer for GuC load status.
 */
void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct intel_uncore *uncore = gt->uncore;
	intel_wakeref_t wakeref;

	if (!intel_guc_is_supported(guc)) {
		drm_printf(p, "GuC not supported\n");
		return;
	}

	if (!intel_guc_is_wanted(guc)) {
		drm_printf(p, "GuC disabled\n");
		return;
	}

	intel_uc_fw_dump(&guc->fw, p);

	with_intel_runtime_pm(uncore->rpm, wakeref) {
		u32 status = intel_uncore_read(uncore, GUC_STATUS);
		u32 i;

		drm_printf(p, "\nGuC status 0x%08x:\n", status);
		drm_printf(p, "\tBootrom status = 0x%x\n",
			   (status & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
		drm_printf(p, "\tuKernel status = 0x%x\n",
			   (status & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
		drm_printf(p, "\tMIA Core status = 0x%x\n",
			   (status & GS_MIA_MASK) >> GS_MIA_SHIFT);
		drm_puts(p, "\nScratch registers:\n");
		for (i = 0; i < 16; i++) {
			drm_printf(p, "\t%2d: \t0x%x\n",
				   i, intel_uncore_read(uncore, SOFT_SCRATCH(i)));
		}
	}
}

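/*
 * Order CPU writes to the CT buffer against the subsequent GuC doorbell;
 * the required barrier depends on whether the buffer lives in LMEM or in
 * system memory.
 */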
void intel_guc_write_barrier(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	if (i915_gem_object_is_lmem(guc->ct.vma->obj)) {
		/*
		 * Ensure intel_uncore_write_fw can be used rather than
		 * intel_uncore_write.
		 */
		GEM_BUG_ON(guc->send_regs.fw_domains);

		/*
		 * This register is used by the i915 and GuC for MMIO based
		 * communication. Once we are in this code CTBs are the only
		 * method the i915 uses to communicate with the GuC so it is
		 * safe to write to this register (a value of 0 is NOP for MMIO
		 * communication). If we ever start mixing CTBs and MMIOs a new
		 * register will be needed to differentiate between them.
		 */
		intel_uncore_write_fw(gt->uncore, GEN11_SOFT_SCRATCH(0), 0);
	} else {
		/* wmb() sufficient for a barrier if in smem */
		wmb();
	}
}