// SPDX-License-Identifier: MIT
/*
 * Copyright © 2016-2019 Intel Corporation
 */

#include "gt/intel_gt.h"
#include "gt/intel_reset.h"
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
#include "intel_uc.h"

#include "i915_drv.h"

static const struct intel_uc_ops uc_ops_off;
static const struct intel_uc_ops uc_ops_on;

/* Reset GuC, providing us with fresh state for both GuC and HuC. */
static int __intel_uc_reset_hw(struct intel_uc *uc)
{
	struct intel_gt *gt = uc_to_gt(uc);
	int ret;
	u32 guc_status;

	ret = i915_inject_probe_error(gt->i915, -ENXIO);
	if (ret)
		return ret;

	ret = intel_reset_guc(gt);
	if (ret) {
		DRM_ERROR("Failed to reset GuC, ret = %d\n", ret);
		return ret;
	}

	guc_status = intel_uncore_read(gt->uncore, GUC_STATUS);
	WARN(!(guc_status & GS_MIA_IN_RESET),
	     "GuC status: 0x%x, MIA core expected to be in reset\n",
	     guc_status);

	return ret;
}

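/* Sanity-check the enable_guc modparam against what the platform supports. */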
static void __confirm_options(struct intel_uc *uc)
{
	struct drm_i915_private *i915 = uc_to_gt(uc)->i915;

	DRM_DEV_DEBUG_DRIVER(i915->drm.dev,
			     "enable_guc=%d (guc:%s submission:%s huc:%s)\n",
			     i915_modparams.enable_guc,
			     yesno(intel_uc_wants_guc(uc)),
			     yesno(intel_uc_wants_guc_submission(uc)),
			     yesno(intel_uc_wants_huc(uc)));

	if (i915_modparams.enable_guc == -1)
		return;

	if (i915_modparams.enable_guc == 0) {
		GEM_BUG_ON(intel_uc_wants_guc(uc));
		GEM_BUG_ON(intel_uc_wants_guc_submission(uc));
		GEM_BUG_ON(intel_uc_wants_huc(uc));
		return;
	}

	if (!intel_uc_supports_guc(uc))
		dev_info(i915->drm.dev,
			 "Incompatible option enable_guc=%d - %s\n",
			 i915_modparams.enable_guc, "GuC is not supported!");

	if (i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC &&
	    !intel_uc_supports_huc(uc))
		dev_info(i915->drm.dev,
			 "Incompatible option enable_guc=%d - %s\n",
			 i915_modparams.enable_guc, "HuC is not supported!");

	if (i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION &&
	    !intel_uc_supports_guc_submission(uc))
		dev_info(i915->drm.dev,
			 "Incompatible option enable_guc=%d - %s\n",
			 i915_modparams.enable_guc, "GuC submission is N/A");

	if (i915_modparams.enable_guc & ~(ENABLE_GUC_SUBMISSION |
					  ENABLE_GUC_LOAD_HUC))
		dev_info(i915->drm.dev,
			 "Incompatible option enable_guc=%d - %s\n",
			 i915_modparams.enable_guc, "undocumented flag");
}

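/*
 * Set up the earliest uC state: per-firmware early init, modparam validation
 * and selection of the uc_ops that will drive the rest of the uC flow.
 */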
void intel_uc_init_early(struct intel_uc *uc)
{
	intel_guc_init_early(&uc->guc);
	intel_huc_init_early(&uc->huc);

	__confirm_options(uc);

	if (intel_uc_wants_guc(uc))
		uc->ops = &uc_ops_on;
	else
		uc->ops = &uc_ops_off;
}

void intel_uc_driver_late_release(struct intel_uc *uc)
{
}

/**
 * intel_uc_init_mmio - setup uC MMIO access
 * @uc: the intel_uc structure
 *
 * Setup minimal state necessary for MMIO accesses later in the
 * firmware loading stage.
 */
void intel_uc_init_mmio(struct intel_uc *uc)
{
	intel_guc_init_send_regs(&uc->guc);
}

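/*
 * Keep a reference to the GuC log buffer object after a failed firmware load
 * so that its contents can still be inspected after the uC has been torn down.
 */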
static void __uc_capture_load_err_log(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	if (guc->log.vma && !uc->load_err_log)
		uc->load_err_log = i915_gem_object_get(guc->log.vma->obj);
}

static void __uc_free_load_err_log(struct intel_uc *uc)
{
	struct drm_i915_gem_object *log = fetch_and_zero(&uc->load_err_log);

	if (log)
		i915_gem_object_put(log);
}

static inline bool guc_communication_enabled(struct intel_guc *guc)
{
	return intel_guc_ct_enabled(&guc->ct);
}


/*
 * Events triggered while CT buffers are disabled are logged in the SCRATCH_15
 * register using the same bits used in the CT message payload. Since our
 * communication channel with the GuC is turned off at that point, we can save
 * the message and handle it after we turn it back on.
 */
static void guc_clear_mmio_msg(struct intel_guc *guc)
{
	intel_uncore_write(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15), 0);
}

static void guc_get_mmio_msg(struct intel_guc *guc)
{
	u32 val;

	spin_lock_irq(&guc->irq_lock);

	val = intel_uncore_read(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15));
	guc->mmio_msg |= val & guc->msg_enabled_mask;

	/*
	 * Clear all events, including the ones we're not currently servicing,
	 * to make sure we don't try to process a stale message if we enable
	 * handling of more events later.
	 */
	guc_clear_mmio_msg(guc);

	spin_unlock_irq(&guc->irq_lock);
}

static void guc_handle_mmio_msg(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;

	/* we need communication enabled to reply to GuC */
	GEM_BUG_ON(!guc_communication_enabled(guc));

	if (!guc->mmio_msg)
		return;

	spin_lock_irq(&i915->irq_lock);
	intel_guc_to_host_process_recv_msg(guc, &guc->mmio_msg, 1);
	spin_unlock_irq(&i915->irq_lock);

	guc->mmio_msg = 0;
}

static void guc_reset_interrupts(struct intel_guc *guc)
{
	guc->interrupts.reset(guc);
}

static void guc_enable_interrupts(struct intel_guc *guc)
{
	guc->interrupts.enable(guc);
}

static void guc_disable_interrupts(struct intel_guc *guc)
{
	guc->interrupts.disable(guc);
}

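/*
 * Bring up the CT channel to the GuC, process any messages that arrived via
 * MMIO or CT while the channel was down, and then enable GuC interrupts.
 */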
static int guc_enable_communication(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
	int ret;

	GEM_BUG_ON(guc_communication_enabled(guc));

	ret = i915_inject_probe_error(i915, -ENXIO);
	if (ret)
		return ret;

	ret = intel_guc_ct_enable(&guc->ct);
	if (ret)
		return ret;

	/* check for mmio messages received before/during the CT enable */
	guc_get_mmio_msg(guc);
	guc_handle_mmio_msg(guc);

	guc_enable_interrupts(guc);

	/* check for CT messages received before we enabled interrupts */
	spin_lock_irq(&i915->irq_lock);
	intel_guc_ct_event_handler(&guc->ct);
	spin_unlock_irq(&i915->irq_lock);

	DRM_INFO("GuC communication enabled\n");

	return 0;
}

static void guc_disable_communication(struct intel_guc *guc)
{
	/*
	 * Events generated during or after CT disable are logged by the GuC
	 * via MMIO. Make sure the register is clear before disabling CT since
	 * all events we cared about have already been processed via CT.
	 */
	guc_clear_mmio_msg(guc);

	guc_disable_interrupts(guc);

	intel_guc_ct_disable(&guc->ct);

	/*
	 * Check for messages received during/after the CT disable. We do not
	 * expect any messages to have arrived via CT between the interrupt
	 * disable and the CT disable because GuC should've been idle until we
	 * triggered the CT disable protocol.
	 */
	guc_get_mmio_msg(guc);

	DRM_INFO("GuC communication disabled\n");
}

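/* Fetch the GuC firmware and, only if HuC use is requested, the HuC firmware. */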
static void __uc_fetch_firmwares(struct intel_uc *uc)
{
	int err;

	GEM_BUG_ON(!intel_uc_wants_guc(uc));

	err = intel_uc_fw_fetch(&uc->guc.fw);
	if (err)
		return;

	if (intel_uc_wants_huc(uc))
		intel_uc_fw_fetch(&uc->huc.fw);
}

static void __uc_cleanup_firmwares(struct intel_uc *uc)
{
	intel_uc_fw_cleanup_fetch(&uc->huc.fw);
	intel_uc_fw_cleanup_fetch(&uc->guc.fw);
}

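/* Allocate the GuC (and, if used, HuC) objects needed before firmware load. */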
static int __uc_init(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;
	struct intel_huc *huc = &uc->huc;
	int ret;

	GEM_BUG_ON(!intel_uc_wants_guc(uc));

	if (!intel_uc_uses_guc(uc))
		return 0;

	if (i915_inject_probe_failure(uc_to_gt(uc)->i915))
		return -ENOMEM;

	/* XXX: GuC submission is unavailable for now */
	GEM_BUG_ON(intel_uc_uses_guc_submission(uc));

	ret = intel_guc_init(guc);
	if (ret)
		return ret;

	if (intel_uc_uses_huc(uc)) {
		ret = intel_huc_init(huc);
		if (ret)
			goto out_guc;
	}

	return 0;

out_guc:
	intel_guc_fini(guc);
	return ret;
}

static void __uc_fini(struct intel_uc *uc)
{
	intel_huc_fini(&uc->huc);
	intel_guc_fini(&uc->guc);

	__uc_free_load_err_log(uc);
}

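/* Reset both uC firmwares to a clean state, including a GuC hardware reset. */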
static int __uc_sanitize(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;
	struct intel_huc *huc = &uc->huc;

	GEM_BUG_ON(!intel_uc_supports_guc(uc));

	intel_huc_sanitize(huc);
	intel_guc_sanitize(guc);

	return __intel_uc_reset_hw(uc);
}

/* Initialize and verify the uC regs related to uC positioning in WOPCM */
static int uc_init_wopcm(struct intel_uc *uc)
{
	struct intel_gt *gt = uc_to_gt(uc);
	struct intel_uncore *uncore = gt->uncore;
	u32 base = intel_wopcm_guc_base(&gt->i915->wopcm);
	u32 size = intel_wopcm_guc_size(&gt->i915->wopcm);
	u32 huc_agent = intel_uc_uses_huc(uc) ? HUC_LOADING_AGENT_GUC : 0;
	u32 mask;
	int err;

	if (unlikely(!base || !size)) {
		i915_probe_error(gt->i915, "Unsuccessful WOPCM partitioning\n");
		return -E2BIG;
	}

	GEM_BUG_ON(!intel_uc_supports_guc(uc));
	GEM_BUG_ON(!(base & GUC_WOPCM_OFFSET_MASK));
	GEM_BUG_ON(base & ~GUC_WOPCM_OFFSET_MASK);
	GEM_BUG_ON(!(size & GUC_WOPCM_SIZE_MASK));
	GEM_BUG_ON(size & ~GUC_WOPCM_SIZE_MASK);

	err = i915_inject_probe_error(gt->i915, -ENXIO);
	if (err)
		return err;

	mask = GUC_WOPCM_SIZE_MASK | GUC_WOPCM_SIZE_LOCKED;
	err = intel_uncore_write_and_verify(uncore, GUC_WOPCM_SIZE, size, mask,
					    size | GUC_WOPCM_SIZE_LOCKED);
	if (err)
		goto err_out;

	mask = GUC_WOPCM_OFFSET_MASK | GUC_WOPCM_OFFSET_VALID | huc_agent;
	err = intel_uncore_write_and_verify(uncore, DMA_GUC_WOPCM_OFFSET,
					    base | huc_agent, mask,
					    base | huc_agent |
					    GUC_WOPCM_OFFSET_VALID);
	if (err)
		goto err_out;

	return 0;

err_out:
	i915_probe_error(gt->i915, "Failed to init uC WOPCM registers!\n");
	i915_probe_error(gt->i915, "%s(%#x)=%#x\n", "DMA_GUC_WOPCM_OFFSET",
			 i915_mmio_reg_offset(DMA_GUC_WOPCM_OFFSET),
			 intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET));
	i915_probe_error(gt->i915, "%s(%#x)=%#x\n", "GUC_WOPCM_SIZE",
			 i915_mmio_reg_offset(GUC_WOPCM_SIZE),
			 intel_uncore_read(uncore, GUC_WOPCM_SIZE));

	return err;
}

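/* Check whether the WOPCM registers have already been written and locked. */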
static bool uc_is_wopcm_locked(struct intel_uc *uc)
{
	struct intel_gt *gt = uc_to_gt(uc);
	struct intel_uncore *uncore = gt->uncore;

	return (intel_uncore_read(uncore, GUC_WOPCM_SIZE) & GUC_WOPCM_SIZE_LOCKED) ||
	       (intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET) & GUC_WOPCM_OFFSET_VALID);
}

static int __uc_check_hw(struct intel_uc *uc)
{
	if (!intel_uc_supports_guc(uc))
		return 0;

	/*
	 * We can silently continue without GuC only if it was never enabled
	 * before on this system after reboot, otherwise we risk GPU hangs.
	 * To check if GuC was loaded before we look at the WOPCM registers.
	 */
	if (uc_is_wopcm_locked(uc))
		return -EIO;

	return 0;
}

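/*
 * Load and start the uC firmwares: partition WOPCM, upload the blobs (with
 * retries on Gen9), then enable communication, HuC authentication and, if
 * requested, GuC submission.
 */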
static int __uc_init_hw(struct intel_uc *uc)
{
	struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
	struct intel_guc *guc = &uc->guc;
	struct intel_huc *huc = &uc->huc;
	int ret, attempts;

	GEM_BUG_ON(!intel_uc_supports_guc(uc));
	GEM_BUG_ON(!intel_uc_wants_guc(uc));

	if (!intel_uc_fw_is_loadable(&guc->fw)) {
		ret = __uc_check_hw(uc) ||
		      intel_uc_fw_is_overridden(&guc->fw) ||
		      intel_uc_wants_guc_submission(uc) ?
		      intel_uc_fw_status_to_error(guc->fw.status) : 0;
		goto err_out;
	}

	ret = uc_init_wopcm(uc);
	if (ret)
		goto err_out;

	guc_reset_interrupts(guc);

	/* WaEnableuKernelHeaderValidFix:skl */
	/* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
	if (IS_GEN(i915, 9))
		attempts = 3;
	else
		attempts = 1;

	while (attempts--) {
		/*
		 * Always reset the GuC just before (re)loading, so
		 * that the state and timing are fairly predictable.
		 */
		ret = __uc_sanitize(uc);
		if (ret)
			goto err_out;

		intel_huc_fw_upload(huc);
		intel_guc_ads_reset(guc);
		intel_guc_write_params(guc);
		ret = intel_guc_fw_upload(guc);
		if (ret == 0)
			break;

		DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and "
				 "retry %d more time(s)\n", ret, attempts);
	}

	/* Did we succeed or run out of retries? */
	if (ret)
		goto err_log_capture;

	ret = guc_enable_communication(guc);
	if (ret)
		goto err_log_capture;

	intel_huc_auth(huc);

	ret = intel_guc_sample_forcewake(guc);
	if (ret)
		goto err_communication;

	if (intel_uc_uses_guc_submission(uc))
		intel_guc_submission_enable(guc);

	dev_info(i915->drm.dev, "%s firmware %s version %u.%u %s:%s\n",
		 intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_GUC), guc->fw.path,
		 guc->fw.major_ver_found, guc->fw.minor_ver_found,
		 "submission",
		 enableddisabled(intel_uc_uses_guc_submission(uc)));

	if (intel_uc_uses_huc(uc)) {
		dev_info(i915->drm.dev, "%s firmware %s version %u.%u %s:%s\n",
			 intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_HUC),
			 huc->fw.path,
			 huc->fw.major_ver_found, huc->fw.minor_ver_found,
			 "authenticated",
			 yesno(intel_huc_is_authenticated(huc)));
	}

	return 0;

	/*
	 * We've failed to load the firmware :(
	 */
err_communication:
	guc_disable_communication(guc);
err_log_capture:
	__uc_capture_load_err_log(uc);
err_out:
	__uc_sanitize(uc);

	if (!ret) {
		dev_notice(i915->drm.dev, "GuC is uninitialized\n");
		/* We want to run without GuC submission */
		return 0;
	}

	i915_probe_error(i915, "GuC initialization failed %d\n", ret);

	/* We want to keep KMS alive */
	return -EIO;
}

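/* Tear down GuC submission and communication before the firmware is reset. */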
static void __uc_fini_hw(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	if (!intel_guc_is_fw_running(guc))
		return;

	if (intel_uc_uses_guc_submission(uc))
		intel_guc_submission_disable(guc);

	if (guc_communication_enabled(guc))
		guc_disable_communication(guc);

	__uc_sanitize(uc);
}


/*
 * uC state is not trusted across a full GPU reset: stop talking to the GuC
 * and sanitize the firmwares so that everything is reloaded from scratch
 * afterwards.
 */
void intel_uc_reset_prepare(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	if (!intel_guc_is_ready(guc))
		return;

	guc_disable_communication(guc);
	__uc_sanitize(uc);
}

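/* Runtime suspend: ask the GuC to quiesce, then shut down communication. */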
void intel_uc_runtime_suspend(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;
	int err;

	if (!intel_guc_is_ready(guc))
		return;

	err = intel_guc_suspend(guc);
	if (err)
		DRM_DEBUG_DRIVER("Failed to suspend GuC, err=%d", err);

	guc_disable_communication(guc);
}

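/* System suspend: same as runtime suspend, but under a runtime-pm wakeref. */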
void intel_uc_suspend(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;
	intel_wakeref_t wakeref;

	if (!intel_guc_is_ready(guc))
		return;

	with_intel_runtime_pm(uc_to_gt(uc)->uncore->rpm, wakeref)
		intel_uc_runtime_suspend(uc);
}

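/* Common resume path: optionally re-enable communication, then resume the GuC. */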
static int __uc_resume(struct intel_uc *uc, bool enable_communication)
{
	struct intel_guc *guc = &uc->guc;
	int err;

	if (!intel_guc_is_fw_running(guc))
		return 0;

	/* Make sure we enable communication when and only when it's disabled */
	GEM_BUG_ON(enable_communication == guc_communication_enabled(guc));

	if (enable_communication)
		guc_enable_communication(guc);

	err = intel_guc_resume(guc);
	if (err) {
		DRM_DEBUG_DRIVER("Failed to resume GuC, err=%d", err);
		return err;
	}

	return 0;
}

int intel_uc_resume(struct intel_uc *uc)
{
	/*
	 * When coming out of S3/S4 we sanitize and re-init the HW, so
	 * communication is already re-enabled at this point.
	 */
	return __uc_resume(uc, false);
}

int intel_uc_runtime_resume(struct intel_uc *uc)
{
	/*
	 * During runtime resume we don't sanitize, so we need to re-init
	 * communication as well.
	 */
	return __uc_resume(uc, true);
}

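/* uC ops used when GuC usage is disabled: just verify GuC was never started. */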
static const struct intel_uc_ops uc_ops_off = {
	.init_hw = __uc_check_hw,
};

static const struct intel_uc_ops uc_ops_on = {
	.sanitize = __uc_sanitize,

	.init_fw = __uc_fetch_firmwares,
	.fini_fw = __uc_cleanup_firmwares,

	.init = __uc_init,
	.fini = __uc_fini,

	.init_hw = __uc_init_hw,
	.fini_hw = __uc_fini_hw,
};