// SPDX-License-Identifier: MIT
/*
 * Copyright © 2016-2019 Intel Corporation
 */

#include "gt/intel_gt.h"
#include "gt/intel_reset.h"
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
#include "intel_uc.h"

#include "i915_drv.h"

/* Reset GuC providing us with fresh state for both GuC and HuC.
 */
static int __intel_uc_reset_hw(struct intel_uc *uc)
{
	struct intel_gt *gt = uc_to_gt(uc);
	int ret;
	u32 guc_status;

	ret = i915_inject_load_error(gt->i915, -ENXIO);
	if (ret)
		return ret;

	ret = intel_reset_guc(gt);
	if (ret) {
		DRM_ERROR("Failed to reset GuC, ret = %d\n", ret);
		return ret;
	}

	guc_status = intel_uncore_read(gt->uncore, GUC_STATUS);
	WARN(!(guc_status & GS_MIA_IN_RESET),
	     "GuC status: 0x%x, MIA core expected to be in reset\n",
	     guc_status);

	return ret;
}

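/*
 * Sanity-check the enable_guc modparam against what the platform actually
 * supports and report any inconsistency. The effective GuC/HuC configuration
 * has already been decided by this point, so this is purely informational.
 */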
static void __confirm_options(struct intel_uc *uc)
{
	struct drm_i915_private *i915 = uc_to_gt(uc)->i915;

	DRM_DEV_DEBUG_DRIVER(i915->drm.dev,
			     "enable_guc=%d (guc:%s submission:%s huc:%s)\n",
			     i915_modparams.enable_guc,
			     yesno(intel_uc_uses_guc(uc)),
			     yesno(intel_uc_uses_guc_submission(uc)),
			     yesno(intel_uc_uses_huc(uc)));

	if (i915_modparams.enable_guc == -1)
		return;

	if (i915_modparams.enable_guc == 0) {
		GEM_BUG_ON(intel_uc_uses_guc(uc));
		GEM_BUG_ON(intel_uc_uses_guc_submission(uc));
		GEM_BUG_ON(intel_uc_uses_huc(uc));
		return;
	}

	if (!intel_uc_supports_guc(uc))
		dev_info(i915->drm.dev,
			 "Incompatible option enable_guc=%d - %s\n",
			 i915_modparams.enable_guc, "GuC is not supported!");

	if (i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC &&
	    !intel_uc_supports_huc(uc))
		dev_info(i915->drm.dev,
			 "Incompatible option enable_guc=%d - %s\n",
			 i915_modparams.enable_guc, "HuC is not supported!");

	if (i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION &&
	    !intel_uc_supports_guc_submission(uc))
		dev_info(i915->drm.dev,
			 "Incompatible option enable_guc=%d - %s\n",
			 i915_modparams.enable_guc, "GuC submission is N/A");

	if (i915_modparams.enable_guc & ~(ENABLE_GUC_SUBMISSION |
					  ENABLE_GUC_LOAD_HUC))
		dev_info(i915->drm.dev,
			 "Incompatible option enable_guc=%d - %s\n",
			 i915_modparams.enable_guc, "undocumented flag");
}

void intel_uc_init_early(struct intel_uc *uc)
{
	intel_guc_init_early(&uc->guc);
	intel_huc_init_early(&uc->huc);

	__confirm_options(uc);
}

void intel_uc_driver_late_release(struct intel_uc *uc)
{
}

/**
 * intel_uc_init_mmio - setup uC MMIO access
 * @uc: the intel_uc structure
 *
 * Setup minimal state necessary for MMIO accesses later in the
 * firmware loading sequence.
 */
void intel_uc_init_mmio(struct intel_uc *uc)
{
	intel_guc_init_send_regs(&uc->guc);
}

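/*
 * If the GuC firmware load failed, keep a reference to the GuC log buffer
 * object so its contents remain available for post-mortem inspection after
 * the rest of the GuC state has been torn down.
 */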
static void __uc_capture_load_err_log(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	if (guc->log.vma && !uc->load_err_log)
		uc->load_err_log = i915_gem_object_get(guc->log.vma->obj);
}

static void __uc_free_load_err_log(struct intel_uc *uc)
{
	struct drm_i915_gem_object *log = fetch_and_zero(&uc->load_err_log);

	if (log)
		i915_gem_object_put(log);
}

/*
 * Events triggered while CT buffers are disabled are logged in the SCRATCH_15
 * register using the same bits used in the CT message payload. Since our
 * communication channel with GuC is turned off at this point, we can save the
 * message and handle it after we turn it back on.
 */
static void guc_clear_mmio_msg(struct intel_guc *guc)
{
	intel_uncore_write(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15), 0);
}

static void guc_get_mmio_msg(struct intel_guc *guc)
{
	u32 val;

	spin_lock_irq(&guc->irq_lock);

	val = intel_uncore_read(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15));
	guc->mmio_msg |= val & guc->msg_enabled_mask;

	/*
	 * Clear all events, including the ones we're not currently servicing,
	 * to make sure we don't try to process a stale message if we enable
	 * handling of more events later.
	 */
	guc_clear_mmio_msg(guc);

	spin_unlock_irq(&guc->irq_lock);
}

static void guc_handle_mmio_msg(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;

	/* we need communication to be enabled to reply to GuC */
	GEM_BUG_ON(guc->handler == intel_guc_to_host_event_handler_nop);

	if (!guc->mmio_msg)
		return;

	spin_lock_irq(&i915->irq_lock);
	intel_guc_to_host_process_recv_msg(guc, &guc->mmio_msg, 1);
	spin_unlock_irq(&i915->irq_lock);

	guc->mmio_msg = 0;
}

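/*
 * GuC interrupt handling differs between platforms, so it is routed through
 * function pointers set up during early init; these wrappers keep the
 * callers backend-agnostic.
 */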
static void guc_reset_interrupts(struct intel_guc *guc)
{
	guc->interrupts.reset(guc);
}

static void guc_enable_interrupts(struct intel_guc *guc)
{
	guc->interrupts.enable(guc);
}

static void guc_disable_interrupts(struct intel_guc *guc)
{
	guc->interrupts.disable(guc);
}

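/*
 * guc->send toggles between intel_guc_send_nop and the real CT-backed
 * implementation, so comparing against the nop sentinel is enough to tell
 * whether communication is currently up.
 */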
static inline bool guc_communication_enabled(struct intel_guc *guc)
{
	return guc->send != intel_guc_send_nop;
}

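/*
 * Bring up two-way communication: enable the CT buffers, switch the send and
 * event handlers away from their nop placeholders, then drain any messages
 * that arrived via MMIO or CT before the channel and interrupts were ready.
 */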
static int guc_enable_communication(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
	int ret;

	GEM_BUG_ON(guc_communication_enabled(guc));

	ret = i915_inject_load_error(i915, -ENXIO);
	if (ret)
		return ret;

	ret = intel_guc_ct_enable(&guc->ct);
	if (ret)
		return ret;

	guc->send = intel_guc_send_ct;
	guc->handler = intel_guc_to_host_event_handler_ct;

	/* check for mmio messages received before/during the CT enable */
	guc_get_mmio_msg(guc);
	guc_handle_mmio_msg(guc);

	guc_enable_interrupts(guc);

	/* check for CT messages received before we enabled interrupts */
	spin_lock_irq(&i915->irq_lock);
	intel_guc_to_host_event_handler_ct(guc);
	spin_unlock_irq(&i915->irq_lock);

	DRM_INFO("GuC communication enabled\n");

	return 0;
}

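/*
 * The lightweight counterpart of guc_disable_communication() below, used on
 * the reset path: CT is stopped rather than cleanly disabled, interrupts are
 * left alone, and any stale MMIO message is simply discarded, since the GuC
 * is about to be sanitized anyway.
 */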
static void guc_stop_communication(struct intel_guc *guc)
{
	intel_guc_ct_stop(&guc->ct);

	guc->send = intel_guc_send_nop;
	guc->handler = intel_guc_to_host_event_handler_nop;

	guc_clear_mmio_msg(guc);
}

static void guc_disable_communication(struct intel_guc *guc)
{
	/*
	 * Events generated during or after CT disable are logged by GuC via
	 * mmio. Make sure the register is cleared at this point, so new
	 * pending events can be captured and cleanly handled.
	 */
	guc_clear_mmio_msg(guc);

	guc_disable_interrupts(guc);

	guc->send = intel_guc_send_nop;
	guc->handler = intel_guc_to_host_event_handler_nop;

	intel_guc_ct_disable(&guc->ct);

	/*
	 * Check for messages received during/after the CT disable. We do not
	 * expect any messages to have arrived via CT between the interrupt
	 * disable and the CT disable because GuC should've been idle until we
	 * triggered the CT disable protocol.
	 */
	guc_get_mmio_msg(guc);

	DRM_INFO("GuC communication disabled\n");
}

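/*
 * The HuC firmware is only useful if the GuC is available to authenticate
 * it, so it is fetched only once the GuC fetch has succeeded; a HuC fetch
 * failure is not fatal here and is simply reflected in the HuC fw status.
 */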
void intel_uc_fetch_firmwares(struct intel_uc *uc)
{
	struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
	int err;

	if (!intel_uc_uses_guc(uc))
		return;

	err = intel_uc_fw_fetch(&uc->guc.fw, i915);
	if (err)
		return;

	if (intel_uc_uses_huc(uc))
		intel_uc_fw_fetch(&uc->huc.fw, i915);
}

void intel_uc_cleanup_firmwares(struct intel_uc *uc)
{
	if (!intel_uc_uses_guc(uc))
		return;

	if (intel_uc_uses_huc(uc))
		intel_uc_fw_cleanup_fetch(&uc->huc.fw);

	intel_uc_fw_cleanup_fetch(&uc->guc.fw);
}

void intel_uc_init(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;
	struct intel_huc *huc = &uc->huc;
	int ret;

	if (!intel_uc_uses_guc(uc))
		return;

	/* XXX: GuC submission is unavailable for now */
	GEM_BUG_ON(intel_uc_supports_guc_submission(uc));

	ret = intel_guc_init(guc);
	if (ret) {
		intel_uc_fw_cleanup_fetch(&huc->fw);
		return;
	}

	if (intel_uc_uses_huc(uc))
		intel_huc_init(huc);
}

void intel_uc_fini(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	if (!intel_uc_uses_guc(uc))
		return;

	if (intel_uc_uses_huc(uc))
		intel_huc_fini(&uc->huc);

	intel_guc_fini(guc);

	__uc_free_load_err_log(uc);
}

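/*
 * Drop the software state of both firmwares and reset the GuC hardware so
 * the next load starts from a known-clean state.
 */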
static int __uc_sanitize(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;
	struct intel_huc *huc = &uc->huc;

	GEM_BUG_ON(!intel_uc_supports_guc(uc));

	intel_huc_sanitize(huc);
	intel_guc_sanitize(guc);

	return __intel_uc_reset_hw(uc);
}

void intel_uc_sanitize(struct intel_uc *uc)
{
	if (!intel_uc_supports_guc(uc))
		return;

	__uc_sanitize(uc);
}

/* Initialize and verify the uC regs related to uC positioning in WOPCM */
static int uc_init_wopcm(struct intel_uc *uc)
{
	struct intel_gt *gt = uc_to_gt(uc);
	struct intel_uncore *uncore = gt->uncore;
	u32 base = intel_wopcm_guc_base(&gt->i915->wopcm);
	u32 size = intel_wopcm_guc_size(&gt->i915->wopcm);
	u32 huc_agent = intel_uc_uses_huc(uc) ? HUC_LOADING_AGENT_GUC : 0;
	u32 mask;
	int err;

	if (unlikely(!base || !size)) {
		i915_probe_error(gt->i915, "Unsuccessful WOPCM partitioning\n");
		return -E2BIG;
	}

	GEM_BUG_ON(!intel_uc_supports_guc(uc));
	GEM_BUG_ON(!(base & GUC_WOPCM_OFFSET_MASK));
	GEM_BUG_ON(base & ~GUC_WOPCM_OFFSET_MASK);
	GEM_BUG_ON(!(size & GUC_WOPCM_SIZE_MASK));
	GEM_BUG_ON(size & ~GUC_WOPCM_SIZE_MASK);

	err = i915_inject_load_error(gt->i915, -ENXIO);
	if (err)
		return err;

	mask = GUC_WOPCM_SIZE_MASK | GUC_WOPCM_SIZE_LOCKED;
	err = intel_uncore_write_and_verify(uncore, GUC_WOPCM_SIZE, size, mask,
					    size | GUC_WOPCM_SIZE_LOCKED);
	if (err)
		goto err_out;

	mask = GUC_WOPCM_OFFSET_MASK | GUC_WOPCM_OFFSET_VALID | huc_agent;
	err = intel_uncore_write_and_verify(uncore, DMA_GUC_WOPCM_OFFSET,
					    base | huc_agent, mask,
					    base | huc_agent |
					    GUC_WOPCM_OFFSET_VALID);
	if (err)
		goto err_out;

	return 0;

err_out:
	i915_probe_error(gt->i915, "Failed to init uC WOPCM registers!\n");
	i915_probe_error(gt->i915, "%s(%#x)=%#x\n", "DMA_GUC_WOPCM_OFFSET",
			 i915_mmio_reg_offset(DMA_GUC_WOPCM_OFFSET),
			 intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET));
	i915_probe_error(gt->i915, "%s(%#x)=%#x\n", "GUC_WOPCM_SIZE",
			 i915_mmio_reg_offset(GUC_WOPCM_SIZE),
			 intel_uncore_read(uncore, GUC_WOPCM_SIZE));

	return err;
}

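/*
 * The lock/valid bits in the WOPCM registers stick once set, so finding
 * either of them set means the registers were already programmed, i.e. a
 * GuC load has already been attempted on this hardware since the last
 * full reset.
 */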
static bool uc_is_wopcm_locked(struct intel_uc *uc)
{
	struct intel_gt *gt = uc_to_gt(uc);
	struct intel_uncore *uncore = gt->uncore;

	return (intel_uncore_read(uncore, GUC_WOPCM_SIZE) & GUC_WOPCM_SIZE_LOCKED) ||
	       (intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET) & GUC_WOPCM_OFFSET_VALID);
}

int intel_uc_init_hw(struct intel_uc *uc)
{
	struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
	struct intel_guc *guc = &uc->guc;
	struct intel_huc *huc = &uc->huc;
	int ret, attempts;

	if (!intel_uc_supports_guc(uc))
		return 0;

	/*
	 * We can silently continue without GuC only if it was never enabled
	 * before on this system after reboot, otherwise we risk GPU hangs.
	 * To check if GuC was loaded before we look at WOPCM registers.
	 */
	if (!intel_uc_uses_guc(uc) && !uc_is_wopcm_locked(uc))
		return 0;

	if (!intel_uc_fw_is_available(&guc->fw)) {
		ret = uc_is_wopcm_locked(uc) ||
		      intel_uc_fw_is_overridden(&guc->fw) ||
		      intel_uc_supports_guc_submission(uc) ?
		      intel_uc_fw_status_to_error(guc->fw.status) : 0;
		goto err_out;
	}

	ret = uc_init_wopcm(uc);
	if (ret)
		goto err_out;

	guc_reset_interrupts(guc);

	/* WaEnableuKernelHeaderValidFix:skl */
	/* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
	if (IS_GEN(i915, 9))
		attempts = 3;
	else
		attempts = 1;

	while (attempts--) {
		/*
		 * Always reset the GuC just before (re)loading, so
		 * that the state and timing are fairly predictable.
		 */
		ret = __uc_sanitize(uc);
		if (ret)
			goto err_out;

		intel_huc_fw_upload(huc);
		intel_guc_ads_reset(guc);
		intel_guc_write_params(guc);
		ret = intel_guc_fw_upload(guc);
		if (ret == 0)
			break;

		DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and "
				 "retry %d more time(s)\n", ret, attempts);
	}

	/* Did we succeed or run out of retries? */
	if (ret)
		goto err_log_capture;

	ret = guc_enable_communication(guc);
	if (ret)
		goto err_log_capture;

	intel_huc_auth(huc);

	ret = intel_guc_sample_forcewake(guc);
	if (ret)
		goto err_communication;

	if (intel_uc_supports_guc_submission(uc)) {
		ret = intel_guc_submission_enable(guc);
		if (ret)
			goto err_communication;
	}

	dev_info(i915->drm.dev, "%s firmware %s version %u.%u %s:%s\n",
		 intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_GUC), guc->fw.path,
		 guc->fw.major_ver_found, guc->fw.minor_ver_found,
		 "submission",
		 enableddisabled(intel_uc_supports_guc_submission(uc)));

	if (intel_uc_uses_huc(uc)) {
		dev_info(i915->drm.dev, "%s firmware %s version %u.%u %s:%s\n",
			 intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_HUC),
			 huc->fw.path,
			 huc->fw.major_ver_found, huc->fw.minor_ver_found,
			 "authenticated",
			 yesno(intel_huc_is_authenticated(huc)));
	}

	return 0;

	/*
	 * We've failed to load the firmware :(
	 */
err_communication:
	guc_disable_communication(guc);
err_log_capture:
	__uc_capture_load_err_log(uc);
err_out:
	__uc_sanitize(uc);

	if (!ret) {
		dev_notice(i915->drm.dev, "GuC is uninitialized\n");
		/* We want to run without GuC submission */
		return 0;
	}

	i915_probe_error(i915, "GuC initialization failed %d\n", ret);

	/* We want to keep KMS alive */
	return -EIO;
}

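/*
 * Teardown mirrors intel_uc_init_hw() in reverse: disable submission first,
 * then communication, then sanitize to put the GuC HW back into reset.
 */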
void intel_uc_fini_hw(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	if (!intel_guc_is_running(guc))
		return;

	if (intel_uc_supports_guc_submission(uc))
		intel_guc_submission_disable(guc);

	guc_disable_communication(guc);
	__uc_sanitize(uc);
}

/**
 * intel_uc_reset_prepare - Prepare for reset
 * @uc: the intel_uc structure
 *
 * Preparing for full gpu reset.
 */
void intel_uc_reset_prepare(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	if (!intel_guc_is_running(guc))
		return;

	guc_stop_communication(guc);
	__uc_sanitize(uc);
}

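/*
 * On runtime suspend the GuC is asked to save its state before we tear down
 * communication; the channel is brought back up in intel_uc_runtime_resume().
 */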
void intel_uc_runtime_suspend(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;
	int err;

	if (!intel_guc_is_running(guc))
		return;

	err = intel_guc_suspend(guc);
	if (err)
		DRM_DEBUG_DRIVER("Failed to suspend GuC, err=%d\n", err);

	guc_disable_communication(guc);
}

void intel_uc_suspend(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;
	intel_wakeref_t wakeref;

	if (!intel_guc_is_running(guc))
		return;

	with_intel_runtime_pm(&uc_to_gt(uc)->i915->runtime_pm, wakeref)
		intel_uc_runtime_suspend(uc);
}

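/*
 * Common resume path; @enable_communication distinguishes runtime resume,
 * where the channel must be re-established here, from system resume, where
 * the HW has been re-initialized and communication is already back up.
 */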
static int __uc_resume(struct intel_uc *uc, bool enable_communication)
{
	struct intel_guc *guc = &uc->guc;
	int err;

	if (!intel_guc_is_running(guc))
		return 0;

	/* Make sure we enable communication if and only if it's disabled */
	GEM_BUG_ON(enable_communication == guc_communication_enabled(guc));

	if (enable_communication)
		guc_enable_communication(guc);

	err = intel_guc_resume(guc);
	if (err) {
		DRM_DEBUG_DRIVER("Failed to resume GuC, err=%d\n", err);
		return err;
	}

	return 0;
}

int intel_uc_resume(struct intel_uc *uc)
{
	/*
	 * When coming out of S3/S4 we sanitize and re-init the HW, so
	 * communication is already re-enabled at this point.
	 */
	return __uc_resume(uc, false);
}

int intel_uc_runtime_resume(struct intel_uc *uc)
{
	/*
	 * During runtime resume we don't sanitize, so we need to re-init
	 * communication as well.
	 */
	return __uc_resume(uc, true);
}