#include <linux/firmware.h>
#include "i915_drv.h"
#include "intel_guc.h"
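
/*
 * GuC firmware loading:
 *
 * The firmware blob is fetched from userspace via request_firmware() in
 * intel_guc_init(), validated against the CSS header it carries, and later
 * transferred into the GuC's WOPCM memory by DMA from intel_guc_setup().
 * The RSA signature is written through MMIO so that the GuC boot ROM can
 * verify the image before starting it.
 */
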
#define SKL_FW_MAJOR 6
#define SKL_FW_MINOR 1

#define BXT_FW_MAJOR 8
#define BXT_FW_MINOR 7

#define KBL_FW_MAJOR 9
#define KBL_FW_MINOR 14

#define GUC_FW_PATH(platform, major, minor) \
	"i915/" __stringify(platform) "_guc_ver" __stringify(major) "_" __stringify(minor) ".bin"

#define I915_SKL_GUC_UCODE GUC_FW_PATH(skl, SKL_FW_MAJOR, SKL_FW_MINOR)
MODULE_FIRMWARE(I915_SKL_GUC_UCODE);

#define I915_BXT_GUC_UCODE GUC_FW_PATH(bxt, BXT_FW_MAJOR, BXT_FW_MINOR)
MODULE_FIRMWARE(I915_BXT_GUC_UCODE);

#define I915_KBL_GUC_UCODE GUC_FW_PATH(kbl, KBL_FW_MAJOR, KBL_FW_MINOR)
MODULE_FIRMWARE(I915_KBL_GUC_UCODE);

/* User-friendly representation of an enum */
const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
{
	switch (status) {
	case GUC_FIRMWARE_FAIL:
		return "FAIL";
	case GUC_FIRMWARE_NONE:
		return "NONE";
	case GUC_FIRMWARE_PENDING:
		return "PENDING";
	case GUC_FIRMWARE_SUCCESS:
		return "SUCCESS";
	default:
		return "UNKNOWN!";
	}
}

static void guc_interrupts_release(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int irqs;

	/* tell all command streamers NOT to forward interrupts or vblank to GuC */
	irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
	irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
	for_each_engine(engine, dev_priv, id)
		I915_WRITE(RING_MODE_GEN7(engine), irqs);

	/* route all GT interrupts to the host */
	I915_WRITE(GUC_BCS_RCS_IER, 0);
	I915_WRITE(GUC_VCS2_VCS1_IER, 0);
	I915_WRITE(GUC_WD_VECS_IER, 0);
}

static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int irqs;
	u32 tmp;

	/* tell all command streamers to forward interrupts (but not vblank) to GuC */
	irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
	for_each_engine(engine, dev_priv, id)
		I915_WRITE(RING_MODE_GEN7(engine), irqs);

	/* route USER_INTERRUPT to Host, all others are sent to GuC. */
	irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
	       GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
	/* These three registers have the same bit definitions */
	I915_WRITE(GUC_BCS_RCS_IER, ~irqs);
	I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs);
	I915_WRITE(GUC_WD_VECS_IER, ~irqs);

	/*
	 * If the PM interrupts have been redirected to the GuC (the
	 * GEN8_PMINTR_REDIRECT_TO_GUC bit is set in GEN6_PMINTRMSK), the
	 * driver must not mask off any PM interrupt the GuC expects to
	 * receive. Record every interrupt that is currently unmasked in
	 * pm_intr_keep so that later mask updates leave them enabled, but
	 * don't keep the redirect bit itself there.
	 */
	tmp = I915_READ(GEN6_PMINTRMSK);
	if (tmp & GEN8_PMINTR_REDIRECT_TO_GUC) {
		dev_priv->rps.pm_intr_keep |= ~tmp;
		dev_priv->rps.pm_intr_keep &= ~GEN8_PMINTR_REDIRECT_TO_GUC;
	}
}

static u32 get_gttype(struct drm_i915_private *dev_priv)
{
	/* XXX: GT type based on PCI device ID is not yet wired up; report 0 */
	return 0;
}

static u32 get_core_family(struct drm_i915_private *dev_priv)
{
	u32 gen = INTEL_GEN(dev_priv);

	switch (gen) {
	case 9:
		return GFXCORE_FAMILY_GEN9;

	default:
		WARN(1, "GEN%d does not support GuC operation!\n", gen);
		return GFXCORE_FAMILY_UNKNOWN;
	}
}
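
/*
 * Initialise the GuC parameter block before starting the firmware transfer.
 * The parameters are written to the SOFT_SCRATCH registers, from where the
 * firmware reads them at startup, so they must be in place before the DMA
 * transfer in guc_ucode_xfer_dma() is kicked off.
 */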
static void guc_params_init(struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;
	u32 params[GUC_CTL_MAX_DWORDS];
	int i;

	memset(&params, 0, sizeof(params));

	params[GUC_CTL_DEVICE_INFO] |=
		(get_gttype(dev_priv) << GUC_CTL_GTTYPE_SHIFT) |
		(get_core_family(dev_priv) << GUC_CTL_COREFAMILY_SHIFT);

	/*
	 * The GuC ARAT timer ticks in 10 ns increments, and the default
	 * scheduler quantum is one second, hence:
	 * 1000000000 ns / 10 ns-per-tick = 100000000 ticks.
	 */
	params[GUC_CTL_ARAT_HIGH] = 0;
	params[GUC_CTL_ARAT_LOW] = 100000000;

	params[GUC_CTL_WA] |= GUC_CTL_WA_UK_BY_DRIVER;

	params[GUC_CTL_FEATURE] |= GUC_CTL_DISABLE_SCHEDULER |
			GUC_CTL_VCS2_ENABLED;

	params[GUC_CTL_LOG_PARAMS] = guc->log.flags;

	if (i915.guc_log_level >= 0) {
		params[GUC_CTL_DEBUG] =
			i915.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
	} else {
		params[GUC_CTL_DEBUG] = GUC_LOG_DISABLED;
	}

	if (guc->ads_vma) {
		u32 ads = i915_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
		params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
		params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED;
	}

	/* If GuC submission is enabled, set up additional parameters here */
	if (i915.enable_guc_submission) {
		u32 pgs = i915_ggtt_offset(dev_priv->guc.ctx_pool_vma);
		u32 ctx_in_16 = GUC_MAX_GPU_CONTEXTS / 16;

		pgs >>= PAGE_SHIFT;
		params[GUC_CTL_CTXINFO] = (pgs << GUC_CTL_BASE_ADDR_SHIFT) |
			(ctx_in_16 << GUC_CTL_CTXNUM_IN16_SHIFT);

		params[GUC_CTL_FEATURE] |= GUC_CTL_KERNEL_SUBMISSIONS;

		/* Unmask this bit to enable the GuC's internal scheduler */
		params[GUC_CTL_FEATURE] &= ~GUC_CTL_DISABLE_SCHEDULER;
	}

	I915_WRITE(SOFT_SCRATCH(0), 0);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		I915_WRITE(SOFT_SCRATCH(1 + i), params[i]);
}
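
/*
 * Read the GuC status register (GUC_STATUS) and store it in the specified
 * location; then return a boolean indicating whether the value matches
 * either of the two states that represent completion of the GuC boot
 * process. This is used for polling the GuC status in the wait_for() loop
 * in guc_ucode_xfer_dma() below.
 */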
static inline bool guc_ucode_response(struct drm_i915_private *dev_priv,
				      u32 *status)
{
	u32 val = I915_READ(GUC_STATUS);
	u32 uk_val = val & GS_UKERNEL_MASK;

	*status = val;
	return (uk_val == GS_UKERNEL_READY ||
		((val & GS_MIA_CORE_STATE) && uk_val == GS_UKERNEL_LAPIC_DONE));
}
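
/*
 * Transfer the firmware image into the GuC for execution.
 *
 * The CSS header plus the uKernel code are copied in a single DMA operation
 * into WOPCM, whereas the RSA signature is written to the UOS_RSA_SCRATCH
 * registers via MMIO so that the boot ROM can verify the image.
 */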
static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv,
			      struct i915_vma *vma)
{
	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
	unsigned long offset;
	struct sg_table *sg = vma->pages;
	u32 status, rsa[UOS_RSA_SCRATCH_MAX_COUNT];
	int i, ret = 0;

	/* where RSA signature starts */
	offset = guc_fw->rsa_offset;

	/* Copy RSA signature from the fw image to HW for verification */
	sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, sizeof(rsa), offset);
	for (i = 0; i < UOS_RSA_SCRATCH_MAX_COUNT; i++)
		I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);

	/*
	 * The header plus uCode will be copied to WOPCM via DMA, excluding
	 * any other components.
	 */
	I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);

	/* Set the source address for the new blob */
	offset = i915_ggtt_offset(vma) + guc_fw->header_offset;
	I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
	I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);

	/* Set the DMA destination: offset 0x2000 within the WOPCM address space */
	I915_WRITE(DMA_ADDR_1_LOW, 0x2000);
	I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);

	/* Finally start the DMA */
	I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));

	/*
	 * Wait for the DMA to complete and the GuC to start up. A timeout
	 * here indicates that the GuC has failed and is unusable; higher
	 * levels of the driver may then fall back to execlist mode.
	 */
	ret = wait_for(guc_ucode_response(dev_priv, &status), 100);

	DRM_DEBUG_DRIVER("DMA status 0x%x, GuC status 0x%x\n",
			 I915_READ(DMA_CTRL), status);

	if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
		DRM_ERROR("GuC firmware signature verification failed\n");
		ret = -ENOEXEC;
	}

	DRM_DEBUG_DRIVER("returning %d\n", ret);

	return ret;
}

static u32 guc_wopcm_size(struct drm_i915_private *dev_priv)
{
	u32 wopcm_size = GUC_WOPCM_TOP;

	/* On BXT, the top of WOPCM is reserved for RC6 context */
	if (IS_BROXTON(dev_priv))
		wopcm_size -= BXT_GUC_WOPCM_RC6_RESERVED;

	return wopcm_size;
}
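
/*
 * Load the GuC firmware blob: prepare the WOPCM and shim registers, write
 * the boot parameters, then DMA the image across and wait for the GuC to
 * report that it is up.
 */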
static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
{
	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
	struct i915_vma *vma;
	int ret;

	ret = i915_gem_object_set_to_gtt_domain(guc_fw->guc_fw_obj, false);
	if (ret) {
		DRM_DEBUG_DRIVER("set-domain failed %d\n", ret);
		return ret;
	}

	vma = i915_gem_object_ggtt_pin(guc_fw->guc_fw_obj, NULL, 0, 0, 0);
	if (IS_ERR(vma)) {
		DRM_DEBUG_DRIVER("pin failed %d\n", (int)PTR_ERR(vma));
		return PTR_ERR(vma);
	}

	/* Invalidate GuC TLB to let GuC take the latest updates to GTT */
	I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* init WOPCM */
	I915_WRITE(GUC_WOPCM_SIZE, guc_wopcm_size(dev_priv));
	I915_WRITE(DMA_GUC_WOPCM_OFFSET, GUC_WOPCM_OFFSET_VALUE);

	/* Enable MIA caching. GuC clock gating is disabled. */
	I915_WRITE(GUC_SHIM_CONTROL, GUC_SHIM_CONTROL_VALUE);

	/* WaDisableMinuteIaClockGating:bxt (early steppings only) */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
		I915_WRITE(GUC_SHIM_CONTROL, (I915_READ(GUC_SHIM_CONTROL) &
					      ~GUC_ENABLE_MIA_CLOCK_GATING));
	}

	/* WaC6DisallowByGfxPause:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
		I915_WRITE(GEN6_GFXPAUSE, 0x30FFF);

	if (IS_BROXTON(dev_priv))
		I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
	else
		I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);

	if (IS_GEN9(dev_priv)) {
		/* DOP Clock Gating Enable for GuC clocks */
		I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
					    I915_READ(GEN7_MISCCPCTL)));

		/* allow ~5us (in 10ns ARAT units) before GT can go to RC6 */
		I915_WRITE(GUC_ARAT_C6DIS, 0x1FF);
	}

	guc_params_init(dev_priv);

	ret = guc_ucode_xfer_dma(dev_priv, vma);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	/*
	 * We keep the object pages for reuse during resume. But we can unpin
	 * it now that the DMA has completed, so it doesn't continue to take
	 * up space.
	 */
	i915_vma_unpin(vma);

	return ret;
}
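
/*
 * Reset the GuC so that (re)loading starts from a clean state; the MIA core
 * is expected to be held in reset immediately afterwards.
 */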
static int guc_hw_reset(struct drm_i915_private *dev_priv)
{
	int ret;
	u32 guc_status;

	ret = intel_guc_reset(dev_priv);
	if (ret) {
		DRM_ERROR("GuC reset failed, ret = %d\n", ret);
		return ret;
	}

	guc_status = I915_READ(GUC_STATUS);
	WARN(!(guc_status & GS_MIA_IN_RESET),
	     "GuC status: 0x%x, MIA core expected to be in reset\n", guc_status);

	return ret;
}
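
/**
 * intel_guc_setup() - finish preparing the GuC for activity
 * @dev:	drm device
 *
 * Called during driver loading and also after a GPU reset.
 *
 * The main action required here is to load the GuC uCode into the device.
 * The firmware image should have already been fetched into memory by the
 * earlier call to intel_guc_init(), so here we need only check that the
 * fetch succeeded, and then transfer the image to the h/w.
 *
 * Return:	non-zero code on error
 */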
int intel_guc_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
	const char *fw_path = guc_fw->guc_fw_path;
	int retries, ret, err;

	DRM_DEBUG_DRIVER("GuC fw status: path %s, fetch %s, load %s\n",
		fw_path,
		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
		intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));

	/* Loading forbidden, or no firmware to load? */
	if (!i915.enable_guc_loading) {
		err = 0;
		goto fail;
	} else if (fw_path == NULL) {
		/* Device is known to have no uCode (e.g. no GuC) */
		err = -ENXIO;
		goto fail;
	} else if (*fw_path == '\0') {
		/* Device has a GuC but we don't know what f/w to load? */
		WARN(1, "No GuC firmware known for this platform!\n");
		err = -ENODEV;
		goto fail;
	}

	/* Fetch failed, or already fetched but failed to load? */
	if (guc_fw->guc_fw_fetch_status != GUC_FIRMWARE_SUCCESS) {
		err = -EIO;
		goto fail;
	} else if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_FAIL) {
		err = -ENOEXEC;
		goto fail;
	}

	guc_interrupts_release(dev_priv);
	gen9_reset_guc_interrupts(dev_priv);

	guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING;

	DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
		intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));

	err = i915_guc_submission_init(dev_priv);
	if (err)
		goto fail;

	/*
	 * Loading is not always reliable, so retry the reset-and-load
	 * sequence a few times before giving up.
	 */
	for (retries = 3; ; ) {
		/*
		 * Always reset the GuC just before (re)loading, so that the
		 * state and timing are fairly predictable.
		 */
		err = guc_hw_reset(dev_priv);
		if (err)
			goto fail;

		err = guc_ucode_xfer(dev_priv);
		if (!err)
			break;

		if (--retries == 0)
			goto fail;

		DRM_INFO("GuC fw load failed: %d; will reset and "
			 "retry %d more time(s)\n", err, retries);
	}

	guc_fw->guc_fw_load_status = GUC_FIRMWARE_SUCCESS;

	DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
		intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));

	if (i915.enable_guc_submission) {
		if (i915.guc_log_level >= 0)
			gen9_enable_guc_interrupts(dev_priv);

		err = i915_guc_submission_enable(dev_priv);
		if (err)
			goto fail;
		guc_interrupts_capture(dev_priv);
	}

	return 0;

fail:
	if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_PENDING)
		guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL;

	guc_interrupts_release(dev_priv);
	i915_guc_submission_disable(dev_priv);
	i915_guc_submission_fini(dev_priv);

	/*
	 * We've failed to load the firmware. Decide whether to hide the
	 * failure (returning zero and falling back to execlist mode) or to
	 * report -EIO. A module parameter value greater than 1 means the
	 * feature was explicitly requested, so its absence is treated as an
	 * error.
	 */
	if (i915.enable_guc_loading > 1 || i915.enable_guc_submission > 1)
		ret = -EIO;
	else
		ret = 0;

	if (err == 0 && !HAS_GUC_UCODE(dev_priv))
		;	/* Don't mention the GuC on platforms that have none */
	else if (err == 0)
		DRM_INFO("GuC firmware load skipped\n");
	else if (ret != -EIO)
		DRM_NOTE("GuC firmware load failed: %d\n", err);
	else
		DRM_WARN("GuC firmware load failed: %d\n", err);

	if (i915.enable_guc_submission) {
		if (fw_path == NULL)
			DRM_INFO("GuC submission without firmware not supported\n");
		if (ret == 0)
			DRM_NOTE("Falling back from GuC submission to execlist mode\n");
		else
			DRM_ERROR("GuC init failed: %d\n", ret);
	}
	i915.enable_guc_submission = 0;

	return ret;
}
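
/*
 * Fetch the GuC firmware blob from userspace, validate its CSS header and
 * version, and wrap it in a GEM object ready for the DMA transfer. On any
 * failure the fetch status is set to FAIL and loading is later abandoned.
 */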
static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
{
	struct pci_dev *pdev = dev->pdev;
	struct drm_i915_gem_object *obj;
	const struct firmware *fw;
	struct guc_css_header *css;
	size_t size;
	int err;

	DRM_DEBUG_DRIVER("before requesting firmware: GuC fw fetch status %s\n",
		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));

	err = request_firmware(&fw, guc_fw->guc_fw_path, &pdev->dev);
	if (err)
		goto fail;
	if (!fw)
		goto fail;

	DRM_DEBUG_DRIVER("fetch GuC fw from %s succeeded, fw %p\n",
		guc_fw->guc_fw_path, fw);

	/* Check the size of the blob before examining buffer contents */
	if (fw->size < sizeof(struct guc_css_header)) {
		DRM_NOTE("Firmware header is missing\n");
		goto fail;
	}

	css = (struct guc_css_header *)fw->data;

	/* Firmware bits always start from header */
	guc_fw->header_offset = 0;
	guc_fw->header_size = (css->header_size_dw - css->modulus_size_dw -
		css->key_size_dw - css->exponent_size_dw) * sizeof(u32);

	if (guc_fw->header_size != sizeof(struct guc_css_header)) {
		DRM_NOTE("CSS header definition mismatch\n");
		goto fail;
	}

	/* then, uCode */
	guc_fw->ucode_offset = guc_fw->header_offset + guc_fw->header_size;
	guc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);

	/* now RSA */
	if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) {
		DRM_NOTE("RSA key size is bad\n");
		goto fail;
	}
	guc_fw->rsa_offset = guc_fw->ucode_offset + guc_fw->ucode_size;
	guc_fw->rsa_size = css->key_size_dw * sizeof(u32);

	/* The blob must contain at least the header, uCode and RSA parts */
	size = guc_fw->header_size + guc_fw->ucode_size + guc_fw->rsa_size;
	if (fw->size < size) {
		DRM_NOTE("Missing firmware components\n");
		goto fail;
	}

	/* Header and uCode will be loaded to WOPCM, so check their combined size */
	size = guc_fw->header_size + guc_fw->ucode_size;
	if (size > guc_wopcm_size(to_i915(dev))) {
		DRM_NOTE("Firmware is too large to fit in WOPCM\n");
		goto fail;
	}

	/*
	 * The firmware version is embedded in the CSS header; major and
	 * minor are two bytes each (packed into guc_sw_version as u16s).
	 */
	guc_fw->guc_fw_major_found = css->guc_sw_version >> 16;
	guc_fw->guc_fw_minor_found = css->guc_sw_version & 0xFFFF;

	if (guc_fw->guc_fw_major_found != guc_fw->guc_fw_major_wanted ||
	    guc_fw->guc_fw_minor_found < guc_fw->guc_fw_minor_wanted) {
		DRM_NOTE("GuC firmware version %d.%d, required %d.%d\n",
			guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found,
			guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
		err = -ENOEXEC;
		goto fail;
	}

	DRM_DEBUG_DRIVER("firmware version %d.%d OK (minimum %d.%d)\n",
		guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found,
		guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);

	mutex_lock(&dev->struct_mutex);
	obj = i915_gem_object_create_from_data(dev, fw->data, fw->size);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR_OR_NULL(obj)) {
		err = obj ? PTR_ERR(obj) : -ENOMEM;
		goto fail;
	}

	guc_fw->guc_fw_obj = obj;
	guc_fw->guc_fw_size = fw->size;

	DRM_DEBUG_DRIVER("GuC fw fetch status SUCCESS, obj %p\n",
			guc_fw->guc_fw_obj);

	release_firmware(fw);
	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_SUCCESS;
	return;

fail:
	DRM_WARN("Failed to fetch valid GuC firmware from %s (error %d)\n",
		 guc_fw->guc_fw_path, err);
	DRM_DEBUG_DRIVER("GuC fw fetch status FAIL; err %d, fw %p, obj %p\n",
		err, fw, guc_fw->guc_fw_obj);

	mutex_lock(&dev->struct_mutex);
	obj = guc_fw->guc_fw_obj;
	if (obj)
		i915_gem_object_put(obj);
	guc_fw->guc_fw_obj = NULL;
	mutex_unlock(&dev->struct_mutex);

	release_firmware(fw);		/* OK even if fw is NULL */
	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL;
}
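
/**
 * intel_guc_init() - define parameters and fetch firmware
 * @dev:	drm device
 *
 * Called early during driver load. The firmware is only fetched here; it
 * will be transferred to the GuC's memory later, when intel_guc_setup()
 * is called.
 */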
void intel_guc_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
	const char *fw_path;

	if (!HAS_GUC(dev_priv)) {
		i915.enable_guc_loading = 0;
		i915.enable_guc_submission = 0;
	} else {
		/* A negative value means "use platform default" */
		if (i915.enable_guc_loading < 0)
			i915.enable_guc_loading = HAS_GUC_UCODE(dev_priv);
		if (i915.enable_guc_submission < 0)
			i915.enable_guc_submission = HAS_GUC_SCHED(dev_priv);
	}

	if (!HAS_GUC_UCODE(dev_priv)) {
		fw_path = NULL;
	} else if (IS_SKYLAKE(dev_priv)) {
		fw_path = I915_SKL_GUC_UCODE;
		guc_fw->guc_fw_major_wanted = SKL_FW_MAJOR;
		guc_fw->guc_fw_minor_wanted = SKL_FW_MINOR;
	} else if (IS_BROXTON(dev_priv)) {
		fw_path = I915_BXT_GUC_UCODE;
		guc_fw->guc_fw_major_wanted = BXT_FW_MAJOR;
		guc_fw->guc_fw_minor_wanted = BXT_FW_MINOR;
	} else if (IS_KABYLAKE(dev_priv)) {
		fw_path = I915_KBL_GUC_UCODE;
		guc_fw->guc_fw_major_wanted = KBL_FW_MAJOR;
		guc_fw->guc_fw_minor_wanted = KBL_FW_MINOR;
	} else {
		fw_path = "";	/* unknown device: no firmware to load */
	}

	guc_fw->guc_dev = dev;
	guc_fw->guc_fw_path = fw_path;
	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
	guc_fw->guc_fw_load_status = GUC_FIRMWARE_NONE;

	/* Early (and silent) return if GuC loading is disabled or impossible */
	if (!i915.enable_guc_loading)
		return;
	if (fw_path == NULL)
		return;
	if (*fw_path == '\0')
		return;

	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_PENDING;
	DRM_DEBUG_DRIVER("GuC firmware pending, path %s\n", fw_path);
	guc_fw_fetch(dev, guc_fw);
	/* status must now be FAIL or SUCCESS */
}
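
/**
 * intel_guc_fini() - clean up all allocated resources
 * @dev:	drm device
 */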
void intel_guc_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;

	mutex_lock(&dev->struct_mutex);
	guc_interrupts_release(dev_priv);
	i915_guc_submission_disable(dev_priv);
	i915_guc_submission_fini(dev_priv);

	if (guc_fw->guc_fw_obj)
		i915_gem_object_put(guc_fw->guc_fw_obj);
	guc_fw->guc_fw_obj = NULL;
	mutex_unlock(&dev->struct_mutex);

	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
}