/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_print.h>

#include "i915_drv.h"
#include "i915_vgpu.h"
#include "intel_ringbuffer.h"
#include "intel_lrc.h"
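
/* Haswell does have the CXT_SIZE register however it does not appear to be
 * valid. Now, docs explain in dwords what is in the context object. The full
 * size is 70720 bytes, however, the power context and execlist context will
 * never be saved (power context is stored elsewhere, and execlists don't work
 * on HSW) - so the final size, including the extra state required for the
 * Resource Streamer, is 66944 bytes, which rounds to 17 pages.
 */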
#define HSW_CXT_TOTAL_SIZE		(17 * PAGE_SIZE)

#define DEFAULT_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_RENDER_SIZE	(20 * PAGE_SIZE)
#define GEN9_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
#define GEN10_LR_CONTEXT_RENDER_SIZE	(18 * PAGE_SIZE)
#define GEN11_LR_CONTEXT_RENDER_SIZE	(14 * PAGE_SIZE)

#define GEN8_LR_CONTEXT_OTHER_SIZE	( 2 * PAGE_SIZE)

struct engine_class_info {
	const char *name;
	int (*init_legacy)(struct intel_engine_cs *engine);
	int (*init_execlists)(struct intel_engine_cs *engine);

	u8 uabi_class;
};

static const struct engine_class_info intel_engine_classes[] = {
	[RENDER_CLASS] = {
		.name = "rcs",
		.init_execlists = logical_render_ring_init,
		.init_legacy = intel_init_render_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_RENDER,
	},
	[COPY_ENGINE_CLASS] = {
		.name = "bcs",
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_blt_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_COPY,
	},
	[VIDEO_DECODE_CLASS] = {
		.name = "vcs",
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_bsd_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_VIDEO,
	},
	[VIDEO_ENHANCEMENT_CLASS] = {
		.name = "vecs",
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_vebox_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_VIDEO_ENHANCE,
	},
};

struct engine_info {
	unsigned int hw_id;
	unsigned int uabi_id;
	u8 class;
	u8 instance;
	u32 mmio_base;
	unsigned int irq_shift;
};

static const struct engine_info intel_engines[] = {
	[RCS] = {
		.hw_id = RCS_HW,
		.uabi_id = I915_EXEC_RENDER,
		.class = RENDER_CLASS,
		.instance = 0,
		.mmio_base = RENDER_RING_BASE,
		.irq_shift = GEN8_RCS_IRQ_SHIFT,
	},
	[BCS] = {
		.hw_id = BCS_HW,
		.uabi_id = I915_EXEC_BLT,
		.class = COPY_ENGINE_CLASS,
		.instance = 0,
		.mmio_base = BLT_RING_BASE,
		.irq_shift = GEN8_BCS_IRQ_SHIFT,
	},
	[VCS] = {
		.hw_id = VCS_HW,
		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 0,
		.mmio_base = GEN6_BSD_RING_BASE,
		.irq_shift = GEN8_VCS1_IRQ_SHIFT,
	},
	[VCS2] = {
		.hw_id = VCS2_HW,
		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 1,
		.mmio_base = GEN8_BSD2_RING_BASE,
		.irq_shift = GEN8_VCS2_IRQ_SHIFT,
	},
	[VCS3] = {
		.hw_id = VCS3_HW,
		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 2,
		.mmio_base = GEN11_BSD3_RING_BASE,
		.irq_shift = 0,
	},
	[VCS4] = {
		.hw_id = VCS4_HW,
		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 3,
		.mmio_base = GEN11_BSD4_RING_BASE,
		.irq_shift = 0,
	},
	[VECS] = {
		.hw_id = VECS_HW,
		.uabi_id = I915_EXEC_VEBOX,
		.class = VIDEO_ENHANCEMENT_CLASS,
		.instance = 0,
		.mmio_base = VEBOX_RING_BASE,
		.irq_shift = GEN8_VECS_IRQ_SHIFT,
	},
	[VECS2] = {
		.hw_id = VECS2_HW,
		.uabi_id = I915_EXEC_VEBOX,
		.class = VIDEO_ENHANCEMENT_CLASS,
		.instance = 1,
		.mmio_base = GEN11_VEBOX2_RING_BASE,
		.irq_shift = 0,
	},
};
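
/**
 * __intel_engine_context_size() - return the size of the context for an engine
 * @dev_priv: i915 device private
 * @class: engine class
 *
 * Each engine class may require a different amount of space for a context
 * image.
 *
 * Return: size (in bytes) of an engine class specific context image
 *
 * Note, although the rounding is technically required only for the
 * GEN8+, updating the historical context image sizes does not change
 * their values.
 */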
static u32
__intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
{
	u32 cxt_size;

	BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);

	switch (class) {
	case RENDER_CLASS:
		switch (INTEL_GEN(dev_priv)) {
		default:
			MISSING_CASE(INTEL_GEN(dev_priv));
			return DEFAULT_LR_CONTEXT_RENDER_SIZE;
		case 11:
			return GEN11_LR_CONTEXT_RENDER_SIZE;
		case 10:
			return GEN10_LR_CONTEXT_RENDER_SIZE;
		case 9:
			return GEN9_LR_CONTEXT_RENDER_SIZE;
		case 8:
			return GEN8_LR_CONTEXT_RENDER_SIZE;
		case 7:
			if (IS_HASWELL(dev_priv))
				return HSW_CXT_TOTAL_SIZE;

			cxt_size = I915_READ(GEN7_CXT_SIZE);
			return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 6:
			cxt_size = I915_READ(CXT_SIZE);
			return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 5:
		case 4:
		case 3:
		case 2:
		/* No logical context image is used on gen5 and earlier */
		case 1:
			return 0;
		}
		break;
	default:
		MISSING_CASE(class);
		/* fall through */
	case VIDEO_DECODE_CLASS:
	case VIDEO_ENHANCEMENT_CLASS:
	case COPY_ENGINE_CLASS:
		if (INTEL_GEN(dev_priv) < 8)
			return 0;
		return GEN8_LR_CONTEXT_OTHER_SIZE;
	}
}

static int
intel_engine_setup(struct drm_i915_private *dev_priv,
		   enum intel_engine_id id)
{
	const struct engine_info *info = &intel_engines[id];
	const struct engine_class_info *class_info;
	struct intel_engine_cs *engine;

	GEM_BUG_ON(info->class >= ARRAY_SIZE(intel_engine_classes));
	class_info = &intel_engine_classes[info->class];

	BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
	BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));

	if (GEM_WARN_ON(info->class > MAX_ENGINE_CLASS))
		return -EINVAL;

	if (GEM_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
		return -EINVAL;

	if (GEM_WARN_ON(dev_priv->engine_class[info->class][info->instance]))
		return -EINVAL;

	GEM_BUG_ON(dev_priv->engine[id]);
	engine = kzalloc(sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return -ENOMEM;

	engine->id = id;
	engine->i915 = dev_priv;
	WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s%u",
			 class_info->name, info->instance) >=
		sizeof(engine->name));
	engine->hw_id = engine->guc_id = info->hw_id;
	if (INTEL_GEN(dev_priv) >= 11) {
		switch (engine->id) {
		case VCS:
			engine->mmio_base = GEN11_BSD_RING_BASE;
			break;
		case VCS2:
			engine->mmio_base = GEN11_BSD2_RING_BASE;
			break;
		case VECS:
			engine->mmio_base = GEN11_VEBOX_RING_BASE;
			break;
		default:
			/* take the original value for all other engines */
			engine->mmio_base = info->mmio_base;
			break;
		}
	} else {
		engine->mmio_base = info->mmio_base;
	}
	engine->irq_shift = info->irq_shift;
	engine->class = info->class;
	engine->instance = info->instance;

	engine->uabi_id = info->uabi_id;
	engine->uabi_class = class_info->uabi_class;

	engine->context_size = __intel_engine_context_size(dev_priv,
							   engine->class);
	if (WARN_ON(engine->context_size > BIT(20)))
		engine->context_size = 0;

	/* Nothing to do here, execute in order of dependencies */
	engine->schedule = NULL;

	spin_lock_init(&engine->stats.lock);

	ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);

	dev_priv->engine_class[info->class][info->instance] = engine;
	dev_priv->engine[id] = engine;
	return 0;
}
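
/**
 * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
 * @dev_priv: i915 device private
 *
 * Return: non-zero if the initialization failed.
 */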
int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
	const unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned int mask = 0;
	unsigned int i;
	int err;

	WARN_ON(ring_mask == 0);
	WARN_ON(ring_mask &
		GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));

	for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
		if (!HAS_ENGINE(dev_priv, i))
			continue;

		err = intel_engine_setup(dev_priv, i);
		if (err)
			goto cleanup;

		mask |= ENGINE_MASK(i);
	}

	/*
	 * Catch failures to update intel_engines table when the new engines
	 * are added to the driver by a warning and disabling the forgotten
	 * engines.
	 */
	if (WARN_ON(mask != ring_mask))
		device_info->ring_mask = mask;

	/* We always presume we have at least RCS available for later probing */
	if (WARN_ON(!HAS_ENGINE(dev_priv, RCS))) {
		err = -ENODEV;
		goto cleanup;
	}

	device_info->num_rings = hweight32(mask);

	i915_check_and_clear_faults(dev_priv);

	return 0;

cleanup:
	for_each_engine(engine, dev_priv, id)
		kfree(engine);
	return err;
}
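
/**
 * intel_engines_init() - init the Engine Command Streamers
 * @dev_priv: i915 device private
 *
 * Return: non-zero if the initialization failed.
 */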
int intel_engines_init(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id, err_id;
	int err;

	for_each_engine(engine, dev_priv, id) {
		const struct engine_class_info *class_info =
			&intel_engine_classes[engine->class];
		int (*init)(struct intel_engine_cs *engine);

		if (HAS_EXECLISTS(dev_priv))
			init = class_info->init_execlists;
		else
			init = class_info->init_legacy;

		err = -EINVAL;
		err_id = id;

		if (GEM_WARN_ON(!init))
			goto cleanup;

		err = init(engine);
		if (err)
			goto cleanup;

		GEM_BUG_ON(!engine->submit_request);
	}

	return 0;

cleanup:
	for_each_engine(engine, dev_priv, id) {
		if (id >= err_id) {
			/* Not yet initialized, only free the allocation */
			kfree(engine);
			dev_priv->engine[id] = NULL;
		} else {
			dev_priv->gt.cleanup_engine(engine);
		}
	}
	return err;
}

void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
{
	struct drm_i915_private *dev_priv = engine->i915;

	/*
	 * When the seqno is rewound, any semaphore sync values must also be
	 * reset to zero; otherwise, when the seqno moves backwards, all
	 * semaphore waits against the old (larger) values would complete
	 * instantly, causing rushed GPU hangs.
	 */
	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
		I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
		I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
		if (HAS_VEBOX(dev_priv))
			I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
	}

	intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
	clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

	/* After manually advancing the seqno, fake the interrupt in case
	 * there are any waiters for that seqno.
	 */
	intel_engine_wakeup(engine);

	GEM_BUG_ON(intel_engine_get_seqno(engine) != seqno);
}

static void intel_engine_init_timeline(struct intel_engine_cs *engine)
{
	engine->timeline = &engine->i915->gt.global_timeline.engine[engine->id];
}

static bool csb_force_mmio(struct drm_i915_private *i915)
{
	/*
	 * IOMMU adds unpredictable latency causing the CSB write (from the
	 * GPU into the HWSP) to only be visible some time after the interrupt
	 * (missed breadcrumb syndrome).
	 */
	if (intel_vtd_active())
		return true;

	/* Older GVT emulation depends upon intercepting CSB mmio */
	if (intel_vgpu_active(i915) && !intel_vgpu_has_hwsp_emulation(i915))
		return true;

	return false;
}

static void intel_engine_init_execlist(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;

	execlists->csb_use_mmio = csb_force_mmio(engine->i915);

	execlists->port_mask = 1;
	BUILD_BUG_ON_NOT_POWER_OF_2(execlists_num_ports(execlists));
	GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);

	execlists->queue_priority = INT_MIN;
	execlists->queue = RB_ROOT;
	execlists->first = NULL;
}
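
/**
 * intel_engine_setup_common - setup engine state not requiring hw access
 * @engine: Engine to setup.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do not require hardware access.
 *
 * Typically done early in the submission mode specific engine setup stage.
 */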
void intel_engine_setup_common(struct intel_engine_cs *engine)
{
	intel_engine_init_execlist(engine);

	intel_engine_init_timeline(engine);
	intel_engine_init_hangcheck(engine);
	i915_gem_batch_pool_init(engine, &engine->batch_pool);

	intel_engine_init_cmd_parser(engine);
}

int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int ret;

	WARN_ON(engine->scratch);

	obj = i915_gem_object_create_stolen(engine->i915, size);
	if (!obj)
		obj = i915_gem_object_create_internal(engine->i915, size);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate scratch page\n");
		return PTR_ERR(obj);
	}

	vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}

	ret = i915_vma_pin(vma, 0, 4096, PIN_GLOBAL | PIN_HIGH);
	if (ret)
		goto err_unref;

	engine->scratch = vma;
	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
			 engine->name, i915_ggtt_offset(vma));
	return 0;

err_unref:
	i915_gem_object_put(obj);
	return ret;
}

static void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
{
	i915_vma_unpin_and_release(&engine->scratch);
}

static void cleanup_phys_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (!dev_priv->status_page_dmah)
		return;

	drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah);
	engine->status_page.page_addr = NULL;
}

static void cleanup_status_page(struct intel_engine_cs *engine)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(&engine->status_page.vma);
	if (!vma)
		return;

	obj = vma->obj;

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	i915_gem_object_unpin_map(obj);
	__i915_gem_object_release_unless_active(obj);
}

static int init_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	unsigned int flags;
	void *vaddr;
	int ret;

	obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate status page\n");
		return PTR_ERR(obj);
	}

	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
	if (ret)
		goto err;

	vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err;
	}

	flags = PIN_GLOBAL;
	if (!HAS_LLC(engine->i915))
		/*
		 * On g33, we cannot place HWS above 256MiB, so
		 * restrict its pinning to the low mappable arena.
		 */
		flags |= PIN_MAPPABLE;
	else
		flags |= PIN_HIGH;
	ret = i915_vma_pin(vma, 0, 4096, flags);
	if (ret)
		goto err;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto err_unpin;
	}

	engine->status_page.vma = vma;
	engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
	engine->status_page.page_addr = memset(vaddr, 0, PAGE_SIZE);

	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			 engine->name, i915_ggtt_offset(vma));
	return 0;

err_unpin:
	i915_vma_unpin(vma);
err:
	i915_gem_object_put(obj);
	return ret;
}

static int init_phys_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	GEM_BUG_ON(engine->id != RCS);

	dev_priv->status_page_dmah =
		drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
	if (!dev_priv->status_page_dmah)
		return -ENOMEM;

	engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
	memset(engine->status_page.page_addr, 0, PAGE_SIZE);

	return 0;
}
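
/**
 * intel_engine_init_common - initialize engine state which might require hw access
 * @engine: Engine to initialize.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do require hardware access.
 *
 * Typically done at later stages of submission mode specific engine setup.
 *
 * Returns zero on success or an error code on failure.
 */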
int intel_engine_init_common(struct intel_engine_cs *engine)
{
	struct intel_ring *ring;
	int ret;

	engine->set_default_submission(engine);

	/* We may need to do things with the shrinker which
	 * require us to immediately switch back to the default
	 * context. This can cause a problem as pinning the
	 * default context also requires GTT space which may not
	 * be available. To avoid this we always pin the default
	 * context.
	 */
	ring = engine->context_pin(engine, engine->i915->kernel_context);
	if (IS_ERR(ring))
		return PTR_ERR(ring);

	/* Similarly the preempt context must always be available so that
	 * we can interrupt the engine at any time.
	 */
	if (engine->i915->preempt_context) {
		ring = engine->context_pin(engine,
					   engine->i915->preempt_context);
		if (IS_ERR(ring)) {
			ret = PTR_ERR(ring);
			goto err_unpin_kernel;
		}
	}

	ret = intel_engine_init_breadcrumbs(engine);
	if (ret)
		goto err_unpin_preempt;

	if (HWS_NEEDS_PHYSICAL(engine->i915))
		ret = init_phys_status_page(engine);
	else
		ret = init_status_page(engine);
	if (ret)
		goto err_breadcrumbs;

	return 0;

err_breadcrumbs:
	intel_engine_fini_breadcrumbs(engine);
err_unpin_preempt:
	if (engine->i915->preempt_context)
		engine->context_unpin(engine, engine->i915->preempt_context);
err_unpin_kernel:
	engine->context_unpin(engine, engine->i915->kernel_context);
	return ret;
}
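
/**
 * intel_engine_cleanup_common - cleans up the engine state created by
 *                               the common initializers.
 * @engine: Engine to cleanup.
 *
 * This cleans up everything created by the common helpers.
 */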
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
	intel_engine_cleanup_scratch(engine);

	if (HWS_NEEDS_PHYSICAL(engine->i915))
		cleanup_phys_status_page(engine);
	else
		cleanup_status_page(engine);

	intel_engine_fini_breadcrumbs(engine);
	intel_engine_cleanup_cmd_parser(engine);
	i915_gem_batch_pool_fini(&engine->batch_pool);

	if (engine->default_state)
		i915_gem_object_put(engine->default_state);

	if (engine->i915->preempt_context)
		engine->context_unpin(engine, engine->i915->preempt_context);
	engine->context_unpin(engine, engine->i915->kernel_context);
}

u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u64 acthd;

	if (INTEL_GEN(dev_priv) >= 8)
		acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
					 RING_ACTHD_UDW(engine->mmio_base));
	else if (INTEL_GEN(dev_priv) >= 4)
		acthd = I915_READ(RING_ACTHD(engine->mmio_base));
	else
		acthd = I915_READ(ACTHD);

	return acthd;
}

u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u64 bbaddr;

	if (INTEL_GEN(dev_priv) >= 8)
		bbaddr = I915_READ64_2x32(RING_BBADDR(engine->mmio_base),
					  RING_BBADDR_UDW(engine->mmio_base));
	else
		bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));

	return bbaddr;
}

const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
	case I915_CACHE_L3_LLC: return " L3+LLC";
	case I915_CACHE_WT: return " WT";
	default: return "";
	}
}

static inline uint32_t
read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
		  int subslice, i915_reg_t reg)
{
	uint32_t mcr;
	uint32_t ret;
	enum forcewake_domains fw_domains;

	fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg,
						    FW_REG_READ);
	fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
						     GEN8_MCR_SELECTOR,
						     FW_REG_READ | FW_REG_WRITE);

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, fw_domains);

	mcr = I915_READ_FW(GEN8_MCR_SELECTOR);
	/*
	 * The HW expects the slice and subslice selectors to be reset to 0
	 * before reading from the per-slice/subslice registers.
	 */
	WARN_ON_ONCE(mcr & (GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK));
	mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
	mcr |= GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);

	ret = I915_READ_FW(reg);

	mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);

	intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
	spin_unlock_irq(&dev_priv->uncore.lock);

	return ret;
}

/* NB: please notice the memset */
void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u32 mmio_base = engine->mmio_base;
	int slice;
	int subslice;

	memset(instdone, 0, sizeof(*instdone));

	switch (INTEL_GEN(dev_priv)) {
	default:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id != RCS)
			break;

		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
		for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
			instdone->sampler[slice][subslice] =
				read_subslice_reg(dev_priv, slice, subslice,
						  GEN7_SAMPLER_INSTDONE);
			instdone->row[slice][subslice] =
				read_subslice_reg(dev_priv, slice, subslice,
						  GEN7_ROW_INSTDONE);
		}
		break;
	case 7:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id != RCS)
			break;

		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
		instdone->sampler[0][0] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone->row[0][0] = I915_READ(GEN7_ROW_INSTDONE);

		break;
	case 6:
	case 5:
	case 4:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id == RCS)
			/* HACK: Using the wrong struct member */
			instdone->slice_common = I915_READ(GEN4_INSTDONE1);
		break;
	case 3:
	case 2:
		instdone->instdone = I915_READ(GEN2_INSTDONE);
		break;
	}
}

static int wa_add(struct drm_i915_private *dev_priv,
		  i915_reg_t addr,
		  const u32 mask, const u32 val)
{
	const u32 idx = dev_priv->workarounds.count;

	if (WARN_ON(idx >= I915_MAX_WA_REGS))
		return -ENOSPC;

	dev_priv->workarounds.reg[idx].addr = addr;
	dev_priv->workarounds.reg[idx].value = val;
	dev_priv->workarounds.reg[idx].mask = mask;

	dev_priv->workarounds.count++;

	return 0;
}

#define WA_REG(addr, mask, val) do { \
		const int r = wa_add(dev_priv, (addr), (mask), (val)); \
		if (r) \
			return r; \
	} while (0)

#define WA_SET_BIT_MASKED(addr, mask) \
	WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))

#define WA_CLR_BIT_MASKED(addr, mask) \
	WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))

#define WA_SET_FIELD_MASKED(addr, mask, value) \
	WA_REG(addr, mask, _MASKED_FIELD(mask, value))

static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
				 i915_reg_t reg)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct i915_workarounds *wa = &dev_priv->workarounds;
	const uint32_t index = wa->hw_whitelist_count[engine->id];

	if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
		return -EINVAL;

	I915_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
		   i915_mmio_reg_offset(reg));
	wa->hw_whitelist_count[engine->id]++;

	return 0;
}

static int gen8_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);

	/* WaDisableAsyncFlipPerfMode:bdw,chv */
	WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);

	/* WaDisablePartialInstShootdown:bdw,chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Use Force Non-Coherent whenever executing a 3D context. This is a
	 * workaround for a possible hang in the unlikely event a TLB
	 * invalidation occurs during a PSD flush.
	 */
	/* WaForceEnableNonCoherent:bdw,chv */
	/* WaHdcDisableFetchWhenMasked:bdw,chv */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_DONOT_FETCH_MEM_WHEN_MASKED |
			  HDC_FORCE_NON_COHERENT);

	/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
	 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
	 *  polygons in the same 8x4 pixel/sample area to be processed without
	 *  stalling waiting for the earlier ones to write to Hierarchical Z
	 *  buffer."
	 *
	 * This optimization is off by default for BDW and CHV; turn it on.
	 */
	WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);

	/* Wa4x4STCOptimizationDisable:bdw,chv */
	WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN6_WIZ_HASHING_MASK,
			    GEN6_WIZ_HASHING_16x4);

	return 0;
}

static int bdw_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen8_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* WaDisableDopClockGating:bdw
	 *
	 * Also see the related UCGTCL1 write in broadwell_init_clock_gating()
	 * to disable EUTC clock gating.
	 */
	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
			  DOP_CLOCK_GATING_DISABLE);

	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
			  GEN8_SAMPLER_POWER_BYPASS_DIS);

	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  /* WaForceContextSaveRestoreNonCoherent:bdw */
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
			  /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
			  (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));

	return 0;
}

static int chv_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen8_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaDisableThreadStallDopClockGating:chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* Improve HiZ throughput on CHV. */
	WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);

	return 0;
}

static int gen9_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	/* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
	I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));

	/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
	I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
		   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);

	/* WaDisableKillLogic:bxt,skl,kbl */
	if (!IS_COFFEELAKE(dev_priv))
		I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
			   ECOCHK_DIS_TLB);

	if (HAS_LLC(dev_priv)) {
		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
		 *
		 * Must match Display Engine. See
		 * WaCompressedResourceDisplayNewHashMode.
		 */
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN9_PBE_COMPRESSED_HASH_SELECTION);
		WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
				  GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);

		I915_WRITE(MMCD_MISC_CTRL,
			   I915_READ(MMCD_MISC_CTRL) |
			   MMCD_PCLA |
			   MMCD_HOTSPOT_EN);
	}

	/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
	/* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  FLOW_CONTROL_ENABLE |
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Syncing dependencies between camera and graphics:skl,bxt,kbl */
	if (!IS_COFFEELAKE(dev_priv))
		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
				  GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);

	/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
	/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
	WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
			  GEN9_ENABLE_YV12_BUGFIX |
			  GEN9_ENABLE_GPGPU_PREEMPTION);

	/* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
	/* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
	WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
					 GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));

	/* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
	WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
			  GEN9_CCS_TLB_PREFETCH_ENABLE);

	/* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
			  HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);

	/* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
	 * both tied to WaForceContextSaveRestoreNonCoherent
	 * in some hsds for skl. We keep the tie for all gen9. The
	 * documentation is a bit hazy and so we want to get common behaviour,
	 * even though there is no clear evidence we would need both on kbl/bxt.
	 * This area has been source of system hangs so we play it safe
	 * and mimic the skl regardless of what bspec says.
	 *
	 * Use Force Non-Coherent whenever executing a 3D context. This
	 * is a workaround for a possible hang in the unlikely event
	 * a TLB invalidation occurs during a PSD flush.
	 */

	/* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_FORCE_NON_COHERENT);

	/* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
		   BDW_DISABLE_HDC_INVALIDATION);

	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
	if (IS_SKYLAKE(dev_priv) ||
	    IS_KABYLAKE(dev_priv) ||
	    IS_COFFEELAKE(dev_priv))
		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
				  GEN8_SAMPLER_POWER_BYPASS_DIS);

	/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);

	/* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
	if (IS_GEN9_LP(dev_priv)) {
		u32 val = I915_READ(GEN8_L3SQCREG1);

		val &= ~L3_PRIO_CREDITS_MASK;
		val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
		I915_WRITE(GEN8_L3SQCREG1, val);
	}

	/* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
	I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
				    GEN8_LQSC_FLUSH_COHERENT_LINES));

	/*
	 * Supporting preemption with fine-granularity requires changes in the
	 * batch buffer programming. Since we can't break old userspace, we
	 * need to set our default preemption level to safe value. Userspace is
	 * still able to use more fine-grained preemption levels, since in
	 * WaEnablePreemptionGranularityControlByUMD we're whitelisting the
	 * per-ctx register. As such, WaDisable{3D,GPGPU}MidCmdPreemption are
	 * not real HW workarounds, but merely a way to start using preemption
	 * while maintaining old contract with userspace.
	 */

	/* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
	WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);

	/* WaDisableGPGPUMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
	WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);

	/* WaClearHIZ_WM_CHICKEN3:bxt,glk */
	if (IS_GEN9_LP(dev_priv))
		WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);

	/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
	ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
	if (ret)
		return ret;

	/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
	I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
	ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
	if (ret)
		return ret;

	/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
	ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
	if (ret)
		return ret;

	return 0;
}

static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u8 vals[3] = { 0, 0, 0 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		u8 ss;

		/* Only consider slices where one, and only one, subslice has 7
		 * EUs
		 */
		if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))
			continue;

		/*
		 * subslice_7eu[i] != 0 (because of the check above) and
		 * ss_max == 4 (maximum number of subslices possible per slice)
		 *
		 * ->    0 <= ss <= 3;
		 */
		ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;
		vals[i] = 3 - ss;
	}

	if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
		return 0;

	/* Tune IZ hashing. See intel_device_info_runtime_init() */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN9_IZ_HASHING_MASK(2) |
			    GEN9_IZ_HASHING_MASK(1) |
			    GEN9_IZ_HASHING_MASK(0),
			    GEN9_IZ_HASHING(2, vals[2]) |
			    GEN9_IZ_HASHING(1, vals[1]) |
			    GEN9_IZ_HASHING(0, vals[0]));

	return 0;
}

static int skl_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaEnableGapsTsvCreditFix:skl */
	I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
				   GEN9_GAPS_TSV_CREDIT_DISABLE));

	/* WaDisableGafsUnitClkGating:skl */
	I915_WRITE(GEN7_UCGCTL4, (I915_READ(GEN7_UCGCTL4) |
				  GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE));

	/* WaInPlaceDecompressionHang:skl */
	if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
		I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
			   (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
			    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));

	/* WaDisableLSQCROPERFforOCL:skl */
	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
	if (ret)
		return ret;

	return skl_tune_iz_hashing(engine);
}

static int bxt_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaDisableThreadStallDopClockGating:bxt */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  STALL_DOP_GATING_DISABLE);

	/* WaDisablePooledEuLoadBalancingFix:bxt */
	I915_WRITE(FF_SLICE_CS_CHICKEN2,
		   _MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE));

	/* WaToEnableHwFixForPushConstHWBug:bxt */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaInPlaceDecompressionHang:bxt */
	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
		   (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));

	return 0;
}

static int cnl_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	/* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
	if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
		I915_WRITE(GAMT_CHKN_BIT_REG,
			   (I915_READ(GAMT_CHKN_BIT_REG) |
			    GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT));

	/* WaForceContextSaveRestoreNonCoherent:cnl */
	WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);

	/* WaThrottleEUPerfToAvoidTDBackPressure:cnl (pre-prod) */
	if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
		WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, THROTTLE_12_5);

	/* WaDisableReplayBufferBankArbitrationOptimization:cnl */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableEnhancedSBEVertexCaching:cnl (pre-prod) */
	if (IS_CNL_REVID(dev_priv, 0, CNL_REVID_B0))
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE);

	/* WaInPlaceDecompressionHang:cnl */
	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
		   (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));

	/* WaPushConstantDereferenceHoldDisable:cnl */
	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE);

	/* FtrEnableFastAnisoL1BankingFix:cnl */
	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX);

	/* WaDisable3DMidCmdPreemption:cnl */
	WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);

	/* WaDisableGPGPUMidCmdPreemption:cnl */
	WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);

	/* WaEnablePreemptionGranularityControlByUMD:cnl */
	I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
	ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
	if (ret)
		return ret;

	/* WaDisableEarlyEOT:cnl */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);

	return 0;
}

static int kbl_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaEnableGapsTsvCreditFix:kbl */
	I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
				   GEN9_GAPS_TSV_CREDIT_DISABLE));

	/* WaDisableDynamicCreditSharing:kbl (pre-prod) */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
		I915_WRITE(GAMT_CHKN_BIT_REG,
			   (I915_READ(GAMT_CHKN_BIT_REG) |
			    GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING));

	/* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
	if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
		WA_SET_BIT_MASKED(HDC_CHICKEN0,
				  HDC_FENCE_DEST_SLM_DISABLE);

	/* WaToEnableHwFixForPushConstHWBug:kbl */
	if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableGafsUnitClkGating:kbl */
	I915_WRITE(GEN7_UCGCTL4, (I915_READ(GEN7_UCGCTL4) |
				  GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE));

	/* WaDisableSbeCacheDispatchPortSharing:kbl */
	WA_SET_BIT_MASKED(
		GEN7_HALF_SLICE_CHICKEN1,
		GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);

	/* WaInPlaceDecompressionHang:kbl */
	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
		   (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));

	/* WaDisableLSQCROPERFforOCL:kbl */
	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
	if (ret)
		return ret;

	return 0;
}

static int glk_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
	ret = wa_ring_whitelist_reg(engine, GEN9_SLICE_COMMON_ECO_CHICKEN1);
	if (ret)
		return ret;

	/* WaToEnableHwFixForPushConstHWBug:glk */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	return 0;
}

static int cfl_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaEnableGapsTsvCreditFix:cfl */
	I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
				   GEN9_GAPS_TSV_CREDIT_DISABLE));

	/* WaToEnableHwFixForPushConstHWBug:cfl */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableGafsUnitClkGating:cfl */
	I915_WRITE(GEN7_UCGCTL4, (I915_READ(GEN7_UCGCTL4) |
				  GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE));

	/* WaDisableSbeCacheDispatchPortSharing:cfl */
	WA_SET_BIT_MASKED(
		GEN7_HALF_SLICE_CHICKEN1,
		GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);

	/* WaInPlaceDecompressionHang:cfl */
	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
		   (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));

	return 0;
}

int init_workarounds_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int err;

	if (GEM_WARN_ON(engine->id != RCS))
		return -EINVAL;

	dev_priv->workarounds.count = 0;
	dev_priv->workarounds.hw_whitelist_count[engine->id] = 0;

	if (IS_BROADWELL(dev_priv))
		err = bdw_init_workarounds(engine);
	else if (IS_CHERRYVIEW(dev_priv))
		err = chv_init_workarounds(engine);
	else if (IS_SKYLAKE(dev_priv))
		err = skl_init_workarounds(engine);
	else if (IS_BROXTON(dev_priv))
		err = bxt_init_workarounds(engine);
	else if (IS_KABYLAKE(dev_priv))
		err = kbl_init_workarounds(engine);
	else if (IS_GEMINILAKE(dev_priv))
		err = glk_init_workarounds(engine);
	else if (IS_COFFEELAKE(dev_priv))
		err = cfl_init_workarounds(engine);
	else if (IS_CANNONLAKE(dev_priv))
		err = cnl_init_workarounds(engine);
	else
		err = 0;
	if (err)
		return err;

	DRM_DEBUG_DRIVER("%s: Number of context specific w/a: %d\n",
			 engine->name, dev_priv->workarounds.count);
	return 0;
}

int intel_ring_workarounds_emit(struct i915_request *rq)
{
	struct i915_workarounds *w = &rq->i915->workarounds;
	u32 *cs;
	int ret, i;

	if (w->count == 0)
		return 0;

	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
	if (ret)
		return ret;

	cs = intel_ring_begin(rq, w->count * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(w->count);
	for (i = 0; i < w->count; i++) {
		*cs++ = i915_mmio_reg_offset(w->reg[i].addr);
		*cs++ = w->reg[i].value;
	}
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);

	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
	if (ret)
		return ret;

	return 0;
}

static bool ring_is_idle(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	bool idle = true;

	/* If the whole device is asleep, the engine must be idle */
	if (!intel_runtime_pm_get_if_in_use(dev_priv))
		return true;

	/* First check that no commands are left in the ring */
	if ((I915_READ_HEAD(engine) & HEAD_ADDR) !=
	    (I915_READ_TAIL(engine) & TAIL_ADDR))
		idle = false;

	/* No bit for gen2, so assume the CS parser is idle */
	if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
		idle = false;

	intel_runtime_pm_put(dev_priv);

	return idle;
}
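
/**
 * intel_engine_is_idle() - Report if the engine has finished processing all work
 * @engine: the intel_engine_cs
 *
 * Return true if there are no requests pending, nothing left to be submitted
 * to hardware, and that the engine is idle.
 */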
bool intel_engine_is_idle(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	/* More white lies, if wedged, hw state is inconsistent */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return true;

	/* Any inflight/incomplete requests? */
	if (!i915_seqno_passed(intel_engine_get_seqno(engine),
			       intel_engine_last_submit(engine)))
		return false;

	if (I915_SELFTEST_ONLY(engine->breadcrumbs.mock))
		return true;

	/* Waiting to drain ELSP? */
	if (READ_ONCE(engine->execlists.active))
		return false;

	/* ELSP is empty, but there are ready requests? */
	if (READ_ONCE(engine->execlists.first))
		return false;

	/* Ring stopped? */
	if (!ring_is_idle(engine))
		return false;

	return true;
}

bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* If the driver is wedged, HW state may be very inconsistent and
	 * report that it is still busy, even though we have stopped using it.
	 */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return true;

	for_each_engine(engine, dev_priv, id) {
		if (!intel_engine_is_idle(engine))
			return false;
	}

	return true;
}
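
/**
 * intel_engine_has_kernel_context:
 * @engine: the engine
 *
 * Returns true if the last context to be executed on this engine, or has been
 * executed if the engine is already idle, is the kernel context
 * (#i915.kernel_context).
 */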
bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
{
	const struct i915_gem_context * const kernel_context =
		engine->i915->kernel_context;
	struct i915_request *rq;

	lockdep_assert_held(&engine->i915->drm.struct_mutex);

	/*
	 * Check the last context seen by the engine. If active, it will be
	 * the last request that remains in the timeline. When idle, it is
	 * the last executed context as tracked by retirement.
	 */
	rq = __i915_gem_active_peek(&engine->timeline->last_request);
	if (rq)
		return rq->ctx == kernel_context;
	else
		return engine->last_retired_context == kernel_context;
}

void intel_engines_reset_default_submission(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id)
		engine->set_default_submission(engine);
}
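
/**
 * intel_engines_park: called when the GT is transitioning from busy->idle
 * @i915: the i915 device
 *
 * The GT may be idle for an indefinite length of time, so park the engines:
 * flush any pending interrupts, check that nothing is left in the rings and
 * release any resources only needed while the engines are busy.
 */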
void intel_engines_park(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id) {
		/* Flush the residual irq tasklets first. */
		intel_engine_disarm_breadcrumbs(engine);
		tasklet_kill(&engine->execlists.tasklet);

		/*
		 * We are committed now to parking the engines, make sure there
		 * will be no more interrupts arriving later and the engines
		 * are truly idle.
		 */
		if (wait_for(intel_engine_is_idle(engine), 10)) {
			struct drm_printer p = drm_debug_printer(__func__);

			dev_err(i915->drm.dev,
				"%s is not idle before parking\n",
				engine->name);
			intel_engine_dump(engine, &p, NULL);
		}

		if (engine->park)
			engine->park(engine);

		i915_gem_batch_pool_fini(&engine->batch_pool);
		engine->execlists.no_priolist = false;
	}
}
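
/**
 * intel_engines_unpark: called when the GT is transitioning from idle->busy
 * @i915: the i915 device
 *
 * The GT was idle and now about to fire up with some new user requests.
 */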
void intel_engines_unpark(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id) {
		if (engine->unpark)
			engine->unpark(engine);
	}
}

bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
{
	switch (INTEL_GEN(engine->i915)) {
	case 2:
		return false; /* uses physical not virtual addresses */
	case 3:
		/* maybe only uses physical not virtual addresses */
		return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915));
	case 6:
		return engine->class != VIDEO_DECODE_CLASS; /* b0rked */
	default:
		return true;
	}
}

unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned int which;

	which = 0;
	for_each_engine(engine, i915, id)
		if (engine->default_state)
			which |= BIT(engine->uabi_class);

	return which;
}

static void print_request(struct drm_printer *m,
			  struct i915_request *rq,
			  const char *prefix)
{
	drm_printf(m, "%s%x%s [%llx:%x] prio=%d @ %dms: %s\n", prefix,
		   rq->global_seqno,
		   i915_request_completed(rq) ? "!" : "",
		   rq->fence.context, rq->fence.seqno,
		   rq->priotree.priority,
		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
		   rq->timeline->common->name);
}

static void hexdump(struct drm_printer *m, const void *buf, size_t len)
{
	const size_t rowsize = 8 * sizeof(u32);
	const void *prev = NULL;
	bool skip = false;
	size_t pos;

	for (pos = 0; pos < len; pos += rowsize) {
		char line[128];

		if (prev && !memcmp(prev, buf + pos, rowsize)) {
			if (!skip) {
				drm_printf(m, "*\n");
				skip = true;
			}
			continue;
		}

		WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos,
						rowsize, sizeof(u32),
						line, sizeof(line),
						false) >= sizeof(line));
		drm_printf(m, "%08zx %s\n", pos, line);

		prev = buf + pos;
		skip = false;
	}
}

static void intel_engine_print_registers(const struct intel_engine_cs *engine,
					 struct drm_printer *m)
{
	struct drm_i915_private *dev_priv = engine->i915;
	const struct intel_engine_execlists * const execlists =
		&engine->execlists;
	u64 addr;

	drm_printf(m, "\tRING_START: 0x%08x\n",
		   I915_READ(RING_START(engine->mmio_base)));
	drm_printf(m, "\tRING_HEAD: 0x%08x\n",
		   I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR);
	drm_printf(m, "\tRING_TAIL: 0x%08x\n",
		   I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR);
	drm_printf(m, "\tRING_CTL: 0x%08x%s\n",
		   I915_READ(RING_CTL(engine->mmio_base)),
		   I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
	if (INTEL_GEN(engine->i915) > 2) {
		drm_printf(m, "\tRING_MODE: 0x%08x%s\n",
			   I915_READ(RING_MI_MODE(engine->mmio_base)),
			   I915_READ(RING_MI_MODE(engine->mmio_base)) & (MODE_IDLE) ? " [idle]" : "");
	}

	if (INTEL_GEN(dev_priv) >= 6)
		drm_printf(m, "\tRING_IMR: %08x\n", I915_READ_IMR(engine));

	if (HAS_LEGACY_SEMAPHORES(dev_priv)) {
		drm_printf(m, "\tSYNC_0: 0x%08x\n",
			   I915_READ(RING_SYNC_0(engine->mmio_base)));
		drm_printf(m, "\tSYNC_1: 0x%08x\n",
			   I915_READ(RING_SYNC_1(engine->mmio_base)));
		if (HAS_VEBOX(dev_priv))
			drm_printf(m, "\tSYNC_2: 0x%08x\n",
				   I915_READ(RING_SYNC_2(engine->mmio_base)));
	}

	addr = intel_engine_get_active_head(engine);
	drm_printf(m, "\tACTHD: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	addr = intel_engine_get_last_batch_head(engine);
	drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	if (INTEL_GEN(dev_priv) >= 8)
		addr = I915_READ64_2x32(RING_DMA_FADD(engine->mmio_base),
					RING_DMA_FADD_UDW(engine->mmio_base));
	else if (INTEL_GEN(dev_priv) >= 4)
		addr = I915_READ(RING_DMA_FADD(engine->mmio_base));
	else
		addr = I915_READ(DMA_FADD_I8XX);
	drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	if (INTEL_GEN(dev_priv) >= 4) {
		drm_printf(m, "\tIPEIR: 0x%08x\n",
			   I915_READ(RING_IPEIR(engine->mmio_base)));
		drm_printf(m, "\tIPEHR: 0x%08x\n",
			   I915_READ(RING_IPEHR(engine->mmio_base)));
	} else {
		drm_printf(m, "\tIPEIR: 0x%08x\n", I915_READ(IPEIR));
		drm_printf(m, "\tIPEHR: 0x%08x\n", I915_READ(IPEHR));
	}

	if (HAS_EXECLISTS(dev_priv)) {
		const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
		u32 ptr, read, write;
		unsigned int idx;

		drm_printf(m, "\tExeclist status: 0x%08x %08x\n",
			   I915_READ(RING_EXECLIST_STATUS_LO(engine)),
			   I915_READ(RING_EXECLIST_STATUS_HI(engine)));

		ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
		read = GEN8_CSB_READ_PTR(ptr);
		write = GEN8_CSB_WRITE_PTR(ptr);
		drm_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], interrupt posted? %s\n",
			   read, execlists->csb_head,
			   write,
			   intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)),
			   yesno(test_bit(ENGINE_IRQ_EXECLIST,
					  &engine->irq_posted)));
		if (read >= GEN8_CSB_ENTRIES)
			read = 0;
		if (write >= GEN8_CSB_ENTRIES)
			write = 0;
		if (read > write)
			write += GEN8_CSB_ENTRIES;
		while (read < write) {
			idx = ++read % GEN8_CSB_ENTRIES;
			drm_printf(m, "\tExeclist CSB[%d]: 0x%08x [0x%08x in hwsp], context: %d [%d in hwsp]\n",
				   idx,
				   I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
				   hws[idx * 2],
				   I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)),
				   hws[idx * 2 + 1]);
		}

		rcu_read_lock();
		for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
			struct i915_request *rq;
			unsigned int count;

			rq = port_unpack(&execlists->port[idx], &count);
			if (rq) {
				char hdr[80];

				snprintf(hdr, sizeof(hdr),
					 "\t\tELSP[%d] count=%d, rq: ",
					 idx, count);
				print_request(m, rq, hdr);
			} else {
				drm_printf(m, "\t\tELSP[%d] idle\n", idx);
			}
		}
		drm_printf(m, "\t\tHW active? 0x%x\n", execlists->active);
		rcu_read_unlock();
	} else if (INTEL_GEN(dev_priv) > 6) {
		drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE(engine)));
		drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
		drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
			   I915_READ(RING_PP_DIR_DCLV(engine)));
	}
}

void intel_engine_dump(struct intel_engine_cs *engine,
		       struct drm_printer *m,
		       const char *header, ...)
{
	struct intel_breadcrumbs * const b = &engine->breadcrumbs;
	const struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_gpu_error * const error = &engine->i915->gpu_error;
	struct i915_request *rq;
	struct rb_node *rb;

	if (header) {
		va_list ap;

		va_start(ap, header);
		drm_vprintf(m, header, &ap);
		va_end(ap);
	}

	if (i915_terminally_wedged(&engine->i915->gpu_error))
		drm_printf(m, "*** WEDGED ***\n");

	drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n",
		   intel_engine_get_seqno(engine),
		   intel_engine_last_submit(engine),
		   engine->hangcheck.seqno,
		   jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
		   engine->timeline->inflight_seqnos);
	drm_printf(m, "\tReset count: %d (global %d)\n",
		   i915_reset_engine_count(error, engine),
		   i915_reset_count(error));

	rcu_read_lock();

	drm_printf(m, "\tRequests:\n");

	rq = list_first_entry(&engine->timeline->requests,
			      struct i915_request, link);
	if (&rq->link != &engine->timeline->requests)
		print_request(m, rq, "\t\tfirst ");

	rq = list_last_entry(&engine->timeline->requests,
			     struct i915_request, link);
	if (&rq->link != &engine->timeline->requests)
		print_request(m, rq, "\t\tlast ");

	rq = i915_gem_find_active_request(engine);
	if (rq) {
		print_request(m, rq, "\t\tactive ");
		drm_printf(m,
			   "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
			   rq->head, rq->postfix, rq->tail,
			   rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
			   rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
		drm_printf(m, "\t\tring->start: 0x%08x\n",
			   i915_ggtt_offset(rq->ring->vma));
		drm_printf(m, "\t\tring->head: 0x%08x\n",
			   rq->ring->head);
		drm_printf(m, "\t\tring->tail: 0x%08x\n",
			   rq->ring->tail);
	}

	rcu_read_unlock();

	if (intel_runtime_pm_get_if_in_use(engine->i915)) {
		intel_engine_print_registers(engine, m);
		intel_runtime_pm_put(engine->i915);
	} else {
		drm_printf(m, "\tDevice is asleep; skipping register dump\n");
	}

	spin_lock_irq(&engine->timeline->lock);
	list_for_each_entry(rq, &engine->timeline->requests, link)
		print_request(m, rq, "\t\tE ");
	drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority);
	for (rb = execlists->first; rb; rb = rb_next(rb)) {
		struct i915_priolist *p =
			rb_entry(rb, typeof(*p), node);

		list_for_each_entry(rq, &p->requests, priotree.link)
			print_request(m, rq, "\t\tQ ");
	}
	spin_unlock_irq(&engine->timeline->lock);

	spin_lock_irq(&b->rb_lock);
	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
		struct intel_wait *w = rb_entry(rb, typeof(*w), node);

		drm_printf(m, "\t%s [%d] waiting for %x\n",
			   w->tsk->comm, w->tsk->pid, w->seqno);
	}
	spin_unlock_irq(&b->rb_lock);

	drm_printf(m, "IRQ? 0x%lx (breadcrumbs? %s) (execlists? %s)\n",
		   engine->irq_posted,
		   yesno(test_bit(ENGINE_IRQ_BREADCRUMB,
				  &engine->irq_posted)),
		   yesno(test_bit(ENGINE_IRQ_EXECLIST,
				  &engine->irq_posted)));

	drm_printf(m, "HWSP:\n");
	hexdump(m, engine->status_page.page_addr, PAGE_SIZE);

	drm_printf(m, "Idle? %s\n", yesno(intel_engine_is_idle(engine)));
}

static u8 user_class_map[] = {
	[I915_ENGINE_CLASS_RENDER] = RENDER_CLASS,
	[I915_ENGINE_CLASS_COPY] = COPY_ENGINE_CLASS,
	[I915_ENGINE_CLASS_VIDEO] = VIDEO_DECODE_CLASS,
	[I915_ENGINE_CLASS_VIDEO_ENHANCE] = VIDEO_ENHANCEMENT_CLASS,
};

struct intel_engine_cs *
intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
{
	if (class >= ARRAY_SIZE(user_class_map))
		return NULL;

	class = user_class_map[class];

	GEM_BUG_ON(class > MAX_ENGINE_CLASS);

	if (instance > MAX_ENGINE_INSTANCE)
		return NULL;

	return i915->engine_class[class][instance];
}
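
/**
 * intel_enable_engine_stats() - Enable engine busy tracking on engine
 * @engine: engine to enable stats collection
 *
 * Start collecting the engine busyness data for @engine.
 *
 * Returns 0 on success or a negative error code.
 */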
int intel_enable_engine_stats(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists *execlists = &engine->execlists;
	unsigned long flags;
	int err = 0;

	if (!intel_engine_supports_stats(engine))
		return -ENODEV;

	tasklet_disable(&execlists->tasklet);
	spin_lock_irqsave(&engine->stats.lock, flags);

	if (unlikely(engine->stats.enabled == ~0)) {
		err = -EBUSY;
		goto unlock;
	}

	if (engine->stats.enabled++ == 0) {
		const struct execlist_port *port = execlists->port;
		unsigned int num_ports = execlists_num_ports(execlists);

		engine->stats.enabled_at = ktime_get();

		/* XXX submission method oblivious? */
		while (num_ports-- && port_isset(port)) {
			engine->stats.active++;
			port++;
		}

		if (engine->stats.active)
			engine->stats.start = engine->stats.enabled_at;
	}

unlock:
	spin_unlock_irqrestore(&engine->stats.lock, flags);
	tasklet_enable(&execlists->tasklet);

	return err;
}

static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
{
	ktime_t total = engine->stats.total;

	/*
	 * If the engine is executing something at the moment
	 * add it to the total.
	 */
	if (engine->stats.active)
		total = ktime_add(total,
				  ktime_sub(ktime_get(), engine->stats.start));

	return total;
}
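
/**
 * intel_engine_get_busy_time() - Return current accumulated engine busyness
 * @engine: engine to report on
 *
 * Returns accumulated time @engine was busy since engine stats were enabled.
 */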
ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine)
{
	ktime_t total;
	unsigned long flags;

	spin_lock_irqsave(&engine->stats.lock, flags);
	total = __intel_engine_get_busy_time(engine);
	spin_unlock_irqrestore(&engine->stats.lock, flags);

	return total;
}
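
/**
 * intel_disable_engine_stats() - Disable engine busy tracking on engine
 * @engine: engine to disable stats collection
 *
 * Stops collecting the engine busyness data for @engine.
 */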
void intel_disable_engine_stats(struct intel_engine_cs *engine)
{
	unsigned long flags;

	if (!intel_engine_supports_stats(engine))
		return;

	spin_lock_irqsave(&engine->stats.lock, flags);
	WARN_ON_ONCE(engine->stats.enabled == 0);
	if (--engine->stats.enabled == 0) {
		engine->stats.total = __intel_engine_get_busy_time(engine);
		engine->stats.active = 0;
	}
	spin_unlock_irqrestore(&engine->stats.lock, flags);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_engine.c"
#endif