1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#include <drm/drm_print.h>
26
27#include "intel_device_info.h"
28#include "i915_drv.h"
29
/*
 * Table mapping each enum intel_platform value to a human-readable name.
 * PLATFORM_NAME() expands INTEL_<x> into the designated initializer
 * [INTEL_<x>] = "<x>", so the entries are position-independent and any
 * enum value without an entry is left as a NULL slot.
 */
#define PLATFORM_NAME(x) [INTEL_##x] = #x
static const char * const platform_names[] = {
	PLATFORM_NAME(I830),
	PLATFORM_NAME(I845G),
	PLATFORM_NAME(I85X),
	PLATFORM_NAME(I865G),
	PLATFORM_NAME(I915G),
	PLATFORM_NAME(I915GM),
	PLATFORM_NAME(I945G),
	PLATFORM_NAME(I945GM),
	PLATFORM_NAME(G33),
	PLATFORM_NAME(PINEVIEW),
	PLATFORM_NAME(I965G),
	PLATFORM_NAME(I965GM),
	PLATFORM_NAME(G45),
	PLATFORM_NAME(GM45),
	PLATFORM_NAME(IRONLAKE),
	PLATFORM_NAME(SANDYBRIDGE),
	PLATFORM_NAME(IVYBRIDGE),
	PLATFORM_NAME(VALLEYVIEW),
	PLATFORM_NAME(HASWELL),
	PLATFORM_NAME(BROADWELL),
	PLATFORM_NAME(CHERRYVIEW),
	PLATFORM_NAME(SKYLAKE),
	PLATFORM_NAME(BROXTON),
	PLATFORM_NAME(KABYLAKE),
	PLATFORM_NAME(GEMINILAKE),
	PLATFORM_NAME(COFFEELAKE),
	PLATFORM_NAME(CANNONLAKE),
	PLATFORM_NAME(ICELAKE),
	PLATFORM_NAME(ELKHARTLAKE),
};
#undef PLATFORM_NAME
63
64const char *intel_platform_name(enum intel_platform platform)
65{
66 BUILD_BUG_ON(ARRAY_SIZE(platform_names) != INTEL_MAX_PLATFORMS);
67
68 if (WARN_ON_ONCE(platform >= ARRAY_SIZE(platform_names) ||
69 platform_names[platform] == NULL))
70 return "<unknown>";
71
72 return platform_names[platform];
73}
74
/**
 * intel_device_info_dump_flags - print all device info feature flags
 * @info: device info whose flags are dumped
 * @p: drm printer used for output
 *
 * Emits one "name: yes/no" line per flag: first the top-level flags
 * enumerated by DEV_INFO_FOR_EACH_FLAG(), then the flags nested under
 * info->display enumerated by DEV_INFO_DISPLAY_FOR_EACH_FLAG().
 * PRINT_FLAG is redefined between the two passes because the member
 * access path differs.
 */
void intel_device_info_dump_flags(const struct intel_device_info *info,
				  struct drm_printer *p)
{
#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name));
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG

#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->display.name));
	DEV_INFO_DISPLAY_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
}
86
87static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
88{
89 int s;
90
91 drm_printf(p, "slice total: %u, mask=%04x\n",
92 hweight8(sseu->slice_mask), sseu->slice_mask);
93 drm_printf(p, "subslice total: %u\n", intel_sseu_subslice_total(sseu));
94 for (s = 0; s < sseu->max_slices; s++) {
95 drm_printf(p, "slice%d: %u subslices, mask=%04x\n",
96 s, intel_sseu_subslices_per_slice(sseu, s),
97 sseu->subslice_mask[s]);
98 }
99 drm_printf(p, "EU total: %u\n", sseu->eu_total);
100 drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice);
101 drm_printf(p, "has slice power gating: %s\n",
102 yesno(sseu->has_slice_pg));
103 drm_printf(p, "has subslice power gating: %s\n",
104 yesno(sseu->has_subslice_pg));
105 drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg));
106}
107
108void intel_device_info_dump_runtime(const struct intel_runtime_info *info,
109 struct drm_printer *p)
110{
111 sseu_dump(&info->sseu, p);
112
113 drm_printf(p, "CS timestamp frequency: %u kHz\n",
114 info->cs_timestamp_frequency_khz);
115}
116
117static int sseu_eu_idx(const struct sseu_dev_info *sseu, int slice,
118 int subslice)
119{
120 int subslice_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice);
121 int slice_stride = sseu->max_subslices * subslice_stride;
122
123 return slice * slice_stride + subslice * subslice_stride;
124}
125
126static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice,
127 int subslice)
128{
129 int i, offset = sseu_eu_idx(sseu, slice, subslice);
130 u16 eu_mask = 0;
131
132 for (i = 0; i < GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); i++) {
133 eu_mask |= ((u16)sseu->eu_mask[offset + i]) <<
134 (i * BITS_PER_BYTE);
135 }
136
137 return eu_mask;
138}
139
140static void sseu_set_eus(struct sseu_dev_info *sseu, int slice, int subslice,
141 u16 eu_mask)
142{
143 int i, offset = sseu_eu_idx(sseu, slice, subslice);
144
145 for (i = 0; i < GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); i++) {
146 sseu->eu_mask[offset + i] =
147 (eu_mask >> (BITS_PER_BYTE * i)) & 0xff;
148 }
149}
150
151void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
152 struct drm_printer *p)
153{
154 int s, ss;
155
156 if (sseu->max_slices == 0) {
157 drm_printf(p, "Unavailable\n");
158 return;
159 }
160
161 for (s = 0; s < sseu->max_slices; s++) {
162 drm_printf(p, "slice%d: %u subslice(s) (0x%hhx):\n",
163 s, intel_sseu_subslices_per_slice(sseu, s),
164 sseu->subslice_mask[s]);
165
166 for (ss = 0; ss < sseu->max_subslices; ss++) {
167 u16 enabled_eus = sseu_get_eus(sseu, s, ss);
168
169 drm_printf(p, "\tsubslice%d: %u EUs (0x%hx)\n",
170 ss, hweight16(enabled_eus), enabled_eus);
171 }
172 }
173}
174
175static u16 compute_eu_total(const struct sseu_dev_info *sseu)
176{
177 u16 i, total = 0;
178
179 for (i = 0; i < ARRAY_SIZE(sseu->eu_mask); i++)
180 total += hweight8(sseu->eu_mask[i]);
181
182 return total;
183}
184
/* Probe the gen11 slice/subslice/EU topology from the GT fuse registers. */
static void gen11_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	u8 s_en;
	u32 ss_en, ss_en_mask;
	u8 eu_en;
	int s;

	/* Maximum (pre-fusing) topology differs between EHL and other gen11. */
	if (IS_ELKHARTLAKE(dev_priv)) {
		sseu->max_slices = 1;
		sseu->max_subslices = 4;
		sseu->max_eus_per_subslice = 8;
	} else {
		sseu->max_slices = 1;
		sseu->max_subslices = 8;
		sseu->max_eus_per_subslice = 8;
	}

	s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK;
	/* The subslice/EU fuses report *disabled* units; invert for enables. */
	ss_en = ~I915_READ(GEN11_GT_SUBSLICE_DISABLE);
	ss_en_mask = BIT(sseu->max_subslices) - 1;
	eu_en = ~(I915_READ(GEN11_EU_DISABLE) & GEN11_EU_DIS_MASK);

	for (s = 0; s < sseu->max_slices; s++) {
		if (s_en & BIT(s)) {
			int ss_idx = sseu->max_subslices * s;
			int ss;

			sseu->slice_mask |= BIT(s);
			sseu->subslice_mask[s] = (ss_en >> ss_idx) & ss_en_mask;
			/*
			 * The same EU enable mask is applied to every enabled
			 * subslice (a single GEN11_EU_DISABLE read).
			 */
			for (ss = 0; ss < sseu->max_subslices; ss++) {
				if (sseu->subslice_mask[s] & BIT(ss))
					sseu_set_eus(sseu, s, ss, eu_en);
			}
		}
	}
	sseu->eu_per_subslice = hweight8(eu_en);
	sseu->eu_total = compute_eu_total(sseu);

	/* No power-gating restrictions: slice, subslice and EU PG all on. */
	sseu->has_slice_pg = 1;
	sseu->has_subslice_pg = 1;
	sseu->has_eu_pg = 1;
}
229
/* Probe the gen10 (CNL) slice/subslice/EU topology from the fuse registers. */
static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	const u32 fuse2 = I915_READ(GEN8_FUSE2);
	int s, ss;
	const int eu_mask = 0xff;
	u32 subslice_mask, eu_en;

	sseu->slice_mask = (fuse2 & GEN10_F2_S_ENA_MASK) >>
			   GEN10_F2_S_ENA_SHIFT;
	sseu->max_slices = 6;
	sseu->max_subslices = 4;
	sseu->max_eus_per_subslice = 8;

	/* The 4-bit subslice disable field applies to every slice. */
	subslice_mask = (1 << 4) - 1;
	subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >>
			   GEN10_F2_SS_DIS_SHIFT);

	/*
	 * Slice0 can use the full subslice mask; the remaining slices
	 * expose at most 2 subslices (mask & 0x3).
	 */
	sseu->subslice_mask[0] = subslice_mask;
	for (s = 1; s < sseu->max_slices; s++)
		sseu->subslice_mask[s] = subslice_mask & 0x3;

	/*
	 * Per-EU enable bits are packed 8 per subslice across the
	 * EU_DISABLE0..3 registers (inverted on read). Slice0 uses all of
	 * EU_DISABLE0; subsequent slices straddle register boundaries.
	 */
	/* Slice0 */
	eu_en = ~I915_READ(GEN8_EU_DISABLE0);
	for (ss = 0; ss < sseu->max_subslices; ss++)
		sseu_set_eus(sseu, 0, ss, (eu_en >> (8 * ss)) & eu_mask);
	/* Slice1 */
	sseu_set_eus(sseu, 1, 0, (eu_en >> 24) & eu_mask);
	eu_en = ~I915_READ(GEN8_EU_DISABLE1);
	sseu_set_eus(sseu, 1, 1, eu_en & eu_mask);
	/* Slice2 */
	sseu_set_eus(sseu, 2, 0, (eu_en >> 8) & eu_mask);
	sseu_set_eus(sseu, 2, 1, (eu_en >> 16) & eu_mask);
	/* Slice3 */
	sseu_set_eus(sseu, 3, 0, (eu_en >> 24) & eu_mask);
	eu_en = ~I915_READ(GEN8_EU_DISABLE2);
	sseu_set_eus(sseu, 3, 1, eu_en & eu_mask);
	/* Slice4 */
	sseu_set_eus(sseu, 4, 0, (eu_en >> 8) & eu_mask);
	sseu_set_eus(sseu, 4, 1, (eu_en >> 16) & eu_mask);
	/* Slice5 */
	sseu_set_eus(sseu, 5, 0, (eu_en >> 24) & eu_mask);
	eu_en = ~I915_READ(GEN10_EU_DISABLE3);
	sseu_set_eus(sseu, 5, 1, eu_en & eu_mask);

	/*
	 * Second pass: mark a subslice disabled if all of its EUs turned
	 * out to be fused off.
	 */
	for (s = 0; s < sseu->max_slices; s++) {
		for (ss = 0; ss < sseu->max_subslices; ss++) {
			if (sseu_get_eus(sseu, s, ss) == 0)
				sseu->subslice_mask[s] &= ~BIT(ss);
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * EU distribution is expected to be near-uniform across subslices;
	 * DIV_ROUND_UP absorbs a single fused-off EU. Guard against a fully
	 * fused-off part (subslice total of 0).
	 */
	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
				DIV_ROUND_UP(sseu->eu_total,
					     intel_sseu_subslice_total(sseu)) :
				0;

	/* No restrictions on power gating. */
	sseu->has_slice_pg = 1;
	sseu->has_subslice_pg = 1;
	sseu->has_eu_pg = 1;
}
307
/* Probe the CHV slice/subslice/EU topology from the CHV_FUSE_GT register. */
static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	u32 fuse;

	fuse = I915_READ(CHV_FUSE_GT);

	/* CHV has a single slice with at most 2 subslices of 8 EUs each. */
	sseu->slice_mask = BIT(0);
	sseu->max_slices = 1;
	sseu->max_subslices = 2;
	sseu->max_eus_per_subslice = 8;

	if (!(fuse & CHV_FGT_DISABLE_SS0)) {
		/* EU disable bits for SS0 are split across two 4-bit fields. */
		u8 disabled_mask =
			((fuse & CHV_FGT_EU_DIS_SS0_R0_MASK) >>
			 CHV_FGT_EU_DIS_SS0_R0_SHIFT) |
			(((fuse & CHV_FGT_EU_DIS_SS0_R1_MASK) >>
			  CHV_FGT_EU_DIS_SS0_R1_SHIFT) << 4);

		sseu->subslice_mask[0] |= BIT(0);
		sseu_set_eus(sseu, 0, 0, ~disabled_mask);
	}

	if (!(fuse & CHV_FGT_DISABLE_SS1)) {
		/* Same split-field layout for SS1. */
		u8 disabled_mask =
			((fuse & CHV_FGT_EU_DIS_SS1_R0_MASK) >>
			 CHV_FGT_EU_DIS_SS1_R0_SHIFT) |
			(((fuse & CHV_FGT_EU_DIS_SS1_R1_MASK) >>
			  CHV_FGT_EU_DIS_SS1_R1_SHIFT) << 4);

		sseu->subslice_mask[0] |= BIT(1);
		sseu_set_eus(sseu, 0, 1, ~disabled_mask);
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * EU distribution is expected to be uniform across subslices (plain
	 * division, no rounding); 0 if everything is fused off.
	 */
	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
				sseu->eu_total /
					intel_sseu_subslice_total(sseu) :
				0;

	/*
	 * No slice power gating; subslice PG needs more than one subslice,
	 * EU PG needs more than 2 EUs per subslice.
	 */
	sseu->has_slice_pg = 0;
	sseu->has_subslice_pg = intel_sseu_subslice_total(sseu) > 1;
	sseu->has_eu_pg = (sseu->eu_per_subslice > 2);
}
361
/* Probe the gen9 (SKL/BXT/KBL/...) slice/subslice/EU topology from fuses. */
static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	int s, ss;
	u32 fuse2, eu_disable, subslice_mask;
	const u8 eu_mask = 0xff;

	fuse2 = I915_READ(GEN8_FUSE2);
	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;

	/* gen9 LP (BXT/GLK): 1 slice of up to 3 subslices; others: 3x4. */
	sseu->max_slices = IS_GEN9_LP(dev_priv) ? 1 : 3;
	sseu->max_subslices = IS_GEN9_LP(dev_priv) ? 3 : 4;
	sseu->max_eus_per_subslice = 8;

	/*
	 * The subslice disable field is global, i.e. it applies to each
	 * of the enabled slices.
	 */
	subslice_mask = (1 << sseu->max_subslices) - 1;
	subslice_mask &= ~((fuse2 & GEN9_F2_SS_DIS_MASK) >>
			   GEN9_F2_SS_DIS_SHIFT);

	/* Walk enabled slices/subslices and record the per-EU enables. */
	for (s = 0; s < sseu->max_slices; s++) {
		if (!(sseu->slice_mask & BIT(s)))
			/* skip disabled slice */
			continue;

		sseu->subslice_mask[s] = subslice_mask;

		eu_disable = I915_READ(GEN9_EU_DISABLE(s));
		for (ss = 0; ss < sseu->max_subslices; ss++) {
			int eu_per_ss;
			u8 eu_disabled_mask;

			if (!(sseu->subslice_mask[s] & BIT(ss)))
				/* skip disabled subslice */
				continue;

			/* 8 disable bits per subslice within the register. */
			eu_disabled_mask = (eu_disable >> (ss * 8)) & eu_mask;

			sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);

			eu_per_ss = sseu->max_eus_per_subslice -
				hweight8(eu_disabled_mask);

			/*
			 * Record which subslices have exactly 7 EUs; used
			 * elsewhere to rebalance work across unbalanced
			 * subslices.
			 */
			if (eu_per_ss == 7)
				sseu->subslice_7eu[s] |= BIT(ss);
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * EU distribution is expected to be near-uniform; DIV_ROUND_UP
	 * absorbs a single fused-off EU per subslice. 0 when everything
	 * is fused off.
	 */
	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
				DIV_ROUND_UP(sseu->eu_total,
					     intel_sseu_subslice_total(sseu)) :
				0;

	/*
	 * Slice PG: only non-LP parts with more than one slice.
	 * Subslice PG: only LP parts with more than one subslice.
	 * EU PG: more than 2 EUs per subslice.
	 */
	sseu->has_slice_pg =
		!IS_GEN9_LP(dev_priv) && hweight8(sseu->slice_mask) > 1;
	sseu->has_subslice_pg =
		IS_GEN9_LP(dev_priv) && intel_sseu_subslice_total(sseu) > 1;
	sseu->has_eu_pg = sseu->eu_per_subslice > 2;

	if (IS_GEN9_LP(dev_priv)) {
#define IS_SS_DISABLED(ss)	(!(sseu->subslice_mask[0] & BIT(ss)))
		/* Pooled EU only when all 3 subslices are present. */
		info->has_pooled_eu = hweight8(sseu->subslice_mask[0]) == 3;

		/*
		 * Minimum pool size depends on which subslice (if any) is
		 * fused off.
		 */
		sseu->min_eu_in_pool = 0;
		if (info->has_pooled_eu) {
			if (IS_SS_DISABLED(2) || IS_SS_DISABLED(0))
				sseu->min_eu_in_pool = 3;
			else if (IS_SS_DISABLED(1))
				sseu->min_eu_in_pool = 6;
			else
				sseu->min_eu_in_pool = 9;
		}
#undef IS_SS_DISABLED
	}
}
466
/* Probe the BDW slice/subslice/EU topology from the gen8 fuse registers. */
static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	int s, ss;
	u32 fuse2, subslice_mask, eu_disable[3];

	fuse2 = I915_READ(GEN8_FUSE2);
	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
	sseu->max_slices = 3;
	sseu->max_subslices = 3;
	sseu->max_eus_per_subslice = 8;

	/*
	 * The subslice disable field is global, i.e. it applies to each
	 * of the enabled slices.
	 */
	subslice_mask = GENMASK(sseu->max_subslices - 1, 0);
	subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >>
			   GEN8_F2_SS_DIS_SHIFT);

	/*
	 * The per-slice EU disable bits straddle the three EU_DISABLE
	 * registers; stitch each slice's bits back together.
	 */
	eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
	eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
			 (32 - GEN8_EU_DIS0_S1_SHIFT));
	eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
			 (32 - GEN8_EU_DIS1_S2_SHIFT));

	/* Walk enabled slices/subslices and record the per-EU enables. */
	for (s = 0; s < sseu->max_slices; s++) {
		if (!(sseu->slice_mask & BIT(s)))
			/* skip disabled slice */
			continue;

		sseu->subslice_mask[s] = subslice_mask;

		for (ss = 0; ss < sseu->max_subslices; ss++) {
			u8 eu_disabled_mask;
			u32 n_disabled;

			if (!(sseu->subslice_mask[s] & BIT(ss)))
				/* skip disabled subslice */
				continue;

			eu_disabled_mask =
				eu_disable[s] >> (ss * sseu->max_eus_per_subslice);

			sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);

			n_disabled = hweight8(eu_disabled_mask);

			/*
			 * Record which subslices have exactly 7 EUs (see
			 * the gen9 path for how subslice_7eu is used).
			 */
			if (sseu->max_eus_per_subslice - n_disabled == 7)
				sseu->subslice_7eu[s] |= 1 << ss;
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * EU distribution is expected to be near-uniform; DIV_ROUND_UP
	 * absorbs a single fused-off EU per subslice.
	 */
	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
				DIV_ROUND_UP(sseu->eu_total,
					     intel_sseu_subslice_total(sseu)) :
				0;

	/*
	 * Slice power gating only on parts with more than one slice;
	 * no subslice or EU power gating on BDW.
	 */
	sseu->has_slice_pg = hweight8(sseu->slice_mask) > 1;
	sseu->has_subslice_pg = 0;
	sseu->has_eu_pg = 0;
}
549
/* Derive the HSW slice/subslice/EU topology from static GT level + fuses. */
static void haswell_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	u32 fuse1;
	int s, ss;

	/*
	 * There is no register reporting the slice/subslice count on HSW;
	 * derive it from the static device info GT level instead.
	 */
	switch (INTEL_INFO(dev_priv)->gt) {
	default:
		MISSING_CASE(INTEL_INFO(dev_priv)->gt);
		/* fall through */
	case 1:
		sseu->slice_mask = BIT(0);
		sseu->subslice_mask[0] = BIT(0);
		break;
	case 2:
		sseu->slice_mask = BIT(0);
		sseu->subslice_mask[0] = BIT(0) | BIT(1);
		break;
	case 3:
		sseu->slice_mask = BIT(0) | BIT(1);
		sseu->subslice_mask[0] = BIT(0) | BIT(1);
		sseu->subslice_mask[1] = BIT(0) | BIT(1);
		break;
	}

	sseu->max_slices = hweight8(sseu->slice_mask);
	sseu->max_subslices = hweight8(sseu->subslice_mask[0]);

	/* EUs per subslice are reported via the PAVP fuse. */
	fuse1 = I915_READ(HSW_PAVP_FUSE1);
	switch ((fuse1 & HSW_F1_EU_DIS_MASK) >> HSW_F1_EU_DIS_SHIFT) {
	default:
		MISSING_CASE((fuse1 & HSW_F1_EU_DIS_MASK) >>
			     HSW_F1_EU_DIS_SHIFT);
		/* fall through */
	case HSW_F1_EU_DIS_10EUS:
		sseu->eu_per_subslice = 10;
		break;
	case HSW_F1_EU_DIS_8EUS:
		sseu->eu_per_subslice = 8;
		break;
	case HSW_F1_EU_DIS_6EUS:
		sseu->eu_per_subslice = 6;
		break;
	}
	sseu->max_eus_per_subslice = sseu->eu_per_subslice;

	/* Every EU of every present subslice is enabled. */
	for (s = 0; s < sseu->max_slices; s++) {
		for (ss = 0; ss < sseu->max_subslices; ss++) {
			sseu_set_eus(sseu, s, ss,
				     (1UL << sseu->eu_per_subslice) - 1);
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/* HSW has no slice, subslice or EU power gating. */
	sseu->has_slice_pg = 0;
	sseu->has_subslice_pg = 0;
	sseu->has_eu_pg = 0;
}
614
615static u32 read_reference_ts_freq(struct drm_i915_private *dev_priv)
616{
617 u32 ts_override = I915_READ(GEN9_TIMESTAMP_OVERRIDE);
618 u32 base_freq, frac_freq;
619
620 base_freq = ((ts_override & GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK) >>
621 GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT) + 1;
622 base_freq *= 1000;
623
624 frac_freq = ((ts_override &
625 GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK) >>
626 GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT);
627 frac_freq = 1000 / (frac_freq + 1);
628
629 return base_freq + frac_freq;
630}
631
632static u32 gen10_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
633 u32 rpm_config_reg)
634{
635 u32 f19_2_mhz = 19200;
636 u32 f24_mhz = 24000;
637 u32 crystal_clock = (rpm_config_reg &
638 GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
639 GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
640
641 switch (crystal_clock) {
642 case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
643 return f19_2_mhz;
644 case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
645 return f24_mhz;
646 default:
647 MISSING_CASE(crystal_clock);
648 return 0;
649 }
650}
651
652static u32 gen11_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
653 u32 rpm_config_reg)
654{
655 u32 f19_2_mhz = 19200;
656 u32 f24_mhz = 24000;
657 u32 f25_mhz = 25000;
658 u32 f38_4_mhz = 38400;
659 u32 crystal_clock = (rpm_config_reg &
660 GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
661 GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
662
663 switch (crystal_clock) {
664 case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
665 return f24_mhz;
666 case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
667 return f19_2_mhz;
668 case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ:
669 return f38_4_mhz;
670 case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ:
671 return f25_mhz;
672 default:
673 MISSING_CASE(crystal_clock);
674 return 0;
675 }
676}
677
/*
 * Determine the command streamer timestamp frequency (in kHz) for this
 * device. Older gens use fixed relationships; gen9+ derive it from
 * CTC_MODE and either TIMESTAMP_OVERRIDE or RPM_CONFIG0.
 */
static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
{
	u32 f12_5_mhz = 12500;
	u32 f19_2_mhz = 19200;
	u32 f24_mhz = 24000;

	if (INTEL_GEN(dev_priv) <= 4) {
		/* Gen4 and earlier: fixed divide-by-16 of the raw clock. */
		return dev_priv->rawclk_freq / 16;
	} else if (INTEL_GEN(dev_priv) <= 8) {
		/* Gen5-8: fixed 12.5 MHz timestamp frequency. */
		return f12_5_mhz;
	} else if (INTEL_GEN(dev_priv) <= 9) {
		u32 ctc_reg = I915_READ(CTC_MODE);
		u32 freq = 0;

		/*
		 * CTC_MODE selects between the divide-logic override
		 * frequency and the crystal clock.
		 */
		if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
			freq = read_reference_ts_freq(dev_priv);
		} else {
			freq = IS_GEN9_LP(dev_priv) ? f19_2_mhz : f24_mhz;

			/*
			 * Scale down by the CTC shift: the timestamp
			 * register may increment only every few clock
			 * cycles.
			 */
			freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >>
				      CTC_SHIFT_PARAMETER_SHIFT);
		}

		return freq;
	} else if (INTEL_GEN(dev_priv) <= 11) {
		u32 ctc_reg = I915_READ(CTC_MODE);
		u32 freq = 0;

		/*
		 * There are two ways to compute the frequency: through
		 * TIMESTAMP_OVERRIDE or through RPM_CONFIG0. CTC_MODE
		 * tells us which one to use.
		 */
		if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
			freq = read_reference_ts_freq(dev_priv);
		} else {
			u32 rpm_config_reg = I915_READ(RPM_CONFIG0);

			/* Crystal clock field layout changed in gen11. */
			if (INTEL_GEN(dev_priv) <= 10)
				freq = gen10_get_crystal_clock_freq(dev_priv,
								rpm_config_reg);
			else
				freq = gen11_get_crystal_clock_freq(dev_priv,
								rpm_config_reg);

			/*
			 * Scale down by the CTC shift: the timestamp
			 * register may increment only every few clock
			 * cycles.
			 */
			freq >>= 3 - ((rpm_config_reg &
				       GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
				      GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
		}

		return freq;
	}

	MISSING_CASE("Unknown gen, unable to read command streamer timestamp frequency\n");
	return 0;
}
754
/*
 * Redefine INTEL_VGA_DEVICE so the INTEL_*_IDS() macros below expand to
 * bare PCI device ids rather than { id, info } pairs.
 */
#undef INTEL_VGA_DEVICE
#define INTEL_VGA_DEVICE(id, info) (id)

/* PCI device ids belonging to the ULT subplatform. */
static const u16 subplatform_ult_ids[] = {
	INTEL_HSW_ULT_GT1_IDS(0),
	INTEL_HSW_ULT_GT2_IDS(0),
	INTEL_HSW_ULT_GT3_IDS(0),
	INTEL_BDW_ULT_GT1_IDS(0),
	INTEL_BDW_ULT_GT2_IDS(0),
	INTEL_BDW_ULT_GT3_IDS(0),
	INTEL_BDW_ULT_RSVD_IDS(0),
	INTEL_SKL_ULT_GT1_IDS(0),
	INTEL_SKL_ULT_GT2_IDS(0),
	INTEL_SKL_ULT_GT3_IDS(0),
	INTEL_KBL_ULT_GT1_IDS(0),
	INTEL_KBL_ULT_GT2_IDS(0),
	INTEL_KBL_ULT_GT3_IDS(0),
	INTEL_CFL_U_GT2_IDS(0),
	INTEL_CFL_U_GT3_IDS(0),
	INTEL_WHL_U_GT1_IDS(0),
	INTEL_WHL_U_GT2_IDS(0),
	INTEL_WHL_U_GT3_IDS(0),
};

/* PCI device ids belonging to the ULX subplatform. */
static const u16 subplatform_ulx_ids[] = {
	INTEL_HSW_ULX_GT1_IDS(0),
	INTEL_HSW_ULX_GT2_IDS(0),
	INTEL_BDW_ULX_GT1_IDS(0),
	INTEL_BDW_ULX_GT2_IDS(0),
	INTEL_BDW_ULX_GT3_IDS(0),
	INTEL_BDW_ULX_RSVD_IDS(0),
	INTEL_SKL_ULX_GT1_IDS(0),
	INTEL_SKL_ULX_GT2_IDS(0),
	INTEL_KBL_ULX_GT1_IDS(0),
	INTEL_KBL_ULX_GT2_IDS(0),
	INTEL_AML_KBL_GT2_IDS(0),
	INTEL_AML_CFL_GT2_IDS(0),
};

/* PCI device ids of parts with the PORTF subplatform capability. */
static const u16 subplatform_portf_ids[] = {
	INTEL_CNL_PORT_F_IDS(0),
	INTEL_ICL_PORT_F_IDS(0),
};
798
799static bool find_devid(u16 id, const u16 *p, unsigned int num)
800{
801 for (; num; num--, p++) {
802 if (*p == id)
803 return true;
804 }
805
806 return false;
807}
808
/*
 * Initialize RUNTIME_INFO(i915)->platform_mask: set the bit for the
 * current platform, then OR in any subplatform bits (ULT/ULX/PORTF)
 * matched against the PCI device id tables above.
 */
void intel_device_info_subplatform_init(struct drm_i915_private *i915)
{
	const struct intel_device_info *info = INTEL_INFO(i915);
	const struct intel_runtime_info *rinfo = RUNTIME_INFO(i915);
	const unsigned int pi = __platform_mask_index(rinfo, info->platform);
	const unsigned int pb = __platform_mask_bit(rinfo, info->platform);
	u16 devid = INTEL_DEVID(i915);
	u32 mask = 0;

	/* Make sure IS_<platform> checks are working. */
	RUNTIME_INFO(i915)->platform_mask[pi] = BIT(pb);

	/* Find and mark subplatform bits based on the PCI device id. */
	if (find_devid(devid, subplatform_ult_ids,
		       ARRAY_SIZE(subplatform_ult_ids))) {
		mask = BIT(INTEL_SUBPLATFORM_ULT);
	} else if (find_devid(devid, subplatform_ulx_ids,
			      ARRAY_SIZE(subplatform_ulx_ids))) {
		mask = BIT(INTEL_SUBPLATFORM_ULX);
		if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
			/* On HSW/BDW, ULX machines are also considered ULT. */
			mask |= BIT(INTEL_SUBPLATFORM_ULT);
		}
	} else if (find_devid(devid, subplatform_portf_ids,
			      ARRAY_SIZE(subplatform_portf_ids))) {
		mask = BIT(INTEL_SUBPLATFORM_PORTF);
	}

	GEM_BUG_ON(mask & ~INTEL_SUBPLATFORM_BITS);

	RUNTIME_INFO(i915)->platform_mask[pi] |= mask;
}
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
/**
 * intel_device_info_runtime_init - initialize runtime info
 * @dev_priv: the i915 device
 *
 * Determine the intel_device_info fields that can only be known at
 * runtime: scaler and sprite counts per pipe, fused-off pipes, the SSEU
 * topology, ppGTT availability under VT-d (gen6), and the CS timestamp
 * frequency.
 */
void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	struct intel_runtime_info *runtime = RUNTIME_INFO(dev_priv);
	enum pipe pipe;

	/* Pipe scalers: 2 per pipe on gen10+; 2/2/1 on gen9; none earlier. */
	if (INTEL_GEN(dev_priv) >= 10) {
		for_each_pipe(dev_priv, pipe)
			runtime->num_scalers[pipe] = 2;
	} else if (IS_GEN(dev_priv, 9)) {
		runtime->num_scalers[PIPE_A] = 2;
		runtime->num_scalers[PIPE_B] = 2;
		runtime->num_scalers[PIPE_C] = 1;
	}

	BUILD_BUG_ON(BITS_PER_TYPE(intel_engine_mask_t) < I915_NUM_ENGINES);

	/* Sprite planes per pipe, by platform generation. */
	if (INTEL_GEN(dev_priv) >= 11)
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 6;
	else if (IS_GEN(dev_priv, 10) || IS_GEMINILAKE(dev_priv))
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 3;
	else if (IS_BROXTON(dev_priv)) {
		/* BXT: pipes A/B expose 2 sprites, pipe C only 1. */
		runtime->num_sprites[PIPE_A] = 2;
		runtime->num_sprites[PIPE_B] = 2;
		runtime->num_sprites[PIPE_C] = 1;
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 2;
	} else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 1;
	}

	if (i915_modparams.disable_display) {
		DRM_INFO("Display disabled (module parameter)\n");
		info->num_pipes = 0;
	} else if (HAS_DISPLAY(dev_priv) &&
		   (IS_GEN_RANGE(dev_priv, 7, 8)) &&
		   HAS_PCH_SPLIT(dev_priv)) {
		u32 fuse_strap = I915_READ(FUSE_STRAP);
		u32 sfuse_strap = I915_READ(SFUSE_STRAP);

		/*
		 * Treat the display as fused off when the strap registers say
		 * so. The extra CPT check (!FUSE_LOCK) handles the case where
		 * a fused-off display makes PCH reads return all zeroes --
		 * NOTE(review): inherited rationale, confirm against PCH docs.
		 */
		if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
		    sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
		    (HAS_PCH_CPT(dev_priv) &&
		     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
			DRM_INFO("Display fused off, disabling\n");
			info->num_pipes = 0;
		} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
			DRM_INFO("PipeC fused off\n");
			info->num_pipes -= 1;
		}
	} else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) {
		u32 dfsm = I915_READ(SKL_DFSM);
		u8 disabled_mask = 0;
		bool invalid;
		int num_bits;

		if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
			disabled_mask |= BIT(PIPE_A);
		if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
			disabled_mask |= BIT(PIPE_B);
		if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
			disabled_mask |= BIT(PIPE_C);

		num_bits = hweight8(disabled_mask);

		/*
		 * Disabled pipes, if any, must be the last ones with no
		 * holes in the mask; any other combination is invalid.
		 */
		switch (disabled_mask) {
		case BIT(PIPE_A):
		case BIT(PIPE_B):
		case BIT(PIPE_A) | BIT(PIPE_B):
		case BIT(PIPE_A) | BIT(PIPE_C):
			invalid = true;
			break;
		default:
			invalid = false;
		}

		if (num_bits > info->num_pipes || invalid)
			DRM_ERROR("invalid pipe fuse configuration: 0x%x\n",
				  disabled_mask);
		else
			info->num_pipes -= num_bits;
	}

	/* Initialize slice/subslice/EU info. */
	if (IS_HASWELL(dev_priv))
		haswell_sseu_info_init(dev_priv);
	else if (IS_CHERRYVIEW(dev_priv))
		cherryview_sseu_info_init(dev_priv);
	else if (IS_BROADWELL(dev_priv))
		broadwell_sseu_info_init(dev_priv);
	else if (IS_GEN(dev_priv, 9))
		gen9_sseu_info_init(dev_priv);
	else if (IS_GEN(dev_priv, 10))
		gen10_sseu_info_init(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 11)
		gen11_sseu_info_init(dev_priv);

	if (IS_GEN(dev_priv, 6) && intel_vtd_active()) {
		DRM_INFO("Disabling ppGTT for VT-d support\n");
		info->ppgtt_type = INTEL_PPGTT_NONE;
	}

	/* Initialize the command streamer timestamp frequency. */
	runtime->cs_timestamp_frequency_khz =
		read_timestamp_frequency(dev_priv);
}
985
986void intel_driver_caps_print(const struct intel_driver_caps *caps,
987 struct drm_printer *p)
988{
989 drm_printf(p, "Has logical contexts? %s\n",
990 yesno(caps->has_logical_contexts));
991 drm_printf(p, "scheduler: %x\n", caps->scheduler);
992}
993
994
995
996
997
998
999
/**
 * intel_device_info_init_mmio - setup early device info using mmio reads
 * @dev_priv: device private
 *
 * On gen11+, read GEN11_GT_VEBOX_VDBOX_DISABLE to determine which
 * VDBOX/VEBOX engines are fused off, prune them from info->engine_mask,
 * and record which VDBOXes have SFC access.
 */
void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	unsigned int logical_vdbox = 0;
	unsigned int i;
	u32 media_fuse;
	u16 vdbox_mask;
	u16 vebox_mask;

	/* Media engine fusing is only handled for gen11 and newer. */
	if (INTEL_GEN(dev_priv) < 11)
		return;

	/* The fuse register reports disabled engines; invert for enables. */
	media_fuse = ~I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);

	vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
	vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
		     GEN11_GT_VEBOX_DISABLE_SHIFT;

	for (i = 0; i < I915_MAX_VCS; i++) {
		if (!HAS_ENGINE(dev_priv, _VCS(i)))
			continue;

		if (!(BIT(i) & vdbox_mask)) {
			info->engine_mask &= ~BIT(_VCS(i));
			DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
			continue;
		}

		/*
		 * In Gen11, only even numbered *logical* VDBOXes are hooked
		 * up to an SFC (Scaler & Format Converter) unit.
		 */
		if (logical_vdbox++ % 2 == 0)
			RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i);
	}
	DRM_DEBUG_DRIVER("vdbox enable: %04x, instances: %04lx\n",
			 vdbox_mask, VDBOX_MASK(dev_priv));
	GEM_BUG_ON(vdbox_mask != VDBOX_MASK(dev_priv));

	for (i = 0; i < I915_MAX_VECS; i++) {
		if (!HAS_ENGINE(dev_priv, _VECS(i)))
			continue;

		if (!(BIT(i) & vebox_mask)) {
			info->engine_mask &= ~BIT(_VECS(i));
			DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
		}
	}
	DRM_DEBUG_DRIVER("vebox enable: %04x, instances: %04lx\n",
			 vebox_mask, VEBOX_MASK(dev_priv));
	GEM_BUG_ON(vebox_mask != VEBOX_MASK(dev_priv));
}
1052