1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28#include <linux/module.h>
29#include <linux/pm_runtime.h>
30
31#include <drm/drm_atomic_helper.h>
32#include <drm/drm_fourcc.h>
33#include <drm/drm_plane_helper.h>
34
35#include "display/intel_atomic.h"
36#include "display/intel_display_types.h"
37#include "display/intel_fbc.h"
38#include "display/intel_sprite.h"
39
40#include "gt/intel_llc.h"
41
42#include "i915_drv.h"
43#include "i915_irq.h"
44#include "i915_trace.h"
45#include "intel_pm.h"
46#include "intel_sideband.h"
47#include "../../../platform/x86/intel_ips.h"
48
/*
 * Gen9 (SKL/BXT/KBL/GLK) display clock-gating / chicken-bit setup.
 *
 * Each write below sets or clears individual workaround bits via
 * read-modify-write of display "chicken" registers.  The write order is
 * preserved from the original sequence; NOTE(review): the specific
 * workaround names/bspec references were stripped — confirm against bspec
 * before reordering or removing any of these.
 */
static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
	if (HAS_LLC(dev_priv)) {
		/* LLC parts: enable the DE compressed-resource hash mode. */
		I915_WRITE(CHICKEN_PAR1_1,
			   I915_READ(CHICKEN_PAR1_1) |
			   SKL_DE_COMPRESSED_HASH_MODE);
	}

	/* Enable the eDP PSR read-pointer wrap fix. */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);

	/* Mask wake-memory in the DCPR chicken register. */
	I915_WRITE(GEN8_CHICKEN_DCPR_1,
		   I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);

	/*
	 * Disable FBC watermarks and enable FBC memory wake in the
	 * display arbiter control register.
	 */
	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
		   DISP_FBC_WM_DIS |
		   DISP_FBC_MEMORY_WAKE);

	/* Disable the DPFC "dummy0" behaviour. */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_DISABLE_DUMMY0);

	if (IS_SKYLAKE(dev_priv)) {
		/* Skylake only: turn DOP clock gating off. */
		I915_WRITE(GEN7_MISCCPCTL, I915_READ(GEN7_MISCCPCTL)
			   & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	}
}
88
/*
 * Broxton-specific clock-gating setup, layered on top of the common
 * gen9 sequence.
 */
static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* Disable SDE unit clock gating. */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * Disable HDC-unit clock gating for HDCREQ.  NOTE(review): the
	 * original workaround annotation was stripped — confirm the exact
	 * Wa name against bspec.
	 */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);

	/*
	 * Keep clock gating disabled for both backlight PWM units;
	 * presumably needed for PWM to operate correctly on this platform
	 * (original rationale comment was stripped).
	 */
	I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
		   PWM1_GATING_DIS | PWM2_GATING_DIS);

	/*
	 * Program a 950 us MMIO timeout.  NOTE(review): looks like a
	 * hang-avoidance measure for MMIO accesses while PLLs are off —
	 * confirm against bspec before changing the value.
	 */
	I915_WRITE(RM_TIMEOUT, MMIO_TIMEOUT_US(950));
}
119
/*
 * Geminilake-specific clock-gating setup, layered on top of the common
 * gen9 sequence.
 */
static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/*
	 * Keep clock gating disabled for both backlight PWM units
	 * (same bits as on BXT; original rationale comment was stripped).
	 */
	I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
		   PWM1_GATING_DIS | PWM2_GATING_DIS);

	/*
	 * Early GLK steppings (up to revid A1): keep the CL0-CL2 power
	 * islands from powering down by clearing the corresponding bits
	 * in CHICKEN_MISC_2.
	 */
	if (IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1)) {
		u32 val = I915_READ(CHICKEN_MISC_2);
		val &= ~(GLK_CL0_PWR_DOWN |
			 GLK_CL1_PWR_DOWN |
			 GLK_CL2_PWR_DOWN);
		I915_WRITE(CHICKEN_MISC_2, val);
	}

}
142
143static void pnv_get_mem_freq(struct drm_i915_private *dev_priv)
144{
145 u32 tmp;
146
147 tmp = I915_READ(CLKCFG);
148
149 switch (tmp & CLKCFG_FSB_MASK) {
150 case CLKCFG_FSB_533:
151 dev_priv->fsb_freq = 533;
152 break;
153 case CLKCFG_FSB_800:
154 dev_priv->fsb_freq = 800;
155 break;
156 case CLKCFG_FSB_667:
157 dev_priv->fsb_freq = 667;
158 break;
159 case CLKCFG_FSB_400:
160 dev_priv->fsb_freq = 400;
161 break;
162 }
163
164 switch (tmp & CLKCFG_MEM_MASK) {
165 case CLKCFG_MEM_533:
166 dev_priv->mem_freq = 533;
167 break;
168 case CLKCFG_MEM_667:
169 dev_priv->mem_freq = 667;
170 break;
171 case CLKCFG_MEM_800:
172 dev_priv->mem_freq = 800;
173 break;
174 }
175
176
177 tmp = I915_READ(CSHRDDR3CTL);
178 dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
179}
180
/*
 * Decode the Ironlake memory and FSB frequencies from the DDRMPLL1 and
 * CSIPLL0 registers, caching the results in dev_priv->mem_freq and
 * dev_priv->fsb_freq.  Unknown encodings log a debug message and leave
 * the cached value at 0 (callers treat 0 as "unknown").
 */
static void ilk_get_mem_freq(struct drm_i915_private *dev_priv)
{
	u16 ddrpll, csipll;

	ddrpll = intel_uncore_read16(&dev_priv->uncore, DDRMPLL1);
	csipll = intel_uncore_read16(&dev_priv->uncore, CSIPLL0);

	/* Low byte of DDRMPLL1 encodes the memory frequency in MHz. */
	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		drm_dbg(&dev_priv->drm, "unknown memory frequency 0x%02x\n",
			ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	/*
	 * Low 10 bits of CSIPLL0 encode the FSB frequency.  NOTE(review):
	 * the stored values (3200..6400) presumably use a different unit
	 * than the MHz values above — verify against consumers.
	 */
	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		drm_dbg(&dev_priv->drm, "unknown fsb frequency 0x%04x\n",
			csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}
}
237
/*
 * CxSR (self-refresh) latency table, keyed by platform/memory config.
 * Column order follows struct cxsr_latency: the first four columns are
 * {is_desktop, is_ddr3, fsb_freq, mem_freq} (this is what
 * intel_get_cxsr_latency() matches on); the remaining four are latency
 * values — usage below reads display_sr, display_hpll_disable,
 * cursor_sr and cursor_hpll_disable.  NOTE(review): confirm the exact
 * field order of the last four columns against the struct definition
 * before editing rows.
 */
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},

	/* NOTE(review): 34106 breaks the "+30000" pattern — possible typo
	 * for 34103, but left untouched as it may match the original spec. */
	{0, 0, 667, 400, 3456, 33456, 4103, 34106},
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},
};
275
276static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop,
277 bool is_ddr3,
278 int fsb,
279 int mem)
280{
281 const struct cxsr_latency *latency;
282 int i;
283
284 if (fsb == 0 || mem == 0)
285 return NULL;
286
287 for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
288 latency = &cxsr_latency_table[i];
289 if (is_desktop == latency->is_desktop &&
290 is_ddr3 == latency->is_ddr3 &&
291 fsb == latency->fsb_freq && mem == latency->mem_freq)
292 return latency;
293 }
294
295 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
296
297 return NULL;
298}
299
/*
 * Request a DDR DVFS state change from the CHV Punit and wait for the
 * request to be acknowledged.
 *
 * enable == true allows DDR DVFS (clears FORCE_DDR_HIGH_FREQ);
 * enable == false forces the high DDR frequency.  The low-frequency
 * force bit is always cleared, and FORCE_DDR_FREQ_REQ_ACK is set to
 * arm the request/ack handshake; the Punit clears it on completion
 * (polled below with a 3 ms timeout).
 */
static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	vlv_punit_get(dev_priv);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
	if (enable)
		val &= ~FORCE_DDR_HIGH_FREQ;
	else
		val |= FORCE_DDR_HIGH_FREQ;
	val &= ~FORCE_DDR_LOW_FREQ;
	val |= FORCE_DDR_FREQ_REQ_ACK;
	vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

	/* Wait (up to 3 ms) for the Punit to ack by clearing the bit. */
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
		      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
		drm_err(&dev_priv->drm,
			"timed out waiting for Punit DDR DVFS request\n");

	vlv_punit_put(dev_priv);
}
322
323static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
324{
325 u32 val;
326
327 vlv_punit_get(dev_priv);
328
329 val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
330 if (enable)
331 val |= DSP_MAXFIFO_PM5_ENABLE;
332 else
333 val &= ~DSP_MAXFIFO_PM5_ENABLE;
334 vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
335
336 vlv_punit_put(dev_priv);
337}
338
339#define FW_WM(value, plane) \
340 (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
341
/*
 * Low-level CxSR (memory self-refresh) enable/disable, with a
 * platform-specific register/bit per hardware family.
 *
 * Returns the previous enable state so callers can restore it, or
 * false on platforms with no CxSR control (the final else).
 * Caller must hold wm.wm_mutex (see intel_set_memory_cxsr()).
 */
static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	bool was_enabled;
	u32 val;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		was_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
		POSTING_READ(FW_BLC_SELF_VLV);
	} else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
		was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_PINEVIEW(dev_priv)) {
		/* Pineview: self-refresh bit lives in DSPFW3. */
		val = I915_READ(DSPFW3);
		was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
		if (enable)
			val |= PINEVIEW_SELF_REFRESH_EN;
		else
			val &= ~PINEVIEW_SELF_REFRESH_EN;
		I915_WRITE(DSPFW3, val);
		POSTING_READ(DSPFW3);
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
		/* 945: masked-bit register, so only the one bit is touched. */
		was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		I915_WRITE(FW_BLC_SELF, val);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_I915GM(dev_priv)) {
		/*
		 * 915GM: self-refresh is controlled through INSTPM
		 * (masked-bit register).  NOTE(review): original comment
		 * here was stripped — presumably explained why INSTPM is
		 * used instead of FW_BLC_SELF; confirm against bspec.
		 */
		was_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		I915_WRITE(INSTPM, val);
		POSTING_READ(INSTPM);
	} else {
		return false;
	}

	trace_intel_memory_cxsr(dev_priv, was_enabled, enable);

	drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n",
		    enableddisabled(enable),
		    enableddisabled(was_enabled));

	return was_enabled;
}
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
432{
433 bool ret;
434
435 mutex_lock(&dev_priv->wm.wm_mutex);
436 ret = _intel_set_memory_cxsr(dev_priv, enable);
437 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
438 dev_priv->wm.vlv.cxsr = enable;
439 else if (IS_G4X(dev_priv))
440 dev_priv->wm.g4x.cxsr = enable;
441 mutex_unlock(&dev_priv->wm.wm_mutex);
442
443 return ret;
444}
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
/*
 * Worst-case (pessimal) memory latency in nanoseconds, used as a
 * fallback watermark latency.  NOTE(review): the consumers are outside
 * this chunk — confirm usage before changing the value.
 */
static const int pessimal_latency_ns = 5000;
461
/*
 * Assemble a 9-bit FIFO start value: the low 8 bits come from 'dsparb'
 * at 'lo_shift', and the single high bit comes from 'dsparb2' at
 * 'hi_shift'.
 */
#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
	((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
464
/*
 * Read back the per-plane DSPARB FIFO split for a VLV/CHV pipe and
 * record it in the crtc state's fifo_state.
 *
 * Each pipe's primary/sprite0/sprite1 FIFO start points are packed as
 * 9-bit fields spread across DSPARB/DSPARB2/DSPARB3; VLV_FIFO_START()
 * reassembles them.  Plane sizes are derived as the deltas between
 * consecutive start points, with sprite1 extending to entry 511 and
 * the cursor FIFO fixed at 63 entries.
 */
static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
	enum pipe pipe = crtc->pipe;
	int sprite0_start, sprite1_start;

	switch (pipe) {
	u32 dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
		break;
	case PIPE_C:
		/* Pipe C (CHV only) uses DSPARB3 for the low bits. */
		dsparb2 = I915_READ(DSPARB2);
		dsparb3 = I915_READ(DSPARB3);
		sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
		sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
		break;
	default:
		MISSING_CASE(pipe);
		return;
	}

	/* Convert start points into per-plane FIFO sizes. */
	fifo_state->plane[PLANE_PRIMARY] = sprite0_start;
	fifo_state->plane[PLANE_SPRITE0] = sprite1_start - sprite0_start;
	fifo_state->plane[PLANE_SPRITE1] = 511 - sprite1_start;
	fifo_state->plane[PLANE_CURSOR] = 63;
}
503
504static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
505 enum i9xx_plane_id i9xx_plane)
506{
507 u32 dsparb = I915_READ(DSPARB);
508 int size;
509
510 size = dsparb & 0x7f;
511 if (i9xx_plane == PLANE_B)
512 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
513
514 drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
515 dsparb, plane_name(i9xx_plane), size);
516
517 return size;
518}
519
520static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
521 enum i9xx_plane_id i9xx_plane)
522{
523 u32 dsparb = I915_READ(DSPARB);
524 int size;
525
526 size = dsparb & 0x1ff;
527 if (i9xx_plane == PLANE_B)
528 size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
529 size >>= 1;
530
531 drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
532 dsparb, plane_name(i9xx_plane), size);
533
534 return size;
535}
536
537static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
538 enum i9xx_plane_id i9xx_plane)
539{
540 u32 dsparb = I915_READ(DSPARB);
541 int size;
542
543 size = dsparb & 0x7f;
544 size >>= 2;
545
546 drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
547 dsparb, plane_name(i9xx_plane), size);
548
549 return size;
550}
551
552
/* Pineview display plane watermark parameters (HPLL on). */
static const struct intel_watermark_params pnv_display_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

/* Pineview display plane watermark parameters with HPLL disabled. */
static const struct intel_watermark_params pnv_display_hplloff_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

/* Pineview cursor watermark parameters (HPLL on). */
static const struct intel_watermark_params pnv_cursor_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

/* Pineview cursor watermark parameters with HPLL disabled. */
static const struct intel_watermark_params pnv_cursor_hplloff_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

/* i965 cursor watermark parameters. */
static const struct intel_watermark_params i965_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

/* i945 display watermark parameters. */
static const struct intel_watermark_params i945_wm_info = {
	.fifo_size = I945_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

/* i915 display watermark parameters. */
static const struct intel_watermark_params i915_wm_info = {
	.fifo_size = I915_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

/* i830/i855 plane A watermark parameters. */
static const struct intel_watermark_params i830_a_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

/* i830/i855 planes B/C watermark parameters (half the max WM of A). */
static const struct intel_watermark_params i830_bc_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM/2,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

/* i845 display watermark parameters. */
static const struct intel_watermark_params i845_wm_info = {
	.fifo_size = I830_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666static unsigned int intel_wm_method1(unsigned int pixel_rate,
667 unsigned int cpp,
668 unsigned int latency)
669{
670 u64 ret;
671
672 ret = mul_u32_u32(pixel_rate, cpp * latency);
673 ret = DIV_ROUND_UP_ULL(ret, 10000);
674
675 return ret;
676}
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
/*
 * Watermark "method 2": estimate the whole lines fetched during the
 * latency window and charge a full line's worth of bytes per line
 * (lines + 1, rounding up to the next line boundary).
 */
static unsigned int intel_wm_method2(unsigned int pixel_rate,
				     unsigned int htotal,
				     unsigned int width,
				     unsigned int cpp,
				     unsigned int latency)
{
	unsigned int lines;

	/* Guard against division by zero from bogus mode data. */
	if (WARN_ON_ONCE(htotal == 0))
		htotal = 1;

	lines = (latency * pixel_rate) / (htotal * 10000);

	return (lines + 1) * width * cpp;
}
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
/*
 * Compute a legacy (pre-gen4-style) watermark value: the number of FIFO
 * entries that may drain before the display engine must start fetching
 * again.
 *
 * @pixel_rate: mode pixel rate
 * @wm: per-platform watermark parameters (FIFO/guard/cacheline sizes)
 * @fifo_size: FIFO size to budget against
 * @cpp: bytes per pixel
 * @latency_ns: memory latency in nanoseconds
 *
 * The watermark is the FIFO size minus the entries consumed during the
 * latency window (plus a guard band).  The clamping order below is
 * deliberate: an over-large value is clamped to max_wm first, and a
 * non-positive value falls back to default_wm.
 */
static unsigned int intel_calculate_wm(int pixel_rate,
				       const struct intel_watermark_params *wm,
				       int fifo_size, int cpp,
				       unsigned int latency_ns)
{
	int entries, wm_size;

	/*
	 * Bytes needed during the latency window, converted to FIFO
	 * cachelines (rounded up) plus the guard band.  latency_ns / 100
	 * matches intel_wm_method1()'s scaling.
	 */
	entries = intel_wm_method1(pixel_rate, cpp,
				   latency_ns / 100);
	entries = DIV_ROUND_UP(entries, wm->cacheline_size) +
		wm->guard_size;
	DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries);

	wm_size = fifo_size - entries;
	DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);

	/* Clamp to the platform maximum. */
	if (wm_size > wm->max_wm)
		wm_size = wm->max_wm;
	/* FIFO too small for this mode: fall back to the default WM. */
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	/*
	 * Enforce a floor of 8 entries.  NOTE(review): the original
	 * rationale comment was stripped — presumably a hardware quirk
	 * with very small watermark values; confirm before lowering.
	 */
	if (wm_size <= 8)
		wm_size = 8;

	return wm_size;
}
788
/* True when a value transitions from >= threshold to below it. */
static bool is_disabling(int old, int new, int threshold)
{
	return new < threshold && old >= threshold;
}
793
/* True when a value transitions from below threshold to >= it. */
static bool is_enabling(int old, int new, int threshold)
{
	return new >= threshold && old < threshold;
}
798
/* Number of watermark levels: highest level index + 1. */
static int intel_wm_num_levels(struct drm_i915_private *dev_priv)
{
	return dev_priv->wm.max_level + 1;
}
803
804static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
805 const struct intel_plane_state *plane_state)
806{
807 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
808
809
810 if (!crtc_state->hw.active)
811 return false;
812
813
814
815
816
817
818
819
820
821 if (plane->id == PLANE_CURSOR)
822 return plane_state->hw.fb != NULL;
823 else
824 return plane_state->uapi.visible;
825}
826
/*
 * Conservative "is this crtc really active" check for the legacy
 * watermark paths.
 *
 * Requires an active crtc, an attached primary framebuffer and a
 * non-zero pixel clock.  NOTE(review): the original multi-line comment
 * here was stripped — it presumably explained why the fb and clock
 * checks are needed on top of crtc->active (partial hardware state
 * during takeover); keep all three conditions.
 */
static bool intel_crtc_active(struct intel_crtc *crtc)
{
	return crtc->active && crtc->base.primary->state->fb &&
		crtc->config->hw.adjusted_mode.crtc_clock;
}
845
846static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
847{
848 struct intel_crtc *crtc, *enabled = NULL;
849
850 for_each_intel_crtc(&dev_priv->drm, crtc) {
851 if (intel_crtc_active(crtc)) {
852 if (enabled)
853 return NULL;
854 enabled = crtc;
855 }
856 }
857
858 return enabled;
859}
860
/*
 * Program Pineview watermarks (DSPFW1/DSPFW3) and toggle CxSR.
 *
 * CxSR is only enabled when exactly one crtc is active and the
 * platform's latency entry is known; otherwise it is disabled.
 */
static void pnv_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	struct intel_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned int wm;

	latency = intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
					 dev_priv->is_ddr3,
					 dev_priv->fsb_freq,
					 dev_priv->mem_freq);
	if (!latency) {
		drm_dbg_kms(&dev_priv->drm,
			    "Unknown FSB/MEM found, disable CxSR\n");
		intel_set_memory_cxsr(dev_priv, false);
		return;
	}

	crtc = single_enabled_crtc(dev_priv);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->hw.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp = fb->format->cpp[0];
		int clock = adjusted_mode->crtc_clock;

		/* Display SR watermark */
		wm = intel_calculate_wm(clock, &pnv_display_wm,
					pnv_display_wm.fifo_size,
					cpp, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= FW_WM(wm, SR);
		I915_WRITE(DSPFW1, reg);
		drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg);

		/*
		 * Cursor SR watermark.  NOTE(review): budgets against the
		 * *display* FIFO size, not pnv_cursor_wm.fifo_size —
		 * looks intentional but worth confirming against bspec.
		 */
		wm = intel_calculate_wm(clock, &pnv_cursor_wm,
					pnv_display_wm.fifo_size,
					4, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= FW_WM(wm, CURSOR_SR);
		I915_WRITE(DSPFW3, reg);

		/* Display watermark with HPLL disabled */
		wm = intel_calculate_wm(clock, &pnv_display_hplloff_wm,
					pnv_display_hplloff_wm.fifo_size,
					cpp, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= FW_WM(wm, HPLL_SR);
		I915_WRITE(DSPFW3, reg);

		/* Cursor watermark with HPLL disabled */
		wm = intel_calculate_wm(clock, &pnv_cursor_hplloff_wm,
					pnv_display_hplloff_wm.fifo_size,
					4, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= FW_WM(wm, HPLL_CURSOR);
		I915_WRITE(DSPFW3, reg);
		drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg);

		intel_set_memory_cxsr(dev_priv, true);
	} else {
		intel_set_memory_cxsr(dev_priv, false);
	}
}
932
933
934
935
936
937
938
939
940
941
942
/*
 * G4X TLB-miss workaround adjustment: extra FIFO entries (in bytes, see
 * caller's DIV_ROUND_UP by 64) when the plane doesn't fill the FIFO.
 * fifo_size is in 64-byte entries; width * cpp * 8 is the plane's
 * footprint in the same byte units.  Clamped at zero.
 */
static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
{
	int tlb_miss = fifo_size * 64 - width * cpp * 8;

	return tlb_miss > 0 ? tlb_miss : 0;
}
949
/*
 * Flush a complete set of G4X watermark values to DSPFW1-3, emitting a
 * trace event per pipe first.  The trailing posting read flushes the
 * writes.
 */
static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
				const struct g4x_wm_values *wm)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		trace_g4x_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);

	I915_WRITE(DSPFW1,
		   FW_WM(wm->sr.plane, SR) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
	I915_WRITE(DSPFW2,
		   (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
		   FW_WM(wm->sr.fbc, FBC_SR) |
		   FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
	I915_WRITE(DSPFW3,
		   (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
		   FW_WM(wm->sr.cursor, CURSOR_SR) |
		   FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
		   FW_WM(wm->hpll.plane, HPLL_SR));

	POSTING_READ(DSPFW1);
}
978
979#define FW_WM_VLV(value, plane) \
980 (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
981
/*
 * Flush a complete set of VLV/CHV watermark values to the hardware:
 * per-pipe drain-latency (DDL) registers first, then the DSPFW
 * registers.  CHV additionally has pipe C and its own high-order-bit
 * register layout.
 */
static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
				const struct vlv_wm_values *wm)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		trace_vlv_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);

		I915_WRITE(VLV_DDL(pipe),
			   (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
			   (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
			   (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
			   (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
	}

	/*
	 * Zero the unused watermark registers up front so no stale
	 * high-order bits survive the reprogramming below.
	 */
	I915_WRITE(DSPHOWM, 0);
	I915_WRITE(DSPHOWM1, 0);
	I915_WRITE(DSPFW4, 0);
	I915_WRITE(DSPFW5, 0);
	I915_WRITE(DSPFW6, 0);

	I915_WRITE(DSPFW1,
		   FW_WM(wm->sr.plane, SR) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
		   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
	I915_WRITE(DSPFW2,
		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
	I915_WRITE(DSPFW3,
		   FW_WM(wm->sr.cursor, CURSOR_SR));

	if (IS_CHERRYVIEW(dev_priv)) {
		/* CHV: pipe B sprites, all of pipe C, and the HI bits. */
		I915_WRITE(DSPFW7_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
		I915_WRITE(DSPFW8_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
		I915_WRITE(DSPFW9_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
	} else {
		/* VLV: only pipes A/B exist. */
		I915_WRITE(DSPFW7,
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
	}

	POSTING_READ(DSPFW1);
}
1057
1058#undef FW_WM_VLV
1059
/*
 * Hard-coded G4X watermark latencies per level.  NOTE(review): values
 * are presumably in usec (g4x_compute_wm() multiplies by 10 before
 * feeding intel_wm_method1/2, which divide by 10^4) — confirm.
 */
static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	dev_priv->wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
	dev_priv->wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
	dev_priv->wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;

	dev_priv->wm.max_level = G4X_WM_LEVEL_HPLL;
}
1069
1070static int g4x_plane_fifo_size(enum plane_id plane_id, int level)
1071{
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086 switch (plane_id) {
1087 case PLANE_CURSOR:
1088 return 63;
1089 case PLANE_PRIMARY:
1090 return level == G4X_WM_LEVEL_NORMAL ? 127 : 511;
1091 case PLANE_SPRITE0:
1092 return level == G4X_WM_LEVEL_NORMAL ? 127 : 0;
1093 default:
1094 MISSING_CASE(plane_id);
1095 return 0;
1096 }
1097}
1098
1099static int g4x_fbc_fifo_size(int level)
1100{
1101 switch (level) {
1102 case G4X_WM_LEVEL_SR:
1103 return 7;
1104 case G4X_WM_LEVEL_HPLL:
1105 return 15;
1106 default:
1107 MISSING_CASE(level);
1108 return 0;
1109 }
1110}
1111
/*
 * Compute a single plane's G4X watermark (in FIFO cachelines) for a
 * given level.
 *
 * Returns USHRT_MAX when the level's latency is unprogrammed (treated
 * as "level unusable" by callers) and 0 for invisible planes.
 */
static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *plane_state,
			  int level)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	/* latency scaled by 10 to match the method1/method2 units */
	unsigned int latency = dev_priv->wm.pri_latency[level] * 10;
	unsigned int clock, htotal, cpp, width, wm;

	if (latency == 0)
		return USHRT_MAX;

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	cpp = plane_state->hw.fb->format->cpp[0];

	/*
	 * GM45 primary plane at SR/HPLL levels: budget at least 4 bytes
	 * per pixel.  NOTE(review): the original workaround annotation
	 * was stripped — confirm the Wa name against bspec.
	 */
	if (IS_GM45(dev_priv) && plane->id == PLANE_PRIMARY &&
	    level != G4X_WM_LEVEL_NORMAL)
		cpp = max(cpp, 4u);

	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;

	width = drm_rect_width(&plane_state->uapi.dst);

	if (plane->id == PLANE_CURSOR) {
		wm = intel_wm_method2(clock, htotal, width, cpp, latency);
	} else if (plane->id == PLANE_PRIMARY &&
		   level == G4X_WM_LEVEL_NORMAL) {
		wm = intel_wm_method1(clock, cpp, latency);
	} else {
		unsigned int small, large;

		/* Other cases: the smaller of the two estimates wins. */
		small = intel_wm_method1(clock, cpp, latency);
		large = intel_wm_method2(clock, htotal, width, cpp, latency);

		wm = min(small, large);
	}

	/* TLB-miss workaround padding for underfilled FIFOs. */
	wm += g4x_tlb_miss_wa(g4x_plane_fifo_size(plane->id, level),
			      width, cpp);

	/* Convert bytes to 64-byte FIFO lines, plus a 2-line guard. */
	wm = DIV_ROUND_UP(wm, 64) + 2;

	return min_t(unsigned int, wm, USHRT_MAX);
}
1172
/*
 * Set a plane's raw watermark to 'value' at 'level' and every level
 * above it (higher levels must never have a smaller requirement).
 *
 * Returns true if any stored value actually changed.
 */
static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
				 int level, enum plane_id plane_id, u16 value)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	bool dirty = false;

	for (; level < intel_wm_num_levels(dev_priv); level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];

		dirty |= raw->plane[plane_id] != value;
		raw->plane[plane_id] = value;
	}

	return dirty;
}
1188
/*
 * Set the raw FBC watermark to 'value' at 'level' and every level
 * above it.  FBC watermarks only exist from the SR level upwards, so
 * the starting level is clamped accordingly.
 *
 * Returns true if any stored value actually changed.
 */
static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
			       int level, u16 value)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	bool dirty = false;

	/* NORMAL level has no FBC watermark */
	level = max(level, G4X_WM_LEVEL_SR);

	for (; level < intel_wm_num_levels(dev_priv); level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];

		dirty |= raw->fbc != value;
		raw->fbc = value;
	}

	return dirty;
}
1207
1208static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
1209 const struct intel_plane_state *plane_state,
1210 u32 pri_val);
1211
/*
 * Recompute one plane's raw watermarks at every level for the given
 * crtc state.
 *
 * Invisible planes get all levels zeroed.  Otherwise levels are filled
 * bottom-up until one exceeds the plane's FIFO budget; that level and
 * everything above it are marked unusable (USHRT_MAX).  The primary
 * plane additionally carries the FBC watermark.
 *
 * Returns true if any raw value changed.
 */
static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
				     const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
	enum plane_id plane_id = plane->id;
	bool dirty = false;
	int level;

	if (!intel_wm_plane_visible(crtc_state, plane_state)) {
		dirty |= g4x_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
		if (plane_id == PLANE_PRIMARY)
			dirty |= g4x_raw_fbc_wm_set(crtc_state, 0, 0);
		goto out;
	}

	for (level = 0; level < num_levels; level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
		int wm, max_wm;

		wm = g4x_compute_wm(crtc_state, plane_state, level);
		max_wm = g4x_plane_fifo_size(plane_id, level);

		/* First level over budget: stop; higher levels can only grow. */
		if (wm > max_wm)
			break;

		dirty |= raw->plane[plane_id] != wm;
		raw->plane[plane_id] = wm;

		/* FBC watermark only applies to the primary, above NORMAL. */
		if (plane_id != PLANE_PRIMARY ||
		    level == G4X_WM_LEVEL_NORMAL)
			continue;

		wm = ilk_compute_fbc_wm(crtc_state, plane_state,
					raw->plane[plane_id]);
		max_wm = g4x_fbc_fifo_size(level);

		/*
		 * An over-budget FBC watermark is recorded as USHRT_MAX
		 * rather than stopping the loop — the plane watermark for
		 * this level is still valid, only FBC is not.
		 */
		if (wm > max_wm)
			wm = USHRT_MAX;

		dirty |= raw->fbc != wm;
		raw->fbc = wm;
	}

	/* Mark every remaining (over-budget) level unusable. */
	dirty |= g4x_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);

	if (plane_id == PLANE_PRIMARY)
		dirty |= g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);

 out:
	if (dirty) {
		drm_dbg_kms(&dev_priv->drm,
			    "%s watermarks: normal=%d, SR=%d, HPLL=%d\n",
			    plane->base.name,
			    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id],
			    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id],
			    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]);

		if (plane_id == PLANE_PRIMARY)
			drm_dbg_kms(&dev_priv->drm,
				    "FBC watermarks: SR=%d, HPLL=%d\n",
				    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc,
				    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc);
	}

	return dirty;
}
1285
/* A raw plane watermark is usable when it fits the plane's FIFO budget. */
static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
				      enum plane_id plane_id, int level)
{
	const struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];

	return raw->plane[plane_id] <= g4x_plane_fifo_size(plane_id, level);
}
1293
/*
 * A level is usable for the whole crtc only if it exists on this
 * platform and every plane's raw watermark fits at that level.
 */
static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
				     int level)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (level > dev_priv->wm.max_level)
		return false;

	return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
		g4x_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
		g4x_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
}
1306
1307
/*
 * Mark watermark level 'level' and everything above it as unusable in
 * the given wm_state (USHRT_MAX values, and the corresponding
 * cxsr/hpll enables cleared).
 */
static void g4x_invalidate_wms(struct intel_crtc *crtc,
			       struct g4x_wm_state *wm_state, int level)
{
	if (level <= G4X_WM_LEVEL_NORMAL) {
		enum plane_id plane_id;

		for_each_plane_id_on_crtc(crtc, plane_id)
			wm_state->wm.plane[plane_id] = USHRT_MAX;
	}

	if (level <= G4X_WM_LEVEL_SR) {
		wm_state->cxsr = false;
		wm_state->sr.cursor = USHRT_MAX;
		wm_state->sr.plane = USHRT_MAX;
		wm_state->sr.fbc = USHRT_MAX;
	}

	if (level <= G4X_WM_LEVEL_HPLL) {
		wm_state->hpll_en = false;
		wm_state->hpll.cursor = USHRT_MAX;
		wm_state->hpll.plane = USHRT_MAX;
		wm_state->hpll.fbc = USHRT_MAX;
	}
}
1332
/*
 * Compute the optimal g4x watermark state for this crtc: recompute the
 * raw watermarks for every plane touched by the atomic state, then
 * promote as many levels (NORMAL -> SR -> HPLL) into the optimal state
 * as the raw values allow. Returns 0 on success, -EINVAL when even
 * the NORMAL level does not fit the FIFOs.
 */
static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->uapi.state);
	struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
	int num_active_planes = hweight8(crtc_state->active_planes &
					 ~BIT(PLANE_CURSOR));
	const struct g4x_pipe_wm *raw;
	const struct intel_plane_state *old_plane_state;
	const struct intel_plane_state *new_plane_state;
	struct intel_plane *plane;
	enum plane_id plane_id;
	int i, level;
	unsigned int dirty = 0;

	/* Refresh raw watermarks for every plane entering or leaving this crtc. */
	for_each_oldnew_intel_plane_in_state(state, plane,
					     old_plane_state,
					     new_plane_state, i) {
		if (new_plane_state->hw.crtc != &crtc->base &&
		    old_plane_state->hw.crtc != &crtc->base)
			continue;

		if (g4x_raw_plane_wm_compute(crtc_state, new_plane_state))
			dirty |= BIT(plane->id);
	}

	/* Nothing changed -> the previously computed optimal state still holds. */
	if (!dirty)
		return 0;

	level = G4X_WM_LEVEL_NORMAL;
	if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
		goto out;

	/* NORMAL level is usable: copy the raw per-plane values in. */
	raw = &crtc_state->wm.g4x.raw[level];
	for_each_plane_id_on_crtc(crtc, plane_id)
		wm_state->wm.plane[plane_id] = raw->plane[plane_id];

	level = G4X_WM_LEVEL_SR;

	if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
		goto out;

	raw = &crtc_state->wm.g4x.raw[level];
	wm_state->sr.plane = raw->plane[PLANE_PRIMARY];
	wm_state->sr.cursor = raw->plane[PLANE_CURSOR];
	wm_state->sr.fbc = raw->fbc;

	/*
	 * cxsr only with the primary plane as the sole active non-cursor
	 * plane (num_active_planes excludes the cursor; BIT(PLANE_PRIMARY)
	 * == 1 here, so this is a count-of-one check).
	 */
	wm_state->cxsr = num_active_planes == BIT(PLANE_PRIMARY);

	level = G4X_WM_LEVEL_HPLL;

	if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
		goto out;

	raw = &crtc_state->wm.g4x.raw[level];
	wm_state->hpll.plane = raw->plane[PLANE_PRIMARY];
	wm_state->hpll.cursor = raw->plane[PLANE_CURSOR];
	wm_state->hpll.fbc = raw->fbc;

	wm_state->hpll_en = wm_state->cxsr;

	level++;

 out:
	/* Not even the NORMAL watermarks fit -> fail the state. */
	if (level == G4X_WM_LEVEL_NORMAL)
		return -EINVAL;

	/* Invalidate every level we could not reach. */
	g4x_invalidate_wms(crtc, wm_state, level);

	/*
	 * FBC compression ratio is not known in advance, so the FBC
	 * watermarks can only be used when they fit the FIFO at every
	 * enabled level; otherwise disable FBC watermarks entirely.
	 */
	wm_state->fbc_en = level > G4X_WM_LEVEL_NORMAL;

	if (level >= G4X_WM_LEVEL_SR &&
	    wm_state->sr.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_SR))
		wm_state->fbc_en = false;
	else if (level >= G4X_WM_LEVEL_HPLL &&
		 wm_state->hpll.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_HPLL))
		wm_state->fbc_en = false;

	return 0;
}
1421
1422static int g4x_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
1423{
1424 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
1425 struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate;
1426 const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal;
1427 struct intel_atomic_state *intel_state =
1428 to_intel_atomic_state(new_crtc_state->uapi.state);
1429 const struct intel_crtc_state *old_crtc_state =
1430 intel_atomic_get_old_crtc_state(intel_state, crtc);
1431 const struct g4x_wm_state *active = &old_crtc_state->wm.g4x.optimal;
1432 enum plane_id plane_id;
1433
1434 if (!new_crtc_state->hw.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) {
1435 *intermediate = *optimal;
1436
1437 intermediate->cxsr = false;
1438 intermediate->hpll_en = false;
1439 goto out;
1440 }
1441
1442 intermediate->cxsr = optimal->cxsr && active->cxsr &&
1443 !new_crtc_state->disable_cxsr;
1444 intermediate->hpll_en = optimal->hpll_en && active->hpll_en &&
1445 !new_crtc_state->disable_cxsr;
1446 intermediate->fbc_en = optimal->fbc_en && active->fbc_en;
1447
1448 for_each_plane_id_on_crtc(crtc, plane_id) {
1449 intermediate->wm.plane[plane_id] =
1450 max(optimal->wm.plane[plane_id],
1451 active->wm.plane[plane_id]);
1452
1453 WARN_ON(intermediate->wm.plane[plane_id] >
1454 g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL));
1455 }
1456
1457 intermediate->sr.plane = max(optimal->sr.plane,
1458 active->sr.plane);
1459 intermediate->sr.cursor = max(optimal->sr.cursor,
1460 active->sr.cursor);
1461 intermediate->sr.fbc = max(optimal->sr.fbc,
1462 active->sr.fbc);
1463
1464 intermediate->hpll.plane = max(optimal->hpll.plane,
1465 active->hpll.plane);
1466 intermediate->hpll.cursor = max(optimal->hpll.cursor,
1467 active->hpll.cursor);
1468 intermediate->hpll.fbc = max(optimal->hpll.fbc,
1469 active->hpll.fbc);
1470
1471 WARN_ON((intermediate->sr.plane >
1472 g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) ||
1473 intermediate->sr.cursor >
1474 g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) &&
1475 intermediate->cxsr);
1476 WARN_ON((intermediate->sr.plane >
1477 g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) ||
1478 intermediate->sr.cursor >
1479 g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) &&
1480 intermediate->hpll_en);
1481
1482 WARN_ON(intermediate->sr.fbc > g4x_fbc_fifo_size(1) &&
1483 intermediate->fbc_en && intermediate->cxsr);
1484 WARN_ON(intermediate->hpll.fbc > g4x_fbc_fifo_size(2) &&
1485 intermediate->fbc_en && intermediate->hpll_en);
1486
1487out:
1488
1489
1490
1491
1492 if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
1493 new_crtc_state->wm.need_postvbl_update = true;
1494
1495 return 0;
1496}
1497
1498static void g4x_merge_wm(struct drm_i915_private *dev_priv,
1499 struct g4x_wm_values *wm)
1500{
1501 struct intel_crtc *crtc;
1502 int num_active_pipes = 0;
1503
1504 wm->cxsr = true;
1505 wm->hpll_en = true;
1506 wm->fbc_en = true;
1507
1508 for_each_intel_crtc(&dev_priv->drm, crtc) {
1509 const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
1510
1511 if (!crtc->active)
1512 continue;
1513
1514 if (!wm_state->cxsr)
1515 wm->cxsr = false;
1516 if (!wm_state->hpll_en)
1517 wm->hpll_en = false;
1518 if (!wm_state->fbc_en)
1519 wm->fbc_en = false;
1520
1521 num_active_pipes++;
1522 }
1523
1524 if (num_active_pipes != 1) {
1525 wm->cxsr = false;
1526 wm->hpll_en = false;
1527 wm->fbc_en = false;
1528 }
1529
1530 for_each_intel_crtc(&dev_priv->drm, crtc) {
1531 const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
1532 enum pipe pipe = crtc->pipe;
1533
1534 wm->pipe[pipe] = wm_state->wm;
1535 if (crtc->active && wm->cxsr)
1536 wm->sr = wm_state->sr;
1537 if (crtc->active && wm->hpll_en)
1538 wm->hpll = wm_state->hpll;
1539 }
1540}
1541
1542static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
1543{
1544 struct g4x_wm_values *old_wm = &dev_priv->wm.g4x;
1545 struct g4x_wm_values new_wm = {};
1546
1547 g4x_merge_wm(dev_priv, &new_wm);
1548
1549 if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
1550 return;
1551
1552 if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
1553 _intel_set_memory_cxsr(dev_priv, false);
1554
1555 g4x_write_wm_values(dev_priv, &new_wm);
1556
1557 if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
1558 _intel_set_memory_cxsr(dev_priv, true);
1559
1560 *old_wm = new_wm;
1561}
1562
1563static void g4x_initial_watermarks(struct intel_atomic_state *state,
1564 struct intel_crtc *crtc)
1565{
1566 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1567 const struct intel_crtc_state *crtc_state =
1568 intel_atomic_get_new_crtc_state(state, crtc);
1569
1570 mutex_lock(&dev_priv->wm.wm_mutex);
1571 crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate;
1572 g4x_program_watermarks(dev_priv);
1573 mutex_unlock(&dev_priv->wm.wm_mutex);
1574}
1575
1576static void g4x_optimize_watermarks(struct intel_atomic_state *state,
1577 struct intel_crtc *crtc)
1578{
1579 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1580 const struct intel_crtc_state *crtc_state =
1581 intel_atomic_get_new_crtc_state(state, crtc);
1582
1583 if (!crtc_state->wm.need_postvbl_update)
1584 return;
1585
1586 mutex_lock(&dev_priv->wm.wm_mutex);
1587 crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
1588 g4x_program_watermarks(dev_priv);
1589 mutex_unlock(&dev_priv->wm.wm_mutex);
1590}
1591
1592
/*
 * "Method 2" watermark for VLV/CHV: the generic computation, with the
 * result converted into units of 64 (rounded up).
 */
static unsigned int vlv_wm_method2(unsigned int pixel_rate,
				   unsigned int htotal,
				   unsigned int width,
				   unsigned int cpp,
				   unsigned int latency)
{
	return DIV_ROUND_UP(intel_wm_method2(pixel_rate, htotal,
					     width, cpp, latency), 64);
}
1607
1608static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
1609{
1610
1611 dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
1612
1613 dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;
1614
1615 if (IS_CHERRYVIEW(dev_priv)) {
1616 dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
1617 dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
1618
1619 dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
1620 }
1621}
1622
1623static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
1624 const struct intel_plane_state *plane_state,
1625 int level)
1626{
1627 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1628 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1629 const struct drm_display_mode *adjusted_mode =
1630 &crtc_state->hw.adjusted_mode;
1631 unsigned int clock, htotal, cpp, width, wm;
1632
1633 if (dev_priv->wm.pri_latency[level] == 0)
1634 return USHRT_MAX;
1635
1636 if (!intel_wm_plane_visible(crtc_state, plane_state))
1637 return 0;
1638
1639 cpp = plane_state->hw.fb->format->cpp[0];
1640 clock = adjusted_mode->crtc_clock;
1641 htotal = adjusted_mode->crtc_htotal;
1642 width = crtc_state->pipe_src_w;
1643
1644 if (plane->id == PLANE_CURSOR) {
1645
1646
1647
1648
1649
1650
1651 wm = 63;
1652 } else {
1653 wm = vlv_wm_method2(clock, htotal, width, cpp,
1654 dev_priv->wm.pri_latency[level] * 10);
1655 }
1656
1657 return min_t(unsigned int, wm, USHRT_MAX);
1658}
1659
1660static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
1661{
1662 return (active_planes & (BIT(PLANE_SPRITE0) |
1663 BIT(PLANE_SPRITE1))) == BIT(PLANE_SPRITE1);
1664}
1665
/*
 * Carve the 511-entry pipe FIFO between the primary/sprite planes,
 * proportionally to their PM2-level raw watermarks, and hand out any
 * leftover space evenly. The cursor has its own fixed 63-entry FIFO.
 * Returns -EINVAL if the PM2 watermarks exceed the FIFO altogether.
 */
static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct g4x_pipe_wm *raw =
		&crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
	struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
	unsigned int active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
	int num_active_planes = hweight8(active_planes);
	const int fifo_size = 511;
	int fifo_extra, fifo_left = fifo_size;
	int sprite0_fifo_extra = 0;
	unsigned int total_rate;
	enum plane_id plane_id;

	/*
	 * When sprite1 is the only enabled sprite, reserve one extra
	 * FIFO entry for sprite0 (see vlv_need_sprite0_fifo_workaround();
	 * presumably a hardware quirk requires sprite0 to have a nonzero
	 * allocation in that configuration -- TODO confirm against bspec).
	 */
	if (vlv_need_sprite0_fifo_workaround(active_planes))
		sprite0_fifo_extra = 1;

	total_rate = raw->plane[PLANE_PRIMARY] +
		raw->plane[PLANE_SPRITE0] +
		raw->plane[PLANE_SPRITE1] +
		sprite0_fifo_extra;

	if (total_rate > fifo_size)
		return -EINVAL;

	/* avoid dividing by zero below */
	if (total_rate == 0)
		total_rate = 1;

	/* proportional split based on the PM2 raw watermarks */
	for_each_plane_id_on_crtc(crtc, plane_id) {
		unsigned int rate;

		if ((active_planes & BIT(plane_id)) == 0) {
			fifo_state->plane[plane_id] = 0;
			continue;
		}

		rate = raw->plane[plane_id];
		fifo_state->plane[plane_id] = fifo_size * rate / total_rate;
		fifo_left -= fifo_state->plane[plane_id];
	}

	fifo_state->plane[PLANE_SPRITE0] += sprite0_fifo_extra;
	fifo_left -= sprite0_fifo_extra;

	/* cursor FIFO is separate and fixed */
	fifo_state->plane[PLANE_CURSOR] = 63;

	fifo_extra = DIV_ROUND_UP(fifo_left, num_active_planes ?: 1);

	/* spread the remainder evenly across the active planes */
	for_each_plane_id_on_crtc(crtc, plane_id) {
		int plane_extra;

		if (fifo_left == 0)
			break;

		if ((active_planes & BIT(plane_id)) == 0)
			continue;

		plane_extra = min(fifo_extra, fifo_left);
		fifo_state->plane[plane_id] += plane_extra;
		fifo_left -= plane_extra;
	}

	WARN_ON(active_planes != 0 && fifo_left != 0);

	/* give it all to the first plane if none are active */
	if (active_planes == 0) {
		WARN_ON(fifo_left != fifo_size);
		fifo_state->plane[PLANE_PRIMARY] = fifo_left;
	}

	return 0;
}
1747
1748
1749static void vlv_invalidate_wms(struct intel_crtc *crtc,
1750 struct vlv_wm_state *wm_state, int level)
1751{
1752 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1753
1754 for (; level < intel_wm_num_levels(dev_priv); level++) {
1755 enum plane_id plane_id;
1756
1757 for_each_plane_id_on_crtc(crtc, plane_id)
1758 wm_state->wm[level].plane[plane_id] = USHRT_MAX;
1759
1760 wm_state->sr[level].cursor = USHRT_MAX;
1761 wm_state->sr[level].plane = USHRT_MAX;
1762 }
1763}
1764
1765static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
1766{
1767 if (wm > fifo_size)
1768 return USHRT_MAX;
1769 else
1770 return fifo_size - wm;
1771}
1772
1773
1774
1775
1776
1777static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
1778 int level, enum plane_id plane_id, u16 value)
1779{
1780 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1781 int num_levels = intel_wm_num_levels(dev_priv);
1782 bool dirty = false;
1783
1784 for (; level < num_levels; level++) {
1785 struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
1786
1787 dirty |= raw->plane[plane_id] != value;
1788 raw->plane[plane_id] = value;
1789 }
1790
1791 return dirty;
1792}
1793
/*
 * Recompute the raw watermarks of one plane for every level. Levels
 * whose computed watermark exceeds the hardware maximum (63 entries
 * for the cursor, 511 otherwise) and everything above them are marked
 * invalid (USHRT_MAX). Returns true if any stored value changed.
 */
static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
				     const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	enum plane_id plane_id = plane->id;
	int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
	int level;
	bool dirty = false;

	/* invisible planes need no FIFO entries at any level */
	if (!intel_wm_plane_visible(crtc_state, plane_state)) {
		dirty |= vlv_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
		goto out;
	}

	for (level = 0; level < num_levels; level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
		int wm = vlv_compute_wm_level(crtc_state, plane_state, level);
		int max_wm = plane_id == PLANE_CURSOR ? 63 : 511;

		/* levels are ordered: once one overflows, stop here */
		if (wm > max_wm)
			break;

		dirty |= raw->plane[plane_id] != wm;
		raw->plane[plane_id] = wm;
	}

	/* mark the level we stopped at, and all above it, as invalid */
	dirty |= vlv_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);

out:
	if (dirty)
		drm_dbg_kms(&dev_priv->drm,
			    "%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n",
			    plane->base.name,
			    crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
			    crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id],
			    crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]);

	return dirty;
}
1835
1836static bool vlv_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
1837 enum plane_id plane_id, int level)
1838{
1839 const struct g4x_pipe_wm *raw =
1840 &crtc_state->wm.vlv.raw[level];
1841 const struct vlv_fifo_state *fifo_state =
1842 &crtc_state->wm.vlv.fifo_state;
1843
1844 return raw->plane[plane_id] <= fifo_state->plane[plane_id];
1845}
1846
1847static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, int level)
1848{
1849 return vlv_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
1850 vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
1851 vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE1, level) &&
1852 vlv_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
1853}
1854
/*
 * Compute the optimal VLV/CHV watermark state for this crtc: refresh
 * the raw per-plane watermarks, redistribute the FIFO if needed, then
 * fill in the inverted watermarks for every level that still fits.
 * Returns 0 on success, -EINVAL when not even level 0 fits.
 */
static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->uapi.state);
	struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
	const struct vlv_fifo_state *fifo_state =
		&crtc_state->wm.vlv.fifo_state;
	int num_active_planes = hweight8(crtc_state->active_planes &
					 ~BIT(PLANE_CURSOR));
	bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->uapi);
	const struct intel_plane_state *old_plane_state;
	const struct intel_plane_state *new_plane_state;
	struct intel_plane *plane;
	enum plane_id plane_id;
	int level, ret, i;
	unsigned int dirty = 0;

	/* refresh raw watermarks for every plane entering or leaving this crtc */
	for_each_oldnew_intel_plane_in_state(state, plane,
					     old_plane_state,
					     new_plane_state, i) {
		if (new_plane_state->hw.crtc != &crtc->base &&
		    old_plane_state->hw.crtc != &crtc->base)
			continue;

		if (vlv_raw_plane_wm_compute(crtc_state, new_plane_state))
			dirty |= BIT(plane->id);
	}

	/*
	 * A modeset always forces a FIFO reprogram, even when the raw
	 * watermarks are unchanged.
	 */
	if (needs_modeset)
		crtc_state->fifo_changed = true;

	if (!dirty)
		return 0;

	/* cursor changes alone don't affect the FIFO split (fixed 63 entries) */
	if (dirty & ~BIT(PLANE_CURSOR)) {
		const struct intel_crtc_state *old_crtc_state =
			intel_atomic_get_old_crtc_state(state, crtc);
		const struct vlv_fifo_state *old_fifo_state =
			&old_crtc_state->wm.vlv.fifo_state;

		ret = vlv_compute_fifo(crtc_state);
		if (ret)
			return ret;

		if (needs_modeset ||
		    memcmp(old_fifo_state, fifo_state,
			   sizeof(*fifo_state)) != 0)
			crtc_state->fifo_changed = true;
	}

	/* initially allow all levels */
	wm_state->num_levels = intel_wm_num_levels(dev_priv);
	/*
	 * cxsr only with exactly one active primary/sprite plane, and
	 * never on pipe C (NOTE(review): presumably a hardware
	 * restriction -- confirm against bspec).
	 */
	wm_state->cxsr = crtc->pipe != PIPE_C && num_active_planes == 1;

	for (level = 0; level < wm_state->num_levels; level++) {
		const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
		const int sr_fifo_size = INTEL_NUM_PIPES(dev_priv) * 512 - 1;

		/* stop at the first level that doesn't fit the FIFO */
		if (!vlv_raw_crtc_wm_is_valid(crtc_state, level))
			break;

		for_each_plane_id_on_crtc(crtc, plane_id) {
			wm_state->wm[level].plane[plane_id] =
				vlv_invert_wm_value(raw->plane[plane_id],
						    fifo_state->plane[plane_id]);
		}

		/* SR watermark covers the worst of the non-cursor planes */
		wm_state->sr[level].plane =
			vlv_invert_wm_value(max3(raw->plane[PLANE_PRIMARY],
						 raw->plane[PLANE_SPRITE0],
						 raw->plane[PLANE_SPRITE1]),
					    sr_fifo_size);

		wm_state->sr[level].cursor =
			vlv_invert_wm_value(raw->plane[PLANE_CURSOR],
					    63);
	}

	if (level == 0)
		return -EINVAL;

	/* limit to only levels we can actually handle */
	wm_state->num_levels = level;

	/* invalidate the higher levels */
	vlv_invalidate_wms(crtc, wm_state, level);

	return 0;
}
1958
1959#define VLV_FIFO(plane, value) \
1960 (((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
1961
/*
 * Reprogram the DSPARB FIFO split registers for this crtc. Each pipe's
 * sprite start points live split across DSPARB/DSPARB2/DSPARB3, with
 * the high bits of each 9-bit start point in DSPARB2, so the registers
 * are read-modify-written for just this pipe's fields.
 */
static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_uncore *uncore = &dev_priv->uncore;
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct vlv_fifo_state *fifo_state =
		&crtc_state->wm.vlv.fifo_state;
	int sprite0_start, sprite1_start, fifo_size;

	if (!crtc_state->fifo_changed)
		return;

	/* convert per-plane sizes into cumulative start points */
	sprite0_start = fifo_state->plane[PLANE_PRIMARY];
	sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start;
	fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start;

	WARN_ON(fifo_state->plane[PLANE_CURSOR] != 63);
	WARN_ON(fifo_size != 511);

	trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size);

	/*
	 * uncore.lock serializes with the unclaimed-register and vblank
	 * paths; the _fw register accessors are used since we hold the
	 * lock ourselves and must not retake it.
	 */
	spin_lock(&uncore->lock);

	switch (crtc->pipe) {
		u32 dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = intel_uncore_read_fw(uncore, DSPARB);
		dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
			    VLV_FIFO(SPRITEB, 0xff));
		dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
			   VLV_FIFO(SPRITEB, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
			     VLV_FIFO(SPRITEB_HI, 0x1));
		dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
			   VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));

		intel_uncore_write_fw(uncore, DSPARB, dsparb);
		intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
		break;
	case PIPE_B:
		dsparb = intel_uncore_read_fw(uncore, DSPARB);
		dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
			    VLV_FIFO(SPRITED, 0xff));
		dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
			   VLV_FIFO(SPRITED, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
			     VLV_FIFO(SPRITED_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
			   VLV_FIFO(SPRITED_HI, sprite1_start >> 8));

		intel_uncore_write_fw(uncore, DSPARB, dsparb);
		intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
		break;
	case PIPE_C:
		dsparb3 = intel_uncore_read_fw(uncore, DSPARB3);
		dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);

		dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
			     VLV_FIFO(SPRITEF, 0xff));
		dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
			   VLV_FIFO(SPRITEF, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
			     VLV_FIFO(SPRITEF_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
			   VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));

		intel_uncore_write_fw(uncore, DSPARB3, dsparb3);
		intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
		break;
	default:
		break;
	}

	/* flush the writes before dropping the lock */
	intel_uncore_posting_read_fw(uncore, DSPARB);

	spin_unlock(&uncore->lock);
}
2057
2058#undef VLV_FIFO
2059
/*
 * Compute the intermediate VLV/CHV watermark state: values safe both
 * before and after the plane update. Because the stored values are
 * already inverted ("FIFO entries free"), the safe merge is the min()
 * of the old (active) and new (optimal) values, not the max().
 * Always returns 0.
 */
static int vlv_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate;
	const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal;
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(new_crtc_state->uapi.state);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(intel_state, crtc);
	const struct vlv_wm_state *active = &old_crtc_state->wm.vlv.optimal;
	int level;

	/* across a modeset just take the optimal state, with cxsr off */
	if (!new_crtc_state->hw.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) {
		*intermediate = *optimal;

		intermediate->cxsr = false;
		goto out;
	}

	intermediate->num_levels = min(optimal->num_levels, active->num_levels);
	intermediate->cxsr = optimal->cxsr && active->cxsr &&
		!new_crtc_state->disable_cxsr;

	for (level = 0; level < intermediate->num_levels; level++) {
		enum plane_id plane_id;

		for_each_plane_id_on_crtc(crtc, plane_id) {
			intermediate->wm[level].plane[plane_id] =
				min(optimal->wm[level].plane[plane_id],
				    active->wm[level].plane[plane_id]);
		}

		intermediate->sr[level].plane = min(optimal->sr[level].plane,
						    active->sr[level].plane);
		intermediate->sr[level].cursor = min(optimal->sr[level].cursor,
						     active->sr[level].cursor);
	}

	/* levels above the common minimum are unusable */
	vlv_invalidate_wms(crtc, intermediate, level);

out:
	/*
	 * Skip the post-vblank programming when the intermediate state
	 * already equals the optimal state.
	 */
	if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
		new_crtc_state->wm.need_postvbl_update = true;

	return 0;
}
2110
2111static void vlv_merge_wm(struct drm_i915_private *dev_priv,
2112 struct vlv_wm_values *wm)
2113{
2114 struct intel_crtc *crtc;
2115 int num_active_pipes = 0;
2116
2117 wm->level = dev_priv->wm.max_level;
2118 wm->cxsr = true;
2119
2120 for_each_intel_crtc(&dev_priv->drm, crtc) {
2121 const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
2122
2123 if (!crtc->active)
2124 continue;
2125
2126 if (!wm_state->cxsr)
2127 wm->cxsr = false;
2128
2129 num_active_pipes++;
2130 wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
2131 }
2132
2133 if (num_active_pipes != 1)
2134 wm->cxsr = false;
2135
2136 if (num_active_pipes > 1)
2137 wm->level = VLV_WM_LEVEL_PM2;
2138
2139 for_each_intel_crtc(&dev_priv->drm, crtc) {
2140 const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
2141 enum pipe pipe = crtc->pipe;
2142
2143 wm->pipe[pipe] = wm_state->wm[wm->level];
2144 if (crtc->active && wm->cxsr)
2145 wm->sr = wm_state->sr[wm->level];
2146
2147 wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2;
2148 wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2;
2149 wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2;
2150 wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2;
2151 }
2152}
2153
/*
 * Merge the current per-crtc states and program the hardware if the
 * result changed. Ordering matters: features being disabled (DDR DVFS,
 * PM5, cxsr) are turned off BEFORE the watermark registers are written,
 * and features being enabled are turned on only AFTER, so the hardware
 * never runs an aggressive power state with stale watermarks.
 */
static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
{
	struct vlv_wm_values *old_wm = &dev_priv->wm.vlv;
	struct vlv_wm_values new_wm = {};

	vlv_merge_wm(dev_priv, &new_wm);

	if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
		return;

	/* disable dropped features first ... */
	if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
		chv_set_memory_dvfs(dev_priv, false);

	if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
		chv_set_memory_pm5(dev_priv, false);

	if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
		_intel_set_memory_cxsr(dev_priv, false);

	vlv_write_wm_values(dev_priv, &new_wm);

	/* ... and enable newly gained features only afterwards */
	if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
		_intel_set_memory_cxsr(dev_priv, true);

	if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
		chv_set_memory_pm5(dev_priv, true);

	if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
		chv_set_memory_dvfs(dev_priv, true);

	*old_wm = new_wm;
}
2186
2187static void vlv_initial_watermarks(struct intel_atomic_state *state,
2188 struct intel_crtc *crtc)
2189{
2190 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2191 const struct intel_crtc_state *crtc_state =
2192 intel_atomic_get_new_crtc_state(state, crtc);
2193
2194 mutex_lock(&dev_priv->wm.wm_mutex);
2195 crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
2196 vlv_program_watermarks(dev_priv);
2197 mutex_unlock(&dev_priv->wm.wm_mutex);
2198}
2199
2200static void vlv_optimize_watermarks(struct intel_atomic_state *state,
2201 struct intel_crtc *crtc)
2202{
2203 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2204 const struct intel_crtc_state *crtc_state =
2205 intel_atomic_get_new_crtc_state(state, crtc);
2206
2207 if (!crtc_state->wm.need_postvbl_update)
2208 return;
2209
2210 mutex_lock(&dev_priv->wm.wm_mutex);
2211 crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
2212 vlv_program_watermarks(dev_priv);
2213 mutex_unlock(&dev_priv->wm.wm_mutex);
2214}
2215
/*
 * Program the gen4 (i965) watermarks. The normal plane/cursor
 * watermarks are fixed at 8; only the self-refresh (SR) watermarks are
 * computed, and SR is enabled only when exactly one crtc is active.
 */
static void i965_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	struct intel_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;
	bool cxsr_enabled;

	/* self-refresh only makes sense with a single enabled crtc */
	crtc = single_enabled_crtc(dev_priv);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->hw.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = crtc->config->pipe_src_w;
		int cpp = fb->format->cpp[0];
		int entries;

		entries = intel_wm_method2(clock, htotal,
					   hdisplay, cpp, sr_latency_ns / 100);
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		drm_dbg_kms(&dev_priv->drm,
			    "self-refresh entries: %d, wm: %d\n",
			    entries, srwm);

		/* cursor SR watermark, sized for a 4 bytes/pixel cursor */
		entries = intel_wm_method2(clock, htotal,
					   crtc->base.cursor->state->crtc_w, 4,
					   sr_latency_ns / 100);
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size) +
			i965_cursor_wm_info.guard_size;

		cursor_sr = i965_cursor_wm_info.fifo_size - entries;
		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		drm_dbg_kms(&dev_priv->drm,
			    "self-refresh watermark: display plane %d "
			    "cursor %d\n", srwm, cursor_sr);

		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		/* turn off self-refresh before touching the watermarks */
		intel_set_memory_cxsr(dev_priv, false);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		    srwm);

	/* 965 uses fixed display watermarks of 8; only SR is computed */
	I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
		   FW_WM(8, CURSORB) |
		   FW_WM(8, PLANEB) |
		   FW_WM(8, PLANEA));
	I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
		   FW_WM(8, PLANEC_OLD));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
2289
2290#undef FW_WM
2291
/*
 * Program the gen2/3 FIFO watermarks (FW_BLC/FW_BLC2): a watermark for
 * each of planes A and B, a fixed cursor watermark, and a self-refresh
 * watermark that is only used when exactly one crtc is active.
 */
static void i9xx_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	const struct intel_watermark_params *wm_info;
	u32 fwater_lo;
	u32 fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct intel_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev_priv))
		wm_info = &i945_wm_info;
	else if (!IS_GEN(dev_priv, 2))
		wm_info = &i915_wm_info;
	else
		wm_info = &i830_a_wm_info;

	/* plane A */
	fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_A);
	crtc = intel_get_crtc_for_plane(dev_priv, PLANE_A);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->hw.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp;

		/* gen2 watermarks always assume 4 bytes/pixel */
		if (IS_GEN(dev_priv, 2))
			cpp = 4;
		else
			cpp = fb->format->cpp[0];

		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		enabled = crtc;
	} else {
		/* inactive plane: park the watermark just below the FIFO top */
		planea_wm = fifo_size - wm_info->guard_size;
		if (planea_wm > (long)wm_info->max_wm)
			planea_wm = wm_info->max_wm;
	}

	/* gen2 plane B/C uses different parameters than plane A */
	if (IS_GEN(dev_priv, 2))
		wm_info = &i830_bc_wm_info;

	/* plane B */
	fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_B);
	crtc = intel_get_crtc_for_plane(dev_priv, PLANE_B);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->hw.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp;

		if (IS_GEN(dev_priv, 2))
			cpp = 4;
		else
			cpp = fb->format->cpp[0];

		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		/* both crtcs active -> no single "enabled" crtc -> no SR */
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else {
		planeb_wm = fifo_size - wm_info->guard_size;
		if (planeb_wm > (long)wm_info->max_wm)
			planeb_wm = wm_info->max_wm;
	}

	drm_dbg_kms(&dev_priv->drm,
		    "FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	if (IS_I915GM(dev_priv) && enabled) {
		struct drm_i915_gem_object *obj;

		obj = intel_fb_obj(enabled->base.primary->state->fb);

		/* no self-refresh on i915GM with an untiled framebuffer */
		if (!i915_gem_object_is_tiled(obj))
			enabled = NULL;
	}

	/* fixed cursor watermark */
	cwm = 2;

	/* play it safe: disable self-refresh before reprogramming */
	intel_set_memory_cxsr(dev_priv, false);

	/* compute the self-refresh watermark, single enabled crtc only */
	if (HAS_FW_BLC(dev_priv) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		const struct drm_display_mode *adjusted_mode =
			&enabled->config->hw.adjusted_mode;
		const struct drm_framebuffer *fb =
			enabled->base.primary->state->fb;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = enabled->config->pipe_src_w;
		int cpp;
		int entries;

		if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
			cpp = 4;
		else
			cpp = fb->format->cpp[0];

		entries = intel_wm_method2(clock, htotal, hdisplay, cpp,
					   sr_latency_ns / 100);
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		drm_dbg_kms(&dev_priv->drm,
			    "self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		/* the SR watermark field width differs per platform */
		if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		    planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* set the burst length request bits */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	/* re-enable self-refresh only when a single crtc is active */
	if (enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
2437
/*
 * i845/i865: program the single plane A FIFO watermark into FW_BLC
 * for the one enabled CRTC. Bails out unless exactly one CRTC is on.
 */
static void i845_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	struct intel_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	u32 fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev_priv);
	if (crtc == NULL)
		return;

	adjusted_mode = &crtc->config->hw.adjusted_mode;
	/* cpp is hard-coded to 4 bytes/pixel for this platform. */
	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
				       &i845_wm_info,
				       dev_priv->display.get_fifo_size(dev_priv, PLANE_A),
				       4, pessimal_latency_ns);
	/*
	 * Preserve the upper bits of FW_BLC; (3<<8) presumably sets a
	 * burst-length field - NOTE(review): confirm against the
	 * register definition before changing.
	 */
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	drm_dbg_kms(&dev_priv->drm,
		    "Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}
2463
2464
/*
 * ILK+ "method 1" watermark: the raw intel_wm_method1() result
 * converted to 64-byte units, plus a guard of 2.
 */
static unsigned int ilk_wm_method1(unsigned int pixel_rate,
				   unsigned int cpp,
				   unsigned int latency)
{
	return DIV_ROUND_UP(intel_wm_method1(pixel_rate, cpp, latency), 64) + 2;
}
2476
2477
/*
 * ILK+ "method 2" watermark: the raw intel_wm_method2() result
 * converted to 64-byte units, plus a guard of 2.
 */
static unsigned int ilk_wm_method2(unsigned int pixel_rate,
				   unsigned int htotal,
				   unsigned int width,
				   unsigned int cpp,
				   unsigned int latency)
{
	unsigned int raw = intel_wm_method2(pixel_rate, htotal,
					    width, cpp, latency);

	return DIV_ROUND_UP(raw, 64) + 2;
}
2492
2493static u32 ilk_wm_fbc(u32 pri_val, u32 horiz_pixels, u8 cpp)
2494{
2495
2496
2497
2498
2499
2500
2501 if (WARN_ON(!cpp))
2502 return 0;
2503 if (WARN_ON(!horiz_pixels))
2504 return 0;
2505
2506 return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
2507}
2508
/* Per-level maximum watermark values (register limits and/or FIFO share). */
struct ilk_wm_maximums {
	u16 pri; /* primary plane */
	u16 spr; /* sprite plane */
	u16 cur; /* cursor */
	u16 fbc; /* FBC */
};
2515
2516
2517
2518
2519
/*
 * Compute the primary plane watermark for one latency level.
 * For LP levels (is_lp) the smaller of method 1 and method 2 is
 * used; for WM0 only method 1 applies.
 */
static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state,
			      u32 mem_value, bool is_lp)
{
	u32 method1, method2;
	int cpp;

	/* Latency 0 marks the level unusable: return a value that can
	 * never pass validation so the level gets disabled. */
	if (mem_value == 0)
		return U32_MAX;

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	cpp = plane_state->hw.fb->format->cpp[0];

	method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);

	if (!is_lp)
		return method1;

	method2 = ilk_wm_method2(crtc_state->pixel_rate,
				 crtc_state->hw.adjusted_mode.crtc_htotal,
				 drm_rect_width(&plane_state->uapi.dst),
				 cpp, mem_value);

	return min(method1, method2);
}
2547
2548
2549
2550
2551
2552static u32 ilk_compute_spr_wm(const struct intel_crtc_state *crtc_state,
2553 const struct intel_plane_state *plane_state,
2554 u32 mem_value)
2555{
2556 u32 method1, method2;
2557 int cpp;
2558
2559 if (mem_value == 0)
2560 return U32_MAX;
2561
2562 if (!intel_wm_plane_visible(crtc_state, plane_state))
2563 return 0;
2564
2565 cpp = plane_state->hw.fb->format->cpp[0];
2566
2567 method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
2568 method2 = ilk_wm_method2(crtc_state->pixel_rate,
2569 crtc_state->hw.adjusted_mode.crtc_htotal,
2570 drm_rect_width(&plane_state->uapi.dst),
2571 cpp, mem_value);
2572 return min(method1, method2);
2573}
2574
2575
2576
2577
2578
/*
 * Compute the cursor watermark for one latency level.
 * Only method 2 is used for the cursor.
 */
static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state,
			      u32 mem_value)
{
	int cpp;

	/* Latency 0 marks the level unusable; make it never validate. */
	if (mem_value == 0)
		return U32_MAX;

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	cpp = plane_state->hw.fb->format->cpp[0];

	return ilk_wm_method2(crtc_state->pixel_rate,
			      crtc_state->hw.adjusted_mode.crtc_htotal,
			      drm_rect_width(&plane_state->uapi.dst),
			      cpp, mem_value);
}
2598
2599
2600static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
2601 const struct intel_plane_state *plane_state,
2602 u32 pri_val)
2603{
2604 int cpp;
2605
2606 if (!intel_wm_plane_visible(crtc_state, plane_state))
2607 return 0;
2608
2609 cpp = plane_state->hw.fb->format->cpp[0];
2610
2611 return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->uapi.dst),
2612 cpp);
2613}
2614
/* Total LP watermark FIFO size for the platform generation. */
static unsigned int
ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 8)
		return 3072;
	if (INTEL_GEN(dev_priv) >= 7)
		return 768;
	return 512;
}
2625
/* Maximum plane watermark the hardware register field can hold. */
static unsigned int
ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
		     int level, bool is_sprite)
{
	if (INTEL_GEN(dev_priv) >= 8)
		/* BDW+: primary/sprite plane watermarks */
		return level == 0 ? 255 : 2047;
	else if (INTEL_GEN(dev_priv) >= 7)
		/* IVB/HSW: primary/sprite plane watermarks */
		return level == 0 ? 127 : 1023;
	else if (!is_sprite)
		/* ILK/SNB: primary plane watermarks */
		return level == 0 ? 127 : 511;
	else
		/* ILK/SNB: sprite plane watermarks */
		return level == 0 ? 63 : 255;
}
2643
/* Maximum cursor watermark the hardware register field can hold. */
static unsigned int
ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
{
	if (level == 0)
		return INTEL_GEN(dev_priv) >= 7 ? 63 : 31;
	else
		return INTEL_GEN(dev_priv) >= 7 ? 255 : 63;
}
2652
/* Maximum FBC watermark the hardware register field can hold. */
static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
{
	return INTEL_GEN(dev_priv) >= 8 ? 31 : 15;
}
2660
2661
/*
 * Compute the maximum usable plane watermark for a level, taking the
 * FIFO split between pipes/planes and the DDB partitioning mode into
 * account, then clamp to what the register field can hold.
 */
static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
				     int level,
				     const struct intel_wm_config *config,
				     enum intel_ddb_partitioning ddb_partitioning,
				     bool is_sprite)
{
	unsigned int fifo_size = ilk_display_fifo_size(dev_priv);

	/* A disabled sprite gets no FIFO at all. */
	if (is_sprite && !config->sprites_enabled)
		return 0;

	/* For WM0, or with multiple active pipes, split the FIFO per pipe. */
	if (level == 0 || config->num_pipes_active > 1) {
		fifo_size /= INTEL_NUM_PIPES(dev_priv);

		/*
		 * ILK/SNB only get half the per-pipe FIFO here.
		 * NOTE(review): the original rationale comment was lost;
		 * confirm the reason against Bspec before changing.
		 */
		if (INTEL_GEN(dev_priv) <= 6)
			fifo_size /= 2;
	}

	if (config->sprites_enabled) {
		/*
		 * With sprites enabled the FIFO is shared between the
		 * primary and sprite planes: 5/6 vs 1/6 in 5/6
		 * partitioning mode (LP levels only), otherwise 1/2 each.
		 */
		if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
			if (is_sprite)
				fifo_size *= 5;
			fifo_size /= 6;
		} else {
			fifo_size /= 2;
		}
	}

	/* Clamp to what the hardware register field can hold. */
	return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite));
}
2701
2702
/*
 * Maximum usable cursor watermark for a level: 64 for LP levels with
 * multiple active pipes, otherwise whatever the register can hold.
 */
static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv,
				      int level,
				      const struct intel_wm_config *config)
{
	if (level > 0 && config->num_pipes_active > 1)
		return 64;

	/* otherwise just report max that registers can hold */
	return ilk_cursor_wm_reg_max(dev_priv, level);
}
2714
/* Fill *max with FIFO-aware watermark maximums for the given level. */
static void ilk_compute_wm_maximums(const struct drm_i915_private *dev_priv,
				    int level,
				    const struct intel_wm_config *config,
				    enum intel_ddb_partitioning ddb_partitioning,
				    struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, false);
	max->spr = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, true);
	max->cur = ilk_cursor_wm_max(dev_priv, level, config);
	max->fbc = ilk_fbc_wm_reg_max(dev_priv);
}
2726
/* Fill *max with register-field-only maximums (no FIFO splitting). */
static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
					int level,
					struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_reg_max(dev_priv, level, false);
	max->spr = ilk_plane_wm_reg_max(dev_priv, level, true);
	max->cur = ilk_cursor_wm_reg_max(dev_priv, level);
	max->fbc = ilk_fbc_wm_reg_max(dev_priv);
}
2736
/*
 * Check a watermark level against the given maximums.
 *
 * Returns true if the level is usable as-is. For level 0 an invalid
 * result is clamped to the maximums and force-enabled (WM0 must
 * always be programmed), but false is still returned so the caller
 * knows the values were out of range.
 */
static bool ilk_validate_wm_level(int level,
				  const struct ilk_wm_maximums *max,
				  struct intel_wm_level *result)
{
	bool ret;

	/* already disabled? */
	if (!result->enable)
		return false;

	result->enable = result->pri_val <= max->pri &&
			 result->spr_val <= max->spr &&
			 result->cur_val <= max->cur;

	ret = result->enable;

	/*
	 * Level 0 cannot simply be disabled, so clamp the values to the
	 * maximums and keep it enabled; the debug messages record which
	 * value(s) were out of range.
	 */
	if (level == 0 && !result->enable) {
		if (result->pri_val > max->pri)
			DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
				      level, result->pri_val, max->pri);
		if (result->spr_val > max->spr)
			DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
				      level, result->spr_val, max->spr);
		if (result->cur_val > max->cur)
			DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
				      level, result->cur_val, max->cur);

		result->pri_val = min_t(u32, result->pri_val, max->pri);
		result->spr_val = min_t(u32, result->spr_val, max->spr);
		result->cur_val = min_t(u32, result->cur_val, max->cur);
		result->enable = true;
	}

	return ret;
}
2777
/*
 * Compute one watermark level (primary/sprite/cursor/FBC values) for
 * a pipe. The *5 scaling of LP latencies matches the 0.5us-per-unit
 * storage used for WM1+ (see intel_print_wm_latency()).
 */
static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
				 const struct intel_crtc *intel_crtc,
				 int level,
				 struct intel_crtc_state *crtc_state,
				 const struct intel_plane_state *pristate,
				 const struct intel_plane_state *sprstate,
				 const struct intel_plane_state *curstate,
				 struct intel_wm_level *result)
{
	u16 pri_latency = dev_priv->wm.pri_latency[level];
	u16 spr_latency = dev_priv->wm.spr_latency[level];
	u16 cur_latency = dev_priv->wm.cur_latency[level];

	/* WM1+ latency values are stored in 0.5us units */
	if (level > 0) {
		pri_latency *= 5;
		spr_latency *= 5;
		cur_latency *= 5;
	}

	if (pristate) {
		result->pri_val = ilk_compute_pri_wm(crtc_state, pristate,
						     pri_latency, level);
		result->fbc_val = ilk_compute_fbc_wm(crtc_state, pristate, result->pri_val);
	}

	if (sprstate)
		result->spr_val = ilk_compute_spr_wm(crtc_state, sprstate, spr_latency);

	if (curstate)
		result->cur_val = ilk_compute_cur_wm(crtc_state, curstate, cur_latency);

	result->enable = true;
}
2812
/*
 * Compute the HSW/BDW pipe "linetime" watermark: the time to scan out
 * one line, in 1/8 us units (crtc_clock is in kHz, hence *1000*8).
 * The IPS linetime uses the logical cdclk instead of the pixel clock.
 */
static u32
hsw_compute_linetime_wm(const struct intel_crtc_state *crtc_state)
{
	const struct intel_atomic_state *intel_state =
		to_intel_atomic_state(crtc_state->uapi.state);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	u32 linetime, ips_linetime;

	if (!crtc_state->hw.active)
		return 0;
	/* Guard the divisions below against zero clocks. */
	if (WARN_ON(adjusted_mode->crtc_clock == 0))
		return 0;
	if (WARN_ON(intel_state->cdclk.logical.cdclk == 0))
		return 0;

	linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
				     adjusted_mode->crtc_clock);
	ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
					 intel_state->cdclk.logical.cdclk);

	return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
	       PIPE_WM_LINETIME_TIME(linetime);
}
2840
/*
 * Read the per-level memory latency values used for watermark
 * programming, from the platform-appropriate source:
 *   GEN9+:    pcode mailbox (two reads of four levels each)
 *   HSW/BDW:  64-bit MCH_SSKPD
 *   GEN6/7:   32-bit MCH_SSKPD
 *   GEN5:     MLTR_ILK (WM0 hard-coded)
 */
static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
				  u16 wm[8])
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (INTEL_GEN(dev_priv) >= 9) {
		u32 val;
		int ret, i;
		int level, max_level = ilk_wm_max_level(dev_priv);

		/* read the first set of memory latencies[0:3] */
		val = 0;
		ret = sandybridge_pcode_read(dev_priv,
					     GEN9_PCODE_READ_MEM_LATENCY,
					     &val, NULL);

		if (ret) {
			drm_err(&dev_priv->drm,
				"SKL Mailbox read error = %d\n", ret);
			return;
		}

		wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
			GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
			GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
			GEN9_MEM_LATENCY_LEVEL_MASK;

		/* read the second set of memory latencies[4:7] */
		val = 1;
		ret = sandybridge_pcode_read(dev_priv,
					     GEN9_PCODE_READ_MEM_LATENCY,
					     &val, NULL);
		if (ret) {
			drm_err(&dev_priv->drm,
				"SKL Mailbox read error = %d\n", ret);
			return;
		}

		wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
			GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
			GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
			GEN9_MEM_LATENCY_LEVEL_MASK;

		/*
		 * A zero latency marks a level (and everything above it)
		 * unusable: clear all higher levels at the first zero.
		 */
		for (level = 1; level <= max_level; level++) {
			if (wm[level] == 0) {
				for (i = level + 1; i <= max_level; i++)
					wm[i] = 0;
				break;
			}
		}

		/*
		 * A reported WM0 latency of 0 means pcode left out some
		 * fixed read latency, so add 2us to WM0 and every valid
		 * level. NOTE(review): original workaround comment lost -
		 * presumably WaWmMemoryReadLatency; confirm the id.
		 */
		if (wm[0] == 0) {
			wm[0] += 2;
			for (level = 1; level <= max_level; level++) {
				if (wm[level] == 0)
					break;
				wm[level] += 2;
			}
		}

		/*
		 * Bump WM0 by 1us on 16GB DIMM systems.
		 * NOTE(review): original rationale comment lost; confirm
		 * against the memory-latency workaround documentation.
		 */
		if (dev_priv->dram_info.is_16gb_dimm)
			wm[0] += 1;

	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		u64 sskpd = intel_uncore_read64(uncore, MCH_SSKPD);

		wm[0] = (sskpd >> 56) & 0xFF;
		if (wm[0] == 0)
			wm[0] = sskpd & 0xF;
		wm[1] = (sskpd >> 4) & 0xFF;
		wm[2] = (sskpd >> 12) & 0xFF;
		wm[3] = (sskpd >> 20) & 0x1FF;
		wm[4] = (sskpd >> 32) & 0x1FF;
	} else if (INTEL_GEN(dev_priv) >= 6) {
		u32 sskpd = intel_uncore_read(uncore, MCH_SSKPD);

		wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
		wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
		wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
		wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
	} else if (INTEL_GEN(dev_priv) >= 5) {
		u32 mltr = intel_uncore_read(uncore, MLTR_ILK);

		/* ILK: WM0 latency is hard-coded (0.1us units). */
		wm[0] = 7;
		wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
		wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
	} else {
		MISSING_CASE(INTEL_DEVID(dev_priv));
	}
}
2956
/* GEN5: override the sprite WM0 latency with a fixed value of 13
 * (1.3us given the 0.1us units used elsewhere in this file). */
static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
				       u16 wm[5])
{
	if (IS_GEN(dev_priv, 5))
		wm[0] = 13;
}
2964
/* GEN5: override the cursor WM0 latency with a fixed value of 13
 * (1.3us given the 0.1us units used elsewhere in this file). */
static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
				       u16 wm[5])
{
	if (IS_GEN(dev_priv, 5))
		wm[0] = 13;
}
2972
/* Highest supported watermark level index for the platform. */
int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return 7;
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		return 4;
	if (INTEL_GEN(dev_priv) >= 6)
		return 3;
	return 2;
}
2985
/*
 * Log all watermark latencies in microseconds. Stored units differ
 * by platform/level: GEN9+ values are in 1us units (*10 then /10),
 * pre-GEN9 WM0 in 0.1us units, pre-GEN9 WM1+ in 0.5us units (*5).
 */
static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
				   const char *name,
				   const u16 wm[8])
{
	int level, max_level = ilk_wm_max_level(dev_priv);

	for (level = 0; level <= max_level; level++) {
		unsigned int latency = wm[level];

		if (latency == 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "%s WM%d latency not provided\n",
				    name, level);
			continue;
		}

		/* Convert the stored value to tenths of a microsecond. */
		if (INTEL_GEN(dev_priv) >= 9)
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		drm_dbg_kms(&dev_priv->drm,
			    "%s WM%d latency %u (%u.%u usec)\n", name, level,
			    wm[level], latency / 10, latency % 10);
	}
}
3016
/*
 * Raise WM0 latency to at least @min (0.1us units) and the LP levels
 * to at least the equivalent in their 0.5us units (min/5).
 * Returns true if anything was changed.
 */
static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
				    u16 wm[5], u16 min)
{
	int level, max_level = ilk_wm_max_level(dev_priv);

	if (wm[0] >= min)
		return false;

	wm[0] = max(wm[0], min);
	for (level = 1; level <= max_level; level++)
		wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5));

	return true;
}
3031
/*
 * SNB quirk: raise all latencies to at least 1.2us (12 in 0.1us
 * units) to avoid underruns. NOTE(review): the original rationale
 * comment was lost; confirm the affected machines upstream.
 */
static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
{
	bool changed;

	/* Bitwise | is deliberate: all three calls must always run. */
	changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
		ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
		ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);

	if (!changed)
		return;

	drm_dbg_kms(&dev_priv->drm,
		    "WM latency values increased to avoid potential underruns\n");
	intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
}
3053
/*
 * SNB quirk: disable LP3 watermarks entirely by zeroing their
 * latencies, to avoid potential lost interrupts (per the log message
 * below). NOTE(review): the full original rationale comment was lost;
 * confirm the details upstream.
 */
static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
{
	/* Nothing to do if LP3 is already disabled everywhere. */
	if (dev_priv->wm.pri_latency[3] == 0 &&
	    dev_priv->wm.spr_latency[3] == 0 &&
	    dev_priv->wm.cur_latency[3] == 0)
		return;

	dev_priv->wm.pri_latency[3] = 0;
	dev_priv->wm.spr_latency[3] = 0;
	dev_priv->wm.cur_latency[3] = 0;

	drm_dbg_kms(&dev_priv->drm,
		    "LP3 watermarks disabled due to potential for lost interrupts\n");
	intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
}
3082
/*
 * Read and fix up the ILK-style watermark latency tables, then apply
 * the SNB-specific quirks.
 */
static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);

	/* Sprite and cursor latencies start as copies of the primary's. */
	memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
	       sizeof(dev_priv->wm.pri_latency));
	memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
	       sizeof(dev_priv->wm.pri_latency));

	intel_fixup_spr_wm_latency(dev_priv, dev_priv->wm.spr_latency);
	intel_fixup_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency);

	intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);

	if (IS_GEN(dev_priv, 6)) {
		snb_wm_latency_quirk(dev_priv);
		snb_wm_lp3_irq_quirk(dev_priv);
	}
}
3104
/* Read and log the GEN9+ watermark latency table. */
static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	intel_read_wm_latency(dev_priv, dev_priv->wm.skl_latency);
	intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency);
}
3110
/* Check that this pipe's LP0 watermark is valid on its own. */
static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
				 struct intel_pipe_wm *pipe_wm)
{
	/* LP0 maximums depend on this pipe alone (single active pipe). */
	const struct intel_wm_config config = {
		.num_pipes_active = 1,
		.sprites_enabled = pipe_wm->sprites_enabled,
		.sprites_scaled = pipe_wm->sprites_scaled,
	};
	struct ilk_wm_maximums max;

	/* LP0 watermarks always use 1/2 DDB partitioning. */
	ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max);

	/* At least LP0 must be valid. */
	if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
		drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n");
		return false;
	}

	return true;
}
3133
3134
/* Compute new watermarks for the pipe. Returns 0 or -EINVAL. */
static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_pipe_wm *pipe_wm;
	struct intel_plane *plane;
	const struct intel_plane_state *plane_state;
	const struct intel_plane_state *pristate = NULL;
	const struct intel_plane_state *sprstate = NULL;
	const struct intel_plane_state *curstate = NULL;
	int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
	struct ilk_wm_maximums max;

	pipe_wm = &crtc_state->wm.ilk.optimal;

	/* Pick out the primary/sprite/cursor plane states for this pipe. */
	intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
		if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
			pristate = plane_state;
		else if (plane->base.type == DRM_PLANE_TYPE_OVERLAY)
			sprstate = plane_state;
		else if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
			curstate = plane_state;
	}

	pipe_wm->pipe_enabled = crtc_state->hw.active;
	if (sprstate) {
		pipe_wm->sprites_enabled = sprstate->uapi.visible;
		/* src is 16.16 fixed point, hence the >> 16 */
		pipe_wm->sprites_scaled = sprstate->uapi.visible &&
			(drm_rect_width(&sprstate->uapi.dst) != drm_rect_width(&sprstate->uapi.src) >> 16 ||
			 drm_rect_height(&sprstate->uapi.dst) != drm_rect_height(&sprstate->uapi.src) >> 16);
	}

	usable_level = max_level;

	/* ILK/SNB: LP2+ watermarks only usable without sprites. */
	if (INTEL_GEN(dev_priv) <= 6 && pipe_wm->sprites_enabled)
		usable_level = 1;

	/* LP1+ watermarks only usable without sprite scaling. */
	if (pipe_wm->sprites_scaled)
		usable_level = 0;

	memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
	ilk_compute_wm_level(dev_priv, intel_crtc, 0, crtc_state,
			     pristate, sprstate, curstate, &pipe_wm->wm[0]);

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		pipe_wm->linetime = hsw_compute_linetime_wm(crtc_state);

	/* LP0 must always be valid. */
	if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))
		return -EINVAL;

	ilk_compute_wm_reg_maximums(dev_priv, 1, &max);

	for (level = 1; level <= usable_level; level++) {
		struct intel_wm_level *wm = &pipe_wm->wm[level];

		ilk_compute_wm_level(dev_priv, intel_crtc, level, crtc_state,
				     pristate, sprstate, curstate, wm);

		/*
		 * Disable any watermark level that exceeds the register
		 * maximums; such a level can never be valid, and neither
		 * can any level above it.
		 */
		if (!ilk_validate_wm_level(level, &max, wm)) {
			memset(wm, 0, sizeof(*wm));
			break;
		}
	}

	return 0;
}
3208
3209
3210
3211
3212
3213
/*
 * Build a set of 'intermediate' watermark values that satisfy both
 * the old and the new state, so they can be programmed to the
 * hardware immediately, even before the vblank that switches to the
 * new state. Returns 0 or -EINVAL if no safe transition exists.
 */
static int ilk_compute_intermediate_wm(struct intel_crtc_state *newstate)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(newstate->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(newstate->uapi.state);
	const struct intel_crtc_state *oldstate =
		intel_atomic_get_old_crtc_state(intel_state, intel_crtc);
	const struct intel_pipe_wm *b = &oldstate->wm.ilk.optimal;
	int level, max_level = ilk_wm_max_level(dev_priv);

	/*
	 * Start with the final (optimal) watermarks. No merging with the
	 * old state is needed for an inactive crtc, on a full modeset,
	 * or when the caller asked to skip intermediate watermarks.
	 */
	*a = newstate->wm.ilk.optimal;
	if (!newstate->hw.active || drm_atomic_crtc_needs_modeset(&newstate->uapi) ||
	    intel_state->skip_intermediate_wm)
		return 0;

	a->pipe_enabled |= b->pipe_enabled;
	a->sprites_enabled |= b->sprites_enabled;
	a->sprites_scaled |= b->sprites_scaled;

	/* A level is usable only if both states enable it, and each
	 * value must cover the larger (worse) of the two states. */
	for (level = 0; level <= max_level; level++) {
		struct intel_wm_level *a_wm = &a->wm[level];
		const struct intel_wm_level *b_wm = &b->wm[level];

		a_wm->enable &= b_wm->enable;
		a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
		a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
		a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
		a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
	}

	/*
	 * The merged values must themselves be a valid configuration;
	 * if not, there is no safe way to transition between the two
	 * states and the atomic transaction must fail.
	 */
	if (!ilk_validate_pipe_wm(dev_priv, a))
		return -EINVAL;

	/*
	 * If the intermediate watermarks differ from the final ones, a
	 * separate post-vblank update is needed to reach the final
	 * values.
	 */
	if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) != 0)
		newstate->wm.need_postvbl_update = true;

	return 0;
}
3269
3270
3271
3272
/* Merge one watermark level across all active pipes (max of each value). */
static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
			       int level,
			       struct intel_wm_level *ret_wm)
{
	const struct intel_crtc *intel_crtc;

	ret_wm->enable = true;

	for_each_intel_crtc(&dev_priv->drm, intel_crtc) {
		const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk;
		const struct intel_wm_level *wm = &active->wm[level];

		if (!active->pipe_enabled)
			continue;

		/*
		 * The merged level is only enabled if every active pipe
		 * enables it, but the values are still merged in below
		 * so a disabled level keeps covering all pipes.
		 */
		if (!wm->enable)
			ret_wm->enable = false;

		ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
		ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
		ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
		ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
	}
}
3302
3303
3304
3305
/*
 * Merge all pipes' watermarks into a single global result, disabling
 * levels that are invalid or unusable on this platform/configuration.
 */
static void ilk_wm_merge(struct drm_i915_private *dev_priv,
			 const struct intel_wm_config *config,
			 const struct ilk_wm_maximums *max,
			 struct intel_pipe_wm *merged)
{
	int level, max_level = ilk_wm_max_level(dev_priv);
	int last_enabled_level = max_level;

	/* ILK/SNB/IVB: LP1+ watermarks only with a single active pipe. */
	if ((INTEL_GEN(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
	    config->num_pipes_active > 1)
		last_enabled_level = 0;

	/* ILK (gen5): the FBC watermark is never enabled. */
	merged->fbc_wm_enabled = INTEL_GEN(dev_priv) >= 6;

	/* Merge each WM1+ level. */
	for (level = 1; level <= max_level; level++) {
		struct intel_wm_level *wm = &merged->wm[level];

		ilk_merge_wm_level(dev_priv, level, wm);

		if (level > last_enabled_level)
			wm->enable = false;
		else if (!ilk_validate_wm_level(level, max, wm))
			/* make sure all following levels get disabled */
			last_enabled_level = level - 1;

		/*
		 * Prefer disabling the FBC watermark over disabling a
		 * whole level when fbc_val is out of range - NOTE(review):
		 * per the inherited behavior; confirm the preference
		 * against Bspec.
		 */
		if (wm->fbc_val > max->fbc) {
			if (wm->enable)
				merged->fbc_wm_enabled = false;
			wm->fbc_val = 0;
		}
	}

	/*
	 * GEN5 with FBC active but FBC watermarks disabled: LP2+ must
	 * be disabled as well - NOTE(review): original workaround
	 * comment lost; confirm rationale upstream.
	 */
	if (IS_GEN(dev_priv, 5) && !merged->fbc_wm_enabled &&
	    intel_fbc_is_active(dev_priv)) {
		for (level = 2; level <= max_level; level++) {
			struct intel_wm_level *wm = &merged->wm[level];

			wm->enable = false;
		}
	}
}
3360
/*
 * Map an LP watermark slot (1-3) to a wm[] level index: normally
 * LP1/2/3 -> WM1/2/3, but when wm[4] is enabled LP2 and LP3 shift
 * up by one to WM3 and WM4.
 */
static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
{
	/* LP1 = WM1, LP2 = WM2 or WM3, LP3 = WM3 or WM4 */
	return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
}
3366
3367
3368static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv,
3369 int level)
3370{
3371 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
3372 return 2 * level;
3373 else
3374 return dev_priv->wm.pri_latency[level];
3375}
3376
3377static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
3378 const struct intel_pipe_wm *merged,
3379 enum intel_ddb_partitioning partitioning,
3380 struct ilk_wm_values *results)
3381{
3382 struct intel_crtc *intel_crtc;
3383 int level, wm_lp;
3384
3385 results->enable_fbc_wm = merged->fbc_wm_enabled;
3386 results->partitioning = partitioning;
3387
3388
3389 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
3390 const struct intel_wm_level *r;
3391
3392 level = ilk_wm_lp_to_level(wm_lp, merged);
3393
3394 r = &merged->wm[level];
3395
3396
3397
3398
3399
3400 results->wm_lp[wm_lp - 1] =
3401 (ilk_wm_lp_latency(dev_priv, level) << WM1_LP_LATENCY_SHIFT) |
3402 (r->pri_val << WM1_LP_SR_SHIFT) |
3403 r->cur_val;
3404
3405 if (r->enable)
3406 results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
3407
3408 if (INTEL_GEN(dev_priv) >= 8)
3409 results->wm_lp[wm_lp - 1] |=
3410 r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
3411 else
3412 results->wm_lp[wm_lp - 1] |=
3413 r->fbc_val << WM1_LP_FBC_SHIFT;
3414
3415
3416
3417
3418
3419 if (INTEL_GEN(dev_priv) <= 6 && r->spr_val) {
3420 WARN_ON(wm_lp != 1);
3421 results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
3422 } else
3423 results->wm_lp_spr[wm_lp - 1] = r->spr_val;
3424 }
3425
3426
3427 for_each_intel_crtc(&dev_priv->drm, intel_crtc) {
3428 enum pipe pipe = intel_crtc->pipe;
3429 const struct intel_wm_level *r =
3430 &intel_crtc->wm.active.ilk.wm[0];
3431
3432 if (WARN_ON(!r->enable))
3433 continue;
3434
3435 results->wm_linetime[pipe] = intel_crtc->wm.active.ilk.linetime;
3436
3437 results->wm_pipe[pipe] =
3438 (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
3439 (r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
3440 r->cur_val;
3441 }
3442}
3443
3444
3445
3446static struct intel_pipe_wm *
3447ilk_find_best_result(struct drm_i915_private *dev_priv,
3448 struct intel_pipe_wm *r1,
3449 struct intel_pipe_wm *r2)
3450{
3451 int level, max_level = ilk_wm_max_level(dev_priv);
3452 int level1 = 0, level2 = 0;
3453
3454 for (level = 1; level <= max_level; level++) {
3455 if (r1->wm[level].enable)
3456 level1 = level;
3457 if (r2->wm[level].enable)
3458 level2 = level;
3459 }
3460
3461 if (level1 == level2) {
3462 if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
3463 return r2;
3464 else
3465 return r1;
3466 } else if (level1 > level2) {
3467 return r1;
3468 } else {
3469 return r2;
3470 }
3471}
3472
3473
/* Dirty bits used by ilk_compute_wm_dirty()/ilk_write_wm_values(). */
#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
#define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
#define WM_DIRTY_FBC (1 << 24)
#define WM_DIRTY_DDB (1 << 25)
3480
/*
 * Compare old and new watermark register values and return a
 * WM_DIRTY_* mask of everything that needs rewriting.
 */
static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
					 const struct ilk_wm_values *old,
					 const struct ilk_wm_values *new)
{
	unsigned int dirty = 0;
	enum pipe pipe;
	int wm_lp;

	for_each_pipe(dev_priv, pipe) {
		if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
			dirty |= WM_DIRTY_LINETIME(pipe);
			/* Must disable LP1+ watermarks too */
			dirty |= WM_DIRTY_LP_ALL;
		}

		if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
			dirty |= WM_DIRTY_PIPE(pipe);
			/* Must disable LP1+ watermarks too */
			dirty |= WM_DIRTY_LP_ALL;
		}
	}

	if (old->enable_fbc_wm != new->enable_fbc_wm) {
		dirty |= WM_DIRTY_FBC;
		/* Must disable LP1+ watermarks too */
		dirty |= WM_DIRTY_LP_ALL;
	}

	if (old->partitioning != new->partitioning) {
		dirty |= WM_DIRTY_DDB;
		/* Must disable LP1+ watermarks too */
		dirty |= WM_DIRTY_LP_ALL;
	}

	/* LP1+ watermarks already deemed dirty, no need to continue */
	if (dirty & WM_DIRTY_LP_ALL)
		return dirty;

	/* Find the lowest numbered LP1+ watermark in need of an update... */
	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
		if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
		    old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
			break;
	}

	/* ...and mark it and all higher numbered LP1+ watermarks dirty */
	for (; wm_lp <= 3; wm_lp++)
		dirty |= WM_DIRTY_LP(wm_lp);

	return dirty;
}
3532
/*
 * Clear the enable bit of every dirty LP watermark that is currently
 * enabled in hardware, highest level first. Returns true if any
 * register was changed. Also updates the cached dev_priv->wm.hw copy.
 */
static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
			       unsigned int dirty)
{
	struct ilk_wm_values *previous = &dev_priv->wm.hw;
	bool changed = false;

	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
		previous->wm_lp[2] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
		changed = true;
	}
	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
		previous->wm_lp[1] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
		changed = true;
	}
	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
		previous->wm_lp[0] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
		changed = true;
	}

	/*
	 * Only the enable bits are cleared; the rest of each value stays
	 * intact, and WM1S_LP_ILK is deliberately not touched.
	 * NOTE(review): the original comment explaining the latter was
	 * lost - confirm the rationale upstream before changing.
	 */
	return changed;
}
3562
3563
3564
3565
3566
/*
 * Program the watermark registers, writing only the ones whose dirty
 * bit is set (redundant writes are avoided since every write causes
 * the watermarks to be re-evaluated). The newly written values are
 * cached in dev_priv->wm.hw for the next dirty computation.
 */
static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
				struct ilk_wm_values *results)
{
	struct ilk_wm_values *previous = &dev_priv->wm.hw;
	unsigned int dirty;
	u32 val;

	dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
	if (!dirty)
		return;

	/* LP1+ watermarks are disabled up front before any rewrite. */
	_ilk_disable_lp_wm(dev_priv, dirty);

	if (dirty & WM_DIRTY_PIPE(PIPE_A))
		I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
	if (dirty & WM_DIRTY_PIPE(PIPE_B))
		I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
	if (dirty & WM_DIRTY_PIPE(PIPE_C))
		I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);

	if (dirty & WM_DIRTY_LINETIME(PIPE_A))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
	if (dirty & WM_DIRTY_LINETIME(PIPE_B))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
	if (dirty & WM_DIRTY_LINETIME(PIPE_C))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);

	/* DDB partitioning lives in WM_MISC on HSW/BDW, DISP_ARB_CTL2 elsewhere. */
	if (dirty & WM_DIRTY_DDB) {
		if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
			val = I915_READ(WM_MISC);
			if (results->partitioning == INTEL_DDB_PART_1_2)
				val &= ~WM_MISC_DATA_PARTITION_5_6;
			else
				val |= WM_MISC_DATA_PARTITION_5_6;
			I915_WRITE(WM_MISC, val);
		} else {
			val = I915_READ(DISP_ARB_CTL2);
			if (results->partitioning == INTEL_DDB_PART_1_2)
				val &= ~DISP_DATA_PARTITION_5_6;
			else
				val |= DISP_DATA_PARTITION_5_6;
			I915_WRITE(DISP_ARB_CTL2, val);
		}
	}

	if (dirty & WM_DIRTY_FBC) {
		val = I915_READ(DISP_ARB_CTL);
		if (results->enable_fbc_wm)
			val &= ~DISP_FBC_WM_DIS;
		else
			val |= DISP_FBC_WM_DIS;
		I915_WRITE(DISP_ARB_CTL, val);
	}

	if (dirty & WM_DIRTY_LP(1) &&
	    previous->wm_lp_spr[0] != results->wm_lp_spr[0])
		I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);

	/* Separate LP2/LP3 sprite registers only exist on gen7+. */
	if (INTEL_GEN(dev_priv) >= 7) {
		if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
			I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
		if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
			I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
	}

	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
		I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
		I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
		I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);

	dev_priv->wm.hw = *results;
}
3641
/*
 * Disable all LP1+ watermarks. Returns true if any watermark register
 * actually had to be changed (i.e. a level was enabled and is now off).
 */
bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv)
{
	return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}
3646
/*
 * Return the number of DBUF slices currently powered up.
 *
 * Slice 1 is always counted as enabled. On gen11+ a second slice
 * exists, but the power-state check for it is deliberately compiled
 * out below (`if (0 && ...)`), so this always reports 1 slice --
 * presumably a temporary limitation until dual-slice support lands;
 * TODO confirm against the slice-2 enabling work.
 */
static u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv)
{
	u8 enabled_slices;

	/* Slice 1 will always be enabled */
	enabled_slices = 1;

	/* Gens prior to 11 have only a single DBUF slice */
	if (INTEL_GEN(dev_priv) < 11)
		return enabled_slices;

	/*
	 * Slice-2 check intentionally disabled for now; would count the
	 * second slice when DBUF_CTL_S2 reports it powered up.
	 */
	if (0 && I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)
		enabled_slices++;

	return enabled_slices;
}
3668
3669
3670
3671
3672
3673static bool skl_needs_memory_bw_wa(struct drm_i915_private *dev_priv)
3674{
3675 return IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv);
3676}
3677
3678static bool
3679intel_has_sagv(struct drm_i915_private *dev_priv)
3680{
3681
3682 if (IS_GEN(dev_priv, 12))
3683 return false;
3684
3685 return (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) &&
3686 dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED;
3687}
3688
3689static void
3690skl_setup_sagv_block_time(struct drm_i915_private *dev_priv)
3691{
3692 if (INTEL_GEN(dev_priv) >= 12) {
3693 u32 val = 0;
3694 int ret;
3695
3696 ret = sandybridge_pcode_read(dev_priv,
3697 GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
3698 &val, NULL);
3699 if (!ret) {
3700 dev_priv->sagv_block_time_us = val;
3701 return;
3702 }
3703
3704 drm_dbg(&dev_priv->drm, "Couldn't read SAGV block time!\n");
3705 } else if (IS_GEN(dev_priv, 11)) {
3706 dev_priv->sagv_block_time_us = 10;
3707 return;
3708 } else if (IS_GEN(dev_priv, 10)) {
3709 dev_priv->sagv_block_time_us = 20;
3710 return;
3711 } else if (IS_GEN(dev_priv, 9)) {
3712 dev_priv->sagv_block_time_us = 30;
3713 return;
3714 } else {
3715 MISSING_CASE(INTEL_GEN(dev_priv));
3716 }
3717
3718
3719 dev_priv->sagv_block_time_us = -1;
3720}
3721
3722
3723
3724
3725
3726
3727
3728
3729
3730
3731
3732
/*
 * Ask pcode to enable SAGV (system agent dynamic voltage/frequency
 * scaling). No-op if the platform has no SAGV or it is already
 * enabled. Returns 0 on success (or when SAGV is absent/uncontrolled),
 * negative errno on a real pcode failure.
 */
int
intel_enable_sagv(struct drm_i915_private *dev_priv)
{
	int ret;

	if (!intel_has_sagv(dev_priv))
		return 0;

	if (dev_priv->sagv_status == I915_SAGV_ENABLED)
		return 0;

	drm_dbg_kms(&dev_priv->drm, "Enabling SAGV\n");
	ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
				      GEN9_SAGV_ENABLE);

	/*
	 * Some SKL systems have firmware without SAGV support; pcode
	 * answers -ENXIO there. Remember that so we never ask again,
	 * and treat it as success rather than an error.
	 */
	if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
		drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n");
		dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
		return 0;
	} else if (ret < 0) {
		drm_err(&dev_priv->drm, "Failed to enable SAGV\n");
		return ret;
	}

	dev_priv->sagv_status = I915_SAGV_ENABLED;
	return 0;
}
3766
/*
 * Ask pcode to disable SAGV, polling (via skl_pcode_request, 1ms
 * timeout) until pcode confirms it is disabled. Mirrors
 * intel_enable_sagv(), including the SKL no-SAGV-firmware -ENXIO
 * special case which is treated as success.
 */
int
intel_disable_sagv(struct drm_i915_private *dev_priv)
{
	int ret;

	if (!intel_has_sagv(dev_priv))
		return 0;

	if (dev_priv->sagv_status == I915_SAGV_DISABLED)
		return 0;

	drm_dbg_kms(&dev_priv->drm, "Disabling SAGV\n");
	/* bspec says to keep retrying for at least a heartbeat, hence the 1ms */
	ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
				GEN9_SAGV_DISABLE,
				GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
				1);
	/*
	 * Some SKL systems have firmware without SAGV support; pcode
	 * answers -ENXIO there. Remember that and treat it as success.
	 */
	if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
		drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n");
		dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
		return 0;
	} else if (ret < 0) {
		drm_err(&dev_priv->drm, "Failed to disable SAGV (%d)\n", ret);
		return ret;
	}

	dev_priv->sagv_status = I915_SAGV_DISABLED;
	return 0;
}
3800
/*
 * Decide whether SAGV may remain enabled for the given atomic state.
 *
 * SAGV is allowed only when: no pipe is active, or exactly one
 * non-interlaced pipe is active AND every enabled plane on it can
 * tolerate the SAGV block time at its highest enabled watermark level.
 */
bool intel_can_enable_sagv(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc;
	struct intel_plane *plane;
	struct intel_crtc_state *crtc_state;
	enum pipe pipe;
	int level, latency;

	if (!intel_has_sagv(dev_priv))
		return false;

	/* No active pipes: nothing to underrun, SAGV is safe */
	if (hweight8(state->active_pipes) == 0)
		return true;

	/* More than one active pipe: keep SAGV off */
	if (hweight8(state->active_pipes) > 1)
		return false;

	/* Since we're now guaranteed to only have one active CRTC... */
	pipe = ffs(state->active_pipes) - 1;
	crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	crtc_state = to_intel_crtc_state(crtc->base.state);

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		return false;

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct skl_plane_wm *wm =
			&crtc_state->wm.skl.optimal.planes[plane->id];

		/* Skip this plane if it's not enabled */
		if (!wm->wm[0].plane_en)
			continue;

		/* Find the highest enabled wm level for this plane */
		for (level = ilk_wm_max_level(dev_priv);
		     !wm->wm[level].plane_en; --level)
		     { }

		latency = dev_priv->wm.skl_latency[level];

		/* Memory bandwidth WA: X-tiled planes need 15us extra latency */
		if (skl_needs_memory_bw_wa(dev_priv) &&
		    plane->base.state->fb->modifier ==
		    I915_FORMAT_MOD_X_TILED)
			latency += 15;

		/*
		 * If the plane's highest enabled level can't absorb the
		 * SAGV block time, enabling SAGV could underrun it.
		 */
		if (latency < dev_priv->sagv_block_time_us)
			return false;
	}

	return true;
}
3866
/*
 * Return the usable DDB size (in blocks) for this configuration, and
 * record the number of enabled DBUF slices in @ddb (gen11+ only).
 *
 * Pre-gen11 reserves 4 blocks off the top -- presumably a hardware
 * requirement; TODO confirm against bspec. On gen11+ the dual-slice
 * path is deliberately compiled out (`if (0 && ...)`), so we always
 * run on a single slice and report half the DDB.
 */
static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
			      const struct intel_crtc_state *crtc_state,
			      const u64 total_data_rate,
			      const int num_active,
			      struct skl_ddb_allocation *ddb)
{
	const struct drm_display_mode *adjusted_mode;
	u64 total_data_bw;
	u16 ddb_size = INTEL_INFO(dev_priv)->ddb_size;

	WARN_ON(ddb_size == 0);

	if (INTEL_GEN(dev_priv) < 11)
		return ddb_size - 4; /* 4 blocks reserved */

	adjusted_mode = &crtc_state->hw.adjusted_mode;
	total_data_bw = total_data_rate * drm_mode_vrefresh(adjusted_mode);

	/*
	 * Dual-slice criteria (multiple pipes or >= 12 GB/s total
	 * bandwidth) intentionally disabled for now: only one slice is
	 * ever used, and the DDB is halved accordingly.
	 */
	if (0 && (num_active > 1 || total_data_bw >= GBps(12))) {
		ddb->enabled_slices = 2;
	} else {
		ddb->enabled_slices = 1;
		ddb_size /= 2;
	}

	return ddb_size;
}
3902
/*
 * Compute the DDB range [alloc->start, alloc->end) available to the
 * pipe of @crtc_state, splitting the total DDB between active pipes
 * proportionally to their hdisplay widths. Also reports the number of
 * active pipes via @num_active.
 */
static void
skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
				   const struct intel_crtc_state *crtc_state,
				   const u64 total_data_rate,
				   struct skl_ddb_allocation *ddb,
				   struct skl_ddb_entry *alloc,
				   int *num_active )
{
	struct drm_atomic_state *state = crtc_state->uapi.state;
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_crtc *for_crtc = crtc_state->uapi.crtc;
	const struct intel_crtc *crtc;
	u32 pipe_width = 0, total_width = 0, width_before_pipe = 0;
	enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe;
	u16 ddb_size;
	u32 i;

	/* Inactive pipe (or bogus state): empty allocation */
	if (WARN_ON(!state) || !crtc_state->hw.active) {
		alloc->start = 0;
		alloc->end = 0;
		*num_active = hweight8(dev_priv->active_pipes);
		return;
	}

	/* Use the state's pipe set if it is changing, else the current one */
	if (intel_state->active_pipe_changes)
		*num_active = hweight8(intel_state->active_pipes);
	else
		*num_active = hweight8(dev_priv->active_pipes);

	ddb_size = intel_get_ddb_size(dev_priv, crtc_state, total_data_rate,
				      *num_active, ddb);

	/*
	 * If the set of active pipes isn't changing and this isn't a
	 * full modeset, the per-pipe split can't change either: keep
	 * this pipe's existing allocation.
	 */
	if (!intel_state->active_pipe_changes && !intel_state->modeset) {
		/*
		 * alloc may be cleared by clear_intel_crtc_state,
		 * copy from old state to be sure
		 */
		*alloc = to_intel_crtc_state(for_crtc->state)->wm.skl.ddb;
		return;
	}

	/*
	 * Sum the hdisplay widths of all enabled pipes in this state,
	 * tracking how much width precedes our pipe so the DDB can be
	 * carved up proportionally in pipe order.
	 */
	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
		const struct drm_display_mode *adjusted_mode =
			&crtc_state->hw.adjusted_mode;
		enum pipe pipe = crtc->pipe;
		int hdisplay, vdisplay;

		if (!crtc_state->hw.enable)
			continue;

		drm_mode_get_hv_timing(adjusted_mode, &hdisplay, &vdisplay);
		total_width += hdisplay;

		if (pipe < for_pipe)
			width_before_pipe += hdisplay;
		else if (pipe == for_pipe)
			pipe_width = hdisplay;
	}

	/* Proportional slice of the DDB for this pipe */
	alloc->start = ddb_size * width_before_pipe / total_width;
	alloc->end = ddb_size * (width_before_pipe + pipe_width) / total_width;
}
3978
/*
 * Forward declarations: skl_cursor_allocation() below needs these two
 * helpers, whose definitions appear later in this file.
 */
static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
				 int width, const struct drm_format_info *format,
				 u64 modifier, unsigned int rotation,
				 u32 plane_pixel_rate, struct skl_wm_params *wp,
				 int color_plane);
static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
				 int level,
				 const struct skl_wm_params *wp,
				 const struct skl_wm_level *result_prev,
				 struct skl_wm_level *result );
3989
/*
 * Compute the fixed DDB allocation (in blocks) for the cursor plane,
 * sized for a worst-case 256-pixel-wide linear ARGB8888 cursor: the
 * largest min_ddb_alloc over all watermark levels that don't blow up
 * (U16_MAX), clamped to a floor of 32 blocks with a single active
 * pipe, or 8 blocks otherwise.
 */
static unsigned int
skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
		      int num_active)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	int level, max_level = ilk_wm_max_level(dev_priv);
	struct skl_wm_level wm = {};
	int ret, min_ddb_alloc = 0;
	struct skl_wm_params wp;

	ret = skl_compute_wm_params(crtc_state, 256,
				    drm_format_info(DRM_FORMAT_ARGB8888),
				    DRM_FORMAT_MOD_LINEAR,
				    DRM_MODE_ROTATE_0,
				    crtc_state->pixel_rate, &wp, 0);
	WARN_ON(ret);

	for (level = 0; level <= max_level; level++) {
		skl_compute_plane_wm(crtc_state, level, &wp, &wm, &wm);
		/* U16_MAX marks an unattainable level; stop there */
		if (wm.min_ddb_alloc == U16_MAX)
			break;

		min_ddb_alloc = wm.min_ddb_alloc;
	}

	return max(num_active == 1 ? 32 : 8, min_ddb_alloc);
}
4017
4018static void skl_ddb_entry_init_from_hw(struct drm_i915_private *dev_priv,
4019 struct skl_ddb_entry *entry, u32 reg)
4020{
4021
4022 entry->start = reg & DDB_ENTRY_MASK;
4023 entry->end = (reg >> DDB_ENTRY_END_SHIFT) & DDB_ENTRY_MASK;
4024
4025 if (entry->end)
4026 entry->end += 1;
4027}
4028
/*
 * Read back the current DDB allocation of one plane from hardware into
 * @ddb_y (and @ddb_uv on pre-gen11, where planar formats use two
 * buffer-config registers that may need swapping for Y/UV order).
 */
static void
skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
			   const enum pipe pipe,
			   const enum plane_id plane_id,
			   struct skl_ddb_entry *ddb_y,
			   struct skl_ddb_entry *ddb_uv)
{
	u32 val, val2;
	u32 fourcc = 0;

	/* Cursor doesn't support NV12/planar, so no extra calculation needed */
	if (plane_id == PLANE_CURSOR) {
		val = I915_READ(CUR_BUF_CFG(pipe));
		skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
		return;
	}

	val = I915_READ(PLANE_CTL(pipe, plane_id));

	/* No DDB allocated for disabled planes */
	if (val & PLANE_CTL_ENABLE)
		fourcc = skl_format_to_fourcc(val & PLANE_CTL_FORMAT_MASK,
					      val & PLANE_CTL_ORDER_RGBX,
					      val & PLANE_CTL_ALPHA_MASK);

	if (INTEL_GEN(dev_priv) >= 11) {
		val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
		skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
	} else {
		val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
		val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));

		/* For planar formats the NV12 register holds the Y plane */
		if (fourcc &&
		    drm_format_info_is_yuv_semiplanar(drm_format_info(fourcc)))
			swap(val, val2);

		skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
		skl_ddb_entry_init_from_hw(dev_priv, ddb_uv, val2);
	}
}
4069
/*
 * Read back the DDB allocation of every plane on @crtc from hardware
 * into the @ddb_y / @ddb_uv arrays (indexed by plane_id). Silently
 * does nothing if the pipe's power well is down.
 */
void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
			       struct skl_ddb_entry *ddb_y,
			       struct skl_ddb_entry *ddb_uv)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	enum pipe pipe = crtc->pipe;
	intel_wakeref_t wakeref;
	enum plane_id plane_id;

	/* Registers are only accessible while the pipe power domain is up */
	power_domain = POWER_DOMAIN_PIPE(pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return;

	for_each_plane_id_on_crtc(crtc, plane_id)
		skl_ddb_get_hw_plane_state(dev_priv, pipe,
					   plane_id,
					   &ddb_y[plane_id],
					   &ddb_uv[plane_id]);

	intel_display_power_put(dev_priv, power_domain, wakeref);
}
4093
/* Read out the global DDB state (number of enabled DBUF slices). */
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
			  struct skl_ddb_allocation *ddb )
{
	ddb->enabled_slices = intel_enabled_dbuf_slices_num(dev_priv);
}
4099
4100
4101
4102
4103
4104
4105
4106
4107
4108
4109
4110
4111
4112
4113
4114
4115
/*
 * Return the plane's combined (horizontal * vertical) downscale factor
 * as a 16.16 fixed-point value. Each axis ratio is clamped to a
 * minimum of 1, so upscaling never reduces the result below 1.0.
 * Returns 0 (and WARNs) if the plane isn't visible.
 */
static uint_fixed_16_16_t
skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state,
			   const struct intel_plane_state *plane_state)
{
	u32 src_w, src_h, dst_w, dst_h;
	uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
	uint_fixed_16_16_t downscale_h, downscale_w;

	if (WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state)))
		return u32_to_fixed16(0);

	/* src coordinates are 16.16 fixed point, hence the >> 16 */
	src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
	src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
	dst_w = drm_rect_width(&plane_state->uapi.dst);
	dst_h = drm_rect_height(&plane_state->uapi.dst);

	fp_w_ratio = div_fixed16(src_w, dst_w);
	fp_h_ratio = div_fixed16(src_h, dst_h);
	downscale_w = max_fixed16(fp_w_ratio, u32_to_fixed16(1));
	downscale_h = max_fixed16(fp_h_ratio, u32_to_fixed16(1));

	return mul_fixed16(downscale_w, downscale_h);
}
4146
/*
 * Compute a plane's relative data rate for DDB distribution:
 * src pixels (halved per axis for the UV color plane of planar
 * formats), scaled by the downscale amount, times bytes-per-pixel.
 * Returns 0 for invisible planes, the cursor, and the UV plane of
 * non-planar formats.
 */
static u64
skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
			     const struct intel_plane_state *plane_state,
			     int color_plane)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	u32 data_rate;
	u32 width = 0, height = 0;
	uint_fixed_16_16_t down_scale_amount;
	u64 rate;

	if (!plane_state->uapi.visible)
		return 0;

	/* Cursor has its own fixed allocation, not data-rate based */
	if (plane->id == PLANE_CURSOR)
		return 0;

	if (color_plane == 1 &&
	    !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
		return 0;

	/* src coordinates are 16.16 fixed point, hence the >> 16 */
	width = drm_rect_width(&plane_state->uapi.src) >> 16;
	height = drm_rect_height(&plane_state->uapi.src) >> 16;

	/* UV plane of a planar format is subsampled 2x2 */
	if (color_plane == 1) {
		width /= 2;
		height /= 2;
	}

	data_rate = width * height;

	down_scale_amount = skl_plane_downscale_amount(crtc_state, plane_state);

	rate = mul_round_up_u32_fixed16(data_rate, down_scale_amount);

	rate *= fb->format->cpp[color_plane];
	return rate;
}
4192
/*
 * Sum the relative data rates of all planes on the crtc (pre-gen11
 * variant), filling the per-plane Y and UV rate arrays (indexed by
 * plane_id) as a side effect. Returns the total.
 */
static u64
skl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
				 u64 *plane_data_rate,
				 u64 *uv_plane_data_rate)
{
	struct drm_atomic_state *state = crtc_state->uapi.state;
	struct intel_plane *plane;
	const struct intel_plane_state *plane_state;
	u64 total_data_rate = 0;

	if (WARN_ON(!state))
		return 0;

	/* Calculate and cache data rate for each plane */
	intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
		enum plane_id plane_id = plane->id;
		u64 rate;

		/* packed/y (color plane 0) */
		rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0);
		plane_data_rate[plane_id] = rate;
		total_data_rate += rate;

		/* uv-plane (color plane 1) */
		rate = skl_plane_relative_data_rate(crtc_state, plane_state, 1);
		uv_plane_data_rate[plane_id] = rate;
		total_data_rate += rate;
	}

	return total_data_rate;
}
4224
/*
 * Gen11+ variant of the data-rate sum. Planar formats use two linked
 * hardware planes here: the rate is computed once from the master
 * plane's state, with the Y-plane rate credited to the linked plane
 * and the UV rate to the master. Slaves are skipped. Fills the
 * per-plane rate array and returns the total.
 */
static u64
icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
				 u64 *plane_data_rate)
{
	struct intel_plane *plane;
	const struct intel_plane_state *plane_state;
	u64 total_data_rate = 0;

	if (WARN_ON(!crtc_state->uapi.state))
		return 0;

	/* Calculate and cache data rate for each plane */
	intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
		enum plane_id plane_id = plane->id;
		u64 rate;

		if (!plane_state->planar_linked_plane) {
			rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0);
			plane_data_rate[plane_id] = rate;
			total_data_rate += rate;
		} else {
			enum plane_id y_plane_id;

			/*
			 * The slave plane carries no state of its own;
			 * everything is derived from the master, so only
			 * process the master and skip the slave here.
			 */
			if (plane_state->planar_slave)
				continue;

			/* Y plane rate is calculated on the slave */
			rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0);
			y_plane_id = plane_state->planar_linked_plane->id;
			plane_data_rate[y_plane_id] = rate;
			total_data_rate += rate;

			rate = skl_plane_relative_data_rate(crtc_state, plane_state, 1);
			plane_data_rate[plane_id] = rate;
			total_data_rate += rate;
		}
	}

	return total_data_rate;
}
4272
/*
 * Distribute the pipe's DDB allocation among its planes.
 *
 * Steps: (1) compute per-plane data rates, (2) get this pipe's DDB
 * window, (3) reserve a fixed cursor allocation, (4) find the highest
 * watermark level whose total minimum block demand fits, (5) hand each
 * plane its minimum at that level plus a share of the leftovers
 * proportional to its data rate, (6) lay the allocations out as
 * [start, end) ranges, (7) disable any higher wm levels (and the
 * transition wm) that no longer fit the final allocation.
 * Returns 0 on success, -EINVAL if even level 0 doesn't fit.
 */
static int
skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
		      struct skl_ddb_allocation *ddb )
{
	struct drm_atomic_state *state = crtc_state->uapi.state;
	struct drm_crtc *crtc = crtc_state->uapi.crtc;
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct skl_ddb_entry *alloc = &crtc_state->wm.skl.ddb;
	u16 alloc_size, start = 0;
	u16 total[I915_MAX_PLANES] = {};
	u16 uv_total[I915_MAX_PLANES] = {};
	u64 total_data_rate;
	enum plane_id plane_id;
	int num_active;
	u64 plane_data_rate[I915_MAX_PLANES] = {};
	u64 uv_plane_data_rate[I915_MAX_PLANES] = {};
	u32 blocks;
	int level;

	/* Clear the partitioning for disabled planes. */
	memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
	memset(crtc_state->wm.skl.plane_ddb_uv, 0, sizeof(crtc_state->wm.skl.plane_ddb_uv));

	if (WARN_ON(!state))
		return 0;

	if (!crtc_state->hw.active) {
		alloc->start = alloc->end = 0;
		return 0;
	}

	if (INTEL_GEN(dev_priv) >= 11)
		total_data_rate =
			icl_get_total_relative_data_rate(crtc_state,
							 plane_data_rate);
	else
		total_data_rate =
			skl_get_total_relative_data_rate(crtc_state,
							 plane_data_rate,
							 uv_plane_data_rate);

	skl_ddb_get_pipe_allocation_limits(dev_priv, crtc_state, total_data_rate,
					   ddb, alloc, &num_active);
	alloc_size = skl_ddb_entry_size(alloc);
	if (alloc_size == 0)
		return 0;

	/* Cursor gets a fixed allocation carved off the top of the window */
	total[PLANE_CURSOR] = skl_cursor_allocation(crtc_state, num_active);
	alloc_size -= total[PLANE_CURSOR];
	crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].start =
		alloc->end - total[PLANE_CURSOR];
	crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;

	if (total_data_rate == 0)
		return 0;

	/*
	 * Find the highest watermark level for which we can satisfy the
	 * block requirement of all active planes.
	 */
	for (level = ilk_wm_max_level(dev_priv); level >= 0; level--) {
		blocks = 0;
		for_each_plane_id_on_crtc(intel_crtc, plane_id) {
			const struct skl_plane_wm *wm =
				&crtc_state->wm.skl.optimal.planes[plane_id];

			if (plane_id == PLANE_CURSOR) {
				/* Cursor must fit its fixed allocation */
				if (wm->wm[level].min_ddb_alloc > total[PLANE_CURSOR]) {
					WARN_ON(wm->wm[level].min_ddb_alloc != U16_MAX);
					blocks = U32_MAX;
					break;
				}
				continue;
			}

			blocks += wm->wm[level].min_ddb_alloc;
			blocks += wm->uv_wm[level].min_ddb_alloc;
		}

		if (blocks <= alloc_size) {
			alloc_size -= blocks;
			break;
		}
	}

	/* Even level 0 didn't fit: configuration is unsupportable */
	if (level < 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Requested display configuration exceeds system DDB limitations");
		drm_dbg_kms(&dev_priv->drm, "minimum required %d/%d\n",
			    blocks, alloc_size);
		return -EINVAL;
	}

	/*
	 * Grant each plane its minimum at the achieved level plus a
	 * share of the remaining blocks proportional to its data rate.
	 */
	for_each_plane_id_on_crtc(intel_crtc, plane_id) {
		const struct skl_plane_wm *wm =
			&crtc_state->wm.skl.optimal.planes[plane_id];
		u64 rate;
		u16 extra;

		if (plane_id == PLANE_CURSOR)
			continue;

		/*
		 * All remaining rate has been distributed; also avoids a
		 * division by zero below.
		 */
		if (total_data_rate == 0)
			break;

		rate = plane_data_rate[plane_id];
		extra = min_t(u16, alloc_size,
			      DIV64_U64_ROUND_UP(alloc_size * rate,
						 total_data_rate));
		total[plane_id] = wm->wm[level].min_ddb_alloc + extra;
		alloc_size -= extra;
		total_data_rate -= rate;

		if (total_data_rate == 0)
			break;

		rate = uv_plane_data_rate[plane_id];
		extra = min_t(u16, alloc_size,
			      DIV64_U64_ROUND_UP(alloc_size * rate,
						 total_data_rate));
		uv_total[plane_id] = wm->uv_wm[level].min_ddb_alloc + extra;
		alloc_size -= extra;
		total_data_rate -= rate;
	}
	/* Proportional split should consume everything exactly */
	WARN_ON(alloc_size != 0 || total_data_rate != 0);

	/* Lay the sizes out as contiguous [start, end) ranges */
	start = alloc->start;
	for_each_plane_id_on_crtc(intel_crtc, plane_id) {
		struct skl_ddb_entry *plane_alloc =
			&crtc_state->wm.skl.plane_ddb_y[plane_id];
		struct skl_ddb_entry *uv_plane_alloc =
			&crtc_state->wm.skl.plane_ddb_uv[plane_id];

		if (plane_id == PLANE_CURSOR)
			continue;

		/* Gen11+ uses a separate plane instead of a UV allocation */
		WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_total[plane_id]);

		/* Leave disabled planes at (0,0) */
		if (total[plane_id]) {
			plane_alloc->start = start;
			start += total[plane_id];
			plane_alloc->end = start;
		}

		if (uv_total[plane_id]) {
			uv_plane_alloc->start = start;
			start += uv_total[plane_id];
			uv_plane_alloc->end = start;
		}
	}

	/*
	 * Disable any watermark level above the achieved one whose
	 * requirement exceeds the plane's final allocation.
	 */
	for (level++; level <= ilk_wm_max_level(dev_priv); level++) {
		for_each_plane_id_on_crtc(intel_crtc, plane_id) {
			struct skl_plane_wm *wm =
				&crtc_state->wm.skl.optimal.planes[plane_id];

			/* Clearing the whole level disables it */
			if (wm->wm[level].min_ddb_alloc > total[plane_id] ||
			    wm->uv_wm[level].min_ddb_alloc > uv_total[plane_id])
				memset(&wm->wm[level], 0, sizeof(wm->wm[level]));

			/*
			 * Gen11 quirk: wm level 1 mirrors level 0 when
			 * level 0 is enabled -- presumably a hardware
			 * workaround; TODO confirm against bspec.
			 */
			if (IS_GEN(dev_priv, 11) &&
			    level == 1 && wm->wm[0].plane_en) {
				wm->wm[level].plane_res_b = wm->wm[0].plane_res_b;
				wm->wm[level].plane_res_l = wm->wm[0].plane_res_l;
				wm->wm[level].ignore_lines = wm->wm[0].ignore_lines;
			}
		}
	}

	/* Likewise drop a transition wm that no longer fits */
	for_each_plane_id_on_crtc(intel_crtc, plane_id) {
		struct skl_plane_wm *wm =
			&crtc_state->wm.skl.optimal.planes[plane_id];

		if (wm->trans_wm.plane_res_b >= total[plane_id])
			memset(&wm->trans_wm, 0, sizeof(wm->trans_wm));
	}

	return 0;
}
4493
4494
4495
4496
4497
4498
4499
4500static uint_fixed_16_16_t
4501skl_wm_method1(const struct drm_i915_private *dev_priv, u32 pixel_rate,
4502 u8 cpp, u32 latency, u32 dbuf_block_size)
4503{
4504 u32 wm_intermediate_val;
4505 uint_fixed_16_16_t ret;
4506
4507 if (latency == 0)
4508 return FP_16_16_MAX;
4509
4510 wm_intermediate_val = latency * pixel_rate * cpp;
4511 ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size);
4512
4513 if (INTEL_GEN(dev_priv) >= 10)
4514 ret = add_fixed16_u32(ret, 1);
4515
4516 return ret;
4517}
4518
4519static uint_fixed_16_16_t
4520skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
4521 uint_fixed_16_16_t plane_blocks_per_line)
4522{
4523 u32 wm_intermediate_val;
4524 uint_fixed_16_16_t ret;
4525
4526 if (latency == 0)
4527 return FP_16_16_MAX;
4528
4529 wm_intermediate_val = latency * pixel_rate;
4530 wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val,
4531 pipe_htotal * 1000);
4532 ret = mul_u32_fixed16(wm_intermediate_val, plane_blocks_per_line);
4533 return ret;
4534}
4535
/*
 * Duration of one display line as a 16.16 fixed-point value,
 * computed as htotal * 1000 / pixel_rate -- presumably pixel_rate is
 * in kHz so the result is in microseconds; TODO confirm. Returns 0
 * for an inactive crtc or (with a WARN) a zero pixel rate.
 */
static uint_fixed_16_16_t
intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
{
	u32 pixel_rate;
	u32 crtc_htotal;
	uint_fixed_16_16_t linetime_us;

	if (!crtc_state->hw.active)
		return u32_to_fixed16(0);

	pixel_rate = crtc_state->pixel_rate;

	if (WARN_ON(pixel_rate == 0))
		return u32_to_fixed16(0);

	crtc_htotal = crtc_state->hw.adjusted_mode.crtc_htotal;
	linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate);

	return linetime_us;
}
4556
/*
 * The pipe pixel rate scaled up by the plane's downscale amount --
 * a downscaled plane has to fetch source pixels faster than the pipe
 * clock alone would suggest. Returns 0 (and WARNs) for an invisible
 * plane.
 */
static u32
skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	u64 adjusted_pixel_rate;
	uint_fixed_16_16_t downscale_amount;

	if (WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state)))
		return 0;

	adjusted_pixel_rate = crtc_state->pixel_rate;
	downscale_amount = skl_plane_downscale_amount(crtc_state, plane_state);

	return mul_round_up_u32_fixed16(adjusted_pixel_rate,
					downscale_amount);
}
4578
/*
 * Precompute the per-plane parameters (tiling flags, cpp, block size,
 * min scanlines, blocks/bytes per line, linetime) used by the SKL+
 * watermark formulas. @color_plane 0 is the main/Y surface, 1 the UV
 * surface of a planar format. Returns 0 or -EINVAL for an invalid
 * planar request or an unknown cpp in a 90/270 rotation.
 */
static int
skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
		      int width, const struct drm_format_info *format,
		      u64 modifier, unsigned int rotation,
		      u32 plane_pixel_rate, struct skl_wm_params *wp,
		      int color_plane)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 interm_pbpl;

	/* only planar format has two planes */
	if (color_plane == 1 &&
	    !intel_format_info_is_yuv_semiplanar(format, modifier)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Non planar format have single plane\n");
		return -EINVAL;
	}

	wp->y_tiled = modifier == I915_FORMAT_MOD_Y_TILED ||
		      modifier == I915_FORMAT_MOD_Yf_TILED ||
		      modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
		      modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
	wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
	wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
			 modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
	wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier);

	wp->width = width;
	/* UV plane is horizontally subsampled */
	if (color_plane == 1 && wp->is_planar)
		wp->width /= 2;

	wp->cpp = format->cpp[color_plane];
	wp->plane_pixel_rate = plane_pixel_rate;

	/* gen11+ uses 256-byte DBUF blocks for 8bpp Yf-tiled, else 512 */
	if (INTEL_GEN(dev_priv) >= 11 &&
	    modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1)
		wp->dbuf_block_size = 256;
	else
		wp->dbuf_block_size = 512;

	/* 90/270 rotation: minimum scanlines depend on bytes per pixel */
	if (drm_rotation_90_or_270(rotation)) {
		switch (wp->cpp) {
		case 1:
			wp->y_min_scanlines = 16;
			break;
		case 2:
			wp->y_min_scanlines = 8;
			break;
		case 4:
			wp->y_min_scanlines = 4;
			break;
		default:
			MISSING_CASE(wp->cpp);
			return -EINVAL;
		}
	} else {
		wp->y_min_scanlines = 4;
	}

	/* memory bandwidth WA doubles the scanline requirement */
	if (skl_needs_memory_bw_wa(dev_priv))
		wp->y_min_scanlines *= 2;

	wp->plane_bytes_per_line = wp->width * wp->cpp;
	if (wp->y_tiled) {
		interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line *
					   wp->y_min_scanlines,
					   wp->dbuf_block_size);

		/* gen10+ adds one extra block per group of scanlines */
		if (INTEL_GEN(dev_priv) >= 10)
			interm_pbpl++;

		wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
							wp->y_min_scanlines);
	} else if (wp->x_tiled && IS_GEN(dev_priv, 9)) {
		interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
					   wp->dbuf_block_size);
		wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
	} else {
		/* linear (and non-gen9 x-tiled): one extra block per line */
		interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
					   wp->dbuf_block_size) + 1;
		wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
	}

	wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines,
					     wp->plane_blocks_per_line);

	wp->linetime_us = fixed16_to_u32_round_up(
					intel_get_linetime_us(crtc_state));

	return 0;
}
4671
/*
 * Convenience wrapper around skl_compute_wm_params() that pulls
 * width, format, modifier, rotation and pixel rate out of the actual
 * plane state rather than taking them as explicit arguments.
 */
static int
skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
			    const struct intel_plane_state *plane_state,
			    struct skl_wm_params *wp, int color_plane)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int width;

	/* src coordinates are 16.16 fixed point, hence the >> 16 */
	width = drm_rect_width(&plane_state->uapi.src) >> 16;

	return skl_compute_wm_params(crtc_state, width,
				     fb->format, fb->modifier,
				     plane_state->hw.rotation,
				     skl_adjusted_plane_pixel_rate(crtc_state, plane_state),
				     wp, color_plane);
}
4693
4694static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level)
4695{
4696 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
4697 return true;
4698
4699
4700 return level > 0;
4701}
4702
/*
 * Compute one watermark level for a plane: pick between method 1
 * (rate-based) and method 2 (line-based), apply the platform
 * workarounds, derive the block/line counts and the minimum DDB
 * allocation, and record the level in @result. A level that can't be
 * met is left disabled with min_ddb_alloc = U16_MAX.
 */
static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
				 int level,
				 const struct skl_wm_params *wp,
				 const struct skl_wm_level *result_prev,
				 struct skl_wm_level *result )
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	u32 latency = dev_priv->wm.skl_latency[level];
	uint_fixed_16_16_t method1, method2;
	uint_fixed_16_16_t selected_result;
	u32 res_blocks, res_lines, min_ddb_alloc = 0;

	/* Zero latency: level disabled */
	if (latency == 0) {
		/* reject it */
		result->min_ddb_alloc = U16_MAX;
		return;
	}

	/*
	 * KBL/CFL (and any platform with IPC enabled) needs 4us of
	 * extra latency headroom.
	 */
	if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) ||
	    dev_priv->ipc_enabled)
		latency += 4;

	/* Memory bandwidth WA: X-tiled needs 15us extra */
	if (skl_needs_memory_bw_wa(dev_priv) && wp->x_tiled)
		latency += 15;

	method1 = skl_wm_method1(dev_priv, wp->plane_pixel_rate,
				 wp->cpp, latency, wp->dbuf_block_size);
	method2 = skl_wm_method2(wp->plane_pixel_rate,
				 crtc_state->hw.adjusted_mode.crtc_htotal,
				 latency,
				 wp->plane_blocks_per_line);

	if (wp->y_tiled) {
		selected_result = max_fixed16(method2, wp->y_tile_minimum);
	} else {
		/*
		 * Small-stride planes use method 2; otherwise prefer
		 * method 2 only when the latency covers at least a full
		 * line (gen9 non-GLK takes the smaller of the two).
		 */
		if ((wp->cpp * crtc_state->hw.adjusted_mode.crtc_htotal /
		     wp->dbuf_block_size < 1) &&
		     (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
			selected_result = method2;
		} else if (latency >= wp->linetime_us) {
			if (IS_GEN(dev_priv, 9) &&
			    !IS_GEMINILAKE(dev_priv))
				selected_result = min_fixed16(method1, method2);
			else
				selected_result = method2;
		} else {
			selected_result = method1;
		}
	}

	res_blocks = fixed16_to_u32_round_up(selected_result) + 1;
	res_lines = div_round_up_fixed16(selected_result,
					 wp->plane_blocks_per_line);

	if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) {
		/* Display WA #1125: skl,bxt,kbl */
		if (level == 0 && wp->rc_surface)
			res_blocks +=
				fixed16_to_u32_round_up(wp->y_tile_minimum);

		/* Display WA #1126: skl,bxt,kbl */
		if (level >= 1 && level <= 7) {
			if (wp->y_tiled) {
				res_blocks +=
				    fixed16_to_u32_round_up(wp->y_tile_minimum);
				res_lines += wp->y_min_scanlines;
			} else {
				res_blocks++;
			}

			/*
			 * Keep the levels monotonic: a higher level must
			 * never need fewer blocks than the one below it.
			 */
			if (result_prev->plane_res_b > res_blocks)
				res_blocks = result_prev->plane_res_b;
		}
	}

	if (INTEL_GEN(dev_priv) >= 11) {
		if (wp->y_tiled) {
			int extra_lines;

			/* Round up to the next multiple of y_min_scanlines,
			 * then add one more group of scanlines on top. */
			if (res_lines % wp->y_min_scanlines == 0)
				extra_lines = wp->y_min_scanlines;
			else
				extra_lines = wp->y_min_scanlines * 2 -
					res_lines % wp->y_min_scanlines;

			min_ddb_alloc = mul_round_up_u32_fixed16(res_lines + extra_lines,
								 wp->plane_blocks_per_line);
		} else {
			/* non-tiled: blocks plus 10% headroom */
			min_ddb_alloc = res_blocks +
				DIV_ROUND_UP(res_blocks, 10);
		}
	}

	if (!skl_wm_has_lines(dev_priv, level))
		res_lines = 0;

	/* Hardware line field caps at 31; beyond that the level is unusable */
	if (res_lines > 31) {
		/* reject it */
		result->min_ddb_alloc = U16_MAX;
		return;
	}

	/*
	 * +1 on min_ddb_alloc: the allocation must strictly exceed the
	 * watermark (the register end value is exclusive).
	 */
	result->plane_res_b = res_blocks;
	result->plane_res_l = res_lines;
	/* Bspec says: value >= plane ddb allocation -> invalid, hence the +1 here */
	result->min_ddb_alloc = max(min_ddb_alloc, res_blocks) + 1;
	result->plane_en = true;
}
4827
4828static void
4829skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
4830 const struct skl_wm_params *wm_params,
4831 struct skl_wm_level *levels)
4832{
4833 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4834 int level, max_level = ilk_wm_max_level(dev_priv);
4835 struct skl_wm_level *result_prev = &levels[0];
4836
4837 for (level = 0; level <= max_level; level++) {
4838 struct skl_wm_level *result = &levels[level];
4839
4840 skl_compute_plane_wm(crtc_state, level, wm_params,
4841 result_prev, result);
4842
4843 result_prev = result;
4844 }
4845}
4846
/*
 * Compute the pipe linetime watermark: 8 * linetime (us), rounded up
 * -- presumably the register works in 1/8 us units; TODO confirm
 * against bspec. Halved on BXT/GLK when IPC is enabled.
 */
static u32
skl_compute_linetime_wm(const struct intel_crtc_state *crtc_state)
{
	struct drm_atomic_state *state = crtc_state->uapi.state;
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	uint_fixed_16_16_t linetime_us;
	u32 linetime_wm;

	linetime_us = intel_get_linetime_us(crtc_state);
	linetime_wm = fixed16_to_u32_round_up(mul_u32_fixed16(8, linetime_us));

	/* Display WA #1135: BXT:ALL GLK:ALL */
	if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled)
		linetime_wm /= 2;

	return linetime_wm;
}
4864
/*
 * Compute the transition watermark for a plane. Transition watermarks
 * only apply on gen10+ and only when IPC is enabled.
 */
static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state,
				      const struct skl_wm_params *wp,
				      struct skl_plane_wm *wm)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	const struct drm_i915_private *dev_priv = to_i915(dev);
	u16 trans_min, trans_y_tile_min;
	const u16 trans_amount = 10; /* extra blocks added on top of trans_min */
	u16 wm0_sel_res_b, trans_offset_b, res_blocks;

	/* Transition WM don't apply to gen9 and earlier. */
	if (INTEL_GEN(dev_priv) <= 9)
		return;

	/* Transition WM are only relevant with IPC enabled. */
	if (!dev_priv->ipc_enabled)
		return;

	/* Minimum transition blocks: 14 pre-gen11, 4 on gen11+. */
	trans_min = 14;
	if (INTEL_GEN(dev_priv) >= 11)
		trans_min = 4;

	trans_offset_b = trans_min + trans_amount;

	/*
	 * Undo the +1 that the level computation added to the WM0 result
	 * (see the "+ 1" applied to plane_res_b when results are stored)
	 * to recover the raw selected blocks value.
	 * NOTE(review): assumes wm->wm[0].plane_res_b >= 1 - appears
	 * guaranteed by that same +1, but confirm.
	 */
	wm0_sel_res_b = wm->wm[0].plane_res_b - 1;

	if (wp->y_tiled) {
		/* Y-tiled: at least two Y-tile rows worth of blocks. */
		trans_y_tile_min =
			(u16)mul_round_up_u32_fixed16(2, wp->y_tile_minimum);
		res_blocks = max(wm0_sel_res_b, trans_y_tile_min) +
			     trans_offset_b;
	} else {
		res_blocks = wm0_sel_res_b + trans_offset_b;

		/* One extra block on CNL A0 only - presumably a hw
		 * workaround; verify against the workaround database. */
		if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))
			res_blocks += 1;

	}

	/*
	 * Re-apply the +1 so the stored value follows the same
	 * convention as the per-level results.
	 */
	wm->trans_wm.plane_res_b = res_blocks + 1;
	wm->trans_wm.plane_en = true;
}
4923
4924static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
4925 const struct intel_plane_state *plane_state,
4926 enum plane_id plane_id, int color_plane)
4927{
4928 struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
4929 struct skl_wm_params wm_params;
4930 int ret;
4931
4932 ret = skl_compute_plane_wm_params(crtc_state, plane_state,
4933 &wm_params, color_plane);
4934 if (ret)
4935 return ret;
4936
4937 skl_compute_wm_levels(crtc_state, &wm_params, wm->wm);
4938 skl_compute_transition_wm(crtc_state, &wm_params, wm);
4939
4940 return 0;
4941}
4942
4943static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
4944 const struct intel_plane_state *plane_state,
4945 enum plane_id plane_id)
4946{
4947 struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
4948 struct skl_wm_params wm_params;
4949 int ret;
4950
4951 wm->is_planar = true;
4952
4953
4954 ret = skl_compute_plane_wm_params(crtc_state, plane_state,
4955 &wm_params, 1);
4956 if (ret)
4957 return ret;
4958
4959 skl_compute_wm_levels(crtc_state, &wm_params, wm->uv_wm);
4960
4961 return 0;
4962}
4963
4964static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
4965 const struct intel_plane_state *plane_state)
4966{
4967 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
4968 const struct drm_framebuffer *fb = plane_state->hw.fb;
4969 enum plane_id plane_id = plane->id;
4970 int ret;
4971
4972 if (!intel_wm_plane_visible(crtc_state, plane_state))
4973 return 0;
4974
4975 ret = skl_build_plane_wm_single(crtc_state, plane_state,
4976 plane_id, 0);
4977 if (ret)
4978 return ret;
4979
4980 if (fb->format->is_yuv && fb->format->num_planes > 1) {
4981 ret = skl_build_plane_wm_uv(crtc_state, plane_state,
4982 plane_id);
4983 if (ret)
4984 return ret;
4985 }
4986
4987 return 0;
4988}
4989
/*
 * Gen11+ per-plane watermark computation. Planar (YUV multi-plane)
 * formats use two linked hardware planes: a "slave" carrying the Y
 * data and a master plane; watermarks for both color planes are
 * computed via the master here.
 */
static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	enum plane_id plane_id = to_intel_plane(plane_state->uapi.plane)->id;
	int ret;

	/* Slave planes get their watermarks via the master - nothing to do. */
	if (plane_state->planar_slave)
		return 0;

	if (plane_state->planar_linked_plane) {
		const struct drm_framebuffer *fb = plane_state->hw.fb;
		enum plane_id y_plane_id = plane_state->planar_linked_plane->id;

		/* A linked plane implies a visible multi-plane YUV fb. */
		WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state));
		WARN_ON(!fb->format->is_yuv ||
			fb->format->num_planes == 1);

		/* Color plane 0 watermarks go on the linked Y plane... */
		ret = skl_build_plane_wm_single(crtc_state, plane_state,
						y_plane_id, 0);
		if (ret)
			return ret;

		/* ...and color plane 1 (UV) watermarks on this plane. */
		ret = skl_build_plane_wm_single(crtc_state, plane_state,
						plane_id, 1);
		if (ret)
			return ret;
	} else if (intel_wm_plane_visible(crtc_state, plane_state)) {
		/* Non-planar: single set of watermarks on color plane 0. */
		ret = skl_build_plane_wm_single(crtc_state, plane_state,
						plane_id, 0);
		if (ret)
			return ret;
	}

	return 0;
}
5026
/*
 * Compute the optimal watermarks for every plane on the pipe,
 * plus the pipe's linetime watermark.
 */
static int skl_build_pipe_wm(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
	struct intel_plane *plane;
	const struct intel_plane_state *plane_state;
	int ret;

	/*
	 * Wipe any stale per-plane watermarks first: planes skipped by
	 * the loop below (e.g. no longer part of the state) must not
	 * keep old values.
	 */
	memset(pipe_wm->planes, 0, sizeof(pipe_wm->planes));

	intel_atomic_crtc_state_for_each_plane_state(plane, plane_state,
						     crtc_state) {
		/* Gen11+ needs the planar master/slave handling. */
		if (INTEL_GEN(dev_priv) >= 11)
			ret = icl_build_plane_wm(crtc_state, plane_state);
		else
			ret = skl_build_plane_wm(crtc_state, plane_state);
		if (ret)
			return ret;
	}

	pipe_wm->linetime = skl_compute_linetime_wm(crtc_state);

	return 0;
}
5056
5057static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
5058 i915_reg_t reg,
5059 const struct skl_ddb_entry *entry)
5060{
5061 if (entry->end)
5062 I915_WRITE_FW(reg, (entry->end - 1) << 16 | entry->start);
5063 else
5064 I915_WRITE_FW(reg, 0);
5065}
5066
5067static void skl_write_wm_level(struct drm_i915_private *dev_priv,
5068 i915_reg_t reg,
5069 const struct skl_wm_level *level)
5070{
5071 u32 val = 0;
5072
5073 if (level->plane_en)
5074 val |= PLANE_WM_EN;
5075 if (level->ignore_lines)
5076 val |= PLANE_WM_IGNORE_LINES;
5077 val |= level->plane_res_b;
5078 val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
5079
5080 I915_WRITE_FW(reg, val);
5081}
5082
/*
 * Program a plane's watermark levels, transition watermark and DDB
 * allocation(s). Uses the raw _FW register accessors - NOTE(review):
 * presumably callers hold the required uncore lock; confirm at the
 * call sites.
 */
void skl_write_plane_wm(struct intel_plane *plane,
			const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	int level, max_level = ilk_wm_max_level(dev_priv);
	enum plane_id plane_id = plane->id;
	enum pipe pipe = plane->pipe;
	const struct skl_plane_wm *wm =
		&crtc_state->wm.skl.optimal.planes[plane_id];
	const struct skl_ddb_entry *ddb_y =
		&crtc_state->wm.skl.plane_ddb_y[plane_id];
	const struct skl_ddb_entry *ddb_uv =
		&crtc_state->wm.skl.plane_ddb_uv[plane_id];

	for (level = 0; level <= max_level; level++) {
		skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
				   &wm->wm[level]);
	}
	skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
			   &wm->trans_wm);

	/* Gen11+ programs only a single DDB entry per plane. */
	if (INTEL_GEN(dev_priv) >= 11) {
		skl_ddb_entry_write(dev_priv,
				    PLANE_BUF_CFG(pipe, plane_id), ddb_y);
		return;
	}

	/*
	 * Pre-gen11 planar formats swap which allocation goes into
	 * PLANE_BUF_CFG vs PLANE_NV12_BUF_CFG - TODO confirm the hw
	 * reason against the register documentation.
	 */
	if (wm->is_planar)
		swap(ddb_y, ddb_uv);

	skl_ddb_entry_write(dev_priv,
			    PLANE_BUF_CFG(pipe, plane_id), ddb_y);
	skl_ddb_entry_write(dev_priv,
			    PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_uv);
}
5118
5119void skl_write_cursor_wm(struct intel_plane *plane,
5120 const struct intel_crtc_state *crtc_state)
5121{
5122 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
5123 int level, max_level = ilk_wm_max_level(dev_priv);
5124 enum plane_id plane_id = plane->id;
5125 enum pipe pipe = plane->pipe;
5126 const struct skl_plane_wm *wm =
5127 &crtc_state->wm.skl.optimal.planes[plane_id];
5128 const struct skl_ddb_entry *ddb =
5129 &crtc_state->wm.skl.plane_ddb_y[plane_id];
5130
5131 for (level = 0; level <= max_level; level++) {
5132 skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
5133 &wm->wm[level]);
5134 }
5135 skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm);
5136
5137 skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), ddb);
5138}
5139
5140bool skl_wm_level_equals(const struct skl_wm_level *l1,
5141 const struct skl_wm_level *l2)
5142{
5143 return l1->plane_en == l2->plane_en &&
5144 l1->ignore_lines == l2->ignore_lines &&
5145 l1->plane_res_l == l2->plane_res_l &&
5146 l1->plane_res_b == l2->plane_res_b;
5147}
5148
5149static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv,
5150 const struct skl_plane_wm *wm1,
5151 const struct skl_plane_wm *wm2)
5152{
5153 int level, max_level = ilk_wm_max_level(dev_priv);
5154
5155 for (level = 0; level <= max_level; level++) {
5156 if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]) ||
5157 !skl_wm_level_equals(&wm1->uv_wm[level], &wm2->uv_wm[level]))
5158 return false;
5159 }
5160
5161 return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm);
5162}
5163
5164static bool skl_pipe_wm_equals(struct intel_crtc *crtc,
5165 const struct skl_pipe_wm *wm1,
5166 const struct skl_pipe_wm *wm2)
5167{
5168 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5169 enum plane_id plane_id;
5170
5171 for_each_plane_id_on_crtc(crtc, plane_id) {
5172 if (!skl_plane_wm_equals(dev_priv,
5173 &wm1->planes[plane_id],
5174 &wm2->planes[plane_id]))
5175 return false;
5176 }
5177
5178 return wm1->linetime == wm2->linetime;
5179}
5180
5181static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
5182 const struct skl_ddb_entry *b)
5183{
5184 return a->start < b->end && b->start < a->end;
5185}
5186
5187bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
5188 const struct skl_ddb_entry *entries,
5189 int num_entries, int ignore_idx)
5190{
5191 int i;
5192
5193 for (i = 0; i < num_entries; i++) {
5194 if (i != ignore_idx &&
5195 skl_ddb_entries_overlap(ddb, &entries[i]))
5196 return true;
5197 }
5198
5199 return false;
5200}
5201
/*
 * Pull into the atomic state every plane whose DDB allocation (Y or
 * UV) changed between the old and new crtc state, and flag it in
 * update_planes so the plane gets reprogrammed.
 */
static int
skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
			    struct intel_crtc_state *new_crtc_state)
{
	struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->uapi.state);
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		struct intel_plane_state *plane_state;
		enum plane_id plane_id = plane->id;

		/* Unchanged allocations need no plane update. */
		if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id],
					&new_crtc_state->wm.skl.plane_ddb_y[plane_id]) &&
		    skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_uv[plane_id],
					&new_crtc_state->wm.skl.plane_ddb_uv[plane_id]))
			continue;

		plane_state = intel_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);

		new_crtc_state->update_planes |= BIT(plane_id);
	}

	return 0;
}
5230
/*
 * (Re)allocate the DDB for every crtc in the state, starting from the
 * currently programmed allocation, and pull in planes whose
 * allocation changed.
 */
static int
skl_compute_ddb(struct intel_atomic_state *state)
{
	const struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
	struct intel_crtc_state *old_crtc_state;
	struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int ret, i;

	/* Seed with the allocation currently in the hardware state. */
	memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		ret = skl_allocate_pipe_ddb(new_crtc_state, ddb);
		if (ret)
			return ret;

		ret = skl_ddb_add_affected_planes(old_crtc_state,
						  new_crtc_state);
		if (ret)
			return ret;
	}

	return 0;
}
5257
/* Compact enabled marker for the watermark debug dumps: '*' or ' '. */
static char enast(bool enable)
{
	if (enable)
		return '*';

	return ' ';
}
5262
/*
 * Dump old vs. new DDB allocations and watermark values for every
 * plane in the state. Pure debug aid - does nothing unless KMS debug
 * output is enabled.
 */
static void
skl_print_wm_changes(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state;
	const struct intel_crtc_state *new_crtc_state;
	struct intel_plane *plane;
	struct intel_crtc *crtc;
	int i;

	if (!drm_debug_enabled(DRM_UT_KMS))
		return;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		const struct skl_pipe_wm *old_pipe_wm, *new_pipe_wm;

		old_pipe_wm = &old_crtc_state->wm.skl.optimal;
		new_pipe_wm = &new_crtc_state->wm.skl.optimal;

		/* First pass: DDB allocation changes (Y allocations only). */
		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
			enum plane_id plane_id = plane->id;
			const struct skl_ddb_entry *old, *new;

			old = &old_crtc_state->wm.skl.plane_ddb_y[plane_id];
			new = &new_crtc_state->wm.skl.plane_ddb_y[plane_id];

			if (skl_ddb_entry_equal(old, new))
				continue;

			drm_dbg_kms(&dev_priv->drm,
				    "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
				    plane->base.base.id, plane->base.name,
				    old->start, old->end, new->start, new->end,
				    skl_ddb_entry_size(old), skl_ddb_entry_size(new));
		}

		/* Second pass: per-level watermark changes. */
		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
			enum plane_id plane_id = plane->id;
			const struct skl_plane_wm *old_wm, *new_wm;

			old_wm = &old_pipe_wm->planes[plane_id];
			new_wm = &new_pipe_wm->planes[plane_id];

			if (skl_plane_wm_equals(dev_priv, old_wm, new_wm))
				continue;

			/* Enable bits for each level + transition wm. */
			drm_dbg_kms(&dev_priv->drm,
				    "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm"
				    " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm\n",
				    plane->base.base.id, plane->base.name,
				    enast(old_wm->wm[0].plane_en), enast(old_wm->wm[1].plane_en),
				    enast(old_wm->wm[2].plane_en), enast(old_wm->wm[3].plane_en),
				    enast(old_wm->wm[4].plane_en), enast(old_wm->wm[5].plane_en),
				    enast(old_wm->wm[6].plane_en), enast(old_wm->wm[7].plane_en),
				    enast(old_wm->trans_wm.plane_en),
				    enast(new_wm->wm[0].plane_en), enast(new_wm->wm[1].plane_en),
				    enast(new_wm->wm[2].plane_en), enast(new_wm->wm[3].plane_en),
				    enast(new_wm->wm[4].plane_en), enast(new_wm->wm[5].plane_en),
				    enast(new_wm->wm[6].plane_en), enast(new_wm->wm[7].plane_en),
				    enast(new_wm->trans_wm.plane_en));

			/* Lines value (with ignore_lines flag) per level. */
			drm_dbg_kms(&dev_priv->drm,
				    "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d"
				    " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n",
				    plane->base.base.id, plane->base.name,
				    enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].plane_res_l,
				    enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].plane_res_l,
				    enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l,
				    enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l,
				    enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l,
				    enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l,
				    enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l,
				    enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l,
				    enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.plane_res_l,

				    enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].plane_res_l,
				    enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].plane_res_l,
				    enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].plane_res_l,
				    enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].plane_res_l,
				    enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].plane_res_l,
				    enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].plane_res_l,
				    enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].plane_res_l,
				    enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].plane_res_l,
				    enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.plane_res_l);

			/* Blocks value per level. */
			drm_dbg_kms(&dev_priv->drm,
				    "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
				    " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
				    plane->base.base.id, plane->base.name,
				    old_wm->wm[0].plane_res_b, old_wm->wm[1].plane_res_b,
				    old_wm->wm[2].plane_res_b, old_wm->wm[3].plane_res_b,
				    old_wm->wm[4].plane_res_b, old_wm->wm[5].plane_res_b,
				    old_wm->wm[6].plane_res_b, old_wm->wm[7].plane_res_b,
				    old_wm->trans_wm.plane_res_b,
				    new_wm->wm[0].plane_res_b, new_wm->wm[1].plane_res_b,
				    new_wm->wm[2].plane_res_b, new_wm->wm[3].plane_res_b,
				    new_wm->wm[4].plane_res_b, new_wm->wm[5].plane_res_b,
				    new_wm->wm[6].plane_res_b, new_wm->wm[7].plane_res_b,
				    new_wm->trans_wm.plane_res_b);

			/* Minimum DDB allocation per level. */
			drm_dbg_kms(&dev_priv->drm,
				    "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
				    " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
				    plane->base.base.id, plane->base.name,
				    old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
				    old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
				    old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
				    old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
				    old_wm->trans_wm.min_ddb_alloc,
				    new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
				    new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
				    new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
				    new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
				    new_wm->trans_wm.min_ddb_alloc);
		}
	}
}
5381
5382static int intel_add_all_pipes(struct intel_atomic_state *state)
5383{
5384 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
5385 struct intel_crtc *crtc;
5386
5387 for_each_intel_crtc(&dev_priv->drm, crtc) {
5388 struct intel_crtc_state *crtc_state;
5389
5390 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
5391 if (IS_ERR(crtc_state))
5392 return PTR_ERR(crtc_state);
5393 }
5394
5395 return 0;
5396}
5397
/*
 * Decide which pipes must be part of the state so the DDB can be
 * redistributed correctly.
 */
static int
skl_ddb_add_affected_pipes(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	int ret;

	/*
	 * If we can't trust the DDB the BIOS programmed (first commit
	 * after hardware readout), redistribute it across all pipes,
	 * which requires pulling every pipe into the state.
	 * Taking connection_mutex here serializes against connector
	 * changes while we do that - NOTE(review): confirm this is the
	 * intended lock (it mirrors what a full modeset would take).
	 */
	if (dev_priv->wm.distrust_bios_wm) {
		ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
				       state->base.acquire_ctx);
		if (ret)
			return ret;

		state->active_pipe_changes = INTEL_INFO(dev_priv)->pipe_mask;

		/*
		 * active_pipes is normally only filled in for a modeset;
		 * make sure it is also initialized on this first,
		 * non-modeset sanitizing commit.
		 */
		if (!state->modeset)
			state->active_pipes = dev_priv->active_pipes;
	}

	/*
	 * Whenever the set of active pipes (may have) changed, the DDB
	 * has to be redistributed among all CRTCs: flag every pipe
	 * dirty and add them all to the state.
	 */
	if (state->active_pipe_changes || state->modeset) {
		state->wm_results.dirty_pipes = INTEL_INFO(dev_priv)->pipe_mask;

		ret = intel_add_all_pipes(state);
		if (ret)
			return ret;
	}

	return 0;
}
5451
5452
5453
5454
5455
5456
5457
5458
5459
5460
5461
5462
5463
5464
5465
5466
5467
5468
5469
5470
5471
5472
5473
/*
 * Pull into the atomic state every plane on @crtc whose watermarks
 * changed, and flag it in update_planes.
 */
static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		struct intel_plane_state *plane_state;
		enum plane_id plane_id = plane->id;

		/*
		 * On a modeset every plane is pulled in unconditionally;
		 * otherwise only planes whose watermark state actually
		 * differs between old and new are added.
		 */
		if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi) &&
		    skl_plane_wm_equals(dev_priv,
					&old_crtc_state->wm.skl.optimal.planes[plane_id],
					&new_crtc_state->wm.skl.optimal.planes[plane_id]))
			continue;

		plane_state = intel_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);

		new_crtc_state->update_planes |= BIT(plane_id);
	}

	return 0;
}
5511
/*
 * Top-level SKL+ watermark computation for an atomic state: decide
 * which pipes are affected, build their watermarks, then allocate
 * the DDB.
 */
static int
skl_compute_wm(struct intel_atomic_state *state)
{
	struct intel_crtc *crtc;
	struct intel_crtc_state *new_crtc_state;
	struct intel_crtc_state *old_crtc_state;
	struct skl_ddb_values *results = &state->wm_results;
	int ret, i;

	/* Clear all dirty flags before recomputing. */
	results->dirty_pipes = 0;

	ret = skl_ddb_add_affected_pipes(state);
	if (ret)
		return ret;

	/*
	 * Build the new watermarks for every crtc in the state (the
	 * step above may have pulled in additional crtcs) and pull in
	 * the planes whose watermarks changed.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		ret = skl_build_pipe_wm(new_crtc_state);
		if (ret)
			return ret;

		ret = skl_wm_add_affected_planes(state, crtc);
		if (ret)
			return ret;

		/* Flag the pipe dirty if any of its watermarks changed. */
		if (!skl_pipe_wm_equals(crtc,
					&old_crtc_state->wm.skl.optimal,
					&new_crtc_state->wm.skl.optimal))
			results->dirty_pipes |= BIT(crtc->pipe);
	}

	ret = skl_compute_ddb(state);
	if (ret)
		return ret;

	skl_print_wm_changes(state);

	return 0;
}
5558
5559static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
5560 struct intel_crtc *crtc)
5561{
5562 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5563 const struct intel_crtc_state *crtc_state =
5564 intel_atomic_get_new_crtc_state(state, crtc);
5565 const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
5566 enum pipe pipe = crtc->pipe;
5567
5568 if ((state->wm_results.dirty_pipes & BIT(crtc->pipe)) == 0)
5569 return;
5570
5571 I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime);
5572}
5573
5574static void skl_initial_wm(struct intel_atomic_state *state,
5575 struct intel_crtc *crtc)
5576{
5577 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5578 const struct intel_crtc_state *crtc_state =
5579 intel_atomic_get_new_crtc_state(state, crtc);
5580 struct skl_ddb_values *results = &state->wm_results;
5581
5582 if ((results->dirty_pipes & BIT(crtc->pipe)) == 0)
5583 return;
5584
5585 mutex_lock(&dev_priv->wm.wm_mutex);
5586
5587 if (crtc_state->uapi.active_changed)
5588 skl_atomic_update_crtc_wm(state, crtc);
5589
5590 mutex_unlock(&dev_priv->wm.wm_mutex);
5591}
5592
5593static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
5594 struct intel_wm_config *config)
5595{
5596 struct intel_crtc *crtc;
5597
5598
5599 for_each_intel_crtc(&dev_priv->drm, crtc) {
5600 const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
5601
5602 if (!wm->pipe_enabled)
5603 continue;
5604
5605 config->sprites_enabled |= wm->sprites_enabled;
5606 config->sprites_scaled |= wm->sprites_scaled;
5607 config->num_pipes_active++;
5608 }
5609}
5610
/*
 * Recompute and program the ILK-style watermarks: merge the LP
 * watermarks for both DDB partitioning modes, pick the better one,
 * and write the result to the hardware.
 */
static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
{
	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
	struct ilk_wm_maximums max;
	struct intel_wm_config config = {};
	struct ilk_wm_values results = {};
	enum intel_ddb_partitioning partitioning;

	ilk_compute_wm_config(dev_priv, &config);

	/* Always evaluate the default 1/2 DDB partitioning. */
	ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_1_2, &max);
	ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2);

	/* The 5/6 split is only tried with a single active pipe using
	 * sprites, on gen7+. */
	if (INTEL_GEN(dev_priv) >= 7 &&
	    config.num_pipes_active == 1 && config.sprites_enabled) {
		ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max);
		ilk_wm_merge(dev_priv, &config, &max, &lp_wm_5_6);

		best_lp_wm = ilk_find_best_result(dev_priv, &lp_wm_1_2, &lp_wm_5_6);
	} else {
		best_lp_wm = &lp_wm_1_2;
	}

	partitioning = (best_lp_wm == &lp_wm_1_2) ?
		       INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;

	ilk_compute_wm_results(dev_priv, best_lp_wm, partitioning, &results);

	ilk_write_wm_values(dev_priv, &results);
}
5642
/*
 * Pre-vblank watermark hook: activate the intermediate watermarks
 * and reprogram the hardware, serialized by wm_mutex.
 */
static void ilk_initial_watermarks(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	mutex_lock(&dev_priv->wm.wm_mutex);
	crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate;
	ilk_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->wm.wm_mutex);
}
5655
/*
 * Post-vblank watermark hook: switch from the intermediate to the
 * optimal watermarks, if an update is still pending.
 */
static void ilk_optimize_watermarks(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	/* Nothing to do if intermediate == optimal already. */
	if (!crtc_state->wm.need_postvbl_update)
		return;

	mutex_lock(&dev_priv->wm.wm_mutex);
	crtc->wm.active.ilk = crtc_state->wm.ilk.optimal;
	ilk_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->wm.wm_mutex);
}
5671
/* Decode a PLANE_WM/CUR_WM register value into a skl_wm_level. */
static inline void skl_wm_level_from_reg_val(u32 val,
					     struct skl_wm_level *level)
{
	level->plane_en = val & PLANE_WM_EN;
	level->ignore_lines = val & PLANE_WM_IGNORE_LINES;
	level->plane_res_b = val & PLANE_WM_BLOCKS_MASK;
	level->plane_res_l = (val >> PLANE_WM_LINES_SHIFT) &
		PLANE_WM_LINES_MASK;
}
5681
/*
 * Read back the watermarks currently programmed in the hardware for
 * every plane (cursor included) on @crtc into @out.
 */
void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
			      struct skl_pipe_wm *out)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	int level, max_level;
	enum plane_id plane_id;
	u32 val;

	max_level = ilk_wm_max_level(dev_priv);

	for_each_plane_id_on_crtc(crtc, plane_id) {
		struct skl_plane_wm *wm = &out->planes[plane_id];

		for (level = 0; level <= max_level; level++) {
			/* Cursor has its own register set. */
			if (plane_id != PLANE_CURSOR)
				val = I915_READ(PLANE_WM(pipe, plane_id, level));
			else
				val = I915_READ(CUR_WM(pipe, level));

			skl_wm_level_from_reg_val(val, &wm->wm[level]);
		}

		if (plane_id != PLANE_CURSOR)
			val = I915_READ(PLANE_WM_TRANS(pipe, plane_id));
		else
			val = I915_READ(CUR_WM_TRANS(pipe));

		skl_wm_level_from_reg_val(val, &wm->trans_wm);
	}

	/* Linetime is only read back for active pipes. */
	if (!crtc->active)
		return;

	out->linetime = I915_READ(PIPE_WM_LINETIME(pipe));
}
5718
/*
 * Read back the full SKL+ watermark/DDB hardware state into the
 * software tracking structures.
 */
void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
	struct skl_ddb_values *hw = &dev_priv->wm.skl_hw;
	struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
	struct intel_crtc *crtc;
	struct intel_crtc_state *crtc_state;

	skl_ddb_get_hw_state(dev_priv, ddb);
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		crtc_state = to_intel_crtc_state(crtc->base.state);

		skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);

		/* Only active pipes count as dirty after readout. */
		if (crtc->active)
			hw->dirty_pipes |= BIT(crtc->pipe);
	}

	if (dev_priv->active_pipes) {
		/* BIOS-programmed DDB can't be trusted; force a full
		 * DDB recomputation on the first atomic commit. */
		dev_priv->wm.distrust_bios_wm = true;
	}
}
5741
/*
 * Read back the ILK-style WM0 hardware state for one pipe and
 * reconstruct an equivalent software watermark state.
 */
static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct ilk_wm_values *hw = &dev_priv->wm.hw;
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
	struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal;
	enum pipe pipe = crtc->pipe;
	static const i915_reg_t wm0_pipe_reg[] = {
		[PIPE_A] = WM0_PIPEA_ILK,
		[PIPE_B] = WM0_PIPEB_ILK,
		[PIPE_C] = WM0_PIPEC_IVB,
	};

	hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
	/* Linetime is only read on HSW/BDW. */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));

	memset(active, 0, sizeof(*active));

	active->pipe_enabled = crtc->active;

	if (active->pipe_enabled) {
		u32 tmp = hw->wm_pipe[pipe];

		/*
		 * Active pipe: decode WM0 from the register and leave
		 * the LP1+ levels disabled (zeroed by the memset above)
		 * since they can't be reliably reverse-computed here.
		 */
		active->wm[0].enable = true;
		active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
		active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
		active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
		active->linetime = hw->wm_linetime[pipe];
	} else {
		int level, max_level = ilk_wm_max_level(dev_priv);

		/*
		 * Inactive pipe: mark every level enabled (with zeroed
		 * values), which is what a fresh computation would give.
		 */
		for (level = 0; level <= max_level; level++)
			active->wm[level].enable = true;
	}

	crtc->wm.active.ilk = *active;
}
5792
/*
 * Extract one plane's watermark field from a DSPFW* register value.
 * _FW_WM_VLV uses the VLV/CHV-specific field mask (DSPFW_*_MASK_VLV);
 * presumably those fields are wider on VLV/CHV - verify against the
 * register definitions.
 */
#define _FW_WM(value, plane) \
	(((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
#define _FW_WM_VLV(value, plane) \
	(((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
5797
/*
 * Read back the G4X watermark registers (DSPFW1..3) into @wm.
 */
static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
			       struct g4x_wm_values *wm)
{
	u32 tmp;

	tmp = I915_READ(DSPFW1);
	wm->sr.plane = _FW_WM(tmp, SR);
	wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
	wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB);
	wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA);

	tmp = I915_READ(DSPFW2);
	wm->fbc_en = tmp & DSPFW_FBC_SR_EN;
	wm->sr.fbc = _FW_WM(tmp, FBC_SR);
	wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR);
	wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEB);
	wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
	wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA);

	tmp = I915_READ(DSPFW3);
	wm->hpll_en = tmp & DSPFW_HPLL_SR_EN;
	wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
	wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR);
	wm->hpll.plane = _FW_WM(tmp, HPLL_SR);
}
5823
/*
 * Read back the VLV/CHV watermark and drain-latency registers into
 * @wm. CHV additionally has a third pipe and extra DSPFW registers;
 * DSPHOWM supplies the high bits of the wider watermark fields.
 */
static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
			       struct vlv_wm_values *wm)
{
	enum pipe pipe;
	u32 tmp;

	/* Per-pipe drain latency (DDL) values. */
	for_each_pipe(dev_priv, pipe) {
		tmp = I915_READ(VLV_DDL(pipe));

		wm->ddl[pipe].plane[PLANE_PRIMARY] =
			(tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
		wm->ddl[pipe].plane[PLANE_CURSOR] =
			(tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
		wm->ddl[pipe].plane[PLANE_SPRITE0] =
			(tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
		wm->ddl[pipe].plane[PLANE_SPRITE1] =
			(tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
	}

	tmp = I915_READ(DSPFW1);
	wm->sr.plane = _FW_WM(tmp, SR);
	wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
	wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
	wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);

	tmp = I915_READ(DSPFW2);
	wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
	wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
	wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);

	tmp = I915_READ(DSPFW3);
	wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);

	if (IS_CHERRYVIEW(dev_priv)) {
		tmp = I915_READ(DSPFW7_CHV);
		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);

		tmp = I915_READ(DSPFW8_CHV);
		wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
		wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);

		tmp = I915_READ(DSPFW9_CHV);
		wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
		wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);

		/* OR in the high bits from DSPHOWM. */
		tmp = I915_READ(DSPHOWM);
		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
		wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
		wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
		wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEC_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
	} else {
		tmp = I915_READ(DSPFW7);
		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);

		/* OR in the high bits from DSPHOWM. */
		tmp = I915_READ(DSPHOWM);
		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
	}
}
5896
5897#undef _FW_WM
5898#undef _FW_WM_VLV
5899
/*
 * Read the current g4x watermark configuration out of the hardware and
 * seed the software watermark state (global dev_priv->wm.g4x plus each
 * CRTC's active/optimal/intermediate state) so it matches the hardware.
 */
void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
	struct g4x_wm_values *wm = &dev_priv->wm.g4x;
	struct intel_crtc *crtc;

	g4x_read_wm_values(dev_priv, wm);

	/* Self-refresh (CxSR) enable state lives in FW_BLC_SELF. */
	wm->cxsr = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct g4x_wm_state *active = &crtc->wm.active.g4x;
		struct g4x_pipe_wm *raw;
		enum pipe pipe = crtc->pipe;
		enum plane_id plane_id;
		int level, max_level;

		/* Mirror the global enables into this CRTC's active state. */
		active->cxsr = wm->cxsr;
		active->hpll_en = wm->hpll_en;
		active->fbc_en = wm->fbc_en;

		active->sr = wm->sr;
		active->hpll = wm->hpll;

		for_each_plane_id_on_crtc(crtc, plane_id) {
			active->wm.plane[plane_id] =
				wm->pipe[pipe].plane[plane_id];
		}

		/*
		 * The deepest usable level depends on which of CxSR/HPLL
		 * the hardware currently has enabled.
		 */
		if (wm->cxsr && wm->hpll_en)
			max_level = G4X_WM_LEVEL_HPLL;
		else if (wm->cxsr)
			max_level = G4X_WM_LEVEL_SR;
		else
			max_level = G4X_WM_LEVEL_NORMAL;

		/* Reconstruct the raw per-level values from the hw state. */
		level = G4X_WM_LEVEL_NORMAL;
		raw = &crtc_state->wm.g4x.raw[level];
		for_each_plane_id_on_crtc(crtc, plane_id)
			raw->plane[plane_id] = active->wm.plane[plane_id];

		if (++level > max_level)
			goto out;

		raw = &crtc_state->wm.g4x.raw[level];
		raw->plane[PLANE_PRIMARY] = active->sr.plane;
		raw->plane[PLANE_CURSOR] = active->sr.cursor;
		raw->plane[PLANE_SPRITE0] = 0;
		raw->fbc = active->sr.fbc;

		if (++level > max_level)
			goto out;

		raw = &crtc_state->wm.g4x.raw[level];
		raw->plane[PLANE_PRIMARY] = active->hpll.plane;
		raw->plane[PLANE_CURSOR] = active->hpll.cursor;
		raw->plane[PLANE_SPRITE0] = 0;
		raw->fbc = active->hpll.fbc;

	out:
		/* Mark all levels beyond max_level as invalid/out-of-range. */
		for_each_plane_id_on_crtc(crtc, plane_id)
			g4x_raw_plane_wm_set(crtc_state, level,
					     plane_id, USHRT_MAX);
		g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);

		crtc_state->wm.g4x.optimal = *active;
		crtc_state->wm.g4x.intermediate = *active;

		drm_dbg_kms(&dev_priv->drm,
			    "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n",
			    pipe_name(pipe),
			    wm->pipe[pipe].plane[PLANE_PRIMARY],
			    wm->pipe[pipe].plane[PLANE_CURSOR],
			    wm->pipe[pipe].plane[PLANE_SPRITE0]);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n",
		    wm->sr.plane, wm->sr.cursor, wm->sr.fbc);
	drm_dbg_kms(&dev_priv->drm,
		    "Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n",
		    wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
	drm_dbg_kms(&dev_priv->drm, "Initial SR=%s HPLL=%s FBC=%s\n",
		    yesno(wm->cxsr), yesno(wm->hpll_en), yesno(wm->fbc_en));
}
5986
/*
 * Sanitize the g4x software watermark state after readout: zero out the
 * watermarks of invisible planes (and FBC state when the primary plane
 * is invisible) and reprogram the hardware to match.
 */
void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;
	struct intel_crtc *crtc;

	mutex_lock(&dev_priv->wm.wm_mutex);

	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_crtc *crtc =
			intel_get_crtc_for_pipe(dev_priv, plane->pipe);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
		enum plane_id plane_id = plane->id;
		int level;

		/* Visible planes keep whatever readout gave them. */
		if (plane_state->uapi.visible)
			continue;

		/* Clear this plane's watermarks at all 3 g4x levels. */
		for (level = 0; level < 3; level++) {
			struct g4x_pipe_wm *raw =
				&crtc_state->wm.g4x.raw[level];

			raw->plane[plane_id] = 0;
			wm_state->wm.plane[plane_id] = 0;
		}

		/* FBC watermarks only track the primary plane. */
		if (plane_id == PLANE_PRIMARY) {
			for (level = 0; level < 3; level++) {
				struct g4x_pipe_wm *raw =
					&crtc_state->wm.g4x.raw[level];
				raw->fbc = 0;
			}

			wm_state->sr.fbc = 0;
			wm_state->hpll.fbc = 0;
			wm_state->fbc_en = false;
		}
	}

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Keep intermediate == optimal so the next commit is sane. */
		crtc_state->wm.g4x.intermediate =
			crtc_state->wm.g4x.optimal;
		crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
	}

	g4x_program_watermarks(dev_priv);

	mutex_unlock(&dev_priv->wm.wm_mutex);
}
6042
/*
 * Read the current VLV/CHV watermark configuration out of the hardware
 * (including the Punit-controlled maxfifo/DDR DVFS state on CHV) and
 * seed the software watermark state to match.
 */
void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
	struct vlv_wm_values *wm = &dev_priv->wm.vlv;
	struct intel_crtc *crtc;
	u32 val;

	vlv_read_wm_values(dev_priv, wm);

	wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
	wm->level = VLV_WM_LEVEL_PM2;

	if (IS_CHERRYVIEW(dev_priv)) {
		vlv_punit_get(dev_priv);

		val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
		if (val & DSP_MAXFIFO_PM5_ENABLE)
			wm->level = VLV_WM_LEVEL_PM5;

		/*
		 * If DDR DVFS is disabled in the BIOS, Punit
		 * will never ack the request. So if that happens
		 * assume we don't have to enable/disable DDR DVFS
		 * dynamically. To test that just set the REQ_ACK
		 * bit and see if it acknowledges.
		 */
		val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
		val |= FORCE_DDR_FREQ_REQ_ACK;
		vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

		if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
			      FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
			drm_dbg_kms(&dev_priv->drm,
				    "Punit not acking DDR DVFS request, "
				    "assuming DDR DVFS is disabled\n");
			dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
		} else {
			val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
			if ((val & FORCE_DDR_HIGH_FREQ) == 0)
				wm->level = VLV_WM_LEVEL_DDR_DVFS;
		}

		vlv_punit_put(dev_priv);
	}

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct vlv_wm_state *active = &crtc->wm.active.vlv;
		const struct vlv_fifo_state *fifo_state =
			&crtc_state->wm.vlv.fifo_state;
		enum pipe pipe = crtc->pipe;
		enum plane_id plane_id;
		int level;

		vlv_get_fifo_size(crtc_state);

		active->num_levels = wm->level + 1;
		active->cxsr = wm->cxsr;

		/*
		 * Reconstruct the raw (non-inverted) values for each
		 * enabled level from the single set of hw values.
		 */
		for (level = 0; level < active->num_levels; level++) {
			struct g4x_pipe_wm *raw =
				&crtc_state->wm.vlv.raw[level];

			active->sr[level].plane = wm->sr.plane;
			active->sr[level].cursor = wm->sr.cursor;

			for_each_plane_id_on_crtc(crtc, plane_id) {
				active->wm[level].plane[plane_id] =
					wm->pipe[pipe].plane[plane_id];

				raw->plane[plane_id] =
					vlv_invert_wm_value(active->wm[level].plane[plane_id],
							    fifo_state->plane[plane_id]);
			}
		}

		/* Mark all levels beyond num_levels as invalid. */
		for_each_plane_id_on_crtc(crtc, plane_id)
			vlv_raw_plane_wm_set(crtc_state, level,
					     plane_id, USHRT_MAX);
		vlv_invalidate_wms(crtc, active, level);

		crtc_state->wm.vlv.optimal = *active;
		crtc_state->wm.vlv.intermediate = *active;

		drm_dbg_kms(&dev_priv->drm,
			    "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
			    pipe_name(pipe),
			    wm->pipe[pipe].plane[PLANE_PRIMARY],
			    wm->pipe[pipe].plane[PLANE_CURSOR],
			    wm->pipe[pipe].plane[PLANE_SPRITE0],
			    wm->pipe[pipe].plane[PLANE_SPRITE1]);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
		    wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
}
6142
/*
 * Sanitize the VLV/CHV software watermark state after readout: zero the
 * watermarks of invisible planes (re-inverting against the plane's FIFO
 * split) and reprogram the hardware to match.
 */
void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;
	struct intel_crtc *crtc;

	mutex_lock(&dev_priv->wm.wm_mutex);

	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_crtc *crtc =
			intel_get_crtc_for_pipe(dev_priv, plane->pipe);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
		const struct vlv_fifo_state *fifo_state =
			&crtc_state->wm.vlv.fifo_state;
		enum plane_id plane_id = plane->id;
		int level;

		/* Visible planes keep whatever readout gave them. */
		if (plane_state->uapi.visible)
			continue;

		for (level = 0; level < wm_state->num_levels; level++) {
			struct g4x_pipe_wm *raw =
				&crtc_state->wm.vlv.raw[level];

			raw->plane[plane_id] = 0;

			/* The hw value is FIFO-size-relative (inverted). */
			wm_state->wm[level].plane[plane_id] =
				vlv_invert_wm_value(raw->plane[plane_id],
						    fifo_state->plane[plane_id]);
		}
	}

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Keep intermediate == optimal so the next commit is sane. */
		crtc_state->wm.vlv.intermediate =
			crtc_state->wm.vlv.optimal;
		crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
	}

	vlv_program_watermarks(dev_priv);

	mutex_unlock(&dev_priv->wm.wm_mutex);
}
6191
6192
6193
6194
6195
/* Disable all LP1+ watermarks by clearing their enable bits. */
static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
{
	I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);

	/*
	 * NOTE(review): the sprite LP registers (WM1S_LP_ILK etc.) are
	 * deliberately left untouched here; presumably their enables are
	 * controlled via the main LP watermark registers — confirm.
	 */
}
6207
/*
 * Read the current ILK-style (gen5-8) watermark configuration out of
 * the hardware into dev_priv->wm.hw, after first disabling the LP1+
 * watermarks and reading back each pipe's watermark state.
 */
void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
	struct ilk_wm_values *hw = &dev_priv->wm.hw;
	struct intel_crtc *crtc;

	ilk_init_lp_watermarks(dev_priv);

	for_each_intel_crtc(&dev_priv->drm, crtc)
		ilk_pipe_wm_get_hw_state(crtc);

	hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
	hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
	hw->wm_lp[2] = I915_READ(WM3_LP_ILK);

	/* Sprite LP2/LP3 registers only exist on gen7+ (IVB layout). */
	hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
	if (INTEL_GEN(dev_priv) >= 7) {
		hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
		hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
	}

	/* DDB partitioning lives in different registers per platform. */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
	else if (IS_IVYBRIDGE(dev_priv))
		hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;

	hw->enable_fbc_wm =
		!(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
}
6238
6239
6240
6241
6242
6243
6244
6245
6246
6247
6248
6249
6250
6251
6252
6253
6254
6255
6256
6257
6258
6259
6260
6261
6262
6263
6264
6265
6266
6267
6268
6269
6270
6271
6272void intel_update_watermarks(struct intel_crtc *crtc)
6273{
6274 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6275
6276 if (dev_priv->display.update_wm)
6277 dev_priv->display.update_wm(crtc);
6278}
6279
6280void intel_enable_ipc(struct drm_i915_private *dev_priv)
6281{
6282 u32 val;
6283
6284 if (!HAS_IPC(dev_priv))
6285 return;
6286
6287 val = I915_READ(DISP_ARB_CTL2);
6288
6289 if (dev_priv->ipc_enabled)
6290 val |= DISP_IPC_ENABLE;
6291 else
6292 val &= ~DISP_IPC_ENABLE;
6293
6294 I915_WRITE(DISP_ARB_CTL2, val);
6295}
6296
/*
 * Decide whether IPC may be enabled on this platform. IPC is kept off
 * on SKL entirely, and on KBL/CFL only allowed with symmetric memory.
 */
static bool intel_can_enable_ipc(struct drm_i915_private *dev_priv)
{
	/* IPC is disabled on Skylake (workaround — exact WA id not visible here). */
	if (IS_SKYLAKE(dev_priv))
		return false;

	/* KBL/CFL: IPC only with symmetric memory configuration. */
	if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
		return dev_priv->dram_info.symmetric_memory;

	return true;
}
6309
/* Compute the initial IPC enable state and program it into the hardware. */
void intel_init_ipc(struct drm_i915_private *dev_priv)
{
	if (!HAS_IPC(dev_priv))
		return;

	dev_priv->ipc_enabled = intel_can_enable_ipc(dev_priv);

	intel_enable_ipc(dev_priv);
}
6319
/* Ibex Peak (PCH) clock gating setup. */
static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/*
	 * Disable DPLS unit clock gating in the south display clock gate
	 * register. NOTE(review): upstream comment says this is needed so
	 * the panel power sequencer keeps working with no ports active —
	 * confirm against current docs.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
6329
/*
 * Disable display plane trickle feed on every pipe, arming the change
 * with a surface-address rewrite + posting read so it takes effect.
 */
static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);

		/* Writing DSPSURF latches the DSPCNTR change. */
		I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
		POSTING_READ(DSPSURF(pipe));
	}
}
6343
/* Ironlake (gen5) clock gating and chicken-bit workarounds. */
static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	/* Disable clock gating for the DPFC (FBC compressor) units. */
	dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE;

	I915_WRITE(PCH_3DCGDIS0,
		   MARIUNIT_CLOCK_GATE_DISABLE |
		   SVSMUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(PCH_3DCGDIS1,
		   VFMUNIT_CLOCK_GATE_DISABLE);

	/*
	 * Display arbitration / VSDPFD chicken bits, plus disable FBC
	 * watermark influence (DISP_FBC_WM_DIS).
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
	dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
	I915_WRITE(DISP_ARB_CTL,
		   (I915_READ(DISP_ARB_CTL) |
		    DISP_FBC_WM_DIS));

	/* Mobile Ironlake: additional FBC queue / arbiter chicken bits. */
	if (IS_IRONLAKE_M(dev_priv)) {
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
		I915_WRITE(ILK_DISPLAY_CHICKEN2,
			   I915_READ(ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE);
	}

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);

	/* Disable pipelined render flushes. */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* Disable render-cache operation flushes. */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev_priv);

	ibx_init_clock_gating(dev_priv);
}
6414
/* Cougar Point (PCH) clock gating setup. */
static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;
	u32 val;

	/*
	 * Disable DPLS/DPL/CP unit clock gating and apply the eDP PPS
	 * chicken bit. NOTE(review): upstream says the DPLS disable keeps
	 * the panel power sequencer alive with no ports active — confirm.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
		   PCH_DPLUNIT_CLOCK_GATE_DISABLE |
		   PCH_CPUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
		   DPLS_EDP_PPS_FIX_DIS);

	/*
	 * Per-transcoder timing-override and FDI polarity programming,
	 * taking the VBT's FDI RX polarity inversion into account.
	 */
	for_each_pipe(dev_priv, pipe) {
		val = I915_READ(TRANS_CHICKEN2(pipe));
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		if (dev_priv->vbt.fdi_rx_polarity_inverted)
			val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
		I915_WRITE(TRANS_CHICKEN2(pipe), val);
	}

	/* Disable DP0 unit clock gating on every transcoder. */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(TRANS_CHICKEN1(pipe),
			   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
	}
}
6449
/*
 * Warn (once, via debug log) if the BIOS left MCH_SSKPD WM0 programmed
 * to an unexpected value, which can lead to display FIFO underruns.
 */
static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = I915_READ(MCH_SSKPD);
	if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
		drm_dbg_kms(&dev_priv->drm,
			    "Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
			    tmp);
}
6460
/* Sandybridge (gen6) clock gating and chicken-bit workarounds. */
static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	/* Disable HiZ plane with MSAA 4x (SNB workaround). */
	I915_WRITE(_3D_CHICKEN,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));

	/* Disable render-cache operation flushes. */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* Select 16x4 WiZ hashing mode. */
	I915_WRITE(GEN6_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));

	I915_WRITE(GEN6_UCGCTL1,
		   I915_READ(GEN6_UCGCTL1) |
		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/*
	 * Disable RCPB and RCC unit clock gating.
	 * NOTE(review): upstream ties these to RC6/hang workarounds
	 * (WaDisableRCCUnitClockGating / WaDisableRCPBUnitClockGating) —
	 * confirm the exact WA names before citing them.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/* Disable fastclip culling in the strips & fans unit. */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));

	/* Disable pipelined attribute fetch in the SF unit. */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));

	/*
	 * Display chicken bits: FBC queue, pixel-arbiter stretch,
	 * display arbiter gating, VSDPFD full.
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE_D,
		   I915_READ(ILK_DSPCLK_GATE_D) |
		   ILK_DPARBUNIT_CLOCK_GATE_ENABLE  |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);

	g4x_disable_trickle_feed(dev_priv);

	cpt_init_clock_gating(dev_priv);

	gen6_check_mch_setup(dev_priv);
}
6554
/*
 * Switch the TS/VS/DS fixed-function thread schedulers to HW scheduling
 * mode in GEN7_FF_THREAD_MODE.
 */
static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
{
	u32 reg = I915_READ(GEN7_FF_THREAD_MODE);

	/* Clear the full scheduler field, then select HW mode per stage. */
	reg &= ~GEN7_FF_SCHED_MASK;
	reg |= GEN7_FF_TS_SCHED_HW;
	reg |= GEN7_FF_VS_SCHED_HW;
	reg |= GEN7_FF_DS_SCHED_HW;

	I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}
6572
/* Lynx Point (PCH) clock gating setup. */
static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/*
	 * LPT-LP only: disable the low-power PCH partition level
	 * (re-enabled again in lpt_suspend_hw for suspend).
	 */
	if (HAS_PCH_LPT_LP(dev_priv))
		I915_WRITE(SOUTH_DSPCLK_GATE_D,
			   I915_READ(SOUTH_DSPCLK_GATE_D) |
			   PCH_LP_PARTITION_LEVEL_DISABLE);

	/* Disable DP0 unit clock gating on transcoder A. */
	I915_WRITE(TRANS_CHICKEN1(PIPE_A),
		   I915_READ(TRANS_CHICKEN1(PIPE_A)) |
		   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}
6589
/*
 * Undo the LPT-LP partition-level disable from lpt_init_clock_gating
 * before suspend, re-allowing the low-power PCH partition level.
 */
static void lpt_suspend_hw(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_LPT_LP(dev_priv)) {
		u32 val = I915_READ(SOUTH_DSPCLK_GATE_D);

		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}
}
6599
/*
 * Program the L3 SQC general/high priority credit split. DOP clock
 * gating is temporarily disabled around the write so the new value
 * reliably latches.
 */
static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
				   int general_prio_credits,
				   int high_prio_credits)
{
	u32 misccpctl;
	u32 val;

	/* Disable DOP clock gating while changing the credits. */
	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);

	val = I915_READ(GEN8_L3SQCREG1);
	val &= ~L3_PRIO_CREDITS_MASK;
	val |= L3_GENERAL_PRIO_CREDITS(general_prio_credits);
	val |= L3_HIGH_PRIO_CREDITS(high_prio_credits);
	I915_WRITE(GEN8_L3SQCREG1, val);

	/*
	 * Posting read + short delay before restoring MISCCPCTL,
	 * giving the write time to take effect.
	 */
	POSTING_READ(GEN8_L3SQCREG1);
	udelay(1);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
}
6625
/* Icelake (gen11) clock gating and chicken-bit workarounds. */
static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* Clear DFR_DISABLE in the DFR ratio/chicken register. */
	I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN,
		   I915_READ(GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE);

	/* Enable 32-plane mode (gen11). */
	I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
		   _MASKED_BIT_ENABLE(GEN11_ENABLE_32_PLANE_MODE));

	/* Disable VS and HS unit-level clock gating. */
	intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE,
			 0, VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);

	/* Disable PSD unit-level clock gating. */
	intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE2,
			 0, PSDUNIT_CLKGATE_DIS);
}
6647
/* Tigerlake (gen12) clock gating and media power-gating setup. */
static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 vd_pg_enable = 0;
	unsigned int i;

	/* Disable VS unit-level clock gating (TGL variant of the bit). */
	intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE2,
			 0, VSUNIT_CLKGATE_DIS_TGL);

	/* Enable HCP/MFX power gating only for VCS engines that exist. */
	for (i = 0; i < I915_MAX_VCS; i++) {
		if (HAS_ENGINE(dev_priv, _VCS(i)))
			vd_pg_enable |= VDN_HCP_POWERGATE_ENABLE(i) |
					VDN_MFX_POWERGATE_ENABLE(i);
	}

	I915_WRITE(POWERGATE_ENABLE,
		   I915_READ(POWERGATE_ENABLE) | vd_pg_enable);
}
6667
/* Cannon Point (PCH) clock gating setup. */
static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
{
	if (!HAS_PCH_CNP(dev_priv))
		return;

	/* Disable PWM clock gating in the south display clock gate register. */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, I915_READ(SOUTH_DSPCLK_GATE_D) |
		   CNP_PWM_CGE_GATING_DISABLE);
}
6677
/* Cannonlake (gen10) clock gating and chicken-bit workarounds. */
static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 val;
	cnp_init_clock_gating(dev_priv);

	/* Enable the AA line quality fix. */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));

	/* Mask memory-wakeup in the DCPR chicken register. */
	I915_WRITE(GEN8_CHICKEN_DCPR_1,
		   I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);

	/* Keep FBC memory awake via the display arbiter. */
	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
		   DISP_FBC_MEMORY_WAKE);

	val = I915_READ(SLICE_UNIT_LEVEL_CLKGATE);
	/* Disable RCC unit-level clock gating. */
	val |= RCCUNIT_CLKGATE_DIS;
	/* Early CNL steppings additionally need SARB clock gating disabled. */
	if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_B0))
		val |= SARBUNIT_CLKGATE_DIS;
	I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE, val);

	/* Disable GW unit-level clock gating. */
	val = I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE);
	val |= GWUNIT_CLKGATE_DIS;
	I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE, val);

	/* Disable VF unit-level clock gating. */
	val = I915_READ(UNSLICE_UNIT_LEVEL_CLKGATE);
	val |= VFUNIT_CLKGATE_DIS;
	I915_WRITE(UNSLICE_UNIT_LEVEL_CLKGATE, val);
}
6714
/* Coffeelake: CNP PCH + gen9 common gating + FBC nuke chicken bit. */
static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	cnp_init_clock_gating(dev_priv);
	gen9_init_clock_gating(dev_priv);

	/* Nuke (recompress) FBC on any plane modification. */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
6724
/* Kabylake: gen9 common gating + early-stepping workarounds. */
static void kbl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* KBL up through B0: disable SDE unit clock gating. */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
		I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
			   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* KBL up through B0: disable GAM unit clock gating. */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
		I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
			   GEN6_GAMUNIT_CLOCK_GATE_DISABLE);

	/* Nuke (recompress) FBC on any plane modification. */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
6743
/* Skylake: gen9 common gating + SKL-specific FBC workarounds. */
static void skl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* Keep the FBC LLC read path fully open. */
	I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
		   FBC_LLC_FULLY_OPEN);

	/* Nuke (recompress) FBC on any plane modification. */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
6756
/* Broadwell (gen8) clock gating and chicken-bit workarounds. */
static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	/* Give GAM arbiter priority to the "sense of locality" stream. */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* Mask vblank for PSR (SRD) on the DPA. */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);

	/* Mask vblank for DPRS on every pipe. */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(CHICKEN_PIPESL_1(pipe),
			   I915_READ(CHICKEN_PIPESL_1(pipe)) |
			   BDW_DPRS_MASK_VBLANK_SRD);
	}

	/* Disable DS/VS reference count FFME in the FF thread mode reg. */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* Disable SDE unit clock gating. */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* BDW L3 SQC credit split: 30 general / 2 high priority. */
	gen8_set_l3sqc_credits(dev_priv, 30, 2);

	/* Select KVM config-change notification. */
	I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
		   | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);

	lpt_init_clock_gating(dev_priv);

	/* Disable EU TC unit clock gating. */
	I915_WRITE(GEN6_UCGCTL1,
		   I915_READ(GEN6_UCGCTL1) | GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
}
6805
/* Haswell (gen7.5) clock gating and chicken-bit workarounds. */
static void hsw_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* Disable L3 data and global atomics (HSW workaround pair). */
	I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
	I915_WRITE(HSW_ROW_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));

	/* Apply the MBC unit SQ intermediate-message chicken bit. */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	/* Disable VS reference count FFME. */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);

	/* Disable render-cache operation flushes. */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* Re-enable the HiZ raw-stall optimization. */
	I915_WRITE(CACHE_MODE_0_GEN7,
		   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));

	/* Disable the pixel subspan collect optimization. */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/* Select 16x4 WiZ hashing mode. */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/* Enable the sample-C performance chicken bit. */
	I915_WRITE(HALF_SLICE_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));

	/* Give GAM arbiter priority to the "sense of locality" stream. */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	lpt_init_clock_gating(dev_priv);
}
6853
/* Ivybridge (gen7) clock gating and chicken-bit workarounds. */
static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 snpcr;

	I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);

	/* Disable object-end culling in the strips & fans unit. */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* DGMG request-out / done fix disables. */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* GT1 only: enable PSD single-port dispatch. */
	if (IS_IVB_GT1(dev_priv))
		I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
			   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* Disable render-cache operation flushes. */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* RHWO optimization disable in RCC. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* L3 control / chicken-mode programming; GT2 mirrors ROW_CHICKEN2. */
	I915_WRITE(GEN7_L3CNTLREG1,
			GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
		   GEN7_WA_L3_CHICKEN_MODE);
	if (IS_IVB_GT1(dev_priv))
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	else {
		/* GT2 has a second copy of the ROW_CHICKEN2 register. */
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
		I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	}

	/* Re-enable L3SQ URB read CAM matching. */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/* Disable RCZ unit clock gating. */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* Apply the MBC unit SQ intermediate-message chicken bit. */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	g4x_disable_trickle_feed(dev_priv);

	gen7_setup_fixed_func_scheduler(dev_priv);

	/* Deliberately compiled out; kept for reference. */
	if (0) {
		I915_WRITE(CACHE_MODE_0_GEN7,
			   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
	}

	/* Disable the pixel subspan collect optimization. */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/* Select 16x4 WiZ hashing mode. */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/* Set MBC snoop priority to medium. */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= GEN6_MBC_SNPCR_MED;
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	if (!HAS_PCH_NOP(dev_priv))
		cpt_init_clock_gating(dev_priv);

	gen6_check_mch_setup(dev_priv);
}
6948
/* Valleyview clock gating and chicken-bit workarounds. */
static void vlv_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* Disable object-end culling in the strips & fans unit. */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* DGMG request-out / done fix disables. */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* Max PS thread dependency + PSD single-port dispatch. */
	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
				      GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* Disable render-cache operation flushes. */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* Re-enable L3SQ URB read CAM matching. */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/* Disable DOP clock gating. */
	I915_WRITE(GEN7_ROW_CHICKEN2,
		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));

	/* Apply the MBC unit SQ intermediate-message chicken bit. */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	gen7_setup_fixed_func_scheduler(dev_priv);

	/* Disable RCZ unit clock gating. */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* Disable L3 bank 2x clock gating. */
	I915_WRITE(GEN7_UCGCTL4,
		   I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);

	/* Disable the pixel subspan collect optimization. */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/* Select 16x4 WiZ hashing mode. */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/* Program the VLV B0 L3SQC workaround value. */
	I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);

	/* Disable GCFG clock gating in the gunit. */
	I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}
7028
/* Cherryview clock gating and chicken-bit workarounds. */
static void chv_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* Disable DS/VS reference count FFME in the FF thread mode reg. */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	/* Disable the RC semaphore idle message. */
	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* Disable CS unit clock gating. */
	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* Disable SDE unit clock gating. */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* CHV L3 SQC credit split: 38 general / 2 high priority. */
	gen8_set_l3sqc_credits(dev_priv, 38, 2);
}
7056
/* G4x (gen4.5) clock gating setup. */
static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 dspclk_gate;

	I915_WRITE(RENCLK_GATE_D1, 0);
	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
		   GS_UNIT_CLOCK_GATE_DISABLE |
		   CL_UNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(RAMCLK_GATE_D, 0);
	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
		OVRUNIT_CLOCK_GATE_DISABLE |
		OVCUNIT_CLOCK_GATE_DISABLE;
	/* GM45 additionally needs DSS unit clock gating disabled. */
	if (IS_GM45(dev_priv))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);

	/* Disable pipelined render flushes. */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* Disable render-cache operation flushes. */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev_priv);
}
7082
/* i965GM (Crestline) clock gating setup. */
static void i965gm_init_clock_gating(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	intel_uncore_write(uncore, RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	intel_uncore_write(uncore, RENCLK_GATE_D2, 0);
	intel_uncore_write(uncore, DSPCLK_GATE_D, 0);
	intel_uncore_write(uncore, RAMCLK_GATE_D, 0);
	intel_uncore_write16(uncore, DEUC, 0);
	intel_uncore_write(uncore,
			   MI_ARB_STATE,
			   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* Disable render-cache operation flushes. */
	intel_uncore_write(uncore,
			   CACHE_MODE_0,
			   _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}
7101
/* i965G (Broadwater) clock gating setup. */
static void i965g_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* Disable render-cache operation flushes. */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}
7116
/* Gen3 clock gating setup. */
static void gen3_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 dstate = I915_READ(D_STATE);

	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
		DSTATE_DOT_CLOCK_GATING;
	I915_WRITE(D_STATE, dstate);

	/* Pineview only: gate clocks on CX-activity only. */
	if (IS_PINEVIEW(dev_priv))
		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));

	/* Disable flip-done reporting via ECOSKPD. */
	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));

	/* Enable the AGP busy interrupt. */
	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));

	/* Allow memory writes during C3 via the MI arbiter. */
	I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));

	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}
7140
/* i85x clock gating setup. */
static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);

	/* Enable the AGP busy interrupt and drop legacy i830 AGP mode. */
	I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
		   _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
}
7152
/* i830: disable display trickle feed for both display planes. */
static void i830_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
}
7159
/*
 * Apply the platform clock-gating setup selected earlier by
 * intel_init_clock_gating_hooks() via dev_priv->display.
 */
void intel_init_clock_gating(struct drm_i915_private *dev_priv)
{
	dev_priv->display.init_clock_gating(dev_priv);
}
7164
/*
 * Hardware suspend hook: only LPT PCH platforms need extra work here.
 */
void intel_suspend_hw(struct drm_i915_private *dev_priv)
{
	if (!HAS_PCH_LPT(dev_priv))
		return;

	lpt_suspend_hw(dev_priv);
}
7170
/*
 * Fallback init_clock_gating hook installed when no platform match is
 * found; only logs that nothing was applied.
 */
static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
{
	drm_dbg_kms(&dev_priv->drm,
		    "No clock gating settings or workarounds applied.\n");
}
7176
7177
7178
7179
7180
7181
7182
7183
7184
7185
/*
 * Select the platform-specific init_clock_gating implementation.
 *
 * NOTE(review): the order of these checks matters — specific platform
 * checks (e.g. IS_G4X, IS_I965GM) must precede the broader IS_GEN
 * checks that would also match them. Do not reorder. Unknown devices
 * fall through to nop_init_clock_gating with a MISSING_CASE warning.
 */
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
	if (IS_GEN(dev_priv, 12))
		dev_priv->display.init_clock_gating = tgl_init_clock_gating;
	else if (IS_GEN(dev_priv, 11))
		dev_priv->display.init_clock_gating = icl_init_clock_gating;
	else if (IS_CANNONLAKE(dev_priv))
		dev_priv->display.init_clock_gating = cnl_init_clock_gating;
	else if (IS_COFFEELAKE(dev_priv))
		dev_priv->display.init_clock_gating = cfl_init_clock_gating;
	else if (IS_SKYLAKE(dev_priv))
		dev_priv->display.init_clock_gating = skl_init_clock_gating;
	else if (IS_KABYLAKE(dev_priv))
		dev_priv->display.init_clock_gating = kbl_init_clock_gating;
	else if (IS_BROXTON(dev_priv))
		dev_priv->display.init_clock_gating = bxt_init_clock_gating;
	else if (IS_GEMINILAKE(dev_priv))
		dev_priv->display.init_clock_gating = glk_init_clock_gating;
	else if (IS_BROADWELL(dev_priv))
		dev_priv->display.init_clock_gating = bdw_init_clock_gating;
	else if (IS_CHERRYVIEW(dev_priv))
		dev_priv->display.init_clock_gating = chv_init_clock_gating;
	else if (IS_HASWELL(dev_priv))
		dev_priv->display.init_clock_gating = hsw_init_clock_gating;
	else if (IS_IVYBRIDGE(dev_priv))
		dev_priv->display.init_clock_gating = ivb_init_clock_gating;
	else if (IS_VALLEYVIEW(dev_priv))
		dev_priv->display.init_clock_gating = vlv_init_clock_gating;
	else if (IS_GEN(dev_priv, 6))
		dev_priv->display.init_clock_gating = gen6_init_clock_gating;
	else if (IS_GEN(dev_priv, 5))
		dev_priv->display.init_clock_gating = ilk_init_clock_gating;
	else if (IS_G4X(dev_priv))
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	else if (IS_I965GM(dev_priv))
		dev_priv->display.init_clock_gating = i965gm_init_clock_gating;
	else if (IS_I965G(dev_priv))
		dev_priv->display.init_clock_gating = i965g_init_clock_gating;
	else if (IS_GEN(dev_priv, 3))
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
	else if (IS_GEN(dev_priv, 2))
		dev_priv->display.init_clock_gating = i830_init_clock_gating;
	else {
		MISSING_CASE(INTEL_DEVID(dev_priv));
		dev_priv->display.init_clock_gating = nop_init_clock_gating;
	}
}
7235
7236
/*
 * Set up the watermark machinery for the device: read memory
 * frequency / latency values where needed and install the
 * platform-specific watermark vfuncs in dev_priv->display.
 *
 * NOTE(review): the branch order is significant — newer/more specific
 * platforms are matched before broader IS_GEN checks. On ILK-class
 * hardware with unreadable latency values, and on Pineview with an
 * unknown CxSR latency, watermark hooks are left unset and CxSR is
 * effectively disabled rather than programmed with bad data.
 */
void intel_init_pm(struct drm_i915_private *dev_priv)
{
	/* Memory frequency is needed for CxSR latency lookup below. */
	if (IS_PINEVIEW(dev_priv))
		pnv_get_mem_freq(dev_priv);
	else if (IS_GEN(dev_priv, 5))
		ilk_get_mem_freq(dev_priv);

	if (intel_has_sagv(dev_priv))
		skl_setup_sagv_block_time(dev_priv);

	/* Install per-platform watermark hooks. */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_setup_wm_latency(dev_priv);
		dev_priv->display.initial_watermarks = skl_initial_wm;
		dev_priv->display.atomic_update_watermarks = skl_atomic_update_crtc_wm;
		dev_priv->display.compute_global_watermarks = skl_compute_wm;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_setup_wm_latency(dev_priv);

		/* Only wire up ILK watermarks if the latency values read
		 * back non-zero; gen5 uses slot [1], later gens slot [0].
		 */
		if ((IS_GEN(dev_priv, 5) && dev_priv->wm.pri_latency[1] &&
		     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
		    (!IS_GEN(dev_priv, 5) && dev_priv->wm.pri_latency[0] &&
		     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
			dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
			dev_priv->display.compute_intermediate_wm =
				ilk_compute_intermediate_wm;
			dev_priv->display.initial_watermarks =
				ilk_initial_watermarks;
			dev_priv->display.optimize_watermarks =
				ilk_optimize_watermarks;
		} else {
			drm_dbg_kms(&dev_priv->drm,
				    "Failed to read display plane latency. "
				    "Disable CxSR\n");
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_setup_wm_latency(dev_priv);
		dev_priv->display.compute_pipe_wm = vlv_compute_pipe_wm;
		dev_priv->display.compute_intermediate_wm = vlv_compute_intermediate_wm;
		dev_priv->display.initial_watermarks = vlv_initial_watermarks;
		dev_priv->display.optimize_watermarks = vlv_optimize_watermarks;
		dev_priv->display.atomic_update_watermarks = vlv_atomic_update_fifo;
	} else if (IS_G4X(dev_priv)) {
		g4x_setup_wm_latency(dev_priv);
		dev_priv->display.compute_pipe_wm = g4x_compute_pipe_wm;
		dev_priv->display.compute_intermediate_wm = g4x_compute_intermediate_wm;
		dev_priv->display.initial_watermarks = g4x_initial_watermarks;
		dev_priv->display.optimize_watermarks = g4x_optimize_watermarks;
	} else if (IS_PINEVIEW(dev_priv)) {
		/* CxSR latency comes from a lookup table; an unknown
		 * DDR/FSB/mem-freq combination disables CxSR entirely.
		 */
		if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			drm_info(&dev_priv->drm,
				 "failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			intel_set_memory_cxsr(dev_priv, false);
			dev_priv->display.update_wm = NULL;
		} else
			dev_priv->display.update_wm = pnv_update_wm;
	} else if (IS_GEN(dev_priv, 4)) {
		dev_priv->display.update_wm = i965_update_wm;
	} else if (IS_GEN(dev_priv, 3)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
	} else if (IS_GEN(dev_priv, 2)) {
		/* Single-pipe gen2 parts use the i845 FIFO layout. */
		if (INTEL_NUM_PIPES(dev_priv) == 1) {
			dev_priv->display.update_wm = i845_update_wm;
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		} else {
			dev_priv->display.update_wm = i9xx_update_wm;
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
		}
	} else {
		drm_err(&dev_priv->drm,
			"unexpected fall-through in %s\n", __func__);
	}
}
7320
7321void intel_pm_setup(struct drm_i915_private *dev_priv)
7322{
7323 dev_priv->runtime_pm.suspended = false;
7324 atomic_set(&dev_priv->runtime_pm.wakeref_count, 0);
7325}
7326