/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OF THE
 * SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */

#include <linux/module.h>
#include <linux/pm_runtime.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>

#include "display/intel_atomic.h"
#include "display/intel_display_types.h"
#include "display/intel_fbc.h"
#include "display/intel_sprite.h"

#include "gt/intel_llc.h"

#include "i915_drv.h"
#include "i915_fixed.h"
#include "i915_irq.h"
#include "i915_trace.h"
#include "intel_pm.h"
#include "intel_sideband.h"
#include "../../../platform/x86/intel_ips.h"

/* Stores plane specific WM parameters */
struct skl_wm_params {
	bool x_tiled, y_tiled;
	bool rc_surface;
	bool is_planar;
	u32 width;
	u8 cpp;
	u32 plane_pixel_rate;
	u32 y_min_scanlines;
	u32 plane_bytes_per_line;
	uint_fixed_16_16_t plane_blocks_per_line;
	uint_fixed_16_16_t y_tile_minimum;
	u32 linetime_us;
	u32 dbuf_block_size;
};

/* used in computing the new watermarks state */
struct intel_wm_config {
	unsigned int num_pipes_active;
	bool sprites_enabled;
	bool sprites_scaled;
};

static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
	if (HAS_LLC(dev_priv)) {
		/*
		 * WaCompressedResourceDisplayNewHashMode:skl,kbl
		 * Display WA #0390: skl,kbl
		 *
		 * Must match Sampler, Pixel Back End, and Media. See
		 * WaCompressedResourceSamplerPbeMediaNewHashMode.
		 */
		I915_WRITE(CHICKEN_PAR1_1,
			   I915_READ(CHICKEN_PAR1_1) |
			   SKL_DE_COMPRESSED_HASH_MODE);
	}

	/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl,cfl */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);

	/* WaEnableChickenDCPR:skl,bxt,kbl,glk,cfl */
	I915_WRITE(GEN8_CHICKEN_DCPR_1,
		   I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);

	/* WaFbcTurnOffFbcWatermark:skl,bxt,kbl,cfl */
	/* WaFbcWakeMemOn:skl,bxt,kbl,glk,cfl */
	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
		   DISP_FBC_WM_DIS |
		   DISP_FBC_MEMORY_WAKE);

	/* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl,cfl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_DISABLE_DUMMY0);

	if (IS_SKYLAKE(dev_priv)) {
		/* WaDisableDopClockGating */
		I915_WRITE(GEN7_MISCCPCTL, I915_READ(GEN7_MISCCPCTL)
			   & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	}
}

static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* WaDisableSDEUnitClockGating:bxt */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * FIXME:
	 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
	 */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);

	/*
	 * Wa: Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
		   PWM1_GATING_DIS | PWM2_GATING_DIS);

	/*
	 * Lower the display internal timeout.
	 * This is needed to avoid any hard hangs when DSI port PLL
	 * is off and a MMIO access is attempted by any privilege
	 * application, using batch buffers or any other means.
	 */
	I915_WRITE(RM_TIMEOUT, MMIO_TIMEOUT_US(950));
}

static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/*
	 * WaDisablePWMClockGating:glk
	 * Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
		   PWM1_GATING_DIS | PWM2_GATING_DIS);
}

static void pnv_get_mem_freq(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533;
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800;
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667;
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400;
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect whether the memory is DDR3 */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}

static void ilk_get_mem_freq(struct drm_i915_private *dev_priv)
{
	u16 ddrpll, csipll;

	ddrpll = intel_uncore_read16(&dev_priv->uncore, DDRMPLL1);
	csipll = intel_uncore_read16(&dev_priv->uncore, CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		drm_dbg(&dev_priv->drm, "unknown memory frequency 0x%02x\n",
			ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		drm_dbg(&dev_priv->drm, "unknown fsb frequency 0x%04x\n",
			csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}
}

static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};

static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop,
							 bool is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}

static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	vlv_punit_get(dev_priv);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
	if (enable)
		val &= ~FORCE_DDR_HIGH_FREQ;
	else
		val |= FORCE_DDR_HIGH_FREQ;
	val &= ~FORCE_DDR_LOW_FREQ;
	val |= FORCE_DDR_FREQ_REQ_ACK;
	vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
		      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
		drm_err(&dev_priv->drm,
			"timed out waiting for Punit DDR DVFS request\n");

	vlv_punit_put(dev_priv);
}

static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	vlv_punit_get(dev_priv);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
	if (enable)
		val |= DSP_MAXFIFO_PM5_ENABLE;
	else
		val &= ~DSP_MAXFIFO_PM5_ENABLE;
	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);

	vlv_punit_put(dev_priv);
}

#define FW_WM(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)

static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	bool was_enabled;
	u32 val;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		was_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
		POSTING_READ(FW_BLC_SELF_VLV);
	} else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
		was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_PINEVIEW(dev_priv)) {
		val = I915_READ(DSPFW3);
		was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
		if (enable)
			val |= PINEVIEW_SELF_REFRESH_EN;
		else
			val &= ~PINEVIEW_SELF_REFRESH_EN;
		I915_WRITE(DSPFW3, val);
		POSTING_READ(DSPFW3);
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
		was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		I915_WRITE(FW_BLC_SELF, val);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_I915GM(dev_priv)) {
		/*
		 * FIXME can't find a bit like this for 915G, and
		 * yet it does have the related watermark in
		 * FW_BLC_SELF. What's going on?
		 */
		was_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		I915_WRITE(INSTPM, val);
		POSTING_READ(INSTPM);
	} else {
		return false;
	}

	trace_intel_memory_cxsr(dev_priv, was_enabled, enable);

	drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n",
		    enableddisabled(enable),
		    enableddisabled(was_enabled));

	return was_enabled;
}

/**
 * intel_set_memory_cxsr - Configure CxSR state
 * @dev_priv: i915 device
 * @enable: Allow vs. disallow CxSR
 *
 * Allow or disallow the system to enter a special CxSR
 * (C-state self refresh) state. While self refresh is active
 * main memory goes into a low power state and the display
 * continues to scan out from its FIFO, so the watermarks have
 * to be programmed such that the FIFO never underruns.
 *
 * On VLV/CHV and G4X the software watermark state
 * (dev_priv->wm.{vlv,g4x}.cxsr) is updated as well, under
 * wm.wm_mutex, so that the watermark computation knows whether
 * CxSR is currently allowed.
 *
 * Returns:
 * Whether CxSR was previously enabled.
 */
bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	bool ret;

	mutex_lock(&dev_priv->wm.wm_mutex);
	ret = _intel_set_memory_cxsr(dev_priv, enable);
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->wm.vlv.cxsr = enable;
	else if (IS_G4X(dev_priv))
		dev_priv->wm.g4x.cxsr = enable;
	mutex_unlock(&dev_priv->wm.wm_mutex);

	return ret;
}

/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value.  It's a tradeoff between extra memory fetches vs. CPU
 * overhead. This value is mostly used for periods of time where the screen
 * is being updated frequently, so assuming a higher latency simply makes
 * the computed watermarks a bit more conservative.
 */
static const int pessimal_latency_ns = 5000;

#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
	((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
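
/*
 * Illustrative note (editor-added, not from Bspec): each FIFO start
 * offset is a 9 bit value split across two registers - the low 8 bits
 * sit at 'lo_shift' in the first register, the 9th bit at 'hi_shift'
 * in the second. E.g. for pipe B sprite0 (lo_shift=16, hi_shift=8) a
 * start offset of 0x1a0 reads back as 0xa0 in bits 23:16 of DSPARB
 * and 0x1 in bit 8 of DSPARB2.
 */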

static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
	enum pipe pipe = crtc->pipe;
	int sprite0_start, sprite1_start;
	u32 dsparb, dsparb2, dsparb3;

	switch (pipe) {
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
		break;
	case PIPE_C:
		dsparb2 = I915_READ(DSPARB2);
		dsparb3 = I915_READ(DSPARB3);
		sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
		sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
		break;
	default:
		MISSING_CASE(pipe);
		return;
	}

	fifo_state->plane[PLANE_PRIMARY] = sprite0_start;
	fifo_state->plane[PLANE_SPRITE0] = sprite1_start - sprite0_start;
	fifo_state->plane[PLANE_SPRITE1] = 511 - sprite1_start;
	fifo_state->plane[PLANE_CURSOR] = 63;
}

static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
			      enum i9xx_plane_id i9xx_plane)
{
	u32 dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (i9xx_plane == PLANE_B)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
		    dsparb, plane_name(i9xx_plane), size);

	return size;
}

static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
			      enum i9xx_plane_id i9xx_plane)
{
	u32 dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (i9xx_plane == PLANE_B)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
		    dsparb, plane_name(i9xx_plane), size);

	return size;
}

static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
			      enum i9xx_plane_id i9xx_plane)
{
	u32 dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
		    dsparb, plane_name(i9xx_plane), size);

	return size;
}

/* Pineview has different values for various configs */
static const struct intel_watermark_params pnv_display_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pnv_display_hplloff_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pnv_cursor_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pnv_cursor_hplloff_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i965_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i945_wm_info = {
	.fifo_size = I945_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i915_wm_info = {
	.fifo_size = I915_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i830_a_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i830_bc_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM / 2,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i845_wm_info = {
	.fifo_size = I830_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

/**
 * intel_wm_method1 - Method 1 / "small buffer" watermark formula
 * @pixel_rate: pipe pixel rate in kHz
 * @cpp: plane bytes per pixel
 * @latency: memory wakeup latency in 0.1us units
 *
 * Compute the watermark using the method 1 or "small buffer"
 * formula. The caller may additionally add extra cachelines
 * to account for TLB misses and clock crossings.
 *
 * This method is concerned with the short term drain rate
 * of the FIFO, ie. it does not account for blanking periods
 * which would effectively reduce the average drain rate across
 * a longer period. The name "small" refers to the fact the
 * FIFO is relatively small compared to the amount of data
 * fetched.
 *
 * The FIFO level vs. time graph might look something like:
 *
 *   |\   |\
 *   | \  | \
 * __---__---__ (- plane active, _ blanking)
 * -> time
 *
 * or perhaps like this:
 *
 *  |\|\  |\|\
 * __----__----__ (- plane active, _ blanking)
 * -> time
 *
 * Returns:
 * The watermark in bytes
 */
static unsigned int intel_wm_method1(unsigned int pixel_rate,
				     unsigned int cpp,
				     unsigned int latency)
{
	u64 ret;

	ret = mul_u32_u32(pixel_rate, cpp * latency);
	ret = DIV_ROUND_UP_ULL(ret, 10000);

	return ret;
}
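
/*
 * Worked example (editor-added, illustrative numbers only): a 148500 kHz
 * pixel clock with 4 bytes per pixel and a 5 usec wakeup latency
 * (latency == 50 in 0.1 usec units) gives
 * 148500 * 4 * 50 / 10000 = 2970 bytes drained while the memory wakes
 * up, which is the FIFO depth method 1 asks for.
 */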

/**
 * intel_wm_method2 - Method 2 / "large buffer" watermark formula
 * @pixel_rate: pipe pixel rate in kHz
 * @htotal: pipe horizontal total (in pixels)
 * @width: plane width in pixels
 * @cpp: plane bytes per pixel
 * @latency: memory wakeup latency in 0.1us units
 *
 * Compute the watermark using the method 2 or "large buffer"
 * formula. The caller may additionally add extra cachelines
 * to account for TLB misses and clock crossings.
 *
 * This method is concerned with the long term drain rate
 * of the FIFO, ie. it does account for blanking periods
 * which effectively reduce the average drain rate across
 * a longer period. The name "large" refers to the fact the
 * FIFO is relatively large compared to the amount of data
 * fetched.
 *
 * The FIFO level vs. time graph might look something like:
 *
 *    |\___       |\___
 *    |    \___   |    \___
 *    |        \  |        \
 * __ --__--__--__--__--__--__ (- plane active, _ blanking)
 * -> time
 *
 * Returns:
 * The watermark in bytes
 */
static unsigned int intel_wm_method2(unsigned int pixel_rate,
				     unsigned int htotal,
				     unsigned int width,
				     unsigned int cpp,
				     unsigned int latency)
{
	unsigned int ret;

	/*
	 * FIXME remove once all users are computing
	 * the watermark in the correct place.
	 */
	if (WARN_ON_ONCE(htotal == 0))
		htotal = 1;

	ret = (latency * pixel_rate) / (htotal * 10000);
	ret = (ret + 1) * width * cpp;

	return ret;
}
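
/*
 * Worked example (editor-added, illustrative numbers only): with a
 * 148500 kHz pixel clock and htotal == 2200 one scanline takes
 * ~14.8 usec, so a 5 usec latency (latency == 50) spans 0 complete
 * lines; method 2 then rounds up to (0 + 1) * 1920 * 4 = 7680 bytes
 * for a 1920 pixel wide 32bpp plane.
 */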

/**
 * intel_calculate_wm - calculate watermark level
 * @pixel_rate: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO buffer
 * @cpp: bytes per pixel
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again).  Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size of the display.  So if the pixel rate of the display is
 * high enough that it requires a higher than average fetch rate, the FIFO
 * will be drained more quickly, and the watermark has to leave enough
 * headroom to cover the memory latency.
 */
static unsigned int intel_calculate_wm(int pixel_rate,
				       const struct intel_watermark_params *wm,
				       int fifo_size, int cpp,
				       unsigned int latency_ns)
{
	int entries, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries = intel_wm_method1(pixel_rate, cpp,
				   latency_ns / 100);
	entries = DIV_ROUND_UP(entries, wm->cacheline_size) +
		wm->guard_size;
	DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries);

	wm_size = fifo_size - entries;
	DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	/*
	 * Bspec seems to indicate that the value shouldn't be lower than
	 * 'burst size + 1'. Certainly 830 is quite bad with the lowest
	 * values, so clamp the watermark to a minimum of 8 entries
	 * (presumably 'burst size + 1') to be on the safe side.
	 */
	if (wm_size <= 8)
		wm_size = 8;

	return wm_size;
}
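
/*
 * Worked example (editor-added; the 96 entry FIFO size is hypothetical):
 * 2970 bytes drained during a 5 usec latency becomes
 * DIV_ROUND_UP(2970, 64) + 2 = 49 FIFO entries with 64 byte cachelines
 * and a guard size of 2, leaving a watermark level of 96 - 49 = 47
 * entries before the display must start fetching again.
 */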

static bool is_disabling(int old, int new, int threshold)
{
	return old >= threshold && new < threshold;
}

static bool is_enabling(int old, int new, int threshold)
{
	return old < threshold && new >= threshold;
}

static int intel_wm_num_levels(struct drm_i915_private *dev_priv)
{
	return dev_priv->wm.max_level + 1;
}

static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
				   const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);

	/* FIXME check the 'enable' instead */
	if (!crtc_state->hw.active)
		return false;

	/*
	 * Treat cursor with fb as always visible since cursor updates
	 * can happen faster than the vrefresh rate, and the current
	 * watermark code doesn't handle that correctly. Cursor updates
	 * which set/clear the fb or change the cursor size are going
	 * to get throttled by intel_legacy_cursor_update() to work
	 * around this problem with the watermark code.
	 */
	if (plane->id == PLANE_CURSOR)
		return plane_state->hw.fb != NULL;
	else
		return plane_state->uapi.visible;
}

static bool intel_crtc_active(struct intel_crtc *crtc)
{
	/* Be paranoid as we can arrive here with only partial
	 * state retrieved from the hardware during setup.
	 *
	 * We can ditch the adjusted_mode.crtc_clock check as soon
	 * as Haswell has gained clock readout/fastboot support.
	 *
	 * We can ditch the crtc->primary->state->fb check as soon as we can
	 * properly reconstruct framebuffers.
	 *
	 * FIXME: The intel_crtc->active here should be switched to
	 * crtc->state->active once we have proper CRTC states wired up
	 * for atomic.
	 */
	return crtc->active && crtc->base.primary->state->fb &&
		crtc->config->hw.adjusted_mode.crtc_clock;
}

static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc, *enabled = NULL;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}

static void pnv_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	struct intel_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned int wm;

	latency = intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
					 dev_priv->is_ddr3,
					 dev_priv->fsb_freq,
					 dev_priv->mem_freq);
	if (!latency) {
		drm_dbg_kms(&dev_priv->drm,
			    "Unknown FSB/MEM found, disable CxSR\n");
		intel_set_memory_cxsr(dev_priv, false);
		return;
	}

	crtc = single_enabled_crtc(dev_priv);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->hw.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp = fb->format->cpp[0];
		int clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pnv_display_wm,
					pnv_display_wm.fifo_size,
					cpp, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= FW_WM(wm, SR);
		I915_WRITE(DSPFW1, reg);
		drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pnv_cursor_wm,
					pnv_display_wm.fifo_size,
					4, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= FW_WM(wm, CURSOR_SR);
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pnv_display_hplloff_wm,
					pnv_display_hplloff_wm.fifo_size,
					cpp, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= FW_WM(wm, HPLL_SR);
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pnv_cursor_hplloff_wm,
					pnv_display_hplloff_wm.fifo_size,
					4, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= FW_WM(wm, HPLL_CURSOR);
		I915_WRITE(DSPFW3, reg);
		drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg);

		intel_set_memory_cxsr(dev_priv, true);
	} else {
		intel_set_memory_cxsr(dev_priv, false);
	}
}

/*
 * Documentation says:
 * "If the line size is small, the TLB fetches can get in the way of the
 *  data fetches, causing some lag in the pixel data return which is not
 *  accounted for in the above formulas. The following adjustment only
 *  needs to be applied if eight whole lines fit in the buffer at once.
 *  The WM is adjusted upwards by the difference between the FIFO size
 *  and the size of 8 whole lines. This adjustment is always performed
 *  in the actual pixel depth regardless of whether FBC is enabled or not."
 */
static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
{
	int tlb_miss = fifo_size * 64 - width * cpp * 8;

	return max(0, tlb_miss);
}
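
/*
 * Worked example (editor-added, illustrative numbers only): a 511 entry
 * FIFO holds 511 * 64 = 32704 bytes. Eight whole lines of a 640 pixel
 * wide 32bpp plane need 8 * 640 * 4 = 20480 bytes, so the watermark
 * grows by 32704 - 20480 = 12224 bytes. For a 1920 pixel wide plane
 * eight lines no longer fit and the adjustment clamps to zero.
 */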

static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
				const struct g4x_wm_values *wm)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		trace_g4x_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);

	I915_WRITE(DSPFW1,
		   FW_WM(wm->sr.plane, SR) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
	I915_WRITE(DSPFW2,
		   (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
		   FW_WM(wm->sr.fbc, FBC_SR) |
		   FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
	I915_WRITE(DSPFW3,
		   (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
		   FW_WM(wm->sr.cursor, CURSOR_SR) |
		   FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
		   FW_WM(wm->hpll.plane, HPLL_SR));

	POSTING_READ(DSPFW1);
}

#define FW_WM_VLV(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)

static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
				const struct vlv_wm_values *wm)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		trace_vlv_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);

		I915_WRITE(VLV_DDL(pipe),
			   (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
			   (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
			   (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
			   (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
	}

	/*
	 * Zero the (unused) WM1 watermarks, and also clear all the
	 * high order bits so that there are no out of bounds values
	 * present in the registers during the reprogramming.
	 */
	I915_WRITE(DSPHOWM, 0);
	I915_WRITE(DSPHOWM1, 0);
	I915_WRITE(DSPFW4, 0);
	I915_WRITE(DSPFW5, 0);
	I915_WRITE(DSPFW6, 0);

	I915_WRITE(DSPFW1,
		   FW_WM(wm->sr.plane, SR) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
		   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
	I915_WRITE(DSPFW2,
		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
	I915_WRITE(DSPFW3,
		   FW_WM(wm->sr.cursor, CURSOR_SR));

	if (IS_CHERRYVIEW(dev_priv)) {
		I915_WRITE(DSPFW7_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
		I915_WRITE(DSPFW8_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
		I915_WRITE(DSPFW9_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
	} else {
		I915_WRITE(DSPFW7,
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
	}

	POSTING_READ(DSPFW1);
}

#undef FW_WM_VLV

static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	/* all latencies in usec */
	dev_priv->wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
	dev_priv->wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
	dev_priv->wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;

	dev_priv->wm.max_level = G4X_WM_LEVEL_HPLL;
}

static int g4x_plane_fifo_size(enum plane_id plane_id, int level)
{
	/*
	 * DSPARB FIFO totals per watermark level (in FIFO entries),
	 * as implemented by the switch below:
	 *
	 * level    primary   sprite    cursor
	 * normal   127       127       63
	 * SR       511       0         63
	 * HPLL     511       0         63
	 *
	 * The cursor FIFO is fixed. At the SR/HPLL levels only a single
	 * plane (+ cursor) may be enabled, so the primary plane gets the
	 * whole 511 entry FIFO while the sprite gets none.
	 */
	switch (plane_id) {
	case PLANE_CURSOR:
		return 63;
	case PLANE_PRIMARY:
		return level == G4X_WM_LEVEL_NORMAL ? 127 : 511;
	case PLANE_SPRITE0:
		return level == G4X_WM_LEVEL_NORMAL ? 127 : 0;
	default:
		MISSING_CASE(plane_id);
		return 0;
	}
}

static int g4x_fbc_fifo_size(int level)
{
	switch (level) {
	case G4X_WM_LEVEL_SR:
		return 7;
	case G4X_WM_LEVEL_HPLL:
		return 15;
	default:
		MISSING_CASE(level);
		return 0;
	}
}

static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *plane_state,
			  int level)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	unsigned int latency = dev_priv->wm.pri_latency[level] * 10;
	unsigned int clock, htotal, cpp, width, wm;

	if (latency == 0)
		return USHRT_MAX;

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	cpp = plane_state->hw.fb->format->cpp[0];

	/*
	 * WaUse32BppForSRWM:ctg,elk
	 *
	 * The spec fails to list this restriction for the
	 * HPLL watermark, which seems a little strange.
	 * Let's use 32bpp for the HPLL watermark as well.
	 */
	if (IS_GM45(dev_priv) && plane->id == PLANE_PRIMARY &&
	    level != G4X_WM_LEVEL_NORMAL)
		cpp = max(cpp, 4u);

	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;

	width = drm_rect_width(&plane_state->uapi.dst);

	if (plane->id == PLANE_CURSOR) {
		wm = intel_wm_method2(clock, htotal, width, cpp, latency);
	} else if (plane->id == PLANE_PRIMARY &&
		   level == G4X_WM_LEVEL_NORMAL) {
		wm = intel_wm_method1(clock, cpp, latency);
	} else {
		unsigned int small, large;

		small = intel_wm_method1(clock, cpp, latency);
		large = intel_wm_method2(clock, htotal, width, cpp, latency);

		wm = min(small, large);
	}

	wm += g4x_tlb_miss_wa(g4x_plane_fifo_size(plane->id, level),
			      width, cpp);

	wm = DIV_ROUND_UP(wm, 64) + 2;

	return min_t(unsigned int, wm, USHRT_MAX);
}

static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
				 int level, enum plane_id plane_id, u16 value)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	bool dirty = false;

	for (; level < intel_wm_num_levels(dev_priv); level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];

		dirty |= raw->plane[plane_id] != value;
		raw->plane[plane_id] = value;
	}

	return dirty;
}

static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
			       int level, u16 value)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	bool dirty = false;

	/* NORMAL level doesn't have an FBC watermark */
	level = max(level, G4X_WM_LEVEL_SR);

	for (; level < intel_wm_num_levels(dev_priv); level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];

		dirty |= raw->fbc != value;
		raw->fbc = value;
	}

	return dirty;
}

static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state,
			      u32 pri_val);

static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
				     const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
	enum plane_id plane_id = plane->id;
	bool dirty = false;
	int level;

	if (!intel_wm_plane_visible(crtc_state, plane_state)) {
		dirty |= g4x_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
		if (plane_id == PLANE_PRIMARY)
			dirty |= g4x_raw_fbc_wm_set(crtc_state, 0, 0);
		goto out;
	}

	for (level = 0; level < num_levels; level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
		int wm, max_wm;

		wm = g4x_compute_wm(crtc_state, plane_state, level);
		max_wm = g4x_plane_fifo_size(plane_id, level);

		if (wm > max_wm)
			break;

		dirty |= raw->plane[plane_id] != wm;
		raw->plane[plane_id] = wm;

		if (plane_id != PLANE_PRIMARY ||
		    level == G4X_WM_LEVEL_NORMAL)
			continue;

		wm = ilk_compute_fbc_wm(crtc_state, plane_state,
					raw->plane[plane_id]);
		max_wm = g4x_fbc_fifo_size(level);

		/*
		 * FBC wm is not mandatory as we
		 * can always just disable its use.
		 */
		if (wm > max_wm)
			wm = USHRT_MAX;

		dirty |= raw->fbc != wm;
		raw->fbc = wm;
	}

	/* mark all higher levels as invalid */
	dirty |= g4x_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);

	if (plane_id == PLANE_PRIMARY)
		dirty |= g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);

out:
	if (dirty) {
		drm_dbg_kms(&dev_priv->drm,
			    "%s watermarks: normal=%d, SR=%d, HPLL=%d\n",
			    plane->base.name,
			    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id],
			    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id],
			    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]);

		if (plane_id == PLANE_PRIMARY)
			drm_dbg_kms(&dev_priv->drm,
				    "FBC watermarks: SR=%d, HPLL=%d\n",
				    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc,
				    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc);
	}

	return dirty;
}

static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
				      enum plane_id plane_id, int level)
{
	const struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];

	return raw->plane[plane_id] <= g4x_plane_fifo_size(plane_id, level);
}

static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
				     int level)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (level > dev_priv->wm.max_level)
		return false;

	return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
		g4x_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
		g4x_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
}

/* mark all levels >= level as invalid */
static void g4x_invalidate_wms(struct intel_crtc *crtc,
			       struct g4x_wm_state *wm_state, int level)
{
	if (level <= G4X_WM_LEVEL_NORMAL) {
		enum plane_id plane_id;

		for_each_plane_id_on_crtc(crtc, plane_id)
			wm_state->wm.plane[plane_id] = USHRT_MAX;
	}

	if (level <= G4X_WM_LEVEL_SR) {
		wm_state->cxsr = false;
		wm_state->sr.cursor = USHRT_MAX;
		wm_state->sr.plane = USHRT_MAX;
		wm_state->sr.fbc = USHRT_MAX;
	}

	if (level <= G4X_WM_LEVEL_HPLL) {
		wm_state->hpll_en = false;
		wm_state->hpll.cursor = USHRT_MAX;
		wm_state->hpll.plane = USHRT_MAX;
		wm_state->hpll.fbc = USHRT_MAX;
	}
}

static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->uapi.state);
	struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
	int num_active_planes = hweight8(crtc_state->active_planes &
					 ~BIT(PLANE_CURSOR));
	const struct g4x_pipe_wm *raw;
	const struct intel_plane_state *old_plane_state;
	const struct intel_plane_state *new_plane_state;
	struct intel_plane *plane;
	enum plane_id plane_id;
	int i, level;
	unsigned int dirty = 0;

	for_each_oldnew_intel_plane_in_state(state, plane,
					     old_plane_state,
					     new_plane_state, i) {
		if (new_plane_state->hw.crtc != &crtc->base &&
		    old_plane_state->hw.crtc != &crtc->base)
			continue;

		if (g4x_raw_plane_wm_compute(crtc_state, new_plane_state))
			dirty |= BIT(plane->id);
	}

	if (!dirty)
		return 0;

	level = G4X_WM_LEVEL_NORMAL;
	if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
		goto out;

	raw = &crtc_state->wm.g4x.raw[level];
	for_each_plane_id_on_crtc(crtc, plane_id)
		wm_state->wm.plane[plane_id] = raw->plane[plane_id];

	level = G4X_WM_LEVEL_SR;

	if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
		goto out;

	raw = &crtc_state->wm.g4x.raw[level];
	wm_state->sr.plane = raw->plane[PLANE_PRIMARY];
	wm_state->sr.cursor = raw->plane[PLANE_CURSOR];
	wm_state->sr.fbc = raw->fbc;

	wm_state->cxsr = num_active_planes == BIT(PLANE_PRIMARY);

	level = G4X_WM_LEVEL_HPLL;

	if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
		goto out;

	raw = &crtc_state->wm.g4x.raw[level];
	wm_state->hpll.plane = raw->plane[PLANE_PRIMARY];
	wm_state->hpll.cursor = raw->plane[PLANE_CURSOR];
	wm_state->hpll.fbc = raw->fbc;

	wm_state->hpll_en = wm_state->cxsr;

	level++;

out:
	if (level == G4X_WM_LEVEL_NORMAL)
		return -EINVAL;

	/* invalidate the higher levels */
	g4x_invalidate_wms(crtc, wm_state, level);

	/*
	 * Determine if the FBC watermark(s) can be used. IF
	 * this isn't the case we prefer to disable the FBC
	 * watermark(s) rather than disable the SR/HPLL
	 * level(s) entirely. 'level-1' is the highest valid
	 * level here.
	 */
	wm_state->fbc_en = level > G4X_WM_LEVEL_NORMAL;

	if (level >= G4X_WM_LEVEL_SR &&
	    wm_state->sr.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_SR))
		wm_state->fbc_en = false;
	else if (level >= G4X_WM_LEVEL_HPLL &&
		 wm_state->hpll.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_HPLL))
		wm_state->fbc_en = false;

	return 0;
}

static int g4x_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate;
	const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal;
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(new_crtc_state->uapi.state);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(intel_state, crtc);
	const struct g4x_wm_state *active = &old_crtc_state->wm.g4x.optimal;
	enum plane_id plane_id;

	if (!new_crtc_state->hw.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) {
		*intermediate = *optimal;

		intermediate->cxsr = false;
		intermediate->hpll_en = false;
		goto out;
	}

	intermediate->cxsr = optimal->cxsr && active->cxsr &&
		!new_crtc_state->disable_cxsr;
	intermediate->hpll_en = optimal->hpll_en && active->hpll_en &&
		!new_crtc_state->disable_cxsr;
	intermediate->fbc_en = optimal->fbc_en && active->fbc_en;

	for_each_plane_id_on_crtc(crtc, plane_id) {
		intermediate->wm.plane[plane_id] =
			max(optimal->wm.plane[plane_id],
			    active->wm.plane[plane_id]);

		WARN_ON(intermediate->wm.plane[plane_id] >
			g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL));
	}

	intermediate->sr.plane = max(optimal->sr.plane,
				     active->sr.plane);
	intermediate->sr.cursor = max(optimal->sr.cursor,
				      active->sr.cursor);
	intermediate->sr.fbc = max(optimal->sr.fbc,
				   active->sr.fbc);

	intermediate->hpll.plane = max(optimal->hpll.plane,
				       active->hpll.plane);
	intermediate->hpll.cursor = max(optimal->hpll.cursor,
					active->hpll.cursor);
	intermediate->hpll.fbc = max(optimal->hpll.fbc,
				     active->hpll.fbc);

	WARN_ON((intermediate->sr.plane >
		 g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) ||
		 intermediate->sr.cursor >
		 g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) &&
		intermediate->cxsr);
	WARN_ON((intermediate->sr.plane >
		 g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) ||
		 intermediate->sr.cursor >
		 g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) &&
		intermediate->hpll_en);

	WARN_ON(intermediate->sr.fbc > g4x_fbc_fifo_size(1) &&
		intermediate->fbc_en && intermediate->cxsr);
	WARN_ON(intermediate->hpll.fbc > g4x_fbc_fifo_size(2) &&
		intermediate->fbc_en && intermediate->hpll_en);

out:
	/*
	 * If our intermediate WM are identical to the final WM, then we can
	 * omit the post-vblank programming; only update watermarks twice.
	 */
	if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
		new_crtc_state->wm.need_postvbl_update = true;

	return 0;
}

static void g4x_merge_wm(struct drm_i915_private *dev_priv,
			 struct g4x_wm_values *wm)
{
	struct intel_crtc *crtc;
	int num_active_pipes = 0;

	wm->cxsr = true;
	wm->hpll_en = true;
	wm->fbc_en = true;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;

		if (!crtc->active)
			continue;

		if (!wm_state->cxsr)
			wm->cxsr = false;
		if (!wm_state->hpll_en)
			wm->hpll_en = false;
		if (!wm_state->fbc_en)
			wm->fbc_en = false;

		num_active_pipes++;
	}

	if (num_active_pipes != 1) {
		wm->cxsr = false;
		wm->hpll_en = false;
		wm->fbc_en = false;
	}

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
		enum pipe pipe = crtc->pipe;

		wm->pipe[pipe] = wm_state->wm;
		if (crtc->active && wm->cxsr)
			wm->sr = wm_state->sr;
		if (crtc->active && wm->hpll_en)
			wm->hpll = wm_state->hpll;
	}
}

static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
{
	struct g4x_wm_values *old_wm = &dev_priv->wm.g4x;
	struct g4x_wm_values new_wm = {};

	g4x_merge_wm(dev_priv, &new_wm);

	if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
		return;

	if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
		_intel_set_memory_cxsr(dev_priv, false);

	g4x_write_wm_values(dev_priv, &new_wm);

	if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
		_intel_set_memory_cxsr(dev_priv, true);

	*old_wm = new_wm;
}

static void g4x_initial_watermarks(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	mutex_lock(&dev_priv->wm.wm_mutex);
	crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate;
	g4x_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->wm.wm_mutex);
}

static void g4x_optimize_watermarks(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!crtc_state->wm.need_postvbl_update)
		return;

	mutex_lock(&dev_priv->wm.wm_mutex);
	crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
	g4x_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->wm.wm_mutex);
}

/* latency must be in 0.1us units. */
static unsigned int vlv_wm_method2(unsigned int pixel_rate,
				   unsigned int htotal,
				   unsigned int width,
				   unsigned int cpp,
				   unsigned int latency)
{
	unsigned int ret;

	ret = intel_wm_method2(pixel_rate, htotal,
			       width, cpp, latency);
	ret = DIV_ROUND_UP(ret, 64);

	return ret;
}
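
/*
 * Example (editor-added, illustrative): the 7680 byte method 2 result
 * from the intel_wm_method2() example above becomes
 * DIV_ROUND_UP(7680, 64) = 120 FIFO cachelines on VLV/CHV.
 */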

static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	/* all latencies in usec */
	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;

	dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;

		dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
	}
}

static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
				const struct intel_plane_state *plane_state,
				int level)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	unsigned int clock, htotal, cpp, width, wm;

	if (dev_priv->wm.pri_latency[level] == 0)
		return USHRT_MAX;

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	cpp = plane_state->hw.fb->format->cpp[0];
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	width = crtc_state->pipe_src_w;

	if (plane->id == PLANE_CURSOR) {
		/*
		 * FIXME the formula gives values that are
		 * too big for the cursor FIFO, and hence we
		 * would never be able to use cursors. For
		 * now just hardcode the W/A.
		 */
		wm = 63;
	} else {
		wm = vlv_wm_method2(clock, htotal, width, cpp,
				    dev_priv->wm.pri_latency[level] * 10);
	}

	return min_t(unsigned int, wm, USHRT_MAX);
}

static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
{
	return (active_planes & (BIT(PLANE_SPRITE0) |
				 BIT(PLANE_SPRITE1))) == BIT(PLANE_SPRITE1);
}

static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct g4x_pipe_wm *raw =
		&crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
	struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
	unsigned int active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
	int num_active_planes = hweight8(active_planes);
	const int fifo_size = 511;
	int fifo_extra, fifo_left = fifo_size;
	int sprite0_fifo_extra = 0;
	unsigned int total_rate;
	enum plane_id plane_id;

	/*
	 * When enabling sprite0 after sprite1 has already been enabled
	 * we tend to get an underrun unless sprite0 already has some
	 * FIFO space allocated. Hence we always allocate at least one
	 * cacheline for sprite0 whenever sprite1 is enabled.
	 *
	 * All other plane enable sequences appear immune to this problem.
	 */
	if (vlv_need_sprite0_fifo_workaround(active_planes))
		sprite0_fifo_extra = 1;

	total_rate = raw->plane[PLANE_PRIMARY] +
		raw->plane[PLANE_SPRITE0] +
		raw->plane[PLANE_SPRITE1] +
		sprite0_fifo_extra;

	if (total_rate > fifo_size)
		return -EINVAL;

	if (total_rate == 0)
		total_rate = 1;

	for_each_plane_id_on_crtc(crtc, plane_id) {
		unsigned int rate;

		if ((active_planes & BIT(plane_id)) == 0) {
			fifo_state->plane[plane_id] = 0;
			continue;
		}

		rate = raw->plane[plane_id];
		fifo_state->plane[plane_id] = fifo_size * rate / total_rate;
		fifo_left -= fifo_state->plane[plane_id];
	}

	fifo_state->plane[PLANE_SPRITE0] += sprite0_fifo_extra;
	fifo_left -= sprite0_fifo_extra;

	fifo_state->plane[PLANE_CURSOR] = 63;

	fifo_extra = DIV_ROUND_UP(fifo_left, num_active_planes ?: 1);

	/* spread the remainder evenly */
	for_each_plane_id_on_crtc(crtc, plane_id) {
		int plane_extra;

		if (fifo_left == 0)
			break;

		if ((active_planes & BIT(plane_id)) == 0)
			continue;

		plane_extra = min(fifo_extra, fifo_left);
		fifo_state->plane[plane_id] += plane_extra;
		fifo_left -= plane_extra;
	}

	WARN_ON(active_planes != 0 && fifo_left != 0);

	/* give it all to the first plane if none are active */
	if (active_planes == 0) {
		WARN_ON(fifo_left != fifo_size);
		fifo_state->plane[PLANE_PRIMARY] = fifo_left;
	}

	return 0;
}
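
/*
 * Worked example (editor-added, illustrative numbers only): with PM2
 * raw watermarks of primary=100, sprite0=50 and sprite1 disabled,
 * total_rate is 150 and the 511 entry FIFO is first split
 * proportionally into 511 * 100 / 150 = 340 and 511 * 50 / 150 = 170
 * entries, after which the 1 leftover entry is spread across the
 * active planes.
 */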

/* mark all levels >= level as invalid */
static void vlv_invalidate_wms(struct intel_crtc *crtc,
			       struct vlv_wm_state *wm_state, int level)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	for (; level < intel_wm_num_levels(dev_priv); level++) {
		enum plane_id plane_id;

		for_each_plane_id_on_crtc(crtc, plane_id)
			wm_state->wm[level].plane[plane_id] = USHRT_MAX;

		wm_state->sr[level].cursor = USHRT_MAX;
		wm_state->sr[level].plane = USHRT_MAX;
	}
}

static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
{
	if (wm > fifo_size)
		return USHRT_MAX;
	else
		return fifo_size - wm;
}
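
/*
 * Example (editor-added, illustrative): the "raw" watermarks count how
 * many FIFO entries may drain during the wakeup latency, while the
 * hardware wants the fill level at which to start fetching again. With
 * a 340 entry FIFO share and a raw value of 120, the programmed
 * watermark is 340 - 120 = 220; a raw value exceeding the FIFO size
 * means the level is unusable (USHRT_MAX).
 */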

/*
 * Starting from 'level' set all higher
 * levels to 'value' in the "raw" watermarks.
 */
static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
				 int level, enum plane_id plane_id, u16 value)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	int num_levels = intel_wm_num_levels(dev_priv);
	bool dirty = false;

	for (; level < num_levels; level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];

		dirty |= raw->plane[plane_id] != value;
		raw->plane[plane_id] = value;
	}

	return dirty;
}

static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
				     const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	enum plane_id plane_id = plane->id;
	int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
	int level;
	bool dirty = false;

	if (!intel_wm_plane_visible(crtc_state, plane_state)) {
		dirty |= vlv_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
		goto out;
	}

	for (level = 0; level < num_levels; level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
		int wm = vlv_compute_wm_level(crtc_state, plane_state, level);
		int max_wm = plane_id == PLANE_CURSOR ? 63 : 511;

		if (wm > max_wm)
			break;

		dirty |= raw->plane[plane_id] != wm;
		raw->plane[plane_id] = wm;
	}

	/* mark all higher levels as invalid */
	dirty |= vlv_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);

out:
	if (dirty)
		drm_dbg_kms(&dev_priv->drm,
			    "%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n",
			    plane->base.name,
			    crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
			    crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id],
			    crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]);

	return dirty;
}

static bool vlv_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
				      enum plane_id plane_id, int level)
{
	const struct g4x_pipe_wm *raw =
		&crtc_state->wm.vlv.raw[level];
	const struct vlv_fifo_state *fifo_state =
		&crtc_state->wm.vlv.fifo_state;

	return raw->plane[plane_id] <= fifo_state->plane[plane_id];
}

static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, int level)
{
	return vlv_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
		vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
		vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE1, level) &&
		vlv_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
}

static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->uapi.state);
	struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
	const struct vlv_fifo_state *fifo_state =
		&crtc_state->wm.vlv.fifo_state;
	int num_active_planes = hweight8(crtc_state->active_planes &
					 ~BIT(PLANE_CURSOR));
	bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->uapi);
	const struct intel_plane_state *old_plane_state;
	const struct intel_plane_state *new_plane_state;
	struct intel_plane *plane;
	enum plane_id plane_id;
	int level, ret, i;
	unsigned int dirty = 0;

	for_each_oldnew_intel_plane_in_state(state, plane,
					     old_plane_state,
					     new_plane_state, i) {
		if (new_plane_state->hw.crtc != &crtc->base &&
		    old_plane_state->hw.crtc != &crtc->base)
			continue;

		if (vlv_raw_plane_wm_compute(crtc_state, new_plane_state))
			dirty |= BIT(plane->id);
	}

	/*
	 * DSPARB registers may have been reset due to the
	 * power well being turned off. Make sure we restore
	 * them to a consistent state even if no primary/sprite
	 * planes are initially active.
	 */
	if (needs_modeset)
		crtc_state->fifo_changed = true;

	if (!dirty)
		return 0;

	/* cursor changes don't warrant a FIFO recompute */
	if (dirty & ~BIT(PLANE_CURSOR)) {
		const struct intel_crtc_state *old_crtc_state =
			intel_atomic_get_old_crtc_state(state, crtc);
		const struct vlv_fifo_state *old_fifo_state =
			&old_crtc_state->wm.vlv.fifo_state;

		ret = vlv_compute_fifo(crtc_state);
		if (ret)
			return ret;

		if (needs_modeset ||
		    memcmp(old_fifo_state, fifo_state,
			   sizeof(*fifo_state)) != 0)
			crtc_state->fifo_changed = true;
	}

	/* initially allow all levels */
	wm_state->num_levels = intel_wm_num_levels(dev_priv);
	/*
	 * Note that enabling cxsr with no primary/sprite planes
	 * enabled can wedge the pipe. Hence we only allow cxsr
	 * with exactly one enabled primary/sprite plane.
	 */
	wm_state->cxsr = crtc->pipe != PIPE_C && num_active_planes == 1;

	for (level = 0; level < wm_state->num_levels; level++) {
		const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
		const int sr_fifo_size = INTEL_NUM_PIPES(dev_priv) * 512 - 1;

		if (!vlv_raw_crtc_wm_is_valid(crtc_state, level))
			break;

		for_each_plane_id_on_crtc(crtc, plane_id) {
			wm_state->wm[level].plane[plane_id] =
				vlv_invert_wm_value(raw->plane[plane_id],
						    fifo_state->plane[plane_id]);
		}

		wm_state->sr[level].plane =
			vlv_invert_wm_value(max3(raw->plane[PLANE_PRIMARY],
						 raw->plane[PLANE_SPRITE0],
						 raw->plane[PLANE_SPRITE1]),
					    sr_fifo_size);

		wm_state->sr[level].cursor =
			vlv_invert_wm_value(raw->plane[PLANE_CURSOR],
					    63);
	}

	if (level == 0)
		return -EINVAL;

	/* limit to only levels we can actually handle */
	wm_state->num_levels = level;

	/* invalidate the higher levels */
	vlv_invalidate_wms(crtc, wm_state, level);

	return 0;
}

#define VLV_FIFO(plane, value) \
	(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)

static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_uncore *uncore = &dev_priv->uncore;
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct vlv_fifo_state *fifo_state =
		&crtc_state->wm.vlv.fifo_state;
	int sprite0_start, sprite1_start, fifo_size;
	u32 dsparb, dsparb2, dsparb3;

	if (!crtc_state->fifo_changed)
		return;

	sprite0_start = fifo_state->plane[PLANE_PRIMARY];
	sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start;
	fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start;

	drm_WARN_ON(&dev_priv->drm, fifo_state->plane[PLANE_CURSOR] != 63);
	drm_WARN_ON(&dev_priv->drm, fifo_size != 511);

	trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size);

	/*
	 * uncore.lock serves a double duty here. It allows us to
	 * use the less expensive I915_{READ,WRITE}_FW() functions, and
	 * it protects the DSPARB registers from getting clobbered by
	 * the plane updates done from other pipes, as those also take
	 * uncore.lock around their register accesses. Plain spin_lock()
	 * is sufficient since we're called from the vblank evasion
	 * critical section, where interrupts are already disabled.
	 */
	spin_lock(&uncore->lock);

	switch (crtc->pipe) {
	case PIPE_A:
		dsparb = intel_uncore_read_fw(uncore, DSPARB);
		dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
			    VLV_FIFO(SPRITEB, 0xff));
		dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
			   VLV_FIFO(SPRITEB, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
			     VLV_FIFO(SPRITEB_HI, 0x1));
		dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));

		intel_uncore_write_fw(uncore, DSPARB, dsparb);
		intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
		break;
	case PIPE_B:
		dsparb = intel_uncore_read_fw(uncore, DSPARB);
		dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
			    VLV_FIFO(SPRITED, 0xff));
		dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
			   VLV_FIFO(SPRITED, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
			     VLV_FIFO(SPRITED_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITED_HI, sprite1_start >> 8));

		intel_uncore_write_fw(uncore, DSPARB, dsparb);
		intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
		break;
	case PIPE_C:
		dsparb3 = intel_uncore_read_fw(uncore, DSPARB3);
		dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);

		dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
			     VLV_FIFO(SPRITEF, 0xff));
		dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
			    VLV_FIFO(SPRITEF, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
			     VLV_FIFO(SPRITEF_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));

		intel_uncore_write_fw(uncore, DSPARB3, dsparb3);
		intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
		break;
	default:
		break;
	}

	intel_uncore_posting_read_fw(uncore, DSPARB);

	spin_unlock(&uncore->lock);
}

#undef VLV_FIFO

static int vlv_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate;
	const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal;
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(new_crtc_state->uapi.state);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(intel_state, crtc);
	const struct vlv_wm_state *active = &old_crtc_state->wm.vlv.optimal;
	int level;

	if (!new_crtc_state->hw.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) {
		*intermediate = *optimal;

		intermediate->cxsr = false;
		goto out;
	}

	intermediate->num_levels = min(optimal->num_levels, active->num_levels);
	intermediate->cxsr = optimal->cxsr && active->cxsr &&
		!new_crtc_state->disable_cxsr;

	for (level = 0; level < intermediate->num_levels; level++) {
		enum plane_id plane_id;

		for_each_plane_id_on_crtc(crtc, plane_id) {
			intermediate->wm[level].plane[plane_id] =
				min(optimal->wm[level].plane[plane_id],
				    active->wm[level].plane[plane_id]);
		}

		intermediate->sr[level].plane = min(optimal->sr[level].plane,
						    active->sr[level].plane);
		intermediate->sr[level].cursor = min(optimal->sr[level].cursor,
						     active->sr[level].cursor);
	}

	vlv_invalidate_wms(crtc, intermediate, level);

out:
	/*
	 * If our intermediate WM are identical to the final WM, then we can
	 * omit the post-vblank programming; only update watermarks twice.
	 */
	if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
		new_crtc_state->wm.need_postvbl_update = true;

	return 0;
}
2124
2125static void vlv_merge_wm(struct drm_i915_private *dev_priv,
2126 struct vlv_wm_values *wm)
2127{
2128 struct intel_crtc *crtc;
2129 int num_active_pipes = 0;
2130
2131 wm->level = dev_priv->wm.max_level;
2132 wm->cxsr = true;
2133
2134 for_each_intel_crtc(&dev_priv->drm, crtc) {
2135 const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
2136
2137 if (!crtc->active)
2138 continue;
2139
2140 if (!wm_state->cxsr)
2141 wm->cxsr = false;
2142
2143 num_active_pipes++;
2144 wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
2145 }
2146
2147 if (num_active_pipes != 1)
2148 wm->cxsr = false;
2149
2150 if (num_active_pipes > 1)
2151 wm->level = VLV_WM_LEVEL_PM2;
2152
2153 for_each_intel_crtc(&dev_priv->drm, crtc) {
2154 const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
2155 enum pipe pipe = crtc->pipe;
2156
2157 wm->pipe[pipe] = wm_state->wm[wm->level];
2158 if (crtc->active && wm->cxsr)
2159 wm->sr = wm_state->sr[wm->level];
2160
2161 wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2;
2162 wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2;
2163 wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2;
2164 wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2;
2165 }
2166}
2167
2168static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
2169{
2170 struct vlv_wm_values *old_wm = &dev_priv->wm.vlv;
2171 struct vlv_wm_values new_wm = {};
2172
2173 vlv_merge_wm(dev_priv, &new_wm);
2174
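	/* nothing changed, avoid redundant register writes */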
2175 if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
2176 return;
2177
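	/* drop the deeper power saving modes before touching the watermarks */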
2178 if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
2179 chv_set_memory_dvfs(dev_priv, false);
2180
2181 if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
2182 chv_set_memory_pm5(dev_priv, false);
2183
2184 if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
2185 _intel_set_memory_cxsr(dev_priv, false);
2186
2187 vlv_write_wm_values(dev_priv, &new_wm);
2188
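	/* ...and re-enable them afterwards, in the reverse order */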
2189 if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
2190 _intel_set_memory_cxsr(dev_priv, true);
2191
2192 if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
2193 chv_set_memory_pm5(dev_priv, true);
2194
2195 if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
2196 chv_set_memory_dvfs(dev_priv, true);
2197
2198 *old_wm = new_wm;
2199}
2200
2201static void vlv_initial_watermarks(struct intel_atomic_state *state,
2202 struct intel_crtc *crtc)
2203{
2204 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2205 const struct intel_crtc_state *crtc_state =
2206 intel_atomic_get_new_crtc_state(state, crtc);
2207
2208 mutex_lock(&dev_priv->wm.wm_mutex);
2209 crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
2210 vlv_program_watermarks(dev_priv);
2211 mutex_unlock(&dev_priv->wm.wm_mutex);
2212}
2213
2214static void vlv_optimize_watermarks(struct intel_atomic_state *state,
2215 struct intel_crtc *crtc)
2216{
2217 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2218 const struct intel_crtc_state *crtc_state =
2219 intel_atomic_get_new_crtc_state(state, crtc);
2220
2221 if (!crtc_state->wm.need_postvbl_update)
2222 return;
2223
2224 mutex_lock(&dev_priv->wm.wm_mutex);
2225 crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
2226 vlv_program_watermarks(dev_priv);
2227 mutex_unlock(&dev_priv->wm.wm_mutex);
2228}
2229
2230static void i965_update_wm(struct intel_crtc *unused_crtc)
2231{
2232 struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
2233 struct intel_crtc *crtc;
2234 int srwm = 1;
2235 int cursor_sr = 16;
2236 bool cxsr_enabled;
2237
	/* Calc sr entries for one plane configs */
2239 crtc = single_enabled_crtc(dev_priv);
2240 if (crtc) {
		/* self-refresh has much higher latency */
2242 static const int sr_latency_ns = 12000;
2243 const struct drm_display_mode *adjusted_mode =
2244 &crtc->config->hw.adjusted_mode;
2245 const struct drm_framebuffer *fb =
2246 crtc->base.primary->state->fb;
2247 int clock = adjusted_mode->crtc_clock;
2248 int htotal = adjusted_mode->crtc_htotal;
2249 int hdisplay = crtc->config->pipe_src_w;
2250 int cpp = fb->format->cpp[0];
2251 int entries;
2252
2253 entries = intel_wm_method2(clock, htotal,
2254 hdisplay, cpp, sr_latency_ns / 100);
2255 entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
2256 srwm = I965_FIFO_SIZE - entries;
2257 if (srwm < 0)
2258 srwm = 1;
2259 srwm &= 0x1ff;
2260 drm_dbg_kms(&dev_priv->drm,
2261 "self-refresh entries: %d, wm: %d\n",
2262 entries, srwm);
2263
2264 entries = intel_wm_method2(clock, htotal,
2265 crtc->base.cursor->state->crtc_w, 4,
2266 sr_latency_ns / 100);
2267 entries = DIV_ROUND_UP(entries,
2268 i965_cursor_wm_info.cacheline_size) +
2269 i965_cursor_wm_info.guard_size;
2270
2271 cursor_sr = i965_cursor_wm_info.fifo_size - entries;
2272 if (cursor_sr > i965_cursor_wm_info.max_wm)
2273 cursor_sr = i965_cursor_wm_info.max_wm;
2274
2275 drm_dbg_kms(&dev_priv->drm,
2276 "self-refresh watermark: display plane %d "
2277 "cursor %d\n", srwm, cursor_sr);
2278
2279 cxsr_enabled = true;
2280 } else {
2281 cxsr_enabled = false;
		/* Turn off self refresh if both pipes are enabled */
2283 intel_set_memory_cxsr(dev_priv, false);
2284 }
2285
2286 drm_dbg_kms(&dev_priv->drm,
2287 "Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
2288 srwm);
2289
	/* 965 has limited number of watermarks */
2291 I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
2292 FW_WM(8, CURSORB) |
2293 FW_WM(8, PLANEB) |
2294 FW_WM(8, PLANEA));
2295 I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
2296 FW_WM(8, PLANEC_OLD));
	/* update cursor SR watermark */
2298 I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));
2299
2300 if (cxsr_enabled)
2301 intel_set_memory_cxsr(dev_priv, true);
2302}
2303
2304#undef FW_WM
2305
2306static void i9xx_update_wm(struct intel_crtc *unused_crtc)
2307{
2308 struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
2309 const struct intel_watermark_params *wm_info;
2310 u32 fwater_lo;
2311 u32 fwater_hi;
2312 int cwm, srwm = 1;
2313 int fifo_size;
2314 int planea_wm, planeb_wm;
2315 struct intel_crtc *crtc, *enabled = NULL;
2316
2317 if (IS_I945GM(dev_priv))
2318 wm_info = &i945_wm_info;
2319 else if (!IS_GEN(dev_priv, 2))
2320 wm_info = &i915_wm_info;
2321 else
2322 wm_info = &i830_a_wm_info;
2323
2324 fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_A);
2325 crtc = intel_get_crtc_for_plane(dev_priv, PLANE_A);
2326 if (intel_crtc_active(crtc)) {
2327 const struct drm_display_mode *adjusted_mode =
2328 &crtc->config->hw.adjusted_mode;
2329 const struct drm_framebuffer *fb =
2330 crtc->base.primary->state->fb;
2331 int cpp;
2332
2333 if (IS_GEN(dev_priv, 2))
2334 cpp = 4;
2335 else
2336 cpp = fb->format->cpp[0];
2337
2338 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
2339 wm_info, fifo_size, cpp,
2340 pessimal_latency_ns);
2341 enabled = crtc;
2342 } else {
2343 planea_wm = fifo_size - wm_info->guard_size;
2344 if (planea_wm > (long)wm_info->max_wm)
2345 planea_wm = wm_info->max_wm;
2346 }
2347
2348 if (IS_GEN(dev_priv, 2))
2349 wm_info = &i830_bc_wm_info;
2350
2351 fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_B);
2352 crtc = intel_get_crtc_for_plane(dev_priv, PLANE_B);
2353 if (intel_crtc_active(crtc)) {
2354 const struct drm_display_mode *adjusted_mode =
2355 &crtc->config->hw.adjusted_mode;
2356 const struct drm_framebuffer *fb =
2357 crtc->base.primary->state->fb;
2358 int cpp;
2359
2360 if (IS_GEN(dev_priv, 2))
2361 cpp = 4;
2362 else
2363 cpp = fb->format->cpp[0];
2364
2365 planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
2366 wm_info, fifo_size, cpp,
2367 pessimal_latency_ns);
2368 if (enabled == NULL)
2369 enabled = crtc;
2370 else
2371 enabled = NULL;
2372 } else {
2373 planeb_wm = fifo_size - wm_info->guard_size;
2374 if (planeb_wm > (long)wm_info->max_wm)
2375 planeb_wm = wm_info->max_wm;
2376 }
2377
2378 drm_dbg_kms(&dev_priv->drm,
2379 "FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
2380
2381 if (IS_I915GM(dev_priv) && enabled) {
2382 struct drm_i915_gem_object *obj;
2383
2384 obj = intel_fb_obj(enabled->base.primary->state->fb);
2385
		/* self-refresh seems busted with untiled */
2387 if (!i915_gem_object_is_tiled(obj))
2388 enabled = NULL;
2389 }
2390
	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
2394 cwm = 2;
2395
	/* Play safe and disable self-refresh before adjusting watermarks. */
2397 intel_set_memory_cxsr(dev_priv, false);
2398
	/* Calc sr entries for one plane configs */
2400 if (HAS_FW_BLC(dev_priv) && enabled) {
		/* self-refresh has much higher latency */
2402 static const int sr_latency_ns = 6000;
2403 const struct drm_display_mode *adjusted_mode =
2404 &enabled->config->hw.adjusted_mode;
2405 const struct drm_framebuffer *fb =
2406 enabled->base.primary->state->fb;
2407 int clock = adjusted_mode->crtc_clock;
2408 int htotal = adjusted_mode->crtc_htotal;
2409 int hdisplay = enabled->config->pipe_src_w;
2410 int cpp;
2411 int entries;
2412
2413 if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
2414 cpp = 4;
2415 else
2416 cpp = fb->format->cpp[0];
2417
2418 entries = intel_wm_method2(clock, htotal, hdisplay, cpp,
2419 sr_latency_ns / 100);
2420 entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
2421 drm_dbg_kms(&dev_priv->drm,
2422 "self-refresh entries: %d\n", entries);
2423 srwm = wm_info->fifo_size - entries;
2424 if (srwm < 0)
2425 srwm = 1;
2426
2427 if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
2428 I915_WRITE(FW_BLC_SELF,
2429 FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
2430 else
2431 I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
2432 }
2433
2434 drm_dbg_kms(&dev_priv->drm,
2435 "Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
2436 planea_wm, planeb_wm, cwm, srwm);
2437
2438 fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
2439 fwater_hi = (cwm & 0x1f);
2440
	/* Set request length to 8 cachelines per fetch */
2442 fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
2443 fwater_hi = fwater_hi | (1 << 8);
2444
2445 I915_WRITE(FW_BLC, fwater_lo);
2446 I915_WRITE(FW_BLC2, fwater_hi);
2447
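	/* re-enable self-refresh only if a single suitable pipe was found */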
2448 if (enabled)
2449 intel_set_memory_cxsr(dev_priv, true);
2450}
2451
2452static void i845_update_wm(struct intel_crtc *unused_crtc)
2453{
2454 struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
2455 struct intel_crtc *crtc;
2456 const struct drm_display_mode *adjusted_mode;
2457 u32 fwater_lo;
2458 int planea_wm;
2459
2460 crtc = single_enabled_crtc(dev_priv);
2461 if (crtc == NULL)
2462 return;
2463
2464 adjusted_mode = &crtc->config->hw.adjusted_mode;
2465 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
2466 &i845_wm_info,
2467 dev_priv->display.get_fifo_size(dev_priv, PLANE_A),
2468 4, pessimal_latency_ns);
2469 fwater_lo = I915_READ(FW_BLC) & ~0xfff;
2470 fwater_lo |= (3<<8) | planea_wm;
2471
2472 drm_dbg_kms(&dev_priv->drm,
2473 "Setting FIFO watermarks - A: %d\n", planea_wm);
2474
2475 I915_WRITE(FW_BLC, fwater_lo);
2476}
2477
/* latency must be in 0.1us units. */
2479static unsigned int ilk_wm_method1(unsigned int pixel_rate,
2480 unsigned int cpp,
2481 unsigned int latency)
2482{
2483 unsigned int ret;
2484
2485 ret = intel_wm_method1(pixel_rate, cpp, latency);
2486 ret = DIV_ROUND_UP(ret, 64) + 2;
2487
2488 return ret;
2489}
2490
/* latency must be in 0.1us units. */
2492static unsigned int ilk_wm_method2(unsigned int pixel_rate,
2493 unsigned int htotal,
2494 unsigned int width,
2495 unsigned int cpp,
2496 unsigned int latency)
2497{
2498 unsigned int ret;
2499
2500 ret = intel_wm_method2(pixel_rate, htotal,
2501 width, cpp, latency);
2502 ret = DIV_ROUND_UP(ret, 64) + 2;
2503
2504 return ret;
2505}
2506
2507static u32 ilk_wm_fbc(u32 pri_val, u32 horiz_pixels, u8 cpp)
2508{
	/*
	 * Neither of these should be possible since this function shouldn't be
	 * called if the CRTC is off or the plane is invisible.  But let's be
	 * extra paranoid to avoid a potential divide-by-zero if we screw up
	 * driver state.
	 */
2515 if (WARN_ON(!cpp))
2516 return 0;
2517 if (WARN_ON(!horiz_pixels))
2518 return 0;
2519
2520 return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
2521}
2522
2523struct ilk_wm_maximums {
2524 u16 pri;
2525 u16 spr;
2526 u16 cur;
2527 u16 fbc;
2528};
2529
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
2534static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state,
2535 const struct intel_plane_state *plane_state,
2536 u32 mem_value, bool is_lp)
2537{
2538 u32 method1, method2;
2539 int cpp;
2540
2541 if (mem_value == 0)
2542 return U32_MAX;
2543
2544 if (!intel_wm_plane_visible(crtc_state, plane_state))
2545 return 0;
2546
2547 cpp = plane_state->hw.fb->format->cpp[0];
2548
2549 method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
2550
2551 if (!is_lp)
2552 return method1;
2553
2554 method2 = ilk_wm_method2(crtc_state->pixel_rate,
2555 crtc_state->hw.adjusted_mode.crtc_htotal,
2556 drm_rect_width(&plane_state->uapi.dst),
2557 cpp, mem_value);
2558
2559 return min(method1, method2);
2560}
2561
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
2566static u32 ilk_compute_spr_wm(const struct intel_crtc_state *crtc_state,
2567 const struct intel_plane_state *plane_state,
2568 u32 mem_value)
2569{
2570 u32 method1, method2;
2571 int cpp;
2572
2573 if (mem_value == 0)
2574 return U32_MAX;
2575
2576 if (!intel_wm_plane_visible(crtc_state, plane_state))
2577 return 0;
2578
2579 cpp = plane_state->hw.fb->format->cpp[0];
2580
2581 method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
2582 method2 = ilk_wm_method2(crtc_state->pixel_rate,
2583 crtc_state->hw.adjusted_mode.crtc_htotal,
2584 drm_rect_width(&plane_state->uapi.dst),
2585 cpp, mem_value);
2586 return min(method1, method2);
2587}
2588
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
2593static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state,
2594 const struct intel_plane_state *plane_state,
2595 u32 mem_value)
2596{
2597 int cpp;
2598
2599 if (mem_value == 0)
2600 return U32_MAX;
2601
2602 if (!intel_wm_plane_visible(crtc_state, plane_state))
2603 return 0;
2604
2605 cpp = plane_state->hw.fb->format->cpp[0];
2606
2607 return ilk_wm_method2(crtc_state->pixel_rate,
2608 crtc_state->hw.adjusted_mode.crtc_htotal,
2609 drm_rect_width(&plane_state->uapi.dst),
2610 cpp, mem_value);
2611}
2612
/* Only for WM_LP. */
2614static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
2615 const struct intel_plane_state *plane_state,
2616 u32 pri_val)
2617{
2618 int cpp;
2619
2620 if (!intel_wm_plane_visible(crtc_state, plane_state))
2621 return 0;
2622
2623 cpp = plane_state->hw.fb->format->cpp[0];
2624
2625 return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->uapi.dst),
2626 cpp);
2627}
2628
2629static unsigned int
2630ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
2631{
2632 if (INTEL_GEN(dev_priv) >= 8)
2633 return 3072;
2634 else if (INTEL_GEN(dev_priv) >= 7)
2635 return 768;
2636 else
2637 return 512;
2638}
2639
2640static unsigned int
2641ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
2642 int level, bool is_sprite)
2643{
2644 if (INTEL_GEN(dev_priv) >= 8)
		/* BDW primary/sprite plane watermarks */
2646 return level == 0 ? 255 : 2047;
2647 else if (INTEL_GEN(dev_priv) >= 7)
		/* IVB/HSW primary/sprite plane watermarks */
2649 return level == 0 ? 127 : 1023;
2650 else if (!is_sprite)
		/* ILK/SNB primary plane watermarks */
2652 return level == 0 ? 127 : 511;
2653 else
		/* ILK/SNB sprite plane watermarks */
2655 return level == 0 ? 63 : 255;
2656}
2657
2658static unsigned int
2659ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
2660{
2661 if (INTEL_GEN(dev_priv) >= 7)
2662 return level == 0 ? 63 : 255;
2663 else
2664 return level == 0 ? 31 : 63;
2665}
2666
2667static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
2668{
2669 if (INTEL_GEN(dev_priv) >= 8)
2670 return 31;
2671 else
2672 return 15;
2673}
2674
/* Calculate the maximum primary/sprite plane watermark */
2676static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
2677 int level,
2678 const struct intel_wm_config *config,
2679 enum intel_ddb_partitioning ddb_partitioning,
2680 bool is_sprite)
2681{
2682 unsigned int fifo_size = ilk_display_fifo_size(dev_priv);
2683
	/* if sprites aren't enabled, sprites get nothing */
2685 if (is_sprite && !config->sprites_enabled)
2686 return 0;
2687
	/* HSW allows LP1+ watermarks even with multiple pipes */
2689 if (level == 0 || config->num_pipes_active > 1) {
2690 fifo_size /= INTEL_NUM_PIPES(dev_priv);
2691
		/*
		 * For some reason the non self refresh
		 * FIFO size is only half of the self
		 * refresh FIFO size on ILK/SNB.
		 */
2697 if (INTEL_GEN(dev_priv) <= 6)
2698 fifo_size /= 2;
2699 }
2700
2701 if (config->sprites_enabled) {
		/* level 0 is always calculated with 1:1 split */
2703 if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
2704 if (is_sprite)
2705 fifo_size *= 5;
2706 fifo_size /= 6;
2707 } else {
2708 fifo_size /= 2;
2709 }
2710 }
2711
	/* clamp to max that the registers can hold */
2713 return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite));
2714}
2715
/* Calculate the maximum cursor plane watermark */
2717static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv,
2718 int level,
2719 const struct intel_wm_config *config)
2720{
	/* HSW LP1+ watermarks w/ multiple pipes */
2722 if (level > 0 && config->num_pipes_active > 1)
2723 return 64;
2724
	/* otherwise just report max that registers can hold */
2726 return ilk_cursor_wm_reg_max(dev_priv, level);
2727}
2728
2729static void ilk_compute_wm_maximums(const struct drm_i915_private *dev_priv,
2730 int level,
2731 const struct intel_wm_config *config,
2732 enum intel_ddb_partitioning ddb_partitioning,
2733 struct ilk_wm_maximums *max)
2734{
2735 max->pri = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, false);
2736 max->spr = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, true);
2737 max->cur = ilk_cursor_wm_max(dev_priv, level, config);
2738 max->fbc = ilk_fbc_wm_reg_max(dev_priv);
2739}
2740
2741static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
2742 int level,
2743 struct ilk_wm_maximums *max)
2744{
2745 max->pri = ilk_plane_wm_reg_max(dev_priv, level, false);
2746 max->spr = ilk_plane_wm_reg_max(dev_priv, level, true);
2747 max->cur = ilk_cursor_wm_reg_max(dev_priv, level);
2748 max->fbc = ilk_fbc_wm_reg_max(dev_priv);
2749}
2750
2751static bool ilk_validate_wm_level(int level,
2752 const struct ilk_wm_maximums *max,
2753 struct intel_wm_level *result)
2754{
2755 bool ret;
2756
	/* already determined to be invalid? */
2758 if (!result->enable)
2759 return false;
2760
2761 result->enable = result->pri_val <= max->pri &&
2762 result->spr_val <= max->spr &&
2763 result->cur_val <= max->cur;
2764
2765 ret = result->enable;
2766
	/*
	 * HACK until we can pre-compute everything,
	 * and thus fail gracefully if LP0 watermarks
	 * are exceeded...
	 */
2772 if (level == 0 && !result->enable) {
2773 if (result->pri_val > max->pri)
2774 DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
2775 level, result->pri_val, max->pri);
2776 if (result->spr_val > max->spr)
2777 DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
2778 level, result->spr_val, max->spr);
2779 if (result->cur_val > max->cur)
2780 DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
2781 level, result->cur_val, max->cur);
2782
2783 result->pri_val = min_t(u32, result->pri_val, max->pri);
2784 result->spr_val = min_t(u32, result->spr_val, max->spr);
2785 result->cur_val = min_t(u32, result->cur_val, max->cur);
2786 result->enable = true;
2787 }
2788
2789 return ret;
2790}
2791
2792static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
2793 const struct intel_crtc *crtc,
2794 int level,
2795 struct intel_crtc_state *crtc_state,
2796 const struct intel_plane_state *pristate,
2797 const struct intel_plane_state *sprstate,
2798 const struct intel_plane_state *curstate,
2799 struct intel_wm_level *result)
2800{
2801 u16 pri_latency = dev_priv->wm.pri_latency[level];
2802 u16 spr_latency = dev_priv->wm.spr_latency[level];
2803 u16 cur_latency = dev_priv->wm.cur_latency[level];
2804
	/* WM1+ latency values stored in 0.5us units */
2806 if (level > 0) {
2807 pri_latency *= 5;
2808 spr_latency *= 5;
2809 cur_latency *= 5;
2810 }
2811
2812 if (pristate) {
2813 result->pri_val = ilk_compute_pri_wm(crtc_state, pristate,
2814 pri_latency, level);
2815 result->fbc_val = ilk_compute_fbc_wm(crtc_state, pristate, result->pri_val);
2816 }
2817
2818 if (sprstate)
2819 result->spr_val = ilk_compute_spr_wm(crtc_state, sprstate, spr_latency);
2820
2821 if (curstate)
2822 result->cur_val = ilk_compute_cur_wm(crtc_state, curstate, cur_latency);
2823
2824 result->enable = true;
2825}
2826
2827static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
2828 u16 wm[8])
2829{
2830 struct intel_uncore *uncore = &dev_priv->uncore;
2831
2832 if (INTEL_GEN(dev_priv) >= 9) {
2833 u32 val;
2834 int ret, i;
2835 int level, max_level = ilk_wm_max_level(dev_priv);
2836
		/* read the first set of memory latencies[0:3] */
2838 val = 0;
2839 ret = sandybridge_pcode_read(dev_priv,
2840 GEN9_PCODE_READ_MEM_LATENCY,
2841 &val, NULL);
2842
2843 if (ret) {
2844 drm_err(&dev_priv->drm,
2845 "SKL Mailbox read error = %d\n", ret);
2846 return;
2847 }
2848
2849 wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2850 wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2851 GEN9_MEM_LATENCY_LEVEL_MASK;
2852 wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2853 GEN9_MEM_LATENCY_LEVEL_MASK;
2854 wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2855 GEN9_MEM_LATENCY_LEVEL_MASK;
2856
		/* read the second set of memory latencies[4:7] */
2858 val = 1;
2859 ret = sandybridge_pcode_read(dev_priv,
2860 GEN9_PCODE_READ_MEM_LATENCY,
2861 &val, NULL);
2862 if (ret) {
2863 drm_err(&dev_priv->drm,
2864 "SKL Mailbox read error = %d\n", ret);
2865 return;
2866 }
2867
2868 wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2869 wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2870 GEN9_MEM_LATENCY_LEVEL_MASK;
2871 wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2872 GEN9_MEM_LATENCY_LEVEL_MASK;
2873 wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2874 GEN9_MEM_LATENCY_LEVEL_MASK;
2875
		/*
		 * If a level n (n > 1) has a 0us latency, all levels m (m >= n)
		 * need to be disabled. We make sure to sanitize the values out
		 * of the punit to satisfy this requirement.
		 */
2881 for (level = 1; level <= max_level; level++) {
2882 if (wm[level] == 0) {
2883 for (i = level + 1; i <= max_level; i++)
2884 wm[i] = 0;
2885 break;
2886 }
2887 }
2888
		/*
		 * WaWmMemoryReadLatency:skl+,glk
		 *
		 * punit doesn't take into account the read latency so we need
		 * to add 2us to the various latency levels we retrieve from
		 * the punit when level 0 response data is 0us.
		 */
2896 if (wm[0] == 0) {
2897 wm[0] += 2;
2898 for (level = 1; level <= max_level; level++) {
2899 if (wm[level] == 0)
2900 break;
2901 wm[level] += 2;
2902 }
2903 }
2904
		/*
		 * WA Level-0 adjustment for 16Gb DIMMs: the punit reported
		 * latencies can be too low for such configurations, so bump
		 * WM0 by 1us to prevent underruns.
		 */
2911 if (dev_priv->dram_info.is_16gb_dimm)
2912 wm[0] += 1;
2913
2914 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2915 u64 sskpd = intel_uncore_read64(uncore, MCH_SSKPD);
2916
2917 wm[0] = (sskpd >> 56) & 0xFF;
2918 if (wm[0] == 0)
2919 wm[0] = sskpd & 0xF;
2920 wm[1] = (sskpd >> 4) & 0xFF;
2921 wm[2] = (sskpd >> 12) & 0xFF;
2922 wm[3] = (sskpd >> 20) & 0x1FF;
2923 wm[4] = (sskpd >> 32) & 0x1FF;
2924 } else if (INTEL_GEN(dev_priv) >= 6) {
2925 u32 sskpd = intel_uncore_read(uncore, MCH_SSKPD);
2926
2927 wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
2928 wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
2929 wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
2930 wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
2931 } else if (INTEL_GEN(dev_priv) >= 5) {
2932 u32 mltr = intel_uncore_read(uncore, MLTR_ILK);
2933
		/* ILK primary LP0 latency is 700 ns */
2935 wm[0] = 7;
2936 wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
2937 wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
2938 } else {
2939 MISSING_CASE(INTEL_DEVID(dev_priv));
2940 }
2941}
2942
2943static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
2944 u16 wm[5])
2945{
	/* ILK sprite LP0 latency is 1300 ns */
2947 if (IS_GEN(dev_priv, 5))
2948 wm[0] = 13;
2949}
2950
2951static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
2952 u16 wm[5])
2953{
	/* ILK cursor LP0 latency is 1300 ns */
2955 if (IS_GEN(dev_priv, 5))
2956 wm[0] = 13;
2957}
2958
2959int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
2960{
	/* how many WM levels are we expecting */
2962 if (INTEL_GEN(dev_priv) >= 9)
2963 return 7;
2964 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
2965 return 4;
2966 else if (INTEL_GEN(dev_priv) >= 6)
2967 return 3;
2968 else
2969 return 2;
2970}
2971
2972static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
2973 const char *name,
2974 const u16 wm[8])
2975{
2976 int level, max_level = ilk_wm_max_level(dev_priv);
2977
2978 for (level = 0; level <= max_level; level++) {
2979 unsigned int latency = wm[level];
2980
2981 if (latency == 0) {
2982 drm_dbg_kms(&dev_priv->drm,
2983 "%s WM%d latency not provided\n",
2984 name, level);
2985 continue;
2986 }
2987
		/*
		 * - latencies are in us on gen9.
		 * - before then, WM1+ latency values are in 0.5us units
		 */
2992 if (INTEL_GEN(dev_priv) >= 9)
2993 latency *= 10;
2994 else if (level > 0)
2995 latency *= 5;
2996
2997 drm_dbg_kms(&dev_priv->drm,
2998 "%s WM%d latency %u (%u.%u usec)\n", name, level,
2999 wm[level], latency / 10, latency % 10);
3000 }
3001}
3002
3003static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
3004 u16 wm[5], u16 min)
3005{
3006 int level, max_level = ilk_wm_max_level(dev_priv);
3007
3008 if (wm[0] >= min)
3009 return false;
3010
3011 wm[0] = max(wm[0], min);
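	/* WM1+ latency values are in 0.5us units */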
3012 for (level = 1; level <= max_level; level++)
3013 wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5));
3014
3015 return true;
3016}
3017
3018static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
3019{
3020 bool changed;
3021
	/*
	 * The BIOS provided WM memory latency values are often
	 * inadequate for high resolution displays. Adjust them.
	 */
3026 changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
3027 ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
3028 ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
3029
3030 if (!changed)
3031 return;
3032
3033 drm_dbg_kms(&dev_priv->drm,
3034 "WM latency values increased to avoid potential underruns\n");
3035 intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
3036 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
3037 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
3038}
3039
3040static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
3041{
	/*
	 * On some SNB machines (Thinkpad X220 Tablet at least)
	 * LP3 usage can cause vblank interrupts to be lost.
	 * The DEIIR bit will go high but it looks like the CPU
	 * never gets interrupted.
	 *
	 * It's not clear whether other interrupt sources could
	 * be affected or if this is somehow limited to vblank
	 * interrupts only. To play it safe we disable LP3
	 * watermarks entirely.
	 */
3053 if (dev_priv->wm.pri_latency[3] == 0 &&
3054 dev_priv->wm.spr_latency[3] == 0 &&
3055 dev_priv->wm.cur_latency[3] == 0)
3056 return;
3057
3058 dev_priv->wm.pri_latency[3] = 0;
3059 dev_priv->wm.spr_latency[3] = 0;
3060 dev_priv->wm.cur_latency[3] = 0;
3061
3062 drm_dbg_kms(&dev_priv->drm,
3063 "LP3 watermarks disabled due to potential for lost interrupts\n");
3064 intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
3065 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
3066 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
3067}
3068
3069static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
3070{
3071 intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
3072
3073 memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
3074 sizeof(dev_priv->wm.pri_latency));
3075 memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
3076 sizeof(dev_priv->wm.pri_latency));
3077
3078 intel_fixup_spr_wm_latency(dev_priv, dev_priv->wm.spr_latency);
3079 intel_fixup_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency);
3080
3081 intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
3082 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
3083 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
3084
3085 if (IS_GEN(dev_priv, 6)) {
3086 snb_wm_latency_quirk(dev_priv);
3087 snb_wm_lp3_irq_quirk(dev_priv);
3088 }
3089}
3090
3091static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
3092{
3093 intel_read_wm_latency(dev_priv, dev_priv->wm.skl_latency);
3094 intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency);
3095}
3096
3097static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
3098 struct intel_pipe_wm *pipe_wm)
3099{
	/* LP0 watermark maximums depend on this pipe alone */
3101 const struct intel_wm_config config = {
3102 .num_pipes_active = 1,
3103 .sprites_enabled = pipe_wm->sprites_enabled,
3104 .sprites_scaled = pipe_wm->sprites_scaled,
3105 };
3106 struct ilk_wm_maximums max;
3107
	/* LP0 watermarks always use 1/2 DDB partitioning */
3109 ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max);
3110
	/* At least LP0 must be valid */
3112 if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
3113 drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n");
3114 return false;
3115 }
3116
3117 return true;
3118}
3119
/* Compute new watermarks for the pipe */
3121static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
3122{
3123 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3124 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3125 struct intel_pipe_wm *pipe_wm;
3126 struct intel_plane *plane;
3127 const struct intel_plane_state *plane_state;
3128 const struct intel_plane_state *pristate = NULL;
3129 const struct intel_plane_state *sprstate = NULL;
3130 const struct intel_plane_state *curstate = NULL;
3131 int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
3132 struct ilk_wm_maximums max;
3133
3134 pipe_wm = &crtc_state->wm.ilk.optimal;
3135
3136 intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
3137 if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
3138 pristate = plane_state;
3139 else if (plane->base.type == DRM_PLANE_TYPE_OVERLAY)
3140 sprstate = plane_state;
3141 else if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
3142 curstate = plane_state;
3143 }
3144
3145 pipe_wm->pipe_enabled = crtc_state->hw.active;
3146 if (sprstate) {
3147 pipe_wm->sprites_enabled = sprstate->uapi.visible;
3148 pipe_wm->sprites_scaled = sprstate->uapi.visible &&
3149 (drm_rect_width(&sprstate->uapi.dst) != drm_rect_width(&sprstate->uapi.src) >> 16 ||
3150 drm_rect_height(&sprstate->uapi.dst) != drm_rect_height(&sprstate->uapi.src) >> 16);
3151 }
3152
3153 usable_level = max_level;
3154
	/* ILK/SNB: LP2+ watermarks only w/o sprites */
3156 if (INTEL_GEN(dev_priv) <= 6 && pipe_wm->sprites_enabled)
3157 usable_level = 1;
3158
	/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
3160 if (pipe_wm->sprites_scaled)
3161 usable_level = 0;
3162
3163 memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
3164 ilk_compute_wm_level(dev_priv, crtc, 0, crtc_state,
3165 pristate, sprstate, curstate, &pipe_wm->wm[0]);
3166
3167 if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))
3168 return -EINVAL;
3169
3170 ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
3171
3172 for (level = 1; level <= usable_level; level++) {
3173 struct intel_wm_level *wm = &pipe_wm->wm[level];
3174
3175 ilk_compute_wm_level(dev_priv, crtc, level, crtc_state,
3176 pristate, sprstate, curstate, wm);
3177
		/*
		 * Disable any watermark level that exceeds the
		 * register maximums since such watermarks are
		 * always invalid.
		 */
3183 if (!ilk_validate_wm_level(level, &max, wm)) {
3184 memset(wm, 0, sizeof(*wm));
3185 break;
3186 }
3187 }
3188
3189 return 0;
3190}
3191
/*
 * Build a set of 'intermediate' watermark values that satisfy both the
 * old state and the new state. These can be programmed to the hardware
 * immediately, since they are safe in either configuration.
 */
3197static int ilk_compute_intermediate_wm(struct intel_crtc_state *newstate)
3198{
3199 struct intel_crtc *intel_crtc = to_intel_crtc(newstate->uapi.crtc);
3200 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
3201 struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
3202 struct intel_atomic_state *intel_state =
3203 to_intel_atomic_state(newstate->uapi.state);
3204 const struct intel_crtc_state *oldstate =
3205 intel_atomic_get_old_crtc_state(intel_state, intel_crtc);
3206 const struct intel_pipe_wm *b = &oldstate->wm.ilk.optimal;
3207 int level, max_level = ilk_wm_max_level(dev_priv);
3208
	/*
	 * Start with the final, target watermarks, then combine with the
	 * currently active watermarks to get values that are safe both
	 * before and after the vblank.
	 */
3214 *a = newstate->wm.ilk.optimal;
3215 if (!newstate->hw.active || drm_atomic_crtc_needs_modeset(&newstate->uapi) ||
3216 intel_state->skip_intermediate_wm)
3217 return 0;
3218
3219 a->pipe_enabled |= b->pipe_enabled;
3220 a->sprites_enabled |= b->sprites_enabled;
3221 a->sprites_scaled |= b->sprites_scaled;
3222
3223 for (level = 0; level <= max_level; level++) {
3224 struct intel_wm_level *a_wm = &a->wm[level];
3225 const struct intel_wm_level *b_wm = &b->wm[level];
3226
3227 a_wm->enable &= b_wm->enable;
3228 a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
3229 a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
3230 a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
3231 a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
3232 }
3233
	/*
	 * We need to make sure that these merged watermark values are
	 * actually a valid configuration themselves.  If they're not,
	 * there's no safe way to transition from the old state to
	 * the new state, so we need to fail the atomic transaction.
	 */
3240 if (!ilk_validate_pipe_wm(dev_priv, a))
3241 return -EINVAL;
3242
	/*
	 * If our intermediate WM are identical to the final WM, then we can
	 * omit the post-vblank programming; only update watermarks if things
	 * actually changed.
	 */
3247 if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) != 0)
3248 newstate->wm.need_postvbl_update = true;
3249
3250 return 0;
3251}
3252
/*
 * Merge the watermarks from all active pipes for a specific level.
 */
3256static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
3257 int level,
3258 struct intel_wm_level *ret_wm)
3259{
3260 const struct intel_crtc *intel_crtc;
3261
3262 ret_wm->enable = true;
3263
3264 for_each_intel_crtc(&dev_priv->drm, intel_crtc) {
3265 const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk;
3266 const struct intel_wm_level *wm = &active->wm[level];
3267
3268 if (!active->pipe_enabled)
3269 continue;
3270
		/*
		 * The watermark values may have been used in the past,
		 * so we must maintain them in the registers for some
		 * time even if the level is now disabled.
		 */
3276 if (!wm->enable)
3277 ret_wm->enable = false;
3278
3279 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
3280 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
3281 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
3282 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
3283 }
3284}
3285
/*
 * Merge all low power watermarks for all active pipes.
 */
3289static void ilk_wm_merge(struct drm_i915_private *dev_priv,
3290 const struct intel_wm_config *config,
3291 const struct ilk_wm_maximums *max,
3292 struct intel_pipe_wm *merged)
3293{
3294 int level, max_level = ilk_wm_max_level(dev_priv);
3295 int last_enabled_level = max_level;
3296
	/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
3298 if ((INTEL_GEN(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
3299 config->num_pipes_active > 1)
3300 last_enabled_level = 0;
3301
	/* ILK: FBC WM must be disabled always */
3303 merged->fbc_wm_enabled = INTEL_GEN(dev_priv) >= 6;
3304
	/* merge each WM1+ level */
3306 for (level = 1; level <= max_level; level++) {
3307 struct intel_wm_level *wm = &merged->wm[level];
3308
3309 ilk_merge_wm_level(dev_priv, level, wm);
3310
3311 if (level > last_enabled_level)
3312 wm->enable = false;
3313 else if (!ilk_validate_wm_level(level, max, wm))
			/* make sure all following levels get disabled */
3315 last_enabled_level = level - 1;
3316
		/*
		 * The spec says it is preferred to disable
		 * FBC WMs instead of disabling a WM level.
		 */
3321 if (wm->fbc_val > max->fbc) {
3322 if (wm->enable)
3323 merged->fbc_wm_enabled = false;
3324 wm->fbc_val = 0;
3325 }
3326 }
3327
	/* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
	/*
	 * FIXME this is racy. FBC might get enabled later.
	 * What we should check here is whether FBC can be
	 * enabled sometime later.
	 */
3334 if (IS_GEN(dev_priv, 5) && !merged->fbc_wm_enabled &&
3335 intel_fbc_is_active(dev_priv)) {
3336 for (level = 2; level <= max_level; level++) {
3337 struct intel_wm_level *wm = &merged->wm[level];
3338
3339 wm->enable = false;
3340 }
3341 }
3342}
3343
3344static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
3345{
	/* LP1 maps to level 1; LP2/LP3 skip to levels 3/4 when level 4 is enabled */
3347 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
3348}
3349
/* The value we need to program into the WM_LPx latency field */
3351static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv,
3352 int level)
3353{
3354 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
3355 return 2 * level;
3356 else
3357 return dev_priv->wm.pri_latency[level];
3358}
3359
3360static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
3361 const struct intel_pipe_wm *merged,
3362 enum intel_ddb_partitioning partitioning,
3363 struct ilk_wm_values *results)
3364{
3365 struct intel_crtc *intel_crtc;
3366 int level, wm_lp;
3367
3368 results->enable_fbc_wm = merged->fbc_wm_enabled;
3369 results->partitioning = partitioning;
3370
	/* LP1+ register values */
3372 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
3373 const struct intel_wm_level *r;
3374
3375 level = ilk_wm_lp_to_level(wm_lp, merged);
3376
3377 r = &merged->wm[level];
3378
		/*
		 * Maintain the watermark values even if the level is
		 * disabled. Doing otherwise could cause underruns.
		 */
3383 results->wm_lp[wm_lp - 1] =
3384 (ilk_wm_lp_latency(dev_priv, level) << WM1_LP_LATENCY_SHIFT) |
3385 (r->pri_val << WM1_LP_SR_SHIFT) |
3386 r->cur_val;
3387
3388 if (r->enable)
3389 results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
3390
3391 if (INTEL_GEN(dev_priv) >= 8)
3392 results->wm_lp[wm_lp - 1] |=
3393 r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
3394 else
3395 results->wm_lp[wm_lp - 1] |=
3396 r->fbc_val << WM1_LP_FBC_SHIFT;
3397
		/*
		 * Always set WM1S_LP_EN when spr_val != 0, even if the
		 * level is disabled. Doing otherwise could cause underruns.
		 */
3402 if (INTEL_GEN(dev_priv) <= 6 && r->spr_val) {
3403 drm_WARN_ON(&dev_priv->drm, wm_lp != 1);
3404 results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
3405 } else
3406 results->wm_lp_spr[wm_lp - 1] = r->spr_val;
3407 }
3408
	/* LP0 register values */
3410 for_each_intel_crtc(&dev_priv->drm, intel_crtc) {
3411 enum pipe pipe = intel_crtc->pipe;
3412 const struct intel_pipe_wm *pipe_wm = &intel_crtc->wm.active.ilk;
3413 const struct intel_wm_level *r = &pipe_wm->wm[0];
3414
3415 if (drm_WARN_ON(&dev_priv->drm, !r->enable))
3416 continue;
3417
3418 results->wm_pipe[pipe] =
3419 (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
3420 (r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
3421 r->cur_val;
3422 }
3423}
3424
/* Find the result with the highest level enabled. Check for enable_fbc_wm in
 * case both are at the same level. Prefer r1 in case they're the same. */
3427static struct intel_pipe_wm *
3428ilk_find_best_result(struct drm_i915_private *dev_priv,
3429 struct intel_pipe_wm *r1,
3430 struct intel_pipe_wm *r2)
3431{
3432 int level, max_level = ilk_wm_max_level(dev_priv);
3433 int level1 = 0, level2 = 0;
3434
3435 for (level = 1; level <= max_level; level++) {
3436 if (r1->wm[level].enable)
3437 level1 = level;
3438 if (r2->wm[level].enable)
3439 level2 = level;
3440 }
3441
3442 if (level1 == level2) {
3443 if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
3444 return r2;
3445 else
3446 return r1;
3447 } else if (level1 > level2) {
3448 return r1;
3449 } else {
3450 return r2;
3451 }
3452}
3453
3454
3455#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
3456#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
3457#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
3458#define WM_DIRTY_FBC (1 << 24)
3459#define WM_DIRTY_DDB (1 << 25)
3460
3461static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
3462 const struct ilk_wm_values *old,
3463 const struct ilk_wm_values *new)
3464{
3465 unsigned int dirty = 0;
3466 enum pipe pipe;
3467 int wm_lp;
3468
3469 for_each_pipe(dev_priv, pipe) {
3470 if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
3471 dirty |= WM_DIRTY_PIPE(pipe);
			/* Must disable LP1+ watermarks too */
3473 dirty |= WM_DIRTY_LP_ALL;
3474 }
3475 }
3476
3477 if (old->enable_fbc_wm != new->enable_fbc_wm) {
3478 dirty |= WM_DIRTY_FBC;
		/* Must disable LP1+ watermarks too */
3480 dirty |= WM_DIRTY_LP_ALL;
3481 }
3482
3483 if (old->partitioning != new->partitioning) {
3484 dirty |= WM_DIRTY_DDB;
		/* Must disable LP1+ watermarks too */
3486 dirty |= WM_DIRTY_LP_ALL;
3487 }
3488
	/* LP1+ watermarks already deemed dirty, no need to continue */
3490 if (dirty & WM_DIRTY_LP_ALL)
3491 return dirty;
3492
	/* Find the lowest numbered LP1+ watermark in need of an update... */
3494 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
3495 if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
3496 old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
3497 break;
3498 }
3499
	/* ...and mark it and all higher numbered LP1+ watermarks as dirty */
3501 for (; wm_lp <= 3; wm_lp++)
3502 dirty |= WM_DIRTY_LP(wm_lp);
3503
3504 return dirty;
3505}
3506
3507static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
3508 unsigned int dirty)
3509{
3510 struct ilk_wm_values *previous = &dev_priv->wm.hw;
3511 bool changed = false;
3512
3513 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
3514 previous->wm_lp[2] &= ~WM1_LP_SR_EN;
3515 I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
3516 changed = true;
3517 }
3518 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
3519 previous->wm_lp[1] &= ~WM1_LP_SR_EN;
3520 I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
3521 changed = true;
3522 }
3523 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
3524 previous->wm_lp[0] &= ~WM1_LP_SR_EN;
3525 I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
3526 changed = true;
3527 }
3528
	/*
	 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */
3533
3534 return changed;
3535}
3536
/*
 * The spec says we shouldn't write when we don't need, because every write
 * causes WMs to be re-evaluated, expending some power.
 */
3541static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
3542 struct ilk_wm_values *results)
3543{
3544 struct ilk_wm_values *previous = &dev_priv->wm.hw;
3545 unsigned int dirty;
3546 u32 val;
3547
3548 dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
3549 if (!dirty)
3550 return;
3551
3552 _ilk_disable_lp_wm(dev_priv, dirty);
3553
3554 if (dirty & WM_DIRTY_PIPE(PIPE_A))
3555 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
3556 if (dirty & WM_DIRTY_PIPE(PIPE_B))
3557 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
3558 if (dirty & WM_DIRTY_PIPE(PIPE_C))
3559 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
3560
3561 if (dirty & WM_DIRTY_DDB) {
3562 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
3563 val = I915_READ(WM_MISC);
3564 if (results->partitioning == INTEL_DDB_PART_1_2)
3565 val &= ~WM_MISC_DATA_PARTITION_5_6;
3566 else
3567 val |= WM_MISC_DATA_PARTITION_5_6;
3568 I915_WRITE(WM_MISC, val);
3569 } else {
3570 val = I915_READ(DISP_ARB_CTL2);
3571 if (results->partitioning == INTEL_DDB_PART_1_2)
3572 val &= ~DISP_DATA_PARTITION_5_6;
3573 else
3574 val |= DISP_DATA_PARTITION_5_6;
3575 I915_WRITE(DISP_ARB_CTL2, val);
3576 }
3577 }
3578
3579 if (dirty & WM_DIRTY_FBC) {
3580 val = I915_READ(DISP_ARB_CTL);
3581 if (results->enable_fbc_wm)
3582 val &= ~DISP_FBC_WM_DIS;
3583 else
3584 val |= DISP_FBC_WM_DIS;
3585 I915_WRITE(DISP_ARB_CTL, val);
3586 }
3587
3588 if (dirty & WM_DIRTY_LP(1) &&
3589 previous->wm_lp_spr[0] != results->wm_lp_spr[0])
3590 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
3591
3592 if (INTEL_GEN(dev_priv) >= 7) {
3593 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
3594 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
3595 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
3596 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
3597 }
3598
3599 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
3600 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
3601 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
3602 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
3603 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
3604 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
3605
3606 dev_priv->wm.hw = *results;
3607}
3608
3609bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv)
3610{
3611 return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
3612}
3613
3614u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv)
3615{
3616 int i;
3617 int max_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
3618 u8 enabled_slices_mask = 0;
3619
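	/* a DBuf slice is enabled once its power state bit reads back set */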
3620 for (i = 0; i < max_slices; i++) {
3621 if (I915_READ(DBUF_CTL_S(i)) & DBUF_POWER_STATE)
3622 enabled_slices_mask |= BIT(i);
3623 }
3624
3625 return enabled_slices_mask;
3626}
3627
/*
 * FIXME: We still don't have the proper code to detect if we need to apply
 * the WA, so assume we'll always need it in order to avoid underruns.
 */
3632static bool skl_needs_memory_bw_wa(struct drm_i915_private *dev_priv)
3633{
3634 return IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv);
3635}
3636
3637static bool
3638intel_has_sagv(struct drm_i915_private *dev_priv)
3639{
	/* SAGV is not handled on gen12 yet */
3641 if (IS_GEN(dev_priv, 12))
3642 return false;
3643
3644 return (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) &&
3645 dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED;
3646}
3647
3648static void
3649skl_setup_sagv_block_time(struct drm_i915_private *dev_priv)
3650{
3651 if (INTEL_GEN(dev_priv) >= 12) {
3652 u32 val = 0;
3653 int ret;
3654
3655 ret = sandybridge_pcode_read(dev_priv,
3656 GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
3657 &val, NULL);
3658 if (!ret) {
3659 dev_priv->sagv_block_time_us = val;
3660 return;
3661 }
3662
3663 drm_dbg(&dev_priv->drm, "Couldn't read SAGV block time!\n");
3664 } else if (IS_GEN(dev_priv, 11)) {
3665 dev_priv->sagv_block_time_us = 10;
3666 return;
3667 } else if (IS_GEN(dev_priv, 10)) {
3668 dev_priv->sagv_block_time_us = 20;
3669 return;
3670 } else if (IS_GEN(dev_priv, 9)) {
3671 dev_priv->sagv_block_time_us = 30;
3672 return;
3673 } else {
3674 MISSING_CASE(INTEL_GEN(dev_priv));
3675 }
3676
	/* fall back to a block time that never trips the latency check */
3678 dev_priv->sagv_block_time_us = -1;
3679}
3680
/*
 * SAGV dynamically adjusts the system agent voltage and clock frequencies
 * depending on power and performance requirements. The display engine access
 * to system memory is blocked during the adjustment time. Because of the
 * blocking time, having this enabled can cause full system hangs and/or pipe
 * underruns if we don't meet all of the following requirements:
 *
 *  - <= 1 pipe enabled
 *  - All planes can enable watermarks for latencies >= SAGV engine block time
 *  - We're not using an interlaced display configuration
 */
3692int
3693intel_enable_sagv(struct drm_i915_private *dev_priv)
3694{
3695 int ret;
3696
3697 if (!intel_has_sagv(dev_priv))
3698 return 0;
3699
3700 if (dev_priv->sagv_status == I915_SAGV_ENABLED)
3701 return 0;
3702
3703 drm_dbg_kms(&dev_priv->drm, "Enabling SAGV\n");
3704 ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
3705 GEN9_SAGV_ENABLE);
3706
	/*
	 * Some skl systems, pre-release machines in particular,
	 * don't actually have SAGV.
	 */
3713 if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
3714 drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n");
3715 dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
3716 return 0;
3717 } else if (ret < 0) {
3718 drm_err(&dev_priv->drm, "Failed to enable SAGV\n");
3719 return ret;
3720 }
3721
3722 dev_priv->sagv_status = I915_SAGV_ENABLED;
3723 return 0;
3724}
3725
3726int
3727intel_disable_sagv(struct drm_i915_private *dev_priv)
3728{
3729 int ret;
3730
3731 if (!intel_has_sagv(dev_priv))
3732 return 0;
3733
3734 if (dev_priv->sagv_status == I915_SAGV_DISABLED)
3735 return 0;
3736
3737 drm_dbg_kms(&dev_priv->drm, "Disabling SAGV\n");
3738
3739 ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
3740 GEN9_SAGV_DISABLE,
3741 GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
3742 1);
3743
	/*
	 * Some skl systems, pre-release machines in particular,
	 * don't actually have SAGV.
	 */
3747 if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
3748 drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n");
3749 dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
3750 return 0;
3751 } else if (ret < 0) {
3752 drm_err(&dev_priv->drm, "Failed to disable SAGV (%d)\n", ret);
3753 return ret;
3754 }
3755
3756 dev_priv->sagv_status = I915_SAGV_DISABLED;
3757 return 0;
3758}
3759
3760bool intel_can_enable_sagv(struct intel_atomic_state *state)
3761{
3762 struct drm_device *dev = state->base.dev;
3763 struct drm_i915_private *dev_priv = to_i915(dev);
3764 struct intel_crtc *crtc;
3765 struct intel_plane *plane;
3766 struct intel_crtc_state *crtc_state;
3767 enum pipe pipe;
3768 int level, latency;
3769
3770 if (!intel_has_sagv(dev_priv))
3771 return false;
3772
	/*
	 * If there are no active CRTCs, no additional checks need be performed
	 */
3776 if (hweight8(state->active_pipes) == 0)
3777 return true;
3778
	/*
	 * SKL+ workaround: bspec recommends we disable SAGV when we have
	 * more than one pipe enabled
	 */
3783 if (hweight8(state->active_pipes) > 1)
3784 return false;
3785
	/* Since we're now guaranteed to only have one active CRTC... */
3787 pipe = ffs(state->active_pipes) - 1;
3788 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
3789 crtc_state = to_intel_crtc_state(crtc->base.state);
3790
3791 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
3792 return false;
3793
3794 for_each_intel_plane_on_crtc(dev, crtc, plane) {
3795 struct skl_plane_wm *wm =
3796 &crtc_state->wm.skl.optimal.planes[plane->id];
3797
		/* Skip this plane if it's not enabled */
3799 if (!wm->wm[0].plane_en)
3800 continue;
3801
		/* Find the highest enabled wm level for this plane */
3803 for (level = ilk_wm_max_level(dev_priv);
3804 !wm->wm[level].plane_en; --level)
3805 { }
3806
3807 latency = dev_priv->wm.skl_latency[level];
3808
3809 if (skl_needs_memory_bw_wa(dev_priv) &&
3810 plane->base.state->fb->modifier ==
3811 I915_FORMAT_MOD_X_TILED)
3812 latency += 15;
3813
		/*
		 * If any of the planes on this pipe don't enable wm levels that
		 * incur memory latencies higher than sagv_block_time_us we
		 * can't enable SAGV.
		 */
3819 if (latency < dev_priv->sagv_block_time_us)
3820 return false;
3821 }
3822
3823 return true;
3824}
3825
/*
 * Calculate the initial DBuf slice offset, based on the slice size
 * and the mask (i.e. if slice size is 1024 and the second slice is
 * enabled, the offset would be 1024).
 */
3831static unsigned int
3832icl_get_first_dbuf_slice_offset(u32 dbuf_slice_mask,
3833 u32 slice_size,
3834 u32 ddb_size)
3835{
3836 unsigned int offset = 0;
3837
3838 if (!dbuf_slice_mask)
3839 return 0;
3840
3841 offset = (ffs(dbuf_slice_mask) - 1) * slice_size;
3842
3843 WARN_ON(offset >= ddb_size);
3844 return offset;
3845}
3846
3847static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv)
3848{
3849 u16 ddb_size = INTEL_INFO(dev_priv)->ddb_size;
3850
3851 drm_WARN_ON(&dev_priv->drm, ddb_size == 0);
3852
3853 if (INTEL_GEN(dev_priv) < 11)
		return ddb_size - 4; /* 4 blocks for bypass path allocation */
3855
3856 return ddb_size;
3857}
3858
3859static u8 skl_compute_dbuf_slices(const struct intel_crtc_state *crtc_state,
3860 u8 active_pipes);
3861
3862static void
3863skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
3864 const struct intel_crtc_state *crtc_state,
3865 const u64 total_data_rate,
3866 struct skl_ddb_entry *alloc,
				   int *num_active /* out */)
3868{
3869 struct drm_atomic_state *state = crtc_state->uapi.state;
3870 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
3871 struct drm_crtc *for_crtc = crtc_state->uapi.crtc;
3872 const struct intel_crtc *crtc;
3873 u32 pipe_width = 0, total_width_in_range = 0, width_before_pipe_in_range = 0;
3874 enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe;
3875 u16 ddb_size;
3876 u32 ddb_range_size;
3877 u32 i;
3878 u32 dbuf_slice_mask;
3879 u32 active_pipes;
3880 u32 offset;
3881 u32 slice_size;
3882 u32 total_slice_mask;
3883 u32 start, end;
3884
3885 if (drm_WARN_ON(&dev_priv->drm, !state) || !crtc_state->hw.active) {
3886 alloc->start = 0;
3887 alloc->end = 0;
3888 *num_active = hweight8(dev_priv->active_pipes);
3889 return;
3890 }
3891
3892 if (intel_state->active_pipe_changes)
3893 active_pipes = intel_state->active_pipes;
3894 else
3895 active_pipes = dev_priv->active_pipes;
3896
3897 *num_active = hweight8(active_pipes);
3898
3899 ddb_size = intel_get_ddb_size(dev_priv);
3900
3901 slice_size = ddb_size / INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
3902
	/*
	 * If the state doesn't change the active CRTCs or there is no
	 * modeset request, then there's no need to recalculate;
	 * the existing pipe allocation limits should remain unchanged.
	 * Note that we're safe from racing commits since any racing commits
	 * that change the active CRTC list or do modeset would need to
	 * grab _all_ crtc locks, including the one we currently hold.
	 */
3911 if (!intel_state->active_pipe_changes && !intel_state->modeset) {
		/*
		 * alloc may be cleared by clear_intel_crtc_state,
		 * copy from old state to be sure
		 */
3916 *alloc = to_intel_crtc_state(for_crtc->state)->wm.skl.ddb;
3917 return;
3918 }
3919
	/*
	 * Get allowed DBuf slices for correspondent pipe and platform.
	 */
3923 dbuf_slice_mask = skl_compute_dbuf_slices(crtc_state, active_pipes);
3924
3925 DRM_DEBUG_KMS("DBuf slice mask %x pipe %c active pipes %x\n",
3926 dbuf_slice_mask,
3927 pipe_name(for_pipe), active_pipes);
3928
	/*
	 * Figure out at which DBuf slice we start, i.e. if we start at DBuf S2
	 * and the slice size is 1024, the offset would be 1024.
	 */
3933 offset = icl_get_first_dbuf_slice_offset(dbuf_slice_mask,
3934 slice_size, ddb_size);
3935
	/*
	 * Figure out the total size of the allowed DBuf slices, which is
	 * basically the number of allowed slices for that pipe multiplied
	 * by the slice size. Within this range, ddb entries are still
	 * allocated in proportion to display width.
	 */
3942 ddb_range_size = hweight8(dbuf_slice_mask) * slice_size;
3943
	/*
	 * Watermark/ddb requirements highly depend upon the width of the
	 * framebuffer, so instead of allocating DDB equally among pipes,
	 * distribute DDB based on the resolution/width of the display.
	 */
3949 total_slice_mask = dbuf_slice_mask;
3950 for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
3951 const struct drm_display_mode *adjusted_mode =
3952 &crtc_state->hw.adjusted_mode;
3953 enum pipe pipe = crtc->pipe;
3954 int hdisplay, vdisplay;
3955 u32 pipe_dbuf_slice_mask;
3956
3957 if (!crtc_state->hw.active)
3958 continue;
3959
3960 pipe_dbuf_slice_mask = skl_compute_dbuf_slices(crtc_state,
3961 active_pipes);
3962
		/*
		 * According to BSpec, a pipe can share one dbuf slice with
		 * another pipe, or use multiple dbuf slices. In both cases we
		 * account for other pipes only if they have exactly the same
		 * mask. However we still need to account for how many slices
		 * should be enabled in total.
		 */
3970 total_slice_mask |= pipe_dbuf_slice_mask;
3971
		/*
		 * Do not account pipes using other slice sets; as of the
		 * current BSpec, slice sets either fully overlap or do not
		 * overlap at all, so there is no need to count how many
		 * slices intersect with the current pipe's set.
		 */
3979 if (dbuf_slice_mask != pipe_dbuf_slice_mask)
3980 continue;
3981
3982 drm_mode_get_hv_timing(adjusted_mode, &hdisplay, &vdisplay);
3983
3984 total_width_in_range += hdisplay;
3985
3986 if (pipe < for_pipe)
3987 width_before_pipe_in_range += hdisplay;
3988 else if (pipe == for_pipe)
3989 pipe_width = hdisplay;
3990 }
3991
	/*
	 * FIXME: For now we always enable slice S1 as per
	 * the Bspec display initialization sequence.
	 */
3996 intel_state->enabled_dbuf_slices_mask = total_slice_mask | BIT(DBUF_S1);
3997
3998 start = ddb_range_size * width_before_pipe_in_range / total_width_in_range;
3999 end = ddb_range_size *
4000 (width_before_pipe_in_range + pipe_width) / total_width_in_range;
4001
4002 alloc->start = offset + start;
4003 alloc->end = offset + end;
4004
4005 DRM_DEBUG_KMS("Pipe %d ddb %d-%d\n", for_pipe,
4006 alloc->start, alloc->end);
4007 DRM_DEBUG_KMS("Enabled ddb slices mask %x num supported %d\n",
4008 intel_state->enabled_dbuf_slices_mask,
4009 INTEL_INFO(dev_priv)->num_supported_dbuf_slices);
4010}
4011
4012static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
4013 int width, const struct drm_format_info *format,
4014 u64 modifier, unsigned int rotation,
4015 u32 plane_pixel_rate, struct skl_wm_params *wp,
4016 int color_plane);
4017static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
4018 int level,
4019 const struct skl_wm_params *wp,
4020 const struct skl_wm_level *result_prev,
				 struct skl_wm_level *result /* out */);
4022
4023static unsigned int
4024skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
4025 int num_active)
4026{
4027 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4028 int level, max_level = ilk_wm_max_level(dev_priv);
4029 struct skl_wm_level wm = {};
4030 int ret, min_ddb_alloc = 0;
4031 struct skl_wm_params wp;
4032
4033 ret = skl_compute_wm_params(crtc_state, 256,
4034 drm_format_info(DRM_FORMAT_ARGB8888),
4035 DRM_FORMAT_MOD_LINEAR,
4036 DRM_MODE_ROTATE_0,
4037 crtc_state->pixel_rate, &wp, 0);
4038 drm_WARN_ON(&dev_priv->drm, ret);
4039
4040 for (level = 0; level <= max_level; level++) {
4041 skl_compute_plane_wm(crtc_state, level, &wp, &wm, &wm);
4042 if (wm.min_ddb_alloc == U16_MAX)
4043 break;
4044
4045 min_ddb_alloc = wm.min_ddb_alloc;
4046 }
4047
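	/* guarantee a minimum of 32 blocks with one active pipe, 8 otherwise */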
4048 return max(num_active == 1 ? 32 : 8, min_ddb_alloc);
4049}
4050
4051static void skl_ddb_entry_init_from_hw(struct drm_i915_private *dev_priv,
4052 struct skl_ddb_entry *entry, u32 reg)
4053{
4054
4055 entry->start = reg & DDB_ENTRY_MASK;
4056 entry->end = (reg >> DDB_ENTRY_END_SHIFT) & DDB_ENTRY_MASK;
4057
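	/* the hardware stores an inclusive end; convert to exclusive form */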
4058 if (entry->end)
4059 entry->end += 1;
4060}
4061
4062static void
4063skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
4064 const enum pipe pipe,
4065 const enum plane_id plane_id,
4066 struct skl_ddb_entry *ddb_y,
4067 struct skl_ddb_entry *ddb_uv)
4068{
4069 u32 val, val2;
4070 u32 fourcc = 0;
4071
	/* Cursor doesn't support NV12/planar, so no extra calculation needed */
4073 if (plane_id == PLANE_CURSOR) {
4074 val = I915_READ(CUR_BUF_CFG(pipe));
4075 skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
4076 return;
4077 }
4078
4079 val = I915_READ(PLANE_CTL(pipe, plane_id));
4080
4081
4082 if (val & PLANE_CTL_ENABLE)
4083 fourcc = skl_format_to_fourcc(val & PLANE_CTL_FORMAT_MASK,
4084 val & PLANE_CTL_ORDER_RGBX,
4085 val & PLANE_CTL_ALPHA_MASK);
4086
4087 if (INTEL_GEN(dev_priv) >= 11) {
4088 val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
4089 skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
4090 } else {
4091 val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
4092 val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));
4093
4094 if (fourcc &&
4095 drm_format_info_is_yuv_semiplanar(drm_format_info(fourcc)))
4096 swap(val, val2);
4097
4098 skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
4099 skl_ddb_entry_init_from_hw(dev_priv, ddb_uv, val2);
4100 }
4101}
4102
4103void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
4104 struct skl_ddb_entry *ddb_y,
4105 struct skl_ddb_entry *ddb_uv)
4106{
4107 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4108 enum intel_display_power_domain power_domain;
4109 enum pipe pipe = crtc->pipe;
4110 intel_wakeref_t wakeref;
4111 enum plane_id plane_id;
4112
4113 power_domain = POWER_DOMAIN_PIPE(pipe);
4114 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
4115 if (!wakeref)
4116 return;
4117
4118 for_each_plane_id_on_crtc(crtc, plane_id)
4119 skl_ddb_get_hw_plane_state(dev_priv, pipe,
4120 plane_id,
4121 &ddb_y[plane_id],
4122 &ddb_uv[plane_id]);
4123
4124 intel_display_power_put(dev_priv, power_domain, wakeref);
4125}
4126
4127void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv)
4128{
4129 dev_priv->enabled_dbuf_slices_mask =
4130 intel_enabled_dbuf_slices_mask(dev_priv);
4131}
4132
4133
/*
 * Determine the downscale factor of a plane for watermark purposes,
 * as a 16.16 fixed point value. Ratios below 1 are clamped to 1, since
 * upscaling does not reduce the required bandwidth.
 */
4149static uint_fixed_16_16_t
4150skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state,
4151 const struct intel_plane_state *plane_state)
4152{
4153 u32 src_w, src_h, dst_w, dst_h;
4154 uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
4155 uint_fixed_16_16_t downscale_h, downscale_w;
4156
4157 if (WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state)))
4158 return u32_to_fixed16(0);
4159
	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
4167 src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
4168 src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
4169 dst_w = drm_rect_width(&plane_state->uapi.dst);
4170 dst_h = drm_rect_height(&plane_state->uapi.dst);
4171
4172 fp_w_ratio = div_fixed16(src_w, dst_w);
4173 fp_h_ratio = div_fixed16(src_h, dst_h);
4174 downscale_w = max_fixed16(fp_w_ratio, u32_to_fixed16(1));
4175 downscale_h = max_fixed16(fp_h_ratio, u32_to_fixed16(1));
4176
4177 return mul_fixed16(downscale_w, downscale_h);
4178}
4179
4180struct dbuf_slice_conf_entry {
4181 u8 active_pipes;
4182 u8 dbuf_mask[I915_MAX_PIPES];
4183};
4184
/*
 * Table taken from BSpec.
 * Pipes have some preferred DBuf slice affinity,
 * plus there are some hardcoded requirements on how
 * those should be distributed in case of multiple pipes
 * being active.
 */
4195static const struct dbuf_slice_conf_entry icl_allowed_dbufs[] =
/* Autogenerated with igt/tools/intel_dbuf_map tool: */
4197{
4198 {
4199 .active_pipes = BIT(PIPE_A),
4200 .dbuf_mask = {
4201 [PIPE_A] = BIT(DBUF_S1),
4202 },
4203 },
4204 {
4205 .active_pipes = BIT(PIPE_B),
4206 .dbuf_mask = {
4207 [PIPE_B] = BIT(DBUF_S1),
4208 },
4209 },
4210 {
4211 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
4212 .dbuf_mask = {
4213 [PIPE_A] = BIT(DBUF_S1),
4214 [PIPE_B] = BIT(DBUF_S2),
4215 },
4216 },
4217 {
4218 .active_pipes = BIT(PIPE_C),
4219 .dbuf_mask = {
4220 [PIPE_C] = BIT(DBUF_S2),
4221 },
4222 },
4223 {
4224 .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
4225 .dbuf_mask = {
4226 [PIPE_A] = BIT(DBUF_S1),
4227 [PIPE_C] = BIT(DBUF_S2),
4228 },
4229 },
4230 {
4231 .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
4232 .dbuf_mask = {
4233 [PIPE_B] = BIT(DBUF_S1),
4234 [PIPE_C] = BIT(DBUF_S2),
4235 },
4236 },
4237 {
4238 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
4239 .dbuf_mask = {
4240 [PIPE_A] = BIT(DBUF_S1),
4241 [PIPE_B] = BIT(DBUF_S1),
4242 [PIPE_C] = BIT(DBUF_S2),
4243 },
4244 },
4245 {}
4246};
4247
/*
 * Table taken from BSpec.
 * Pipes have some preferred DBuf slice affinity,
 * plus there are some hardcoded requirements on how
 * those should be distributed in case of multiple pipes
 * being active.
 */
4258static const struct dbuf_slice_conf_entry tgl_allowed_dbufs[] =
/* Autogenerated with igt/tools/intel_dbuf_map tool: */
4260{
4261 {
4262 .active_pipes = BIT(PIPE_A),
4263 .dbuf_mask = {
4264 [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
4265 },
4266 },
4267 {
4268 .active_pipes = BIT(PIPE_B),
4269 .dbuf_mask = {
4270 [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
4271 },
4272 },
4273 {
4274 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
4275 .dbuf_mask = {
4276 [PIPE_A] = BIT(DBUF_S2),
4277 [PIPE_B] = BIT(DBUF_S1),
4278 },
4279 },
4280 {
4281 .active_pipes = BIT(PIPE_C),
4282 .dbuf_mask = {
4283 [PIPE_C] = BIT(DBUF_S2) | BIT(DBUF_S1),
4284 },
4285 },
4286 {
4287 .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
4288 .dbuf_mask = {
4289 [PIPE_A] = BIT(DBUF_S1),
4290 [PIPE_C] = BIT(DBUF_S2),
4291 },
4292 },
4293 {
4294 .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
4295 .dbuf_mask = {
4296 [PIPE_B] = BIT(DBUF_S1),
4297 [PIPE_C] = BIT(DBUF_S2),
4298 },
4299 },
4300 {
4301 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
4302 .dbuf_mask = {
4303 [PIPE_A] = BIT(DBUF_S1),
4304 [PIPE_B] = BIT(DBUF_S1),
4305 [PIPE_C] = BIT(DBUF_S2),
4306 },
4307 },
4308 {
4309 .active_pipes = BIT(PIPE_D),
4310 .dbuf_mask = {
4311 [PIPE_D] = BIT(DBUF_S2) | BIT(DBUF_S1),
4312 },
4313 },
4314 {
4315 .active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
4316 .dbuf_mask = {
4317 [PIPE_A] = BIT(DBUF_S1),
4318 [PIPE_D] = BIT(DBUF_S2),
4319 },
4320 },
4321 {
4322 .active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
4323 .dbuf_mask = {
4324 [PIPE_B] = BIT(DBUF_S1),
4325 [PIPE_D] = BIT(DBUF_S2),
4326 },
4327 },
4328 {
4329 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
4330 .dbuf_mask = {
4331 [PIPE_A] = BIT(DBUF_S1),
4332 [PIPE_B] = BIT(DBUF_S1),
4333 [PIPE_D] = BIT(DBUF_S2),
4334 },
4335 },
4336 {
4337 .active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
4338 .dbuf_mask = {
4339 [PIPE_C] = BIT(DBUF_S1),
4340 [PIPE_D] = BIT(DBUF_S2),
4341 },
4342 },
4343 {
4344 .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
4345 .dbuf_mask = {
4346 [PIPE_A] = BIT(DBUF_S1),
4347 [PIPE_C] = BIT(DBUF_S2),
4348 [PIPE_D] = BIT(DBUF_S2),
4349 },
4350 },
4351 {
4352 .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
4353 .dbuf_mask = {
4354 [PIPE_B] = BIT(DBUF_S1),
4355 [PIPE_C] = BIT(DBUF_S2),
4356 [PIPE_D] = BIT(DBUF_S2),
4357 },
4358 },
4359 {
4360 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
4361 .dbuf_mask = {
4362 [PIPE_A] = BIT(DBUF_S1),
4363 [PIPE_B] = BIT(DBUF_S1),
4364 [PIPE_C] = BIT(DBUF_S2),
4365 [PIPE_D] = BIT(DBUF_S2),
4366 },
4367 },
4368 {}
4369};
4370
4371static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes,
4372 const struct dbuf_slice_conf_entry *dbuf_slices)
4373{
4374 int i;
4375
	/* walk the table until the empty {} terminator entry */
	for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
4377 if (dbuf_slices[i].active_pipes == active_pipes)
4378 return dbuf_slices[i].dbuf_mask[pipe];
4379 }
4380 return 0;
4381}
4382
/*
 * This function finds an entry with the same enabled slice configuration and
 * returns the corresponding DBuf slice mask as stated in BSpec for the
 * particular platform.
 */
4388static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
4389{
        /*
         * ICL has two DBuf slices. Look up which slice(s) this pipe should
         * use for the given set of active pipes, per the table above.
         */
4402 return compute_dbuf_slices(pipe, active_pipes, icl_allowed_dbufs);
4403}
4404
4405static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
4406{
4407 return compute_dbuf_slices(pipe, active_pipes, tgl_allowed_dbufs);
4408}
4409
4410static u8 skl_compute_dbuf_slices(const struct intel_crtc_state *crtc_state,
4411 u8 active_pipes)
4412{
4413 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4414 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4415 enum pipe pipe = crtc->pipe;
4416
4417 if (IS_GEN(dev_priv, 12))
4418 return tgl_compute_dbuf_slices(pipe, active_pipes);
4419 else if (IS_GEN(dev_priv, 11))
4420 return icl_compute_dbuf_slices(pipe, active_pipes);
4421
        /*
         * For anything else just return one slice for now.
         * Should be extended for other platforms.
         */
4425 return BIT(DBUF_S1);
4426}
4427
4428static u64
4429skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
4430 const struct intel_plane_state *plane_state,
4431 int color_plane)
4432{
4433 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
4434 const struct drm_framebuffer *fb = plane_state->hw.fb;
4435 u32 data_rate;
4436 u32 width = 0, height = 0;
4437 uint_fixed_16_16_t down_scale_amount;
4438 u64 rate;
4439
4440 if (!plane_state->uapi.visible)
4441 return 0;
4442
4443 if (plane->id == PLANE_CURSOR)
4444 return 0;
4445
4446 if (color_plane == 1 &&
4447 !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
4448 return 0;
4449
        /*
         * Src coordinates are already rotated by 270 degrees for
         * the 90/270 degree plane rotation cases (to match the
         * GTT mapping), hence no need to account for rotation here.
         */
4455 width = drm_rect_width(&plane_state->uapi.src) >> 16;
4456 height = drm_rect_height(&plane_state->uapi.src) >> 16;
4457
        /* UV plane does 1/2 pixel sub-sampling */
4459 if (color_plane == 1) {
4460 width /= 2;
4461 height /= 2;
4462 }
4463
4464 data_rate = width * height;
4465
4466 down_scale_amount = skl_plane_downscale_amount(crtc_state, plane_state);
4467
4468 rate = mul_round_up_u32_fixed16(data_rate, down_scale_amount);
4469
4470 rate *= fb->format->cpp[color_plane];
4471 return rate;
4472}
4473
4474static u64
4475skl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
4476 u64 *plane_data_rate,
4477 u64 *uv_plane_data_rate)
4478{
4479 struct intel_plane *plane;
4480 const struct intel_plane_state *plane_state;
4481 u64 total_data_rate = 0;
4482
        /* Calculate and cache the data rate for each plane */
4484 intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
4485 enum plane_id plane_id = plane->id;
4486 u64 rate;
4487
                /* packed/y */
4489 rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0);
4490 plane_data_rate[plane_id] = rate;
4491 total_data_rate += rate;
4492
                /* uv-plane */
4494 rate = skl_plane_relative_data_rate(crtc_state, plane_state, 1);
4495 uv_plane_data_rate[plane_id] = rate;
4496 total_data_rate += rate;
4497 }
4498
4499 return total_data_rate;
4500}
4501
4502static u64
4503icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
4504 u64 *plane_data_rate)
4505{
4506 struct intel_plane *plane;
4507 const struct intel_plane_state *plane_state;
4508 u64 total_data_rate = 0;
4509
        /* Calculate and cache the data rate for each plane */
4511 intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
4512 enum plane_id plane_id = plane->id;
4513 u64 rate;
4514
4515 if (!plane_state->planar_linked_plane) {
4516 rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0);
4517 plane_data_rate[plane_id] = rate;
4518 total_data_rate += rate;
4519 } else {
4520 enum plane_id y_plane_id;
                        /*
                         * The slave plane might not iterate in
                         * intel_atomic_crtc_state_for_each_plane_state(),
                         * and needs the master plane state which may be
                         * NULL if we try get_new_plane_state(), so we
                         * always calculate from the master.
                         */
4528
4529 if (plane_state->planar_slave)
4530 continue;
4531
                        /* Y plane rate is calculated on the slave */
4533 rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0);
4534 y_plane_id = plane_state->planar_linked_plane->id;
4535 plane_data_rate[y_plane_id] = rate;
4536 total_data_rate += rate;
4537
4538 rate = skl_plane_relative_data_rate(crtc_state, plane_state, 1);
4539 plane_data_rate[plane_id] = rate;
4540 total_data_rate += rate;
4541 }
4542 }
4543
4544 return total_data_rate;
4545}
4546
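/*
 * Divide the pipe's DDB allocation among its planes: a fixed chunk is
 * reserved for the cursor at the top of the range, the highest watermark
 * level that fits is selected, and the leftover blocks are then handed
 * out proportionally to each plane's relative data rate.
 */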
4547static int
4548skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state)
4549{
4550 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4551 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4552 struct skl_ddb_entry *alloc = &crtc_state->wm.skl.ddb;
4553 u16 alloc_size, start = 0;
4554 u16 total[I915_MAX_PLANES] = {};
4555 u16 uv_total[I915_MAX_PLANES] = {};
4556 u64 total_data_rate;
4557 enum plane_id plane_id;
4558 int num_active;
4559 u64 plane_data_rate[I915_MAX_PLANES] = {};
4560 u64 uv_plane_data_rate[I915_MAX_PLANES] = {};
4561 u32 blocks;
4562 int level;
4563
        /* Clear the partitioning for disabled planes. */
4565 memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
4566 memset(crtc_state->wm.skl.plane_ddb_uv, 0, sizeof(crtc_state->wm.skl.plane_ddb_uv));
4567
4568 if (!crtc_state->hw.active) {
4569 alloc->start = alloc->end = 0;
4570 return 0;
4571 }
4572
4573 if (INTEL_GEN(dev_priv) >= 11)
4574 total_data_rate =
4575 icl_get_total_relative_data_rate(crtc_state,
4576 plane_data_rate);
4577 else
4578 total_data_rate =
4579 skl_get_total_relative_data_rate(crtc_state,
4580 plane_data_rate,
4581 uv_plane_data_rate);
4582
4583
4584 skl_ddb_get_pipe_allocation_limits(dev_priv, crtc_state, total_data_rate,
4585 alloc, &num_active);
4586 alloc_size = skl_ddb_entry_size(alloc);
4587 if (alloc_size == 0)
4588 return 0;
4589
        /* Allocate fixed number of blocks for cursor. */
4591 total[PLANE_CURSOR] = skl_cursor_allocation(crtc_state, num_active);
4592 alloc_size -= total[PLANE_CURSOR];
4593 crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].start =
4594 alloc->end - total[PLANE_CURSOR];
4595 crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;
4596
4597 if (total_data_rate == 0)
4598 return 0;
4599
        /*
         * Find the highest watermark level for which we can satisfy the block
         * requirement of active planes.
         */
4604 for (level = ilk_wm_max_level(dev_priv); level >= 0; level--) {
4605 blocks = 0;
4606 for_each_plane_id_on_crtc(crtc, plane_id) {
4607 const struct skl_plane_wm *wm =
4608 &crtc_state->wm.skl.optimal.planes[plane_id];
4609
4610 if (plane_id == PLANE_CURSOR) {
4611 if (wm->wm[level].min_ddb_alloc > total[PLANE_CURSOR]) {
4612 drm_WARN_ON(&dev_priv->drm,
4613 wm->wm[level].min_ddb_alloc != U16_MAX);
4614 blocks = U32_MAX;
4615 break;
4616 }
4617 continue;
4618 }
4619
4620 blocks += wm->wm[level].min_ddb_alloc;
4621 blocks += wm->uv_wm[level].min_ddb_alloc;
4622 }
4623
4624 if (blocks <= alloc_size) {
4625 alloc_size -= blocks;
4626 break;
4627 }
4628 }
4629
4630 if (level < 0) {
4631 drm_dbg_kms(&dev_priv->drm,
4632 "Requested display configuration exceeds system DDB limitations");
4633 drm_dbg_kms(&dev_priv->drm, "minimum required %d/%d\n",
4634 blocks, alloc_size);
4635 return -EINVAL;
4636 }
4637
        /*
         * Grant each plane the blocks it requires at the highest achievable
         * watermark level, plus an extra share of the leftover blocks
         * proportional to its relative data rate.
         */
4643 for_each_plane_id_on_crtc(crtc, plane_id) {
4644 const struct skl_plane_wm *wm =
4645 &crtc_state->wm.skl.optimal.planes[plane_id];
4646 u64 rate;
4647 u16 extra;
4648
4649 if (plane_id == PLANE_CURSOR)
4650 continue;

                /*
                 * We've accounted for all active planes; remaining planes are
                 * all disabled.
                 */
4656 if (total_data_rate == 0)
4657 break;
4658
4659 rate = plane_data_rate[plane_id];
4660 extra = min_t(u16, alloc_size,
4661 DIV64_U64_ROUND_UP(alloc_size * rate,
4662 total_data_rate));
4663 total[plane_id] = wm->wm[level].min_ddb_alloc + extra;
4664 alloc_size -= extra;
4665 total_data_rate -= rate;
4666
4667 if (total_data_rate == 0)
4668 break;
4669
4670 rate = uv_plane_data_rate[plane_id];
4671 extra = min_t(u16, alloc_size,
4672 DIV64_U64_ROUND_UP(alloc_size * rate,
4673 total_data_rate));
4674 uv_total[plane_id] = wm->uv_wm[level].min_ddb_alloc + extra;
4675 alloc_size -= extra;
4676 total_data_rate -= rate;
4677 }
4678 drm_WARN_ON(&dev_priv->drm, alloc_size != 0 || total_data_rate != 0);
4679
        /* Set the actual DDB start/end points for each plane */
4681 start = alloc->start;
4682 for_each_plane_id_on_crtc(crtc, plane_id) {
4683 struct skl_ddb_entry *plane_alloc =
4684 &crtc_state->wm.skl.plane_ddb_y[plane_id];
4685 struct skl_ddb_entry *uv_plane_alloc =
4686 &crtc_state->wm.skl.plane_ddb_uv[plane_id];
4687
4688 if (plane_id == PLANE_CURSOR)
4689 continue;
4690
                /* Gen11+ uses a separate plane for UV watermarks */
4692 drm_WARN_ON(&dev_priv->drm,
4693 INTEL_GEN(dev_priv) >= 11 && uv_total[plane_id]);
4694
                /* Leave disabled planes at (0,0) */
4696 if (total[plane_id]) {
4697 plane_alloc->start = start;
4698 start += total[plane_id];
4699 plane_alloc->end = start;
4700 }
4701
4702 if (uv_total[plane_id]) {
4703 uv_plane_alloc->start = start;
4704 start += uv_total[plane_id];
4705 uv_plane_alloc->end = start;
4706 }
4707 }
4708
        /*
         * When we calculated watermark values we didn't know how high
         * of a level we'd actually be able to hit, so we just marked
         * all levels as "enabled."  Go back now and disable the ones
         * that aren't actually possible.
         */
4715 for (level++; level <= ilk_wm_max_level(dev_priv); level++) {
4716 for_each_plane_id_on_crtc(crtc, plane_id) {
4717 struct skl_plane_wm *wm =
4718 &crtc_state->wm.skl.optimal.planes[plane_id];
4719
                        /*
                         * We only disable the watermarks for each plane if
                         * they exceed the ddb allocation of said plane. This
                         * is done so that we don't end up touching cursor
                         * watermarks needlessly when some other plane reduces
                         * our max possible watermark level.
                         *
                         * Note the strict '>' below: a level whose requirement
                         * exactly equals the plane's allocation is still
                         * considered achievable.
                         */
4732 if (wm->wm[level].min_ddb_alloc > total[plane_id] ||
4733 wm->uv_wm[level].min_ddb_alloc > uv_total[plane_id])
4734 memset(&wm->wm[level], 0, sizeof(wm->wm[level]));
4735
                        /*
                         * Wa_1408961008:icl, ehl
                         * Underruns with WM1+ disabled
                         */
4740 if (IS_GEN(dev_priv, 11) &&
4741 level == 1 && wm->wm[0].plane_en) {
4742 wm->wm[level].plane_res_b = wm->wm[0].plane_res_b;
4743 wm->wm[level].plane_res_l = wm->wm[0].plane_res_l;
4744 wm->wm[level].ignore_lines = wm->wm[0].ignore_lines;
4745 }
4746 }
4747 }
4748
        /*
         * Go back and disable the transition watermark if it turns out we
         * don't have enough DDB blocks for it.
         */
4753 for_each_plane_id_on_crtc(crtc, plane_id) {
4754 struct skl_plane_wm *wm =
4755 &crtc_state->wm.skl.optimal.planes[plane_id];
4756
4757 if (wm->trans_wm.plane_res_b >= total[plane_id])
4758 memset(&wm->trans_wm, 0, sizeof(wm->trans_wm));
4759 }
4760
4761 return 0;
4762}
4763
/*
 * The max latency should be 257 (max the punit can code is 255 and we add 2us
 * for the read latency) and cpp should always be <= 8, so that
 * should allow pixel_rate up to ~2 GHz which seems sufficient since max
 * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
 */
4770static uint_fixed_16_16_t
4771skl_wm_method1(const struct drm_i915_private *dev_priv, u32 pixel_rate,
4772 u8 cpp, u32 latency, u32 dbuf_block_size)
4773{
4774 u32 wm_intermediate_val;
4775 uint_fixed_16_16_t ret;
4776
4777 if (latency == 0)
4778 return FP_16_16_MAX;
4779
4780 wm_intermediate_val = latency * pixel_rate * cpp;
4781 ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size);
4782
4783 if (INTEL_GEN(dev_priv) >= 10)
4784 ret = add_fixed16_u32(ret, 1);
4785
4786 return ret;
4787}
4788
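/*
 * "Method 2" of the SKL watermark calculation: express the latency as a
 * number of whole display lines and charge a full line's worth of blocks
 * for each of them.
 */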
4789static uint_fixed_16_16_t
4790skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
4791 uint_fixed_16_16_t plane_blocks_per_line)
4792{
4793 u32 wm_intermediate_val;
4794 uint_fixed_16_16_t ret;
4795
4796 if (latency == 0)
4797 return FP_16_16_MAX;
4798
4799 wm_intermediate_val = latency * pixel_rate;
4800 wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val,
4801 pipe_htotal * 1000);
4802 ret = mul_u32_fixed16(wm_intermediate_val, plane_blocks_per_line);
4803 return ret;
4804}
4805
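/*
 * Line time in microseconds: crtc_htotal * 1000 / pixel_rate (in kHz),
 * returned as a 16.16 fixed point value.
 */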
4806static uint_fixed_16_16_t
4807intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
4808{
4809 u32 pixel_rate;
4810 u32 crtc_htotal;
4811 uint_fixed_16_16_t linetime_us;
4812
4813 if (!crtc_state->hw.active)
4814 return u32_to_fixed16(0);
4815
4816 pixel_rate = crtc_state->pixel_rate;
4817
4818 if (WARN_ON(pixel_rate == 0))
4819 return u32_to_fixed16(0);
4820
4821 crtc_htotal = crtc_state->hw.adjusted_mode.crtc_htotal;
4822 linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate);
4823
4824 return linetime_us;
4825}
4826
4827static u32
4828skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *crtc_state,
4829 const struct intel_plane_state *plane_state)
4830{
4831 u64 adjusted_pixel_rate;
4832 uint_fixed_16_16_t downscale_amount;
4833
        /* Shouldn't reach here on disabled planes... */
4835 if (WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state)))
4836 return 0;
4837
        /*
         * Adjusted plane pixel rate is just the pipe's adjusted pixel rate
         * with additional adjustments for plane-specific scaling.
         */
4842 adjusted_pixel_rate = crtc_state->pixel_rate;
4843 downscale_amount = skl_plane_downscale_amount(crtc_state, plane_state);
4844
4845 return mul_round_up_u32_fixed16(adjusted_pixel_rate,
4846 downscale_amount);
4847}
4848
4849static int
4850skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
4851 int width, const struct drm_format_info *format,
4852 u64 modifier, unsigned int rotation,
4853 u32 plane_pixel_rate, struct skl_wm_params *wp,
4854 int color_plane)
4855{
4856 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4857 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4858 u32 interm_pbpl;
4859
        /* only planar format has two planes */
4861 if (color_plane == 1 &&
4862 !intel_format_info_is_yuv_semiplanar(format, modifier)) {
4863 drm_dbg_kms(&dev_priv->drm,
4864 "Non planar format have single plane\n");
4865 return -EINVAL;
4866 }
4867
4868 wp->y_tiled = modifier == I915_FORMAT_MOD_Y_TILED ||
4869 modifier == I915_FORMAT_MOD_Yf_TILED ||
4870 modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
4871 modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
4872 wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
4873 wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
4874 modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
4875 wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier);
4876
4877 wp->width = width;
4878 if (color_plane == 1 && wp->is_planar)
4879 wp->width /= 2;
4880
4881 wp->cpp = format->cpp[color_plane];
4882 wp->plane_pixel_rate = plane_pixel_rate;
4883
4884 if (INTEL_GEN(dev_priv) >= 11 &&
4885 modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1)
4886 wp->dbuf_block_size = 256;
4887 else
4888 wp->dbuf_block_size = 512;
4889
4890 if (drm_rotation_90_or_270(rotation)) {
4891 switch (wp->cpp) {
4892 case 1:
4893 wp->y_min_scanlines = 16;
4894 break;
4895 case 2:
4896 wp->y_min_scanlines = 8;
4897 break;
4898 case 4:
4899 wp->y_min_scanlines = 4;
4900 break;
4901 default:
4902 MISSING_CASE(wp->cpp);
4903 return -EINVAL;
4904 }
4905 } else {
4906 wp->y_min_scanlines = 4;
4907 }
4908
4909 if (skl_needs_memory_bw_wa(dev_priv))
4910 wp->y_min_scanlines *= 2;
4911
4912 wp->plane_bytes_per_line = wp->width * wp->cpp;
4913 if (wp->y_tiled) {
4914 interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line *
4915 wp->y_min_scanlines,
4916 wp->dbuf_block_size);
4917
4918 if (INTEL_GEN(dev_priv) >= 10)
4919 interm_pbpl++;
4920
4921 wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
4922 wp->y_min_scanlines);
4923 } else if (wp->x_tiled && IS_GEN(dev_priv, 9)) {
4924 interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
4925 wp->dbuf_block_size);
4926 wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
4927 } else {
4928 interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
4929 wp->dbuf_block_size) + 1;
4930 wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
4931 }
4932
4933 wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines,
4934 wp->plane_blocks_per_line);
4935
4936 wp->linetime_us = fixed16_to_u32_round_up(
4937 intel_get_linetime_us(crtc_state));
4938
4939 return 0;
4940}
4941
4942static int
4943skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
4944 const struct intel_plane_state *plane_state,
4945 struct skl_wm_params *wp, int color_plane)
4946{
4947 const struct drm_framebuffer *fb = plane_state->hw.fb;
4948 int width;
4949
        /*
         * Src coordinates are already rotated by 270 degrees for
         * the 90/270 degree plane rotation cases (to match the
         * GTT mapping), hence no need to account for rotation here.
         */
4955 width = drm_rect_width(&plane_state->uapi.src) >> 16;
4956
4957 return skl_compute_wm_params(crtc_state, width,
4958 fb->format, fb->modifier,
4959 plane_state->hw.rotation,
4960 skl_adjusted_plane_pixel_rate(crtc_state, plane_state),
4961 wp, color_plane);
4962}
4963
4964static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level)
4965{
4966 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
4967 return true;
4968
        /* The number of lines is ignored for the level 0 watermark. */
4970 return level > 0;
4971}
4972
4973static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
4974 int level,
4975 const struct skl_wm_params *wp,
4976 const struct skl_wm_level *result_prev,
                                 struct skl_wm_level *result /* out */)
4978{
4979 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4980 u32 latency = dev_priv->wm.skl_latency[level];
4981 uint_fixed_16_16_t method1, method2;
4982 uint_fixed_16_16_t selected_result;
4983 u32 res_blocks, res_lines, min_ddb_alloc = 0;
4984
4985 if (latency == 0) {
                /* reject it */
4987 result->min_ddb_alloc = U16_MAX;
4988 return;
4989 }
4990
        /*
         * WaIncreaseLatencyIPCEnabled: kbl,cfl
         * Display WA #1141: kbl,cfl
         */
4995 if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) &&
4996 dev_priv->ipc_enabled)
4997 latency += 4;
4998
4999 if (skl_needs_memory_bw_wa(dev_priv) && wp->x_tiled)
5000 latency += 15;
5001
5002 method1 = skl_wm_method1(dev_priv, wp->plane_pixel_rate,
5003 wp->cpp, latency, wp->dbuf_block_size);
5004 method2 = skl_wm_method2(wp->plane_pixel_rate,
5005 crtc_state->hw.adjusted_mode.crtc_htotal,
5006 latency,
5007 wp->plane_blocks_per_line);
5008
5009 if (wp->y_tiled) {
5010 selected_result = max_fixed16(method2, wp->y_tile_minimum);
5011 } else {
5012 if ((wp->cpp * crtc_state->hw.adjusted_mode.crtc_htotal /
5013 wp->dbuf_block_size < 1) &&
5014 (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
5015 selected_result = method2;
5016 } else if (latency >= wp->linetime_us) {
5017 if (IS_GEN(dev_priv, 9) &&
5018 !IS_GEMINILAKE(dev_priv))
5019 selected_result = min_fixed16(method1, method2);
5020 else
5021 selected_result = method2;
5022 } else {
5023 selected_result = method1;
5024 }
5025 }
5026
5027 res_blocks = fixed16_to_u32_round_up(selected_result) + 1;
5028 res_lines = div_round_up_fixed16(selected_result,
5029 wp->plane_blocks_per_line);
5030
5031 if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) {
                /* Display WA #1125: skl,bxt,kbl */
5033 if (level == 0 && wp->rc_surface)
5034 res_blocks +=
5035 fixed16_to_u32_round_up(wp->y_tile_minimum);
5036
                /* Display WA #1126: skl,bxt,kbl */
5038 if (level >= 1 && level <= 7) {
5039 if (wp->y_tiled) {
5040 res_blocks +=
5041 fixed16_to_u32_round_up(wp->y_tile_minimum);
5042 res_lines += wp->y_min_scanlines;
5043 } else {
5044 res_blocks++;
5045 }

                        /*
                         * Make sure result blocks for higher latency levels
                         * are at least as high as the level below the current
                         * level. This is an assumption in the DDB algorithm
                         * optimization for special cases. Also covers Display
                         * WA #1125 for RC.
                         */
5053 if (result_prev->plane_res_b > res_blocks)
5054 res_blocks = result_prev->plane_res_b;
5055 }
5056 }
5057
5058 if (INTEL_GEN(dev_priv) >= 11) {
5059 if (wp->y_tiled) {
5060 int extra_lines;
5061
5062 if (res_lines % wp->y_min_scanlines == 0)
5063 extra_lines = wp->y_min_scanlines;
5064 else
5065 extra_lines = wp->y_min_scanlines * 2 -
5066 res_lines % wp->y_min_scanlines;
5067
5068 min_ddb_alloc = mul_round_up_u32_fixed16(res_lines + extra_lines,
5069 wp->plane_blocks_per_line);
5070 } else {
5071 min_ddb_alloc = res_blocks +
5072 DIV_ROUND_UP(res_blocks, 10);
5073 }
5074 }
5075
5076 if (!skl_wm_has_lines(dev_priv, level))
5077 res_lines = 0;
5078
5079 if (res_lines > 31) {
                /* reject it */
5081 result->min_ddb_alloc = U16_MAX;
5082 return;
5083 }
5084
        /*
         * If res_lines is valid, assume we can use this watermark level
         * for now.  We'll come back and disable it after we calculate the
         * DDB allocation if it turns out we don't actually have enough
         * blocks to satisfy it.
         */
5091 result->plane_res_b = res_blocks;
5092 result->plane_res_l = res_lines;
5093
5094 result->min_ddb_alloc = max(min_ddb_alloc, res_blocks) + 1;
5095 result->plane_en = true;
5096}
5097
5098static void
5099skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
5100 const struct skl_wm_params *wm_params,
5101 struct skl_wm_level *levels)
5102{
5103 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
5104 int level, max_level = ilk_wm_max_level(dev_priv);
5105 struct skl_wm_level *result_prev = &levels[0];
5106
5107 for (level = 0; level <= max_level; level++) {
5108 struct skl_wm_level *result = &levels[level];
5109
5110 skl_compute_plane_wm(crtc_state, level, wm_params,
5111 result_prev, result);
5112
5113 result_prev = result;
5114 }
5115}
5116
5117static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state,
5118 const struct skl_wm_params *wp,
5119 struct skl_plane_wm *wm)
5120{
5121 struct drm_device *dev = crtc_state->uapi.crtc->dev;
5122 const struct drm_i915_private *dev_priv = to_i915(dev);
5123 u16 trans_min, trans_amount, trans_y_tile_min;
5124 u16 wm0_sel_res_b, trans_offset_b, res_blocks;
5125
        /* Transition WMs make no sense if IPC is disabled */
5127 if (!dev_priv->ipc_enabled)
5128 return;
5129
        /*
         * WaDisableTWM:skl,kbl,cfl,bxt
         * Transition WMs are not recommended by the HW team for GEN9
         */
5134 if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv))
5135 return;
5136
5137 if (INTEL_GEN(dev_priv) >= 11)
5138 trans_min = 4;
5139 else
5140 trans_min = 14;
5141
        /* Display WA #1140: glk,cnl */
5143 if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
5144 trans_amount = 0;
5145 else
5146 trans_amount = 10;
5147
5148 trans_offset_b = trans_min + trans_amount;

        /*
         * The spec asks for Selected Result Blocks for wm0 (the real value),
         * not Result Blocks (the integer value). Pay attention to the capital
         * letters. The value wm->wm[0].plane_res_b is actually Result Blocks,
         * but since Result Blocks is the ceiling of Selected Result Blocks
         * plus 1, and since we later will have to get the ceiling of the sum
         * in the transition watermarks calculation, we can just pretend
         * Selected Result Blocks is Result Blocks minus 1 and it should work
         * for the current platforms.
         */
5160 wm0_sel_res_b = wm->wm[0].plane_res_b - 1;
5161
5162 if (wp->y_tiled) {
5163 trans_y_tile_min =
5164 (u16)mul_round_up_u32_fixed16(2, wp->y_tile_minimum);
5165 res_blocks = max(wm0_sel_res_b, trans_y_tile_min) +
5166 trans_offset_b;
5167 } else {
5168 res_blocks = wm0_sel_res_b + trans_offset_b;
5169
                /* WA BUG:1938466 add one block for non y-tile planes */
5171 if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))
5172 res_blocks += 1;
5173 }
5174
        /*
         * Just assume we can enable the transition watermark.  After
         * computing the DDB we'll come back and disable it if that
         * assumption turns out to be false.
         */
5180 wm->trans_wm.plane_res_b = res_blocks + 1;
5181 wm->trans_wm.plane_en = true;
5182}
5183
5184static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
5185 const struct intel_plane_state *plane_state,
5186 enum plane_id plane_id, int color_plane)
5187{
5188 struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
5189 struct skl_wm_params wm_params;
5190 int ret;
5191
5192 ret = skl_compute_plane_wm_params(crtc_state, plane_state,
5193 &wm_params, color_plane);
5194 if (ret)
5195 return ret;
5196
5197 skl_compute_wm_levels(crtc_state, &wm_params, wm->wm);
5198 skl_compute_transition_wm(crtc_state, &wm_params, wm);
5199
5200 return 0;
5201}
5202
5203static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
5204 const struct intel_plane_state *plane_state,
5205 enum plane_id plane_id)
5206{
5207 struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
5208 struct skl_wm_params wm_params;
5209 int ret;
5210
5211 wm->is_planar = true;
5212
        /* uv plane watermarks must also be validated for NV12/Planar */
5214 ret = skl_compute_plane_wm_params(crtc_state, plane_state,
5215 &wm_params, 1);
5216 if (ret)
5217 return ret;
5218
5219 skl_compute_wm_levels(crtc_state, &wm_params, wm->uv_wm);
5220
5221 return 0;
5222}
5223
5224static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
5225 const struct intel_plane_state *plane_state)
5226{
5227 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
5228 const struct drm_framebuffer *fb = plane_state->hw.fb;
5229 enum plane_id plane_id = plane->id;
5230 int ret;
5231
5232 if (!intel_wm_plane_visible(crtc_state, plane_state))
5233 return 0;
5234
5235 ret = skl_build_plane_wm_single(crtc_state, plane_state,
5236 plane_id, 0);
5237 if (ret)
5238 return ret;
5239
5240 if (fb->format->is_yuv && fb->format->num_planes > 1) {
5241 ret = skl_build_plane_wm_uv(crtc_state, plane_state,
5242 plane_id);
5243 if (ret)
5244 return ret;
5245 }
5246
5247 return 0;
5248}
5249
5250static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
5251 const struct intel_plane_state *plane_state)
5252{
5253 enum plane_id plane_id = to_intel_plane(plane_state->uapi.plane)->id;
5254 int ret;
5255
        /* Watermarks calculated on the master plane */
5257 if (plane_state->planar_slave)
5258 return 0;
5259
5260 if (plane_state->planar_linked_plane) {
5261 const struct drm_framebuffer *fb = plane_state->hw.fb;
5262 enum plane_id y_plane_id = plane_state->planar_linked_plane->id;
5263
5264 WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state));
5265 WARN_ON(!fb->format->is_yuv ||
5266 fb->format->num_planes == 1);
5267
5268 ret = skl_build_plane_wm_single(crtc_state, plane_state,
5269 y_plane_id, 0);
5270 if (ret)
5271 return ret;
5272
5273 ret = skl_build_plane_wm_single(crtc_state, plane_state,
5274 plane_id, 1);
5275 if (ret)
5276 return ret;
5277 } else if (intel_wm_plane_visible(crtc_state, plane_state)) {
5278 ret = skl_build_plane_wm_single(crtc_state, plane_state,
5279 plane_id, 0);
5280 if (ret)
5281 return ret;
5282 }
5283
5284 return 0;
5285}
5286
5287static int skl_build_pipe_wm(struct intel_crtc_state *crtc_state)
5288{
5289 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
5290 struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
5291 struct intel_plane *plane;
5292 const struct intel_plane_state *plane_state;
5293 int ret;
5294
        /*
         * We'll only calculate watermarks for planes that are actually
         * enabled, so make sure all other planes are set as disabled.
         */
5299 memset(pipe_wm->planes, 0, sizeof(pipe_wm->planes));
5300
5301 intel_atomic_crtc_state_for_each_plane_state(plane, plane_state,
5302 crtc_state) {
5303
5304 if (INTEL_GEN(dev_priv) >= 11)
5305 ret = icl_build_plane_wm(crtc_state, plane_state);
5306 else
5307 ret = skl_build_plane_wm(crtc_state, plane_state);
5308 if (ret)
5309 return ret;
5310 }
5311
5312 return 0;
5313}
5314
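/* DDB entries are programmed as ((end - 1) << 16) | start; an empty entry writes 0. */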
5315static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
5316 i915_reg_t reg,
5317 const struct skl_ddb_entry *entry)
5318{
5319 if (entry->end)
5320 intel_de_write_fw(dev_priv, reg,
5321 (entry->end - 1) << 16 | entry->start);
5322 else
5323 intel_de_write_fw(dev_priv, reg, 0);
5324}
5325
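/* Pack a computed watermark level into the PLANE_WM/CUR_WM register layout. */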
5326static void skl_write_wm_level(struct drm_i915_private *dev_priv,
5327 i915_reg_t reg,
5328 const struct skl_wm_level *level)
5329{
5330 u32 val = 0;
5331
5332 if (level->plane_en)
5333 val |= PLANE_WM_EN;
5334 if (level->ignore_lines)
5335 val |= PLANE_WM_IGNORE_LINES;
5336 val |= level->plane_res_b;
5337 val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
5338
5339 intel_de_write_fw(dev_priv, reg, val);
5340}
5341
5342void skl_write_plane_wm(struct intel_plane *plane,
5343 const struct intel_crtc_state *crtc_state)
5344{
5345 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
5346 int level, max_level = ilk_wm_max_level(dev_priv);
5347 enum plane_id plane_id = plane->id;
5348 enum pipe pipe = plane->pipe;
5349 const struct skl_plane_wm *wm =
5350 &crtc_state->wm.skl.optimal.planes[plane_id];
5351 const struct skl_ddb_entry *ddb_y =
5352 &crtc_state->wm.skl.plane_ddb_y[plane_id];
5353 const struct skl_ddb_entry *ddb_uv =
5354 &crtc_state->wm.skl.plane_ddb_uv[plane_id];
5355
5356 for (level = 0; level <= max_level; level++) {
5357 skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
5358 &wm->wm[level]);
5359 }
5360 skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
5361 &wm->trans_wm);
5362
5363 if (INTEL_GEN(dev_priv) >= 11) {
5364 skl_ddb_entry_write(dev_priv,
5365 PLANE_BUF_CFG(pipe, plane_id), ddb_y);
5366 return;
5367 }
5368
5369 if (wm->is_planar)
5370 swap(ddb_y, ddb_uv);
5371
5372 skl_ddb_entry_write(dev_priv,
5373 PLANE_BUF_CFG(pipe, plane_id), ddb_y);
5374 skl_ddb_entry_write(dev_priv,
5375 PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_uv);
5376}
5377
5378void skl_write_cursor_wm(struct intel_plane *plane,
5379 const struct intel_crtc_state *crtc_state)
5380{
5381 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
5382 int level, max_level = ilk_wm_max_level(dev_priv);
5383 enum plane_id plane_id = plane->id;
5384 enum pipe pipe = plane->pipe;
5385 const struct skl_plane_wm *wm =
5386 &crtc_state->wm.skl.optimal.planes[plane_id];
5387 const struct skl_ddb_entry *ddb =
5388 &crtc_state->wm.skl.plane_ddb_y[plane_id];
5389
5390 for (level = 0; level <= max_level; level++) {
5391 skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
5392 &wm->wm[level]);
5393 }
5394 skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm);
5395
5396 skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), ddb);
5397}
5398
5399bool skl_wm_level_equals(const struct skl_wm_level *l1,
5400 const struct skl_wm_level *l2)
5401{
5402 return l1->plane_en == l2->plane_en &&
5403 l1->ignore_lines == l2->ignore_lines &&
5404 l1->plane_res_l == l2->plane_res_l &&
5405 l1->plane_res_b == l2->plane_res_b;
5406}
5407
5408static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv,
5409 const struct skl_plane_wm *wm1,
5410 const struct skl_plane_wm *wm2)
5411{
5412 int level, max_level = ilk_wm_max_level(dev_priv);
5413
5414 for (level = 0; level <= max_level; level++) {
                /*
                 * We don't check uv_wm as the hardware doesn't actually
                 * use it. It only gets used for calculating the required
                 * ddb allocation.
                 */
5420 if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]))
5421 return false;
5422 }
5423
5424 return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm);
5425}
5426
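/* DDB entries are half-open [start, end) ranges, hence the strict comparisons. */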
5427static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
5428 const struct skl_ddb_entry *b)
5429{
5430 return a->start < b->end && b->start < a->end;
5431}
5432
5433bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
5434 const struct skl_ddb_entry *entries,
5435 int num_entries, int ignore_idx)
5436{
5437 int i;
5438
5439 for (i = 0; i < num_entries; i++) {
5440 if (i != ignore_idx &&
5441 skl_ddb_entries_overlap(ddb, &entries[i]))
5442 return true;
5443 }
5444
5445 return false;
5446}
5447
5448static int
5449skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
5450 struct intel_crtc_state *new_crtc_state)
5451{
5452 struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->uapi.state);
5453 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
5454 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5455 struct intel_plane *plane;
5456
5457 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
5458 struct intel_plane_state *plane_state;
5459 enum plane_id plane_id = plane->id;
5460
5461 if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id],
5462 &new_crtc_state->wm.skl.plane_ddb_y[plane_id]) &&
5463 skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_uv[plane_id],
5464 &new_crtc_state->wm.skl.plane_ddb_uv[plane_id]))
5465 continue;
5466
5467 plane_state = intel_atomic_get_plane_state(state, plane);
5468 if (IS_ERR(plane_state))
5469 return PTR_ERR(plane_state);
5470
5471 new_crtc_state->update_planes |= BIT(plane_id);
5472 }
5473
5474 return 0;
5475}
5476
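/*
 * Recompute the DDB allocation of every pipe in the state, and pull in any
 * plane whose allocation changed so that its registers get rewritten.
 */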
5477static int
5478skl_compute_ddb(struct intel_atomic_state *state)
5479{
5480 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
5481 struct intel_crtc_state *old_crtc_state;
5482 struct intel_crtc_state *new_crtc_state;
5483 struct intel_crtc *crtc;
5484 int ret, i;
5485
5486 state->enabled_dbuf_slices_mask = dev_priv->enabled_dbuf_slices_mask;
5487
5488 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
5489 new_crtc_state, i) {
5490 ret = skl_allocate_pipe_ddb(new_crtc_state);
5491 if (ret)
5492 return ret;
5493
5494 ret = skl_ddb_add_affected_planes(old_crtc_state,
5495 new_crtc_state);
5496 if (ret)
5497 return ret;
5498 }
5499
5500 return 0;
5501}
5502
5503static char enast(bool enable)
5504{
5505 return enable ? '*' : ' ';
5506}
5507
5508static void
5509skl_print_wm_changes(struct intel_atomic_state *state)
5510{
5511 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
5512 const struct intel_crtc_state *old_crtc_state;
5513 const struct intel_crtc_state *new_crtc_state;
5514 struct intel_plane *plane;
5515 struct intel_crtc *crtc;
5516 int i;
5517
5518 if (!drm_debug_enabled(DRM_UT_KMS))
5519 return;
5520
5521 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
5522 new_crtc_state, i) {
5523 const struct skl_pipe_wm *old_pipe_wm, *new_pipe_wm;
5524
5525 old_pipe_wm = &old_crtc_state->wm.skl.optimal;
5526 new_pipe_wm = &new_crtc_state->wm.skl.optimal;
5527
5528 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
5529 enum plane_id plane_id = plane->id;
5530 const struct skl_ddb_entry *old, *new;
5531
5532 old = &old_crtc_state->wm.skl.plane_ddb_y[plane_id];
5533 new = &new_crtc_state->wm.skl.plane_ddb_y[plane_id];
5534
5535 if (skl_ddb_entry_equal(old, new))
5536 continue;
5537
5538 drm_dbg_kms(&dev_priv->drm,
5539 "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
5540 plane->base.base.id, plane->base.name,
5541 old->start, old->end, new->start, new->end,
5542 skl_ddb_entry_size(old), skl_ddb_entry_size(new));
5543 }
5544
5545 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
5546 enum plane_id plane_id = plane->id;
5547 const struct skl_plane_wm *old_wm, *new_wm;
5548
5549 old_wm = &old_pipe_wm->planes[plane_id];
5550 new_wm = &new_pipe_wm->planes[plane_id];
5551
5552 if (skl_plane_wm_equals(dev_priv, old_wm, new_wm))
5553 continue;
5554
5555 drm_dbg_kms(&dev_priv->drm,
5556 "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm"
5557 " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm\n",
5558 plane->base.base.id, plane->base.name,
5559 enast(old_wm->wm[0].plane_en), enast(old_wm->wm[1].plane_en),
5560 enast(old_wm->wm[2].plane_en), enast(old_wm->wm[3].plane_en),
5561 enast(old_wm->wm[4].plane_en), enast(old_wm->wm[5].plane_en),
5562 enast(old_wm->wm[6].plane_en), enast(old_wm->wm[7].plane_en),
5563 enast(old_wm->trans_wm.plane_en),
5564 enast(new_wm->wm[0].plane_en), enast(new_wm->wm[1].plane_en),
5565 enast(new_wm->wm[2].plane_en), enast(new_wm->wm[3].plane_en),
5566 enast(new_wm->wm[4].plane_en), enast(new_wm->wm[5].plane_en),
5567 enast(new_wm->wm[6].plane_en), enast(new_wm->wm[7].plane_en),
5568 enast(new_wm->trans_wm.plane_en));
5569
5570 drm_dbg_kms(&dev_priv->drm,
5571 "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d"
5572 " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n",
5573 plane->base.base.id, plane->base.name,
5574 enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].plane_res_l,
5575 enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].plane_res_l,
5576 enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l,
5577 enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l,
5578 enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l,
5579 enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l,
5580 enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l,
5581 enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l,
5582 enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.plane_res_l,
5583
5584 enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].plane_res_l,
5585 enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].plane_res_l,
5586 enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].plane_res_l,
5587 enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].plane_res_l,
5588 enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].plane_res_l,
5589 enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].plane_res_l,
5590 enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].plane_res_l,
5591 enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].plane_res_l,
5592 enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.plane_res_l);
5593
5594 drm_dbg_kms(&dev_priv->drm,
5595 "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
5596 " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
5597 plane->base.base.id, plane->base.name,
5598 old_wm->wm[0].plane_res_b, old_wm->wm[1].plane_res_b,
5599 old_wm->wm[2].plane_res_b, old_wm->wm[3].plane_res_b,
5600 old_wm->wm[4].plane_res_b, old_wm->wm[5].plane_res_b,
5601 old_wm->wm[6].plane_res_b, old_wm->wm[7].plane_res_b,
5602 old_wm->trans_wm.plane_res_b,
5603 new_wm->wm[0].plane_res_b, new_wm->wm[1].plane_res_b,
5604 new_wm->wm[2].plane_res_b, new_wm->wm[3].plane_res_b,
5605 new_wm->wm[4].plane_res_b, new_wm->wm[5].plane_res_b,
5606 new_wm->wm[6].plane_res_b, new_wm->wm[7].plane_res_b,
5607 new_wm->trans_wm.plane_res_b);
5608
5609 drm_dbg_kms(&dev_priv->drm,
5610 "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
5611 " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
5612 plane->base.base.id, plane->base.name,
5613 old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
5614 old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
5615 old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
5616 old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
5617 old_wm->trans_wm.min_ddb_alloc,
5618 new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
5619 new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
5620 new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
5621 new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
5622 new_wm->trans_wm.min_ddb_alloc);
5623 }
5624 }
5625}
5626
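/* Pull every CRTC into the atomic state, e.g. for a global DDB recompute. */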
5627static int intel_add_all_pipes(struct intel_atomic_state *state)
5628{
5629 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
5630 struct intel_crtc *crtc;
5631
5632 for_each_intel_crtc(&dev_priv->drm, crtc) {
5633 struct intel_crtc_state *crtc_state;
5634
5635 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
5636 if (IS_ERR(crtc_state))
5637 return PTR_ERR(crtc_state);
5638 }
5639
5640 return 0;
5641}
5642
5643static int
5644skl_ddb_add_affected_pipes(struct intel_atomic_state *state)
5645{
5646 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
5647 int ret;
5648
        /*
         * If this is our first atomic update following hardware readout,
         * we can't trust the DDB that the BIOS programmed for us.  Let's
         * pretend that all pipes switched active status so that we'll
         * ensure a full DDB recompute.
         */
5655 if (dev_priv->wm.distrust_bios_wm) {
5656 ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
5657 state->base.acquire_ctx);
5658 if (ret)
5659 return ret;
5660
5661 state->active_pipe_changes = INTEL_INFO(dev_priv)->pipe_mask;

                /*
                 * We usually only initialize state->active_pipes if we
                 * are doing a modeset; make sure this field is always
                 * initialized during the sanitization process that happens
                 * on the first commit too.
                 */
5669 if (!state->modeset)
5670 state->active_pipes = dev_priv->active_pipes;
5671 }
5672
        /*
         * If the modeset changes which CRTC's are active, we need to
         * recompute the DDB allocation for *all* active pipes, even
         * those that weren't otherwise being modified in any way by this
         * atomic commit.  Due to the shrinking of the per-pipe allocations
         * when new active CRTC's are added, it's possible for a pipe that
         * we were already using and aren't changing at all here to suddenly
         * become invalid if its DDB needs to change as a result of the new
         * CRTC count.
         */
5686 if (state->active_pipe_changes || state->modeset) {
5687 ret = intel_add_all_pipes(state);
5688 if (ret)
5689 return ret;
5690 }
5691
5692 return 0;
5693}
5694
/*
 * Add to the atomic state any planes whose computed watermarks changed,
 * so that their watermark registers get reprogrammed during the commit.
 * Planes with unchanged watermarks are skipped, unless the CRTC is
 * undergoing a full modeset, in which case every plane is pulled in.
 */
5717static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
5718 struct intel_crtc *crtc)
5719{
5720 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5721 const struct intel_crtc_state *old_crtc_state =
5722 intel_atomic_get_old_crtc_state(state, crtc);
5723 struct intel_crtc_state *new_crtc_state =
5724 intel_atomic_get_new_crtc_state(state, crtc);
5725 struct intel_plane *plane;
5726
5727 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
5728 struct intel_plane_state *plane_state;
5729 enum plane_id plane_id = plane->id;
5730
                /*
                 * Nothing to reprogram if neither the watermark levels nor
                 * the transition watermark of this plane changed and no
                 * modeset forces a full update; otherwise grab the plane
                 * state so its registers get rewritten below.
                 */
5739 if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi) &&
5740 skl_plane_wm_equals(dev_priv,
5741 &old_crtc_state->wm.skl.optimal.planes[plane_id],
5742 &new_crtc_state->wm.skl.optimal.planes[plane_id]))
5743 continue;
5744
5745 plane_state = intel_atomic_get_plane_state(state, plane);
5746 if (IS_ERR(plane_state))
5747 return PTR_ERR(plane_state);
5748
5749 new_crtc_state->update_planes |= BIT(plane_id);
5750 }
5751
5752 return 0;
5753}
5754
5755static int
5756skl_compute_wm(struct intel_atomic_state *state)
5757{
5758 struct intel_crtc *crtc;
5759 struct intel_crtc_state *new_crtc_state;
5760 struct intel_crtc_state *old_crtc_state;
5761 int ret, i;
5762
5763 ret = skl_ddb_add_affected_pipes(state);
5764 if (ret)
5765 return ret;
5766
        /*
         * Calculate WM's for all pipes that are part of this transaction.
         * Note that skl_ddb_add_affected_pipes may have added more CRTC's
         * to the state than we originally had if pipe allocations had to
         * change.
         */
5772 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
5773 new_crtc_state, i) {
5774 ret = skl_build_pipe_wm(new_crtc_state);
5775 if (ret)
5776 return ret;
5777 }
5778
5779 ret = skl_compute_ddb(state);
5780 if (ret)
5781 return ret;
5782
        /*
         * skl_compute_ddb() will have adjusted the final watermarks
         * based on how much ddb is available. Now we can actually
         * check if the final watermarks changed.
         */
5788 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
5789 new_crtc_state, i) {
5790 ret = skl_wm_add_affected_planes(state, crtc);
5791 if (ret)
5792 return ret;
5793 }
5794
5795 skl_print_wm_changes(state);
5796
5797 return 0;
5798}
5799
5800static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
5801 struct intel_wm_config *config)
5802{
5803 struct intel_crtc *crtc;
5804
        /* Compute the currently _active_ config */
5806 for_each_intel_crtc(&dev_priv->drm, crtc) {
5807 const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
5808
5809 if (!wm->pipe_enabled)
5810 continue;
5811
5812 config->sprites_enabled |= wm->sprites_enabled;
5813 config->sprites_scaled |= wm->sprites_scaled;
5814 config->num_pipes_active++;
5815 }
5816}
5817
5818static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
5819{
5820 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
5821 struct ilk_wm_maximums max;
5822 struct intel_wm_config config = {};
5823 struct ilk_wm_values results = {};
5824 enum intel_ddb_partitioning partitioning;
5825
5826 ilk_compute_wm_config(dev_priv, &config);
5827
5828 ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_1_2, &max);
5829 ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2);
5830
        /* 5/6 split only in single pipe config on IVB+ */
5832 if (INTEL_GEN(dev_priv) >= 7 &&
5833 config.num_pipes_active == 1 && config.sprites_enabled) {
5834 ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max);
5835 ilk_wm_merge(dev_priv, &config, &max, &lp_wm_5_6);
5836
5837 best_lp_wm = ilk_find_best_result(dev_priv, &lp_wm_1_2, &lp_wm_5_6);
5838 } else {
5839 best_lp_wm = &lp_wm_1_2;
5840 }
5841
5842 partitioning = (best_lp_wm == &lp_wm_1_2) ?
5843 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
5844
5845 ilk_compute_wm_results(dev_priv, best_lp_wm, partitioning, &results);
5846
5847 ilk_write_wm_values(dev_priv, &results);
5848}
5849
5850static void ilk_initial_watermarks(struct intel_atomic_state *state,
5851 struct intel_crtc *crtc)
5852{
5853 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5854 const struct intel_crtc_state *crtc_state =
5855 intel_atomic_get_new_crtc_state(state, crtc);
5856
5857 mutex_lock(&dev_priv->wm.wm_mutex);
5858 crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate;
5859 ilk_program_watermarks(dev_priv);
5860 mutex_unlock(&dev_priv->wm.wm_mutex);
5861}
5862
5863static void ilk_optimize_watermarks(struct intel_atomic_state *state,
5864 struct intel_crtc *crtc)
5865{
5866 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5867 const struct intel_crtc_state *crtc_state =
5868 intel_atomic_get_new_crtc_state(state, crtc);
5869
5870 if (!crtc_state->wm.need_postvbl_update)
5871 return;
5872
5873 mutex_lock(&dev_priv->wm.wm_mutex);
5874 crtc->wm.active.ilk = crtc_state->wm.ilk.optimal;
5875 ilk_program_watermarks(dev_priv);
5876 mutex_unlock(&dev_priv->wm.wm_mutex);
5877}
5878
5879static inline void skl_wm_level_from_reg_val(u32 val,
5880 struct skl_wm_level *level)
5881{
5882 level->plane_en = val & PLANE_WM_EN;
5883 level->ignore_lines = val & PLANE_WM_IGNORE_LINES;
5884 level->plane_res_b = val & PLANE_WM_BLOCKS_MASK;
5885 level->plane_res_l = (val >> PLANE_WM_LINES_SHIFT) &
5886 PLANE_WM_LINES_MASK;
5887}
5888
5889void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
5890 struct skl_pipe_wm *out)
5891{
5892 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5893 enum pipe pipe = crtc->pipe;
5894 int level, max_level;
5895 enum plane_id plane_id;
5896 u32 val;
5897
5898 max_level = ilk_wm_max_level(dev_priv);
5899
5900 for_each_plane_id_on_crtc(crtc, plane_id) {
5901 struct skl_plane_wm *wm = &out->planes[plane_id];
5902
5903 for (level = 0; level <= max_level; level++) {
5904 if (plane_id != PLANE_CURSOR)
5905 val = I915_READ(PLANE_WM(pipe, plane_id, level));
5906 else
5907 val = I915_READ(CUR_WM(pipe, level));
5908
5909 skl_wm_level_from_reg_val(val, &wm->wm[level]);
5910 }
5911
5912 if (plane_id != PLANE_CURSOR)
5913 val = I915_READ(PLANE_WM_TRANS(pipe, plane_id));
5914 else
5915 val = I915_READ(CUR_WM_TRANS(pipe));
5916
5917 skl_wm_level_from_reg_val(val, &wm->trans_wm);
5918 }
5919
5920 if (!crtc->active)
5921 return;
5922}
5923
5924void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
5925{
5926 struct intel_crtc *crtc;
5927 struct intel_crtc_state *crtc_state;
5928
5929 skl_ddb_get_hw_state(dev_priv);
5930 for_each_intel_crtc(&dev_priv->drm, crtc) {
5931 crtc_state = to_intel_crtc_state(crtc->base.state);
5932
5933 skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
5934 }
5935
5936 if (dev_priv->active_pipes) {
                /* Fully recompute DDB on first atomic commit */
5938 dev_priv->wm.distrust_bios_wm = true;
5939 }
5940}
5941
5942static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
5943{
5944 struct drm_device *dev = crtc->base.dev;
5945 struct drm_i915_private *dev_priv = to_i915(dev);
5946 struct ilk_wm_values *hw = &dev_priv->wm.hw;
5947 struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
5948 struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal;
5949 enum pipe pipe = crtc->pipe;
5950 static const i915_reg_t wm0_pipe_reg[] = {
5951 [PIPE_A] = WM0_PIPEA_ILK,
5952 [PIPE_B] = WM0_PIPEB_ILK,
5953 [PIPE_C] = WM0_PIPEC_IVB,
5954 };
5955
5956 hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
5957
5958 memset(active, 0, sizeof(*active));
5959
5960 active->pipe_enabled = crtc->active;
5961
5962 if (active->pipe_enabled) {
5963 u32 tmp = hw->wm_pipe[pipe];
5964
                /*
                 * For active pipes LP0 watermark is marked as
                 * enabled, and LP1+ watermarks as disabled since
                 * we can't really reverse compute them in case
                 * multiple pipes are active.
                 */
5971 active->wm[0].enable = true;
5972 active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
5973 active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
5974 active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
5975 } else {
5976 int level, max_level = ilk_wm_max_level(dev_priv);

                /*
                 * For inactive pipes, all watermark levels
                 * should be marked as enabled but zeroed,
                 * which is what we'd compute them to.
                 */
5983 for (level = 0; level <= max_level; level++)
5984 active->wm[level].enable = true;
5985 }
5986
5987 crtc->wm.active.ilk = *active;
5988}
5989
5990#define _FW_WM(value, plane) \
5991 (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
5992#define _FW_WM_VLV(value, plane) \
5993 (((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
5994
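/* Read back the raw watermark values from the DSPFW registers. */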
5995static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
5996 struct g4x_wm_values *wm)
5997{
5998 u32 tmp;
5999
6000 tmp = I915_READ(DSPFW1);
6001 wm->sr.plane = _FW_WM(tmp, SR);
6002 wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
6003 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB);
6004 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA);
6005
6006 tmp = I915_READ(DSPFW2);
6007 wm->fbc_en = tmp & DSPFW_FBC_SR_EN;
6008 wm->sr.fbc = _FW_WM(tmp, FBC_SR);
6009 wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR);
6010 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEB);
6011 wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
6012 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA);
6013
6014 tmp = I915_READ(DSPFW3);
6015 wm->hpll_en = tmp & DSPFW_HPLL_SR_EN;
6016 wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
6017 wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR);
6018 wm->hpll.plane = _FW_WM(tmp, HPLL_SR);
6019}
6020
6021static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
6022 struct vlv_wm_values *wm)
6023{
6024 enum pipe pipe;
6025 u32 tmp;
6026
6027 for_each_pipe(dev_priv, pipe) {
6028 tmp = I915_READ(VLV_DDL(pipe));
6029
6030 wm->ddl[pipe].plane[PLANE_PRIMARY] =
6031 (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
6032 wm->ddl[pipe].plane[PLANE_CURSOR] =
6033 (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
6034 wm->ddl[pipe].plane[PLANE_SPRITE0] =
6035 (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
6036 wm->ddl[pipe].plane[PLANE_SPRITE1] =
6037 (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
6038 }
6039
6040 tmp = I915_READ(DSPFW1);
6041 wm->sr.plane = _FW_WM(tmp, SR);
6042 wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
6043 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
6044 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);
6045
6046 tmp = I915_READ(DSPFW2);
6047 wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
6048 wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
6049 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);
6050
6051 tmp = I915_READ(DSPFW3);
6052 wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
6053
6054 if (IS_CHERRYVIEW(dev_priv)) {
6055 tmp = I915_READ(DSPFW7_CHV);
6056 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
6057 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
6058
6059 tmp = I915_READ(DSPFW8_CHV);
6060 wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
6061 wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);
6062
6063 tmp = I915_READ(DSPFW9_CHV);
6064 wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
6065 wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);
6066
6067 tmp = I915_READ(DSPHOWM);
6068 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
6069 wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
6070 wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
6071 wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEC_HI) << 8;
6072 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
6073 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
6074 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
6075 wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
6076 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
6077 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
6078 } else {
6079 tmp = I915_READ(DSPFW7);
6080 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
6081 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
6082
6083 tmp = I915_READ(DSPHOWM);
6084 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
6085 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
6086 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
6087 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
6088 wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
6089 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
6090 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
6091 }
6092}
6093
6094#undef _FW_WM
6095#undef _FW_WM_VLV
6096
6097void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
6098{
6099 struct g4x_wm_values *wm = &dev_priv->wm.g4x;
6100 struct intel_crtc *crtc;
6101
6102 g4x_read_wm_values(dev_priv, wm);
6103
6104 wm->cxsr = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
6105
6106 for_each_intel_crtc(&dev_priv->drm, crtc) {
6107 struct intel_crtc_state *crtc_state =
6108 to_intel_crtc_state(crtc->base.state);
6109 struct g4x_wm_state *active = &crtc->wm.active.g4x;
6110 struct g4x_pipe_wm *raw;
6111 enum pipe pipe = crtc->pipe;
6112 enum plane_id plane_id;
6113 int level, max_level;
6114
6115 active->cxsr = wm->cxsr;
6116 active->hpll_en = wm->hpll_en;
6117 active->fbc_en = wm->fbc_en;
6118
6119 active->sr = wm->sr;
6120 active->hpll = wm->hpll;
6121
6122 for_each_plane_id_on_crtc(crtc, plane_id) {
6123 active->wm.plane[plane_id] =
6124 wm->pipe[pipe].plane[plane_id];
6125 }
6126
6127 if (wm->cxsr && wm->hpll_en)
6128 max_level = G4X_WM_LEVEL_HPLL;
6129 else if (wm->cxsr)
6130 max_level = G4X_WM_LEVEL_SR;
6131 else
6132 max_level = G4X_WM_LEVEL_NORMAL;
6133
6134 level = G4X_WM_LEVEL_NORMAL;
6135 raw = &crtc_state->wm.g4x.raw[level];
6136 for_each_plane_id_on_crtc(crtc, plane_id)
6137 raw->plane[plane_id] = active->wm.plane[plane_id];
6138
6139 if (++level > max_level)
6140 goto out;
6141
6142 raw = &crtc_state->wm.g4x.raw[level];
6143 raw->plane[PLANE_PRIMARY] = active->sr.plane;
6144 raw->plane[PLANE_CURSOR] = active->sr.cursor;
6145 raw->plane[PLANE_SPRITE0] = 0;
6146 raw->fbc = active->sr.fbc;
6147
6148 if (++level > max_level)
6149 goto out;
6150
6151 raw = &crtc_state->wm.g4x.raw[level];
6152 raw->plane[PLANE_PRIMARY] = active->hpll.plane;
6153 raw->plane[PLANE_CURSOR] = active->hpll.cursor;
6154 raw->plane[PLANE_SPRITE0] = 0;
6155 raw->fbc = active->hpll.fbc;
6156
6157 out:
6158 for_each_plane_id_on_crtc(crtc, plane_id)
6159 g4x_raw_plane_wm_set(crtc_state, level,
6160 plane_id, USHRT_MAX);
6161 g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);
6162
6163 crtc_state->wm.g4x.optimal = *active;
6164 crtc_state->wm.g4x.intermediate = *active;
6165
6166 drm_dbg_kms(&dev_priv->drm,
6167 "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n",
6168 pipe_name(pipe),
6169 wm->pipe[pipe].plane[PLANE_PRIMARY],
6170 wm->pipe[pipe].plane[PLANE_CURSOR],
6171 wm->pipe[pipe].plane[PLANE_SPRITE0]);
6172 }
6173
6174 drm_dbg_kms(&dev_priv->drm,
6175 "Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n",
6176 wm->sr.plane, wm->sr.cursor, wm->sr.fbc);
6177 drm_dbg_kms(&dev_priv->drm,
6178 "Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n",
6179 wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
6180 drm_dbg_kms(&dev_priv->drm, "Initial SR=%s HPLL=%s FBC=%s\n",
6181 yesno(wm->cxsr), yesno(wm->hpll_en), yesno(wm->fbc_en));
6182}
6183
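/*
 * Zero out the watermarks of planes that are not visible, so that the
 * software watermark state matches what the hardware will actually use
 * once we take over from the BIOS.
 */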
6184void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
6185{
6186 struct intel_plane *plane;
6187 struct intel_crtc *crtc;
6188
6189 mutex_lock(&dev_priv->wm.wm_mutex);
6190
6191 for_each_intel_plane(&dev_priv->drm, plane) {
6192 struct intel_crtc *crtc =
6193 intel_get_crtc_for_pipe(dev_priv, plane->pipe);
6194 struct intel_crtc_state *crtc_state =
6195 to_intel_crtc_state(crtc->base.state);
6196 struct intel_plane_state *plane_state =
6197 to_intel_plane_state(plane->base.state);
6198 struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
6199 enum plane_id plane_id = plane->id;
6200 int level;
6201
6202 if (plane_state->uapi.visible)
6203 continue;
6204
6205 for (level = 0; level < 3; level++) {
6206 struct g4x_pipe_wm *raw =
6207 &crtc_state->wm.g4x.raw[level];
6208
6209 raw->plane[plane_id] = 0;
6210 wm_state->wm.plane[plane_id] = 0;
6211 }
6212
6213 if (plane_id == PLANE_PRIMARY) {
6214 for (level = 0; level < 3; level++) {
6215 struct g4x_pipe_wm *raw =
6216 &crtc_state->wm.g4x.raw[level];
6217 raw->fbc = 0;
6218 }
6219
6220 wm_state->sr.fbc = 0;
6221 wm_state->hpll.fbc = 0;
6222 wm_state->fbc_en = false;
6223 }
6224 }
6225
6226 for_each_intel_crtc(&dev_priv->drm, crtc) {
6227 struct intel_crtc_state *crtc_state =
6228 to_intel_crtc_state(crtc->base.state);
6229
6230 crtc_state->wm.g4x.intermediate =
6231 crtc_state->wm.g4x.optimal;
6232 crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
6233 }
6234
6235 g4x_program_watermarks(dev_priv);
6236
6237 mutex_unlock(&dev_priv->wm.wm_mutex);
6238}
6239
6240void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
6241{
6242 struct vlv_wm_values *wm = &dev_priv->wm.vlv;
6243 struct intel_crtc *crtc;
6244 u32 val;
6245
6246 vlv_read_wm_values(dev_priv, wm);
6247
6248 wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
6249 wm->level = VLV_WM_LEVEL_PM2;
6250
6251 if (IS_CHERRYVIEW(dev_priv)) {
6252 vlv_punit_get(dev_priv);
6253
6254 val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
6255 if (val & DSP_MAXFIFO_PM5_ENABLE)
6256 wm->level = VLV_WM_LEVEL_PM5;
6257
                /*
                 * If DDR DVFS is disabled in the BIOS, Punit
                 * will never ack the request. So if that happens
                 * assume we don't have to enable/disable DDR DVFS
                 * dynamically. To test that just set the REQ_ACK
                 * bit to poke the Punit, but don't change the
                 * HIGH/LOW bits so that we don't actually change
                 * the current state.
                 */
6267 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
6268 val |= FORCE_DDR_FREQ_REQ_ACK;
6269 vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
6270
6271 if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
6272 FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
6273 drm_dbg_kms(&dev_priv->drm,
6274 "Punit not acking DDR DVFS request, "
6275 "assuming DDR DVFS is disabled\n");
6276 dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
6277 } else {
6278 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
6279 if ((val & FORCE_DDR_HIGH_FREQ) == 0)
6280 wm->level = VLV_WM_LEVEL_DDR_DVFS;
6281 }
6282
6283 vlv_punit_put(dev_priv);
6284 }
6285
6286 for_each_intel_crtc(&dev_priv->drm, crtc) {
6287 struct intel_crtc_state *crtc_state =
6288 to_intel_crtc_state(crtc->base.state);
6289 struct vlv_wm_state *active = &crtc->wm.active.vlv;
6290 const struct vlv_fifo_state *fifo_state =
6291 &crtc_state->wm.vlv.fifo_state;
6292 enum pipe pipe = crtc->pipe;
6293 enum plane_id plane_id;
6294 int level;
6295
6296 vlv_get_fifo_size(crtc_state);
6297
6298 active->num_levels = wm->level + 1;
6299 active->cxsr = wm->cxsr;
6300
6301 for (level = 0; level < active->num_levels; level++) {
6302 struct g4x_pipe_wm *raw =
6303 &crtc_state->wm.vlv.raw[level];
6304
6305 active->sr[level].plane = wm->sr.plane;
6306 active->sr[level].cursor = wm->sr.cursor;
6307
6308 for_each_plane_id_on_crtc(crtc, plane_id) {
6309 active->wm[level].plane[plane_id] =
6310 wm->pipe[pipe].plane[plane_id];
6311
6312 raw->plane[plane_id] =
6313 vlv_invert_wm_value(active->wm[level].plane[plane_id],
6314 fifo_state->plane[plane_id]);
6315 }
6316 }
6317
6318 for_each_plane_id_on_crtc(crtc, plane_id)
6319 vlv_raw_plane_wm_set(crtc_state, level,
6320 plane_id, USHRT_MAX);
6321 vlv_invalidate_wms(crtc, active, level);
6322
6323 crtc_state->wm.vlv.optimal = *active;
6324 crtc_state->wm.vlv.intermediate = *active;
6325
6326 drm_dbg_kms(&dev_priv->drm,
6327 "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
6328 pipe_name(pipe),
6329 wm->pipe[pipe].plane[PLANE_PRIMARY],
6330 wm->pipe[pipe].plane[PLANE_CURSOR],
6331 wm->pipe[pipe].plane[PLANE_SPRITE0],
6332 wm->pipe[pipe].plane[PLANE_SPRITE1]);
6333 }
6334
6335 drm_dbg_kms(&dev_priv->drm,
6336 "Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
6337 wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
6338}
6339
6340void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
6341{
6342 struct intel_plane *plane;
6343 struct intel_crtc *crtc;
6344
6345 mutex_lock(&dev_priv->wm.wm_mutex);
6346
6347 for_each_intel_plane(&dev_priv->drm, plane) {
6348 struct intel_crtc *crtc =
6349 intel_get_crtc_for_pipe(dev_priv, plane->pipe);
6350 struct intel_crtc_state *crtc_state =
6351 to_intel_crtc_state(crtc->base.state);
6352 struct intel_plane_state *plane_state =
6353 to_intel_plane_state(plane->base.state);
6354 struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
6355 const struct vlv_fifo_state *fifo_state =
6356 &crtc_state->wm.vlv.fifo_state;
6357 enum plane_id plane_id = plane->id;
6358 int level;
6359
6360 if (plane_state->uapi.visible)
6361 continue;
6362
6363 for (level = 0; level < wm_state->num_levels; level++) {
6364 struct g4x_pipe_wm *raw =
6365 &crtc_state->wm.vlv.raw[level];
6366
6367 raw->plane[plane_id] = 0;
6368
6369 wm_state->wm[level].plane[plane_id] =
6370 vlv_invert_wm_value(raw->plane[plane_id],
6371 fifo_state->plane[plane_id]);
6372 }
6373 }
6374
6375 for_each_intel_crtc(&dev_priv->drm, crtc) {
6376 struct intel_crtc_state *crtc_state =
6377 to_intel_crtc_state(crtc->base.state);
6378
6379 crtc_state->wm.vlv.intermediate =
6380 crtc_state->wm.vlv.optimal;
6381 crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
6382 }
6383
6384 vlv_program_watermarks(dev_priv);
6385
6386 mutex_unlock(&dev_priv->wm.wm_mutex);
6387}
6388
/*
 * FIXME should probably kill this and improve
 * the real watermark readout/sanitation instead
 */
6393static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
6394{
6395 I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
6396 I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
6397 I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
6398
 /*
  * Don't touch WM1S_LP_EN here.
  * Doing so could cause underruns.
  */
6403}
6404
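/*
 * Read the current ILK watermark registers back into dev_priv->wm.hw; the
 * LP watermarks are disabled first so that later reprogramming starts from
 * a known state.
 */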
6405void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
6406{
6407 struct ilk_wm_values *hw = &dev_priv->wm.hw;
6408 struct intel_crtc *crtc;
6409
6410 ilk_init_lp_watermarks(dev_priv);
6411
6412 for_each_intel_crtc(&dev_priv->drm, crtc)
6413 ilk_pipe_wm_get_hw_state(crtc);
6414
6415 hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
6416 hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
6417 hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
6418
6419 hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
6420 if (INTEL_GEN(dev_priv) >= 7) {
6421 hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
6422 hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
6423 }
6424
6425 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
6426 hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
6427 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
6428 else if (IS_IVYBRIDGE(dev_priv))
6429 hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
6430 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
6431
6432 hw->enable_fbc_wm =
6433 !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
6434}
6435
/**
 * intel_update_watermarks - update FIFO watermark values based on current modes
 * @crtc: the #intel_crtc on which to compute the WM
 *
 * Calculate watermark values for the various WM regs based on current mode
 * and plane configuration.
 *
 * There are several cases to deal with here:
 *   - normal (i.e. non-self-refresh)
 *   - self-refresh (SR) mode
 *   - lines are large relative to FIFO size (buffer can hold up to 2)
 *   - lines are small relative to FIFO size (buffer can hold more than 2
 *     lines), so need to account for TLB latency
 *
 *   The normal calculation is:
 *     watermark = dotclock * bytes per pixel * latency
 *   where latency is platform & configuration dependent (we assume pessimal
 *   values here).
 *
 *   The SR calculation is:
 *     watermark = (trunc(latency/line time)+1) * surface width *
 *       bytes per pixel
 *   where
 *     line time = htotal / dotclock
 *     surface width = hdisplay for normal plane and 64 for cursor
 *   and latency is assumed to be high, as above.
 *
 * The final value programmed to the register should always be rounded up,
 * and include an extra 2 entries to account for clock crossings.
 *
 * We don't use the sprite, so we can ignore that.  And on Crestline we have
 * to set the non-SR watermarks to 8.
 */
6469void intel_update_watermarks(struct intel_crtc *crtc)
6470{
6471 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6472
6473 if (dev_priv->display.update_wm)
6474 dev_priv->display.update_wm(crtc);
6475}
6476
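/*
 * Propagate dev_priv->ipc_enabled into the IPC (Isochronous Priority
 * Control) enable bit of DISP_ARB_CTL2.
 */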
6477void intel_enable_ipc(struct drm_i915_private *dev_priv)
6478{
6479 u32 val;
6480
6481 if (!HAS_IPC(dev_priv))
6482 return;
6483
6484 val = I915_READ(DISP_ARB_CTL2);
6485
6486 if (dev_priv->ipc_enabled)
6487 val |= DISP_IPC_ENABLE;
6488 else
6489 val &= ~DISP_IPC_ENABLE;
6490
6491 I915_WRITE(DISP_ARB_CTL2, val);
6492}
6493
6494static bool intel_can_enable_ipc(struct drm_i915_private *dev_priv)
6495{
 /* Display WA #0477 WaDisableIPC: skl */
6497 if (IS_SKYLAKE(dev_priv))
6498 return false;
6499
 /* Display WA #1141: SKL:all KBL:all CFL */
6501 if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
6502 return dev_priv->dram_info.symmetric_memory;
6503
6504 return true;
6505}
6506
6507void intel_init_ipc(struct drm_i915_private *dev_priv)
6508{
6509 if (!HAS_IPC(dev_priv))
6510 return;
6511
6512 dev_priv->ipc_enabled = intel_can_enable_ipc(dev_priv);
6513
6514 intel_enable_ipc(dev_priv);
6515}
6516
6517static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
6518{
 /*
  * On Ibex Peak and Cougar Point, we need to disable clock
  * gating for the panel power sequencer or it will fail to
  * start up when no ports are active.
  */
6524 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
6525}
6526
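/*
 * Disable trickle feed on all display planes; the DSPSURF rewrite plus
 * posting read arms the change so it takes effect on the next vblank.
 */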
6527static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
6528{
6529 enum pipe pipe;
6530
6531 for_each_pipe(dev_priv, pipe) {
6532 I915_WRITE(DSPCNTR(pipe),
6533 I915_READ(DSPCNTR(pipe)) |
6534 DISPPLANE_TRICKLE_FEED_DISABLE);
6535
6536 I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
6537 POSTING_READ(DSPSURF(pipe));
6538 }
6539}
6540
6541static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
6542{
6543 u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
6544
 /*
  * Required for FBC
  * WaFbcDisableDpfcClockGating:ilk
  */
6549 dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
6550 ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
6551 ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
6552
6553 I915_WRITE(PCH_3DCGDIS0,
6554 MARIUNIT_CLOCK_GATE_DISABLE |
6555 SVSMUNIT_CLOCK_GATE_DISABLE);
6556 I915_WRITE(PCH_3DCGDIS1,
6557 VFMUNIT_CLOCK_GATE_DISABLE);
6558
 /*
  * According to the spec the following bits should be set in
  * order to enable memory self-refresh:
  * The bit 22/21 of 0x42004
  * The bit 5 of 0x42020
  * The bit 15 of 0x45000
  */
6566 I915_WRITE(ILK_DISPLAY_CHICKEN2,
6567 (I915_READ(ILK_DISPLAY_CHICKEN2) |
6568 ILK_DPARB_GATE | ILK_VSDPFD_FULL));
6569 dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
6570 I915_WRITE(DISP_ARB_CTL,
6571 (I915_READ(DISP_ARB_CTL) |
6572 DISP_FBC_WM_DIS));
6573
 /*
  * Based on the document from hardware guys the following bits
  * should be set unconditionally in order to enable FBC.
  * The bit 22 of 0x42000
  * The bit 22 of 0x42004
  * The bit 7,8,9 of 0x42020.
  */
6581 if (IS_IRONLAKE_M(dev_priv)) {
  /* WaFbcAsynchFlipDisableFbcQueue:ilk */
6583 I915_WRITE(ILK_DISPLAY_CHICKEN1,
6584 I915_READ(ILK_DISPLAY_CHICKEN1) |
6585 ILK_FBCQ_DIS);
6586 I915_WRITE(ILK_DISPLAY_CHICKEN2,
6587 I915_READ(ILK_DISPLAY_CHICKEN2) |
6588 ILK_DPARB_GATE);
6589 }
6590
6591 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
6592
6593 I915_WRITE(ILK_DISPLAY_CHICKEN2,
6594 I915_READ(ILK_DISPLAY_CHICKEN2) |
6595 ILK_ELPIN_409_SELECT);
6596 I915_WRITE(_3D_CHICKEN2,
6597 _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
6598 _3D_CHICKEN2_WM_READ_PIPELINED);
6599
 /* WaDisableRenderCachePipelinedFlush:ilk */
6601 I915_WRITE(CACHE_MODE_0,
6602 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
6603
 /* WaDisable_RenderCache_OperationalFlush:ilk */
6605 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6606
6607 g4x_disable_trickle_feed(dev_priv);
6608
6609 ibx_init_clock_gating(dev_priv);
6610}
6611
6612static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
6613{
6614 enum pipe pipe;
6615 u32 val;
6616
 /*
  * On Ibex Peak and Cougar Point, we need to disable clock
  * gating for the panel power sequencer or it will fail to
  * start up when no ports are active.
  */
6622 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
6623 PCH_DPLUNIT_CLOCK_GATE_DISABLE |
6624 PCH_CPUNIT_CLOCK_GATE_DISABLE);
6625 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
6626 DPLS_EDP_PPS_FIX_DIS);

 /*
  * The below fixes the weird display corruption, a few pixels shifted
  * downward, on (only) LVDS of some HP laptops with IVY.
  */
6630 for_each_pipe(dev_priv, pipe) {
6631 val = I915_READ(TRANS_CHICKEN2(pipe));
6632 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
6633 val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
6634 if (dev_priv->vbt.fdi_rx_polarity_inverted)
6635 val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
6636 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
6637 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
6638 I915_WRITE(TRANS_CHICKEN2(pipe), val);
6639 }

 /* WADP0ClockGatingDisable */
6641 for_each_pipe(dev_priv, pipe) {
6642 I915_WRITE(TRANS_CHICKEN1(pipe),
6643 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
6644 }
6645}
6646
6647static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
6648{
6649 u32 tmp;
6650
6651 tmp = I915_READ(MCH_SSKPD);
6652 if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
6653 drm_dbg_kms(&dev_priv->drm,
6654 "Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
6655 tmp);
6656}
6657
6658static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
6659{
6660 u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
6661
6662 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
6663
6664 I915_WRITE(ILK_DISPLAY_CHICKEN2,
6665 I915_READ(ILK_DISPLAY_CHICKEN2) |
6666 ILK_ELPIN_409_SELECT);
6667
 /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
6669 I915_WRITE(_3D_CHICKEN,
6670 _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
6671
 /* WaDisable_RenderCache_OperationalFlush:snb */
6673 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6674
 /*
  * BSpec recommends 8x4 when MSAA is used,
  * however in practice 16x4 seems fastest.
  *
  * Note that PS/WM thread counts depend on the WIZ hashing
  * disable bit, which we don't touch here, but it's good
  * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
  */
6683 I915_WRITE(GEN6_GT_MODE,
6684 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
6685
6686 I915_WRITE(CACHE_MODE_0,
6687 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
6688
6689 I915_WRITE(GEN6_UCGCTL1,
6690 I915_READ(GEN6_UCGCTL1) |
6691 GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
6692 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
6693
 /*
  * According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
  * gating disable must be set. Failure to set it results in
  * flickering pixels due to Z write ordering failures after
  * some amount of runtime in the Mesa "fire" demo, and Unigine
  * Sanctuary and Tropics, and possibly more.
  *
  * WaDisableRCCUnitClockGating:snb
  * WaDisableRCPBUnitClockGating:snb
  */
6707 I915_WRITE(GEN6_UCGCTL2,
6708 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
6709 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
6710
 /* WaStripsFansDisableFastClipPerformanceFix:snb */
6712 I915_WRITE(_3D_CHICKEN3,
6713 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));
6714
 /*
  * Bspec says:
  * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
  * 3DSTATE_SF number of SF output attributes is more than 16."
  */
6720 I915_WRITE(_3D_CHICKEN3,
6721 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));
6722
 /*
  * According to the spec the following bits should be
  * set in order to enable memory self-refresh and fbc:
  * The bit21 and bit22 of 0x42000
  * The bit21 and bit22 of 0x42004
  * The bit5 and bit7 of 0x42020
  * The bit14 of 0x70180
  * The bit14 of 0x71180
  *
  * WaFbcAsynchFlipDisableFbcQueue:snb
  */
6734 I915_WRITE(ILK_DISPLAY_CHICKEN1,
6735 I915_READ(ILK_DISPLAY_CHICKEN1) |
6736 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
6737 I915_WRITE(ILK_DISPLAY_CHICKEN2,
6738 I915_READ(ILK_DISPLAY_CHICKEN2) |
6739 ILK_DPARB_GATE | ILK_VSDPFD_FULL);
6740 I915_WRITE(ILK_DSPCLK_GATE_D,
6741 I915_READ(ILK_DSPCLK_GATE_D) |
6742 ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
6743 ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
6744
6745 g4x_disable_trickle_feed(dev_priv);
6746
6747 cpt_init_clock_gating(dev_priv);
6748
6749 gen6_check_mch_setup(dev_priv);
6750}
6751
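/*
 * Force hardware thread scheduling for the TS, VS and DS fixed-function
 * units by overriding the dispatch mode in GEN7_FF_THREAD_MODE.
 */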
6752static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
6753{
6754 u32 reg = I915_READ(GEN7_FF_THREAD_MODE);
6755
 /*
  * WaVSThreadDispatchOverride:ivb,vlv
  *
  * This actually overrides the dispatch
  * mode for all thread types.
  */
6762 reg &= ~GEN7_FF_SCHED_MASK;
6763 reg |= GEN7_FF_TS_SCHED_HW;
6764 reg |= GEN7_FF_VS_SCHED_HW;
6765 reg |= GEN7_FF_DS_SCHED_HW;
6766
6767 I915_WRITE(GEN7_FF_THREAD_MODE, reg);
6768}
6769
6770static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
6771{
 /*
  * TODO: this bit should only be enabled when really needed, then
  * disabled when not needed anymore in order to save power.
  */
6776 if (HAS_PCH_LPT_LP(dev_priv))
6777 I915_WRITE(SOUTH_DSPCLK_GATE_D,
6778 I915_READ(SOUTH_DSPCLK_GATE_D) |
6779 PCH_LP_PARTITION_LEVEL_DISABLE);
6780
 /* WADPOClockGatingDisable:hsw */
6782 I915_WRITE(TRANS_CHICKEN1(PIPE_A),
6783 I915_READ(TRANS_CHICKEN1(PIPE_A)) |
6784 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
6785}
6786
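/*
 * Allow PCH LP partition-level power down again before suspend, undoing
 * the disable applied in lpt_init_clock_gating().
 */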
6787static void lpt_suspend_hw(struct drm_i915_private *dev_priv)
6788{
6789 if (HAS_PCH_LPT_LP(dev_priv)) {
6790 u32 val = I915_READ(SOUTH_DSPCLK_GATE_D);
6791
6792 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
6793 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
6794 }
6795}
6796
6797static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
6798 int general_prio_credits,
6799 int high_prio_credits)
6800{
6801 u32 misccpctl;
6802 u32 val;
6803
 /* WaTempDisableDOPClkGating:bdw */
6805 misccpctl = I915_READ(GEN7_MISCCPCTL);
6806 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
6807
6808 val = I915_READ(GEN8_L3SQCREG1);
6809 val &= ~L3_PRIO_CREDITS_MASK;
6810 val |= L3_GENERAL_PRIO_CREDITS(general_prio_credits);
6811 val |= L3_HIGH_PRIO_CREDITS(high_prio_credits);
6812 I915_WRITE(GEN8_L3SQCREG1, val);
6813
 /*
  * Wait at least 100 clocks before re-enabling clock gating.
  * See the definition of L3SQCREG1 in BSpec.
  */
6818 POSTING_READ(GEN8_L3SQCREG1);
6819 udelay(1);
6820 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
6821}
6822
6823static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
6824{
 /* This is not a Wa. Enable to reduce Sampler power */
6826 I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN,
6827 I915_READ(GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE);
6828
 /* Wa_14010594013:icl,ehl */
6830 intel_uncore_rmw(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1,
6831 0, CNL_DELAY_PMRSP);
6832}
6833
6834static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
6835{
6836 u32 vd_pg_enable = 0;
6837 unsigned int i;
6838
 /* This is not a WA. Enable VD HCP & MFX_ENC powergate */
6840 for (i = 0; i < I915_MAX_VCS; i++) {
6841 if (HAS_ENGINE(dev_priv, _VCS(i)))
6842 vd_pg_enable |= VDN_HCP_POWERGATE_ENABLE(i) |
6843 VDN_MFX_POWERGATE_ENABLE(i);
6844 }
6845
6846 I915_WRITE(POWERGATE_ENABLE,
6847 I915_READ(POWERGATE_ENABLE) | vd_pg_enable);
6848
 /* Wa_1409825376:tgl (pre-prod) */
6850 if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0))
6851 I915_WRITE(GEN9_CLKGATE_DIS_3, I915_READ(GEN9_CLKGATE_DIS_3) |
6852 TGL_VRH_GATING_DIS);
6853}
6854
6855static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
6856{
6857 if (!HAS_PCH_CNP(dev_priv))
6858 return;
6859
 /* Display WA #1181 WaSouthDisplayDisablePWMCGEGating: cnp */
6861 I915_WRITE(SOUTH_DSPCLK_GATE_D, I915_READ(SOUTH_DSPCLK_GATE_D) |
6862 CNP_PWM_CGE_GATING_DISABLE);
6863}
6864
6865static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
6866{
 u32 val;

 cnp_init_clock_gating(dev_priv);
6869
 /* This is not a Wa. Enable for better image quality */
6871 I915_WRITE(_3D_CHICKEN3,
6872 _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));
6873
 /* WaEnableChickenDCPR:cnl */
6875 I915_WRITE(GEN8_CHICKEN_DCPR_1,
6876 I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
6877
 /* WaFbcWakeMemOn:cnl */
6879 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
6880 DISP_FBC_MEMORY_WAKE);
6881
6882 val = I915_READ(SLICE_UNIT_LEVEL_CLKGATE);
 /* ReadHitWriteOnlyDisable:cnl */
6884 val |= RCCUNIT_CLKGATE_DIS;
 /* WaSarbUnitClockGatingDisable:cnl (pre-prod) */
6886 if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_B0))
6887 val |= SARBUNIT_CLKGATE_DIS;
6888 I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE, val);
6889
 /* Wa_2201832410:cnl */
6891 val = I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE);
6892 val |= GWUNIT_CLKGATE_DIS;
6893 I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE, val);
6894
 /* WaDisableVFclkgate:cnl */
 /* WaVFUnitClockGatingDisable:cnl */
6897 val = I915_READ(UNSLICE_UNIT_LEVEL_CLKGATE);
6898 val |= VFUNIT_CLKGATE_DIS;
6899 I915_WRITE(UNSLICE_UNIT_LEVEL_CLKGATE, val);
6900}
6901
6902static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
6903{
6904 cnp_init_clock_gating(dev_priv);
6905 gen9_init_clock_gating(dev_priv);
6906
 /*
  * WaFbcNukeOnHostModify:cfl
  * Display WA #0873: cfl
  */
6908 I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
6909 ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
6910}
6911
6912static void kbl_init_clock_gating(struct drm_i915_private *dev_priv)
6913{
6914 gen9_init_clock_gating(dev_priv);
6915
 /* WaDisableSDEUnitClockGating:kbl */
6917 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
6918 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
6919 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
6920
 /* WaDisableGamClockGating:kbl */
6922 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
6923 I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
6924 GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
6925
 /*
  * WaFbcNukeOnHostModify:kbl
  * Display WA #0873: kbl
  */
6927 I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
6928 ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
6929}
6930
6931static void skl_init_clock_gating(struct drm_i915_private *dev_priv)
6932{
6933 gen9_init_clock_gating(dev_priv);
6934
 /*
  * WaFbcHighMemBwCorruptionAvoidance:skl
  * Display WA #0883: skl
  */
6936 I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
6937 FBC_LLC_FULLY_OPEN);
6938
 /*
  * WaFbcNukeOnHostModify:skl
  * Display WA #0873: skl
  */
6940 I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
6941 ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
6942}
6943
6944static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
6945{
6946 enum pipe pipe;
6947
 /* WaSwitchSolVfFArbitrationPriority:bdw */
6949 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
6950
 /* WaPsrDPAMaskVBlankInSRD:bdw */
6952 I915_WRITE(CHICKEN_PAR1_1,
6953 I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
6954
 /* WaPsrDPRSUnmaskVBlankInSRD:bdw */
6956 for_each_pipe(dev_priv, pipe) {
6957 I915_WRITE(CHICKEN_PIPESL_1(pipe),
6958 I915_READ(CHICKEN_PIPESL_1(pipe)) |
6959 BDW_DPRS_MASK_VBLANK_SRD);
6960 }
6961
 /* WaVSRefCountFullforceMissDisable:bdw */
 /* WaDSRefCountFullforceMissDisable:bdw */
6964 I915_WRITE(GEN7_FF_THREAD_MODE,
6965 I915_READ(GEN7_FF_THREAD_MODE) &
6966 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
6967
6968 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
6969 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
6970
 /* WaDisableSDEUnitClockGating:bdw */
6972 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
6973 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
6974
 /* WaProgramL3SqcReg1Default:bdw */
6976 gen8_set_l3sqc_credits(dev_priv, 30, 2);
6977
 /* WaKVMNotificationOnConfigChange:bdw */
6979 I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
6980 | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);
6981
6982 lpt_init_clock_gating(dev_priv);
6983
 /* WaDisableDopClockGating:bdw */
6989 I915_WRITE(GEN6_UCGCTL1,
6990 I915_READ(GEN6_UCGCTL1) | GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
6991}
6992
6993static void hsw_init_clock_gating(struct drm_i915_private *dev_priv)
6994{
 /* L3 caching of data atomics doesn't work -- disable it. */
6996 I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
6997 I915_WRITE(HSW_ROW_CHICKEN3,
6998 _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));
6999
 /* This is required by WaCatErrorRejectionIssue:hsw */
7001 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
7002 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
7003 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
7004
 /* WaVSRefCountFullforceMissDisable:hsw */
7006 I915_WRITE(GEN7_FF_THREAD_MODE,
7007 I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);
7008
 /* WaDisable_RenderCache_OperationalFlush:hsw */
7010 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7011
 /* enable HiZ Raw Stall Optimization */
7013 I915_WRITE(CACHE_MODE_0_GEN7,
7014 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
7015
 /* WaDisable4x2SubspanOptimization:hsw */
7017 I915_WRITE(CACHE_MODE_1,
7018 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
7019
 /*
  * BSpec recommends 8x4 when MSAA is used,
  * however in practice 16x4 seems fastest.
  *
  * Note that PS/WM thread counts depend on the WIZ hashing
  * disable bit, which we don't touch here, but it's good
  * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
  */
7028 I915_WRITE(GEN7_GT_MODE,
7029 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
7030
 /* WaSampleCChickenBitEnable:hsw */
7032 I915_WRITE(HALF_SLICE_CHICKEN3,
7033 _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));
7034
 /* WaSwitchSolVfFArbitrationPriority:hsw */
7036 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
7037
7038 lpt_init_clock_gating(dev_priv);
7039}
7040
7041static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
7042{
7043 u32 snpcr;
7044
7045 I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
7046
 /* WaDisableEarlyCull:ivb */
7048 I915_WRITE(_3D_CHICKEN3,
7049 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
7050
 /* WaDisableBackToBackFlipFix:ivb */
7052 I915_WRITE(IVB_CHICKEN3,
7053 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
7054 CHICKEN3_DGMG_DONE_FIX_DISABLE);
7055
 /* WaDisablePSDDualDispatchEnable:ivb */
7057 if (IS_IVB_GT1(dev_priv))
7058 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
7059 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
7060
 /* WaDisable_RenderCache_OperationalFlush:ivb */
7062 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7063
 /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
7065 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
7066 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
7067
 /* WaApplyL3ControlAndL3ChickenMode:ivb */
7069 I915_WRITE(GEN7_L3CNTLREG1,
7070 GEN7_WA_FOR_GEN7_L3_CONTROL);
7071 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
7072 GEN7_WA_L3_CHICKEN_MODE);
7073 if (IS_IVB_GT1(dev_priv))
7074 I915_WRITE(GEN7_ROW_CHICKEN2,
7075 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
7076 else {
  /* must write both registers */
7078 I915_WRITE(GEN7_ROW_CHICKEN2,
7079 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
7080 I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
7081 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
7082 }
7083
 /* WaForceL3Serialization:ivb */
7085 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
7086 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
7087
 /*
  * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
  * This implements the WaDisableRCZUnitClockGating:ivb workaround.
  */
7092 I915_WRITE(GEN6_UCGCTL2,
7093 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
7094
 /* This is required by WaCatErrorRejectionIssue:ivb */
7096 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
7097 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
7098 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
7099
7100 g4x_disable_trickle_feed(dev_priv);
7101
7102 gen7_setup_fixed_func_scheduler(dev_priv);
7103
 if (0) { /* causes HiZ corruption on ivb:gt1 */
  /* enable HiZ Raw Stall Optimization */
7106 I915_WRITE(CACHE_MODE_0_GEN7,
7107 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
7108 }
7109
 /* WaDisable4x2SubspanOptimization:ivb */
7111 I915_WRITE(CACHE_MODE_1,
7112 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
7113
 /*
  * BSpec recommends 8x4 when MSAA is used,
  * however in practice 16x4 seems fastest.
  *
  * Note that PS/WM thread counts depend on the WIZ hashing
  * disable bit, which we don't touch here, but it's good
  * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
  */
7122 I915_WRITE(GEN7_GT_MODE,
7123 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
7124
7125 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
7126 snpcr &= ~GEN6_MBC_SNPCR_MASK;
7127 snpcr |= GEN6_MBC_SNPCR_MED;
7128 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
7129
7130 if (!HAS_PCH_NOP(dev_priv))
7131 cpt_init_clock_gating(dev_priv);
7132
7133 gen6_check_mch_setup(dev_priv);
7134}
7135
7136static void vlv_init_clock_gating(struct drm_i915_private *dev_priv)
7137{
 /* WaDisableEarlyCull:vlv */
7139 I915_WRITE(_3D_CHICKEN3,
7140 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
7141
 /* WaDisableBackToBackFlipFix:vlv */
7143 I915_WRITE(IVB_CHICKEN3,
7144 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
7145 CHICKEN3_DGMG_DONE_FIX_DISABLE);
7146
 /* WaPsdDispatchEnable:vlv */
 /* WaDisablePSDDualDispatchEnable:vlv */
7149 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
7150 _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
7151 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
7152
 /* WaDisable_RenderCache_OperationalFlush:vlv */
7154 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7155
 /* WaForceL3Serialization:vlv */
7157 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
7158 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
7159
 /* WaDisableDopClockGating:vlv */
7161 I915_WRITE(GEN7_ROW_CHICKEN2,
7162 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
7163
 /* This is required by WaCatErrorRejectionIssue:vlv */
7165 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
7166 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
7167 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
7168
7169 gen7_setup_fixed_func_scheduler(dev_priv);
7170
 /*
  * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
  * This implements the WaDisableRCZUnitClockGating:vlv workaround.
  */
7175 I915_WRITE(GEN6_UCGCTL2,
7176 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
7177
 /* WaDisableL3Bank2xClockGate:vlv
  * Disabling L3 clock gating- MMIO 940c[25] = 1
  * Set bit 25, to disable L3_BANK_2x_CLK_GATING */
7181 I915_WRITE(GEN7_UCGCTL4,
7182 I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
7183
 /*
  * BSpec says this must be set, even though
  * WaDisable4x2SubspanOptimization isn't listed for VLV.
  */
7188 I915_WRITE(CACHE_MODE_1,
7189 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
7190
 /*
  * BSpec recommends 8x4 when MSAA is used,
  * however in practice 16x4 seems fastest.
  *
  * Note that PS/WM thread counts depend on the WIZ hashing
  * disable bit, which we don't touch here, but it's good
  * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
  */
7199 I915_WRITE(GEN7_GT_MODE,
7200 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
7201
 /*
  * WaIncreaseL3CreditsForVLVB0:vlv
  * This is the hardware default actually.
  */
7206 I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
7207
 /*
  * WaDisableVLVClockGating_VBIIssue:vlv
  * Disable clock gating on the GCFG unit to prevent a delay
  * in the reporting of vblank events.
  */
7213 I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
7214}
7215
7216static void chv_init_clock_gating(struct drm_i915_private *dev_priv)
7217{
 /* WaVSRefCountFullforceMissDisable:chv */
 /* WaDSRefCountFullforceMissDisable:chv */
7220 I915_WRITE(GEN7_FF_THREAD_MODE,
7221 I915_READ(GEN7_FF_THREAD_MODE) &
7222 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
7223
 /* WaDisableSemaphoreAndSyncFlipWait:chv */
7225 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
7226 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
7227
 /* WaDisableCSUnitClockGating:chv */
7229 I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
7230 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
7231
 /* WaDisableSDEUnitClockGating:chv */
7233 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
7234 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
7235
 /*
  * WaProgramL3SqcReg1Default:chv
  * See gfxspecs/Related Documents/Performance Guide/
  * LSQC Setting Recommendations.
  */
7241 gen8_set_l3sqc_credits(dev_priv, 38, 2);
7242}
7243
7244static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
7245{
7246 u32 dspclk_gate;
7247
7248 I915_WRITE(RENCLK_GATE_D1, 0);
7249 I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
7250 GS_UNIT_CLOCK_GATE_DISABLE |
7251 CL_UNIT_CLOCK_GATE_DISABLE);
7252 I915_WRITE(RAMCLK_GATE_D, 0);
7253 dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
7254 OVRUNIT_CLOCK_GATE_DISABLE |
7255 OVCUNIT_CLOCK_GATE_DISABLE;
7256 if (IS_GM45(dev_priv))
7257 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
7258 I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
7259
 /* WaDisableRenderCachePipelinedFlush */
7261 I915_WRITE(CACHE_MODE_0,
7262 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
7263
 /* WaDisable_RenderCache_OperationalFlush:g4x */
7265 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7266
7267 g4x_disable_trickle_feed(dev_priv);
7268}
7269
7270static void i965gm_init_clock_gating(struct drm_i915_private *dev_priv)
7271{
7272 struct intel_uncore *uncore = &dev_priv->uncore;
7273
7274 intel_uncore_write(uncore, RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
7275 intel_uncore_write(uncore, RENCLK_GATE_D2, 0);
7276 intel_uncore_write(uncore, DSPCLK_GATE_D, 0);
7277 intel_uncore_write(uncore, RAMCLK_GATE_D, 0);
7278 intel_uncore_write16(uncore, DEUC, 0);
7279 intel_uncore_write(uncore,
7280 MI_ARB_STATE,
7281 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
7282
 /* WaDisable_RenderCache_OperationalFlush:gen4 */
7284 intel_uncore_write(uncore,
7285 CACHE_MODE_0,
7286 _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7287}
7288
7289static void i965g_init_clock_gating(struct drm_i915_private *dev_priv)
7290{
7291 I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
7292 I965_RCC_CLOCK_GATE_DISABLE |
7293 I965_RCPB_CLOCK_GATE_DISABLE |
7294 I965_ISC_CLOCK_GATE_DISABLE |
7295 I965_FBC_CLOCK_GATE_DISABLE);
7296 I915_WRITE(RENCLK_GATE_D2, 0);
7297 I915_WRITE(MI_ARB_STATE,
7298 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
7299
 /* WaDisable_RenderCache_OperationalFlush:gen4 */
7301 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7302}
7303
7304static void gen3_init_clock_gating(struct drm_i915_private *dev_priv)
7305{
7306 u32 dstate = I915_READ(D_STATE);
7307
7308 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
7309 DSTATE_DOT_CLOCK_GATING;
7310 I915_WRITE(D_STATE, dstate);
7311
7312 if (IS_PINEVIEW(dev_priv))
7313 I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
7314
 /* IIR "flip pending" means done if this bit is set */
7316 I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
7317
 /* interrupts should cause a wake up from C3 */
7319 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
7320
 /* keep the memory arbiter's low-priority writes enabled in C3 */
7322 I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
7323
7324 I915_WRITE(MI_ARB_STATE,
7325 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
7326}
7327
7328static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
7329{
7330 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
7331
 /* interrupts should cause a wake up from C3 */
7333 I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
7334 _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
7335
7336 I915_WRITE(MEM_MODE,
7337 _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
7338}
7339
7340static void i830_init_clock_gating(struct drm_i915_private *dev_priv)
7341{
7342 I915_WRITE(MEM_MODE,
7343 _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
7344 _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
7345}
7346
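/* Apply the platform-specific clock gating setup selected at init time. */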
7347void intel_init_clock_gating(struct drm_i915_private *dev_priv)
7348{
7349 dev_priv->display.init_clock_gating(dev_priv);
7350}
7351
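/* Adjust platform-specific hardware state ahead of system suspend. */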
7352void intel_suspend_hw(struct drm_i915_private *dev_priv)
7353{
7354 if (HAS_PCH_LPT(dev_priv))
7355 lpt_suspend_hw(dev_priv);
7356}
7357
7358static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
7359{
7360 drm_dbg_kms(&dev_priv->drm,
7361 "No clock gating settings or workarounds applied.\n");
7362}
7363
/**
 * intel_init_clock_gating_hooks - setup the clock gating hooks
 * @dev_priv: device private
 *
 * Setup the hooks that configure which clocks of a given platform can be
 * gated and also apply various GT and display specific workarounds for these
 * platforms. Note that some GT specific workarounds are applied separately
 * when GPU contexts or batchbuffers start their execution.
 */
7373void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
7374{
7375 if (IS_GEN(dev_priv, 12))
7376 dev_priv->display.init_clock_gating = tgl_init_clock_gating;
7377 else if (IS_GEN(dev_priv, 11))
7378 dev_priv->display.init_clock_gating = icl_init_clock_gating;
7379 else if (IS_CANNONLAKE(dev_priv))
7380 dev_priv->display.init_clock_gating = cnl_init_clock_gating;
7381 else if (IS_COFFEELAKE(dev_priv))
7382 dev_priv->display.init_clock_gating = cfl_init_clock_gating;
7383 else if (IS_SKYLAKE(dev_priv))
7384 dev_priv->display.init_clock_gating = skl_init_clock_gating;
7385 else if (IS_KABYLAKE(dev_priv))
7386 dev_priv->display.init_clock_gating = kbl_init_clock_gating;
7387 else if (IS_BROXTON(dev_priv))
7388 dev_priv->display.init_clock_gating = bxt_init_clock_gating;
7389 else if (IS_GEMINILAKE(dev_priv))
7390 dev_priv->display.init_clock_gating = glk_init_clock_gating;
7391 else if (IS_BROADWELL(dev_priv))
7392 dev_priv->display.init_clock_gating = bdw_init_clock_gating;
7393 else if (IS_CHERRYVIEW(dev_priv))
7394 dev_priv->display.init_clock_gating = chv_init_clock_gating;
7395 else if (IS_HASWELL(dev_priv))
7396 dev_priv->display.init_clock_gating = hsw_init_clock_gating;
7397 else if (IS_IVYBRIDGE(dev_priv))
7398 dev_priv->display.init_clock_gating = ivb_init_clock_gating;
7399 else if (IS_VALLEYVIEW(dev_priv))
7400 dev_priv->display.init_clock_gating = vlv_init_clock_gating;
7401 else if (IS_GEN(dev_priv, 6))
7402 dev_priv->display.init_clock_gating = gen6_init_clock_gating;
7403 else if (IS_GEN(dev_priv, 5))
7404 dev_priv->display.init_clock_gating = ilk_init_clock_gating;
7405 else if (IS_G4X(dev_priv))
7406 dev_priv->display.init_clock_gating = g4x_init_clock_gating;
7407 else if (IS_I965GM(dev_priv))
7408 dev_priv->display.init_clock_gating = i965gm_init_clock_gating;
7409 else if (IS_I965G(dev_priv))
7410 dev_priv->display.init_clock_gating = i965g_init_clock_gating;
7411 else if (IS_GEN(dev_priv, 3))
7412 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
7413 else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
7414 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
7415 else if (IS_GEN(dev_priv, 2))
7416 dev_priv->display.init_clock_gating = i830_init_clock_gating;
7417 else {
7418 MISSING_CASE(INTEL_DEVID(dev_priv));
7419 dev_priv->display.init_clock_gating = nop_init_clock_gating;
7420 }
7421}
7422
/* Set up chip specific power management-related functions */
7424void intel_init_pm(struct drm_i915_private *dev_priv)
7425{
 /* For cxsr */
7427 if (IS_PINEVIEW(dev_priv))
7428 pnv_get_mem_freq(dev_priv);
7429 else if (IS_GEN(dev_priv, 5))
7430 ilk_get_mem_freq(dev_priv);
7431
7432 if (intel_has_sagv(dev_priv))
7433 skl_setup_sagv_block_time(dev_priv);
7434
 /* For FIFO watermark updates */
7436 if (INTEL_GEN(dev_priv) >= 9) {
7437 skl_setup_wm_latency(dev_priv);
7438 dev_priv->display.compute_global_watermarks = skl_compute_wm;
7439 } else if (HAS_PCH_SPLIT(dev_priv)) {
7440 ilk_setup_wm_latency(dev_priv);
7441
7442 if ((IS_GEN(dev_priv, 5) && dev_priv->wm.pri_latency[1] &&
7443 dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
7444 (!IS_GEN(dev_priv, 5) && dev_priv->wm.pri_latency[0] &&
7445 dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
7446 dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
7447 dev_priv->display.compute_intermediate_wm =
7448 ilk_compute_intermediate_wm;
7449 dev_priv->display.initial_watermarks =
7450 ilk_initial_watermarks;
7451 dev_priv->display.optimize_watermarks =
7452 ilk_optimize_watermarks;
7453 } else {
7454 drm_dbg_kms(&dev_priv->drm,
7455 "Failed to read display plane latency. "
7456 "Disable CxSR\n");
7457 }
7458 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
7459 vlv_setup_wm_latency(dev_priv);
7460 dev_priv->display.compute_pipe_wm = vlv_compute_pipe_wm;
7461 dev_priv->display.compute_intermediate_wm = vlv_compute_intermediate_wm;
7462 dev_priv->display.initial_watermarks = vlv_initial_watermarks;
7463 dev_priv->display.optimize_watermarks = vlv_optimize_watermarks;
7464 dev_priv->display.atomic_update_watermarks = vlv_atomic_update_fifo;
7465 } else if (IS_G4X(dev_priv)) {
7466 g4x_setup_wm_latency(dev_priv);
7467 dev_priv->display.compute_pipe_wm = g4x_compute_pipe_wm;
7468 dev_priv->display.compute_intermediate_wm = g4x_compute_intermediate_wm;
7469 dev_priv->display.initial_watermarks = g4x_initial_watermarks;
7470 dev_priv->display.optimize_watermarks = g4x_optimize_watermarks;
7471 } else if (IS_PINEVIEW(dev_priv)) {
7472 if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
7473 dev_priv->is_ddr3,
7474 dev_priv->fsb_freq,
7475 dev_priv->mem_freq)) {
7476 drm_info(&dev_priv->drm,
7477 "failed to find known CxSR latency "
7478 "(found ddr%s fsb freq %d, mem freq %d), "
7479 "disabling CxSR\n",
7480 (dev_priv->is_ddr3 == 1) ? "3" : "2",
7481 dev_priv->fsb_freq, dev_priv->mem_freq);
7482
7483 intel_set_memory_cxsr(dev_priv, false);
7484 dev_priv->display.update_wm = NULL;
7485 } else
7486 dev_priv->display.update_wm = pnv_update_wm;
7487 } else if (IS_GEN(dev_priv, 4)) {
7488 dev_priv->display.update_wm = i965_update_wm;
7489 } else if (IS_GEN(dev_priv, 3)) {
7490 dev_priv->display.update_wm = i9xx_update_wm;
7491 dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
7492 } else if (IS_GEN(dev_priv, 2)) {
7493 if (INTEL_NUM_PIPES(dev_priv) == 1) {
7494 dev_priv->display.update_wm = i845_update_wm;
7495 dev_priv->display.get_fifo_size = i845_get_fifo_size;
7496 } else {
7497 dev_priv->display.update_wm = i9xx_update_wm;
7498 dev_priv->display.get_fifo_size = i830_get_fifo_size;
7499 }
7500 } else {
7501 drm_err(&dev_priv->drm,
7502 "unexpected fall-through in %s\n", __func__);
7503 }
7504}
7505
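/*
 * Early one-time PM state initialization: runtime PM starts out active
 * with no wakerefs held.
 */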
7506void intel_pm_setup(struct drm_i915_private *dev_priv)
7507{
7508 dev_priv->runtime_pm.suspended = false;
7509 atomic_set(&dev_priv->runtime_pm.wakeref_count, 0);
7510}
7511