1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28#include <linux/module.h>
29#include <linux/pm_runtime.h>
30
31#include <drm/drm_atomic_helper.h>
32#include <drm/drm_fourcc.h>
33#include <drm/drm_plane_helper.h>
34
35#include "display/intel_atomic.h"
36#include "display/intel_atomic_plane.h"
37#include "display/intel_bw.h"
38#include "display/intel_de.h"
39#include "display/intel_display_trace.h"
40#include "display/intel_display_types.h"
41#include "display/intel_fb.h"
42#include "display/intel_fbc.h"
43#include "display/intel_sprite.h"
44#include "display/skl_universal_plane.h"
45
46#include "gt/intel_llc.h"
47
48#include "i915_drv.h"
49#include "i915_fixed.h"
50#include "i915_irq.h"
51#include "intel_pcode.h"
52#include "intel_pm.h"
53#include "vlv_sideband.h"
54#include "../../../platform/x86/intel_ips.h"
55
56
57struct skl_wm_params {
58 bool x_tiled, y_tiled;
59 bool rc_surface;
60 bool is_planar;
61 u32 width;
62 u8 cpp;
63 u32 plane_pixel_rate;
64 u32 y_min_scanlines;
65 u32 plane_bytes_per_line;
66 uint_fixed_16_16_t plane_blocks_per_line;
67 uint_fixed_16_16_t y_tile_minimum;
68 u32 linetime_us;
69 u32 dbuf_block_size;
70};
71
72
73struct intel_wm_config {
74 unsigned int num_pipes_active;
75 bool sprites_enabled;
76 bool sprites_scaled;
77};
78
79static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
80{
81 enum pipe pipe;
82
83 if (HAS_LLC(dev_priv)) {
84
85
86
87
88
89
90
91 intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1,
92 intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) |
93 SKL_DE_COMPRESSED_HASH_MODE);
94 }
95
96 for_each_pipe(dev_priv, pipe) {
97
98
99
100
101 if (!IS_GEMINILAKE(dev_priv) && intel_vtd_active(dev_priv))
102 intel_uncore_rmw(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe),
103 SKL_PLANE1_STRETCH_MAX_MASK, SKL_PLANE1_STRETCH_MAX_X1);
104 }
105
106
107 intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1,
108 intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
109
110
111 intel_uncore_write(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1,
112 intel_uncore_read(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
113
114
115
116
117
118 intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
119 DISP_FBC_MEMORY_WAKE);
120}
121
122static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
123{
124 gen9_init_clock_gating(dev_priv);
125
126
127 intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
128 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
129
130
131
132
133
134 intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
135 GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);
136
137
138
139
140
141 intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_0, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_0) |
142 PWM1_GATING_DIS | PWM2_GATING_DIS);
143
144
145
146
147
148
149
150 intel_uncore_write(&dev_priv->uncore, RM_TIMEOUT, MMIO_TIMEOUT_US(950));
151
152
153
154
155
156 intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
157 DISP_FBC_WM_DIS);
158
159
160
161
162
163 intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) |
164 DPFC_DISABLE_DUMMY0);
165}
166
167static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
168{
169 gen9_init_clock_gating(dev_priv);
170
171
172
173
174
175
176 intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_0, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_0) |
177 PWM1_GATING_DIS | PWM2_GATING_DIS);
178}
179
180static void pnv_get_mem_freq(struct drm_i915_private *dev_priv)
181{
182 u32 tmp;
183
184 tmp = intel_uncore_read(&dev_priv->uncore, CLKCFG);
185
186 switch (tmp & CLKCFG_FSB_MASK) {
187 case CLKCFG_FSB_533:
188 dev_priv->fsb_freq = 533;
189 break;
190 case CLKCFG_FSB_800:
191 dev_priv->fsb_freq = 800;
192 break;
193 case CLKCFG_FSB_667:
194 dev_priv->fsb_freq = 667;
195 break;
196 case CLKCFG_FSB_400:
197 dev_priv->fsb_freq = 400;
198 break;
199 }
200
201 switch (tmp & CLKCFG_MEM_MASK) {
202 case CLKCFG_MEM_533:
203 dev_priv->mem_freq = 533;
204 break;
205 case CLKCFG_MEM_667:
206 dev_priv->mem_freq = 667;
207 break;
208 case CLKCFG_MEM_800:
209 dev_priv->mem_freq = 800;
210 break;
211 }
212
213
214 tmp = intel_uncore_read(&dev_priv->uncore, CSHRDDR3CTL);
215 dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
216}
217
218static void ilk_get_mem_freq(struct drm_i915_private *dev_priv)
219{
220 u16 ddrpll, csipll;
221
222 ddrpll = intel_uncore_read16(&dev_priv->uncore, DDRMPLL1);
223 csipll = intel_uncore_read16(&dev_priv->uncore, CSIPLL0);
224
225 switch (ddrpll & 0xff) {
226 case 0xc:
227 dev_priv->mem_freq = 800;
228 break;
229 case 0x10:
230 dev_priv->mem_freq = 1066;
231 break;
232 case 0x14:
233 dev_priv->mem_freq = 1333;
234 break;
235 case 0x18:
236 dev_priv->mem_freq = 1600;
237 break;
238 default:
239 drm_dbg(&dev_priv->drm, "unknown memory frequency 0x%02x\n",
240 ddrpll & 0xff);
241 dev_priv->mem_freq = 0;
242 break;
243 }
244
245 switch (csipll & 0x3ff) {
246 case 0x00c:
247 dev_priv->fsb_freq = 3200;
248 break;
249 case 0x00e:
250 dev_priv->fsb_freq = 3733;
251 break;
252 case 0x010:
253 dev_priv->fsb_freq = 4266;
254 break;
255 case 0x012:
256 dev_priv->fsb_freq = 4800;
257 break;
258 case 0x014:
259 dev_priv->fsb_freq = 5333;
260 break;
261 case 0x016:
262 dev_priv->fsb_freq = 5866;
263 break;
264 case 0x018:
265 dev_priv->fsb_freq = 6400;
266 break;
267 default:
268 drm_dbg(&dev_priv->drm, "unknown fsb frequency 0x%04x\n",
269 csipll & 0x3ff);
270 dev_priv->fsb_freq = 0;
271 break;
272 }
273}
274
275static const struct cxsr_latency cxsr_latency_table[] = {
276 {1, 0, 800, 400, 3382, 33382, 3983, 33983},
277 {1, 0, 800, 667, 3354, 33354, 3807, 33807},
278 {1, 0, 800, 800, 3347, 33347, 3763, 33763},
279 {1, 1, 800, 667, 6420, 36420, 6873, 36873},
280 {1, 1, 800, 800, 5902, 35902, 6318, 36318},
281
282 {1, 0, 667, 400, 3400, 33400, 4021, 34021},
283 {1, 0, 667, 667, 3372, 33372, 3845, 33845},
284 {1, 0, 667, 800, 3386, 33386, 3822, 33822},
285 {1, 1, 667, 667, 6438, 36438, 6911, 36911},
286 {1, 1, 667, 800, 5941, 35941, 6377, 36377},
287
288 {1, 0, 400, 400, 3472, 33472, 4173, 34173},
289 {1, 0, 400, 667, 3443, 33443, 3996, 33996},
290 {1, 0, 400, 800, 3430, 33430, 3946, 33946},
291 {1, 1, 400, 667, 6509, 36509, 7062, 37062},
292 {1, 1, 400, 800, 5985, 35985, 6501, 36501},
293
294 {0, 0, 800, 400, 3438, 33438, 4065, 34065},
295 {0, 0, 800, 667, 3410, 33410, 3889, 33889},
296 {0, 0, 800, 800, 3403, 33403, 3845, 33845},
297 {0, 1, 800, 667, 6476, 36476, 6955, 36955},
298 {0, 1, 800, 800, 5958, 35958, 6400, 36400},
299
300 {0, 0, 667, 400, 3456, 33456, 4103, 34106},
301 {0, 0, 667, 667, 3428, 33428, 3927, 33927},
302 {0, 0, 667, 800, 3443, 33443, 3905, 33905},
303 {0, 1, 667, 667, 6494, 36494, 6993, 36993},
304 {0, 1, 667, 800, 5998, 35998, 6460, 36460},
305
306 {0, 0, 400, 400, 3528, 33528, 4255, 34255},
307 {0, 0, 400, 667, 3500, 33500, 4079, 34079},
308 {0, 0, 400, 800, 3487, 33487, 4029, 34029},
309 {0, 1, 400, 667, 6566, 36566, 7145, 37145},
310 {0, 1, 400, 800, 6042, 36042, 6584, 36584},
311};
312
313static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop,
314 bool is_ddr3,
315 int fsb,
316 int mem)
317{
318 const struct cxsr_latency *latency;
319 int i;
320
321 if (fsb == 0 || mem == 0)
322 return NULL;
323
324 for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
325 latency = &cxsr_latency_table[i];
326 if (is_desktop == latency->is_desktop &&
327 is_ddr3 == latency->is_ddr3 &&
328 fsb == latency->fsb_freq && mem == latency->mem_freq)
329 return latency;
330 }
331
332 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
333
334 return NULL;
335}
336
337static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
338{
339 u32 val;
340
341 vlv_punit_get(dev_priv);
342
343 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
344 if (enable)
345 val &= ~FORCE_DDR_HIGH_FREQ;
346 else
347 val |= FORCE_DDR_HIGH_FREQ;
348 val &= ~FORCE_DDR_LOW_FREQ;
349 val |= FORCE_DDR_FREQ_REQ_ACK;
350 vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
351
352 if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
353 FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
354 drm_err(&dev_priv->drm,
355 "timed out waiting for Punit DDR DVFS request\n");
356
357 vlv_punit_put(dev_priv);
358}
359
360static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
361{
362 u32 val;
363
364 vlv_punit_get(dev_priv);
365
366 val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
367 if (enable)
368 val |= DSP_MAXFIFO_PM5_ENABLE;
369 else
370 val &= ~DSP_MAXFIFO_PM5_ENABLE;
371 vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
372
373 vlv_punit_put(dev_priv);
374}
375
376#define FW_WM(value, plane) \
377 (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
378
379static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
380{
381 bool was_enabled;
382 u32 val;
383
384 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
385 was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
386 intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
387 intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF_VLV);
388 } else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
389 was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
390 intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
391 intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
392 } else if (IS_PINEVIEW(dev_priv)) {
393 val = intel_uncore_read(&dev_priv->uncore, DSPFW3);
394 was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
395 if (enable)
396 val |= PINEVIEW_SELF_REFRESH_EN;
397 else
398 val &= ~PINEVIEW_SELF_REFRESH_EN;
399 intel_uncore_write(&dev_priv->uncore, DSPFW3, val);
400 intel_uncore_posting_read(&dev_priv->uncore, DSPFW3);
401 } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
402 was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
403 val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
404 _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
405 intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, val);
406 intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
407 } else if (IS_I915GM(dev_priv)) {
408
409
410
411
412
413 was_enabled = intel_uncore_read(&dev_priv->uncore, INSTPM) & INSTPM_SELF_EN;
414 val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
415 _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
416 intel_uncore_write(&dev_priv->uncore, INSTPM, val);
417 intel_uncore_posting_read(&dev_priv->uncore, INSTPM);
418 } else {
419 return false;
420 }
421
422 trace_intel_memory_cxsr(dev_priv, was_enabled, enable);
423
424 drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n",
425 enableddisabled(enable),
426 enableddisabled(was_enabled));
427
428 return was_enabled;
429}
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
469{
470 bool ret;
471
472 mutex_lock(&dev_priv->wm.wm_mutex);
473 ret = _intel_set_memory_cxsr(dev_priv, enable);
474 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
475 dev_priv->wm.vlv.cxsr = enable;
476 else if (IS_G4X(dev_priv))
477 dev_priv->wm.g4x.cxsr = enable;
478 mutex_unlock(&dev_priv->wm.wm_mutex);
479
480 return ret;
481}
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497static const int pessimal_latency_ns = 5000;
498
499#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
500 ((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
501
502static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
503{
504 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
505 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
506 struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
507 enum pipe pipe = crtc->pipe;
508 int sprite0_start, sprite1_start;
509 u32 dsparb, dsparb2, dsparb3;
510
511 switch (pipe) {
512 case PIPE_A:
513 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
514 dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
515 sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
516 sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
517 break;
518 case PIPE_B:
519 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
520 dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
521 sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
522 sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
523 break;
524 case PIPE_C:
525 dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
526 dsparb3 = intel_uncore_read(&dev_priv->uncore, DSPARB3);
527 sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
528 sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
529 break;
530 default:
531 MISSING_CASE(pipe);
532 return;
533 }
534
535 fifo_state->plane[PLANE_PRIMARY] = sprite0_start;
536 fifo_state->plane[PLANE_SPRITE0] = sprite1_start - sprite0_start;
537 fifo_state->plane[PLANE_SPRITE1] = 511 - sprite1_start;
538 fifo_state->plane[PLANE_CURSOR] = 63;
539}
540
541static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
542 enum i9xx_plane_id i9xx_plane)
543{
544 u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
545 int size;
546
547 size = dsparb & 0x7f;
548 if (i9xx_plane == PLANE_B)
549 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
550
551 drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
552 dsparb, plane_name(i9xx_plane), size);
553
554 return size;
555}
556
557static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
558 enum i9xx_plane_id i9xx_plane)
559{
560 u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
561 int size;
562
563 size = dsparb & 0x1ff;
564 if (i9xx_plane == PLANE_B)
565 size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
566 size >>= 1;
567
568 drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
569 dsparb, plane_name(i9xx_plane), size);
570
571 return size;
572}
573
574static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
575 enum i9xx_plane_id i9xx_plane)
576{
577 u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
578 int size;
579
580 size = dsparb & 0x7f;
581 size >>= 2;
582
583 drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
584 dsparb, plane_name(i9xx_plane), size);
585
586 return size;
587}
588
589
590static const struct intel_watermark_params pnv_display_wm = {
591 .fifo_size = PINEVIEW_DISPLAY_FIFO,
592 .max_wm = PINEVIEW_MAX_WM,
593 .default_wm = PINEVIEW_DFT_WM,
594 .guard_size = PINEVIEW_GUARD_WM,
595 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
596};
597
598static const struct intel_watermark_params pnv_display_hplloff_wm = {
599 .fifo_size = PINEVIEW_DISPLAY_FIFO,
600 .max_wm = PINEVIEW_MAX_WM,
601 .default_wm = PINEVIEW_DFT_HPLLOFF_WM,
602 .guard_size = PINEVIEW_GUARD_WM,
603 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
604};
605
606static const struct intel_watermark_params pnv_cursor_wm = {
607 .fifo_size = PINEVIEW_CURSOR_FIFO,
608 .max_wm = PINEVIEW_CURSOR_MAX_WM,
609 .default_wm = PINEVIEW_CURSOR_DFT_WM,
610 .guard_size = PINEVIEW_CURSOR_GUARD_WM,
611 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
612};
613
614static const struct intel_watermark_params pnv_cursor_hplloff_wm = {
615 .fifo_size = PINEVIEW_CURSOR_FIFO,
616 .max_wm = PINEVIEW_CURSOR_MAX_WM,
617 .default_wm = PINEVIEW_CURSOR_DFT_WM,
618 .guard_size = PINEVIEW_CURSOR_GUARD_WM,
619 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
620};
621
622static const struct intel_watermark_params i965_cursor_wm_info = {
623 .fifo_size = I965_CURSOR_FIFO,
624 .max_wm = I965_CURSOR_MAX_WM,
625 .default_wm = I965_CURSOR_DFT_WM,
626 .guard_size = 2,
627 .cacheline_size = I915_FIFO_LINE_SIZE,
628};
629
630static const struct intel_watermark_params i945_wm_info = {
631 .fifo_size = I945_FIFO_SIZE,
632 .max_wm = I915_MAX_WM,
633 .default_wm = 1,
634 .guard_size = 2,
635 .cacheline_size = I915_FIFO_LINE_SIZE,
636};
637
638static const struct intel_watermark_params i915_wm_info = {
639 .fifo_size = I915_FIFO_SIZE,
640 .max_wm = I915_MAX_WM,
641 .default_wm = 1,
642 .guard_size = 2,
643 .cacheline_size = I915_FIFO_LINE_SIZE,
644};
645
646static const struct intel_watermark_params i830_a_wm_info = {
647 .fifo_size = I855GM_FIFO_SIZE,
648 .max_wm = I915_MAX_WM,
649 .default_wm = 1,
650 .guard_size = 2,
651 .cacheline_size = I830_FIFO_LINE_SIZE,
652};
653
654static const struct intel_watermark_params i830_bc_wm_info = {
655 .fifo_size = I855GM_FIFO_SIZE,
656 .max_wm = I915_MAX_WM/2,
657 .default_wm = 1,
658 .guard_size = 2,
659 .cacheline_size = I830_FIFO_LINE_SIZE,
660};
661
662static const struct intel_watermark_params i845_wm_info = {
663 .fifo_size = I830_FIFO_SIZE,
664 .max_wm = I915_MAX_WM,
665 .default_wm = 1,
666 .guard_size = 2,
667 .cacheline_size = I830_FIFO_LINE_SIZE,
668};
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703static unsigned int intel_wm_method1(unsigned int pixel_rate,
704 unsigned int cpp,
705 unsigned int latency)
706{
707 u64 ret;
708
709 ret = mul_u32_u32(pixel_rate, cpp * latency);
710 ret = DIV_ROUND_UP_ULL(ret, 10000);
711
712 return ret;
713}
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745static unsigned int intel_wm_method2(unsigned int pixel_rate,
746 unsigned int htotal,
747 unsigned int width,
748 unsigned int cpp,
749 unsigned int latency)
750{
751 unsigned int ret;
752
753
754
755
756
757 if (WARN_ON_ONCE(htotal == 0))
758 htotal = 1;
759
760 ret = (latency * pixel_rate) / (htotal * 10000);
761 ret = (ret + 1) * width * cpp;
762
763 return ret;
764}
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785static unsigned int intel_calculate_wm(int pixel_rate,
786 const struct intel_watermark_params *wm,
787 int fifo_size, int cpp,
788 unsigned int latency_ns)
789{
790 int entries, wm_size;
791
792
793
794
795
796
797
798 entries = intel_wm_method1(pixel_rate, cpp,
799 latency_ns / 100);
800 entries = DIV_ROUND_UP(entries, wm->cacheline_size) +
801 wm->guard_size;
802 DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries);
803
804 wm_size = fifo_size - entries;
805 DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);
806
807
808 if (wm_size > wm->max_wm)
809 wm_size = wm->max_wm;
810 if (wm_size <= 0)
811 wm_size = wm->default_wm;
812
813
814
815
816
817
818
819
820 if (wm_size <= 8)
821 wm_size = 8;
822
823 return wm_size;
824}
825
826static bool is_disabling(int old, int new, int threshold)
827{
828 return old >= threshold && new < threshold;
829}
830
831static bool is_enabling(int old, int new, int threshold)
832{
833 return old < threshold && new >= threshold;
834}
835
836static int intel_wm_num_levels(struct drm_i915_private *dev_priv)
837{
838 return dev_priv->wm.max_level + 1;
839}
840
841static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
842 const struct intel_plane_state *plane_state)
843{
844 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
845
846
847 if (!crtc_state->hw.active)
848 return false;
849
850
851
852
853
854
855
856
857
858 if (plane->id == PLANE_CURSOR)
859 return plane_state->hw.fb != NULL;
860 else
861 return plane_state->uapi.visible;
862}
863
864static bool intel_crtc_active(struct intel_crtc *crtc)
865{
866
867
868
869
870
871
872
873
874
875
876
877
878
879 return crtc->active && crtc->base.primary->state->fb &&
880 crtc->config->hw.adjusted_mode.crtc_clock;
881}
882
883static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
884{
885 struct intel_crtc *crtc, *enabled = NULL;
886
887 for_each_intel_crtc(&dev_priv->drm, crtc) {
888 if (intel_crtc_active(crtc)) {
889 if (enabled)
890 return NULL;
891 enabled = crtc;
892 }
893 }
894
895 return enabled;
896}
897
898static void pnv_update_wm(struct drm_i915_private *dev_priv)
899{
900 struct intel_crtc *crtc;
901 const struct cxsr_latency *latency;
902 u32 reg;
903 unsigned int wm;
904
905 latency = intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
906 dev_priv->is_ddr3,
907 dev_priv->fsb_freq,
908 dev_priv->mem_freq);
909 if (!latency) {
910 drm_dbg_kms(&dev_priv->drm,
911 "Unknown FSB/MEM found, disable CxSR\n");
912 intel_set_memory_cxsr(dev_priv, false);
913 return;
914 }
915
916 crtc = single_enabled_crtc(dev_priv);
917 if (crtc) {
918 const struct drm_display_mode *pipe_mode =
919 &crtc->config->hw.pipe_mode;
920 const struct drm_framebuffer *fb =
921 crtc->base.primary->state->fb;
922 int cpp = fb->format->cpp[0];
923 int clock = pipe_mode->crtc_clock;
924
925
926 wm = intel_calculate_wm(clock, &pnv_display_wm,
927 pnv_display_wm.fifo_size,
928 cpp, latency->display_sr);
929 reg = intel_uncore_read(&dev_priv->uncore, DSPFW1);
930 reg &= ~DSPFW_SR_MASK;
931 reg |= FW_WM(wm, SR);
932 intel_uncore_write(&dev_priv->uncore, DSPFW1, reg);
933 drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg);
934
935
936 wm = intel_calculate_wm(clock, &pnv_cursor_wm,
937 pnv_display_wm.fifo_size,
938 4, latency->cursor_sr);
939 reg = intel_uncore_read(&dev_priv->uncore, DSPFW3);
940 reg &= ~DSPFW_CURSOR_SR_MASK;
941 reg |= FW_WM(wm, CURSOR_SR);
942 intel_uncore_write(&dev_priv->uncore, DSPFW3, reg);
943
944
945 wm = intel_calculate_wm(clock, &pnv_display_hplloff_wm,
946 pnv_display_hplloff_wm.fifo_size,
947 cpp, latency->display_hpll_disable);
948 reg = intel_uncore_read(&dev_priv->uncore, DSPFW3);
949 reg &= ~DSPFW_HPLL_SR_MASK;
950 reg |= FW_WM(wm, HPLL_SR);
951 intel_uncore_write(&dev_priv->uncore, DSPFW3, reg);
952
953
954 wm = intel_calculate_wm(clock, &pnv_cursor_hplloff_wm,
955 pnv_display_hplloff_wm.fifo_size,
956 4, latency->cursor_hpll_disable);
957 reg = intel_uncore_read(&dev_priv->uncore, DSPFW3);
958 reg &= ~DSPFW_HPLL_CURSOR_MASK;
959 reg |= FW_WM(wm, HPLL_CURSOR);
960 intel_uncore_write(&dev_priv->uncore, DSPFW3, reg);
961 drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg);
962
963 intel_set_memory_cxsr(dev_priv, true);
964 } else {
965 intel_set_memory_cxsr(dev_priv, false);
966 }
967}
968
969
970
971
972
973
974
975
976
977
978
979static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
980{
981 int tlb_miss = fifo_size * 64 - width * cpp * 8;
982
983 return max(0, tlb_miss);
984}
985
986static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
987 const struct g4x_wm_values *wm)
988{
989 enum pipe pipe;
990
991 for_each_pipe(dev_priv, pipe)
992 trace_g4x_wm(intel_crtc_for_pipe(dev_priv, pipe), wm);
993
994 intel_uncore_write(&dev_priv->uncore, DSPFW1,
995 FW_WM(wm->sr.plane, SR) |
996 FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
997 FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
998 FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
999 intel_uncore_write(&dev_priv->uncore, DSPFW2,
1000 (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
1001 FW_WM(wm->sr.fbc, FBC_SR) |
1002 FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
1003 FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
1004 FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
1005 FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
1006 intel_uncore_write(&dev_priv->uncore, DSPFW3,
1007 (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
1008 FW_WM(wm->sr.cursor, CURSOR_SR) |
1009 FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
1010 FW_WM(wm->hpll.plane, HPLL_SR));
1011
1012 intel_uncore_posting_read(&dev_priv->uncore, DSPFW1);
1013}
1014
1015#define FW_WM_VLV(value, plane) \
1016 (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
1017
1018static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
1019 const struct vlv_wm_values *wm)
1020{
1021 enum pipe pipe;
1022
1023 for_each_pipe(dev_priv, pipe) {
1024 trace_vlv_wm(intel_crtc_for_pipe(dev_priv, pipe), wm);
1025
1026 intel_uncore_write(&dev_priv->uncore, VLV_DDL(pipe),
1027 (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
1028 (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
1029 (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
1030 (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
1031 }
1032
1033
1034
1035
1036
1037
1038 intel_uncore_write(&dev_priv->uncore, DSPHOWM, 0);
1039 intel_uncore_write(&dev_priv->uncore, DSPHOWM1, 0);
1040 intel_uncore_write(&dev_priv->uncore, DSPFW4, 0);
1041 intel_uncore_write(&dev_priv->uncore, DSPFW5, 0);
1042 intel_uncore_write(&dev_priv->uncore, DSPFW6, 0);
1043
1044 intel_uncore_write(&dev_priv->uncore, DSPFW1,
1045 FW_WM(wm->sr.plane, SR) |
1046 FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
1047 FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
1048 FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
1049 intel_uncore_write(&dev_priv->uncore, DSPFW2,
1050 FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
1051 FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
1052 FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
1053 intel_uncore_write(&dev_priv->uncore, DSPFW3,
1054 FW_WM(wm->sr.cursor, CURSOR_SR));
1055
1056 if (IS_CHERRYVIEW(dev_priv)) {
1057 intel_uncore_write(&dev_priv->uncore, DSPFW7_CHV,
1058 FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
1059 FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
1060 intel_uncore_write(&dev_priv->uncore, DSPFW8_CHV,
1061 FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
1062 FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
1063 intel_uncore_write(&dev_priv->uncore, DSPFW9_CHV,
1064 FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
1065 FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
1066 intel_uncore_write(&dev_priv->uncore, DSPHOWM,
1067 FW_WM(wm->sr.plane >> 9, SR_HI) |
1068 FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
1069 FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
1070 FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
1071 FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
1072 FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
1073 FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
1074 FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
1075 FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
1076 FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
1077 } else {
1078 intel_uncore_write(&dev_priv->uncore, DSPFW7,
1079 FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
1080 FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
1081 intel_uncore_write(&dev_priv->uncore, DSPHOWM,
1082 FW_WM(wm->sr.plane >> 9, SR_HI) |
1083 FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
1084 FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
1085 FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
1086 FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
1087 FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
1088 FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
1089 }
1090
1091 intel_uncore_posting_read(&dev_priv->uncore, DSPFW1);
1092}
1093
1094#undef FW_WM_VLV
1095
1096static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
1097{
1098
1099 dev_priv->wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
1100 dev_priv->wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
1101 dev_priv->wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;
1102
1103 dev_priv->wm.max_level = G4X_WM_LEVEL_HPLL;
1104}
1105
1106static int g4x_plane_fifo_size(enum plane_id plane_id, int level)
1107{
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122 switch (plane_id) {
1123 case PLANE_CURSOR:
1124 return 63;
1125 case PLANE_PRIMARY:
1126 return level == G4X_WM_LEVEL_NORMAL ? 127 : 511;
1127 case PLANE_SPRITE0:
1128 return level == G4X_WM_LEVEL_NORMAL ? 127 : 0;
1129 default:
1130 MISSING_CASE(plane_id);
1131 return 0;
1132 }
1133}
1134
1135static int g4x_fbc_fifo_size(int level)
1136{
1137 switch (level) {
1138 case G4X_WM_LEVEL_SR:
1139 return 7;
1140 case G4X_WM_LEVEL_HPLL:
1141 return 15;
1142 default:
1143 MISSING_CASE(level);
1144 return 0;
1145 }
1146}
1147
1148static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
1149 const struct intel_plane_state *plane_state,
1150 int level)
1151{
1152 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1153 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1154 const struct drm_display_mode *pipe_mode =
1155 &crtc_state->hw.pipe_mode;
1156 unsigned int latency = dev_priv->wm.pri_latency[level] * 10;
1157 unsigned int clock, htotal, cpp, width, wm;
1158
1159 if (latency == 0)
1160 return USHRT_MAX;
1161
1162 if (!intel_wm_plane_visible(crtc_state, plane_state))
1163 return 0;
1164
1165 cpp = plane_state->hw.fb->format->cpp[0];
1166
1167
1168
1169
1170
1171
1172
1173
1174 if (plane->id == PLANE_PRIMARY &&
1175 level != G4X_WM_LEVEL_NORMAL)
1176 cpp = max(cpp, 4u);
1177
1178 clock = pipe_mode->crtc_clock;
1179 htotal = pipe_mode->crtc_htotal;
1180
1181 width = drm_rect_width(&plane_state->uapi.dst);
1182
1183 if (plane->id == PLANE_CURSOR) {
1184 wm = intel_wm_method2(clock, htotal, width, cpp, latency);
1185 } else if (plane->id == PLANE_PRIMARY &&
1186 level == G4X_WM_LEVEL_NORMAL) {
1187 wm = intel_wm_method1(clock, cpp, latency);
1188 } else {
1189 unsigned int small, large;
1190
1191 small = intel_wm_method1(clock, cpp, latency);
1192 large = intel_wm_method2(clock, htotal, width, cpp, latency);
1193
1194 wm = min(small, large);
1195 }
1196
1197 wm += g4x_tlb_miss_wa(g4x_plane_fifo_size(plane->id, level),
1198 width, cpp);
1199
1200 wm = DIV_ROUND_UP(wm, 64) + 2;
1201
1202 return min_t(unsigned int, wm, USHRT_MAX);
1203}
1204
1205static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
1206 int level, enum plane_id plane_id, u16 value)
1207{
1208 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1209 bool dirty = false;
1210
1211 for (; level < intel_wm_num_levels(dev_priv); level++) {
1212 struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
1213
1214 dirty |= raw->plane[plane_id] != value;
1215 raw->plane[plane_id] = value;
1216 }
1217
1218 return dirty;
1219}
1220
1221static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
1222 int level, u16 value)
1223{
1224 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1225 bool dirty = false;
1226
1227
1228 level = max(level, G4X_WM_LEVEL_SR);
1229
1230 for (; level < intel_wm_num_levels(dev_priv); level++) {
1231 struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
1232
1233 dirty |= raw->fbc != value;
1234 raw->fbc = value;
1235 }
1236
1237 return dirty;
1238}
1239
1240static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
1241 const struct intel_plane_state *plane_state,
1242 u32 pri_val);
1243
1244static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
1245 const struct intel_plane_state *plane_state)
1246{
1247 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1248 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1249 int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
1250 enum plane_id plane_id = plane->id;
1251 bool dirty = false;
1252 int level;
1253
1254 if (!intel_wm_plane_visible(crtc_state, plane_state)) {
1255 dirty |= g4x_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
1256 if (plane_id == PLANE_PRIMARY)
1257 dirty |= g4x_raw_fbc_wm_set(crtc_state, 0, 0);
1258 goto out;
1259 }
1260
1261 for (level = 0; level < num_levels; level++) {
1262 struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
1263 int wm, max_wm;
1264
1265 wm = g4x_compute_wm(crtc_state, plane_state, level);
1266 max_wm = g4x_plane_fifo_size(plane_id, level);
1267
1268 if (wm > max_wm)
1269 break;
1270
1271 dirty |= raw->plane[plane_id] != wm;
1272 raw->plane[plane_id] = wm;
1273
1274 if (plane_id != PLANE_PRIMARY ||
1275 level == G4X_WM_LEVEL_NORMAL)
1276 continue;
1277
1278 wm = ilk_compute_fbc_wm(crtc_state, plane_state,
1279 raw->plane[plane_id]);
1280 max_wm = g4x_fbc_fifo_size(level);
1281
1282
1283
1284
1285
1286 if (wm > max_wm)
1287 wm = USHRT_MAX;
1288
1289 dirty |= raw->fbc != wm;
1290 raw->fbc = wm;
1291 }
1292
1293
1294 dirty |= g4x_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);
1295
1296 if (plane_id == PLANE_PRIMARY)
1297 dirty |= g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);
1298
1299 out:
1300 if (dirty) {
1301 drm_dbg_kms(&dev_priv->drm,
1302 "%s watermarks: normal=%d, SR=%d, HPLL=%d\n",
1303 plane->base.name,
1304 crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id],
1305 crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id],
1306 crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]);
1307
1308 if (plane_id == PLANE_PRIMARY)
1309 drm_dbg_kms(&dev_priv->drm,
1310 "FBC watermarks: SR=%d, HPLL=%d\n",
1311 crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc,
1312 crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc);
1313 }
1314
1315 return dirty;
1316}
1317
1318static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
1319 enum plane_id plane_id, int level)
1320{
1321 const struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
1322
1323 return raw->plane[plane_id] <= g4x_plane_fifo_size(plane_id, level);
1324}
1325
1326static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
1327 int level)
1328{
1329 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1330
1331 if (level > dev_priv->wm.max_level)
1332 return false;
1333
1334 return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
1335 g4x_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
1336 g4x_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
1337}
1338
1339
1340static void g4x_invalidate_wms(struct intel_crtc *crtc,
1341 struct g4x_wm_state *wm_state, int level)
1342{
1343 if (level <= G4X_WM_LEVEL_NORMAL) {
1344 enum plane_id plane_id;
1345
1346 for_each_plane_id_on_crtc(crtc, plane_id)
1347 wm_state->wm.plane[plane_id] = USHRT_MAX;
1348 }
1349
1350 if (level <= G4X_WM_LEVEL_SR) {
1351 wm_state->cxsr = false;
1352 wm_state->sr.cursor = USHRT_MAX;
1353 wm_state->sr.plane = USHRT_MAX;
1354 wm_state->sr.fbc = USHRT_MAX;
1355 }
1356
1357 if (level <= G4X_WM_LEVEL_HPLL) {
1358 wm_state->hpll_en = false;
1359 wm_state->hpll.cursor = USHRT_MAX;
1360 wm_state->hpll.plane = USHRT_MAX;
1361 wm_state->hpll.fbc = USHRT_MAX;
1362 }
1363}
1364
1365static bool g4x_compute_fbc_en(const struct g4x_wm_state *wm_state,
1366 int level)
1367{
1368 if (level < G4X_WM_LEVEL_SR)
1369 return false;
1370
1371 if (level >= G4X_WM_LEVEL_SR &&
1372 wm_state->sr.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_SR))
1373 return false;
1374
1375 if (level >= G4X_WM_LEVEL_HPLL &&
1376 wm_state->hpll.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_HPLL))
1377 return false;
1378
1379 return true;
1380}
1381
1382static int g4x_compute_pipe_wm(struct intel_atomic_state *state,
1383 struct intel_crtc *crtc)
1384{
1385 struct intel_crtc_state *crtc_state =
1386 intel_atomic_get_new_crtc_state(state, crtc);
1387 struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
1388 u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
1389 const struct g4x_pipe_wm *raw;
1390 const struct intel_plane_state *old_plane_state;
1391 const struct intel_plane_state *new_plane_state;
1392 struct intel_plane *plane;
1393 enum plane_id plane_id;
1394 int i, level;
1395 unsigned int dirty = 0;
1396
1397 for_each_oldnew_intel_plane_in_state(state, plane,
1398 old_plane_state,
1399 new_plane_state, i) {
1400 if (new_plane_state->hw.crtc != &crtc->base &&
1401 old_plane_state->hw.crtc != &crtc->base)
1402 continue;
1403
1404 if (g4x_raw_plane_wm_compute(crtc_state, new_plane_state))
1405 dirty |= BIT(plane->id);
1406 }
1407
1408 if (!dirty)
1409 return 0;
1410
1411 level = G4X_WM_LEVEL_NORMAL;
1412 if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
1413 goto out;
1414
1415 raw = &crtc_state->wm.g4x.raw[level];
1416 for_each_plane_id_on_crtc(crtc, plane_id)
1417 wm_state->wm.plane[plane_id] = raw->plane[plane_id];
1418
1419 level = G4X_WM_LEVEL_SR;
1420 if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
1421 goto out;
1422
1423 raw = &crtc_state->wm.g4x.raw[level];
1424 wm_state->sr.plane = raw->plane[PLANE_PRIMARY];
1425 wm_state->sr.cursor = raw->plane[PLANE_CURSOR];
1426 wm_state->sr.fbc = raw->fbc;
1427
1428 wm_state->cxsr = active_planes == BIT(PLANE_PRIMARY);
1429
1430 level = G4X_WM_LEVEL_HPLL;
1431 if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
1432 goto out;
1433
1434 raw = &crtc_state->wm.g4x.raw[level];
1435 wm_state->hpll.plane = raw->plane[PLANE_PRIMARY];
1436 wm_state->hpll.cursor = raw->plane[PLANE_CURSOR];
1437 wm_state->hpll.fbc = raw->fbc;
1438
1439 wm_state->hpll_en = wm_state->cxsr;
1440
1441 level++;
1442
1443 out:
1444 if (level == G4X_WM_LEVEL_NORMAL)
1445 return -EINVAL;
1446
1447
1448 g4x_invalidate_wms(crtc, wm_state, level);
1449
1450
1451
1452
1453
1454
1455
1456
1457 wm_state->fbc_en = g4x_compute_fbc_en(wm_state, level - 1);
1458
1459 return 0;
1460}
1461
1462static int g4x_compute_intermediate_wm(struct intel_atomic_state *state,
1463 struct intel_crtc *crtc)
1464{
1465 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1466 struct intel_crtc_state *new_crtc_state =
1467 intel_atomic_get_new_crtc_state(state, crtc);
1468 const struct intel_crtc_state *old_crtc_state =
1469 intel_atomic_get_old_crtc_state(state, crtc);
1470 struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate;
1471 const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal;
1472 const struct g4x_wm_state *active = &old_crtc_state->wm.g4x.optimal;
1473 enum plane_id plane_id;
1474
1475 if (!new_crtc_state->hw.active ||
1476 drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) {
1477 *intermediate = *optimal;
1478
1479 intermediate->cxsr = false;
1480 intermediate->hpll_en = false;
1481 goto out;
1482 }
1483
1484 intermediate->cxsr = optimal->cxsr && active->cxsr &&
1485 !new_crtc_state->disable_cxsr;
1486 intermediate->hpll_en = optimal->hpll_en && active->hpll_en &&
1487 !new_crtc_state->disable_cxsr;
1488 intermediate->fbc_en = optimal->fbc_en && active->fbc_en;
1489
1490 for_each_plane_id_on_crtc(crtc, plane_id) {
1491 intermediate->wm.plane[plane_id] =
1492 max(optimal->wm.plane[plane_id],
1493 active->wm.plane[plane_id]);
1494
1495 drm_WARN_ON(&dev_priv->drm, intermediate->wm.plane[plane_id] >
1496 g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL));
1497 }
1498
1499 intermediate->sr.plane = max(optimal->sr.plane,
1500 active->sr.plane);
1501 intermediate->sr.cursor = max(optimal->sr.cursor,
1502 active->sr.cursor);
1503 intermediate->sr.fbc = max(optimal->sr.fbc,
1504 active->sr.fbc);
1505
1506 intermediate->hpll.plane = max(optimal->hpll.plane,
1507 active->hpll.plane);
1508 intermediate->hpll.cursor = max(optimal->hpll.cursor,
1509 active->hpll.cursor);
1510 intermediate->hpll.fbc = max(optimal->hpll.fbc,
1511 active->hpll.fbc);
1512
1513 drm_WARN_ON(&dev_priv->drm,
1514 (intermediate->sr.plane >
1515 g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) ||
1516 intermediate->sr.cursor >
1517 g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) &&
1518 intermediate->cxsr);
1519 drm_WARN_ON(&dev_priv->drm,
1520 (intermediate->sr.plane >
1521 g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) ||
1522 intermediate->sr.cursor >
1523 g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) &&
1524 intermediate->hpll_en);
1525
1526 drm_WARN_ON(&dev_priv->drm,
1527 intermediate->sr.fbc > g4x_fbc_fifo_size(1) &&
1528 intermediate->fbc_en && intermediate->cxsr);
1529 drm_WARN_ON(&dev_priv->drm,
1530 intermediate->hpll.fbc > g4x_fbc_fifo_size(2) &&
1531 intermediate->fbc_en && intermediate->hpll_en);
1532
1533out:
1534
1535
1536
1537
1538 if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
1539 new_crtc_state->wm.need_postvbl_update = true;
1540
1541 return 0;
1542}
1543
1544static void g4x_merge_wm(struct drm_i915_private *dev_priv,
1545 struct g4x_wm_values *wm)
1546{
1547 struct intel_crtc *crtc;
1548 int num_active_pipes = 0;
1549
1550 wm->cxsr = true;
1551 wm->hpll_en = true;
1552 wm->fbc_en = true;
1553
1554 for_each_intel_crtc(&dev_priv->drm, crtc) {
1555 const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
1556
1557 if (!crtc->active)
1558 continue;
1559
1560 if (!wm_state->cxsr)
1561 wm->cxsr = false;
1562 if (!wm_state->hpll_en)
1563 wm->hpll_en = false;
1564 if (!wm_state->fbc_en)
1565 wm->fbc_en = false;
1566
1567 num_active_pipes++;
1568 }
1569
1570 if (num_active_pipes != 1) {
1571 wm->cxsr = false;
1572 wm->hpll_en = false;
1573 wm->fbc_en = false;
1574 }
1575
1576 for_each_intel_crtc(&dev_priv->drm, crtc) {
1577 const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
1578 enum pipe pipe = crtc->pipe;
1579
1580 wm->pipe[pipe] = wm_state->wm;
1581 if (crtc->active && wm->cxsr)
1582 wm->sr = wm_state->sr;
1583 if (crtc->active && wm->hpll_en)
1584 wm->hpll = wm_state->hpll;
1585 }
1586}
1587
1588static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
1589{
1590 struct g4x_wm_values *old_wm = &dev_priv->wm.g4x;
1591 struct g4x_wm_values new_wm = {};
1592
1593 g4x_merge_wm(dev_priv, &new_wm);
1594
1595 if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
1596 return;
1597
1598 if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
1599 _intel_set_memory_cxsr(dev_priv, false);
1600
1601 g4x_write_wm_values(dev_priv, &new_wm);
1602
1603 if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
1604 _intel_set_memory_cxsr(dev_priv, true);
1605
1606 *old_wm = new_wm;
1607}
1608
1609static void g4x_initial_watermarks(struct intel_atomic_state *state,
1610 struct intel_crtc *crtc)
1611{
1612 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1613 const struct intel_crtc_state *crtc_state =
1614 intel_atomic_get_new_crtc_state(state, crtc);
1615
1616 mutex_lock(&dev_priv->wm.wm_mutex);
1617 crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate;
1618 g4x_program_watermarks(dev_priv);
1619 mutex_unlock(&dev_priv->wm.wm_mutex);
1620}
1621
1622static void g4x_optimize_watermarks(struct intel_atomic_state *state,
1623 struct intel_crtc *crtc)
1624{
1625 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1626 const struct intel_crtc_state *crtc_state =
1627 intel_atomic_get_new_crtc_state(state, crtc);
1628
1629 if (!crtc_state->wm.need_postvbl_update)
1630 return;
1631
1632 mutex_lock(&dev_priv->wm.wm_mutex);
1633 crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
1634 g4x_program_watermarks(dev_priv);
1635 mutex_unlock(&dev_priv->wm.wm_mutex);
1636}
1637
1638
1639static unsigned int vlv_wm_method2(unsigned int pixel_rate,
1640 unsigned int htotal,
1641 unsigned int width,
1642 unsigned int cpp,
1643 unsigned int latency)
1644{
1645 unsigned int ret;
1646
1647 ret = intel_wm_method2(pixel_rate, htotal,
1648 width, cpp, latency);
1649 ret = DIV_ROUND_UP(ret, 64);
1650
1651 return ret;
1652}
1653
1654static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
1655{
1656
1657 dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
1658
1659 dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;
1660
1661 if (IS_CHERRYVIEW(dev_priv)) {
1662 dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
1663 dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
1664
1665 dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
1666 }
1667}
1668
1669static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
1670 const struct intel_plane_state *plane_state,
1671 int level)
1672{
1673 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1674 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1675 const struct drm_display_mode *pipe_mode =
1676 &crtc_state->hw.pipe_mode;
1677 unsigned int clock, htotal, cpp, width, wm;
1678
1679 if (dev_priv->wm.pri_latency[level] == 0)
1680 return USHRT_MAX;
1681
1682 if (!intel_wm_plane_visible(crtc_state, plane_state))
1683 return 0;
1684
1685 cpp = plane_state->hw.fb->format->cpp[0];
1686 clock = pipe_mode->crtc_clock;
1687 htotal = pipe_mode->crtc_htotal;
1688 width = crtc_state->pipe_src_w;
1689
1690 if (plane->id == PLANE_CURSOR) {
1691
1692
1693
1694
1695
1696
1697 wm = 63;
1698 } else {
1699 wm = vlv_wm_method2(clock, htotal, width, cpp,
1700 dev_priv->wm.pri_latency[level] * 10);
1701 }
1702
1703 return min_t(unsigned int, wm, USHRT_MAX);
1704}
1705
1706static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
1707{
1708 return (active_planes & (BIT(PLANE_SPRITE0) |
1709 BIT(PLANE_SPRITE1))) == BIT(PLANE_SPRITE1);
1710}
1711
1712static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
1713{
1714 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1715 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1716 const struct g4x_pipe_wm *raw =
1717 &crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
1718 struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
1719 u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
1720 int num_active_planes = hweight8(active_planes);
1721 const int fifo_size = 511;
1722 int fifo_extra, fifo_left = fifo_size;
1723 int sprite0_fifo_extra = 0;
1724 unsigned int total_rate;
1725 enum plane_id plane_id;
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735 if (vlv_need_sprite0_fifo_workaround(active_planes))
1736 sprite0_fifo_extra = 1;
1737
1738 total_rate = raw->plane[PLANE_PRIMARY] +
1739 raw->plane[PLANE_SPRITE0] +
1740 raw->plane[PLANE_SPRITE1] +
1741 sprite0_fifo_extra;
1742
1743 if (total_rate > fifo_size)
1744 return -EINVAL;
1745
1746 if (total_rate == 0)
1747 total_rate = 1;
1748
1749 for_each_plane_id_on_crtc(crtc, plane_id) {
1750 unsigned int rate;
1751
1752 if ((active_planes & BIT(plane_id)) == 0) {
1753 fifo_state->plane[plane_id] = 0;
1754 continue;
1755 }
1756
1757 rate = raw->plane[plane_id];
1758 fifo_state->plane[plane_id] = fifo_size * rate / total_rate;
1759 fifo_left -= fifo_state->plane[plane_id];
1760 }
1761
1762 fifo_state->plane[PLANE_SPRITE0] += sprite0_fifo_extra;
1763 fifo_left -= sprite0_fifo_extra;
1764
1765 fifo_state->plane[PLANE_CURSOR] = 63;
1766
1767 fifo_extra = DIV_ROUND_UP(fifo_left, num_active_planes ?: 1);
1768
1769
1770 for_each_plane_id_on_crtc(crtc, plane_id) {
1771 int plane_extra;
1772
1773 if (fifo_left == 0)
1774 break;
1775
1776 if ((active_planes & BIT(plane_id)) == 0)
1777 continue;
1778
1779 plane_extra = min(fifo_extra, fifo_left);
1780 fifo_state->plane[plane_id] += plane_extra;
1781 fifo_left -= plane_extra;
1782 }
1783
1784 drm_WARN_ON(&dev_priv->drm, active_planes != 0 && fifo_left != 0);
1785
1786
1787 if (active_planes == 0) {
1788 drm_WARN_ON(&dev_priv->drm, fifo_left != fifo_size);
1789 fifo_state->plane[PLANE_PRIMARY] = fifo_left;
1790 }
1791
1792 return 0;
1793}
1794
1795
1796static void vlv_invalidate_wms(struct intel_crtc *crtc,
1797 struct vlv_wm_state *wm_state, int level)
1798{
1799 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1800
1801 for (; level < intel_wm_num_levels(dev_priv); level++) {
1802 enum plane_id plane_id;
1803
1804 for_each_plane_id_on_crtc(crtc, plane_id)
1805 wm_state->wm[level].plane[plane_id] = USHRT_MAX;
1806
1807 wm_state->sr[level].cursor = USHRT_MAX;
1808 wm_state->sr[level].plane = USHRT_MAX;
1809 }
1810}
1811
1812static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
1813{
1814 if (wm > fifo_size)
1815 return USHRT_MAX;
1816 else
1817 return fifo_size - wm;
1818}
1819
1820
1821
1822
1823
1824static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
1825 int level, enum plane_id plane_id, u16 value)
1826{
1827 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1828 int num_levels = intel_wm_num_levels(dev_priv);
1829 bool dirty = false;
1830
1831 for (; level < num_levels; level++) {
1832 struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
1833
1834 dirty |= raw->plane[plane_id] != value;
1835 raw->plane[plane_id] = value;
1836 }
1837
1838 return dirty;
1839}
1840
1841static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
1842 const struct intel_plane_state *plane_state)
1843{
1844 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1845 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1846 enum plane_id plane_id = plane->id;
1847 int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
1848 int level;
1849 bool dirty = false;
1850
1851 if (!intel_wm_plane_visible(crtc_state, plane_state)) {
1852 dirty |= vlv_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
1853 goto out;
1854 }
1855
1856 for (level = 0; level < num_levels; level++) {
1857 struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
1858 int wm = vlv_compute_wm_level(crtc_state, plane_state, level);
1859 int max_wm = plane_id == PLANE_CURSOR ? 63 : 511;
1860
1861 if (wm > max_wm)
1862 break;
1863
1864 dirty |= raw->plane[plane_id] != wm;
1865 raw->plane[plane_id] = wm;
1866 }
1867
1868
1869 dirty |= vlv_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);
1870
1871out:
1872 if (dirty)
1873 drm_dbg_kms(&dev_priv->drm,
1874 "%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n",
1875 plane->base.name,
1876 crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
1877 crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id],
1878 crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]);
1879
1880 return dirty;
1881}
1882
1883static bool vlv_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
1884 enum plane_id plane_id, int level)
1885{
1886 const struct g4x_pipe_wm *raw =
1887 &crtc_state->wm.vlv.raw[level];
1888 const struct vlv_fifo_state *fifo_state =
1889 &crtc_state->wm.vlv.fifo_state;
1890
1891 return raw->plane[plane_id] <= fifo_state->plane[plane_id];
1892}
1893
1894static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, int level)
1895{
1896 return vlv_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
1897 vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
1898 vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE1, level) &&
1899 vlv_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
1900}
1901
1902static int vlv_compute_pipe_wm(struct intel_atomic_state *state,
1903 struct intel_crtc *crtc)
1904{
1905 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1906 struct intel_crtc_state *crtc_state =
1907 intel_atomic_get_new_crtc_state(state, crtc);
1908 struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
1909 const struct vlv_fifo_state *fifo_state =
1910 &crtc_state->wm.vlv.fifo_state;
1911 u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
1912 int num_active_planes = hweight8(active_planes);
1913 bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->uapi);
1914 const struct intel_plane_state *old_plane_state;
1915 const struct intel_plane_state *new_plane_state;
1916 struct intel_plane *plane;
1917 enum plane_id plane_id;
1918 int level, ret, i;
1919 unsigned int dirty = 0;
1920
1921 for_each_oldnew_intel_plane_in_state(state, plane,
1922 old_plane_state,
1923 new_plane_state, i) {
1924 if (new_plane_state->hw.crtc != &crtc->base &&
1925 old_plane_state->hw.crtc != &crtc->base)
1926 continue;
1927
1928 if (vlv_raw_plane_wm_compute(crtc_state, new_plane_state))
1929 dirty |= BIT(plane->id);
1930 }
1931
1932
1933
1934
1935
1936
1937
1938 if (needs_modeset)
1939 crtc_state->fifo_changed = true;
1940
1941 if (!dirty)
1942 return 0;
1943
1944
1945 if (dirty & ~BIT(PLANE_CURSOR)) {
1946 const struct intel_crtc_state *old_crtc_state =
1947 intel_atomic_get_old_crtc_state(state, crtc);
1948 const struct vlv_fifo_state *old_fifo_state =
1949 &old_crtc_state->wm.vlv.fifo_state;
1950
1951 ret = vlv_compute_fifo(crtc_state);
1952 if (ret)
1953 return ret;
1954
1955 if (needs_modeset ||
1956 memcmp(old_fifo_state, fifo_state,
1957 sizeof(*fifo_state)) != 0)
1958 crtc_state->fifo_changed = true;
1959 }
1960
1961
1962 wm_state->num_levels = intel_wm_num_levels(dev_priv);
1963
1964
1965
1966
1967
1968 wm_state->cxsr = crtc->pipe != PIPE_C && num_active_planes == 1;
1969
1970 for (level = 0; level < wm_state->num_levels; level++) {
1971 const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
1972 const int sr_fifo_size = INTEL_NUM_PIPES(dev_priv) * 512 - 1;
1973
1974 if (!vlv_raw_crtc_wm_is_valid(crtc_state, level))
1975 break;
1976
1977 for_each_plane_id_on_crtc(crtc, plane_id) {
1978 wm_state->wm[level].plane[plane_id] =
1979 vlv_invert_wm_value(raw->plane[plane_id],
1980 fifo_state->plane[plane_id]);
1981 }
1982
1983 wm_state->sr[level].plane =
1984 vlv_invert_wm_value(max3(raw->plane[PLANE_PRIMARY],
1985 raw->plane[PLANE_SPRITE0],
1986 raw->plane[PLANE_SPRITE1]),
1987 sr_fifo_size);
1988
1989 wm_state->sr[level].cursor =
1990 vlv_invert_wm_value(raw->plane[PLANE_CURSOR],
1991 63);
1992 }
1993
1994 if (level == 0)
1995 return -EINVAL;
1996
1997
1998 wm_state->num_levels = level;
1999
2000
2001 vlv_invalidate_wms(crtc, wm_state, level);
2002
2003 return 0;
2004}
2005
2006#define VLV_FIFO(plane, value) \
2007 (((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
2008
2009static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
2010 struct intel_crtc *crtc)
2011{
2012 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2013 struct intel_uncore *uncore = &dev_priv->uncore;
2014 const struct intel_crtc_state *crtc_state =
2015 intel_atomic_get_new_crtc_state(state, crtc);
2016 const struct vlv_fifo_state *fifo_state =
2017 &crtc_state->wm.vlv.fifo_state;
2018 int sprite0_start, sprite1_start, fifo_size;
2019 u32 dsparb, dsparb2, dsparb3;
2020
2021 if (!crtc_state->fifo_changed)
2022 return;
2023
2024 sprite0_start = fifo_state->plane[PLANE_PRIMARY];
2025 sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start;
2026 fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start;
2027
2028 drm_WARN_ON(&dev_priv->drm, fifo_state->plane[PLANE_CURSOR] != 63);
2029 drm_WARN_ON(&dev_priv->drm, fifo_size != 511);
2030
2031 trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size);
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042 spin_lock(&uncore->lock);
2043
2044 switch (crtc->pipe) {
2045 case PIPE_A:
2046 dsparb = intel_uncore_read_fw(uncore, DSPARB);
2047 dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
2048
2049 dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
2050 VLV_FIFO(SPRITEB, 0xff));
2051 dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
2052 VLV_FIFO(SPRITEB, sprite1_start));
2053
2054 dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
2055 VLV_FIFO(SPRITEB_HI, 0x1));
2056 dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
2057 VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));
2058
2059 intel_uncore_write_fw(uncore, DSPARB, dsparb);
2060 intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
2061 break;
2062 case PIPE_B:
2063 dsparb = intel_uncore_read_fw(uncore, DSPARB);
2064 dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
2065
2066 dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
2067 VLV_FIFO(SPRITED, 0xff));
2068 dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
2069 VLV_FIFO(SPRITED, sprite1_start));
2070
2071 dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
2072 VLV_FIFO(SPRITED_HI, 0xff));
2073 dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
2074 VLV_FIFO(SPRITED_HI, sprite1_start >> 8));
2075
2076 intel_uncore_write_fw(uncore, DSPARB, dsparb);
2077 intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
2078 break;
2079 case PIPE_C:
2080 dsparb3 = intel_uncore_read_fw(uncore, DSPARB3);
2081 dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
2082
2083 dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
2084 VLV_FIFO(SPRITEF, 0xff));
2085 dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
2086 VLV_FIFO(SPRITEF, sprite1_start));
2087
2088 dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
2089 VLV_FIFO(SPRITEF_HI, 0xff));
2090 dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
2091 VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));
2092
2093 intel_uncore_write_fw(uncore, DSPARB3, dsparb3);
2094 intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
2095 break;
2096 default:
2097 break;
2098 }
2099
2100 intel_uncore_posting_read_fw(uncore, DSPARB);
2101
2102 spin_unlock(&uncore->lock);
2103}
2104
2105#undef VLV_FIFO
2106
2107static int vlv_compute_intermediate_wm(struct intel_atomic_state *state,
2108 struct intel_crtc *crtc)
2109{
2110 struct intel_crtc_state *new_crtc_state =
2111 intel_atomic_get_new_crtc_state(state, crtc);
2112 const struct intel_crtc_state *old_crtc_state =
2113 intel_atomic_get_old_crtc_state(state, crtc);
2114 struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate;
2115 const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal;
2116 const struct vlv_wm_state *active = &old_crtc_state->wm.vlv.optimal;
2117 int level;
2118
2119 if (!new_crtc_state->hw.active ||
2120 drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) {
2121 *intermediate = *optimal;
2122
2123 intermediate->cxsr = false;
2124 goto out;
2125 }
2126
2127 intermediate->num_levels = min(optimal->num_levels, active->num_levels);
2128 intermediate->cxsr = optimal->cxsr && active->cxsr &&
2129 !new_crtc_state->disable_cxsr;
2130
2131 for (level = 0; level < intermediate->num_levels; level++) {
2132 enum plane_id plane_id;
2133
2134 for_each_plane_id_on_crtc(crtc, plane_id) {
2135 intermediate->wm[level].plane[plane_id] =
2136 min(optimal->wm[level].plane[plane_id],
2137 active->wm[level].plane[plane_id]);
2138 }
2139
2140 intermediate->sr[level].plane = min(optimal->sr[level].plane,
2141 active->sr[level].plane);
2142 intermediate->sr[level].cursor = min(optimal->sr[level].cursor,
2143 active->sr[level].cursor);
2144 }
2145
2146 vlv_invalidate_wms(crtc, intermediate, level);
2147
2148out:
	/*
	 * If our intermediate WM are identical to the final WM, then we can
	 * omit the post-vblank programming; only update watermarks if things
	 * have changed.
	 */
2153 if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
2154 new_crtc_state->wm.need_postvbl_update = true;
2155
2156 return 0;
2157}
2158
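/*
 * Merge the per-crtc watermark states into one device-wide set of
 * values, using the deepest PM level that all active pipes can support.
 */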
2159static void vlv_merge_wm(struct drm_i915_private *dev_priv,
2160 struct vlv_wm_values *wm)
2161{
2162 struct intel_crtc *crtc;
2163 int num_active_pipes = 0;
2164
2165 wm->level = dev_priv->wm.max_level;
2166 wm->cxsr = true;
2167
2168 for_each_intel_crtc(&dev_priv->drm, crtc) {
2169 const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
2170
2171 if (!crtc->active)
2172 continue;
2173
2174 if (!wm_state->cxsr)
2175 wm->cxsr = false;
2176
2177 num_active_pipes++;
2178 wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
2179 }
2180
2181 if (num_active_pipes != 1)
2182 wm->cxsr = false;
2183
2184 if (num_active_pipes > 1)
2185 wm->level = VLV_WM_LEVEL_PM2;
2186
2187 for_each_intel_crtc(&dev_priv->drm, crtc) {
2188 const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
2189 enum pipe pipe = crtc->pipe;
2190
2191 wm->pipe[pipe] = wm_state->wm[wm->level];
2192 if (crtc->active && wm->cxsr)
2193 wm->sr = wm_state->sr[wm->level];
2194
2195 wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2;
2196 wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2;
2197 wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2;
2198 wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2;
2199 }
2200}
2201
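/*
 * Program the merged watermarks: deeper memory power states are
 * disabled before the watermarks are lowered, and only re-enabled
 * once the new values are in place.
 */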
2202static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
2203{
2204 struct vlv_wm_values *old_wm = &dev_priv->wm.vlv;
2205 struct vlv_wm_values new_wm = {};
2206
2207 vlv_merge_wm(dev_priv, &new_wm);
2208
2209 if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
2210 return;
2211
2212 if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
2213 chv_set_memory_dvfs(dev_priv, false);
2214
2215 if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
2216 chv_set_memory_pm5(dev_priv, false);
2217
2218 if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
2219 _intel_set_memory_cxsr(dev_priv, false);
2220
2221 vlv_write_wm_values(dev_priv, &new_wm);
2222
2223 if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
2224 _intel_set_memory_cxsr(dev_priv, true);
2225
2226 if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
2227 chv_set_memory_pm5(dev_priv, true);
2228
2229 if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
2230 chv_set_memory_dvfs(dev_priv, true);
2231
2232 *old_wm = new_wm;
2233}
2234
2235static void vlv_initial_watermarks(struct intel_atomic_state *state,
2236 struct intel_crtc *crtc)
2237{
2238 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2239 const struct intel_crtc_state *crtc_state =
2240 intel_atomic_get_new_crtc_state(state, crtc);
2241
2242 mutex_lock(&dev_priv->wm.wm_mutex);
2243 crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
2244 vlv_program_watermarks(dev_priv);
2245 mutex_unlock(&dev_priv->wm.wm_mutex);
2246}
2247
2248static void vlv_optimize_watermarks(struct intel_atomic_state *state,
2249 struct intel_crtc *crtc)
2250{
2251 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2252 const struct intel_crtc_state *crtc_state =
2253 intel_atomic_get_new_crtc_state(state, crtc);
2254
2255 if (!crtc_state->wm.need_postvbl_update)
2256 return;
2257
2258 mutex_lock(&dev_priv->wm.wm_mutex);
2259 crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
2260 vlv_program_watermarks(dev_priv);
2261 mutex_unlock(&dev_priv->wm.wm_mutex);
2262}
2263
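/* Update FIFO watermarks for gen4 (i965) style hardware */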
2264static void i965_update_wm(struct drm_i915_private *dev_priv)
2265{
2266 struct intel_crtc *crtc;
2267 int srwm = 1;
2268 int cursor_sr = 16;
2269 bool cxsr_enabled;
2270
	/* Calc sr entries for one plane configs */
2272 crtc = single_enabled_crtc(dev_priv);
2273 if (crtc) {
		/* self-refresh has much higher latency */
2275 static const int sr_latency_ns = 12000;
2276 const struct drm_display_mode *pipe_mode =
2277 &crtc->config->hw.pipe_mode;
2278 const struct drm_framebuffer *fb =
2279 crtc->base.primary->state->fb;
2280 int clock = pipe_mode->crtc_clock;
2281 int htotal = pipe_mode->crtc_htotal;
2282 int hdisplay = crtc->config->pipe_src_w;
2283 int cpp = fb->format->cpp[0];
2284 int entries;
2285
2286 entries = intel_wm_method2(clock, htotal,
2287 hdisplay, cpp, sr_latency_ns / 100);
2288 entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
2289 srwm = I965_FIFO_SIZE - entries;
2290 if (srwm < 0)
2291 srwm = 1;
2292 srwm &= 0x1ff;
2293 drm_dbg_kms(&dev_priv->drm,
2294 "self-refresh entries: %d, wm: %d\n",
2295 entries, srwm);
2296
2297 entries = intel_wm_method2(clock, htotal,
2298 crtc->base.cursor->state->crtc_w, 4,
2299 sr_latency_ns / 100);
2300 entries = DIV_ROUND_UP(entries,
2301 i965_cursor_wm_info.cacheline_size) +
2302 i965_cursor_wm_info.guard_size;
2303
2304 cursor_sr = i965_cursor_wm_info.fifo_size - entries;
2305 if (cursor_sr > i965_cursor_wm_info.max_wm)
2306 cursor_sr = i965_cursor_wm_info.max_wm;
2307
2308 drm_dbg_kms(&dev_priv->drm,
2309 "self-refresh watermark: display plane %d "
2310 "cursor %d\n", srwm, cursor_sr);
2311
2312 cxsr_enabled = true;
2313 } else {
2314 cxsr_enabled = false;
		/* Turn off self refresh if both pipes are enabled */
2316 intel_set_memory_cxsr(dev_priv, false);
2317 }
2318
2319 drm_dbg_kms(&dev_priv->drm,
2320 "Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
2321 srwm);
2322
	/* 965 has limitations... */
2324 intel_uncore_write(&dev_priv->uncore, DSPFW1, FW_WM(srwm, SR) |
2325 FW_WM(8, CURSORB) |
2326 FW_WM(8, PLANEB) |
2327 FW_WM(8, PLANEA));
2328 intel_uncore_write(&dev_priv->uncore, DSPFW2, FW_WM(8, CURSORA) |
2329 FW_WM(8, PLANEC_OLD));
2330
2331 intel_uncore_write(&dev_priv->uncore, DSPFW3, FW_WM(cursor_sr, CURSOR_SR));
2332
2333 if (cxsr_enabled)
2334 intel_set_memory_cxsr(dev_priv, true);
2335}
2336
2337#undef FW_WM
2338
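/* Find the CRTC whose primary plane occupies the given i9xx plane */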
2339static struct intel_crtc *intel_crtc_for_plane(struct drm_i915_private *i915,
2340 enum i9xx_plane_id i9xx_plane)
2341{
2342 struct intel_plane *plane;
2343
2344 for_each_intel_plane(&i915->drm, plane) {
2345 if (plane->id == PLANE_PRIMARY &&
2346 plane->i9xx_plane == i9xx_plane)
2347 return intel_crtc_for_pipe(i915, plane->pipe);
2348 }
2349
2350 return NULL;
2351}
2352
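/* Update FIFO watermarks for gen2/gen3 style two-plane hardware */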
2353static void i9xx_update_wm(struct drm_i915_private *dev_priv)
2354{
2355 const struct intel_watermark_params *wm_info;
2356 u32 fwater_lo;
2357 u32 fwater_hi;
2358 int cwm, srwm = 1;
2359 int fifo_size;
2360 int planea_wm, planeb_wm;
2361 struct intel_crtc *crtc, *enabled = NULL;
2362
2363 if (IS_I945GM(dev_priv))
2364 wm_info = &i945_wm_info;
2365 else if (DISPLAY_VER(dev_priv) != 2)
2366 wm_info = &i915_wm_info;
2367 else
2368 wm_info = &i830_a_wm_info;
2369
2370 if (DISPLAY_VER(dev_priv) == 2)
2371 fifo_size = i830_get_fifo_size(dev_priv, PLANE_A);
2372 else
2373 fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_A);
2374 crtc = intel_crtc_for_plane(dev_priv, PLANE_A);
2375 if (intel_crtc_active(crtc)) {
2376 const struct drm_display_mode *pipe_mode =
2377 &crtc->config->hw.pipe_mode;
2378 const struct drm_framebuffer *fb =
2379 crtc->base.primary->state->fb;
2380 int cpp;
2381
2382 if (DISPLAY_VER(dev_priv) == 2)
2383 cpp = 4;
2384 else
2385 cpp = fb->format->cpp[0];
2386
2387 planea_wm = intel_calculate_wm(pipe_mode->crtc_clock,
2388 wm_info, fifo_size, cpp,
2389 pessimal_latency_ns);
2390 enabled = crtc;
2391 } else {
2392 planea_wm = fifo_size - wm_info->guard_size;
2393 if (planea_wm > (long)wm_info->max_wm)
2394 planea_wm = wm_info->max_wm;
2395 }
2396
2397 if (DISPLAY_VER(dev_priv) == 2)
2398 wm_info = &i830_bc_wm_info;
2399
2400 if (DISPLAY_VER(dev_priv) == 2)
2401 fifo_size = i830_get_fifo_size(dev_priv, PLANE_B);
2402 else
2403 fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_B);
2404 crtc = intel_crtc_for_plane(dev_priv, PLANE_B);
2405 if (intel_crtc_active(crtc)) {
2406 const struct drm_display_mode *pipe_mode =
2407 &crtc->config->hw.pipe_mode;
2408 const struct drm_framebuffer *fb =
2409 crtc->base.primary->state->fb;
2410 int cpp;
2411
2412 if (DISPLAY_VER(dev_priv) == 2)
2413 cpp = 4;
2414 else
2415 cpp = fb->format->cpp[0];
2416
2417 planeb_wm = intel_calculate_wm(pipe_mode->crtc_clock,
2418 wm_info, fifo_size, cpp,
2419 pessimal_latency_ns);
2420 if (enabled == NULL)
2421 enabled = crtc;
2422 else
2423 enabled = NULL;
2424 } else {
2425 planeb_wm = fifo_size - wm_info->guard_size;
2426 if (planeb_wm > (long)wm_info->max_wm)
2427 planeb_wm = wm_info->max_wm;
2428 }
2429
2430 drm_dbg_kms(&dev_priv->drm,
2431 "FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
2432
2433 if (IS_I915GM(dev_priv) && enabled) {
2434 struct drm_i915_gem_object *obj;
2435
2436 obj = intel_fb_obj(enabled->base.primary->state->fb);
2437
		/* self-refresh seems busted with untiled */
2439 if (!i915_gem_object_is_tiled(obj))
2440 enabled = NULL;
2441 }
2442
	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
2446 cwm = 2;
2447
	/* Play safe and disable self-refresh before adjusting watermarks. */
2449 intel_set_memory_cxsr(dev_priv, false);
2450
	/* Calc sr entries for one plane configs */
2452 if (HAS_FW_BLC(dev_priv) && enabled) {
		/* self-refresh has much higher latency */
2454 static const int sr_latency_ns = 6000;
2455 const struct drm_display_mode *pipe_mode =
2456 &enabled->config->hw.pipe_mode;
2457 const struct drm_framebuffer *fb =
2458 enabled->base.primary->state->fb;
2459 int clock = pipe_mode->crtc_clock;
2460 int htotal = pipe_mode->crtc_htotal;
2461 int hdisplay = enabled->config->pipe_src_w;
2462 int cpp;
2463 int entries;
2464
2465 if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
2466 cpp = 4;
2467 else
2468 cpp = fb->format->cpp[0];
2469
2470 entries = intel_wm_method2(clock, htotal, hdisplay, cpp,
2471 sr_latency_ns / 100);
2472 entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
2473 drm_dbg_kms(&dev_priv->drm,
2474 "self-refresh entries: %d\n", entries);
2475 srwm = wm_info->fifo_size - entries;
2476 if (srwm < 0)
2477 srwm = 1;
2478
2479 if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
2480 intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF,
2481 FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
2482 else
2483 intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, srwm & 0x3f);
2484 }
2485
2486 drm_dbg_kms(&dev_priv->drm,
2487 "Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
2488 planea_wm, planeb_wm, cwm, srwm);
2489
2490 fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
2491 fwater_hi = (cwm & 0x1f);
2492
	/* Set request length to 8 cachelines per fetch */
2494 fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
2495 fwater_hi = fwater_hi | (1 << 8);
2496
2497 intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
2498 intel_uncore_write(&dev_priv->uncore, FW_BLC2, fwater_hi);
2499
2500 if (enabled)
2501 intel_set_memory_cxsr(dev_priv, true);
2502}
2503
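/* Update the plane A watermark for single-pipe i845/i865 hardware */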
2504static void i845_update_wm(struct drm_i915_private *dev_priv)
2505{
2506 struct intel_crtc *crtc;
2507 const struct drm_display_mode *pipe_mode;
2508 u32 fwater_lo;
2509 int planea_wm;
2510
2511 crtc = single_enabled_crtc(dev_priv);
2512 if (crtc == NULL)
2513 return;
2514
2515 pipe_mode = &crtc->config->hw.pipe_mode;
2516 planea_wm = intel_calculate_wm(pipe_mode->crtc_clock,
2517 &i845_wm_info,
2518 i845_get_fifo_size(dev_priv, PLANE_A),
2519 4, pessimal_latency_ns);
2520 fwater_lo = intel_uncore_read(&dev_priv->uncore, FW_BLC) & ~0xfff;
2521 fwater_lo |= (3<<8) | planea_wm;
2522
2523 drm_dbg_kms(&dev_priv->drm,
2524 "Setting FIFO watermarks - A: %d\n", planea_wm);
2525
2526 intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
2527}
2528
/* latency must be in 0.1us units. */
2530static unsigned int ilk_wm_method1(unsigned int pixel_rate,
2531 unsigned int cpp,
2532 unsigned int latency)
2533{
2534 unsigned int ret;
2535
2536 ret = intel_wm_method1(pixel_rate, cpp, latency);
2537 ret = DIV_ROUND_UP(ret, 64) + 2;
2538
2539 return ret;
2540}
2541
/* latency must be in 0.1us units. */
2543static unsigned int ilk_wm_method2(unsigned int pixel_rate,
2544 unsigned int htotal,
2545 unsigned int width,
2546 unsigned int cpp,
2547 unsigned int latency)
2548{
2549 unsigned int ret;
2550
2551 ret = intel_wm_method2(pixel_rate, htotal,
2552 width, cpp, latency);
2553 ret = DIV_ROUND_UP(ret, 64) + 2;
2554
2555 return ret;
2556}
2557
2558static u32 ilk_wm_fbc(u32 pri_val, u32 horiz_pixels, u8 cpp)
2559{
	/*
	 * Neither of these should be possible since this function shouldn't
	 * be called if the CRTC is off or the plane is invisible. But let's
	 * be extra paranoid to avoid a potential divide-by-zero if we screw
	 * up the pre-conditions.
	 */
2566 if (WARN_ON(!cpp))
2567 return 0;
2568 if (WARN_ON(!horiz_pixels))
2569 return 0;
2570
2571 return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
2572}
2573
2574struct ilk_wm_maximums {
2575 u16 pri;
2576 u16 spr;
2577 u16 cur;
2578 u16 fbc;
2579};
2580
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
2585static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state,
2586 const struct intel_plane_state *plane_state,
2587 u32 mem_value, bool is_lp)
2588{
2589 u32 method1, method2;
2590 int cpp;
2591
2592 if (mem_value == 0)
2593 return U32_MAX;
2594
2595 if (!intel_wm_plane_visible(crtc_state, plane_state))
2596 return 0;
2597
2598 cpp = plane_state->hw.fb->format->cpp[0];
2599
2600 method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
2601
2602 if (!is_lp)
2603 return method1;
2604
2605 method2 = ilk_wm_method2(crtc_state->pixel_rate,
2606 crtc_state->hw.pipe_mode.crtc_htotal,
2607 drm_rect_width(&plane_state->uapi.dst),
2608 cpp, mem_value);
2609
2610 return min(method1, method2);
2611}
2612
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
2617static u32 ilk_compute_spr_wm(const struct intel_crtc_state *crtc_state,
2618 const struct intel_plane_state *plane_state,
2619 u32 mem_value)
2620{
2621 u32 method1, method2;
2622 int cpp;
2623
2624 if (mem_value == 0)
2625 return U32_MAX;
2626
2627 if (!intel_wm_plane_visible(crtc_state, plane_state))
2628 return 0;
2629
2630 cpp = plane_state->hw.fb->format->cpp[0];
2631
2632 method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
2633 method2 = ilk_wm_method2(crtc_state->pixel_rate,
2634 crtc_state->hw.pipe_mode.crtc_htotal,
2635 drm_rect_width(&plane_state->uapi.dst),
2636 cpp, mem_value);
2637 return min(method1, method2);
2638}
2639
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
2644static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state,
2645 const struct intel_plane_state *plane_state,
2646 u32 mem_value)
2647{
2648 int cpp;
2649
2650 if (mem_value == 0)
2651 return U32_MAX;
2652
2653 if (!intel_wm_plane_visible(crtc_state, plane_state))
2654 return 0;
2655
2656 cpp = plane_state->hw.fb->format->cpp[0];
2657
2658 return ilk_wm_method2(crtc_state->pixel_rate,
2659 crtc_state->hw.pipe_mode.crtc_htotal,
2660 drm_rect_width(&plane_state->uapi.dst),
2661 cpp, mem_value);
2662}
2663
/* Only for WM_LP. */
2665static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
2666 const struct intel_plane_state *plane_state,
2667 u32 pri_val)
2668{
2669 int cpp;
2670
2671 if (!intel_wm_plane_visible(crtc_state, plane_state))
2672 return 0;
2673
2674 cpp = plane_state->hw.fb->format->cpp[0];
2675
2676 return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->uapi.dst),
2677 cpp);
2678}
2679
2680static unsigned int
2681ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
2682{
2683 if (DISPLAY_VER(dev_priv) >= 8)
2684 return 3072;
2685 else if (DISPLAY_VER(dev_priv) >= 7)
2686 return 768;
2687 else
2688 return 512;
2689}
2690
2691static unsigned int
2692ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
2693 int level, bool is_sprite)
2694{
2695 if (DISPLAY_VER(dev_priv) >= 8)
		/* BDW primary/sprite plane watermarks */
2697 return level == 0 ? 255 : 2047;
2698 else if (DISPLAY_VER(dev_priv) >= 7)
		/* IVB/HSW primary/sprite plane watermarks */
2700 return level == 0 ? 127 : 1023;
2701 else if (!is_sprite)
		/* ILK/SNB primary plane watermarks */
2703 return level == 0 ? 127 : 511;
2704 else
		/* ILK/SNB sprite plane watermarks */
2706 return level == 0 ? 63 : 255;
2707}
2708
2709static unsigned int
2710ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
2711{
2712 if (DISPLAY_VER(dev_priv) >= 7)
2713 return level == 0 ? 63 : 255;
2714 else
2715 return level == 0 ? 31 : 63;
2716}
2717
2718static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
2719{
2720 if (DISPLAY_VER(dev_priv) >= 8)
2721 return 31;
2722 else
2723 return 15;
2724}
2725
/* Calculate the maximum primary/sprite plane watermark */
2727static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
2728 int level,
2729 const struct intel_wm_config *config,
2730 enum intel_ddb_partitioning ddb_partitioning,
2731 bool is_sprite)
2732{
2733 unsigned int fifo_size = ilk_display_fifo_size(dev_priv);
2734
	/* if sprites aren't enabled, sprites get nothing */
2736 if (is_sprite && !config->sprites_enabled)
2737 return 0;
2738
	/* HSW allows LP1+ watermarks even with multiple pipes */
2740 if (level == 0 || config->num_pipes_active > 1) {
2741 fifo_size /= INTEL_NUM_PIPES(dev_priv);
2742
		/*
		 * For some reason the non self refresh
		 * FIFO size is only half of the self
		 * refresh FIFO size on ILK/SNB.
		 */
2748 if (DISPLAY_VER(dev_priv) <= 6)
2749 fifo_size /= 2;
2750 }
2751
2752 if (config->sprites_enabled) {
		/* level 0 is always calculated with 1:1 split */
2754 if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
2755 if (is_sprite)
2756 fifo_size *= 5;
2757 fifo_size /= 6;
2758 } else {
2759 fifo_size /= 2;
2760 }
2761 }
2762
	/* clamp to max that the registers can hold */
2764 return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite));
2765}
2766
/* Calculate the maximum cursor plane watermark */
2768static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv,
2769 int level,
2770 const struct intel_wm_config *config)
2771{
	/* HSW LP1+ watermarks only support 64 pixels */
2773 if (level > 0 && config->num_pipes_active > 1)
2774 return 64;
2775
	/* otherwise, limited only by the register maximums */
2777 return ilk_cursor_wm_reg_max(dev_priv, level);
2778}
2779
2780static void ilk_compute_wm_maximums(const struct drm_i915_private *dev_priv,
2781 int level,
2782 const struct intel_wm_config *config,
2783 enum intel_ddb_partitioning ddb_partitioning,
2784 struct ilk_wm_maximums *max)
2785{
2786 max->pri = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, false);
2787 max->spr = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, true);
2788 max->cur = ilk_cursor_wm_max(dev_priv, level, config);
2789 max->fbc = ilk_fbc_wm_reg_max(dev_priv);
2790}
2791
2792static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
2793 int level,
2794 struct ilk_wm_maximums *max)
2795{
2796 max->pri = ilk_plane_wm_reg_max(dev_priv, level, false);
2797 max->spr = ilk_plane_wm_reg_max(dev_priv, level, true);
2798 max->cur = ilk_cursor_wm_reg_max(dev_priv, level);
2799 max->fbc = ilk_fbc_wm_reg_max(dev_priv);
2800}
2801
2802static bool ilk_validate_wm_level(int level,
2803 const struct ilk_wm_maximums *max,
2804 struct intel_wm_level *result)
2805{
2806 bool ret;
2807
	/* already determined to be invalid? */
2809 if (!result->enable)
2810 return false;
2811
2812 result->enable = result->pri_val <= max->pri &&
2813 result->spr_val <= max->spr &&
2814 result->cur_val <= max->cur;
2815
2816 ret = result->enable;
2817
2818
	/*
	 * HACK until we can pre-compute everything,
	 * and thus fail gracefully if LP0 watermarks
	 * are exceeded...
	 */
2823 if (level == 0 && !result->enable) {
2824 if (result->pri_val > max->pri)
2825 DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
2826 level, result->pri_val, max->pri);
2827 if (result->spr_val > max->spr)
2828 DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
2829 level, result->spr_val, max->spr);
2830 if (result->cur_val > max->cur)
2831 DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
2832 level, result->cur_val, max->cur);
2833
2834 result->pri_val = min_t(u32, result->pri_val, max->pri);
2835 result->spr_val = min_t(u32, result->spr_val, max->spr);
2836 result->cur_val = min_t(u32, result->cur_val, max->cur);
2837 result->enable = true;
2838 }
2839
2840 return ret;
2841}
2842
2843static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
2844 const struct intel_crtc *crtc,
2845 int level,
2846 struct intel_crtc_state *crtc_state,
2847 const struct intel_plane_state *pristate,
2848 const struct intel_plane_state *sprstate,
2849 const struct intel_plane_state *curstate,
2850 struct intel_wm_level *result)
2851{
2852 u16 pri_latency = dev_priv->wm.pri_latency[level];
2853 u16 spr_latency = dev_priv->wm.spr_latency[level];
2854 u16 cur_latency = dev_priv->wm.cur_latency[level];
2855
	/* WM1+ latency values stored in 0.5us units */
2857 if (level > 0) {
2858 pri_latency *= 5;
2859 spr_latency *= 5;
2860 cur_latency *= 5;
2861 }
2862
2863 if (pristate) {
2864 result->pri_val = ilk_compute_pri_wm(crtc_state, pristate,
2865 pri_latency, level);
2866 result->fbc_val = ilk_compute_fbc_wm(crtc_state, pristate, result->pri_val);
2867 }
2868
2869 if (sprstate)
2870 result->spr_val = ilk_compute_spr_wm(crtc_state, sprstate, spr_latency);
2871
2872 if (curstate)
2873 result->cur_val = ilk_compute_cur_wm(crtc_state, curstate, cur_latency);
2874
2875 result->enable = true;
2876}
2877
2878static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
2879 u16 wm[8])
2880{
2881 struct intel_uncore *uncore = &dev_priv->uncore;
2882
2883 if (DISPLAY_VER(dev_priv) >= 9) {
2884 u32 val;
2885 int ret, i;
2886 int level, max_level = ilk_wm_max_level(dev_priv);
2887 int mult = IS_DG2(dev_priv) ? 2 : 1;
2888
		/* read the first set of memory latencies[0:3] */
2890 val = 0;
2891 ret = sandybridge_pcode_read(dev_priv,
2892 GEN9_PCODE_READ_MEM_LATENCY,
2893 &val, NULL);
2894
2895 if (ret) {
2896 drm_err(&dev_priv->drm,
2897 "SKL Mailbox read error = %d\n", ret);
2898 return;
2899 }
2900
2901 wm[0] = (val & GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
2902 wm[1] = ((val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2903 GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
2904 wm[2] = ((val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2905 GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
2906 wm[3] = ((val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2907 GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
2908
		/* read the second set of memory latencies[4:7] */
2910 val = 1;
2911 ret = sandybridge_pcode_read(dev_priv,
2912 GEN9_PCODE_READ_MEM_LATENCY,
2913 &val, NULL);
2914 if (ret) {
2915 drm_err(&dev_priv->drm,
2916 "SKL Mailbox read error = %d\n", ret);
2917 return;
2918 }
2919
2920 wm[4] = (val & GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
2921 wm[5] = ((val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2922 GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
2923 wm[6] = ((val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2924 GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
2925 wm[7] = ((val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2926 GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
2927
		/*
		 * If a level n (n > 1) has a 0us latency, all levels m
		 * (m >= n) need to be disabled. We make sure to sanitize
		 * the values out of the punit to satisfy this requirement.
		 */
2933 for (level = 1; level <= max_level; level++) {
2934 if (wm[level] == 0) {
2935 for (i = level + 1; i <= max_level; i++)
2936 wm[i] = 0;
2937
2938 max_level = level - 1;
2939
2940 break;
2941 }
2942 }
2943
		/*
		 * WaWmMemoryReadLatency
		 *
		 * punit doesn't take into account the read latency, so we
		 * need to add a proper adjustment to each valid level we
		 * retrieve from the punit when level 0 response data is 0us.
		 */
2951 if (wm[0] == 0) {
2952 u8 adjust = DISPLAY_VER(dev_priv) >= 12 ? 3 : 2;
2953
2954 for (level = 0; level <= max_level; level++)
2955 wm[level] += adjust;
2956 }
2957
		/*
		 * WA Level-0 adjustment for 16GB DIMMs: SKL+
		 * If we could not get dimm info, enable this WA to prevent
		 * any underrun. If not able to get DIMM info, assume a 16GB
		 * dimm to avoid any underrun.
		 */
2964 if (dev_priv->dram_info.wm_lv_0_adjust_needed)
2965 wm[0] += 1;
2966 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2967 u64 sskpd = intel_uncore_read64(uncore, MCH_SSKPD);
2968
2969 wm[0] = (sskpd >> 56) & 0xFF;
2970 if (wm[0] == 0)
2971 wm[0] = sskpd & 0xF;
2972 wm[1] = (sskpd >> 4) & 0xFF;
2973 wm[2] = (sskpd >> 12) & 0xFF;
2974 wm[3] = (sskpd >> 20) & 0x1FF;
2975 wm[4] = (sskpd >> 32) & 0x1FF;
2976 } else if (DISPLAY_VER(dev_priv) >= 6) {
2977 u32 sskpd = intel_uncore_read(uncore, MCH_SSKPD);
2978
2979 wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
2980 wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
2981 wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
2982 wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
2983 } else if (DISPLAY_VER(dev_priv) >= 5) {
2984 u32 mltr = intel_uncore_read(uncore, MLTR_ILK);
2985
		/* ILK primary LP0 latency is 700 ns */
2987 wm[0] = 7;
2988 wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
2989 wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
2990 } else {
2991 MISSING_CASE(INTEL_DEVID(dev_priv));
2992 }
2993}
2994
2995static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
2996 u16 wm[5])
2997{
	/* ILK sprite LP0 latency is 1300 ns */
2999 if (DISPLAY_VER(dev_priv) == 5)
3000 wm[0] = 13;
3001}
3002
3003static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
3004 u16 wm[5])
3005{
	/* ILK cursor LP0 latency is 1300 ns */
3007 if (DISPLAY_VER(dev_priv) == 5)
3008 wm[0] = 13;
3009}
3010
3011int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
3012{
	/* how many WM levels are we expecting */
3014 if (HAS_HW_SAGV_WM(dev_priv))
3015 return 5;
3016 else if (DISPLAY_VER(dev_priv) >= 9)
3017 return 7;
3018 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
3019 return 4;
3020 else if (DISPLAY_VER(dev_priv) >= 6)
3021 return 3;
3022 else
3023 return 2;
3024}
3025
3026static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
3027 const char *name,
3028 const u16 wm[])
3029{
3030 int level, max_level = ilk_wm_max_level(dev_priv);
3031
3032 for (level = 0; level <= max_level; level++) {
3033 unsigned int latency = wm[level];
3034
3035 if (latency == 0) {
3036 drm_dbg_kms(&dev_priv->drm,
3037 "%s WM%d latency not provided\n",
3038 name, level);
3039 continue;
3040 }
3041
		/*
		 * - latencies are in us on gen9.
		 * - before then, WM1+ latency values are in 0.5us units
		 */
3046 if (DISPLAY_VER(dev_priv) >= 9)
3047 latency *= 10;
3048 else if (level > 0)
3049 latency *= 5;
3050
3051 drm_dbg_kms(&dev_priv->drm,
3052 "%s WM%d latency %u (%u.%u usec)\n", name, level,
3053 wm[level], latency / 10, latency % 10);
3054 }
3055}
3056
3057static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
3058 u16 wm[5], u16 min)
3059{
3060 int level, max_level = ilk_wm_max_level(dev_priv);
3061
3062 if (wm[0] >= min)
3063 return false;
3064
3065 wm[0] = max(wm[0], min);
3066 for (level = 1; level <= max_level; level++)
3067 wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5));
3068
3069 return true;
3070}
3071
3072static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
3073{
3074 bool changed;
3075
	/*
	 * The BIOS provided WM memory latency values are often
	 * inadequate for high resolution displays. Adjust them.
	 */
3080 changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12);
3081 changed |= ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12);
3082 changed |= ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
3083
3084 if (!changed)
3085 return;
3086
3087 drm_dbg_kms(&dev_priv->drm,
3088 "WM latency values increased to avoid potential underruns\n");
3089 intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
3090 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
3091 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
3092}
3093
3094static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
3095{
	/*
	 * On some SNB machines (Thinkpad X220 Tablet at least)
	 * LP3 usage can cause vblank interrupts to be lost.
	 * The DEIIR bit will go high but it looks like the CPU
	 * never gets interrupted.
	 *
	 * Disabling the LP3 watermark level avoids the problem, so
	 * zero out the LP3 latencies to keep LP3 from ever being
	 * used. If they are already all zero there is nothing to do.
	 */
3107 if (dev_priv->wm.pri_latency[3] == 0 &&
3108 dev_priv->wm.spr_latency[3] == 0 &&
3109 dev_priv->wm.cur_latency[3] == 0)
3110 return;
3111
3112 dev_priv->wm.pri_latency[3] = 0;
3113 dev_priv->wm.spr_latency[3] = 0;
3114 dev_priv->wm.cur_latency[3] = 0;
3115
3116 drm_dbg_kms(&dev_priv->drm,
3117 "LP3 watermarks disabled due to potential for lost interrupts\n");
3118 intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
3119 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
3120 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
3121}
3122
3123static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
3124{
3125 intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
3126
3127 memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
3128 sizeof(dev_priv->wm.pri_latency));
3129 memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
3130 sizeof(dev_priv->wm.pri_latency));
3131
3132 intel_fixup_spr_wm_latency(dev_priv, dev_priv->wm.spr_latency);
3133 intel_fixup_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency);
3134
3135 intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
3136 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
3137 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
3138
3139 if (DISPLAY_VER(dev_priv) == 6) {
3140 snb_wm_latency_quirk(dev_priv);
3141 snb_wm_lp3_irq_quirk(dev_priv);
3142 }
3143}
3144
3145static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
3146{
3147 intel_read_wm_latency(dev_priv, dev_priv->wm.skl_latency);
3148 intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency);
3149}
3150
3151static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
3152 struct intel_pipe_wm *pipe_wm)
3153{
	/* LP0 watermark maximums depend on this pipe alone */
3155 const struct intel_wm_config config = {
3156 .num_pipes_active = 1,
3157 .sprites_enabled = pipe_wm->sprites_enabled,
3158 .sprites_scaled = pipe_wm->sprites_scaled,
3159 };
3160 struct ilk_wm_maximums max;
3161
	/* LP0 watermarks always use 1/2 DDB partitioning */
3163 ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max);
3164
	/* At least LP0 must be valid */
3166 if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
3167 drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n");
3168 return false;
3169 }
3170
3171 return true;
3172}
3173
/* Compute new watermarks for the pipe */
3175static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
3176 struct intel_crtc *crtc)
3177{
3178 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3179 struct intel_crtc_state *crtc_state =
3180 intel_atomic_get_new_crtc_state(state, crtc);
3181 struct intel_pipe_wm *pipe_wm;
3182 struct intel_plane *plane;
3183 const struct intel_plane_state *plane_state;
3184 const struct intel_plane_state *pristate = NULL;
3185 const struct intel_plane_state *sprstate = NULL;
3186 const struct intel_plane_state *curstate = NULL;
3187 int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
3188 struct ilk_wm_maximums max;
3189
3190 pipe_wm = &crtc_state->wm.ilk.optimal;
3191
3192 intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
3193 if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
3194 pristate = plane_state;
3195 else if (plane->base.type == DRM_PLANE_TYPE_OVERLAY)
3196 sprstate = plane_state;
3197 else if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
3198 curstate = plane_state;
3199 }
3200
3201 pipe_wm->pipe_enabled = crtc_state->hw.active;
3202 if (sprstate) {
3203 pipe_wm->sprites_enabled = sprstate->uapi.visible;
3204 pipe_wm->sprites_scaled = sprstate->uapi.visible &&
3205 (drm_rect_width(&sprstate->uapi.dst) != drm_rect_width(&sprstate->uapi.src) >> 16 ||
3206 drm_rect_height(&sprstate->uapi.dst) != drm_rect_height(&sprstate->uapi.src) >> 16);
3207 }
3208
3209 usable_level = max_level;
3210
	/* ILK/SNB: LP2+ watermarks only w/o sprites */
3212 if (DISPLAY_VER(dev_priv) <= 6 && pipe_wm->sprites_enabled)
3213 usable_level = 1;
3214
	/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
3216 if (pipe_wm->sprites_scaled)
3217 usable_level = 0;
3218
3219 memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
3220 ilk_compute_wm_level(dev_priv, crtc, 0, crtc_state,
3221 pristate, sprstate, curstate, &pipe_wm->wm[0]);
3222
3223 if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))
3224 return -EINVAL;
3225
3226 ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
3227
3228 for (level = 1; level <= usable_level; level++) {
3229 struct intel_wm_level *wm = &pipe_wm->wm[level];
3230
3231 ilk_compute_wm_level(dev_priv, crtc, level, crtc_state,
3232 pristate, sprstate, curstate, wm);
3233
		/*
		 * Disable any watermark level that exceeds the
		 * register maximums since such watermarks are
		 * always invalid.
		 */
3239 if (!ilk_validate_wm_level(level, &max, wm)) {
3240 memset(wm, 0, sizeof(*wm));
3241 break;
3242 }
3243 }
3244
3245 return 0;
3246}
3247
/*
 * Build a set of 'intermediate' watermark values that satisfy both the
 * old state and the new state. These can be programmed to the hardware
 * immediately.
 */
3253static int ilk_compute_intermediate_wm(struct intel_atomic_state *state,
3254 struct intel_crtc *crtc)
3255{
3256 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3257 struct intel_crtc_state *new_crtc_state =
3258 intel_atomic_get_new_crtc_state(state, crtc);
3259 const struct intel_crtc_state *old_crtc_state =
3260 intel_atomic_get_old_crtc_state(state, crtc);
3261 struct intel_pipe_wm *a = &new_crtc_state->wm.ilk.intermediate;
3262 const struct intel_pipe_wm *b = &old_crtc_state->wm.ilk.optimal;
3263 int level, max_level = ilk_wm_max_level(dev_priv);
3264
	/*
	 * Start with the final, target watermarks, then combine with the
	 * currently active watermarks to get values that are safe both
	 * before and after the vblank.
	 */
3270 *a = new_crtc_state->wm.ilk.optimal;
3271 if (!new_crtc_state->hw.active ||
3272 drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi) ||
3273 state->skip_intermediate_wm)
3274 return 0;
3275
3276 a->pipe_enabled |= b->pipe_enabled;
3277 a->sprites_enabled |= b->sprites_enabled;
3278 a->sprites_scaled |= b->sprites_scaled;
3279
3280 for (level = 0; level <= max_level; level++) {
3281 struct intel_wm_level *a_wm = &a->wm[level];
3282 const struct intel_wm_level *b_wm = &b->wm[level];
3283
3284 a_wm->enable &= b_wm->enable;
3285 a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
3286 a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
3287 a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
3288 a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
3289 }
3290
	/*
	 * We need to make sure that these merged watermark values are
	 * actually a valid configuration themselves.  If they're not,
	 * there's no safe way to transition from the old state to
	 * the new state, so we need to fail the atomic transaction.
	 */
3297 if (!ilk_validate_pipe_wm(dev_priv, a))
3298 return -EINVAL;
3299
	/*
	 * If our intermediate WM are identical to the final WM, then we can
	 * omit the post-vblank programming; only update watermarks if things
	 * have changed.
	 */
3304 if (memcmp(a, &new_crtc_state->wm.ilk.optimal, sizeof(*a)) != 0)
3305 new_crtc_state->wm.need_postvbl_update = true;
3306
3307 return 0;
3308}
3309
/*
 * Merge the watermarks from all active pipes for a specific level.
 */
3313static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
3314 int level,
3315 struct intel_wm_level *ret_wm)
3316{
3317 const struct intel_crtc *crtc;
3318
3319 ret_wm->enable = true;
3320
3321 for_each_intel_crtc(&dev_priv->drm, crtc) {
3322 const struct intel_pipe_wm *active = &crtc->wm.active.ilk;
3323 const struct intel_wm_level *wm = &active->wm[level];
3324
3325 if (!active->pipe_enabled)
3326 continue;
3327
		/*
		 * The watermark values may have been used in the past,
		 * so we must maintain them in the registers for some
		 * time even if the level is now disabled.
		 */
3333 if (!wm->enable)
3334 ret_wm->enable = false;
3335
3336 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
3337 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
3338 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
3339 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
3340 }
3341}
3342
/*
 * Merge all pipe watermarks into a set of max watermarks
 */
3346static void ilk_wm_merge(struct drm_i915_private *dev_priv,
3347 const struct intel_wm_config *config,
3348 const struct ilk_wm_maximums *max,
3349 struct intel_pipe_wm *merged)
3350{
3351 int level, max_level = ilk_wm_max_level(dev_priv);
3352 int last_enabled_level = max_level;
3353
	/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
3355 if ((DISPLAY_VER(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
3356 config->num_pipes_active > 1)
3357 last_enabled_level = 0;
3358
	/* ILK: FBC WM must be disabled always */
3360 merged->fbc_wm_enabled = DISPLAY_VER(dev_priv) >= 6;
3361
	/* merge each WM1+ level */
3363 for (level = 1; level <= max_level; level++) {
3364 struct intel_wm_level *wm = &merged->wm[level];
3365
3366 ilk_merge_wm_level(dev_priv, level, wm);
3367
3368 if (level > last_enabled_level)
3369 wm->enable = false;
3370 else if (!ilk_validate_wm_level(level, max, wm))
			/* make sure all following levels get disabled */
3372 last_enabled_level = level - 1;
3373
		/*
		 * The spec says it is preferred to disable
		 * FBC WMs instead of disabling a WM level.
		 */
3378 if (wm->fbc_val > max->fbc) {
3379 if (wm->enable)
3380 merged->fbc_wm_enabled = false;
3381 wm->fbc_val = 0;
3382 }
3383 }
3384
	/* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
3386 if (DISPLAY_VER(dev_priv) == 5 && HAS_FBC(dev_priv) &&
3387 dev_priv->params.enable_fbc && !merged->fbc_wm_enabled) {
3388 for (level = 2; level <= max_level; level++) {
3389 struct intel_wm_level *wm = &merged->wm[level];
3390
3391 wm->enable = false;
3392 }
3393 }
3394}
3395
3396static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
3397{
	/* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
3399 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
3400}
3401
/* The value we need to program into the WM_LPx latency field */
3403static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv,
3404 int level)
3405{
3406 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
3407 return 2 * level;
3408 else
3409 return dev_priv->wm.pri_latency[level];
3410}
3411
3412static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
3413 const struct intel_pipe_wm *merged,
3414 enum intel_ddb_partitioning partitioning,
3415 struct ilk_wm_values *results)
3416{
3417 struct intel_crtc *crtc;
3418 int level, wm_lp;
3419
3420 results->enable_fbc_wm = merged->fbc_wm_enabled;
3421 results->partitioning = partitioning;
3422
	/* LP1+ register values */
3424 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
3425 const struct intel_wm_level *r;
3426
3427 level = ilk_wm_lp_to_level(wm_lp, merged);
3428
3429 r = &merged->wm[level];
3430
		/*
		 * Maintain the watermark values even if the level is
		 * disabled. Doing otherwise could cause underruns.
		 */
3435 results->wm_lp[wm_lp - 1] =
3436 (ilk_wm_lp_latency(dev_priv, level) << WM1_LP_LATENCY_SHIFT) |
3437 (r->pri_val << WM1_LP_SR_SHIFT) |
3438 r->cur_val;
3439
3440 if (r->enable)
3441 results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
3442
3443 if (DISPLAY_VER(dev_priv) >= 8)
3444 results->wm_lp[wm_lp - 1] |=
3445 r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
3446 else
3447 results->wm_lp[wm_lp - 1] |=
3448 r->fbc_val << WM1_LP_FBC_SHIFT;
3449
		/*
		 * Always set WM1S_LP_EN when spr_val != 0, even if the
		 * level is disabled. Doing otherwise could cause underruns.
		 */
3454 if (DISPLAY_VER(dev_priv) <= 6 && r->spr_val) {
3455 drm_WARN_ON(&dev_priv->drm, wm_lp != 1);
3456 results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
3457 } else
3458 results->wm_lp_spr[wm_lp - 1] = r->spr_val;
3459 }
3460
	/* LP0 register values */
3462 for_each_intel_crtc(&dev_priv->drm, crtc) {
3463 enum pipe pipe = crtc->pipe;
3464 const struct intel_pipe_wm *pipe_wm = &crtc->wm.active.ilk;
3465 const struct intel_wm_level *r = &pipe_wm->wm[0];
3466
3467 if (drm_WARN_ON(&dev_priv->drm, !r->enable))
3468 continue;
3469
3470 results->wm_pipe[pipe] =
3471 (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
3472 (r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
3473 r->cur_val;
3474 }
3475}
3476
/* Find the result with the highest level enabled. Check for enable_fbc_wm in
 * case both are at the same level. Prefer r1 in case they're the same. */
3479static struct intel_pipe_wm *
3480ilk_find_best_result(struct drm_i915_private *dev_priv,
3481 struct intel_pipe_wm *r1,
3482 struct intel_pipe_wm *r2)
3483{
3484 int level, max_level = ilk_wm_max_level(dev_priv);
3485 int level1 = 0, level2 = 0;
3486
3487 for (level = 1; level <= max_level; level++) {
3488 if (r1->wm[level].enable)
3489 level1 = level;
3490 if (r2->wm[level].enable)
3491 level2 = level;
3492 }
3493
3494 if (level1 == level2) {
3495 if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
3496 return r2;
3497 else
3498 return r1;
3499 } else if (level1 > level2) {
3500 return r1;
3501 } else {
3502 return r2;
3503 }
3504}
3505
/* dirty bits used to track which watermarks need changes */
3507#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
3508#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
3509#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
3510#define WM_DIRTY_FBC (1 << 24)
3511#define WM_DIRTY_DDB (1 << 25)
3512
3513static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
3514 const struct ilk_wm_values *old,
3515 const struct ilk_wm_values *new)
3516{
3517 unsigned int dirty = 0;
3518 enum pipe pipe;
3519 int wm_lp;
3520
3521 for_each_pipe(dev_priv, pipe) {
3522 if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
3523 dirty |= WM_DIRTY_PIPE(pipe);
			/* Must disable LP watermarks for safe memory latency */
3525 dirty |= WM_DIRTY_LP_ALL;
3526 }
3527 }
3528
3529 if (old->enable_fbc_wm != new->enable_fbc_wm) {
3530 dirty |= WM_DIRTY_FBC;
		/* Must disable LP watermarks for safe memory latency */
3532 dirty |= WM_DIRTY_LP_ALL;
3533 }
3534
3535 if (old->partitioning != new->partitioning) {
3536 dirty |= WM_DIRTY_DDB;
		/* Must disable LP watermarks for safe memory latency */
3538 dirty |= WM_DIRTY_LP_ALL;
3539 }
3540
	/* LP1+ watermarks already deemed dirty, no need to continue */
3542 if (dirty & WM_DIRTY_LP_ALL)
3543 return dirty;
3544
	/* Find the lowest numbered LP1+ watermark in need of an update... */
3546 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
3547 if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
3548 old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
3549 break;
3550 }
3551
	/* ...and mark it and all higher numbered LP1+ watermarks as dirty */
3553 for (; wm_lp <= 3; wm_lp++)
3554 dirty |= WM_DIRTY_LP(wm_lp);
3555
3556 return dirty;
3557}
3558
3559static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
3560 unsigned int dirty)
3561{
3562 struct ilk_wm_values *previous = &dev_priv->wm.hw;
3563 bool changed = false;
3564
3565 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
3566 previous->wm_lp[2] &= ~WM1_LP_SR_EN;
3567 intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, previous->wm_lp[2]);
3568 changed = true;
3569 }
3570 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
3571 previous->wm_lp[1] &= ~WM1_LP_SR_EN;
3572 intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, previous->wm_lp[1]);
3573 changed = true;
3574 }
3575 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
3576 previous->wm_lp[0] &= ~WM1_LP_SR_EN;
3577 intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, previous->wm_lp[0]);
3578 changed = true;
3579 }
3580
	/*
	 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */

3586 return changed;
3587}
3588
/*
 * The spec says we shouldn't write when we don't need, because every write
 * causes WMs to be re-evaluated, expending some power.
 */
3593static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
3594 struct ilk_wm_values *results)
3595{
3596 struct ilk_wm_values *previous = &dev_priv->wm.hw;
3597 unsigned int dirty;
3598 u32 val;
3599
3600 dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
3601 if (!dirty)
3602 return;
3603
3604 _ilk_disable_lp_wm(dev_priv, dirty);
3605
3606 if (dirty & WM_DIRTY_PIPE(PIPE_A))
3607 intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]);
3608 if (dirty & WM_DIRTY_PIPE(PIPE_B))
3609 intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]);
3610 if (dirty & WM_DIRTY_PIPE(PIPE_C))
3611 intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]);
3612
3613 if (dirty & WM_DIRTY_DDB) {
3614 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
3615 val = intel_uncore_read(&dev_priv->uncore, WM_MISC);
3616 if (results->partitioning == INTEL_DDB_PART_1_2)
3617 val &= ~WM_MISC_DATA_PARTITION_5_6;
3618 else
3619 val |= WM_MISC_DATA_PARTITION_5_6;
3620 intel_uncore_write(&dev_priv->uncore, WM_MISC, val);
3621 } else {
3622 val = intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2);
3623 if (results->partitioning == INTEL_DDB_PART_1_2)
3624 val &= ~DISP_DATA_PARTITION_5_6;
3625 else
3626 val |= DISP_DATA_PARTITION_5_6;
3627 intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL2, val);
3628 }
3629 }
3630
3631 if (dirty & WM_DIRTY_FBC) {
3632 val = intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL);
3633 if (results->enable_fbc_wm)
3634 val &= ~DISP_FBC_WM_DIS;
3635 else
3636 val |= DISP_FBC_WM_DIS;
3637 intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, val);
3638 }
3639
3640 if (dirty & WM_DIRTY_LP(1) &&
3641 previous->wm_lp_spr[0] != results->wm_lp_spr[0])
3642 intel_uncore_write(&dev_priv->uncore, WM1S_LP_ILK, results->wm_lp_spr[0]);
3643
3644 if (DISPLAY_VER(dev_priv) >= 7) {
3645 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
3646 intel_uncore_write(&dev_priv->uncore, WM2S_LP_IVB, results->wm_lp_spr[1]);
3647 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
3648 intel_uncore_write(&dev_priv->uncore, WM3S_LP_IVB, results->wm_lp_spr[2]);
3649 }
3650
3651 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
3652 intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, results->wm_lp[0]);
3653 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
3654 intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, results->wm_lp[1]);
3655 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
3656 intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, results->wm_lp[2]);
3657
3658 dev_priv->wm.hw = *results;
3659}
3660
3661bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv)
3662{
3663 return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
3664}
3665
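/* Read back which DBUF slices the hardware currently has powered up */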
3666u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv)
3667{
3668 u8 enabled_slices = 0;
3669 enum dbuf_slice slice;
3670
3671 for_each_dbuf_slice(dev_priv, slice) {
3672 if (intel_uncore_read(&dev_priv->uncore,
3673 DBUF_CTL_S(slice)) & DBUF_POWER_STATE)
3674 enabled_slices |= BIT(slice);
3675 }
3676
3677 return enabled_slices;
3678}
3679
/*
 * FIXME: We still don't have the proper code to detect if we need to apply
 * the WA, so assume we'll always need it in order to avoid underruns.
 */
3684static bool skl_needs_memory_bw_wa(struct drm_i915_private *dev_priv)
3685{
3686 return DISPLAY_VER(dev_priv) == 9;
3687}
3688
3689static bool
3690intel_has_sagv(struct drm_i915_private *dev_priv)
3691{
3692 return DISPLAY_VER(dev_priv) >= 9 && !IS_LP(dev_priv) &&
3693 dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED;
3694}
3695
3696static void
3697skl_setup_sagv_block_time(struct drm_i915_private *dev_priv)
3698{
3699 if (DISPLAY_VER(dev_priv) >= 12) {
3700 u32 val = 0;
3701 int ret;
3702
3703 ret = sandybridge_pcode_read(dev_priv,
3704 GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
3705 &val, NULL);
3706 if (!ret) {
3707 dev_priv->sagv_block_time_us = val;
3708 return;
3709 }
3710
3711 drm_dbg(&dev_priv->drm, "Couldn't read SAGV block time!\n");
3712 } else if (DISPLAY_VER(dev_priv) == 11) {
3713 dev_priv->sagv_block_time_us = 10;
3714 return;
3715 } else if (DISPLAY_VER(dev_priv) == 10) {
3716 dev_priv->sagv_block_time_us = 20;
3717 return;
3718 } else if (DISPLAY_VER(dev_priv) == 9) {
3719 dev_priv->sagv_block_time_us = 30;
3720 return;
3721 } else {
3722 MISSING_CASE(DISPLAY_VER(dev_priv));
3723 }
3724
	/* Default to an unusable block time */
3726 dev_priv->sagv_block_time_us = -1;
3727}
3728
/*
 * SAGV dynamically adjusts the system agent voltage and clock frequencies
 * depending on power and performance requirements. The display engine access
 * to system memory is blocked during the adjustment time. Because of the
 * blocking time, having this enabled can cause full system hangs and/or pipe
 * underruns if we don't meet all of the following requirements:
 *
 *  - <= 1 pipe enabled
 *  - All planes can enable watermarks for latencies >= SAGV engine block time
 *  - We're not using an interlaced display configuration
 */
3740static int
3741intel_enable_sagv(struct drm_i915_private *dev_priv)
3742{
3743 int ret;
3744
3745 if (!intel_has_sagv(dev_priv))
3746 return 0;
3747
3748 if (dev_priv->sagv_status == I915_SAGV_ENABLED)
3749 return 0;
3750
3751 drm_dbg_kms(&dev_priv->drm, "Enabling SAGV\n");
3752 ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
3753 GEN9_SAGV_ENABLE);
3754
	/* We don't need to wait for SAGV when enabling */

	/*
	 * Some skl systems, pre-release machines in particular,
	 * don't actually have SAGV.
	 */
3761 if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
3762 drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n");
3763 dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
3764 return 0;
3765 } else if (ret < 0) {
3766 drm_err(&dev_priv->drm, "Failed to enable SAGV\n");
3767 return ret;
3768 }
3769
3770 dev_priv->sagv_status = I915_SAGV_ENABLED;
3771 return 0;
3772}
3773
3774static int
3775intel_disable_sagv(struct drm_i915_private *dev_priv)
3776{
3777 int ret;
3778
3779 if (!intel_has_sagv(dev_priv))
3780 return 0;
3781
3782 if (dev_priv->sagv_status == I915_SAGV_DISABLED)
3783 return 0;
3784
3785 drm_dbg_kms(&dev_priv->drm, "Disabling SAGV\n");
3786
3787 ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
3788 GEN9_SAGV_DISABLE,
3789 GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
3790 1);
3791
	/*
	 * Some skl systems, pre-release machines in particular,
	 * don't actually have SAGV.
	 */
3795 if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
3796 drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n");
3797 dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
3798 return 0;
3799 } else if (ret < 0) {
3800 drm_err(&dev_priv->drm, "Failed to disable SAGV (%d)\n", ret);
3801 return ret;
3802 }
3803
3804 dev_priv->sagv_status = I915_SAGV_DISABLED;
3805 return 0;
3806}
3807
3808void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
3809{
3810 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3811 const struct intel_bw_state *new_bw_state;
3812 const struct intel_bw_state *old_bw_state;
3813 u32 new_mask = 0;
3814
	/*
	 * Just return if we can't control SAGV or don't have it.
	 * This is different from the situation when we have SAGV but just
	 * can't afford it due to DBuf limitation - in case SAGV is completely
	 * disabled in a BIOS, we are not even allowed to send a PCode request,
	 * as it will throw an error. So have to check it here.
	 */
3822 if (!intel_has_sagv(dev_priv))
3823 return;
3824
3825 new_bw_state = intel_atomic_get_new_bw_state(state);
3826 if (!new_bw_state)
3827 return;
3828
3829 if (DISPLAY_VER(dev_priv) < 11 && !intel_can_enable_sagv(dev_priv, new_bw_state)) {
3830 intel_disable_sagv(dev_priv);
3831 return;
3832 }
3833
3834 old_bw_state = intel_atomic_get_old_bw_state(state);
3835
	/* Nothing to mask */
3838 if (new_bw_state->qgv_points_mask == old_bw_state->qgv_points_mask)
3839 return;
3840
3841 new_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
3842
	/*
	 * If the new mask is zero - it means there is nothing to mask,
	 * we can only unmask, which should be done in unmask.
	 */
3847 if (!new_mask)
3848 return;
3849
	/*
	 * Restrict required qgv points before updating the configuration.
	 * According to BSpec we can't mask and unmask qgv points at the same
	 * time. Also masking should be done before updating the configuration
	 * and unmasking afterwards.
	 */
3856 icl_pcode_restrict_qgv_points(dev_priv, new_mask);
3857}
3858
3859void intel_sagv_post_plane_update(struct intel_atomic_state *state)
3860{
3861 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3862 const struct intel_bw_state *new_bw_state;
3863 const struct intel_bw_state *old_bw_state;
3864 u32 new_mask = 0;
3865
	/*
	 * Just return if we can't control SAGV or don't have it.
	 * This is different from the situation when we have SAGV but just
	 * can't afford it due to DBuf limitation - in case SAGV is completely
	 * disabled in a BIOS, we are not even allowed to send a PCode request,
	 * as it will throw an error. So have to check it here.
	 */
3873 if (!intel_has_sagv(dev_priv))
3874 return;
3875
3876 new_bw_state = intel_atomic_get_new_bw_state(state);
3877 if (!new_bw_state)
3878 return;
3879
3880 if (DISPLAY_VER(dev_priv) < 11 && intel_can_enable_sagv(dev_priv, new_bw_state)) {
3881 intel_enable_sagv(dev_priv);
3882 return;
3883 }
3884
3885 old_bw_state = intel_atomic_get_old_bw_state(state);
3886
	/* Nothing to unmask */
3889 if (new_bw_state->qgv_points_mask == old_bw_state->qgv_points_mask)
3890 return;
3891
3892 new_mask = new_bw_state->qgv_points_mask;
3893
	/*
	 * Allow required qgv points after updating the configuration.
	 * According to BSpec we can't mask and unmask qgv points at the same
	 * time. Also masking should be done before updating the configuration
	 * and unmasking afterwards.
	 */
3900 icl_pcode_restrict_qgv_points(dev_priv, new_mask);
3901}
3902
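/* Can every enabled plane on this crtc tolerate the SAGV block time? */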
3903static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
3904{
3905 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3906 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3907 enum plane_id plane_id;
3908 int max_level = INT_MAX;
3909
3910 if (!intel_has_sagv(dev_priv))
3911 return false;
3912
3913 if (!crtc_state->hw.active)
3914 return true;
3915
3916 if (crtc_state->hw.pipe_mode.flags & DRM_MODE_FLAG_INTERLACE)
3917 return false;
3918
3919 for_each_plane_id_on_crtc(crtc, plane_id) {
3920 const struct skl_plane_wm *wm =
3921 &crtc_state->wm.skl.optimal.planes[plane_id];
3922 int level;
3923
		/* Skip this plane if it's not enabled */
3925 if (!wm->wm[0].enable)
3926 continue;
3927
		/* Find the highest enabled wm level for this plane */
3929 for (level = ilk_wm_max_level(dev_priv);
3930 !wm->wm[level].enable; --level)
3931 { }
3932
		/* Highest common enabled wm level for all planes */
3934 max_level = min(level, max_level);
3935 }
3936
	/* No enabled planes? */
3938 if (max_level == INT_MAX)
3939 return true;
3940
3941 for_each_plane_id_on_crtc(crtc, plane_id) {
3942 const struct skl_plane_wm *wm =
3943 &crtc_state->wm.skl.optimal.planes[plane_id];
3944
		/*
		 * All enabled planes must have enabled a common wm level that
		 * can tolerate memory latencies higher than sagv_block_time_us
		 */
3949 if (wm->wm[0].enable && !wm->wm[max_level].can_sagv)
3950 return false;
3951 }
3952
3953 return true;
3954}
3955
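/* On TGL+ SAGV is usable only if every enabled plane has a valid SAGV wm0 */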
3956static bool tgl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
3957{
3958 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3959 enum plane_id plane_id;
3960
3961 if (!crtc_state->hw.active)
3962 return true;
3963
3964 for_each_plane_id_on_crtc(crtc, plane_id) {
3965 const struct skl_plane_wm *wm =
3966 &crtc_state->wm.skl.optimal.planes[plane_id];
3967
3968 if (wm->wm[0].enable && !wm->sagv.wm0.enable)
3969 return false;
3970 }
3971
3972 return true;
3973}
3974
3975static bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
3976{
3977 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3978 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3979
3980 if (DISPLAY_VER(dev_priv) >= 12)
3981 return tgl_crtc_can_enable_sagv(crtc_state);
3982 else
3983 return skl_crtc_can_enable_sagv(crtc_state);
3984}
3985
3986bool intel_can_enable_sagv(struct drm_i915_private *dev_priv,
3987 const struct intel_bw_state *bw_state)
3988{
3989 if (DISPLAY_VER(dev_priv) < 11 &&
3990 bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes))
3991 return false;
3992
3993 return bw_state->pipe_sagv_reject == 0;
3994}
3995
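/*
 * Recompute which pipes reject SAGV and update the bw state accordingly,
 * locking or serializing the global state as needed for the transition.
 */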
3996static int intel_compute_sagv_mask(struct intel_atomic_state *state)
3997{
3998 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3999 int ret;
4000 struct intel_crtc *crtc;
4001 struct intel_crtc_state *new_crtc_state;
4002 struct intel_bw_state *new_bw_state = NULL;
4003 const struct intel_bw_state *old_bw_state = NULL;
4004 int i;
4005
4006 for_each_new_intel_crtc_in_state(state, crtc,
4007 new_crtc_state, i) {
4008 new_bw_state = intel_atomic_get_bw_state(state);
4009 if (IS_ERR(new_bw_state))
4010 return PTR_ERR(new_bw_state);
4011
4012 old_bw_state = intel_atomic_get_old_bw_state(state);
4013
4014 if (intel_crtc_can_enable_sagv(new_crtc_state))
4015 new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe);
4016 else
4017 new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe);
4018 }
4019
4020 if (!new_bw_state)
4021 return 0;
4022
4023 new_bw_state->active_pipes =
4024 intel_calc_active_pipes(state, old_bw_state->active_pipes);
4025
4026 if (new_bw_state->active_pipes != old_bw_state->active_pipes) {
4027 ret = intel_atomic_lock_global_state(&new_bw_state->base);
4028 if (ret)
4029 return ret;
4030 }
4031
4032 if (intel_can_enable_sagv(dev_priv, new_bw_state) !=
4033 intel_can_enable_sagv(dev_priv, old_bw_state)) {
4034 ret = intel_atomic_serialize_global_state(&new_bw_state->base);
4035 if (ret)
4036 return ret;
4037 } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
4038 ret = intel_atomic_lock_global_state(&new_bw_state->base);
4039 if (ret)
4040 return ret;
4041 }
4042
4043 for_each_new_intel_crtc_in_state(state, crtc,
4044 new_crtc_state, i) {
4045 struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
4046
		/*
		 * We store use_sagv_wm in the crtc state rather than relying
		 * on that bw state since we have no convenient way to get at
		 * the latter from the plane commit hooks (especially in the
		 * legacy cursor case)
		 */
4053 pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(dev_priv) &&
4054 DISPLAY_VER(dev_priv) >= 12 &&
4055 intel_can_enable_sagv(dev_priv, new_bw_state);
4056 }
4057
4058 return 0;
4059}
4060
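/* DBUF size per slice, i.e. the total size split evenly between slices */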
4061static int intel_dbuf_slice_size(struct drm_i915_private *dev_priv)
4062{
4063 return INTEL_INFO(dev_priv)->dbuf.size /
4064 hweight8(INTEL_INFO(dev_priv)->dbuf.slice_mask);
4065}
4066
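/* Convert a mask of contiguous dbuf slices into a DDB block range */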
4067static void
4068skl_ddb_entry_for_slices(struct drm_i915_private *dev_priv, u8 slice_mask,
4069 struct skl_ddb_entry *ddb)
4070{
4071 int slice_size = intel_dbuf_slice_size(dev_priv);
4072
4073 if (!slice_mask) {
4074 ddb->start = 0;
4075 ddb->end = 0;
4076 return;
4077 }
4078
4079 ddb->start = (ffs(slice_mask) - 1) * slice_size;
4080 ddb->end = fls(slice_mask) * slice_size;
4081
4082 WARN_ON(ddb->start >= ddb->end);
4083 WARN_ON(ddb->end > INTEL_INFO(dev_priv)->dbuf.size);
4084}
4085
4086static unsigned int mbus_ddb_offset(struct drm_i915_private *i915, u8 slice_mask)
4087{
4088 struct skl_ddb_entry ddb;
4089
4090 if (slice_mask & (BIT(DBUF_S1) | BIT(DBUF_S2)))
4091 slice_mask = BIT(DBUF_S1);
4092 else if (slice_mask & (BIT(DBUF_S3) | BIT(DBUF_S4)))
4093 slice_mask = BIT(DBUF_S3);
4094
4095 skl_ddb_entry_for_slices(i915, slice_mask, &ddb);
4096
4097 return ddb.start;
4098}
4099
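/* Convert a DDB block range into the mask of dbuf slices it spans */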
4100u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *dev_priv,
4101 const struct skl_ddb_entry *entry)
4102{
4103 int slice_size = intel_dbuf_slice_size(dev_priv);
4104 enum dbuf_slice start_slice, end_slice;
4105 u8 slice_mask = 0;
4106
4107 if (!skl_ddb_entry_size(entry))
4108 return 0;
4109
4110 start_slice = entry->start / slice_size;
4111 end_slice = (entry->end - 1) / slice_size;
4112
	/*
	 * Per plane DDB entry can in a really worst case be on multiple slices
	 * but a single entry is anyway contiguous.
	 */
4117 while (start_slice <= end_slice) {
4118 slice_mask |= BIT(start_slice);
4119 start_slice++;
4120 }
4121
4122 return slice_mask;
4123}
4124
4125static unsigned int intel_crtc_ddb_weight(const struct intel_crtc_state *crtc_state)
4126{
4127 const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
4128 int hdisplay, vdisplay;
4129
4130 if (!crtc_state->hw.active)
4131 return 0;
4132
	/*
	 * Watermark/ddb requirement highly depends upon width of the
	 * framebuffer, so instead of allocating DDB equally among pipes
	 * distribute DDB based on resolution/width of the display.
	 */
4138 drm_mode_get_hv_timing(pipe_mode, &hdisplay, &vdisplay);
4139
4140 return hdisplay;
4141}
4142
4143static void intel_crtc_dbuf_weights(const struct intel_dbuf_state *dbuf_state,
4144 enum pipe for_pipe,
4145 unsigned int *weight_start,
4146 unsigned int *weight_end,
4147 unsigned int *weight_total)
4148{
4149 struct drm_i915_private *dev_priv =
4150 to_i915(dbuf_state->base.state->base.dev);
4151 enum pipe pipe;
4152
4153 *weight_start = 0;
4154 *weight_end = 0;
4155 *weight_total = 0;
4156
4157 for_each_pipe(dev_priv, pipe) {
4158 int weight = dbuf_state->weight[pipe];
4159
		/*
		 * Only account for pipes that use the same set of dbuf
		 * slices as the pipe we're computing for; pipes sitting
		 * on other slices don't eat into this pipe's DDB range.
		 */
4167 if (dbuf_state->slices[pipe] != dbuf_state->slices[for_pipe])
4168 continue;
4169
4170 *weight_total += weight;
4171 if (pipe < for_pipe) {
4172 *weight_start += weight;
4173 *weight_end += weight;
4174 } else if (pipe == for_pipe) {
4175 *weight_end += weight;
4176 }
4177 }
4178}
4179
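/*
 * Carve this pipe's share out of the DDB range covered by its dbuf
 * slices, proportionally to the per-pipe ddb weights.
 */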
4180static int
4181skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
4182{
4183 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4184 unsigned int weight_total, weight_start, weight_end;
4185 const struct intel_dbuf_state *old_dbuf_state =
4186 intel_atomic_get_old_dbuf_state(state);
4187 struct intel_dbuf_state *new_dbuf_state =
4188 intel_atomic_get_new_dbuf_state(state);
4189 struct intel_crtc_state *crtc_state;
4190 struct skl_ddb_entry ddb_slices;
4191 enum pipe pipe = crtc->pipe;
4192 unsigned int mbus_offset = 0;
4193 u32 ddb_range_size;
4194 u32 dbuf_slice_mask;
4195 u32 start, end;
4196 int ret;
4197
4198 if (new_dbuf_state->weight[pipe] == 0) {
4199 new_dbuf_state->ddb[pipe].start = 0;
4200 new_dbuf_state->ddb[pipe].end = 0;
4201 goto out;
4202 }
4203
4204 dbuf_slice_mask = new_dbuf_state->slices[pipe];
4205
4206 skl_ddb_entry_for_slices(dev_priv, dbuf_slice_mask, &ddb_slices);
4207 mbus_offset = mbus_ddb_offset(dev_priv, dbuf_slice_mask);
4208 ddb_range_size = skl_ddb_entry_size(&ddb_slices);
4209
4210 intel_crtc_dbuf_weights(new_dbuf_state, pipe,
4211 &weight_start, &weight_end, &weight_total);
4212
4213 start = ddb_range_size * weight_start / weight_total;
4214 end = ddb_range_size * weight_end / weight_total;
4215
4216 new_dbuf_state->ddb[pipe].start = ddb_slices.start - mbus_offset + start;
4217 new_dbuf_state->ddb[pipe].end = ddb_slices.start - mbus_offset + end;
4218out:
4219 if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe] &&
4220 skl_ddb_entry_equal(&old_dbuf_state->ddb[pipe],
4221 &new_dbuf_state->ddb[pipe]))
4222 return 0;
4223
4224 ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
4225 if (ret)
4226 return ret;
4227
4228 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
4229 if (IS_ERR(crtc_state))
4230 return PTR_ERR(crtc_state);
4231
	/*
	 * Used for checking overlaps, so we need absolute
	 * offsets instead of MBUS relative offsets.
	 */
4236 crtc_state->wm.skl.ddb.start = mbus_offset + new_dbuf_state->ddb[pipe].start;
4237 crtc_state->wm.skl.ddb.end = mbus_offset + new_dbuf_state->ddb[pipe].end;
4238
4239 drm_dbg_kms(&dev_priv->drm,
4240 "[CRTC:%d:%s] dbuf slices 0x%x -> 0x%x, ddb (%d - %d) -> (%d - %d), active pipes 0x%x -> 0x%x\n",
4241 crtc->base.base.id, crtc->base.name,
4242 old_dbuf_state->slices[pipe], new_dbuf_state->slices[pipe],
4243 old_dbuf_state->ddb[pipe].start, old_dbuf_state->ddb[pipe].end,
4244 new_dbuf_state->ddb[pipe].start, new_dbuf_state->ddb[pipe].end,
4245 old_dbuf_state->active_pipes, new_dbuf_state->active_pipes);
4246
4247 return 0;
4248}
4249
4250static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
4251 int width, const struct drm_format_info *format,
4252 u64 modifier, unsigned int rotation,
4253 u32 plane_pixel_rate, struct skl_wm_params *wp,
4254 int color_plane);
4255static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
4256 int level,
4257 unsigned int latency,
4258 const struct skl_wm_params *wp,
4259 const struct skl_wm_level *result_prev,
				 struct skl_wm_level *result);
4261
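/*
 * Minimum DDB allocation (in blocks) required by the cursor plane,
 * computed from the highest watermark level that a worst-case 256 pixel
 * wide, linear ARGB8888 cursor can still satisfy.
 */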
4262static unsigned int
4263skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
4264 int num_active)
4265{
4266 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4267 int level, max_level = ilk_wm_max_level(dev_priv);
4268 struct skl_wm_level wm = {};
4269 int ret, min_ddb_alloc = 0;
4270 struct skl_wm_params wp;
4271
4272 ret = skl_compute_wm_params(crtc_state, 256,
4273 drm_format_info(DRM_FORMAT_ARGB8888),
4274 DRM_FORMAT_MOD_LINEAR,
4275 DRM_MODE_ROTATE_0,
4276 crtc_state->pixel_rate, &wp, 0);
4277 drm_WARN_ON(&dev_priv->drm, ret);
4278
4279 for (level = 0; level <= max_level; level++) {
4280 unsigned int latency = dev_priv->wm.skl_latency[level];
4281
4282 skl_compute_plane_wm(crtc_state, level, latency, &wp, &wm, &wm);
4283 if (wm.min_ddb_alloc == U16_MAX)
4284 break;
4285
4286 min_ddb_alloc = wm.min_ddb_alloc;
4287 }
4288
4289 return max(num_active == 1 ? 32 : 8, min_ddb_alloc);
4290}
4291
4292static void skl_ddb_entry_init_from_hw(struct drm_i915_private *dev_priv,
4293 struct skl_ddb_entry *entry, u32 reg)
4294{
4295 entry->start = reg & DDB_ENTRY_MASK;
4296 entry->end = (reg >> DDB_ENTRY_END_SHIFT) & DDB_ENTRY_MASK;
4297
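	/* The hardware stores an inclusive end block; convert to exclusive */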
4298 if (entry->end)
4299 entry->end += 1;
4300}
4301
4302static void
4303skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
4304 const enum pipe pipe,
4305 const enum plane_id plane_id,
4306 struct skl_ddb_entry *ddb_y,
4307 struct skl_ddb_entry *ddb_uv)
4308{
4309 u32 val, val2;
4310 u32 fourcc = 0;
4311
	/* Cursor doesn't support NV12/planar, so no extra calculation needed */
4313 if (plane_id == PLANE_CURSOR) {
4314 val = intel_uncore_read(&dev_priv->uncore, CUR_BUF_CFG(pipe));
4315 skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
4316 return;
4317 }
4318
4319 val = intel_uncore_read(&dev_priv->uncore, PLANE_CTL(pipe, plane_id));
4320
	/* The format is only needed for the Y/UV swap check below */
4322 if (val & PLANE_CTL_ENABLE)
4323 fourcc = skl_format_to_fourcc(val & PLANE_CTL_FORMAT_MASK,
4324 val & PLANE_CTL_ORDER_RGBX,
4325 val & PLANE_CTL_ALPHA_MASK);
4326
4327 if (DISPLAY_VER(dev_priv) >= 11) {
4328 val = intel_uncore_read(&dev_priv->uncore, PLANE_BUF_CFG(pipe, plane_id));
4329 skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
4330 } else {
4331 val = intel_uncore_read(&dev_priv->uncore, PLANE_BUF_CFG(pipe, plane_id));
4332 val2 = intel_uncore_read(&dev_priv->uncore, PLANE_NV12_BUF_CFG(pipe, plane_id));
4333
4334 if (fourcc &&
4335 drm_format_info_is_yuv_semiplanar(drm_format_info(fourcc)))
4336 swap(val, val2);
4337
4338 skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
4339 skl_ddb_entry_init_from_hw(dev_priv, ddb_uv, val2);
4340 }
4341}
4342
4343void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
4344 struct skl_ddb_entry *ddb_y,
4345 struct skl_ddb_entry *ddb_uv)
4346{
4347 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4348 enum intel_display_power_domain power_domain;
4349 enum pipe pipe = crtc->pipe;
4350 intel_wakeref_t wakeref;
4351 enum plane_id plane_id;
4352
4353 power_domain = POWER_DOMAIN_PIPE(pipe);
4354 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
4355 if (!wakeref)
4356 return;
4357
4358 for_each_plane_id_on_crtc(crtc, plane_id)
4359 skl_ddb_get_hw_plane_state(dev_priv, pipe,
4360 plane_id,
4361 &ddb_y[plane_id],
4362 &ddb_uv[plane_id]);
4363
4364 intel_display_power_put(dev_priv, power_domain, wakeref);
4365}
4366
/*
 * Determines the downscale amount of a plane for the purposes of watermark
 * calculations. The downscale amount is the ratio of source width/height to
 * the scaled destination width/height.
 *
 * Return value is provided in 16.16 fixed point form to retain fractional
 * part. Callers should take care of dividing & rounding off the value.
 */
4383static uint_fixed_16_16_t
4384skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state,
4385 const struct intel_plane_state *plane_state)
4386{
4387 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4388 u32 src_w, src_h, dst_w, dst_h;
4389 uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
4390 uint_fixed_16_16_t downscale_h, downscale_w;
4391
4392 if (drm_WARN_ON(&dev_priv->drm,
4393 !intel_wm_plane_visible(crtc_state, plane_state)))
4394 return u32_to_fixed16(0);
4395
	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 *
	 * n.b., src is 16.16 fixed point, dst is whole integer.
	 */
4403 src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
4404 src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
4405 dst_w = drm_rect_width(&plane_state->uapi.dst);
4406 dst_h = drm_rect_height(&plane_state->uapi.dst);
4407
4408 fp_w_ratio = div_fixed16(src_w, dst_w);
4409 fp_h_ratio = div_fixed16(src_h, dst_h);
4410 downscale_w = max_fixed16(fp_w_ratio, u32_to_fixed16(1));
4411 downscale_h = max_fixed16(fp_h_ratio, u32_to_fixed16(1));
4412
4413 return mul_fixed16(downscale_w, downscale_h);
4414}
4415
4416struct dbuf_slice_conf_entry {
4417 u8 active_pipes;
4418 u8 dbuf_mask[I915_MAX_PIPES];
4419 bool join_mbus;
4420};
4421
/*
 * Table taken from Bspec 12716.
 * Pipes have some preferred DBuf slice affinity, plus there are some
 * hardcoded requirements on how those slices should be distributed;
 * this should work for all current platforms.
 */
/* Autogenerated with igt/tools/intel_dbuf_map tool: */
static const struct dbuf_slice_conf_entry icl_allowed_dbufs[] = {
4435 {
4436 .active_pipes = BIT(PIPE_A),
4437 .dbuf_mask = {
4438 [PIPE_A] = BIT(DBUF_S1),
4439 },
4440 },
4441 {
4442 .active_pipes = BIT(PIPE_B),
4443 .dbuf_mask = {
4444 [PIPE_B] = BIT(DBUF_S1),
4445 },
4446 },
4447 {
4448 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
4449 .dbuf_mask = {
4450 [PIPE_A] = BIT(DBUF_S1),
4451 [PIPE_B] = BIT(DBUF_S2),
4452 },
4453 },
4454 {
4455 .active_pipes = BIT(PIPE_C),
4456 .dbuf_mask = {
4457 [PIPE_C] = BIT(DBUF_S2),
4458 },
4459 },
4460 {
4461 .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
4462 .dbuf_mask = {
4463 [PIPE_A] = BIT(DBUF_S1),
4464 [PIPE_C] = BIT(DBUF_S2),
4465 },
4466 },
4467 {
4468 .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
4469 .dbuf_mask = {
4470 [PIPE_B] = BIT(DBUF_S1),
4471 [PIPE_C] = BIT(DBUF_S2),
4472 },
4473 },
4474 {
4475 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
4476 .dbuf_mask = {
4477 [PIPE_A] = BIT(DBUF_S1),
4478 [PIPE_B] = BIT(DBUF_S1),
4479 [PIPE_C] = BIT(DBUF_S2),
4480 },
4481 },
4482 {}
4483};
4484
/*
 * Table taken from Bspec 49255.
 * Pipes have some preferred DBuf slice affinity, plus there are some
 * hardcoded requirements on how those slices should be distributed;
 * this should work for all current platforms.
 */
/* Autogenerated with igt/tools/intel_dbuf_map tool: */
static const struct dbuf_slice_conf_entry tgl_allowed_dbufs[] = {
4498 {
4499 .active_pipes = BIT(PIPE_A),
4500 .dbuf_mask = {
4501 [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
4502 },
4503 },
4504 {
4505 .active_pipes = BIT(PIPE_B),
4506 .dbuf_mask = {
4507 [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
4508 },
4509 },
4510 {
4511 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
4512 .dbuf_mask = {
4513 [PIPE_A] = BIT(DBUF_S2),
4514 [PIPE_B] = BIT(DBUF_S1),
4515 },
4516 },
4517 {
4518 .active_pipes = BIT(PIPE_C),
4519 .dbuf_mask = {
4520 [PIPE_C] = BIT(DBUF_S2) | BIT(DBUF_S1),
4521 },
4522 },
4523 {
4524 .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
4525 .dbuf_mask = {
4526 [PIPE_A] = BIT(DBUF_S1),
4527 [PIPE_C] = BIT(DBUF_S2),
4528 },
4529 },
4530 {
4531 .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
4532 .dbuf_mask = {
4533 [PIPE_B] = BIT(DBUF_S1),
4534 [PIPE_C] = BIT(DBUF_S2),
4535 },
4536 },
4537 {
4538 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
4539 .dbuf_mask = {
4540 [PIPE_A] = BIT(DBUF_S1),
4541 [PIPE_B] = BIT(DBUF_S1),
4542 [PIPE_C] = BIT(DBUF_S2),
4543 },
4544 },
4545 {
4546 .active_pipes = BIT(PIPE_D),
4547 .dbuf_mask = {
4548 [PIPE_D] = BIT(DBUF_S2) | BIT(DBUF_S1),
4549 },
4550 },
4551 {
4552 .active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
4553 .dbuf_mask = {
4554 [PIPE_A] = BIT(DBUF_S1),
4555 [PIPE_D] = BIT(DBUF_S2),
4556 },
4557 },
4558 {
4559 .active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
4560 .dbuf_mask = {
4561 [PIPE_B] = BIT(DBUF_S1),
4562 [PIPE_D] = BIT(DBUF_S2),
4563 },
4564 },
4565 {
4566 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
4567 .dbuf_mask = {
4568 [PIPE_A] = BIT(DBUF_S1),
4569 [PIPE_B] = BIT(DBUF_S1),
4570 [PIPE_D] = BIT(DBUF_S2),
4571 },
4572 },
4573 {
4574 .active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
4575 .dbuf_mask = {
4576 [PIPE_C] = BIT(DBUF_S1),
4577 [PIPE_D] = BIT(DBUF_S2),
4578 },
4579 },
4580 {
4581 .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
4582 .dbuf_mask = {
4583 [PIPE_A] = BIT(DBUF_S1),
4584 [PIPE_C] = BIT(DBUF_S2),
4585 [PIPE_D] = BIT(DBUF_S2),
4586 },
4587 },
4588 {
4589 .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
4590 .dbuf_mask = {
4591 [PIPE_B] = BIT(DBUF_S1),
4592 [PIPE_C] = BIT(DBUF_S2),
4593 [PIPE_D] = BIT(DBUF_S2),
4594 },
4595 },
4596 {
4597 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
4598 .dbuf_mask = {
4599 [PIPE_A] = BIT(DBUF_S1),
4600 [PIPE_B] = BIT(DBUF_S1),
4601 [PIPE_C] = BIT(DBUF_S2),
4602 [PIPE_D] = BIT(DBUF_S2),
4603 },
4604 },
4605 {}
4606};
4607
4608static const struct dbuf_slice_conf_entry dg2_allowed_dbufs[] = {
4609 {
4610 .active_pipes = BIT(PIPE_A),
4611 .dbuf_mask = {
4612 [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
4613 },
4614 },
4615 {
4616 .active_pipes = BIT(PIPE_B),
4617 .dbuf_mask = {
4618 [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
4619 },
4620 },
4621 {
4622 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
4623 .dbuf_mask = {
4624 [PIPE_A] = BIT(DBUF_S1),
4625 [PIPE_B] = BIT(DBUF_S2),
4626 },
4627 },
4628 {
4629 .active_pipes = BIT(PIPE_C),
4630 .dbuf_mask = {
4631 [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
4632 },
4633 },
4634 {
4635 .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
4636 .dbuf_mask = {
4637 [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
4638 [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
4639 },
4640 },
4641 {
4642 .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
4643 .dbuf_mask = {
4644 [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
4645 [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
4646 },
4647 },
4648 {
4649 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
4650 .dbuf_mask = {
4651 [PIPE_A] = BIT(DBUF_S1),
4652 [PIPE_B] = BIT(DBUF_S2),
4653 [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
4654 },
4655 },
4656 {
4657 .active_pipes = BIT(PIPE_D),
4658 .dbuf_mask = {
4659 [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
4660 },
4661 },
4662 {
4663 .active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
4664 .dbuf_mask = {
4665 [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
4666 [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
4667 },
4668 },
4669 {
4670 .active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
4671 .dbuf_mask = {
4672 [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
4673 [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
4674 },
4675 },
4676 {
4677 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
4678 .dbuf_mask = {
4679 [PIPE_A] = BIT(DBUF_S1),
4680 [PIPE_B] = BIT(DBUF_S2),
4681 [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
4682 },
4683 },
4684 {
4685 .active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
4686 .dbuf_mask = {
4687 [PIPE_C] = BIT(DBUF_S3),
4688 [PIPE_D] = BIT(DBUF_S4),
4689 },
4690 },
4691 {
4692 .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
4693 .dbuf_mask = {
4694 [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
4695 [PIPE_C] = BIT(DBUF_S3),
4696 [PIPE_D] = BIT(DBUF_S4),
4697 },
4698 },
4699 {
4700 .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
4701 .dbuf_mask = {
4702 [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
4703 [PIPE_C] = BIT(DBUF_S3),
4704 [PIPE_D] = BIT(DBUF_S4),
4705 },
4706 },
4707 {
4708 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
4709 .dbuf_mask = {
4710 [PIPE_A] = BIT(DBUF_S1),
4711 [PIPE_B] = BIT(DBUF_S2),
4712 [PIPE_C] = BIT(DBUF_S3),
4713 [PIPE_D] = BIT(DBUF_S4),
4714 },
4715 },
4716 {}
4717};
4718
4719static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] = {
	/*
	 * Keep the join_mbus cases first so check_mbus_joined()
	 * will prefer them over the !join_mbus cases.
	 */
4724 {
4725 .active_pipes = BIT(PIPE_A),
4726 .dbuf_mask = {
4727 [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4),
4728 },
4729 .join_mbus = true,
4730 },
4731 {
4732 .active_pipes = BIT(PIPE_B),
4733 .dbuf_mask = {
4734 [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4),
4735 },
4736 .join_mbus = true,
4737 },
4738 {
4739 .active_pipes = BIT(PIPE_A),
4740 .dbuf_mask = {
4741 [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
4742 },
4743 .join_mbus = false,
4744 },
4745 {
4746 .active_pipes = BIT(PIPE_B),
4747 .dbuf_mask = {
4748 [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
4749 },
4750 .join_mbus = false,
4751 },
4752 {
4753 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
4754 .dbuf_mask = {
4755 [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
4756 [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
4757 },
4758 },
4759 {
4760 .active_pipes = BIT(PIPE_C),
4761 .dbuf_mask = {
4762 [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
4763 },
4764 },
4765 {
4766 .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
4767 .dbuf_mask = {
4768 [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
4769 [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
4770 },
4771 },
4772 {
4773 .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
4774 .dbuf_mask = {
4775 [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
4776 [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
4777 },
4778 },
4779 {
4780 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
4781 .dbuf_mask = {
4782 [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
4783 [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
4784 [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
4785 },
4786 },
4787 {
4788 .active_pipes = BIT(PIPE_D),
4789 .dbuf_mask = {
4790 [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
4791 },
4792 },
4793 {
4794 .active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
4795 .dbuf_mask = {
4796 [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
4797 [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
4798 },
4799 },
4800 {
4801 .active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
4802 .dbuf_mask = {
4803 [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
4804 [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
4805 },
4806 },
4807 {
4808 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
4809 .dbuf_mask = {
4810 [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
4811 [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
4812 [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
4813 },
4814 },
4815 {
4816 .active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
4817 .dbuf_mask = {
4818 [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
4819 [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
4820 },
4821 },
4822 {
4823 .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
4824 .dbuf_mask = {
4825 [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
4826 [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
4827 [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
4828 },
4829 },
4830 {
4831 .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
4832 .dbuf_mask = {
4833 [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
4834 [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
4835 [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
4836 },
4837 },
4838 {
4839 .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
4840 .dbuf_mask = {
4841 [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
4842 [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
4843 [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
4844 [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
4845 },
4846 },
4847 {}
4849};
4850
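/*
 * Returns whether the given set of active pipes requires MBUS joining,
 * according to the platform's DBuf slice table.
 */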
4851static bool check_mbus_joined(u8 active_pipes,
4852 const struct dbuf_slice_conf_entry *dbuf_slices)
4853{
4854 int i;
4855
4856 for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
4857 if (dbuf_slices[i].active_pipes == active_pipes)
4858 return dbuf_slices[i].join_mbus;
4859 }
4860 return false;
4861}
4862
4863static bool adlp_check_mbus_joined(u8 active_pipes)
4864{
4865 return check_mbus_joined(active_pipes, adlp_allowed_dbufs);
4866}
4867
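/*
 * Look up the DBuf slice mask for @pipe in the platform's table, matching
 * both the set of active pipes and the MBUS joining state.
 */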
4868static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus,
4869 const struct dbuf_slice_conf_entry *dbuf_slices)
4870{
4871 int i;
4872
4873 for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
4874 if (dbuf_slices[i].active_pipes == active_pipes &&
4875 dbuf_slices[i].join_mbus == join_mbus)
4876 return dbuf_slices[i].dbuf_mask[pipe];
4877 }
4878 return 0;
4879}
4880
/*
 * Find the entry with the same enabled-slice configuration and return the
 * corresponding DBuf slice mask, as stated in the BSpec for the particular
 * platform.
 */
4886static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
4887{
	/*
	 * ICL simply uses the BSpec 12716 table above: look up the slice
	 * mask for this pipe given the set of active pipes.
	 */
4900 return compute_dbuf_slices(pipe, active_pipes, join_mbus,
4901 icl_allowed_dbufs);
4902}
4903
4904static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
4905{
4906 return compute_dbuf_slices(pipe, active_pipes, join_mbus,
4907 tgl_allowed_dbufs);
4908}
4909
4910static u8 adlp_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
4911{
4912 return compute_dbuf_slices(pipe, active_pipes, join_mbus,
4913 adlp_allowed_dbufs);
4914}
4915
4916static u8 dg2_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
4917{
4918 return compute_dbuf_slices(pipe, active_pipes, join_mbus,
4919 dg2_allowed_dbufs);
4920}
4921
4922static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes, bool join_mbus)
4923{
4924 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4925 enum pipe pipe = crtc->pipe;
4926
4927 if (IS_DG2(dev_priv))
4928 return dg2_compute_dbuf_slices(pipe, active_pipes, join_mbus);
4929 else if (IS_ALDERLAKE_P(dev_priv))
4930 return adlp_compute_dbuf_slices(pipe, active_pipes, join_mbus);
4931 else if (DISPLAY_VER(dev_priv) == 12)
4932 return tgl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
4933 else if (DISPLAY_VER(dev_priv) == 11)
4934 return icl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
4935
	/*
	 * For anything else just use one slice for now.
	 * Should be extended for other platforms.
	 */
4939 return active_pipes & BIT(pipe) ? BIT(DBUF_S1) : 0;
4940}
4941
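/*
 * Relative data rate of a plane: the (downscale-adjusted) number of source
 * pixels multiplied by the bytes per pixel. Used as the weight when dividing
 * the DDB allocation between planes.
 */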
4942static u64
4943skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
4944 const struct intel_plane_state *plane_state,
4945 int color_plane)
4946{
4947 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
4948 const struct drm_framebuffer *fb = plane_state->hw.fb;
4949 u32 data_rate;
4950 u32 width = 0, height = 0;
4951 uint_fixed_16_16_t down_scale_amount;
4952 u64 rate;
4953
4954 if (!plane_state->uapi.visible)
4955 return 0;
4956
4957 if (plane->id == PLANE_CURSOR)
4958 return 0;
4959
4960 if (color_plane == 1 &&
4961 !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
4962 return 0;
4963
	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
4969 width = drm_rect_width(&plane_state->uapi.src) >> 16;
4970 height = drm_rect_height(&plane_state->uapi.src) >> 16;
4971
	/* UV plane does 1/2 pixel sub-sampling */
4973 if (color_plane == 1) {
4974 width /= 2;
4975 height /= 2;
4976 }
4977
4978 data_rate = width * height;
4979
4980 down_scale_amount = skl_plane_downscale_amount(crtc_state, plane_state);
4981
4982 rate = mul_round_up_u32_fixed16(data_rate, down_scale_amount);
4983
4984 rate *= fb->format->cpp[color_plane];
4985 return rate;
4986}
4987
4988static u64
4989skl_get_total_relative_data_rate(struct intel_atomic_state *state,
4990 struct intel_crtc *crtc)
4991{
4992 struct intel_crtc_state *crtc_state =
4993 intel_atomic_get_new_crtc_state(state, crtc);
4994 const struct intel_plane_state *plane_state;
4995 struct intel_plane *plane;
4996 u64 total_data_rate = 0;
4997 enum plane_id plane_id;
4998 int i;
4999
	/* Calculate and cache the data rate for each plane */
5001 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
5002 if (plane->pipe != crtc->pipe)
5003 continue;
5004
5005 plane_id = plane->id;
5006
		/* packed/y */
5008 crtc_state->plane_data_rate[plane_id] =
5009 skl_plane_relative_data_rate(crtc_state, plane_state, 0);
5010
		/* uv-plane */
5012 crtc_state->uv_plane_data_rate[plane_id] =
5013 skl_plane_relative_data_rate(crtc_state, plane_state, 1);
5014 }
5015
5016 for_each_plane_id_on_crtc(crtc, plane_id) {
5017 total_data_rate += crtc_state->plane_data_rate[plane_id];
5018 total_data_rate += crtc_state->uv_plane_data_rate[plane_id];
5019 }
5020
5021 return total_data_rate;
5022}
5023
5024static u64
5025icl_get_total_relative_data_rate(struct intel_atomic_state *state,
5026 struct intel_crtc *crtc)
5027{
5028 struct intel_crtc_state *crtc_state =
5029 intel_atomic_get_new_crtc_state(state, crtc);
5030 const struct intel_plane_state *plane_state;
5031 struct intel_plane *plane;
5032 u64 total_data_rate = 0;
5033 enum plane_id plane_id;
5034 int i;
5035
	/* Calculate and cache the data rate for each plane */
5037 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
5038 if (plane->pipe != crtc->pipe)
5039 continue;
5040
5041 plane_id = plane->id;
5042
5043 if (!plane_state->planar_linked_plane) {
5044 crtc_state->plane_data_rate[plane_id] =
5045 skl_plane_relative_data_rate(crtc_state, plane_state, 0);
5046 } else {
5047 enum plane_id y_plane_id;
5048
			/*
			 * The slave plane is skipped here: the master plane
			 * (which carries the UV data) computes the data rate
			 * for both itself and its linked Y plane below.
			 */
5056 if (plane_state->planar_slave)
5057 continue;
5058
			/* The Y plane rate is accounted on the slave */
5060 y_plane_id = plane_state->planar_linked_plane->id;
5061 crtc_state->plane_data_rate[y_plane_id] =
5062 skl_plane_relative_data_rate(crtc_state, plane_state, 0);
5063
5064 crtc_state->plane_data_rate[plane_id] =
5065 skl_plane_relative_data_rate(crtc_state, plane_state, 1);
5066 }
5067 }
5068
5069 for_each_plane_id_on_crtc(crtc, plane_id)
5070 total_data_rate += crtc_state->plane_data_rate[plane_id];
5071
5072 return total_data_rate;
5073}
5074
5075const struct skl_wm_level *
5076skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm,
5077 enum plane_id plane_id,
5078 int level)
5079{
5080 const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
5081
5082 if (level == 0 && pipe_wm->use_sagv_wm)
5083 return &wm->sagv.wm0;
5084
5085 return &wm->wm[level];
5086}
5087
5088const struct skl_wm_level *
5089skl_plane_trans_wm(const struct skl_pipe_wm *pipe_wm,
5090 enum plane_id plane_id)
5091{
5092 const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
5093
5094 if (pipe_wm->use_sagv_wm)
5095 return &wm->sagv.trans_wm;
5096
5097 return &wm->trans_wm;
5098}
5099
/*
 * We only disable the watermarks for each plane if
 * they exceed the ddb allocation of said plane. This
 * is done so that we don't end up touching cursor
 * watermarks needlessly when some other plane reduces
 * our max possible watermark level.
 *
 * Bspec has this to say about the PLANE_WM enable bit:
 * "All the watermarks at this level for all enabled
 *  planes must be enabled before the level will be used."
 * So this is actually safe to do.
 */
5112static void
5113skl_check_wm_level(struct skl_wm_level *wm, u64 total)
5114{
5115 if (wm->min_ddb_alloc > total)
5116 memset(wm, 0, sizeof(*wm));
5117}
5118
5119static void
5120skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm,
5121 u64 total, u64 uv_total)
5122{
5123 if (wm->min_ddb_alloc > total ||
5124 uv_wm->min_ddb_alloc > uv_total) {
5125 memset(wm, 0, sizeof(*wm));
5126 memset(uv_wm, 0, sizeof(*uv_wm));
5127 }
5128}
5129
5130static bool icl_need_wm1_wa(struct drm_i915_private *i915,
5131 enum plane_id plane_id)
5132{
	/*
	 * Wa_1408961008:icl, ehl
	 * Wa_14012656716:tgl, adl
	 * Underruns with WM1+ disabled
	 */
5138 return DISPLAY_VER(i915) == 11 ||
5139 (IS_DISPLAY_VER(i915, 12, 13) && plane_id == PLANE_CURSOR);
5140}
5141
5142static int
5143skl_allocate_plane_ddb(struct intel_atomic_state *state,
5144 struct intel_crtc *crtc)
5145{
5146 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5147 struct intel_crtc_state *crtc_state =
5148 intel_atomic_get_new_crtc_state(state, crtc);
5149 const struct intel_dbuf_state *dbuf_state =
5150 intel_atomic_get_new_dbuf_state(state);
5151 const struct skl_ddb_entry *alloc = &dbuf_state->ddb[crtc->pipe];
5152 int num_active = hweight8(dbuf_state->active_pipes);
5153 u16 alloc_size, start = 0;
5154 u16 total[I915_MAX_PLANES] = {};
5155 u16 uv_total[I915_MAX_PLANES] = {};
5156 u64 total_data_rate;
5157 enum plane_id plane_id;
5158 u32 blocks;
5159 int level;
5160
	/* Clear the partitioning for disabled planes. */
5162 memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
5163 memset(crtc_state->wm.skl.plane_ddb_uv, 0, sizeof(crtc_state->wm.skl.plane_ddb_uv));
5164
5165 if (!crtc_state->hw.active)
5166 return 0;
5167
5168 if (DISPLAY_VER(dev_priv) >= 11)
5169 total_data_rate =
5170 icl_get_total_relative_data_rate(state, crtc);
5171 else
5172 total_data_rate =
5173 skl_get_total_relative_data_rate(state, crtc);
5174
5175 alloc_size = skl_ddb_entry_size(alloc);
5176 if (alloc_size == 0)
5177 return 0;
5178
	/* Allocate fixed number of blocks for cursor. */
5180 total[PLANE_CURSOR] = skl_cursor_allocation(crtc_state, num_active);
5181 alloc_size -= total[PLANE_CURSOR];
5182 crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].start =
5183 alloc->end - total[PLANE_CURSOR];
5184 crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;
5185
5186 if (total_data_rate == 0)
5187 return 0;
5188
	/*
	 * Find the highest watermark level for which we can satisfy the block
	 * requirement of active planes.
	 */
5193 for (level = ilk_wm_max_level(dev_priv); level >= 0; level--) {
5194 blocks = 0;
5195 for_each_plane_id_on_crtc(crtc, plane_id) {
5196 const struct skl_plane_wm *wm =
5197 &crtc_state->wm.skl.optimal.planes[plane_id];
5198
5199 if (plane_id == PLANE_CURSOR) {
5200 if (wm->wm[level].min_ddb_alloc > total[PLANE_CURSOR]) {
5201 drm_WARN_ON(&dev_priv->drm,
5202 wm->wm[level].min_ddb_alloc != U16_MAX);
5203 blocks = U32_MAX;
5204 break;
5205 }
5206 continue;
5207 }
5208
5209 blocks += wm->wm[level].min_ddb_alloc;
5210 blocks += wm->uv_wm[level].min_ddb_alloc;
5211 }
5212
5213 if (blocks <= alloc_size) {
5214 alloc_size -= blocks;
5215 break;
5216 }
5217 }
5218
5219 if (level < 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Requested display configuration exceeds system DDB limitations\n");
5222 drm_dbg_kms(&dev_priv->drm, "minimum required %d/%d\n",
5223 blocks, alloc_size);
5224 return -EINVAL;
5225 }
5226
	/*
	 * Grant each plane the blocks it requires at the highest achievable
	 * watermark level, plus an extra share of the leftover blocks
	 * proportional to its relative data rate.
	 */
5232 for_each_plane_id_on_crtc(crtc, plane_id) {
5233 const struct skl_plane_wm *wm =
5234 &crtc_state->wm.skl.optimal.planes[plane_id];
5235 u64 rate;
5236 u16 extra;
5237
5238 if (plane_id == PLANE_CURSOR)
5239 continue;
5240
		/*
		 * We've accounted for all active planes; remaining planes are
		 * all disabled.
		 */
5245 if (total_data_rate == 0)
5246 break;
5247
5248 rate = crtc_state->plane_data_rate[plane_id];
5249 extra = min_t(u16, alloc_size,
5250 DIV64_U64_ROUND_UP(alloc_size * rate,
5251 total_data_rate));
5252 total[plane_id] = wm->wm[level].min_ddb_alloc + extra;
5253 alloc_size -= extra;
5254 total_data_rate -= rate;
5255
5256 if (total_data_rate == 0)
5257 break;
5258
5259 rate = crtc_state->uv_plane_data_rate[plane_id];
5260 extra = min_t(u16, alloc_size,
5261 DIV64_U64_ROUND_UP(alloc_size * rate,
5262 total_data_rate));
5263 uv_total[plane_id] = wm->uv_wm[level].min_ddb_alloc + extra;
5264 alloc_size -= extra;
5265 total_data_rate -= rate;
5266 }
5267 drm_WARN_ON(&dev_priv->drm, alloc_size != 0 || total_data_rate != 0);
5268
	/* Set the actual DDB start/end points for each plane */
5270 start = alloc->start;
5271 for_each_plane_id_on_crtc(crtc, plane_id) {
5272 struct skl_ddb_entry *plane_alloc =
5273 &crtc_state->wm.skl.plane_ddb_y[plane_id];
5274 struct skl_ddb_entry *uv_plane_alloc =
5275 &crtc_state->wm.skl.plane_ddb_uv[plane_id];
5276
5277 if (plane_id == PLANE_CURSOR)
5278 continue;
5279
		/* Gen11+ uses a separate plane for UV watermarks */
5281 drm_WARN_ON(&dev_priv->drm,
5282 DISPLAY_VER(dev_priv) >= 11 && uv_total[plane_id]);
5283
		/* Leave the DDB entry empty for planes that need no blocks */
5285 if (total[plane_id]) {
5286 plane_alloc->start = start;
5287 start += total[plane_id];
5288 plane_alloc->end = start;
5289 }
5290
5291 if (uv_total[plane_id]) {
5292 uv_plane_alloc->start = start;
5293 start += uv_total[plane_id];
5294 uv_plane_alloc->end = start;
5295 }
5296 }
5297
	/*
	 * When we calculated watermark values we didn't know how high
	 * of a level we'd actually be able to hit, so we just marked
	 * all levels as "enabled."  Go back now and disable the ones
	 * that aren't actually possible.
	 */
5304 for (level++; level <= ilk_wm_max_level(dev_priv); level++) {
5305 for_each_plane_id_on_crtc(crtc, plane_id) {
5306 struct skl_plane_wm *wm =
5307 &crtc_state->wm.skl.optimal.planes[plane_id];
5308
5309 skl_check_nv12_wm_level(&wm->wm[level], &wm->uv_wm[level],
5310 total[plane_id], uv_total[plane_id]);
5311
5312 if (icl_need_wm1_wa(dev_priv, plane_id) &&
5313 level == 1 && wm->wm[0].enable) {
5314 wm->wm[level].blocks = wm->wm[0].blocks;
5315 wm->wm[level].lines = wm->wm[0].lines;
5316 wm->wm[level].ignore_lines = wm->wm[0].ignore_lines;
5317 }
5318 }
5319 }
5320
	/*
	 * Go back and disable the transition and SAGV watermarks if it turns
	 * out we don't have enough DDB blocks for them.
	 */
5325 for_each_plane_id_on_crtc(crtc, plane_id) {
5326 struct skl_plane_wm *wm =
5327 &crtc_state->wm.skl.optimal.planes[plane_id];
5328
5329 skl_check_wm_level(&wm->trans_wm, total[plane_id]);
5330 skl_check_wm_level(&wm->sagv.wm0, total[plane_id]);
5331 skl_check_wm_level(&wm->sagv.trans_wm, total[plane_id]);
5332 }
5333
5334 return 0;
5335}
5336
/*
 * The max latency should be 257 (max the punit can code is 255 and we add 2us
 * for the read latency) and cpp should always be <= 8, so that
 * should allow pixel_rate up to ~2 GHz which seems sufficient since max
 * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
 */
5343static uint_fixed_16_16_t
5344skl_wm_method1(const struct drm_i915_private *dev_priv, u32 pixel_rate,
5345 u8 cpp, u32 latency, u32 dbuf_block_size)
5346{
5347 u32 wm_intermediate_val;
5348 uint_fixed_16_16_t ret;
5349
5350 if (latency == 0)
5351 return FP_16_16_MAX;
5352
5353 wm_intermediate_val = latency * pixel_rate * cpp;
5354 ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size);
5355
5356 if (DISPLAY_VER(dev_priv) >= 10)
5357 ret = add_fixed16_u32(ret, 1);
5358
5359 return ret;
5360}
5361
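/*
 * Watermark method 2: the number of lines (rounded up) that can be
 * transferred during the latency, multiplied by the plane's blocks per line.
 */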
5362static uint_fixed_16_16_t
5363skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
5364 uint_fixed_16_16_t plane_blocks_per_line)
5365{
5366 u32 wm_intermediate_val;
5367 uint_fixed_16_16_t ret;
5368
5369 if (latency == 0)
5370 return FP_16_16_MAX;
5371
5372 wm_intermediate_val = latency * pixel_rate;
5373 wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val,
5374 pipe_htotal * 1000);
5375 ret = mul_u32_fixed16(wm_intermediate_val, plane_blocks_per_line);
5376 return ret;
5377}
5378
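/*
 * Line time in microseconds as a 16.16 fixed point value:
 * crtc_htotal * 1000 / pixel_rate (kHz).
 */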
5379static uint_fixed_16_16_t
5380intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
5381{
5382 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
5383 u32 pixel_rate;
5384 u32 crtc_htotal;
5385 uint_fixed_16_16_t linetime_us;
5386
5387 if (!crtc_state->hw.active)
5388 return u32_to_fixed16(0);
5389
5390 pixel_rate = crtc_state->pixel_rate;
5391
5392 if (drm_WARN_ON(&dev_priv->drm, pixel_rate == 0))
5393 return u32_to_fixed16(0);
5394
5395 crtc_htotal = crtc_state->hw.pipe_mode.crtc_htotal;
5396 linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate);
5397
5398 return linetime_us;
5399}
5400
5401static int
5402skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
5403 int width, const struct drm_format_info *format,
5404 u64 modifier, unsigned int rotation,
5405 u32 plane_pixel_rate, struct skl_wm_params *wp,
5406 int color_plane)
5407{
5408 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5409 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5410 u32 interm_pbpl;
5411
	/* only planar formats have two color planes */
5413 if (color_plane == 1 &&
5414 !intel_format_info_is_yuv_semiplanar(format, modifier)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Non-planar formats have a single plane\n");
5417 return -EINVAL;
5418 }
5419
5420 wp->y_tiled = modifier == I915_FORMAT_MOD_Y_TILED ||
5421 modifier == I915_FORMAT_MOD_Yf_TILED ||
5422 modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
5423 modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
5424 wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
5425 wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
5426 modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
5427 wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier);
5428
5429 wp->width = width;
5430 if (color_plane == 1 && wp->is_planar)
5431 wp->width /= 2;
5432
5433 wp->cpp = format->cpp[color_plane];
5434 wp->plane_pixel_rate = plane_pixel_rate;
5435
5436 if (DISPLAY_VER(dev_priv) >= 11 &&
5437 modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1)
5438 wp->dbuf_block_size = 256;
5439 else
5440 wp->dbuf_block_size = 512;
5441
5442 if (drm_rotation_90_or_270(rotation)) {
5443 switch (wp->cpp) {
5444 case 1:
5445 wp->y_min_scanlines = 16;
5446 break;
5447 case 2:
5448 wp->y_min_scanlines = 8;
5449 break;
5450 case 4:
5451 wp->y_min_scanlines = 4;
5452 break;
5453 default:
5454 MISSING_CASE(wp->cpp);
5455 return -EINVAL;
5456 }
5457 } else {
5458 wp->y_min_scanlines = 4;
5459 }
5460
5461 if (skl_needs_memory_bw_wa(dev_priv))
5462 wp->y_min_scanlines *= 2;
5463
5464 wp->plane_bytes_per_line = wp->width * wp->cpp;
5465 if (wp->y_tiled) {
5466 interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line *
5467 wp->y_min_scanlines,
5468 wp->dbuf_block_size);
5469
5470 if (DISPLAY_VER(dev_priv) >= 10)
5471 interm_pbpl++;
5472
5473 wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
5474 wp->y_min_scanlines);
5475 } else {
5476 interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
5477 wp->dbuf_block_size);
5478
5479 if (!wp->x_tiled || DISPLAY_VER(dev_priv) >= 10)
5480 interm_pbpl++;
5481
5482 wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
5483 }
5484
5485 wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines,
5486 wp->plane_blocks_per_line);
5487
5488 wp->linetime_us = fixed16_to_u32_round_up(
5489 intel_get_linetime_us(crtc_state));
5490
5491 return 0;
5492}
5493
5494static int
5495skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
5496 const struct intel_plane_state *plane_state,
5497 struct skl_wm_params *wp, int color_plane)
5498{
5499 const struct drm_framebuffer *fb = plane_state->hw.fb;
5500 int width;
5501
	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
5507 width = drm_rect_width(&plane_state->uapi.src) >> 16;
5508
5509 return skl_compute_wm_params(crtc_state, width,
5510 fb->format, fb->modifier,
5511 plane_state->hw.rotation,
5512 intel_plane_pixel_rate(crtc_state, plane_state),
5513 wp, color_plane);
5514}
5515
5516static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level)
5517{
5518 if (DISPLAY_VER(dev_priv) >= 10)
5519 return true;
5520
	/* The number of lines is ignored for the level 0 watermark. */
5522 return level > 0;
5523}
5524
5525static int skl_wm_max_lines(struct drm_i915_private *dev_priv)
5526{
5527 if (DISPLAY_VER(dev_priv) >= 13)
5528 return 255;
5529 else
5530 return 31;
5531}
5532
5533static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
5534 int level,
5535 unsigned int latency,
5536 const struct skl_wm_params *wp,
5537 const struct skl_wm_level *result_prev,
				 struct skl_wm_level *result)
5539{
5540 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
5541 uint_fixed_16_16_t method1, method2;
5542 uint_fixed_16_16_t selected_result;
5543 u32 blocks, lines, min_ddb_alloc = 0;
5544
5545 if (latency == 0) {
		/* reject it */
5547 result->min_ddb_alloc = U16_MAX;
5548 return;
5549 }
5550
	/*
	 * WaIncreaseLatencyIPCEnabled: kbl,cfl
	 * Display WA #1141: kbl,cfl
	 */
5555 if ((IS_KABYLAKE(dev_priv) ||
5556 IS_COFFEELAKE(dev_priv) ||
5557 IS_COMETLAKE(dev_priv)) &&
5558 dev_priv->ipc_enabled)
5559 latency += 4;
5560
5561 if (skl_needs_memory_bw_wa(dev_priv) && wp->x_tiled)
5562 latency += 15;
5563
5564 method1 = skl_wm_method1(dev_priv, wp->plane_pixel_rate,
5565 wp->cpp, latency, wp->dbuf_block_size);
5566 method2 = skl_wm_method2(wp->plane_pixel_rate,
5567 crtc_state->hw.pipe_mode.crtc_htotal,
5568 latency,
5569 wp->plane_blocks_per_line);
5570
5571 if (wp->y_tiled) {
5572 selected_result = max_fixed16(method2, wp->y_tile_minimum);
5573 } else {
5574 if ((wp->cpp * crtc_state->hw.pipe_mode.crtc_htotal /
5575 wp->dbuf_block_size < 1) &&
5576 (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
5577 selected_result = method2;
5578 } else if (latency >= wp->linetime_us) {
5579 if (DISPLAY_VER(dev_priv) == 9)
5580 selected_result = min_fixed16(method1, method2);
5581 else
5582 selected_result = method2;
5583 } else {
5584 selected_result = method1;
5585 }
5586 }
5587
5588 blocks = fixed16_to_u32_round_up(selected_result) + 1;
5589 lines = div_round_up_fixed16(selected_result,
5590 wp->plane_blocks_per_line);
5591
5592 if (DISPLAY_VER(dev_priv) == 9) {
		/* Display WA #1125: skl,bxt,kbl */
5594 if (level == 0 && wp->rc_surface)
5595 blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
5596
		/* Display WA #1126: skl,bxt,kbl */
5598 if (level >= 1 && level <= 7) {
5599 if (wp->y_tiled) {
5600 blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
5601 lines += wp->y_min_scanlines;
5602 } else {
5603 blocks++;
5604 }
5605
			/*
			 * Make sure result blocks for higher latency levels
			 * are at least as high as the level below. This is a
			 * requirement the DDB allocation algorithm relies on,
			 * and also covers Display WA #1125 for RC surfaces.
			 */
5612 if (result_prev->blocks > blocks)
5613 blocks = result_prev->blocks;
5614 }
5615 }
5616
5617 if (DISPLAY_VER(dev_priv) >= 11) {
5618 if (wp->y_tiled) {
5619 int extra_lines;
5620
5621 if (lines % wp->y_min_scanlines == 0)
5622 extra_lines = wp->y_min_scanlines;
5623 else
5624 extra_lines = wp->y_min_scanlines * 2 -
5625 lines % wp->y_min_scanlines;
5626
5627 min_ddb_alloc = mul_round_up_u32_fixed16(lines + extra_lines,
5628 wp->plane_blocks_per_line);
5629 } else {
5630 min_ddb_alloc = blocks + DIV_ROUND_UP(blocks, 10);
5631 }
5632 }
5633
5634 if (!skl_wm_has_lines(dev_priv, level))
5635 lines = 0;
5636
5637 if (lines > skl_wm_max_lines(dev_priv)) {
		/* reject it */
5639 result->min_ddb_alloc = U16_MAX;
5640 return;
5641 }
5642
	/*
	 * If lines is valid, assume we can use this watermark level
	 * for now.  We'll come back and disable it after we calculate the
	 * DDB allocation if it turns out we don't actually have enough
	 * blocks to satisfy it.
	 */
5649 result->blocks = blocks;
5650 result->lines = lines;
5651
5652 result->min_ddb_alloc = max(min_ddb_alloc, blocks) + 1;
5653 result->enable = true;
5654
5655 if (DISPLAY_VER(dev_priv) < 12)
5656 result->can_sagv = latency >= dev_priv->sagv_block_time_us;
5657}
5658
5659static void
5660skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
5661 const struct skl_wm_params *wm_params,
5662 struct skl_wm_level *levels)
5663{
5664 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
5665 int level, max_level = ilk_wm_max_level(dev_priv);
5666 struct skl_wm_level *result_prev = &levels[0];
5667
5668 for (level = 0; level <= max_level; level++) {
5669 struct skl_wm_level *result = &levels[level];
5670 unsigned int latency = dev_priv->wm.skl_latency[level];
5671
5672 skl_compute_plane_wm(crtc_state, level, latency,
5673 wm_params, result_prev, result);
5674
5675 result_prev = result;
5676 }
5677}
5678
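/*
 * The SAGV wm0 is the regular level 0 watermark computed with the SAGV
 * block time added on top of the level 0 latency.
 */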
5679static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state,
5680 const struct skl_wm_params *wm_params,
5681 struct skl_plane_wm *plane_wm)
5682{
5683 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
5684 struct skl_wm_level *sagv_wm = &plane_wm->sagv.wm0;
5685 struct skl_wm_level *levels = plane_wm->wm;
5686 unsigned int latency = dev_priv->wm.skl_latency[0] + dev_priv->sagv_block_time_us;
5687
5688 skl_compute_plane_wm(crtc_state, 0, latency,
5689 wm_params, &levels[0],
5690 sagv_wm);
5691}
5692
5693static void skl_compute_transition_wm(struct drm_i915_private *dev_priv,
5694 struct skl_wm_level *trans_wm,
5695 const struct skl_wm_level *wm0,
5696 const struct skl_wm_params *wp)
5697{
5698 u16 trans_min, trans_amount, trans_y_tile_min;
5699 u16 wm0_blocks, trans_offset, blocks;
5700
	/* Transition WMs don't make any sense if IPC is disabled */
5702 if (!dev_priv->ipc_enabled)
5703 return;
5704
	/*
	 * WaDisableTWM:skl,kbl,cfl,bxt
	 * Transition WM are not recommended by HW team for GEN9
	 */
5709 if (DISPLAY_VER(dev_priv) == 9)
5710 return;
5711
5712 if (DISPLAY_VER(dev_priv) >= 11)
5713 trans_min = 4;
5714 else
5715 trans_min = 14;
5716
	/* Display WA #1140: glk,cnl */
5718 if (DISPLAY_VER(dev_priv) == 10)
5719 trans_amount = 0;
5720 else
5721 trans_amount = 10;
5722
5723 trans_offset = trans_min + trans_amount;
5724
	/*
	 * The spec asks for Selected Result Blocks for wm0 (the real,
	 * fixed point value), not Result Blocks (the rounded up integer
	 * we actually store). Result Blocks is Selected Result Blocks
	 * rounded up and incremented by one, so subtract one here to get
	 * back close to the value the spec asks for.
	 */
5735 wm0_blocks = wm0->blocks - 1;
5736
5737 if (wp->y_tiled) {
5738 trans_y_tile_min =
5739 (u16)mul_round_up_u32_fixed16(2, wp->y_tile_minimum);
5740 blocks = max(wm0_blocks, trans_y_tile_min) + trans_offset;
5741 } else {
5742 blocks = wm0_blocks + trans_offset;
5743 }
5744 blocks++;
5745
	/*
	 * Just assume we can enable the transition watermark.  After
	 * computing the DDB we'll come back and disable it if that
	 * assumption turns out to be false.
	 */
5751 trans_wm->blocks = blocks;
5752 trans_wm->min_ddb_alloc = max_t(u16, wm0->min_ddb_alloc, blocks + 1);
5753 trans_wm->enable = true;
5754}
5755
5756static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
5757 const struct intel_plane_state *plane_state,
5758 enum plane_id plane_id, int color_plane)
5759{
5760 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5761 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5762 struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
5763 struct skl_wm_params wm_params;
5764 int ret;
5765
5766 ret = skl_compute_plane_wm_params(crtc_state, plane_state,
5767 &wm_params, color_plane);
5768 if (ret)
5769 return ret;
5770
5771 skl_compute_wm_levels(crtc_state, &wm_params, wm->wm);
5772
5773 skl_compute_transition_wm(dev_priv, &wm->trans_wm,
5774 &wm->wm[0], &wm_params);
5775
5776 if (DISPLAY_VER(dev_priv) >= 12) {
5777 tgl_compute_sagv_wm(crtc_state, &wm_params, wm);
5778
5779 skl_compute_transition_wm(dev_priv, &wm->sagv.trans_wm,
5780 &wm->sagv.wm0, &wm_params);
5781 }
5782
5783 return 0;
5784}
5785
5786static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
5787 const struct intel_plane_state *plane_state,
5788 enum plane_id plane_id)
5789{
5790 struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
5791 struct skl_wm_params wm_params;
5792 int ret;
5793
5794 wm->is_planar = true;
5795
	/* uv plane watermarks must also be validated for NV12/Planar */
5797 ret = skl_compute_plane_wm_params(crtc_state, plane_state,
5798 &wm_params, 1);
5799 if (ret)
5800 return ret;
5801
5802 skl_compute_wm_levels(crtc_state, &wm_params, wm->uv_wm);
5803
5804 return 0;
5805}
5806
5807static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
5808 const struct intel_plane_state *plane_state)
5809{
5810 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
5811 enum plane_id plane_id = plane->id;
5812 struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
5813 const struct drm_framebuffer *fb = plane_state->hw.fb;
5814 int ret;
5815
5816 memset(wm, 0, sizeof(*wm));
5817
5818 if (!intel_wm_plane_visible(crtc_state, plane_state))
5819 return 0;
5820
5821 ret = skl_build_plane_wm_single(crtc_state, plane_state,
5822 plane_id, 0);
5823 if (ret)
5824 return ret;
5825
5826 if (fb->format->is_yuv && fb->format->num_planes > 1) {
5827 ret = skl_build_plane_wm_uv(crtc_state, plane_state,
5828 plane_id);
5829 if (ret)
5830 return ret;
5831 }
5832
5833 return 0;
5834}
5835
5836static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
5837 const struct intel_plane_state *plane_state)
5838{
5839 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
5840 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
5841 enum plane_id plane_id = plane->id;
5842 struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
5843 int ret;
5844
	/* Watermarks calculated on the master plane */
5846 if (plane_state->planar_slave)
5847 return 0;
5848
5849 memset(wm, 0, sizeof(*wm));
5850
5851 if (plane_state->planar_linked_plane) {
5852 const struct drm_framebuffer *fb = plane_state->hw.fb;
5853 enum plane_id y_plane_id = plane_state->planar_linked_plane->id;
5854
5855 drm_WARN_ON(&dev_priv->drm,
5856 !intel_wm_plane_visible(crtc_state, plane_state));
5857 drm_WARN_ON(&dev_priv->drm, !fb->format->is_yuv ||
5858 fb->format->num_planes == 1);
5859
5860 ret = skl_build_plane_wm_single(crtc_state, plane_state,
5861 y_plane_id, 0);
5862 if (ret)
5863 return ret;
5864
5865 ret = skl_build_plane_wm_single(crtc_state, plane_state,
5866 plane_id, 1);
5867 if (ret)
5868 return ret;
5869 } else if (intel_wm_plane_visible(crtc_state, plane_state)) {
5870 ret = skl_build_plane_wm_single(crtc_state, plane_state,
5871 plane_id, 0);
5872 if (ret)
5873 return ret;
5874 }
5875
5876 return 0;
5877}
5878
5879static int skl_build_pipe_wm(struct intel_atomic_state *state,
5880 struct intel_crtc *crtc)
5881{
5882 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5883 struct intel_crtc_state *crtc_state =
5884 intel_atomic_get_new_crtc_state(state, crtc);
5885 const struct intel_plane_state *plane_state;
5886 struct intel_plane *plane;
5887 int ret, i;
5888
5889 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		/*
		 * FIXME should perhaps check {old,new}_plane_crtc->hw.crtc
		 * instead but we don't populate that correctly for NV12 Y
		 * planes so for now hack this.
		 */
5895 if (plane->pipe != crtc->pipe)
5896 continue;
5897
5898 if (DISPLAY_VER(dev_priv) >= 11)
5899 ret = icl_build_plane_wm(crtc_state, plane_state);
5900 else
5901 ret = skl_build_plane_wm(crtc_state, plane_state);
5902 if (ret)
5903 return ret;
5904 }
5905
5906 crtc_state->wm.skl.optimal = crtc_state->wm.skl.raw;
5907
5908 return 0;
5909}
5910
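/*
 * DDB entry registers store the range as start and inclusive end block,
 * so convert from our exclusive end before writing.
 */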
5911static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
5912 i915_reg_t reg,
5913 const struct skl_ddb_entry *entry)
5914{
5915 if (entry->end)
5916 intel_de_write_fw(dev_priv, reg,
5917 (entry->end - 1) << 16 | entry->start);
5918 else
5919 intel_de_write_fw(dev_priv, reg, 0);
5920}
5921
5922static void skl_write_wm_level(struct drm_i915_private *dev_priv,
5923 i915_reg_t reg,
5924 const struct skl_wm_level *level)
5925{
5926 u32 val = 0;
5927
5928 if (level->enable)
5929 val |= PLANE_WM_EN;
5930 if (level->ignore_lines)
5931 val |= PLANE_WM_IGNORE_LINES;
5932 val |= level->blocks;
5933 val |= REG_FIELD_PREP(PLANE_WM_LINES_MASK, level->lines);
5934
5935 intel_de_write_fw(dev_priv, reg, val);
5936}
5937
5938void skl_write_plane_wm(struct intel_plane *plane,
5939 const struct intel_crtc_state *crtc_state)
5940{
5941 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
5942 int level, max_level = ilk_wm_max_level(dev_priv);
5943 enum plane_id plane_id = plane->id;
5944 enum pipe pipe = plane->pipe;
5945 const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
5946 const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
5947 const struct skl_ddb_entry *ddb_y =
5948 &crtc_state->wm.skl.plane_ddb_y[plane_id];
5949 const struct skl_ddb_entry *ddb_uv =
5950 &crtc_state->wm.skl.plane_ddb_uv[plane_id];
5951
5952 for (level = 0; level <= max_level; level++)
5953 skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
5954 skl_plane_wm_level(pipe_wm, plane_id, level));
5955
5956 skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
5957 skl_plane_trans_wm(pipe_wm, plane_id));
5958
5959 if (HAS_HW_SAGV_WM(dev_priv)) {
5960 skl_write_wm_level(dev_priv, PLANE_WM_SAGV(pipe, plane_id),
5961 &wm->sagv.wm0);
5962 skl_write_wm_level(dev_priv, PLANE_WM_SAGV_TRANS(pipe, plane_id),
5963 &wm->sagv.trans_wm);
5964 }
5965
5966 if (DISPLAY_VER(dev_priv) >= 11) {
5967 skl_ddb_entry_write(dev_priv,
5968 PLANE_BUF_CFG(pipe, plane_id), ddb_y);
5969 return;
5970 }
5971
5972 if (wm->is_planar)
5973 swap(ddb_y, ddb_uv);
5974
5975 skl_ddb_entry_write(dev_priv,
5976 PLANE_BUF_CFG(pipe, plane_id), ddb_y);
5977 skl_ddb_entry_write(dev_priv,
5978 PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_uv);
5979}
5980
5981void skl_write_cursor_wm(struct intel_plane *plane,
5982 const struct intel_crtc_state *crtc_state)
5983{
5984 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
5985 int level, max_level = ilk_wm_max_level(dev_priv);
5986 enum plane_id plane_id = plane->id;
5987 enum pipe pipe = plane->pipe;
5988 const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
5989 const struct skl_ddb_entry *ddb =
5990 &crtc_state->wm.skl.plane_ddb_y[plane_id];
5991
5992 for (level = 0; level <= max_level; level++)
5993 skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
5994 skl_plane_wm_level(pipe_wm, plane_id, level));
5995
5996 skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe),
5997 skl_plane_trans_wm(pipe_wm, plane_id));
5998
5999 if (HAS_HW_SAGV_WM(dev_priv)) {
6000 const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
6001
6002 skl_write_wm_level(dev_priv, CUR_WM_SAGV(pipe),
6003 &wm->sagv.wm0);
6004 skl_write_wm_level(dev_priv, CUR_WM_SAGV_TRANS(pipe),
6005 &wm->sagv.trans_wm);
6006 }
6007
6008 skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), ddb);
6009}
6010
6011bool skl_wm_level_equals(const struct skl_wm_level *l1,
6012 const struct skl_wm_level *l2)
6013{
6014 return l1->enable == l2->enable &&
6015 l1->ignore_lines == l2->ignore_lines &&
6016 l1->lines == l2->lines &&
6017 l1->blocks == l2->blocks;
6018}
6019
6020static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv,
6021 const struct skl_plane_wm *wm1,
6022 const struct skl_plane_wm *wm2)
6023{
6024 int level, max_level = ilk_wm_max_level(dev_priv);
6025
6026 for (level = 0; level <= max_level; level++) {
		/*
		 * We don't check uv_wm as the hardware doesn't actually
		 * use it. It only gets used for calculating the required
		 * ddb allocation.
		 */
6032 if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]))
6033 return false;
6034 }
6035
6036 return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm) &&
6037 skl_wm_level_equals(&wm1->sagv.wm0, &wm2->sagv.wm0) &&
6038 skl_wm_level_equals(&wm1->sagv.trans_wm, &wm2->sagv.trans_wm);
6039}
6040
6041static bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
6042 const struct skl_ddb_entry *b)
6043{
6044 return a->start < b->end && b->start < a->end;
6045}
6046
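/* Extend @a to also cover @b; an entry with end == 0 is considered empty */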
6047static void skl_ddb_entry_union(struct skl_ddb_entry *a,
6048 const struct skl_ddb_entry *b)
6049{
6050 if (a->end && b->end) {
6051 a->start = min(a->start, b->start);
6052 a->end = max(a->end, b->end);
6053 } else if (b->end) {
6054 a->start = b->start;
6055 a->end = b->end;
6056 }
6057}
6058
6059bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
6060 const struct skl_ddb_entry *entries,
6061 int num_entries, int ignore_idx)
6062{
6063 int i;
6064
6065 for (i = 0; i < num_entries; i++) {
6066 if (i != ignore_idx &&
6067 skl_ddb_entries_overlap(ddb, &entries[i]))
6068 return true;
6069 }
6070
6071 return false;
6072}
6073
6074static int
6075skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
6076 struct intel_crtc_state *new_crtc_state)
6077{
6078 struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->uapi.state);
6079 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
6080 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6081 struct intel_plane *plane;
6082
6083 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
6084 struct intel_plane_state *plane_state;
6085 enum plane_id plane_id = plane->id;
6086
6087 if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id],
6088 &new_crtc_state->wm.skl.plane_ddb_y[plane_id]) &&
6089 skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_uv[plane_id],
6090 &new_crtc_state->wm.skl.plane_ddb_uv[plane_id]))
6091 continue;
6092
6093 plane_state = intel_atomic_get_plane_state(state, plane);
6094 if (IS_ERR(plane_state))
6095 return PTR_ERR(plane_state);
6096
6097 new_crtc_state->update_planes |= BIT(plane_id);
6098 }
6099
6100 return 0;
6101}
6102
6103static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state)
6104{
6105 struct drm_i915_private *dev_priv = to_i915(dbuf_state->base.state->base.dev);
6106 u8 enabled_slices;
6107 enum pipe pipe;
6108
	/*
	 * FIXME: For now we always enable slice S1 as per
	 * the Bspec display initialization sequence.
	 */
6113 enabled_slices = BIT(DBUF_S1);
6114
6115 for_each_pipe(dev_priv, pipe)
6116 enabled_slices |= dbuf_state->slices[pipe];
6117
6118 return enabled_slices;
6119}
6120
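/*
 * Compute the new global DBuf state: active pipes, MBUS joining, the
 * per-pipe slice masks and DDB ranges, and finally the per-plane DDB
 * allocations within each pipe's range.
 */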
6121static int
6122skl_compute_ddb(struct intel_atomic_state *state)
6123{
6124 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
6125 const struct intel_dbuf_state *old_dbuf_state;
6126 struct intel_dbuf_state *new_dbuf_state = NULL;
6127 const struct intel_crtc_state *old_crtc_state;
6128 struct intel_crtc_state *new_crtc_state;
6129 struct intel_crtc *crtc;
6130 int ret, i;
6131
6132 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6133 new_dbuf_state = intel_atomic_get_dbuf_state(state);
6134 if (IS_ERR(new_dbuf_state))
6135 return PTR_ERR(new_dbuf_state);
6136
6137 old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
6138 break;
6139 }
6140
6141 if (!new_dbuf_state)
6142 return 0;
6143
6144 new_dbuf_state->active_pipes =
6145 intel_calc_active_pipes(state, old_dbuf_state->active_pipes);
6146
6147 if (old_dbuf_state->active_pipes != new_dbuf_state->active_pipes) {
6148 ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
6149 if (ret)
6150 return ret;
6151 }
6152
6153 if (IS_ALDERLAKE_P(dev_priv))
6154 new_dbuf_state->joined_mbus =
6155 adlp_check_mbus_joined(new_dbuf_state->active_pipes);
6156
6157 for_each_intel_crtc(&dev_priv->drm, crtc) {
6158 enum pipe pipe = crtc->pipe;
6159
6160 new_dbuf_state->slices[pipe] =
6161 skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes,
6162 new_dbuf_state->joined_mbus);
6163
6164 if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe])
6165 continue;
6166
6167 ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
6168 if (ret)
6169 return ret;
6170 }
6171
6172 new_dbuf_state->enabled_slices = intel_dbuf_enabled_slices(new_dbuf_state);
6173
6174 if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices ||
6175 old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
6176 ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
6177 if (ret)
6178 return ret;
6179
6180 if (old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
			/* TODO: Implement vblank synchronized MBUS joining changes */
6182 ret = intel_modeset_all_pipes(state);
6183 if (ret)
6184 return ret;
6185 }
6186
6187 drm_dbg_kms(&dev_priv->drm,
6188 "Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n",
6189 old_dbuf_state->enabled_slices,
6190 new_dbuf_state->enabled_slices,
6191 INTEL_INFO(dev_priv)->dbuf.slice_mask,
6192 yesno(old_dbuf_state->joined_mbus),
6193 yesno(new_dbuf_state->joined_mbus));
6194 }
6195
6196 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6197 enum pipe pipe = crtc->pipe;
6198
6199 new_dbuf_state->weight[pipe] = intel_crtc_ddb_weight(new_crtc_state);
6200
6201 if (old_dbuf_state->weight[pipe] == new_dbuf_state->weight[pipe])
6202 continue;
6203
6204 ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
6205 if (ret)
6206 return ret;
6207 }
6208
6209 for_each_intel_crtc(&dev_priv->drm, crtc) {
6210 ret = skl_crtc_allocate_ddb(state, crtc);
6211 if (ret)
6212 return ret;
6213 }
6214
6215 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6216 new_crtc_state, i) {
6217 ret = skl_allocate_plane_ddb(state, crtc);
6218 if (ret)
6219 return ret;
6220
6221 ret = skl_ddb_add_affected_planes(old_crtc_state,
6222 new_crtc_state);
6223 if (ret)
6224 return ret;
6225 }
6226
6227 return 0;
6228}
6229
6230static char enast(bool enable)
6231{
6232 return enable ? '*' : ' ';
6233}
6234
6235static void
6236skl_print_wm_changes(struct intel_atomic_state *state)
6237{
6238 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
6239 const struct intel_crtc_state *old_crtc_state;
6240 const struct intel_crtc_state *new_crtc_state;
6241 struct intel_plane *plane;
6242 struct intel_crtc *crtc;
6243 int i;
6244
6245 if (!drm_debug_enabled(DRM_UT_KMS))
6246 return;
6247
6248 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6249 new_crtc_state, i) {
6250 const struct skl_pipe_wm *old_pipe_wm, *new_pipe_wm;
6251
6252 old_pipe_wm = &old_crtc_state->wm.skl.optimal;
6253 new_pipe_wm = &new_crtc_state->wm.skl.optimal;
6254
6255 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
6256 enum plane_id plane_id = plane->id;
6257 const struct skl_ddb_entry *old, *new;
6258
6259 old = &old_crtc_state->wm.skl.plane_ddb_y[plane_id];
6260 new = &new_crtc_state->wm.skl.plane_ddb_y[plane_id];
6261
6262 if (skl_ddb_entry_equal(old, new))
6263 continue;
6264
6265 drm_dbg_kms(&dev_priv->drm,
6266 "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
6267 plane->base.base.id, plane->base.name,
6268 old->start, old->end, new->start, new->end,
6269 skl_ddb_entry_size(old), skl_ddb_entry_size(new));
6270 }
6271
6272 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
6273 enum plane_id plane_id = plane->id;
6274 const struct skl_plane_wm *old_wm, *new_wm;
6275
6276 old_wm = &old_pipe_wm->planes[plane_id];
6277 new_wm = &new_pipe_wm->planes[plane_id];
6278
6279 if (skl_plane_wm_equals(dev_priv, old_wm, new_wm))
6280 continue;
6281
6282 drm_dbg_kms(&dev_priv->drm,
6283 "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm"
6284 " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n",
6285 plane->base.base.id, plane->base.name,
6286 enast(old_wm->wm[0].enable), enast(old_wm->wm[1].enable),
6287 enast(old_wm->wm[2].enable), enast(old_wm->wm[3].enable),
6288 enast(old_wm->wm[4].enable), enast(old_wm->wm[5].enable),
6289 enast(old_wm->wm[6].enable), enast(old_wm->wm[7].enable),
6290 enast(old_wm->trans_wm.enable),
6291 enast(old_wm->sagv.wm0.enable),
6292 enast(old_wm->sagv.trans_wm.enable),
6293 enast(new_wm->wm[0].enable), enast(new_wm->wm[1].enable),
6294 enast(new_wm->wm[2].enable), enast(new_wm->wm[3].enable),
6295 enast(new_wm->wm[4].enable), enast(new_wm->wm[5].enable),
6296 enast(new_wm->wm[6].enable), enast(new_wm->wm[7].enable),
6297 enast(new_wm->trans_wm.enable),
6298 enast(new_wm->sagv.wm0.enable),
6299 enast(new_wm->sagv.trans_wm.enable));
6300
6301 drm_dbg_kms(&dev_priv->drm,
6302 "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d"
6303 " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n",
6304 plane->base.base.id, plane->base.name,
6305 enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].lines,
6306 enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].lines,
6307 enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].lines,
6308 enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].lines,
6309 enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].lines,
6310 enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].lines,
6311 enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].lines,
6312 enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].lines,
6313 enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.lines,
6314 enast(old_wm->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines,
6315 enast(old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm.lines,
6316 enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].lines,
6317 enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].lines,
6318 enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].lines,
6319 enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].lines,
6320 enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].lines,
6321 enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].lines,
6322 enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].lines,
6323 enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].lines,
6324 enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.lines,
6325 enast(new_wm->sagv.wm0.ignore_lines), new_wm->sagv.wm0.lines,
6326 enast(new_wm->sagv.trans_wm.ignore_lines), new_wm->sagv.trans_wm.lines);
6327
6328 drm_dbg_kms(&dev_priv->drm,
6329 "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
6330 " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
6331 plane->base.base.id, plane->base.name,
6332 old_wm->wm[0].blocks, old_wm->wm[1].blocks,
6333 old_wm->wm[2].blocks, old_wm->wm[3].blocks,
6334 old_wm->wm[4].blocks, old_wm->wm[5].blocks,
6335 old_wm->wm[6].blocks, old_wm->wm[7].blocks,
6336 old_wm->trans_wm.blocks,
6337 old_wm->sagv.wm0.blocks,
6338 old_wm->sagv.trans_wm.blocks,
6339 new_wm->wm[0].blocks, new_wm->wm[1].blocks,
6340 new_wm->wm[2].blocks, new_wm->wm[3].blocks,
6341 new_wm->wm[4].blocks, new_wm->wm[5].blocks,
6342 new_wm->wm[6].blocks, new_wm->wm[7].blocks,
6343 new_wm->trans_wm.blocks,
6344 new_wm->sagv.wm0.blocks,
6345 new_wm->sagv.trans_wm.blocks);
6346
6347 drm_dbg_kms(&dev_priv->drm,
6348 "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
6349 " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
6350 plane->base.base.id, plane->base.name,
6351 old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
6352 old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
6353 old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
6354 old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
6355 old_wm->trans_wm.min_ddb_alloc,
6356 old_wm->sagv.wm0.min_ddb_alloc,
6357 old_wm->sagv.trans_wm.min_ddb_alloc,
6358 new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
6359 new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
6360 new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
6361 new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
6362 new_wm->trans_wm.min_ddb_alloc,
6363 new_wm->sagv.wm0.min_ddb_alloc,
6364 new_wm->sagv.trans_wm.min_ddb_alloc);
6365 }
6366 }
6367}
6368
static bool skl_plane_selected_wm_equals(struct intel_plane *plane,
					 const struct skl_pipe_wm *old_pipe_wm,
					 const struct skl_pipe_wm *new_pipe_wm)
{
	struct drm_i915_private *i915 = to_i915(plane->base.dev);
	int level, max_level = ilk_wm_max_level(i915);

	for (level = 0; level <= max_level; level++) {
		/*
		 * We don't check uv_wm here as the hardware doesn't
		 * actually use it - it only factors into the DDB
		 * allocation, which is checked separately.
		 */
		if (!skl_wm_level_equals(skl_plane_wm_level(old_pipe_wm, plane->id, level),
					 skl_plane_wm_level(new_pipe_wm, plane->id, level)))
			return false;
	}

	if (HAS_HW_SAGV_WM(i915)) {
		const struct skl_plane_wm *old_wm = &old_pipe_wm->planes[plane->id];
		const struct skl_plane_wm *new_wm = &new_pipe_wm->planes[plane->id];

		if (!skl_wm_level_equals(&old_wm->sagv.wm0, &new_wm->sagv.wm0) ||
		    !skl_wm_level_equals(&old_wm->sagv.trans_wm, &new_wm->sagv.trans_wm))
			return false;
	}

	return skl_wm_level_equals(skl_plane_trans_wm(old_pipe_wm, plane->id),
				   skl_plane_trans_wm(new_pipe_wm, plane->id));
}
6399
/*
 * To make sure the cursor watermark registers are always consistent
 * with our computed state the following scenario needs special
 * treatment:
 *
 * 1. enable cursor
 * 2. move cursor entirely offscreen
 * 3. disable cursor
 *
 * Step 2. does call .disable_plane() but does not zero the watermarks
 * (since we consider an offscreen cursor still active for the purposes
 * of watermarks). Step 3. would not normally call .disable_plane()
 * because the actual plane visibility isn't changing, and we don't
 * deallocate the cursor ddb until the pipe gets disabled. So we must
 * force step 3. to call .disable_plane() to update the watermark
 * registers properly.
 *
 * Other planes do not suffer from this issue as their watermarks are
 * calculated based on the actual plane visibility. The only time this
 * can trigger for the other planes is during the initial readout as the
 * default value of the watermarks registers is not zero.
 */
static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		struct intel_plane_state *plane_state;
		enum plane_id plane_id = plane->id;

		/*
		 * Force a full wm update for every plane on modeset.
		 * Otherwise we only need to pull a plane into the
		 * state when the watermarks it would actually be
		 * programmed with have changed.
		 */
		if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi) &&
		    skl_plane_selected_wm_equals(plane,
						 &old_crtc_state->wm.skl.optimal,
						 &new_crtc_state->wm.skl.optimal))
			continue;

		plane_state = intel_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);

		new_crtc_state->update_planes |= BIT(plane_id);
	}

	return 0;
}
6459
6460static int
6461skl_compute_wm(struct intel_atomic_state *state)
6462{
6463 struct intel_crtc *crtc;
6464 struct intel_crtc_state *new_crtc_state;
6465 int ret, i;
6466
6467 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6468 ret = skl_build_pipe_wm(state, crtc);
6469 if (ret)
6470 return ret;
6471 }
6472
6473 ret = skl_compute_ddb(state);
6474 if (ret)
6475 return ret;
6476
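	/* Work out which pipes can tolerate SAGV with the new watermarks */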
6477 ret = intel_compute_sagv_mask(state);
6478 if (ret)
6479 return ret;
6480
	/*
	 * skl_compute_ddb() will have adjusted the final watermarks
	 * based on how much ddb is available. Now we can actually
	 * check if the final watermarks changed.
	 */
6486 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6487 ret = skl_wm_add_affected_planes(state, crtc);
6488 if (ret)
6489 return ret;
6490 }
6491
6492 skl_print_wm_changes(state);
6493
6494 return 0;
6495}
6496
6497static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
6498 struct intel_wm_config *config)
6499{
6500 struct intel_crtc *crtc;
6501
	/* Compute the currently _active_ config */
6503 for_each_intel_crtc(&dev_priv->drm, crtc) {
6504 const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
6505
6506 if (!wm->pipe_enabled)
6507 continue;
6508
6509 config->sprites_enabled |= wm->sprites_enabled;
6510 config->sprites_scaled |= wm->sprites_scaled;
6511 config->num_pipes_active++;
6512 }
6513}
6514
6515static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
6516{
6517 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
6518 struct ilk_wm_maximums max;
6519 struct intel_wm_config config = {};
6520 struct ilk_wm_values results = {};
6521 enum intel_ddb_partitioning partitioning;
6522
6523 ilk_compute_wm_config(dev_priv, &config);
6524
6525 ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_1_2, &max);
6526 ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2);
6527
	/* 5/6 split only in single pipe config on IVB+ */
6529 if (DISPLAY_VER(dev_priv) >= 7 &&
6530 config.num_pipes_active == 1 && config.sprites_enabled) {
6531 ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max);
6532 ilk_wm_merge(dev_priv, &config, &max, &lp_wm_5_6);
6533
6534 best_lp_wm = ilk_find_best_result(dev_priv, &lp_wm_1_2, &lp_wm_5_6);
6535 } else {
6536 best_lp_wm = &lp_wm_1_2;
6537 }
6538
6539 partitioning = (best_lp_wm == &lp_wm_1_2) ?
6540 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
6541
6542 ilk_compute_wm_results(dev_priv, best_lp_wm, partitioning, &results);
6543
6544 ilk_write_wm_values(dev_priv, &results);
6545}
6546
6547static void ilk_initial_watermarks(struct intel_atomic_state *state,
6548 struct intel_crtc *crtc)
6549{
6550 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6551 const struct intel_crtc_state *crtc_state =
6552 intel_atomic_get_new_crtc_state(state, crtc);
6553
6554 mutex_lock(&dev_priv->wm.wm_mutex);
6555 crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate;
6556 ilk_program_watermarks(dev_priv);
6557 mutex_unlock(&dev_priv->wm.wm_mutex);
6558}
6559
6560static void ilk_optimize_watermarks(struct intel_atomic_state *state,
6561 struct intel_crtc *crtc)
6562{
6563 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6564 const struct intel_crtc_state *crtc_state =
6565 intel_atomic_get_new_crtc_state(state, crtc);
6566
6567 if (!crtc_state->wm.need_postvbl_update)
6568 return;
6569
6570 mutex_lock(&dev_priv->wm.wm_mutex);
6571 crtc->wm.active.ilk = crtc_state->wm.ilk.optimal;
6572 ilk_program_watermarks(dev_priv);
6573 mutex_unlock(&dev_priv->wm.wm_mutex);
6574}
6575
6576static void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level)
6577{
6578 level->enable = val & PLANE_WM_EN;
6579 level->ignore_lines = val & PLANE_WM_IGNORE_LINES;
6580 level->blocks = val & PLANE_WM_BLOCKS_MASK;
6581 level->lines = REG_FIELD_GET(PLANE_WM_LINES_MASK, val);
6582}
6583
6584void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
6585 struct skl_pipe_wm *out)
6586{
6587 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6588 enum pipe pipe = crtc->pipe;
6589 int level, max_level;
6590 enum plane_id plane_id;
6591 u32 val;
6592
6593 max_level = ilk_wm_max_level(dev_priv);
6594
6595 for_each_plane_id_on_crtc(crtc, plane_id) {
6596 struct skl_plane_wm *wm = &out->planes[plane_id];
6597
6598 for (level = 0; level <= max_level; level++) {
6599 if (plane_id != PLANE_CURSOR)
6600 val = intel_uncore_read(&dev_priv->uncore, PLANE_WM(pipe, plane_id, level));
6601 else
6602 val = intel_uncore_read(&dev_priv->uncore, CUR_WM(pipe, level));
6603
6604 skl_wm_level_from_reg_val(val, &wm->wm[level]);
6605 }
6606
6607 if (plane_id != PLANE_CURSOR)
6608 val = intel_uncore_read(&dev_priv->uncore, PLANE_WM_TRANS(pipe, plane_id));
6609 else
6610 val = intel_uncore_read(&dev_priv->uncore, CUR_WM_TRANS(pipe));
6611
6612 skl_wm_level_from_reg_val(val, &wm->trans_wm);
6613
6614 if (HAS_HW_SAGV_WM(dev_priv)) {
6615 if (plane_id != PLANE_CURSOR)
6616 val = intel_uncore_read(&dev_priv->uncore,
6617 PLANE_WM_SAGV(pipe, plane_id));
6618 else
6619 val = intel_uncore_read(&dev_priv->uncore,
6620 CUR_WM_SAGV(pipe));
6621
6622 skl_wm_level_from_reg_val(val, &wm->sagv.wm0);
6623
6624 if (plane_id != PLANE_CURSOR)
6625 val = intel_uncore_read(&dev_priv->uncore,
6626 PLANE_WM_SAGV_TRANS(pipe, plane_id));
6627 else
6628 val = intel_uncore_read(&dev_priv->uncore,
6629 CUR_WM_SAGV_TRANS(pipe));
6630
6631 skl_wm_level_from_reg_val(val, &wm->sagv.trans_wm);
6632 } else if (DISPLAY_VER(dev_priv) >= 12) {
6633 wm->sagv.wm0 = wm->wm[0];
6634 wm->sagv.trans_wm = wm->trans_wm;
6635 }
6636 }
6637}
6638
6639void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
6640{
6641 struct intel_dbuf_state *dbuf_state =
6642 to_intel_dbuf_state(dev_priv->dbuf.obj.state);
6643 struct intel_crtc *crtc;
6644
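	/* On ADL-P the MBUS can be joined across the DBUF slices; read back that state */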
6645 if (IS_ALDERLAKE_P(dev_priv))
6646 dbuf_state->joined_mbus = intel_de_read(dev_priv, MBUS_CTL) & MBUS_JOIN;
6647
6648 for_each_intel_crtc(&dev_priv->drm, crtc) {
6649 struct intel_crtc_state *crtc_state =
6650 to_intel_crtc_state(crtc->base.state);
6651 enum pipe pipe = crtc->pipe;
6652 unsigned int mbus_offset;
6653 enum plane_id plane_id;
6654 u8 slices;
6655
6656 skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
6657 crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal;
6658
6659 memset(&dbuf_state->ddb[pipe], 0, sizeof(dbuf_state->ddb[pipe]));
6660
6661 for_each_plane_id_on_crtc(crtc, plane_id) {
6662 struct skl_ddb_entry *ddb_y =
6663 &crtc_state->wm.skl.plane_ddb_y[plane_id];
6664 struct skl_ddb_entry *ddb_uv =
6665 &crtc_state->wm.skl.plane_ddb_uv[plane_id];
6666
6667 skl_ddb_get_hw_plane_state(dev_priv, crtc->pipe,
6668 plane_id, ddb_y, ddb_uv);
6669
6670 skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_y);
6671 skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_uv);
6672 }
6673
6674 dbuf_state->weight[pipe] = intel_crtc_ddb_weight(crtc_state);
6675
		/*
		 * Used for checking overlaps, so we need absolute
		 * offsets instead of MBUS relative offsets.
		 */
6680 slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
6681 dbuf_state->joined_mbus);
6682 mbus_offset = mbus_ddb_offset(dev_priv, slices);
6683 crtc_state->wm.skl.ddb.start = mbus_offset + dbuf_state->ddb[pipe].start;
6684 crtc_state->wm.skl.ddb.end = mbus_offset + dbuf_state->ddb[pipe].end;
6685
		/* The slices actually used by the planes on the pipe */
6687 dbuf_state->slices[pipe] =
6688 skl_ddb_dbuf_slice_mask(dev_priv, &crtc_state->wm.skl.ddb);
6689
6690 drm_dbg_kms(&dev_priv->drm,
6691 "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n",
6692 crtc->base.base.id, crtc->base.name,
6693 dbuf_state->slices[pipe], dbuf_state->ddb[pipe].start,
6694 dbuf_state->ddb[pipe].end, dbuf_state->active_pipes,
6695 yesno(dbuf_state->joined_mbus));
6696 }
6697
6698 dbuf_state->enabled_slices = dev_priv->dbuf.enabled_slices;
6699}
6700
6701static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
6702{
6703 const struct intel_dbuf_state *dbuf_state =
6704 to_intel_dbuf_state(i915->dbuf.obj.state);
6705 struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
6706 struct intel_crtc *crtc;
6707
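	/* Gather each pipe's ddb allocation so we can check for overlaps */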
6708 for_each_intel_crtc(&i915->drm, crtc) {
6709 const struct intel_crtc_state *crtc_state =
6710 to_intel_crtc_state(crtc->base.state);
6711
6712 entries[crtc->pipe] = crtc_state->wm.skl.ddb;
6713 }
6714
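	/* Verify each pipe only uses its assigned slices and doesn't overlap the others */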
6715 for_each_intel_crtc(&i915->drm, crtc) {
6716 const struct intel_crtc_state *crtc_state =
6717 to_intel_crtc_state(crtc->base.state);
6718 u8 slices;
6719
6720 slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
6721 dbuf_state->joined_mbus);
6722 if (dbuf_state->slices[crtc->pipe] & ~slices)
6723 return true;
6724
6725 if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.ddb, entries,
6726 I915_MAX_PIPES, crtc->pipe))
6727 return true;
6728 }
6729
6730 return false;
6731}
6732
6733void skl_wm_sanitize(struct drm_i915_private *i915)
6734{
6735 struct intel_crtc *crtc;
6736
	/*
	 * On TGL/RKL (at least) the BIOS likes to assign the planes
	 * to the wrong dbuf slices. This will cause an infinite loop
	 * in skl_commit_modeset_enables() as it can't find a way to
	 * transition between the old bogus dbuf layout to the new
	 * proper dbuf layout without DC states being in the wrong
	 * place at the wrong time. So whenever the DBUF configuration
	 * left behind by the BIOS looks inconsistent we simply
	 * disable all the planes and start from a clean slate.
	 */
6748 if (!skl_dbuf_is_misconfigured(i915))
6749 return;
6750
6751 drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n");
6752
6753 for_each_intel_crtc(&i915->drm, crtc) {
6754 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
6755 const struct intel_plane_state *plane_state =
6756 to_intel_plane_state(plane->base.state);
6757 struct intel_crtc_state *crtc_state =
6758 to_intel_crtc_state(crtc->base.state);
6759
6760 if (plane_state->uapi.visible)
6761 intel_plane_disable_noatomic(crtc, plane);
6762
6763 drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0);
6764
6765 memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb));
6766 }
6767}
6768
6769static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
6770{
6771 struct drm_device *dev = crtc->base.dev;
6772 struct drm_i915_private *dev_priv = to_i915(dev);
6773 struct ilk_wm_values *hw = &dev_priv->wm.hw;
6774 struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
6775 struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal;
6776 enum pipe pipe = crtc->pipe;
6777
6778 hw->wm_pipe[pipe] = intel_uncore_read(&dev_priv->uncore, WM0_PIPE_ILK(pipe));
6779
6780 memset(active, 0, sizeof(*active));
6781
6782 active->pipe_enabled = crtc->active;
6783
6784 if (active->pipe_enabled) {
6785 u32 tmp = hw->wm_pipe[pipe];
6786
		/*
		 * For active pipes LP0 watermark is marked as
		 * enabled, and LP1+ watermarks are marked as
		 * disabled since we can't really reverse compute
		 * them in case multiple pipes are active.
		 */
6793 active->wm[0].enable = true;
6794 active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
6795 active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
6796 active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
6797 } else {
6798 int level, max_level = ilk_wm_max_level(dev_priv);
6799
		/*
		 * For inactive pipes, all watermark levels
		 * should be marked as enabled but zeroed,
		 * which is what we'd compute them to.
		 */
6805 for (level = 0; level <= max_level; level++)
6806 active->wm[level].enable = true;
6807 }
6808
6809 crtc->wm.active.ilk = *active;
6810}
6811
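/* Extract one watermark field from a DSPFW register value */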
6812#define _FW_WM(value, plane) \
6813 (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
6814#define _FW_WM_VLV(value, plane) \
6815 (((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
6816
6817static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
6818 struct g4x_wm_values *wm)
6819{
6820 u32 tmp;
6821
6822 tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1);
6823 wm->sr.plane = _FW_WM(tmp, SR);
6824 wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
6825 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB);
6826 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA);
6827
6828 tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2);
6829 wm->fbc_en = tmp & DSPFW_FBC_SR_EN;
6830 wm->sr.fbc = _FW_WM(tmp, FBC_SR);
6831 wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR);
6832 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEB);
6833 wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
6834 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA);
6835
6836 tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3);
6837 wm->hpll_en = tmp & DSPFW_HPLL_SR_EN;
6838 wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
6839 wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR);
6840 wm->hpll.plane = _FW_WM(tmp, HPLL_SR);
6841}
6842
6843static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
6844 struct vlv_wm_values *wm)
6845{
6846 enum pipe pipe;
6847 u32 tmp;
6848
6849 for_each_pipe(dev_priv, pipe) {
6850 tmp = intel_uncore_read(&dev_priv->uncore, VLV_DDL(pipe));
6851
6852 wm->ddl[pipe].plane[PLANE_PRIMARY] =
6853 (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
6854 wm->ddl[pipe].plane[PLANE_CURSOR] =
6855 (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
6856 wm->ddl[pipe].plane[PLANE_SPRITE0] =
6857 (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
6858 wm->ddl[pipe].plane[PLANE_SPRITE1] =
6859 (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
6860 }
6861
6862 tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1);
6863 wm->sr.plane = _FW_WM(tmp, SR);
6864 wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
6865 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
6866 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);
6867
6868 tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2);
6869 wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
6870 wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
6871 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);
6872
6873 tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3);
6874 wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
6875
6876 if (IS_CHERRYVIEW(dev_priv)) {
6877 tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7_CHV);
6878 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
6879 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
6880
6881 tmp = intel_uncore_read(&dev_priv->uncore, DSPFW8_CHV);
6882 wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
6883 wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);
6884
6885 tmp = intel_uncore_read(&dev_priv->uncore, DSPFW9_CHV);
6886 wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
6887 wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);
6888
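		/* DSPHOWM holds the high bits of the watermark values */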
6889 tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
6890 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
6891 wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
6892 wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
6893 wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEC_HI) << 8;
6894 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
6895 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
6896 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
6897 wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
6898 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
6899 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
6900 } else {
6901 tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7);
6902 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
6903 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
6904
6905 tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
6906 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
6907 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
6908 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
6909 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
6910 wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
6911 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
6912 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
6913 }
6914}
6915
6916#undef _FW_WM
6917#undef _FW_WM_VLV
6918
6919void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
6920{
6921 struct g4x_wm_values *wm = &dev_priv->wm.g4x;
6922 struct intel_crtc *crtc;
6923
6924 g4x_read_wm_values(dev_priv, wm);
6925
6926 wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
6927
6928 for_each_intel_crtc(&dev_priv->drm, crtc) {
6929 struct intel_crtc_state *crtc_state =
6930 to_intel_crtc_state(crtc->base.state);
6931 struct g4x_wm_state *active = &crtc->wm.active.g4x;
6932 struct g4x_pipe_wm *raw;
6933 enum pipe pipe = crtc->pipe;
6934 enum plane_id plane_id;
6935 int level, max_level;
6936
6937 active->cxsr = wm->cxsr;
6938 active->hpll_en = wm->hpll_en;
6939 active->fbc_en = wm->fbc_en;
6940
6941 active->sr = wm->sr;
6942 active->hpll = wm->hpll;
6943
6944 for_each_plane_id_on_crtc(crtc, plane_id) {
6945 active->wm.plane[plane_id] =
6946 wm->pipe[pipe].plane[plane_id];
6947 }
6948
6949 if (wm->cxsr && wm->hpll_en)
6950 max_level = G4X_WM_LEVEL_HPLL;
6951 else if (wm->cxsr)
6952 max_level = G4X_WM_LEVEL_SR;
6953 else
6954 max_level = G4X_WM_LEVEL_NORMAL;
6955
6956 level = G4X_WM_LEVEL_NORMAL;
6957 raw = &crtc_state->wm.g4x.raw[level];
6958 for_each_plane_id_on_crtc(crtc, plane_id)
6959 raw->plane[plane_id] = active->wm.plane[plane_id];
6960
6961 level = G4X_WM_LEVEL_SR;
6962 if (level > max_level)
6963 goto out;
6964
6965 raw = &crtc_state->wm.g4x.raw[level];
6966 raw->plane[PLANE_PRIMARY] = active->sr.plane;
6967 raw->plane[PLANE_CURSOR] = active->sr.cursor;
6968 raw->plane[PLANE_SPRITE0] = 0;
6969 raw->fbc = active->sr.fbc;
6970
6971 level = G4X_WM_LEVEL_HPLL;
6972 if (level > max_level)
6973 goto out;
6974
6975 raw = &crtc_state->wm.g4x.raw[level];
6976 raw->plane[PLANE_PRIMARY] = active->hpll.plane;
6977 raw->plane[PLANE_CURSOR] = active->hpll.cursor;
6978 raw->plane[PLANE_SPRITE0] = 0;
6979 raw->fbc = active->hpll.fbc;
6980
6981 level++;
6982 out:
6983 for_each_plane_id_on_crtc(crtc, plane_id)
6984 g4x_raw_plane_wm_set(crtc_state, level,
6985 plane_id, USHRT_MAX);
6986 g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);
6987
6988 crtc_state->wm.g4x.optimal = *active;
6989 crtc_state->wm.g4x.intermediate = *active;
6990
6991 drm_dbg_kms(&dev_priv->drm,
6992 "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n",
6993 pipe_name(pipe),
6994 wm->pipe[pipe].plane[PLANE_PRIMARY],
6995 wm->pipe[pipe].plane[PLANE_CURSOR],
6996 wm->pipe[pipe].plane[PLANE_SPRITE0]);
6997 }
6998
6999 drm_dbg_kms(&dev_priv->drm,
7000 "Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n",
7001 wm->sr.plane, wm->sr.cursor, wm->sr.fbc);
7002 drm_dbg_kms(&dev_priv->drm,
7003 "Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n",
7004 wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
7005 drm_dbg_kms(&dev_priv->drm, "Initial SR=%s HPLL=%s FBC=%s\n",
7006 yesno(wm->cxsr), yesno(wm->hpll_en), yesno(wm->fbc_en));
7007}
7008
7009void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
7010{
7011 struct intel_plane *plane;
7012 struct intel_crtc *crtc;
7013
7014 mutex_lock(&dev_priv->wm.wm_mutex);
7015
7016 for_each_intel_plane(&dev_priv->drm, plane) {
7017 struct intel_crtc *crtc =
7018 intel_crtc_for_pipe(dev_priv, plane->pipe);
7019 struct intel_crtc_state *crtc_state =
7020 to_intel_crtc_state(crtc->base.state);
7021 struct intel_plane_state *plane_state =
7022 to_intel_plane_state(plane->base.state);
7023 struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
7024 enum plane_id plane_id = plane->id;
7025 int level;
7026
7027 if (plane_state->uapi.visible)
7028 continue;
7029
7030 for (level = 0; level < 3; level++) {
7031 struct g4x_pipe_wm *raw =
7032 &crtc_state->wm.g4x.raw[level];
7033
7034 raw->plane[plane_id] = 0;
7035 wm_state->wm.plane[plane_id] = 0;
7036 }
7037
7038 if (plane_id == PLANE_PRIMARY) {
7039 for (level = 0; level < 3; level++) {
7040 struct g4x_pipe_wm *raw =
7041 &crtc_state->wm.g4x.raw[level];
7042 raw->fbc = 0;
7043 }
7044
7045 wm_state->sr.fbc = 0;
7046 wm_state->hpll.fbc = 0;
7047 wm_state->fbc_en = false;
7048 }
7049 }
7050
7051 for_each_intel_crtc(&dev_priv->drm, crtc) {
7052 struct intel_crtc_state *crtc_state =
7053 to_intel_crtc_state(crtc->base.state);
7054
7055 crtc_state->wm.g4x.intermediate =
7056 crtc_state->wm.g4x.optimal;
7057 crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
7058 }
7059
7060 g4x_program_watermarks(dev_priv);
7061
7062 mutex_unlock(&dev_priv->wm.wm_mutex);
7063}
7064
7065void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
7066{
7067 struct vlv_wm_values *wm = &dev_priv->wm.vlv;
7068 struct intel_crtc *crtc;
7069 u32 val;
7070
7071 vlv_read_wm_values(dev_priv, wm);
7072
7073 wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
7074 wm->level = VLV_WM_LEVEL_PM2;
7075
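	/* The PM5 and DDR DVFS watermark levels only exist on CHV */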
7076 if (IS_CHERRYVIEW(dev_priv)) {
7077 vlv_punit_get(dev_priv);
7078
7079 val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
7080 if (val & DSP_MAXFIFO_PM5_ENABLE)
7081 wm->level = VLV_WM_LEVEL_PM5;
7082
		/*
		 * If DDR DVFS is disabled in the BIOS, Punit
		 * will never ack the request. So if that happens
		 * assume we don't have to enable/disable DDR DVFS
		 * dynamically. To test that just set the REQ_ACK
		 * bit to poke the Punit, but don't change the
		 * HIGH/LOW bits so that we don't actually change
		 * the current state.
		 */
7092 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
7093 val |= FORCE_DDR_FREQ_REQ_ACK;
7094 vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
7095
7096 if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
7097 FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
7098 drm_dbg_kms(&dev_priv->drm,
7099 "Punit not acking DDR DVFS request, "
7100 "assuming DDR DVFS is disabled\n");
7101 dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
7102 } else {
7103 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
7104 if ((val & FORCE_DDR_HIGH_FREQ) == 0)
7105 wm->level = VLV_WM_LEVEL_DDR_DVFS;
7106 }
7107
7108 vlv_punit_put(dev_priv);
7109 }
7110
7111 for_each_intel_crtc(&dev_priv->drm, crtc) {
7112 struct intel_crtc_state *crtc_state =
7113 to_intel_crtc_state(crtc->base.state);
7114 struct vlv_wm_state *active = &crtc->wm.active.vlv;
7115 const struct vlv_fifo_state *fifo_state =
7116 &crtc_state->wm.vlv.fifo_state;
7117 enum pipe pipe = crtc->pipe;
7118 enum plane_id plane_id;
7119 int level;
7120
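		/* Read back the current DSPARB FIFO split for this pipe */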
7121 vlv_get_fifo_size(crtc_state);
7122
7123 active->num_levels = wm->level + 1;
7124 active->cxsr = wm->cxsr;
7125
7126 for (level = 0; level < active->num_levels; level++) {
7127 struct g4x_pipe_wm *raw =
7128 &crtc_state->wm.vlv.raw[level];
7129
7130 active->sr[level].plane = wm->sr.plane;
7131 active->sr[level].cursor = wm->sr.cursor;
7132
7133 for_each_plane_id_on_crtc(crtc, plane_id) {
7134 active->wm[level].plane[plane_id] =
7135 wm->pipe[pipe].plane[plane_id];
7136
7137 raw->plane[plane_id] =
7138 vlv_invert_wm_value(active->wm[level].plane[plane_id],
7139 fifo_state->plane[plane_id]);
7140 }
7141 }
7142
7143 for_each_plane_id_on_crtc(crtc, plane_id)
7144 vlv_raw_plane_wm_set(crtc_state, level,
7145 plane_id, USHRT_MAX);
7146 vlv_invalidate_wms(crtc, active, level);
7147
7148 crtc_state->wm.vlv.optimal = *active;
7149 crtc_state->wm.vlv.intermediate = *active;
7150
7151 drm_dbg_kms(&dev_priv->drm,
7152 "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
7153 pipe_name(pipe),
7154 wm->pipe[pipe].plane[PLANE_PRIMARY],
7155 wm->pipe[pipe].plane[PLANE_CURSOR],
7156 wm->pipe[pipe].plane[PLANE_SPRITE0],
7157 wm->pipe[pipe].plane[PLANE_SPRITE1]);
7158 }
7159
7160 drm_dbg_kms(&dev_priv->drm,
7161 "Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
7162 wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
7163}
7164
7165void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
7166{
7167 struct intel_plane *plane;
7168 struct intel_crtc *crtc;
7169
7170 mutex_lock(&dev_priv->wm.wm_mutex);
7171
7172 for_each_intel_plane(&dev_priv->drm, plane) {
7173 struct intel_crtc *crtc =
7174 intel_crtc_for_pipe(dev_priv, plane->pipe);
7175 struct intel_crtc_state *crtc_state =
7176 to_intel_crtc_state(crtc->base.state);
7177 struct intel_plane_state *plane_state =
7178 to_intel_plane_state(plane->base.state);
7179 struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
7180 const struct vlv_fifo_state *fifo_state =
7181 &crtc_state->wm.vlv.fifo_state;
7182 enum plane_id plane_id = plane->id;
7183 int level;
7184
7185 if (plane_state->uapi.visible)
7186 continue;
7187
7188 for (level = 0; level < wm_state->num_levels; level++) {
7189 struct g4x_pipe_wm *raw =
7190 &crtc_state->wm.vlv.raw[level];
7191
7192 raw->plane[plane_id] = 0;
7193
7194 wm_state->wm[level].plane[plane_id] =
7195 vlv_invert_wm_value(raw->plane[plane_id],
7196 fifo_state->plane[plane_id]);
7197 }
7198 }
7199
7200 for_each_intel_crtc(&dev_priv->drm, crtc) {
7201 struct intel_crtc_state *crtc_state =
7202 to_intel_crtc_state(crtc->base.state);
7203
7204 crtc_state->wm.vlv.intermediate =
7205 crtc_state->wm.vlv.optimal;
7206 crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
7207 }
7208
7209 vlv_program_watermarks(dev_priv);
7210
7211 mutex_unlock(&dev_priv->wm.wm_mutex);
7212}
7213
/*
 * FIXME should probably kill this and improve
 * the real watermark readout/sanitation instead
 */
7218static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
7219{
7220 intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK) & ~WM1_LP_SR_EN);
7221 intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK) & ~WM1_LP_SR_EN);
7222 intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK) & ~WM1_LP_SR_EN);
7223
	/*
	 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */
7228}
7229
7230void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
7231{
7232 struct ilk_wm_values *hw = &dev_priv->wm.hw;
7233 struct intel_crtc *crtc;
7234
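	/* Clear out the LP watermark enables before reading out the rest of the state */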
7235 ilk_init_lp_watermarks(dev_priv);
7236
7237 for_each_intel_crtc(&dev_priv->drm, crtc)
7238 ilk_pipe_wm_get_hw_state(crtc);
7239
7240 hw->wm_lp[0] = intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK);
7241 hw->wm_lp[1] = intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK);
7242 hw->wm_lp[2] = intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK);
7243
7244 hw->wm_lp_spr[0] = intel_uncore_read(&dev_priv->uncore, WM1S_LP_ILK);
7245 if (DISPLAY_VER(dev_priv) >= 7) {
7246 hw->wm_lp_spr[1] = intel_uncore_read(&dev_priv->uncore, WM2S_LP_IVB);
7247 hw->wm_lp_spr[2] = intel_uncore_read(&dev_priv->uncore, WM3S_LP_IVB);
7248 }
7249
7250 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
7251 hw->partitioning = (intel_uncore_read(&dev_priv->uncore, WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
7252 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
7253 else if (IS_IVYBRIDGE(dev_priv))
7254 hw->partitioning = (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
7255 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
7256
7257 hw->enable_fbc_wm =
7258 !(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS);
7259}
7260
7261void intel_enable_ipc(struct drm_i915_private *dev_priv)
7262{
7263 u32 val;
7264
7265 if (!HAS_IPC(dev_priv))
7266 return;
7267
7268 val = intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2);
7269
7270 if (dev_priv->ipc_enabled)
7271 val |= DISP_IPC_ENABLE;
7272 else
7273 val &= ~DISP_IPC_ENABLE;
7274
7275 intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL2, val);
7276}
7277
static bool intel_can_enable_ipc(struct drm_i915_private *dev_priv)
{
	/* Display WA #0477 WaDisableIPC: skl */
	if (IS_SKYLAKE(dev_priv))
		return false;

	/* Display WA #1141: SKL:all KBL:all CFL */
	if (IS_KABYLAKE(dev_priv) ||
	    IS_COFFEELAKE(dev_priv) ||
	    IS_COMETLAKE(dev_priv))
		return dev_priv->dram_info.symmetric_memory;

	return true;
}
7292
7293void intel_init_ipc(struct drm_i915_private *dev_priv)
7294{
7295 if (!HAS_IPC(dev_priv))
7296 return;
7297
7298 dev_priv->ipc_enabled = intel_can_enable_ipc(dev_priv);
7299
7300 intel_enable_ipc(dev_priv);
7301}
7302
static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
7312
7313static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
7314{
7315 enum pipe pipe;
7316
7317 for_each_pipe(dev_priv, pipe) {
7318 intel_uncore_write(&dev_priv->uncore, DSPCNTR(pipe),
7319 intel_uncore_read(&dev_priv->uncore, DSPCNTR(pipe)) |
7320 DISPPLANE_TRICKLE_FEED_DISABLE);
7321
7322 intel_uncore_write(&dev_priv->uncore, DSPSURF(pipe), intel_uncore_read(&dev_priv->uncore, DSPSURF(pipe)));
7323 intel_uncore_posting_read(&dev_priv->uncore, DSPSURF(pipe));
7324 }
7325}
7326
7327static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
7328{
7329 u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
7330
	/*
	 * Required for FBC
	 * WaFbcDisableDpfcClockGating:ilk
	 */
7335 dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
7336 ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
7337 ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
7338
7339 intel_uncore_write(&dev_priv->uncore, PCH_3DCGDIS0,
7340 MARIUNIT_CLOCK_GATE_DISABLE |
7341 SVSMUNIT_CLOCK_GATE_DISABLE);
7342 intel_uncore_write(&dev_priv->uncore, PCH_3DCGDIS1,
7343 VFMUNIT_CLOCK_GATE_DISABLE);
7344
	/*
	 * According to the spec the following bits should be set in
	 * order to enable memory self-refresh
	 * The bit 22/21 of 0x42004
	 * The bit 5 of 0x42020
	 * The bit 15 of 0x45000
	 */
7352 intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
7353 (intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
7354 ILK_DPARB_GATE | ILK_VSDPFD_FULL));
7355 dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
7356 intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL,
7357 (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
7358 DISP_FBC_WM_DIS));
7359
	/*
	 * Based on the document from hardware guys the following bits
	 * should be set unconditionally in order to enable FBC.
	 * The bit 22 of 0x42000
	 * The bit 22 of 0x42004
	 * The bit 7,8,9 of 0x42020.
	 */
7367 if (IS_IRONLAKE_M(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ilk */
7369 intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1,
7370 intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1) |
7371 ILK_FBCQ_DIS);
7372 intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
7373 intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
7374 ILK_DPARB_GATE);
7375 }
7376
7377 intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, dspclk_gate);
7378
7379 intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
7380 intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
7381 ILK_ELPIN_409_SELECT);
7382
7383 g4x_disable_trickle_feed(dev_priv);
7384
7385 ibx_init_clock_gating(dev_priv);
7386}
7387
7388static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
7389{
7390 enum pipe pipe;
7391 u32 val;
7392
	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
7398 intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
7399 PCH_DPLUNIT_CLOCK_GATE_DISABLE |
7400 PCH_CPUNIT_CLOCK_GATE_DISABLE);
7401 intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN2, intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN2) |
7402 DPLS_EDP_PPS_FIX_DIS);
7403
	/* The below fixes the weird display corruption, a few pixels shifted
	 * downward, on (only) LVDS of some HP laptops with IVY.
	 */
7406 for_each_pipe(dev_priv, pipe) {
7407 val = intel_uncore_read(&dev_priv->uncore, TRANS_CHICKEN2(pipe));
7408 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
7409 val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
7410 if (dev_priv->vbt.fdi_rx_polarity_inverted)
7411 val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
7412 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
7413 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
7414 intel_uncore_write(&dev_priv->uncore, TRANS_CHICKEN2(pipe), val);
7415 }
7416
7417 for_each_pipe(dev_priv, pipe) {
7418 intel_uncore_write(&dev_priv->uncore, TRANS_CHICKEN1(pipe),
7419 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
7420 }
7421}
7422
7423static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
7424{
7425 u32 tmp;
7426
7427 tmp = intel_uncore_read(&dev_priv->uncore, MCH_SSKPD);
7428 if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
7429 drm_dbg_kms(&dev_priv->drm,
7430 "Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
7431 tmp);
7432}
7433
7434static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
7435{
7436 u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
7437
7438 intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, dspclk_gate);
7439
7440 intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
7441 intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
7442 ILK_ELPIN_409_SELECT);
7443
7444 intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1,
7445 intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) |
7446 GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
7447 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
7448
	/*
	 * According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set.  Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and possibly other games as well.
	 *
	 * WaDisableRCCUnitClockGating:snb
	 * WaDisableRCPBUnitClockGating:snb
	 */
7462 intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL2,
7463 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
7464 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
7465
	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 *
	 * WaFbcAsynchFlipDisableFbcQueue:snb
	 */
7477 intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1,
7478 intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1) |
7479 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
7480 intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
7481 intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
7482 ILK_DPARB_GATE | ILK_VSDPFD_FULL);
7483 intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D,
7484 intel_uncore_read(&dev_priv->uncore, ILK_DSPCLK_GATE_D) |
7485 ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
7486 ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
7487
7488 g4x_disable_trickle_feed(dev_priv);
7489
7490 cpt_init_clock_gating(dev_priv);
7491
7492 gen6_check_mch_setup(dev_priv);
7493}
7494
static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/*
	 * TODO: this bit should only be enabled when really needed, then
	 * disabled when not needed anymore in order to save power.
	 */
	if (HAS_PCH_LPT_LP(dev_priv))
		intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D,
				   intel_uncore_read(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D) |
				   PCH_LP_PARTITION_LEVEL_DISABLE);

	/* WADPOClockGatingDisable:hsw */
	intel_uncore_write(&dev_priv->uncore, TRANS_CHICKEN1(PIPE_A),
			   intel_uncore_read(&dev_priv->uncore, TRANS_CHICKEN1(PIPE_A)) |
			   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}
7511
7512static void lpt_suspend_hw(struct drm_i915_private *dev_priv)
7513{
7514 if (HAS_PCH_LPT_LP(dev_priv)) {
7515 u32 val = intel_uncore_read(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D);
7516
7517 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
7518 intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, val);
7519 }
7520}
7521
7522static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
7523 int general_prio_credits,
7524 int high_prio_credits)
7525{
7526 u32 misccpctl;
7527 u32 val;
7528
	/* WaTempDisableDOPClkGating:bdw */
7530 misccpctl = intel_uncore_read(&dev_priv->uncore, GEN7_MISCCPCTL);
7531 intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
7532
7533 val = intel_uncore_read(&dev_priv->uncore, GEN8_L3SQCREG1);
7534 val &= ~L3_PRIO_CREDITS_MASK;
7535 val |= L3_GENERAL_PRIO_CREDITS(general_prio_credits);
7536 val |= L3_HIGH_PRIO_CREDITS(high_prio_credits);
7537 intel_uncore_write(&dev_priv->uncore, GEN8_L3SQCREG1, val);
7538
	/*
	 * Wait at least 100 clocks before re-enabling clock gating.
	 * See the definition of L3SQCREG1 in BSpec.
	 */
7543 intel_uncore_posting_read(&dev_priv->uncore, GEN8_L3SQCREG1);
7544 udelay(1);
7545 intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);
7546}
7547
static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* Wa_1409120013:icl,ehl */
	intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN,
			   DPFC_CHICKEN_COMP_DUMMY_PIXEL);

	/* Wa_14010594013:icl,ehl */
	intel_uncore_rmw(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1,
			 0, ICL_DELAY_PMRSP);
}
7558
static void gen12lp_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* Wa_1409120013:tgl,rkl,adl-s,dg1,dg2 */
	if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv) ||
	    IS_ALDERLAKE_S(dev_priv) || IS_DG1(dev_priv) || IS_DG2(dev_priv))
		intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN,
				   DPFC_CHICKEN_COMP_DUMMY_PIXEL);

	/* Wa_1409825376:tgl (pre-prod) */
	if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0))
		intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_3, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_3) |
				   TGL_VRH_GATING_DIS);

	/* Wa_14013723622:tgl,rkl,dg1,adl-s */
	if (DISPLAY_VER(dev_priv) == 12)
		intel_uncore_rmw(&dev_priv->uncore, CLKREQ_POLICY,
				 CLKREQ_POLICY_MEM_UP_OVRD, 0);
}
7577
static void adlp_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen12lp_init_clock_gating(dev_priv);

	/* Wa_14011091694:adlp */
	intel_de_rmw(dev_priv, GEN9_CLKGATE_DIS_5, 0, DPCE_GATING_DIS);
}
7585
static void dg1_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen12lp_init_clock_gating(dev_priv);

	/* Wa_1409836686:dg1[a0] */
	if (IS_DG1_GRAPHICS_STEP(dev_priv, STEP_A0, STEP_B0))
		intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_3, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_3) |
				   DPT_GATING_DIS);
}
7595
static void xehpsdv_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* Wa_22010146351:xehpsdv */
	if (IS_XEHPSDV_GRAPHICS_STEP(dev_priv, STEP_A0, STEP_B0))
		intel_uncore_rmw(&dev_priv->uncore, XEHP_CLOCK_GATE_DIS, 0, SGR_DIS);
}
7602
static void dg2_init_clock_gating(struct drm_i915_private *i915)
{
	/* Wa_22010954014:dg2_g10 */
	if (IS_DG2_G10(i915))
		intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 0,
				 SGSI_SIDECLK_DIS);

	/*
	 * Wa_14010733611:dg2_g10
	 * Wa_22010146351:dg2_g10
	 */
	if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0))
		intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 0,
				 SGR_DIS | SGGI_DIS);
}
7618
static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
{
	if (!HAS_PCH_CNP(dev_priv))
		return;

	/* Display WA #1181 WaSouthDisplayDisablePWMCGEGating: cnp */
	intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, intel_uncore_read(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D) |
			   CNP_PWM_CGE_GATING_DISABLE);
}
7628
static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	cnp_init_clock_gating(dev_priv);
	gen9_init_clock_gating(dev_priv);

	/* WAC6entrylatency:cfl */
	intel_uncore_write(&dev_priv->uncore, FBC_LLC_READ_CTRL, intel_uncore_read(&dev_priv->uncore, FBC_LLC_READ_CTRL) |
			   FBC_LLC_FULLY_OPEN);

	/*
	 * WaFbcTurnOffFbcWatermark:cfl
	 * Display WA #0562: cfl
	 */
	intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
			   DISP_FBC_WM_DIS);

	/*
	 * WaFbcNukeOnHostModify:cfl
	 * Display WA #0873: cfl
	 */
	intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) |
			   DPFC_NUKE_ON_ANY_MODIFICATION);
}
7652
static void kbl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* WAC6entrylatency:kbl */
	intel_uncore_write(&dev_priv->uncore, FBC_LLC_READ_CTRL, intel_uncore_read(&dev_priv->uncore, FBC_LLC_READ_CTRL) |
			   FBC_LLC_FULLY_OPEN);

	/* WaDisableSDEUnitClockGating:kbl */
	if (IS_KBL_GRAPHICS_STEP(dev_priv, 0, STEP_C0))
		intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
				   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableGamClockGating:kbl */
	if (IS_KBL_GRAPHICS_STEP(dev_priv, 0, STEP_C0))
		intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1, intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) |
				   GEN6_GAMUNIT_CLOCK_GATE_DISABLE);

	/*
	 * WaFbcTurnOffFbcWatermark:kbl
	 * Display WA #0562: kbl
	 */
	intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
			   DISP_FBC_WM_DIS);

	/*
	 * WaFbcNukeOnHostModify:kbl
	 * Display WA #0873: kbl
	 */
	intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) |
			   DPFC_NUKE_ON_ANY_MODIFICATION);
}
7685
static void skl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* WaDisableDopClockGating:skl */
	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, intel_uncore_read(&dev_priv->uncore, GEN7_MISCCPCTL) &
			   ~GEN7_DOP_CLOCK_GATE_ENABLE);

	/* WAC6entrylatency:skl */
	intel_uncore_write(&dev_priv->uncore, FBC_LLC_READ_CTRL, intel_uncore_read(&dev_priv->uncore, FBC_LLC_READ_CTRL) |
			   FBC_LLC_FULLY_OPEN);

	/*
	 * WaFbcTurnOffFbcWatermark:skl
	 * Display WA #0562: skl
	 */
	intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
			   DISP_FBC_WM_DIS);

	/*
	 * WaFbcNukeOnHostModify:skl
	 * Display WA #0873: skl
	 */
	intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) |
			   DPFC_NUKE_ON_ANY_MODIFICATION);

	/*
	 * WaFbcHighMemBwCorruptionAvoidance:skl
	 * Display WA #0883: skl
	 */
	intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) |
			   DPFC_DISABLE_DUMMY0);
}
7719
static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
	intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A),
			   intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A)) |
			   HSW_FBCQ_DIS);

	/* WaSwitchSolVfFArbitrationPriority:bdw */
	intel_uncore_write(&dev_priv->uncore, GAM_ECOCHK, intel_uncore_read(&dev_priv->uncore, GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaPsrDPAMaskVBlankInSRD:bdw */
	intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1,
			   intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);

	for_each_pipe(dev_priv, pipe) {
		/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
		intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe),
				   intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe)) |
				   BDW_DPRS_MASK_VBLANK_SRD);
	}

	/* WaVSRefCountFullforceMissDisable:bdw */
	/* WaDSRefCountFullforceMissDisable:bdw */
	intel_uncore_write(&dev_priv->uncore, GEN7_FF_THREAD_MODE,
			   intel_uncore_read(&dev_priv->uncore, GEN7_FF_THREAD_MODE) &
			   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	intel_uncore_write(&dev_priv->uncore, GEN6_RC_SLEEP_PSMI_CONTROL,
			   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableSDEUnitClockGating:bdw */
	intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
			   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* WaProgramL3SqcReg1Default:bdw */
	gen8_set_l3sqc_credits(dev_priv, 30, 2);

	/* WaKVMNotificationOnConfigChange:bdw */
	intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR2_1, intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR2_1)
			   | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);

	lpt_init_clock_gating(dev_priv);

	/* WaDisableDopClockGating:bdw
	 *
	 * Also see the CHICKEN2 write in bdw_init_workarounds() to disable DOP
	 * clock gating.
	 */
	intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1,
			   intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) | GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
}
7773
static void hsw_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
	intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A),
			   intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A)) |
			   HSW_FBCQ_DIS);

	/* This is required by WaCatErrorRejectionIssue:hsw */
	intel_uncore_write(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
			   intel_uncore_read(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
			   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	/* WaSwitchSolVfFArbitrationPriority:hsw */
	intel_uncore_write(&dev_priv->uncore, GAM_ECOCHK, intel_uncore_read(&dev_priv->uncore, GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	lpt_init_clock_gating(dev_priv);
}
7791
static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 snpcr;

	intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);

	/* WaFbcAsynchFlipDisableFbcQueue:ivb */
	intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1,
			   intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);

	/* WaDisableBackToBackFlipFix:ivb */
	intel_uncore_write(&dev_priv->uncore, IVB_CHICKEN3,
			   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
			   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	if (IS_IVB_GT1(dev_priv))
		intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2,
				   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	else {
		/* must write both registers */
		intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2,
				   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
		intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2_GT2,
				   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	}

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
	 */
	intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL2,
			   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* This is required by WaCatErrorRejectionIssue:ivb */
	intel_uncore_write(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
			   intel_uncore_read(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
			   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	g4x_disable_trickle_feed(dev_priv);

	snpcr = intel_uncore_read(&dev_priv->uncore, GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= GEN6_MBC_SNPCR_MED;
	intel_uncore_write(&dev_priv->uncore, GEN6_MBCUNIT_SNPCR, snpcr);

	if (!HAS_PCH_NOP(dev_priv))
		cpt_init_clock_gating(dev_priv);

	gen6_check_mch_setup(dev_priv);
}
7843
static void vlv_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* WaDisableBackToBackFlipFix:vlv */
	intel_uncore_write(&dev_priv->uncore, IVB_CHICKEN3,
			   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
			   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaDisableDopClockGating:vlv */
	intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:vlv */
	intel_uncore_write(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
			   intel_uncore_read(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
			   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
	 */
	intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL2,
			   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableL3Bank2xClockGate:vlv
	 * Disabling L3 clock gating- MMIO 940c[25] = 1
	 * Set bit 25, to disable L3_BANK_2x_CLK_GATING */
	intel_uncore_write(&dev_priv->uncore, GEN7_UCGCTL4,
			   intel_uncore_read(&dev_priv->uncore, GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);

	/*
	 * WaDisableVLVClockGating_VBIIssue:vlv
	 * Disable clock gating on the GCFG unit to prevent a delay
	 * in the reporting of vblank events.
	 */
	intel_uncore_write(&dev_priv->uncore, VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}
7880
static void chv_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* WaVSRefCountFullforceMissDisable:chv */
	/* WaDSRefCountFullforceMissDisable:chv */
	intel_uncore_write(&dev_priv->uncore, GEN7_FF_THREAD_MODE,
			   intel_uncore_read(&dev_priv->uncore, GEN7_FF_THREAD_MODE) &
			   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	/* WaDisableSemaphoreAndSyncFlipWait:chv */
	intel_uncore_write(&dev_priv->uncore, GEN6_RC_SLEEP_PSMI_CONTROL,
			   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableCSUnitClockGating:chv */
	intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1, intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) |
			   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableSDEUnitClockGating:chv */
	intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
			   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * WaProgramL3SqcReg1Default:chv
	 * See gfxspecs/Related Documents/Performance Guide/
	 * LSQC Setting Recommendations.
	 */
	gen8_set_l3sqc_credits(dev_priv, 38, 2);
}
7908
7909static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
7910{
7911 u32 dspclk_gate;
7912
7913 intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D1, 0);
7914 intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
7915 GS_UNIT_CLOCK_GATE_DISABLE |
7916 CL_UNIT_CLOCK_GATE_DISABLE);
7917 intel_uncore_write(&dev_priv->uncore, RAMCLK_GATE_D, 0);
7918 dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
7919 OVRUNIT_CLOCK_GATE_DISABLE |
7920 OVCUNIT_CLOCK_GATE_DISABLE;
7921 if (IS_GM45(dev_priv))
7922 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
7923 intel_uncore_write(&dev_priv->uncore, DSPCLK_GATE_D, dspclk_gate);
7924
7925 g4x_disable_trickle_feed(dev_priv);
7926}
7927
7928static void i965gm_init_clock_gating(struct drm_i915_private *dev_priv)
7929{
7930 struct intel_uncore *uncore = &dev_priv->uncore;
7931
7932 intel_uncore_write(uncore, RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
7933 intel_uncore_write(uncore, RENCLK_GATE_D2, 0);
7934 intel_uncore_write(uncore, DSPCLK_GATE_D, 0);
7935 intel_uncore_write(uncore, RAMCLK_GATE_D, 0);
7936 intel_uncore_write16(uncore, DEUC, 0);
7937 intel_uncore_write(uncore,
7938 MI_ARB_STATE,
7939 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
7940}
7941
7942static void i965g_init_clock_gating(struct drm_i915_private *dev_priv)
7943{
7944 intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
7945 I965_RCC_CLOCK_GATE_DISABLE |
7946 I965_RCPB_CLOCK_GATE_DISABLE |
7947 I965_ISC_CLOCK_GATE_DISABLE |
7948 I965_FBC_CLOCK_GATE_DISABLE);
7949 intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D2, 0);
7950 intel_uncore_write(&dev_priv->uncore, MI_ARB_STATE,
7951 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
7952}
7953
7954static void gen3_init_clock_gating(struct drm_i915_private *dev_priv)
7955{
7956 u32 dstate = intel_uncore_read(&dev_priv->uncore, D_STATE);
7957
7958 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
7959 DSTATE_DOT_CLOCK_GATING;
7960 intel_uncore_write(&dev_priv->uncore, D_STATE, dstate);
7961
7962 if (IS_PINEVIEW(dev_priv))
7963 intel_uncore_write(&dev_priv->uncore, ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
7964
	/* IIR "flip pending" means done if this bit is set */
7966 intel_uncore_write(&dev_priv->uncore, ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
7967
	/* interrupts should cause a wake up from C3 */
7969 intel_uncore_write(&dev_priv->uncore, INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
7970
	/* allow memory arbiter writes while in C3 (MI_ARB_C3_LP_WRITE_ENABLE) */
7972 intel_uncore_write(&dev_priv->uncore, MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
7973
7974 intel_uncore_write(&dev_priv->uncore, MI_ARB_STATE,
7975 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
7976}
7977
7978static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
7979{
7980 intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
7981
	/* interrupts should cause a wake up from C3 */
7983 intel_uncore_write(&dev_priv->uncore, MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
7984 _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
7985
7986 intel_uncore_write(&dev_priv->uncore, MEM_MODE,
7987 _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
7988
	/*
	 * Have FBC ignore 3D activity since we use software
	 * render tracking; otherwise a pure 3D workload could
	 * keep FBC from ever recompressing the framebuffer.
	 */
7996 intel_uncore_write(&dev_priv->uncore, SCPD0,
7997 _MASKED_BIT_ENABLE(SCPD_FBC_IGNORE_3D));
7998}
7999
8000static void i830_init_clock_gating(struct drm_i915_private *dev_priv)
8001{
8002 intel_uncore_write(&dev_priv->uncore, MEM_MODE,
8003 _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
8004 _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
8005}
8006
8007void intel_init_clock_gating(struct drm_i915_private *dev_priv)
8008{
8009 dev_priv->clock_gating_funcs->init_clock_gating(dev_priv);
8010}
8011
8012void intel_suspend_hw(struct drm_i915_private *dev_priv)
8013{
8014 if (HAS_PCH_LPT(dev_priv))
8015 lpt_suspend_hw(dev_priv);
8016}
8017
8018static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
8019{
8020 drm_dbg_kms(&dev_priv->drm,
8021 "No clock gating settings or workarounds applied.\n");
8022}
8023
8024#define CG_FUNCS(platform) \
8025static const struct drm_i915_clock_gating_funcs platform##_clock_gating_funcs = { \
8026 .init_clock_gating = platform##_init_clock_gating, \
8027}
8028
8029CG_FUNCS(dg2);
8030CG_FUNCS(xehpsdv);
8031CG_FUNCS(adlp);
8032CG_FUNCS(dg1);
8033CG_FUNCS(gen12lp);
8034CG_FUNCS(icl);
8035CG_FUNCS(cfl);
8036CG_FUNCS(skl);
8037CG_FUNCS(kbl);
8038CG_FUNCS(bxt);
8039CG_FUNCS(glk);
8040CG_FUNCS(bdw);
8041CG_FUNCS(chv);
8042CG_FUNCS(hsw);
8043CG_FUNCS(ivb);
8044CG_FUNCS(vlv);
8045CG_FUNCS(gen6);
8046CG_FUNCS(ilk);
8047CG_FUNCS(g4x);
8048CG_FUNCS(i965gm);
8049CG_FUNCS(i965g);
8050CG_FUNCS(gen3);
8051CG_FUNCS(i85x);
8052CG_FUNCS(i830);
8053CG_FUNCS(nop);
8054#undef CG_FUNCS
8055
/**
 * intel_init_clock_gating_hooks - setup the clock gating hooks
 * @dev_priv: device private
 *
 * Setup the hooks that configure which clocks of a given platform can be
 * gated and also apply various GT and display specific workarounds for these
 * platforms. Note that some GT specific workarounds are applied separately
 * when GPU contexts or batchbuffers start their execution.
 */
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
	if (IS_DG2(dev_priv))
		dev_priv->clock_gating_funcs = &dg2_clock_gating_funcs;
	else if (IS_XEHPSDV(dev_priv))
		dev_priv->clock_gating_funcs = &xehpsdv_clock_gating_funcs;
	else if (IS_ALDERLAKE_P(dev_priv))
		dev_priv->clock_gating_funcs = &adlp_clock_gating_funcs;
	else if (IS_DG1(dev_priv))
		dev_priv->clock_gating_funcs = &dg1_clock_gating_funcs;
	else if (GRAPHICS_VER(dev_priv) == 12)
		dev_priv->clock_gating_funcs = &gen12lp_clock_gating_funcs;
	else if (GRAPHICS_VER(dev_priv) == 11)
		dev_priv->clock_gating_funcs = &icl_clock_gating_funcs;
	else if (IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv))
		dev_priv->clock_gating_funcs = &cfl_clock_gating_funcs;
	else if (IS_SKYLAKE(dev_priv))
		dev_priv->clock_gating_funcs = &skl_clock_gating_funcs;
	else if (IS_KABYLAKE(dev_priv))
		dev_priv->clock_gating_funcs = &kbl_clock_gating_funcs;
	else if (IS_BROXTON(dev_priv))
		dev_priv->clock_gating_funcs = &bxt_clock_gating_funcs;
	else if (IS_GEMINILAKE(dev_priv))
		dev_priv->clock_gating_funcs = &glk_clock_gating_funcs;
	else if (IS_BROADWELL(dev_priv))
		dev_priv->clock_gating_funcs = &bdw_clock_gating_funcs;
	else if (IS_CHERRYVIEW(dev_priv))
		dev_priv->clock_gating_funcs = &chv_clock_gating_funcs;
	else if (IS_HASWELL(dev_priv))
		dev_priv->clock_gating_funcs = &hsw_clock_gating_funcs;
	else if (IS_IVYBRIDGE(dev_priv))
		dev_priv->clock_gating_funcs = &ivb_clock_gating_funcs;
	else if (IS_VALLEYVIEW(dev_priv))
		dev_priv->clock_gating_funcs = &vlv_clock_gating_funcs;
	else if (GRAPHICS_VER(dev_priv) == 6)
		dev_priv->clock_gating_funcs = &gen6_clock_gating_funcs;
	else if (GRAPHICS_VER(dev_priv) == 5)
		dev_priv->clock_gating_funcs = &ilk_clock_gating_funcs;
	else if (IS_G4X(dev_priv))
		dev_priv->clock_gating_funcs = &g4x_clock_gating_funcs;
	else if (IS_I965GM(dev_priv))
		dev_priv->clock_gating_funcs = &i965gm_clock_gating_funcs;
	else if (IS_I965G(dev_priv))
		dev_priv->clock_gating_funcs = &i965g_clock_gating_funcs;
	else if (GRAPHICS_VER(dev_priv) == 3)
		dev_priv->clock_gating_funcs = &gen3_clock_gating_funcs;
	else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
		dev_priv->clock_gating_funcs = &i85x_clock_gating_funcs;
	else if (GRAPHICS_VER(dev_priv) == 2)
		dev_priv->clock_gating_funcs = &i830_clock_gating_funcs;
	else {
		MISSING_CASE(INTEL_DEVID(dev_priv));
		dev_priv->clock_gating_funcs = &nop_clock_gating_funcs;
	}
}
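
/*
 * Illustrative only: the hook installed above is assumed to be invoked
 * through a one-line dispatch wrapper along these lines (a sketch, not
 * necessarily the exact in-tree helper):
 *
 *	void intel_init_clock_gating(struct drm_i915_private *dev_priv)
 *	{
 *		dev_priv->clock_gating_funcs->init_clock_gating(dev_priv);
 *	}
 */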

static const struct drm_i915_wm_disp_funcs skl_wm_funcs = {
	.compute_global_watermarks = skl_compute_wm,
};

static const struct drm_i915_wm_disp_funcs ilk_wm_funcs = {
	.compute_pipe_wm = ilk_compute_pipe_wm,
	.compute_intermediate_wm = ilk_compute_intermediate_wm,
	.initial_watermarks = ilk_initial_watermarks,
	.optimize_watermarks = ilk_optimize_watermarks,
};

static const struct drm_i915_wm_disp_funcs vlv_wm_funcs = {
	.compute_pipe_wm = vlv_compute_pipe_wm,
	.compute_intermediate_wm = vlv_compute_intermediate_wm,
	.initial_watermarks = vlv_initial_watermarks,
	.optimize_watermarks = vlv_optimize_watermarks,
	.atomic_update_watermarks = vlv_atomic_update_fifo,
};

static const struct drm_i915_wm_disp_funcs g4x_wm_funcs = {
	.compute_pipe_wm = g4x_compute_pipe_wm,
	.compute_intermediate_wm = g4x_compute_intermediate_wm,
	.initial_watermarks = g4x_initial_watermarks,
	.optimize_watermarks = g4x_optimize_watermarks,
};

static const struct drm_i915_wm_disp_funcs pnv_wm_funcs = {
	.update_wm = pnv_update_wm,
};

static const struct drm_i915_wm_disp_funcs i965_wm_funcs = {
	.update_wm = i965_update_wm,
};

static const struct drm_i915_wm_disp_funcs i9xx_wm_funcs = {
	.update_wm = i9xx_update_wm,
};

static const struct drm_i915_wm_disp_funcs i845_wm_funcs = {
	.update_wm = i845_update_wm,
};

/* All hooks left NULL: no watermark programming for this platform */
static const struct drm_i915_wm_disp_funcs nop_funcs = {
};
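
/*
 * Callers dispatch through dev_priv->wm_disp and must NULL-check each
 * hook, since nop_funcs provides none. A hedged sketch of the expected
 * call pattern (the real wrappers live elsewhere in i915):
 *
 *	if (dev_priv->wm_disp->update_wm)
 *		dev_priv->wm_disp->update_wm(dev_priv);
 */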

/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_i915_private *dev_priv)
{
	/* For cxsr */
	if (IS_PINEVIEW(dev_priv))
		pnv_get_mem_freq(dev_priv);
	else if (GRAPHICS_VER(dev_priv) == 5)
		ilk_get_mem_freq(dev_priv);

	if (intel_has_sagv(dev_priv))
		skl_setup_sagv_block_time(dev_priv);

	/* For FIFO watermark updates */
	if (DISPLAY_VER(dev_priv) >= 9) {
		skl_setup_wm_latency(dev_priv);
		dev_priv->wm_disp = &skl_wm_funcs;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_setup_wm_latency(dev_priv);

		if ((DISPLAY_VER(dev_priv) == 5 && dev_priv->wm.pri_latency[1] &&
		     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
		    (DISPLAY_VER(dev_priv) != 5 && dev_priv->wm.pri_latency[0] &&
		     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
			dev_priv->wm_disp = &ilk_wm_funcs;
		} else {
			drm_dbg_kms(&dev_priv->drm,
				    "Failed to read display plane latency. "
				    "Disabling CxSR\n");
			dev_priv->wm_disp = &nop_funcs;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_setup_wm_latency(dev_priv);
		dev_priv->wm_disp = &vlv_wm_funcs;
	} else if (IS_G4X(dev_priv)) {
		g4x_setup_wm_latency(dev_priv);
		dev_priv->wm_disp = &g4x_wm_funcs;
	} else if (IS_PINEVIEW(dev_priv)) {
		if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			drm_info(&dev_priv->drm,
				 "failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			intel_set_memory_cxsr(dev_priv, false);
			dev_priv->wm_disp = &nop_funcs;
		} else
			dev_priv->wm_disp = &pnv_wm_funcs;
	} else if (DISPLAY_VER(dev_priv) == 4) {
		dev_priv->wm_disp = &i965_wm_funcs;
	} else if (DISPLAY_VER(dev_priv) == 3) {
		dev_priv->wm_disp = &i9xx_wm_funcs;
	} else if (DISPLAY_VER(dev_priv) == 2) {
		if (INTEL_NUM_PIPES(dev_priv) == 1)
			dev_priv->wm_disp = &i845_wm_funcs;
		else
			dev_priv->wm_disp = &i9xx_wm_funcs;
	} else {
		drm_err(&dev_priv->drm,
			"unexpected fall-through in %s\n", __func__);
		dev_priv->wm_disp = &nop_funcs;
	}
}

void intel_pm_setup(struct drm_i915_private *dev_priv)
{
	dev_priv->runtime_pm.suspended = false;
	atomic_set(&dev_priv->runtime_pm.wakeref_count, 0);
}

static struct intel_global_state *intel_dbuf_duplicate_state(struct intel_global_obj *obj)
{
	struct intel_dbuf_state *dbuf_state;

	dbuf_state = kmemdup(obj->state, sizeof(*dbuf_state), GFP_KERNEL);
	if (!dbuf_state)
		return NULL;

	return &dbuf_state->base;
}

static void intel_dbuf_destroy_state(struct intel_global_obj *obj,
				     struct intel_global_state *state)
{
	kfree(state);
}

static const struct intel_global_state_funcs intel_dbuf_funcs = {
	.atomic_duplicate_state = intel_dbuf_duplicate_state,
	.atomic_destroy_state = intel_dbuf_destroy_state,
};

struct intel_dbuf_state *
intel_atomic_get_dbuf_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_global_state *dbuf_state;

	dbuf_state = intel_atomic_get_global_obj_state(state, &dev_priv->dbuf.obj);
	if (IS_ERR(dbuf_state))
		return ERR_CAST(dbuf_state);

	return to_intel_dbuf_state(dbuf_state);
}
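
/*
 * Illustrative use from an atomic check phase (a sketch; the error
 * handling mirrors the other intel_atomic_get_*_state() helpers):
 *
 *	new_dbuf_state = intel_atomic_get_dbuf_state(state);
 *	if (IS_ERR(new_dbuf_state))
 *		return PTR_ERR(new_dbuf_state);
 */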

int intel_dbuf_init(struct drm_i915_private *dev_priv)
{
	struct intel_dbuf_state *dbuf_state;

	dbuf_state = kzalloc(sizeof(*dbuf_state), GFP_KERNEL);
	if (!dbuf_state)
		return -ENOMEM;

	intel_atomic_global_obj_init(dev_priv, &dev_priv->dbuf.obj,
				     &dbuf_state->base, &intel_dbuf_funcs);

	return 0;
}
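
/*
 * Illustrative call-site sketch for driver init (assumes the caller
 * unwinds on error in the usual way); the base state registered here is
 * what intel_dbuf_duplicate_state() copies for each atomic commit:
 *
 *	ret = intel_dbuf_init(i915);
 *	if (ret)
 *		return ret;
 */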

/*
 * Configure MBUS_CTL and the DBUF_CTL_S register of each slice to match
 * the joined_mbus state before updating the requested state of all DBUF
 * slices.
 */
static void update_mbus_pre_enable(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	u32 mbus_ctl, dbuf_min_tracker_val;
	enum dbuf_slice slice;
	const struct intel_dbuf_state *dbuf_state =
		intel_atomic_get_new_dbuf_state(state);

	if (!IS_ALDERLAKE_P(dev_priv))
		return;

	/*
	 * TODO: Implement vblank synchronized MBUS joining changes.
	 * Must be properly coordinated with dbuf reprogramming.
	 */
	if (dbuf_state->joined_mbus) {
		mbus_ctl = MBUS_HASHING_MODE_1x4 | MBUS_JOIN |
			MBUS_JOIN_PIPE_SELECT_NONE;
		dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(3);
	} else {
		mbus_ctl = MBUS_HASHING_MODE_2x2 |
			MBUS_JOIN_PIPE_SELECT_NONE;
		dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(1);
	}

	intel_de_rmw(dev_priv, MBUS_CTL,
		     MBUS_HASHING_MODE_MASK | MBUS_JOIN |
		     MBUS_JOIN_PIPE_SELECT_MASK, mbus_ctl);

	for_each_dbuf_slice(dev_priv, slice)
		intel_de_rmw(dev_priv, DBUF_CTL_S(slice),
			     DBUF_MIN_TRACKER_STATE_SERVICE_MASK,
			     dbuf_min_tracker_val);
}

void intel_dbuf_pre_plane_update(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_dbuf_state *new_dbuf_state =
		intel_atomic_get_new_dbuf_state(state);
	const struct intel_dbuf_state *old_dbuf_state =
		intel_atomic_get_old_dbuf_state(state);

	if (!new_dbuf_state ||
	    (new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices &&
	     new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus))
		return;

	WARN_ON(!new_dbuf_state->base.changed);

	update_mbus_pre_enable(state);

	/* Keep both the old and the new slices powered while planes are updated */
	gen9_dbuf_slices_update(dev_priv,
				old_dbuf_state->enabled_slices |
				new_dbuf_state->enabled_slices);
}
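
/*
 * intel_dbuf_pre_plane_update() above powers up the union of the old and
 * new slice sets; intel_dbuf_post_plane_update() below trims back to the
 * new set once the planes have been reprogrammed. Expected commit
 * ordering, as a sketch:
 *
 *	intel_dbuf_pre_plane_update(state);
 *	... program planes / watermarks ...
 *	intel_dbuf_post_plane_update(state);
 */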

void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_dbuf_state *new_dbuf_state =
		intel_atomic_get_new_dbuf_state(state);
	const struct intel_dbuf_state *old_dbuf_state =
		intel_atomic_get_old_dbuf_state(state);

	if (!new_dbuf_state ||
	    (new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices &&
	     new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus))
		return;

	WARN_ON(!new_dbuf_state->base.changed);

	/* Power down the slices that the new state no longer needs */
	gen9_dbuf_slices_update(dev_priv,
				new_dbuf_state->enabled_slices);
}
