1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28#include <linux/module.h>
29#include <linux/pm_runtime.h>
30
31#include <drm/drm_atomic_helper.h>
32#include <drm/drm_fourcc.h>
33#include <drm/drm_plane_helper.h>
34
35#include "display/intel_atomic.h"
36#include "display/intel_atomic_plane.h"
37#include "display/intel_bw.h"
38#include "display/intel_de.h"
39#include "display/intel_display_types.h"
40#include "display/intel_fbc.h"
41#include "display/intel_sprite.h"
42#include "display/skl_universal_plane.h"
43
44#include "gt/intel_llc.h"
45
46#include "i915_drv.h"
47#include "i915_fixed.h"
48#include "i915_irq.h"
49#include "i915_trace.h"
50#include "intel_pcode.h"
51#include "intel_pm.h"
52#include "vlv_sideband.h"
53#include "../../../platform/x86/intel_ips.h"
54
55
/*
 * Per-plane parameters used by the SKL+ watermark calculations.
 * Filled once per plane and consumed by the per-level WM computation.
 */
struct skl_wm_params {
	bool x_tiled, y_tiled;		/* surface tiling format */
	bool rc_surface;		/* render-compressed surface */
	bool is_planar;			/* planar (e.g. NV12) format */
	u32 width;			/* plane width actually used for WM */
	u8 cpp;				/* bytes per pixel */
	u32 plane_pixel_rate;		/* adjusted pixel rate for this plane */
	u32 y_min_scanlines;		/* minimum scanlines for Y tiles */
	u32 plane_bytes_per_line;
	uint_fixed_16_16_t plane_blocks_per_line;
	uint_fixed_16_16_t y_tile_minimum;
	u32 linetime_us;
	u32 dbuf_block_size;		/* DBUF block granularity in bytes */
};
70
71
/*
 * Aggregate pipe/sprite state used when merging per-pipe watermarks
 * into a global configuration.
 */
struct intel_wm_config {
	unsigned int num_pipes_active;
	bool sprites_enabled;	/* any sprite plane enabled */
	bool sprites_scaled;	/* any sprite plane scaled */
};
77
/*
 * Clock gating / chicken bit setup common to all gen9 display platforms
 * (SKL/BXT/GLK derivatives). Each write below sets a workaround bit;
 * the specific workaround names should be confirmed against bspec.
 */
static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	if (HAS_LLC(dev_priv)) {
		/*
		 * Use the compressed-resource hash mode on parts with an
		 * LLC. NOTE(review): presumably a display compression
		 * workaround — confirm against bspec.
		 */
		intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1,
			   intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) |
			   SKL_DE_COMPRESSED_HASH_MODE);
	}

	for_each_pipe(dev_priv, pipe) {
		/*
		 * Limit plane 1 horizontal stretch to x1 on every pipe when
		 * DMA remapping (VT-d) is active; Geminilake is exempt.
		 */
		if (!IS_GEMINILAKE(dev_priv) && intel_vtd_active())
			intel_uncore_rmw(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe),
					 SKL_PLANE1_STRETCH_MAX_MASK, SKL_PLANE1_STRETCH_MAX_X1);
	}

	/* eDP PSR read-wrap fix. */
	intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1,
		   intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);

	/* Mask wakemem requests in the DCPR unit. */
	intel_uncore_write(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1,
		   intel_uncore_read(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);

	/*
	 * Keep memory awake for FBC via the display arbiter control
	 * register.
	 */
	intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
		   DISP_FBC_MEMORY_WAKE);
}
120
/*
 * Broxton-specific clock gating setup; applies the common gen9
 * settings first, then the BXT-only workaround bits.
 */
static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* Disable SDE unit clock gating. */
	intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* Disable HDC unit clock gating for HDC requests. */
	intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
		   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);

	/* Disable clock gating for both backlight PWM units. */
	intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_0, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_0) |
		   PWM1_GATING_DIS | PWM2_GATING_DIS);

	/*
	 * Lower the MMIO timeout to 950 us. NOTE(review): the exact
	 * rationale for the 950 value should be confirmed against bspec.
	 */
	intel_uncore_write(&dev_priv->uncore, RM_TIMEOUT, MMIO_TIMEOUT_US(950));

	/* Disable the FBC watermark logic in the display arbiter. */
	intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
		   DISP_FBC_WM_DIS);

	/* Disable dummy0 in the FBC chicken register. */
	intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) |
		   ILK_DPFC_DISABLE_DUMMY0);
}
165
/*
 * Geminilake-specific clock gating setup: common gen9 settings plus
 * disabling clock gating on both backlight PWM units.
 */
static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* Keep the PWM units ungated. */
	intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_0, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_0) |
		   PWM1_GATING_DIS | PWM2_GATING_DIS);
}
178
/*
 * Read the FSB and memory frequencies (in MHz) from CLKCFG on
 * Pineview and detect whether the part uses DDR3.
 */
static void pnv_get_mem_freq(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = intel_uncore_read(&dev_priv->uncore, CLKCFG);

	/* Decode the FSB frequency field. */
	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq =  667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	/* Decode the memory frequency field. */
	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* Detect DDR3 from the shared DDR3 control register. */
	tmp = intel_uncore_read(&dev_priv->uncore, CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
216
/*
 * Read the memory and FSB frequencies from the DDR and CSI PLL
 * registers on Ironlake. Unknown encodings leave the frequency at 0,
 * which disables CxSR (see intel_get_cxsr_latency()).
 */
static void ilk_get_mem_freq(struct drm_i915_private *dev_priv)
{
	u16 ddrpll, csipll;

	ddrpll = intel_uncore_read16(&dev_priv->uncore, DDRMPLL1);
	csipll = intel_uncore_read16(&dev_priv->uncore, CSIPLL0);

	/* Memory frequency in MHz from the DDR PLL divider code. */
	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		drm_dbg(&dev_priv->drm, "unknown memory frequency 0x%02x\n",
			ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	/* FSB frequency from the CSI PLL divider code. */
	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		drm_dbg(&dev_priv->drm, "unknown fsb frequency 0x%04x\n",
			csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}
}
273
/*
 * CxSR (self-refresh) latency table, looked up by
 * intel_get_cxsr_latency() using (is_desktop, is_ddr3, fsb, mem) as
 * the key. The remaining four columns are the latency values returned
 * to the watermark code (display SR, cursor SR, and — presumably —
 * their HPLL-off variants; confirm field order against struct
 * cxsr_latency). Values are hardware-tuned; do not "fix" apparent
 * irregularities (e.g. the 4103/34106 pair) without bspec backing.
 */
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
311
312static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop,
313 bool is_ddr3,
314 int fsb,
315 int mem)
316{
317 const struct cxsr_latency *latency;
318 int i;
319
320 if (fsb == 0 || mem == 0)
321 return NULL;
322
323 for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
324 latency = &cxsr_latency_table[i];
325 if (is_desktop == latency->is_desktop &&
326 is_ddr3 == latency->is_ddr3 &&
327 fsb == latency->fsb_freq && mem == latency->mem_freq)
328 return latency;
329 }
330
331 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
332
333 return NULL;
334}
335
/*
 * Request the Punit to allow (enable=true) or force-disable DDR DVFS
 * on Cherryview, and wait (up to 3 ms) for the Punit to acknowledge
 * the request by clearing the REQ_ACK bit.
 */
static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	vlv_punit_get(dev_priv);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
	if (enable)
		val &= ~FORCE_DDR_HIGH_FREQ;	/* allow DVFS to drop the freq */
	else
		val |= FORCE_DDR_HIGH_FREQ;	/* pin DDR at high frequency */
	val &= ~FORCE_DDR_LOW_FREQ;
	val |= FORCE_DDR_FREQ_REQ_ACK;		/* ask Punit to latch the request */
	vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

	/* Punit clears REQ_ACK once it has processed the request. */
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
		      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
		drm_err(&dev_priv->drm,
			"timed out waiting for Punit DDR DVFS request\n");

	vlv_punit_put(dev_priv);
}
358
359static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
360{
361 u32 val;
362
363 vlv_punit_get(dev_priv);
364
365 val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
366 if (enable)
367 val |= DSP_MAXFIFO_PM5_ENABLE;
368 else
369 val &= ~DSP_MAXFIFO_PM5_ENABLE;
370 vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
371
372 vlv_punit_put(dev_priv);
373}
374
/* Build a DSPFW field value for the given plane's shift/mask. */
#define FW_WM(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)

/*
 * Enable/disable memory self-refresh (CxSR) using the platform's
 * register interface. Returns the previous enable state, or false on
 * platforms with no CxSR support. Caller must hold wm.wm_mutex (see
 * intel_set_memory_cxsr()).
 */
static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	bool was_enabled;
	u32 val;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
		intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
		intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF_VLV);
	} else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
		was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
		intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
		intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
	} else if (IS_PINEVIEW(dev_priv)) {
		/* Pineview keeps the enable bit in DSPFW3. */
		val = intel_uncore_read(&dev_priv->uncore, DSPFW3);
		was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
		if (enable)
			val |= PINEVIEW_SELF_REFRESH_EN;
		else
			val &= ~PINEVIEW_SELF_REFRESH_EN;
		intel_uncore_write(&dev_priv->uncore, DSPFW3, val);
		intel_uncore_posting_read(&dev_priv->uncore, DSPFW3);
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
		/* 945 uses a masked-bit style register. */
		was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, val);
		intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
	} else if (IS_I915GM(dev_priv)) {
		/* 915GM controls self-refresh through INSTPM. */
		was_enabled = intel_uncore_read(&dev_priv->uncore, INSTPM) & INSTPM_SELF_EN;
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		intel_uncore_write(&dev_priv->uncore, INSTPM, val);
		intel_uncore_posting_read(&dev_priv->uncore, INSTPM);
	} else {
		/* No CxSR support on this platform. */
		return false;
	}

	trace_intel_memory_cxsr(dev_priv, was_enabled, enable);

	drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n",
		    enableddisabled(enable),
		    enableddisabled(was_enabled));

	return was_enabled;
}
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
/**
 * intel_set_memory_cxsr - set/clear memory self-refresh (CxSR)
 * @dev_priv: i915 device
 * @enable: whether self-refresh should be enabled
 *
 * Locked wrapper around _intel_set_memory_cxsr() that also keeps the
 * software cxsr bookkeeping (wm.vlv / wm.g4x) in sync with the
 * hardware state.
 *
 * Returns the previous self-refresh state.
 */
bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	bool ret;

	mutex_lock(&dev_priv->wm.wm_mutex);
	ret = _intel_set_memory_cxsr(dev_priv, enable);
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->wm.vlv.cxsr = enable;
	else if (IS_G4X(dev_priv))
		dev_priv->wm.g4x.cxsr = enable;
	mutex_unlock(&dev_priv->wm.wm_mutex);

	return ret;
}
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
/* Worst-case memory latency assumption (ns) for legacy watermarks. */
static const int pessimal_latency_ns = 5000;
497
/*
 * Extract a 9-bit FIFO start position: the low 8 bits come from
 * @dsparb at @lo_shift, the 9th bit from @dsparb2 at @hi_shift.
 */
#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
	((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))

/*
 * Read back the current DSPARB FIFO split for the crtc's pipe and
 * convert the sprite start offsets into per-plane FIFO sizes in
 * crtc_state->wm.vlv.fifo_state. The total plane FIFO is 511 entries
 * (sprite1 gets the remainder); the cursor FIFO is a fixed 63.
 */
static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
	enum pipe pipe = crtc->pipe;
	int sprite0_start, sprite1_start;
	u32 dsparb, dsparb2, dsparb3;

	/* Each pipe's split lives at different bit positions/registers. */
	switch (pipe) {
	case PIPE_A:
		dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
		dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
		break;
	case PIPE_B:
		dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
		dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
		break;
	case PIPE_C:
		dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
		dsparb3 = intel_uncore_read(&dev_priv->uncore, DSPARB3);
		sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
		sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
		break;
	default:
		MISSING_CASE(pipe);
		return;
	}

	/* Convert start offsets into sizes. */
	fifo_state->plane[PLANE_PRIMARY] = sprite0_start;
	fifo_state->plane[PLANE_SPRITE0] = sprite1_start - sprite0_start;
	fifo_state->plane[PLANE_SPRITE1] = 511 - sprite1_start;
	fifo_state->plane[PLANE_CURSOR] = 63;
}
539
540static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
541 enum i9xx_plane_id i9xx_plane)
542{
543 u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
544 int size;
545
546 size = dsparb & 0x7f;
547 if (i9xx_plane == PLANE_B)
548 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
549
550 drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
551 dsparb, plane_name(i9xx_plane), size);
552
553 return size;
554}
555
556static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
557 enum i9xx_plane_id i9xx_plane)
558{
559 u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
560 int size;
561
562 size = dsparb & 0x1ff;
563 if (i9xx_plane == PLANE_B)
564 size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
565 size >>= 1;
566
567 drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
568 dsparb, plane_name(i9xx_plane), size);
569
570 return size;
571}
572
573static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
574 enum i9xx_plane_id i9xx_plane)
575{
576 u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
577 int size;
578
579 size = dsparb & 0x7f;
580 size >>= 2;
581
582 drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
583 dsparb, plane_name(i9xx_plane), size);
584
585 return size;
586}
587
588
/* Pineview display plane watermark parameters (HPLL running). */
static const struct intel_watermark_params pnv_display_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

/* Pineview display plane watermark parameters with HPLL off. */
static const struct intel_watermark_params pnv_display_hplloff_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

/* Pineview cursor watermark parameters (HPLL running). */
static const struct intel_watermark_params pnv_cursor_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

/* Pineview cursor watermark parameters with HPLL off. */
static const struct intel_watermark_params pnv_cursor_hplloff_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

/* i965 cursor watermark parameters. */
static const struct intel_watermark_params i965_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

/* i945 display watermark parameters. */
static const struct intel_watermark_params i945_wm_info = {
	.fifo_size = I945_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

/* i915 display watermark parameters. */
static const struct intel_watermark_params i915_wm_info = {
	.fifo_size = I915_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

/* i830/i855 plane A watermark parameters. */
static const struct intel_watermark_params i830_a_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

/* i830/i855 planes B/C: half the max watermark of plane A. */
static const struct intel_watermark_params i830_bc_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM/2,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

/* i845 display watermark parameters. */
static const struct intel_watermark_params i845_wm_info = {
	.fifo_size = I830_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702static unsigned int intel_wm_method1(unsigned int pixel_rate,
703 unsigned int cpp,
704 unsigned int latency)
705{
706 u64 ret;
707
708 ret = mul_u32_u32(pixel_rate, cpp * latency);
709 ret = DIV_ROUND_UP_ULL(ret, 10000);
710
711 return ret;
712}
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
/*
 * Watermark "method 2": number of bytes needed to cover the memory
 * latency window in units of whole scanlines, i.e.
 * (latency / line time, rounded down, + 1) lines of width * cpp bytes.
 * NOTE(review): the 10000 divisor implies latency and pixel_rate are
 * in matching sub-unit scales (cf. intel_wm_method1) — confirm units
 * against callers before changing.
 */
static unsigned int intel_wm_method2(unsigned int pixel_rate,
				     unsigned int htotal,
				     unsigned int width,
				     unsigned int cpp,
				     unsigned int latency)
{
	unsigned int ret;

	/*
	 * Guard against a zero htotal from bogus crtc state; it would
	 * divide by zero below. Warn once since it indicates a bug
	 * elsewhere.
	 */
	if (WARN_ON_ONCE(htotal == 0))
		htotal = 1;

	ret = (latency * pixel_rate) / (htotal * 10000);
	ret = (ret + 1) * width * cpp;

	return ret;
}
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
/*
 * intel_calculate_wm - calculate a legacy watermark level
 * @pixel_rate: pipe pixel rate
 * @wm: per-platform FIFO/watermark parameters
 * @fifo_size: FIFO size available to the plane (in cachelines)
 * @cpp: bytes per pixel
 * @latency_ns: memory latency in nanoseconds
 *
 * Computes the FIFO entries consumed during the latency window
 * (method 1), adds the guard band, and returns fifo_size minus that —
 * clamped to the platform max and floored at 8 (or default_wm when
 * the FIFO is exhausted).
 */
static unsigned int intel_calculate_wm(int pixel_rate,
				       const struct intel_watermark_params *wm,
				       int fifo_size, int cpp,
				       unsigned int latency_ns)
{
	int entries, wm_size;

	/*
	 * latency_ns / 100 matches the fixed 10000 divisor inside
	 * intel_wm_method1(); the split keeps the intermediate product
	 * from overflowing for large clock/latency combinations.
	 */
	entries = intel_wm_method1(pixel_rate, cpp,
				   latency_ns / 100);
	entries = DIV_ROUND_UP(entries, wm->cacheline_size) +
		wm->guard_size;
	DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries);

	wm_size = fifo_size - entries;
	DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);

	/* Clamp to the hardware maximum first, then handle exhaustion. */
	if (wm_size > wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	/*
	 * Enforce a floor of 8. NOTE(review): presumably a minimum
	 * burst-size requirement — confirm against bspec before
	 * changing.
	 */
	if (wm_size <= 8)
		wm_size = 8;

	return wm_size;
}
824
/* True when a value crosses from at/above @threshold to below it. */
static bool is_disabling(int old, int new, int threshold)
{
	bool was_on = old >= threshold;
	bool is_on = new >= threshold;

	return was_on && !is_on;
}
829
/* True when a value crosses from below @threshold to at/above it. */
static bool is_enabling(int old, int new, int threshold)
{
	bool was_on = old >= threshold;
	bool is_on = new >= threshold;

	return !was_on && is_on;
}
834
/* Number of watermark levels: max_level is the highest valid index. */
static int intel_wm_num_levels(struct drm_i915_private *dev_priv)
{
	return dev_priv->wm.max_level + 1;
}
839
/*
 * Decide whether a plane should be considered "visible" for watermark
 * purposes. For the cursor the mere presence of an fb is used instead
 * of the uapi visibility — NOTE(review): presumably so cursor
 * watermarks stay stable across legacy cursor updates that toggle
 * visibility without a full commit; confirm against the cursor update
 * paths.
 */
static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
				   const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);

	/* An inactive crtc makes every plane on it invisible. */
	if (!crtc_state->hw.active)
		return false;

	if (plane->id == PLANE_CURSOR)
		return plane_state->hw.fb != NULL;
	else
		return plane_state->uapi.visible;
}
862
/*
 * Report whether a crtc is "really" active for legacy watermark
 * purposes: it must be active, have a primary fb, and have a non-zero
 * pixel clock. The fb/clock checks guard against transitional states
 * where crtc->active alone would over-report — NOTE(review): exact
 * scenarios (e.g. load-detect, DPMS) should be confirmed against the
 * modeset paths.
 */
static bool intel_crtc_active(struct intel_crtc *crtc)
{
	return crtc->active && crtc->base.primary->state->fb &&
		crtc->config->hw.adjusted_mode.crtc_clock;
}
881
882static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
883{
884 struct intel_crtc *crtc, *enabled = NULL;
885
886 for_each_intel_crtc(&dev_priv->drm, crtc) {
887 if (intel_crtc_active(crtc)) {
888 if (enabled)
889 return NULL;
890 enabled = crtc;
891 }
892 }
893
894 return enabled;
895}
896
/*
 * Recompute and program the Pineview self-refresh watermarks
 * (display/cursor SR and their HPLL-off variants) and enable CxSR —
 * but only when exactly one crtc is active; otherwise CxSR is
 * disabled.
 */
static void pnv_update_wm(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned int wm;

	latency = intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
					 dev_priv->is_ddr3,
					 dev_priv->fsb_freq,
					 dev_priv->mem_freq);
	if (!latency) {
		drm_dbg_kms(&dev_priv->drm,
			    "Unknown FSB/MEM found, disable CxSR\n");
		intel_set_memory_cxsr(dev_priv, false);
		return;
	}

	crtc = single_enabled_crtc(dev_priv);
	if (crtc) {
		const struct drm_display_mode *pipe_mode =
			&crtc->config->hw.pipe_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp = fb->format->cpp[0];
		int clock = pipe_mode->crtc_clock;

		/* Display SR watermark -> DSPFW1. */
		wm = intel_calculate_wm(clock, &pnv_display_wm,
					pnv_display_wm.fifo_size,
					cpp, latency->display_sr);
		reg = intel_uncore_read(&dev_priv->uncore, DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= FW_WM(wm, SR);
		intel_uncore_write(&dev_priv->uncore, DSPFW1, reg);
		drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg);

		/* Cursor SR watermark (cpp fixed at 4) -> DSPFW3. */
		wm = intel_calculate_wm(clock, &pnv_cursor_wm,
					pnv_display_wm.fifo_size,
					4, latency->cursor_sr);
		reg = intel_uncore_read(&dev_priv->uncore, DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= FW_WM(wm, CURSOR_SR);
		intel_uncore_write(&dev_priv->uncore, DSPFW3, reg);

		/* Display HPLL-off SR watermark -> DSPFW3. */
		wm = intel_calculate_wm(clock, &pnv_display_hplloff_wm,
					pnv_display_hplloff_wm.fifo_size,
					cpp, latency->display_hpll_disable);
		reg = intel_uncore_read(&dev_priv->uncore, DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= FW_WM(wm, HPLL_SR);
		intel_uncore_write(&dev_priv->uncore, DSPFW3, reg);

		/* Cursor HPLL-off SR watermark -> DSPFW3. */
		wm = intel_calculate_wm(clock, &pnv_cursor_hplloff_wm,
					pnv_display_hplloff_wm.fifo_size,
					4, latency->cursor_hpll_disable);
		reg = intel_uncore_read(&dev_priv->uncore, DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= FW_WM(wm, HPLL_CURSOR);
		intel_uncore_write(&dev_priv->uncore, DSPFW3, reg);
		drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg);

		intel_set_memory_cxsr(dev_priv, true);
	} else {
		intel_set_memory_cxsr(dev_priv, false);
	}
}
967
968
969
970
971
972
973
974
975
976
977
/*
 * G4x TLB-miss workaround: extra FIFO entries (in bytes) to add when
 * the plane's line of data (width * cpp, in bits) does not fill the
 * FIFO (fifo_size * 64 bits). Never negative.
 */
static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
{
	int tlb_miss = fifo_size * 64 - width * cpp * 8;

	if (tlb_miss <= 0)
		return 0;

	return tlb_miss;
}
984
/*
 * Program the computed g4x watermark values into DSPFW1..3 and emit
 * per-pipe tracepoints. The trailing posting read flushes the writes.
 */
static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
				const struct g4x_wm_values *wm)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		trace_g4x_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);

	intel_uncore_write(&dev_priv->uncore, DSPFW1,
		   FW_WM(wm->sr.plane, SR) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
	intel_uncore_write(&dev_priv->uncore, DSPFW2,
		   (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
		   FW_WM(wm->sr.fbc, FBC_SR) |
		   FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
	intel_uncore_write(&dev_priv->uncore, DSPFW3,
		   (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
		   FW_WM(wm->sr.cursor, CURSOR_SR) |
		   FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
		   FW_WM(wm->hpll.plane, HPLL_SR));

	intel_uncore_posting_read(&dev_priv->uncore, DSPFW1);
}
1013
/* DSPFW field builder using the wider VLV-specific field masks. */
#define FW_WM_VLV(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)

/*
 * Program the computed VLV/CHV watermark and DDL (deadline) values.
 * The high-order watermark registers are zeroed first so no stale
 * high bits combine with the new low bits while the registers are
 * being rewritten.
 */
static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
				const struct vlv_wm_values *wm)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		trace_vlv_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);

		/* Per-plane memory deadline values for this pipe. */
		intel_uncore_write(&dev_priv->uncore, VLV_DDL(pipe),
			   (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
			   (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
			   (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
			   (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
	}

	/* Zero all high-order bits / auxiliary watermark registers. */
	intel_uncore_write(&dev_priv->uncore, DSPHOWM, 0);
	intel_uncore_write(&dev_priv->uncore, DSPHOWM1, 0);
	intel_uncore_write(&dev_priv->uncore, DSPFW4, 0);
	intel_uncore_write(&dev_priv->uncore, DSPFW5, 0);
	intel_uncore_write(&dev_priv->uncore, DSPFW6, 0);

	intel_uncore_write(&dev_priv->uncore, DSPFW1,
		   FW_WM(wm->sr.plane, SR) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
		   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
	intel_uncore_write(&dev_priv->uncore, DSPFW2,
		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
	intel_uncore_write(&dev_priv->uncore, DSPFW3,
		   FW_WM(wm->sr.cursor, CURSOR_SR));

	if (IS_CHERRYVIEW(dev_priv)) {
		/* CHV adds pipe C and its own register copies. */
		intel_uncore_write(&dev_priv->uncore, DSPFW7_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
		intel_uncore_write(&dev_priv->uncore, DSPFW8_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
		intel_uncore_write(&dev_priv->uncore, DSPFW9_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
		/* High-order (9th/extra) bits of all the above. */
		intel_uncore_write(&dev_priv->uncore, DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
	} else {
		intel_uncore_write(&dev_priv->uncore, DSPFW7,
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
		intel_uncore_write(&dev_priv->uncore, DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
	}

	intel_uncore_posting_read(&dev_priv->uncore, DSPFW1);
}
1092
1093#undef FW_WM_VLV
1094
/*
 * Set the fixed g4x watermark latencies for the normal, SR and HPLL
 * levels. NOTE(review): values are presumably in usec (g4x_compute_wm
 * multiplies them by 10) — confirm against bspec.
 */
static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	dev_priv->wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
	dev_priv->wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
	dev_priv->wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;

	dev_priv->wm.max_level = G4X_WM_LEVEL_HPLL;
}
1104
/*
 * FIFO size (in cachelines) available to a plane at a given watermark
 * level on g4x. At the NORMAL level the primary and sprite split the
 * FIFO (127 each); at the SR/HPLL levels only the primary plane runs
 * and gets the full 511, while the sprite gets nothing. The cursor
 * always has its own 63-entry FIFO.
 */
static int g4x_plane_fifo_size(enum plane_id plane_id, int level)
{
	switch (plane_id) {
	case PLANE_CURSOR:
		return 63;
	case PLANE_PRIMARY:
		return level == G4X_WM_LEVEL_NORMAL ? 127 : 511;
	case PLANE_SPRITE0:
		return level == G4X_WM_LEVEL_NORMAL ? 127 : 0;
	default:
		MISSING_CASE(plane_id);
		return 0;
	}
}
1133
1134static int g4x_fbc_fifo_size(int level)
1135{
1136 switch (level) {
1137 case G4X_WM_LEVEL_SR:
1138 return 7;
1139 case G4X_WM_LEVEL_HPLL:
1140 return 15;
1141 default:
1142 MISSING_CASE(level);
1143 return 0;
1144 }
1145}
1146
/*
 * Compute the raw g4x watermark for one plane at one level, in FIFO
 * cachelines. Returns USHRT_MAX (an always-invalid value) when the
 * level has no latency defined, and 0 for invisible planes.
 */
static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *plane_state,
			  int level)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_display_mode *pipe_mode =
		&crtc_state->hw.pipe_mode;
	/* pri_latency * 10: scale to the units the wm methods expect. */
	unsigned int latency = dev_priv->wm.pri_latency[level] * 10;
	unsigned int clock, htotal, cpp, width, wm;

	if (latency == 0)
		return USHRT_MAX;

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	cpp = plane_state->hw.fb->format->cpp[0];

	/*
	 * For the primary plane at the SR/HPLL levels assume at least
	 * 4 bytes per pixel. NOTE(review): presumably a hardware
	 * restriction on self-refresh fetches — confirm against bspec.
	 */
	if (plane->id == PLANE_PRIMARY &&
	    level != G4X_WM_LEVEL_NORMAL)
		cpp = max(cpp, 4u);

	clock = pipe_mode->crtc_clock;
	htotal = pipe_mode->crtc_htotal;

	width = drm_rect_width(&plane_state->uapi.dst);

	if (plane->id == PLANE_CURSOR) {
		wm = intel_wm_method2(clock, htotal, width, cpp, latency);
	} else if (plane->id == PLANE_PRIMARY &&
		   level == G4X_WM_LEVEL_NORMAL) {
		wm = intel_wm_method1(clock, cpp, latency);
	} else {
		unsigned int small, large;

		/* Other cases: the smaller of the two methods wins. */
		small = intel_wm_method1(clock, cpp, latency);
		large = intel_wm_method2(clock, htotal, width, cpp, latency);

		wm = min(small, large);
	}

	/* Add the TLB-miss workaround allowance for this FIFO size. */
	wm += g4x_tlb_miss_wa(g4x_plane_fifo_size(plane->id, level),
			      width, cpp);

	/* Convert bytes to cachelines (64 bytes) plus a 2-line guard. */
	wm = DIV_ROUND_UP(wm, 64) + 2;

	return min_t(unsigned int, wm, USHRT_MAX);
}
1203
1204static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
1205 int level, enum plane_id plane_id, u16 value)
1206{
1207 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1208 bool dirty = false;
1209
1210 for (; level < intel_wm_num_levels(dev_priv); level++) {
1211 struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
1212
1213 dirty |= raw->plane[plane_id] != value;
1214 raw->plane[plane_id] = value;
1215 }
1216
1217 return dirty;
1218}
1219
1220static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
1221 int level, u16 value)
1222{
1223 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1224 bool dirty = false;
1225
1226
1227 level = max(level, G4X_WM_LEVEL_SR);
1228
1229 for (; level < intel_wm_num_levels(dev_priv); level++) {
1230 struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
1231
1232 dirty |= raw->fbc != value;
1233 raw->fbc = value;
1234 }
1235
1236 return dirty;
1237}
1238
/* Forward declaration: shared with the ILK watermark code below. */
static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state,
			      u32 pri_val);
1242
/*
 * Recompute the raw watermarks for one plane across all g4x levels.
 * Levels are filled from NORMAL upwards until the computed watermark
 * exceeds the level's FIFO size; that level and everything above it
 * is then marked invalid (USHRT_MAX). The primary plane additionally
 * carries the FBC watermarks. Returns true if any raw value changed.
 */
static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
				     const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
	enum plane_id plane_id = plane->id;
	bool dirty = false;
	int level;

	if (!intel_wm_plane_visible(crtc_state, plane_state)) {
		/* Invisible planes need zero FIFO at every level. */
		dirty |= g4x_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
		if (plane_id == PLANE_PRIMARY)
			dirty |= g4x_raw_fbc_wm_set(crtc_state, 0, 0);
		goto out;
	}

	for (level = 0; level < num_levels; level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
		int wm, max_wm;

		wm = g4x_compute_wm(crtc_state, plane_state, level);
		max_wm = g4x_plane_fifo_size(plane_id, level);

		/* This and all higher levels are unusable. */
		if (wm > max_wm)
			break;

		dirty |= raw->plane[plane_id] != wm;
		raw->plane[plane_id] = wm;

		if (plane_id != PLANE_PRIMARY ||
		    level == G4X_WM_LEVEL_NORMAL)
			continue;

		/* FBC watermark for the primary plane at SR/HPLL. */
		wm = ilk_compute_fbc_wm(crtc_state, plane_state,
					raw->plane[plane_id]);
		max_wm = g4x_fbc_fifo_size(level);

		/*
		 * An over-budget FBC watermark doesn't invalidate the
		 * level; it just disables FBC there (USHRT_MAX marker).
		 */
		if (wm > max_wm)
			wm = USHRT_MAX;

		dirty |= raw->fbc != wm;
		raw->fbc = wm;
	}

	/* Mark the remaining levels (if any) invalid. */
	dirty |= g4x_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);

	if (plane_id == PLANE_PRIMARY)
		dirty |= g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);

 out:
	if (dirty) {
		drm_dbg_kms(&dev_priv->drm,
			    "%s watermarks: normal=%d, SR=%d, HPLL=%d\n",
			    plane->base.name,
			    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id],
			    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id],
			    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]);

		if (plane_id == PLANE_PRIMARY)
			drm_dbg_kms(&dev_priv->drm,
				    "FBC watermarks: SR=%d, HPLL=%d\n",
				    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc,
				    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc);
	}

	return dirty;
}
1316
1317static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
1318 enum plane_id plane_id, int level)
1319{
1320 const struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
1321
1322 return raw->plane[plane_id] <= g4x_plane_fifo_size(plane_id, level);
1323}
1324
1325static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
1326 int level)
1327{
1328 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1329
1330 if (level > dev_priv->wm.max_level)
1331 return false;
1332
1333 return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
1334 g4x_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
1335 g4x_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
1336}
1337
1338
1339static void g4x_invalidate_wms(struct intel_crtc *crtc,
1340 struct g4x_wm_state *wm_state, int level)
1341{
1342 if (level <= G4X_WM_LEVEL_NORMAL) {
1343 enum plane_id plane_id;
1344
1345 for_each_plane_id_on_crtc(crtc, plane_id)
1346 wm_state->wm.plane[plane_id] = USHRT_MAX;
1347 }
1348
1349 if (level <= G4X_WM_LEVEL_SR) {
1350 wm_state->cxsr = false;
1351 wm_state->sr.cursor = USHRT_MAX;
1352 wm_state->sr.plane = USHRT_MAX;
1353 wm_state->sr.fbc = USHRT_MAX;
1354 }
1355
1356 if (level <= G4X_WM_LEVEL_HPLL) {
1357 wm_state->hpll_en = false;
1358 wm_state->hpll.cursor = USHRT_MAX;
1359 wm_state->hpll.plane = USHRT_MAX;
1360 wm_state->hpll.fbc = USHRT_MAX;
1361 }
1362}
1363
1364static bool g4x_compute_fbc_en(const struct g4x_wm_state *wm_state,
1365 int level)
1366{
1367 if (level < G4X_WM_LEVEL_SR)
1368 return false;
1369
1370 if (level >= G4X_WM_LEVEL_SR &&
1371 wm_state->sr.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_SR))
1372 return false;
1373
1374 if (level >= G4X_WM_LEVEL_HPLL &&
1375 wm_state->hpll.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_HPLL))
1376 return false;
1377
1378 return true;
1379}
1380
/*
 * Compute the optimal watermark state for one crtc from the raw
 * per-plane watermarks. Levels are enabled in order
 * (NORMAL -> SR -> HPLL) for as long as every plane has a valid raw
 * watermark for the level; the first invalid level stops the cascade.
 *
 * Returns 0 on success, -EINVAL when not even the NORMAL level is valid.
 */
static int g4x_compute_pipe_wm(struct intel_atomic_state *state,
			       struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
	u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
	const struct g4x_pipe_wm *raw;
	const struct intel_plane_state *old_plane_state;
	const struct intel_plane_state *new_plane_state;
	struct intel_plane *plane;
	enum plane_id plane_id;
	int i, level;
	unsigned int dirty = 0;

	/* refresh the raw watermarks of every plane touching this crtc */
	for_each_oldnew_intel_plane_in_state(state, plane,
					     old_plane_state,
					     new_plane_state, i) {
		if (new_plane_state->hw.crtc != &crtc->base &&
		    old_plane_state->hw.crtc != &crtc->base)
			continue;

		if (g4x_raw_plane_wm_compute(crtc_state, new_plane_state))
			dirty |= BIT(plane->id);
	}

	if (!dirty)
		return 0;

	level = G4X_WM_LEVEL_NORMAL;
	if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
		goto out;

	raw = &crtc_state->wm.g4x.raw[level];
	for_each_plane_id_on_crtc(crtc, plane_id)
		wm_state->wm.plane[plane_id] = raw->plane[plane_id];

	level = G4X_WM_LEVEL_SR;
	if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
		goto out;

	raw = &crtc_state->wm.g4x.raw[level];
	wm_state->sr.plane = raw->plane[PLANE_PRIMARY];
	wm_state->sr.cursor = raw->plane[PLANE_CURSOR];
	wm_state->sr.fbc = raw->fbc;

	/* cxsr only when the primary plane is the sole enabled non-cursor plane */
	wm_state->cxsr = active_planes == BIT(PLANE_PRIMARY);

	level = G4X_WM_LEVEL_HPLL;
	if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
		goto out;

	raw = &crtc_state->wm.g4x.raw[level];
	wm_state->hpll.plane = raw->plane[PLANE_PRIMARY];
	wm_state->hpll.cursor = raw->plane[PLANE_CURSOR];
	wm_state->hpll.fbc = raw->fbc;

	/* HPLL is subject to the same single-plane restriction as cxsr */
	wm_state->hpll_en = wm_state->cxsr;

	level++;

 out:
	if (level == G4X_WM_LEVEL_NORMAL)
		return -EINVAL;

	/* invalidate the levels we couldn't enable */
	g4x_invalidate_wms(crtc, wm_state, level);

	/*
	 * Determine whether the FBC watermarks of the valid levels fit.
	 * If not, only the FBC watermark usage is disabled rather than
	 * disabling the SR/HPLL level(s) entirely. 'level - 1' is the
	 * highest level that was validated above.
	 */
	wm_state->fbc_en = g4x_compute_fbc_en(wm_state, level - 1);

	return 0;
}
1460
1461static int g4x_compute_intermediate_wm(struct intel_atomic_state *state,
1462 struct intel_crtc *crtc)
1463{
1464 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1465 struct intel_crtc_state *new_crtc_state =
1466 intel_atomic_get_new_crtc_state(state, crtc);
1467 const struct intel_crtc_state *old_crtc_state =
1468 intel_atomic_get_old_crtc_state(state, crtc);
1469 struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate;
1470 const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal;
1471 const struct g4x_wm_state *active = &old_crtc_state->wm.g4x.optimal;
1472 enum plane_id plane_id;
1473
1474 if (!new_crtc_state->hw.active ||
1475 drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) {
1476 *intermediate = *optimal;
1477
1478 intermediate->cxsr = false;
1479 intermediate->hpll_en = false;
1480 goto out;
1481 }
1482
1483 intermediate->cxsr = optimal->cxsr && active->cxsr &&
1484 !new_crtc_state->disable_cxsr;
1485 intermediate->hpll_en = optimal->hpll_en && active->hpll_en &&
1486 !new_crtc_state->disable_cxsr;
1487 intermediate->fbc_en = optimal->fbc_en && active->fbc_en;
1488
1489 for_each_plane_id_on_crtc(crtc, plane_id) {
1490 intermediate->wm.plane[plane_id] =
1491 max(optimal->wm.plane[plane_id],
1492 active->wm.plane[plane_id]);
1493
1494 drm_WARN_ON(&dev_priv->drm, intermediate->wm.plane[plane_id] >
1495 g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL));
1496 }
1497
1498 intermediate->sr.plane = max(optimal->sr.plane,
1499 active->sr.plane);
1500 intermediate->sr.cursor = max(optimal->sr.cursor,
1501 active->sr.cursor);
1502 intermediate->sr.fbc = max(optimal->sr.fbc,
1503 active->sr.fbc);
1504
1505 intermediate->hpll.plane = max(optimal->hpll.plane,
1506 active->hpll.plane);
1507 intermediate->hpll.cursor = max(optimal->hpll.cursor,
1508 active->hpll.cursor);
1509 intermediate->hpll.fbc = max(optimal->hpll.fbc,
1510 active->hpll.fbc);
1511
1512 drm_WARN_ON(&dev_priv->drm,
1513 (intermediate->sr.plane >
1514 g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) ||
1515 intermediate->sr.cursor >
1516 g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) &&
1517 intermediate->cxsr);
1518 drm_WARN_ON(&dev_priv->drm,
1519 (intermediate->sr.plane >
1520 g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) ||
1521 intermediate->sr.cursor >
1522 g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) &&
1523 intermediate->hpll_en);
1524
1525 drm_WARN_ON(&dev_priv->drm,
1526 intermediate->sr.fbc > g4x_fbc_fifo_size(1) &&
1527 intermediate->fbc_en && intermediate->cxsr);
1528 drm_WARN_ON(&dev_priv->drm,
1529 intermediate->hpll.fbc > g4x_fbc_fifo_size(2) &&
1530 intermediate->fbc_en && intermediate->hpll_en);
1531
1532out:
1533
1534
1535
1536
1537 if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
1538 new_crtc_state->wm.need_postvbl_update = true;
1539
1540 return 0;
1541}
1542
1543static void g4x_merge_wm(struct drm_i915_private *dev_priv,
1544 struct g4x_wm_values *wm)
1545{
1546 struct intel_crtc *crtc;
1547 int num_active_pipes = 0;
1548
1549 wm->cxsr = true;
1550 wm->hpll_en = true;
1551 wm->fbc_en = true;
1552
1553 for_each_intel_crtc(&dev_priv->drm, crtc) {
1554 const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
1555
1556 if (!crtc->active)
1557 continue;
1558
1559 if (!wm_state->cxsr)
1560 wm->cxsr = false;
1561 if (!wm_state->hpll_en)
1562 wm->hpll_en = false;
1563 if (!wm_state->fbc_en)
1564 wm->fbc_en = false;
1565
1566 num_active_pipes++;
1567 }
1568
1569 if (num_active_pipes != 1) {
1570 wm->cxsr = false;
1571 wm->hpll_en = false;
1572 wm->fbc_en = false;
1573 }
1574
1575 for_each_intel_crtc(&dev_priv->drm, crtc) {
1576 const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
1577 enum pipe pipe = crtc->pipe;
1578
1579 wm->pipe[pipe] = wm_state->wm;
1580 if (crtc->active && wm->cxsr)
1581 wm->sr = wm_state->sr;
1582 if (crtc->active && wm->hpll_en)
1583 wm->hpll = wm_state->hpll;
1584 }
1585}
1586
1587static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
1588{
1589 struct g4x_wm_values *old_wm = &dev_priv->wm.g4x;
1590 struct g4x_wm_values new_wm = {};
1591
1592 g4x_merge_wm(dev_priv, &new_wm);
1593
1594 if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
1595 return;
1596
1597 if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
1598 _intel_set_memory_cxsr(dev_priv, false);
1599
1600 g4x_write_wm_values(dev_priv, &new_wm);
1601
1602 if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
1603 _intel_set_memory_cxsr(dev_priv, true);
1604
1605 *old_wm = new_wm;
1606}
1607
1608static void g4x_initial_watermarks(struct intel_atomic_state *state,
1609 struct intel_crtc *crtc)
1610{
1611 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1612 const struct intel_crtc_state *crtc_state =
1613 intel_atomic_get_new_crtc_state(state, crtc);
1614
1615 mutex_lock(&dev_priv->wm.wm_mutex);
1616 crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate;
1617 g4x_program_watermarks(dev_priv);
1618 mutex_unlock(&dev_priv->wm.wm_mutex);
1619}
1620
1621static void g4x_optimize_watermarks(struct intel_atomic_state *state,
1622 struct intel_crtc *crtc)
1623{
1624 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1625 const struct intel_crtc_state *crtc_state =
1626 intel_atomic_get_new_crtc_state(state, crtc);
1627
1628 if (!crtc_state->wm.need_postvbl_update)
1629 return;
1630
1631 mutex_lock(&dev_priv->wm.wm_mutex);
1632 crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
1633 g4x_program_watermarks(dev_priv);
1634 mutex_unlock(&dev_priv->wm.wm_mutex);
1635}
1636
1637
/*
 * VLV flavour of the "method 2" watermark: the generic result is in
 * bytes, while the VLV FIFO is allocated in 64 byte cachelines.
 */
static unsigned int vlv_wm_method2(unsigned int pixel_rate,
				   unsigned int htotal,
				   unsigned int width,
				   unsigned int cpp,
				   unsigned int latency)
{
	return DIV_ROUND_UP(intel_wm_method2(pixel_rate, htotal,
					     width, cpp, latency), 64);
}
1652
1653static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
1654{
1655
1656 dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
1657
1658 dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;
1659
1660 if (IS_CHERRYVIEW(dev_priv)) {
1661 dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
1662 dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
1663
1664 dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
1665 }
1666}
1667
/*
 * Compute a single raw watermark (in FIFO cachelines) for one plane at
 * one latency level. Returns USHRT_MAX when the level has no latency
 * programmed (level unavailable), and 0 for invisible planes.
 */
static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
				const struct intel_plane_state *plane_state,
				int level)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_display_mode *pipe_mode =
		&crtc_state->hw.pipe_mode;
	unsigned int clock, htotal, cpp, width, wm;

	if (dev_priv->wm.pri_latency[level] == 0)
		return USHRT_MAX;

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	cpp = plane_state->hw.fb->format->cpp[0];
	clock = pipe_mode->crtc_clock;
	htotal = pipe_mode->crtc_htotal;
	/* note: uses the pipe source width, not the plane width */
	width = crtc_state->pipe_src_w;

	if (plane->id == PLANE_CURSOR) {
		/*
		 * The cursor watermark is hardcoded to the maximum the
		 * 63-entry cursor FIFO allows; presumably the method 2
		 * formula would overshoot the small FIFO -- TODO confirm
		 * against bspec.
		 */
		wm = 63;
	} else {
		/* latency appears to be in usec; x10 converts to 100ns units */
		wm = vlv_wm_method2(clock, htotal, width, cpp,
				    dev_priv->wm.pri_latency[level] * 10);
	}

	return min_t(unsigned int, wm, USHRT_MAX);
}
1704
1705static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
1706{
1707 return (active_planes & (BIT(PLANE_SPRITE0) |
1708 BIT(PLANE_SPRITE1))) == BIT(PLANE_SPRITE1);
1709}
1710
/*
 * Distribute the 511-entry DSPARB FIFO (plus a fixed 63-entry cursor
 * FIFO) between the active planes on this crtc, proportionally to their
 * PM2-level raw watermarks, then spread any leftover evenly.
 *
 * Returns 0 on success, -EINVAL when the combined demand exceeds the FIFO.
 */
static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct g4x_pipe_wm *raw =
		&crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
	struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
	u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
	int num_active_planes = hweight8(active_planes);
	const int fifo_size = 511;
	int fifo_extra, fifo_left = fifo_size;
	int sprite0_fifo_extra = 0;
	unsigned int total_rate;
	enum plane_id plane_id;

	/*
	 * Workaround: when only sprite1 is enabled, still reserve one
	 * FIFO entry for sprite0 (see vlv_need_sprite0_fifo_workaround();
	 * presumably avoids problems when sprite0 is enabled later --
	 * TODO confirm against bspec/commit history).
	 */
	if (vlv_need_sprite0_fifo_workaround(active_planes))
		sprite0_fifo_extra = 1;

	total_rate = raw->plane[PLANE_PRIMARY] +
		raw->plane[PLANE_SPRITE0] +
		raw->plane[PLANE_SPRITE1] +
		sprite0_fifo_extra;

	if (total_rate > fifo_size)
		return -EINVAL;

	/* avoid dividing by zero below */
	if (total_rate == 0)
		total_rate = 1;

	/* proportional split based on the PM2 raw watermarks */
	for_each_plane_id_on_crtc(crtc, plane_id) {
		unsigned int rate;

		if ((active_planes & BIT(plane_id)) == 0) {
			fifo_state->plane[plane_id] = 0;
			continue;
		}

		rate = raw->plane[plane_id];
		fifo_state->plane[plane_id] = fifo_size * rate / total_rate;
		fifo_left -= fifo_state->plane[plane_id];
	}

	fifo_state->plane[PLANE_SPRITE0] += sprite0_fifo_extra;
	fifo_left -= sprite0_fifo_extra;

	/* the cursor FIFO is fixed at 63 entries */
	fifo_state->plane[PLANE_CURSOR] = 63;

	fifo_extra = DIV_ROUND_UP(fifo_left, num_active_planes ?: 1);

	/* spread the leftover entries evenly among the active planes */
	for_each_plane_id_on_crtc(crtc, plane_id) {
		int plane_extra;

		if (fifo_left == 0)
			break;

		if ((active_planes & BIT(plane_id)) == 0)
			continue;

		plane_extra = min(fifo_extra, fifo_left);
		fifo_state->plane[plane_id] += plane_extra;
		fifo_left -= plane_extra;
	}

	drm_WARN_ON(&dev_priv->drm, active_planes != 0 && fifo_left != 0);

	/* with no planes active, hand the whole FIFO to the primary plane */
	if (active_planes == 0) {
		drm_WARN_ON(&dev_priv->drm, fifo_left != fifo_size);
		fifo_state->plane[PLANE_PRIMARY] = fifo_left;
	}

	return 0;
}
1793
1794
1795static void vlv_invalidate_wms(struct intel_crtc *crtc,
1796 struct vlv_wm_state *wm_state, int level)
1797{
1798 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1799
1800 for (; level < intel_wm_num_levels(dev_priv); level++) {
1801 enum plane_id plane_id;
1802
1803 for_each_plane_id_on_crtc(crtc, plane_id)
1804 wm_state->wm[level].plane[plane_id] = USHRT_MAX;
1805
1806 wm_state->sr[level].cursor = USHRT_MAX;
1807 wm_state->sr[level].plane = USHRT_MAX;
1808 }
1809}
1810
1811static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
1812{
1813 if (wm > fifo_size)
1814 return USHRT_MAX;
1815 else
1816 return fifo_size - wm;
1817}
1818
1819
1820
1821
1822
1823static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
1824 int level, enum plane_id plane_id, u16 value)
1825{
1826 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1827 int num_levels = intel_wm_num_levels(dev_priv);
1828 bool dirty = false;
1829
1830 for (; level < num_levels; level++) {
1831 struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
1832
1833 dirty |= raw->plane[plane_id] != value;
1834 raw->plane[plane_id] = value;
1835 }
1836
1837 return dirty;
1838}
1839
1840static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
1841 const struct intel_plane_state *plane_state)
1842{
1843 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1844 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1845 enum plane_id plane_id = plane->id;
1846 int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
1847 int level;
1848 bool dirty = false;
1849
1850 if (!intel_wm_plane_visible(crtc_state, plane_state)) {
1851 dirty |= vlv_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
1852 goto out;
1853 }
1854
1855 for (level = 0; level < num_levels; level++) {
1856 struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
1857 int wm = vlv_compute_wm_level(crtc_state, plane_state, level);
1858 int max_wm = plane_id == PLANE_CURSOR ? 63 : 511;
1859
1860 if (wm > max_wm)
1861 break;
1862
1863 dirty |= raw->plane[plane_id] != wm;
1864 raw->plane[plane_id] = wm;
1865 }
1866
1867
1868 dirty |= vlv_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);
1869
1870out:
1871 if (dirty)
1872 drm_dbg_kms(&dev_priv->drm,
1873 "%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n",
1874 plane->base.name,
1875 crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
1876 crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id],
1877 crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]);
1878
1879 return dirty;
1880}
1881
1882static bool vlv_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
1883 enum plane_id plane_id, int level)
1884{
1885 const struct g4x_pipe_wm *raw =
1886 &crtc_state->wm.vlv.raw[level];
1887 const struct vlv_fifo_state *fifo_state =
1888 &crtc_state->wm.vlv.fifo_state;
1889
1890 return raw->plane[plane_id] <= fifo_state->plane[plane_id];
1891}
1892
1893static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, int level)
1894{
1895 return vlv_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
1896 vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
1897 vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE1, level) &&
1898 vlv_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
1899}
1900
/*
 * Compute the optimal VLV/CHV watermark state for one crtc: refresh the
 * raw per-plane watermarks, recompute the FIFO split when needed, and
 * convert the raw values into the inverted HW form for every level that
 * remains valid.
 *
 * Returns 0 on success, -EINVAL when not even level 0 (PM2) is valid.
 */
static int vlv_compute_pipe_wm(struct intel_atomic_state *state,
			       struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
	const struct vlv_fifo_state *fifo_state =
		&crtc_state->wm.vlv.fifo_state;
	u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
	int num_active_planes = hweight8(active_planes);
	bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->uapi);
	const struct intel_plane_state *old_plane_state;
	const struct intel_plane_state *new_plane_state;
	struct intel_plane *plane;
	enum plane_id plane_id;
	int level, ret, i;
	unsigned int dirty = 0;

	/* refresh the raw watermarks of every plane touching this crtc */
	for_each_oldnew_intel_plane_in_state(state, plane,
					     old_plane_state,
					     new_plane_state, i) {
		if (new_plane_state->hw.crtc != &crtc->base &&
		    old_plane_state->hw.crtc != &crtc->base)
			continue;

		if (vlv_raw_plane_wm_compute(crtc_state, new_plane_state))
			dirty |= BIT(plane->id);
	}

	/*
	 * A modeset always forces the FIFO registers to be reprogrammed,
	 * even when the raw watermarks (and hence the split) are unchanged.
	 */
	if (needs_modeset)
		crtc_state->fifo_changed = true;

	if (!dirty)
		return 0;

	/* cursor changes alone don't affect the FIFO split */
	if (dirty & ~BIT(PLANE_CURSOR)) {
		const struct intel_crtc_state *old_crtc_state =
			intel_atomic_get_old_crtc_state(state, crtc);
		const struct vlv_fifo_state *old_fifo_state =
			&old_crtc_state->wm.vlv.fifo_state;

		ret = vlv_compute_fifo(crtc_state);
		if (ret)
			return ret;

		if (needs_modeset ||
		    memcmp(old_fifo_state, fifo_state,
			   sizeof(*fifo_state)) != 0)
			crtc_state->fifo_changed = true;
	}

	/* initially allow all levels; trimmed below */
	wm_state->num_levels = intel_wm_num_levels(dev_priv);

	/*
	 * cxsr is restricted to pipes A/B with exactly one enabled
	 * primary/sprite plane.
	 */
	wm_state->cxsr = crtc->pipe != PIPE_C && num_active_planes == 1;

	for (level = 0; level < wm_state->num_levels; level++) {
		const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
		/* SR spans all pipes' FIFOs: 512 entries per pipe, minus one */
		const int sr_fifo_size = INTEL_NUM_PIPES(dev_priv) * 512 - 1;

		if (!vlv_raw_crtc_wm_is_valid(crtc_state, level))
			break;

		/* convert raw watermarks into the inverted HW form */
		for_each_plane_id_on_crtc(crtc, plane_id) {
			wm_state->wm[level].plane[plane_id] =
				vlv_invert_wm_value(raw->plane[plane_id],
						    fifo_state->plane[plane_id]);
		}

		wm_state->sr[level].plane =
			vlv_invert_wm_value(max3(raw->plane[PLANE_PRIMARY],
						 raw->plane[PLANE_SPRITE0],
						 raw->plane[PLANE_SPRITE1]),
					    sr_fifo_size);

		wm_state->sr[level].cursor =
			vlv_invert_wm_value(raw->plane[PLANE_CURSOR],
					    63);
	}

	if (level == 0)
		return -EINVAL;

	/* limit to only the levels we could actually use */
	wm_state->num_levels = level;

	/* invalidate the remaining (higher) levels */
	vlv_invalidate_wms(crtc, wm_state, level);

	return 0;
}
2004
2005#define VLV_FIFO(plane, value) \
2006 (((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
2007
/*
 * Reprogram the DSPARB/DSPARB2/DSPARB3 FIFO split registers for one
 * pipe from the precomputed fifo_state. Only does anything when
 * crtc_state->fifo_changed was set (see vlv_compute_pipe_wm()).
 */
static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_uncore *uncore = &dev_priv->uncore;
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct vlv_fifo_state *fifo_state =
		&crtc_state->wm.vlv.fifo_state;
	int sprite0_start, sprite1_start, fifo_size;
	u32 dsparb, dsparb2, dsparb3;

	if (!crtc_state->fifo_changed)
		return;

	/* FIFO layout: | primary | sprite0 | sprite1 | (cursor fixed at 63) */
	sprite0_start = fifo_state->plane[PLANE_PRIMARY];
	sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start;
	fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start;

	drm_WARN_ON(&dev_priv->drm, fifo_state->plane[PLANE_CURSOR] != 63);
	drm_WARN_ON(&dev_priv->drm, fifo_size != 511);

	trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size);

	/*
	 * uncore.lock protects the DSPARB registers from concurrent
	 * updates by multiple pipes and makes it safe to use the cheaper
	 * *_fw() register accessors below.
	 */
	spin_lock(&uncore->lock);

	switch (crtc->pipe) {
	case PIPE_A:
		dsparb = intel_uncore_read_fw(uncore, DSPARB);
		dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
			    VLV_FIFO(SPRITEB, 0xff));
		dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
			   VLV_FIFO(SPRITEB, sprite1_start));

		/*
		 * NOTE(review): the HI masks use 0x1 here but 0xff in the
		 * PIPE_B/PIPE_C cases; harmless either way since VLV_FIFO()
		 * clips the value with DSPARB_*_MASK_VLV, but worth unifying.
		 */
		dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
			     VLV_FIFO(SPRITEB_HI, 0x1));
		dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));

		intel_uncore_write_fw(uncore, DSPARB, dsparb);
		intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
		break;
	case PIPE_B:
		dsparb = intel_uncore_read_fw(uncore, DSPARB);
		dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
			    VLV_FIFO(SPRITED, 0xff));
		dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
			   VLV_FIFO(SPRITED, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
			     VLV_FIFO(SPRITED_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITED_HI, sprite1_start >> 8));

		intel_uncore_write_fw(uncore, DSPARB, dsparb);
		intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
		break;
	case PIPE_C:
		dsparb3 = intel_uncore_read_fw(uncore, DSPARB3);
		dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);

		dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
			     VLV_FIFO(SPRITEF, 0xff));
		dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
			    VLV_FIFO(SPRITEF, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
			     VLV_FIFO(SPRITEF_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));

		intel_uncore_write_fw(uncore, DSPARB3, dsparb3);
		intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
		break;
	default:
		break;
	}

	/* flush the writes before dropping the lock */
	intel_uncore_posting_read_fw(uncore, DSPARB);

	spin_unlock(&uncore->lock);
}
2103
2104#undef VLV_FIFO
2105
/*
 * Merge the old (active) and new (optimal) watermark states into the
 * intermediate state used while the update is in flight. The inverted
 * HW values are min()'d, i.e. the more conservative watermark wins.
 */
static int vlv_compute_intermediate_wm(struct intel_atomic_state *state,
				       struct intel_crtc *crtc)
{
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate;
	const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal;
	const struct vlv_wm_state *active = &old_crtc_state->wm.vlv.optimal;
	int level;

	/* inactive crtc or full modeset: just use the optimal state, no cxsr */
	if (!new_crtc_state->hw.active ||
	    drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) {
		*intermediate = *optimal;

		intermediate->cxsr = false;
		goto out;
	}

	intermediate->num_levels = min(optimal->num_levels, active->num_levels);
	/* cxsr must be acceptable to both the old and the new state */
	intermediate->cxsr = optimal->cxsr && active->cxsr &&
		!new_crtc_state->disable_cxsr;

	for (level = 0; level < intermediate->num_levels; level++) {
		enum plane_id plane_id;

		for_each_plane_id_on_crtc(crtc, plane_id) {
			/* min() of inverted values == larger raw watermark */
			intermediate->wm[level].plane[plane_id] =
				min(optimal->wm[level].plane[plane_id],
				    active->wm[level].plane[plane_id]);
		}

		intermediate->sr[level].plane = min(optimal->sr[level].plane,
						    active->sr[level].plane);
		intermediate->sr[level].cursor = min(optimal->sr[level].cursor,
						     active->sr[level].cursor);
	}

	/* levels beyond the common num_levels are unusable */
	vlv_invalidate_wms(crtc, intermediate, level);

out:
	/*
	 * If our intermediate WM's are identical to the final WM's, then we
	 * can skip the post-vblank programming; otherwise we need to set
	 * need_postvbl_update.
	 */
	if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
		new_crtc_state->wm.need_postvbl_update = true;

	return 0;
}
2157
2158static void vlv_merge_wm(struct drm_i915_private *dev_priv,
2159 struct vlv_wm_values *wm)
2160{
2161 struct intel_crtc *crtc;
2162 int num_active_pipes = 0;
2163
2164 wm->level = dev_priv->wm.max_level;
2165 wm->cxsr = true;
2166
2167 for_each_intel_crtc(&dev_priv->drm, crtc) {
2168 const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
2169
2170 if (!crtc->active)
2171 continue;
2172
2173 if (!wm_state->cxsr)
2174 wm->cxsr = false;
2175
2176 num_active_pipes++;
2177 wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
2178 }
2179
2180 if (num_active_pipes != 1)
2181 wm->cxsr = false;
2182
2183 if (num_active_pipes > 1)
2184 wm->level = VLV_WM_LEVEL_PM2;
2185
2186 for_each_intel_crtc(&dev_priv->drm, crtc) {
2187 const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
2188 enum pipe pipe = crtc->pipe;
2189
2190 wm->pipe[pipe] = wm_state->wm[wm->level];
2191 if (crtc->active && wm->cxsr)
2192 wm->sr = wm_state->sr[wm->level];
2193
2194 wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2;
2195 wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2;
2196 wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2;
2197 wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2;
2198 }
2199}
2200
/*
 * Merge and program the global VLV/CHV watermarks. The power saving
 * feature transitions are carefully sequenced: DDR DVFS, PM5 and cxsr
 * are disabled *before* the new watermarks are written, and (re)enabled
 * only *after* the write, so the hardware never runs a feature against
 * watermarks that do not yet support it. Do not reorder these calls.
 */
static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
{
	struct vlv_wm_values *old_wm = &dev_priv->wm.vlv;
	struct vlv_wm_values new_wm = {};

	vlv_merge_wm(dev_priv, &new_wm);

	/* nothing to do if the merged values didn't change */
	if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
		return;

	if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
		chv_set_memory_dvfs(dev_priv, false);

	if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
		chv_set_memory_pm5(dev_priv, false);

	if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
		_intel_set_memory_cxsr(dev_priv, false);

	vlv_write_wm_values(dev_priv, &new_wm);

	if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
		_intel_set_memory_cxsr(dev_priv, true);

	if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
		chv_set_memory_pm5(dev_priv, true);

	if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
		chv_set_memory_dvfs(dev_priv, true);

	*old_wm = new_wm;
}
2233
2234static void vlv_initial_watermarks(struct intel_atomic_state *state,
2235 struct intel_crtc *crtc)
2236{
2237 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2238 const struct intel_crtc_state *crtc_state =
2239 intel_atomic_get_new_crtc_state(state, crtc);
2240
2241 mutex_lock(&dev_priv->wm.wm_mutex);
2242 crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
2243 vlv_program_watermarks(dev_priv);
2244 mutex_unlock(&dev_priv->wm.wm_mutex);
2245}
2246
2247static void vlv_optimize_watermarks(struct intel_atomic_state *state,
2248 struct intel_crtc *crtc)
2249{
2250 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2251 const struct intel_crtc_state *crtc_state =
2252 intel_atomic_get_new_crtc_state(state, crtc);
2253
2254 if (!crtc_state->wm.need_postvbl_update)
2255 return;
2256
2257 mutex_lock(&dev_priv->wm.wm_mutex);
2258 crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
2259 vlv_program_watermarks(dev_priv);
2260 mutex_unlock(&dev_priv->wm.wm_mutex);
2261}
2262
/*
 * Compute and program the i965 watermarks. The per-plane FIFO
 * watermarks are hardcoded to 8; only the self-refresh (SR) plane and
 * cursor watermarks are computed, and only when a single crtc is
 * enabled (otherwise cxsr is disabled).
 */
static void i965_update_wm(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;
	bool cxsr_enabled;

	/* self-refresh watermarks only apply with a single enabled crtc */
	crtc = single_enabled_crtc(dev_priv);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		const struct drm_display_mode *pipe_mode =
			&crtc->config->hw.pipe_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int clock = pipe_mode->crtc_clock;
		int htotal = pipe_mode->crtc_htotal;
		int hdisplay = crtc->config->pipe_src_w;
		int cpp = fb->format->cpp[0];
		int entries;

		entries = intel_wm_method2(clock, htotal,
					   hdisplay, cpp, sr_latency_ns / 100);
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		/* SR watermark = FIFO size minus the required entries */
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		/* the SR field in DSPFW1 is 9 bits wide */
		srwm &= 0x1ff;
		drm_dbg_kms(&dev_priv->drm,
			    "self-refresh entries: %d, wm: %d\n",
			    entries, srwm);

		/* cursor: cpp hardcoded to 4 bytes per pixel */
		entries = intel_wm_method2(clock, htotal,
					   crtc->base.cursor->state->crtc_w, 4,
					   sr_latency_ns / 100);
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size) +
			i965_cursor_wm_info.guard_size;

		cursor_sr = i965_cursor_wm_info.fifo_size - entries;
		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		drm_dbg_kms(&dev_priv->drm,
			    "self-refresh watermark: display plane %d "
			    "cursor %d\n", srwm, cursor_sr);

		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		/* turn off self-refresh before programming the watermarks */
		intel_set_memory_cxsr(dev_priv, false);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		    srwm);

	/* plane/cursor watermarks are fixed at 8; only SR values vary */
	intel_uncore_write(&dev_priv->uncore, DSPFW1, FW_WM(srwm, SR) |
			   FW_WM(8, CURSORB) |
			   FW_WM(8, PLANEB) |
			   FW_WM(8, PLANEA));
	intel_uncore_write(&dev_priv->uncore, DSPFW2, FW_WM(8, CURSORA) |
			   FW_WM(8, PLANEC_OLD));
	/* update the cursor SR watermark */
	intel_uncore_write(&dev_priv->uncore, DSPFW3, FW_WM(cursor_sr, CURSOR_SR));

	/* re-enable self-refresh only after the new watermarks are in place */
	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
2335
2336#undef FW_WM
2337
/*
 * Compute and program FIFO watermarks for gen2/3 (i8xx/i9xx) platforms.
 * Plane A/B watermarks are derived from each pipe's pixel rate and FIFO
 * allocation; a self-refresh watermark is additionally programmed when
 * exactly one pipe is active and the platform has FW_BLC self-refresh.
 */
static void i9xx_update_wm(struct drm_i915_private *dev_priv)
{
	const struct intel_watermark_params *wm_info;
	u32 fwater_lo;
	u32 fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct intel_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev_priv))
		wm_info = &i945_wm_info;
	else if (DISPLAY_VER(dev_priv) != 2)
		wm_info = &i915_wm_info;
	else
		wm_info = &i830_a_wm_info;

	if (DISPLAY_VER(dev_priv) == 2)
		fifo_size = i830_get_fifo_size(dev_priv, PLANE_A);
	else
		fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_A);
	crtc = intel_get_crtc_for_plane(dev_priv, PLANE_A);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *pipe_mode =
			&crtc->config->hw.pipe_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp;

		/* gen2 watermarks are always computed with 4 bytes/pixel */
		if (DISPLAY_VER(dev_priv) == 2)
			cpp = 4;
		else
			cpp = fb->format->cpp[0];

		planea_wm = intel_calculate_wm(pipe_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		enabled = crtc;
	} else {
		/* Pipe idle: hand plane A the whole FIFO minus the guard band */
		planea_wm = fifo_size - wm_info->guard_size;
		if (planea_wm > (long)wm_info->max_wm)
			planea_wm = wm_info->max_wm;
	}

	/* gen2 plane B/C share different (narrower) WM parameters */
	if (DISPLAY_VER(dev_priv) == 2)
		wm_info = &i830_bc_wm_info;

	if (DISPLAY_VER(dev_priv) == 2)
		fifo_size = i830_get_fifo_size(dev_priv, PLANE_B);
	else
		fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_B);
	crtc = intel_get_crtc_for_plane(dev_priv, PLANE_B);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *pipe_mode =
			&crtc->config->hw.pipe_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp;

		if (DISPLAY_VER(dev_priv) == 2)
			cpp = 4;
		else
			cpp = fb->format->cpp[0];

		planeb_wm = intel_calculate_wm(pipe_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		/*
		 * Self-refresh is only possible with a single active pipe;
		 * a second active pipe clears 'enabled' again.
		 */
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else {
		planeb_wm = fifo_size - wm_info->guard_size;
		if (planeb_wm > (long)wm_info->max_wm)
			planeb_wm = wm_info->max_wm;
	}

	drm_dbg_kms(&dev_priv->drm,
		    "FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	if (IS_I915GM(dev_priv) && enabled) {
		struct drm_i915_gem_object *obj;

		obj = intel_fb_obj(enabled->base.primary->state->fb);

		/* i915gm: self-refresh only with a tiled framebuffer */
		if (!i915_gem_object_is_tiled(obj))
			enabled = NULL;
	}

	/*
	 * Cursor watermark is fixed; presumably overlay/cursor always uses
	 * the same small FIFO share on these parts -- TODO confirm.
	 */
	cwm = 2;

	/* Disable CxSR before reprogramming the watermarks */
	intel_set_memory_cxsr(dev_priv, false);

	/* Compute the self-refresh watermark for the single enabled pipe */
	if (HAS_FW_BLC(dev_priv) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		const struct drm_display_mode *pipe_mode =
			&enabled->config->hw.pipe_mode;
		const struct drm_framebuffer *fb =
			enabled->base.primary->state->fb;
		int clock = pipe_mode->crtc_clock;
		int htotal = pipe_mode->crtc_htotal;
		int hdisplay = enabled->config->pipe_src_w;
		int cpp;
		int entries;

		if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
			cpp = 4;
		else
			cpp = fb->format->cpp[0];

		entries = intel_wm_method2(clock, htotal, hdisplay, cpp,
					   sr_latency_ns / 100);
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		drm_dbg_kms(&dev_priv->drm,
			    "self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
			intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF,
					   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else
			intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, srwm & 0x3f);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		    planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
	intel_uncore_write(&dev_priv->uncore, FW_BLC2, fwater_hi);

	/* Re-enable CxSR only if a single pipe remained eligible */
	if (enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
2488
/*
 * Program the single plane A FIFO watermark for i845/i865 by merging it
 * into the existing FW_BLC register value. No-op unless exactly one CRTC
 * is enabled.
 */
static void i845_update_wm(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;
	const struct drm_display_mode *pipe_mode;
	u32 fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev_priv);
	if (crtc == NULL)
		return;

	pipe_mode = &crtc->config->hw.pipe_mode;
	planea_wm = intel_calculate_wm(pipe_mode->crtc_clock,
				       &i845_wm_info,
				       i845_get_fifo_size(dev_priv, PLANE_A),
				       4, pessimal_latency_ns);
	/* Preserve the rest of FW_BLC, replace only the low watermark bits */
	fwater_lo = intel_uncore_read(&dev_priv->uncore, FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	drm_dbg_kms(&dev_priv->drm,
		    "Setting FIFO watermarks - A: %d\n", planea_wm);

	intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
}
2513
2514
/*
 * ILK-style "method 1" watermark: generic method 1 bytes converted to
 * 64-byte blocks plus a two-block guard band.
 */
static unsigned int ilk_wm_method1(unsigned int pixel_rate,
				   unsigned int cpp,
				   unsigned int latency)
{
	unsigned int bytes = intel_wm_method1(pixel_rate, cpp, latency);

	return DIV_ROUND_UP(bytes, 64) + 2;
}
2526
2527
/*
 * ILK-style "method 2" watermark: generic method 2 bytes converted to
 * 64-byte blocks plus a two-block guard band.
 */
static unsigned int ilk_wm_method2(unsigned int pixel_rate,
				   unsigned int htotal,
				   unsigned int width,
				   unsigned int cpp,
				   unsigned int latency)
{
	unsigned int bytes = intel_wm_method2(pixel_rate, htotal,
					      width, cpp, latency);

	return DIV_ROUND_UP(bytes, 64) + 2;
}
2542
2543static u32 ilk_wm_fbc(u32 pri_val, u32 horiz_pixels, u8 cpp)
2544{
2545
2546
2547
2548
2549
2550
2551 if (WARN_ON(!cpp))
2552 return 0;
2553 if (WARN_ON(!horiz_pixels))
2554 return 0;
2555
2556 return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
2557}
2558
/* Per-level maximum watermark values (register/FIFO limits). */
struct ilk_wm_maximums {
	u16 pri;	/* primary plane */
	u16 spr;	/* sprite plane */
	u16 cur;	/* cursor */
	u16 fbc;	/* FBC watermark */
};
2565
2566
2567
2568
2569
/*
 * Compute the primary plane watermark for one level. Used for both the
 * pipe (WM0) and LP watermarks; LP levels take the min of method 1 and 2.
 * mem_value is the memory latency for this level.
 */
static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state,
			      u32 mem_value, bool is_lp)
{
	u32 method1, method2;
	int cpp;

	/* Zero latency means this level is unusable; return an impossible WM */
	if (mem_value == 0)
		return U32_MAX;

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	cpp = plane_state->hw.fb->format->cpp[0];

	method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);

	/* Non-LP (pipe/WM0) watermarks only use method 1 */
	if (!is_lp)
		return method1;

	method2 = ilk_wm_method2(crtc_state->pixel_rate,
				 crtc_state->hw.pipe_mode.crtc_htotal,
				 drm_rect_width(&plane_state->uapi.dst),
				 cpp, mem_value);

	return min(method1, method2);
}
2597
2598
2599
2600
2601
/*
 * Compute the sprite plane watermark for one level: min of method 1 and
 * method 2. mem_value is the memory latency for this level.
 */
static u32 ilk_compute_spr_wm(const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state,
			      u32 mem_value)
{
	u32 method1, method2;
	int cpp;

	/* Zero latency means this level is unusable; return an impossible WM */
	if (mem_value == 0)
		return U32_MAX;

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	cpp = plane_state->hw.fb->format->cpp[0];

	method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
	method2 = ilk_wm_method2(crtc_state->pixel_rate,
				 crtc_state->hw.pipe_mode.crtc_htotal,
				 drm_rect_width(&plane_state->uapi.dst),
				 cpp, mem_value);
	return min(method1, method2);
}
2624
2625
2626
2627
2628
/*
 * Compute the cursor watermark for one level; cursors only use method 2.
 * mem_value is the memory latency for this level.
 */
static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state,
			      u32 mem_value)
{
	int cpp;

	/* Zero latency means this level is unusable; return an impossible WM */
	if (mem_value == 0)
		return U32_MAX;

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	cpp = plane_state->hw.fb->format->cpp[0];

	return ilk_wm_method2(crtc_state->pixel_rate,
			      crtc_state->hw.pipe_mode.crtc_htotal,
			      drm_rect_width(&plane_state->uapi.dst),
			      cpp, mem_value);
}
2648
2649
/* Compute the FBC watermark from the already-computed primary WM value. */
static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state,
			      u32 pri_val)
{
	int cpp;

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	cpp = plane_state->hw.fb->format->cpp[0];

	return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->uapi.dst),
			  cpp);
}
2664
/* Total LP display FIFO size, per platform generation. */
static unsigned int
ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
{
	if (DISPLAY_VER(dev_priv) >= 8)
		return 3072;	/* BDW+ */
	if (DISPLAY_VER(dev_priv) >= 7)
		return 768;	/* IVB/HSW */
	return 512;		/* ILK/SNB */
}
2675
2676static unsigned int
2677ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
2678 int level, bool is_sprite)
2679{
2680 if (DISPLAY_VER(dev_priv) >= 8)
2681
2682 return level == 0 ? 255 : 2047;
2683 else if (DISPLAY_VER(dev_priv) >= 7)
2684
2685 return level == 0 ? 127 : 1023;
2686 else if (!is_sprite)
2687
2688 return level == 0 ? 127 : 511;
2689 else
2690
2691 return level == 0 ? 63 : 255;
2692}
2693
/* Largest cursor watermark the register fields can hold for the level. */
static unsigned int
ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
{
	if (DISPLAY_VER(dev_priv) >= 7)
		return level == 0 ? 63 : 255;	/* IVB+ widened the fields */

	return level == 0 ? 31 : 63;		/* ILK/SNB */
}
2702
/* Largest FBC watermark the register field can hold. */
static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
{
	/* BDW+ has a wider FBC watermark field. */
	return DISPLAY_VER(dev_priv) >= 8 ? 31 : 15;
}
2710
2711
2712static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
2713 int level,
2714 const struct intel_wm_config *config,
2715 enum intel_ddb_partitioning ddb_partitioning,
2716 bool is_sprite)
2717{
2718 unsigned int fifo_size = ilk_display_fifo_size(dev_priv);
2719
2720
2721 if (is_sprite && !config->sprites_enabled)
2722 return 0;
2723
2724
2725 if (level == 0 || config->num_pipes_active > 1) {
2726 fifo_size /= INTEL_NUM_PIPES(dev_priv);
2727
2728
2729
2730
2731
2732
2733 if (DISPLAY_VER(dev_priv) <= 6)
2734 fifo_size /= 2;
2735 }
2736
2737 if (config->sprites_enabled) {
2738
2739 if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
2740 if (is_sprite)
2741 fifo_size *= 5;
2742 fifo_size /= 6;
2743 } else {
2744 fifo_size /= 2;
2745 }
2746 }
2747
2748
2749 return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite));
2750}
2751
2752
2753static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv,
2754 int level,
2755 const struct intel_wm_config *config)
2756{
2757
2758 if (level > 0 && config->num_pipes_active > 1)
2759 return 64;
2760
2761
2762 return ilk_cursor_wm_reg_max(dev_priv, level);
2763}
2764
/*
 * Fill in the per-level watermark maximums, honouring FIFO sharing
 * constraints from the current pipe/sprite configuration.
 */
static void ilk_compute_wm_maximums(const struct drm_i915_private *dev_priv,
				    int level,
				    const struct intel_wm_config *config,
				    enum intel_ddb_partitioning ddb_partitioning,
				    struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, false);
	max->spr = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, true);
	max->cur = ilk_cursor_wm_max(dev_priv, level, config);
	max->fbc = ilk_fbc_wm_reg_max(dev_priv);
}
2776
/*
 * Fill in the per-level maximums using only the register field limits,
 * ignoring FIFO sharing (used when validating against the raw HW caps).
 */
static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
					int level,
					struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_reg_max(dev_priv, level, false);
	max->spr = ilk_plane_wm_reg_max(dev_priv, level, true);
	max->cur = ilk_cursor_wm_reg_max(dev_priv, level);
	max->fbc = ilk_fbc_wm_reg_max(dev_priv);
}
2786
/*
 * Validate one watermark level against its maximums. Returns whether the
 * level fits as computed; level 0 is additionally clamped into range and
 * force-enabled (the hardware always needs a usable WM0), so for level 0
 * the return value can be false even though result->enable ends up true.
 */
static bool ilk_validate_wm_level(int level,
				  const struct ilk_wm_maximums *max,
				  struct intel_wm_level *result)
{
	bool ret;

	/* Already disabled (e.g. by a previous level failing) -> stay disabled */
	if (!result->enable)
		return false;

	result->enable = result->pri_val <= max->pri &&
			 result->spr_val <= max->spr &&
			 result->cur_val <= max->cur;

	ret = result->enable;

	/*
	 * HACK until we can pre-compute everything,
	 * and thus fail gracefully if LP0 watermarks
	 * are exceeded: clamp LP0 to the maximums and
	 * keep it enabled, logging which value overflowed.
	 */
	if (level == 0 && !result->enable) {
		if (result->pri_val > max->pri)
			DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
				      level, result->pri_val, max->pri);
		if (result->spr_val > max->spr)
			DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
				      level, result->spr_val, max->spr);
		if (result->cur_val > max->cur)
			DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
				      level, result->cur_val, max->cur);

		result->pri_val = min_t(u32, result->pri_val, max->pri);
		result->spr_val = min_t(u32, result->spr_val, max->spr);
		result->cur_val = min_t(u32, result->cur_val, max->cur);
		result->enable = true;
	}

	return ret;
}
2827
/*
 * Compute the watermark values (primary, FBC, sprite, cursor) for one
 * level from the per-plane states and the stored latencies.
 */
static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
				 const struct intel_crtc *crtc,
				 int level,
				 struct intel_crtc_state *crtc_state,
				 const struct intel_plane_state *pristate,
				 const struct intel_plane_state *sprstate,
				 const struct intel_plane_state *curstate,
				 struct intel_wm_level *result)
{
	u16 pri_latency = dev_priv->wm.pri_latency[level];
	u16 spr_latency = dev_priv->wm.spr_latency[level];
	u16 cur_latency = dev_priv->wm.cur_latency[level];

	/* WM1+ latency values stored in 0.5us units; scale to 0.1us */
	if (level > 0) {
		pri_latency *= 5;
		spr_latency *= 5;
		cur_latency *= 5;
	}

	if (pristate) {
		result->pri_val = ilk_compute_pri_wm(crtc_state, pristate,
						     pri_latency, level);
		result->fbc_val = ilk_compute_fbc_wm(crtc_state, pristate, result->pri_val);
	}

	if (sprstate)
		result->spr_val = ilk_compute_spr_wm(crtc_state, sprstate, spr_latency);

	if (curstate)
		result->cur_val = ilk_compute_cur_wm(crtc_state, curstate, cur_latency);

	result->enable = true;
}
2862
/*
 * Read the per-level memory latency values into wm[]: from the pcode
 * mailbox on display version 9+, from MCH_SSKPD on ILK/SNB/HSW/BDW, or
 * from MLTR_ILK on ILK. Various workarounds adjust the raw values.
 */
static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
				  u16 wm[8])
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (DISPLAY_VER(dev_priv) >= 9) {
		u32 val;
		int ret, i;
		int level, max_level = ilk_wm_max_level(dev_priv);
		/* DG2 values are doubled; presumably a finer HW granularity
		 * -- NOTE(review): confirm against bspec. */
		int mult = IS_DG2(dev_priv) ? 2 : 1;

		/* read the first set of memory latencies[0:3] */
		val = 0; /* data0 selects the first latency set */
		ret = sandybridge_pcode_read(dev_priv,
					     GEN9_PCODE_READ_MEM_LATENCY,
					     &val, NULL);

		if (ret) {
			drm_err(&dev_priv->drm,
				"SKL Mailbox read error = %d\n", ret);
			return;
		}

		wm[0] = (val & GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
		wm[1] = ((val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
		wm[2] = ((val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
		wm[3] = ((val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK) * mult;

		/* read the second set of memory latencies[4:7] */
		val = 1; /* data0 selects the second latency set */
		ret = sandybridge_pcode_read(dev_priv,
					     GEN9_PCODE_READ_MEM_LATENCY,
					     &val, NULL);
		if (ret) {
			drm_err(&dev_priv->drm,
				"SKL Mailbox read error = %d\n", ret);
			return;
		}

		wm[4] = (val & GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
		wm[5] = ((val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
		wm[6] = ((val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
		wm[7] = ((val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK) * mult;

		/*
		 * If a level n (n > 0) has a 0us latency, all levels m
		 * (m >= n) are treated as disabled: zero out the rest and
		 * shrink max_level accordingly.
		 */
		for (level = 1; level <= max_level; level++) {
			if (wm[level] == 0) {
				for (i = level + 1; i <= max_level; i++)
					wm[i] = 0;

				max_level = level - 1;

				break;
			}
		}

		/*
		 * If the level 0 latency reads back as 0, bump every level
		 * by a fixed amount (3 on display ver 12+, else 2) so WM0
		 * is usable; presumably a punit reporting quirk -- TODO
		 * confirm the exact workaround reference.
		 */
		if (wm[0] == 0) {
			u8 adjust = DISPLAY_VER(dev_priv) >= 12 ? 3 : 2;

			for (level = 0; level <= max_level; level++)
				wm[level] += adjust;
		}

		/*
		 * Some memory configurations need an extra 1us of level 0
		 * latency, as flagged by the DRAM info.
		 */
		if (dev_priv->dram_info.wm_lv_0_adjust_needed)
			wm[0] += 1;
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		u64 sskpd = intel_uncore_read64(uncore, MCH_SSKPD);

		wm[0] = (sskpd >> 56) & 0xFF;
		/* Fall back to the legacy WM0 field if the new one is 0 */
		if (wm[0] == 0)
			wm[0] = sskpd & 0xF;
		wm[1] = (sskpd >> 4) & 0xFF;
		wm[2] = (sskpd >> 12) & 0xFF;
		wm[3] = (sskpd >> 20) & 0x1FF;
		wm[4] = (sskpd >> 32) & 0x1FF;
	} else if (DISPLAY_VER(dev_priv) >= 6) {
		u32 sskpd = intel_uncore_read(uncore, MCH_SSKPD);

		wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
		wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
		wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
		wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
	} else if (DISPLAY_VER(dev_priv) >= 5) {
		u32 mltr = intel_uncore_read(uncore, MLTR_ILK);

		/* ILK primary LP0 latency is a fixed 700 ns (7 * 0.1us units) */
		wm[0] = 7;
		wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
		wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
	} else {
		MISSING_CASE(INTEL_DEVID(dev_priv));
	}
}
2979
/* Override the sprite WM0 latency on ILK (display ver 5) to 1300 ns. */
static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
				       u16 wm[5])
{
	/* 13 units of 0.1us */
	if (DISPLAY_VER(dev_priv) == 5)
		wm[0] = 13;
}
2987
/* Override the cursor WM0 latency on ILK (display ver 5) to 1300 ns. */
static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
				       u16 wm[5])
{
	/* 13 units of 0.1us */
	if (DISPLAY_VER(dev_priv) == 5)
		wm[0] = 13;
}
2995
/* Highest valid watermark level index for the platform. */
int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
{
	/* how many WM levels are we expecting */
	if (HAS_HW_SAGV_WM(dev_priv))
		return 5;
	else if (DISPLAY_VER(dev_priv) >= 9)
		return 7;
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		return 4;
	else if (DISPLAY_VER(dev_priv) >= 6)
		return 3;
	else
		return 2;
}
3010
/* Log the latency of each WM level, converted to microseconds. */
static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
				   const char *name,
				   const u16 wm[])
{
	int level, max_level = ilk_wm_max_level(dev_priv);

	for (level = 0; level <= max_level; level++) {
		unsigned int latency = wm[level];

		if (latency == 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "%s WM%d latency not provided\n",
				    name, level);
			continue;
		}

		/*
		 * Stored units differ per platform: 0.1us on ver 9+,
		 * 0.5us for WM1+ on older parts, 0.1us for WM0.
		 */
		if (DISPLAY_VER(dev_priv) >= 9)
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		drm_dbg_kms(&dev_priv->drm,
			    "%s WM%d latency %u (%u.%u usec)\n", name, level,
			    wm[level], latency / 10, latency % 10);
	}
}
3041
/*
 * Raise WM0 latency to at least 'min' (in 0.1us units) and WM1+ to the
 * equivalent in their 0.5us units. Returns true if anything changed.
 */
static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
				    u16 wm[5], u16 min)
{
	int level, max_level = ilk_wm_max_level(dev_priv);

	if (wm[0] >= min)
		return false;

	wm[0] = max(wm[0], min);
	for (level = 1; level <= max_level; level++)
		wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5));

	return true;
}
3056
/*
 * SNB quirk: enforce a minimum latency of 1.2us (12 units of 0.1us) on
 * all watermark classes to avoid underruns; log the new values if any
 * were raised.
 */
static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
{
	bool changed;

	/* Note: | not || so all three adjustments always run */
	changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
		ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
		ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);

	if (!changed)
		return;

	drm_dbg_kms(&dev_priv->drm,
		    "WM latency values increased to avoid potential underruns\n");
	intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
}
3078
/*
 * SNB quirk: disable the LP3 watermark level by zeroing its latencies,
 * because of a potential for lost interrupts while LP3 is in use (see
 * the log message below); no-op if LP3 is already disabled.
 */
static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
{
	/* Already disabled across all three classes -> nothing to do */
	if (dev_priv->wm.pri_latency[3] == 0 &&
	    dev_priv->wm.spr_latency[3] == 0 &&
	    dev_priv->wm.cur_latency[3] == 0)
		return;

	dev_priv->wm.pri_latency[3] = 0;
	dev_priv->wm.spr_latency[3] = 0;
	dev_priv->wm.cur_latency[3] = 0;

	drm_dbg_kms(&dev_priv->drm,
		    "LP3 watermarks disabled due to potential for lost interrupts\n");
	intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
}
3107
/*
 * Read and fix up the ILK-family watermark latencies: primary values
 * are read from hardware, copied to sprite/cursor, ILK overrides are
 * applied, and the SNB quirks run on display version 6.
 */
static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);

	memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
	       sizeof(dev_priv->wm.pri_latency));
	memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
	       sizeof(dev_priv->wm.pri_latency));

	intel_fixup_spr_wm_latency(dev_priv, dev_priv->wm.spr_latency);
	intel_fixup_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency);

	intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);

	if (DISPLAY_VER(dev_priv) == 6) {
		snb_wm_latency_quirk(dev_priv);
		snb_wm_lp3_irq_quirk(dev_priv);
	}
}
3129
/* Read and log the SKL+ (gen9+) watermark latencies. */
static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	intel_read_wm_latency(dev_priv, dev_priv->wm.skl_latency);
	intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency);
}
3135
/*
 * Check that a pipe's LP0 (level 0) watermark fits the hardware limits
 * assuming this pipe is the only one active; returns false if not.
 */
static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
				 struct intel_pipe_wm *pipe_wm)
{
	/* LP0 watermark maximums depend on this pipe alone */
	const struct intel_wm_config config = {
		.num_pipes_active = 1,
		.sprites_enabled = pipe_wm->sprites_enabled,
		.sprites_scaled = pipe_wm->sprites_scaled,
	};
	struct ilk_wm_maximums max;

	/* LP0 watermarks always use 1/2 DDB partitioning */
	ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max);

	/* At least LP0 must be valid */
	if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
		drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n");
		return false;
	}

	return true;
}
3158
3159
/* Compute new watermarks for the pipe from its atomic plane states. */
static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
			       struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_pipe_wm *pipe_wm;
	struct intel_plane *plane;
	const struct intel_plane_state *plane_state;
	const struct intel_plane_state *pristate = NULL;
	const struct intel_plane_state *sprstate = NULL;
	const struct intel_plane_state *curstate = NULL;
	int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
	struct ilk_wm_maximums max;

	pipe_wm = &crtc_state->wm.ilk.optimal;

	/* Pick out the primary/sprite/cursor plane states for this pipe */
	intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
		if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
			pristate = plane_state;
		else if (plane->base.type == DRM_PLANE_TYPE_OVERLAY)
			sprstate = plane_state;
		else if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
			curstate = plane_state;
	}

	pipe_wm->pipe_enabled = crtc_state->hw.active;
	if (sprstate) {
		pipe_wm->sprites_enabled = sprstate->uapi.visible;
		/* Scaled = visible and dst size differs from 16.16 src size */
		pipe_wm->sprites_scaled = sprstate->uapi.visible &&
			(drm_rect_width(&sprstate->uapi.dst) != drm_rect_width(&sprstate->uapi.src) >> 16 ||
			 drm_rect_height(&sprstate->uapi.dst) != drm_rect_height(&sprstate->uapi.src) >> 16);
	}

	usable_level = max_level;

	/* ILK/SNB: LP2+ watermarks only with no sprites enabled */
	if (DISPLAY_VER(dev_priv) <= 6 && pipe_wm->sprites_enabled)
		usable_level = 1;

	/* LP1+ watermarks only with no sprite scaling */
	if (pipe_wm->sprites_scaled)
		usable_level = 0;

	memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
	ilk_compute_wm_level(dev_priv, crtc, 0, crtc_state,
			     pristate, sprstate, curstate, &pipe_wm->wm[0]);

	/* Reject the config outright if even LP0 can't fit */
	if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))
		return -EINVAL;

	ilk_compute_wm_reg_maximums(dev_priv, 1, &max);

	for (level = 1; level <= usable_level; level++) {
		struct intel_wm_level *wm = &pipe_wm->wm[level];

		ilk_compute_wm_level(dev_priv, crtc, level, crtc_state,
				     pristate, sprstate, curstate, wm);

		/*
		 * Once a level fails the register maximums, disable it and
		 * everything above it (higher levels stay zeroed).
		 */
		if (!ilk_validate_wm_level(level, &max, wm)) {
			memset(wm, 0, sizeof(*wm));
			break;
		}
	}

	return 0;
}
3232
3233
3234
3235
3236
3237
/*
 * Build the intermediate watermarks for the transition between the old
 * and new states: each value is the max of both, so the watermarks are
 * safe whichever configuration the hardware is currently showing.
 */
static int ilk_compute_intermediate_wm(struct intel_atomic_state *state,
				       struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_pipe_wm *a = &new_crtc_state->wm.ilk.intermediate;
	const struct intel_pipe_wm *b = &old_crtc_state->wm.ilk.optimal;
	int level, max_level = ilk_wm_max_level(dev_priv);

	/*
	 * Start with the new optimal watermarks. If the CRTC is being
	 * disabled, modeset, or intermediate WMs are explicitly skipped,
	 * the optimal values can be used directly and we're done.
	 */
	*a = new_crtc_state->wm.ilk.optimal;
	if (!new_crtc_state->hw.active ||
	    drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi) ||
	    state->skip_intermediate_wm)
		return 0;

	a->pipe_enabled |= b->pipe_enabled;
	a->sprites_enabled |= b->sprites_enabled;
	a->sprites_scaled |= b->sprites_scaled;

	for (level = 0; level <= max_level; level++) {
		struct intel_wm_level *a_wm = &a->wm[level];
		const struct intel_wm_level *b_wm = &b->wm[level];

		/* A level is usable only if both old and new enable it */
		a_wm->enable &= b_wm->enable;
		a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
		a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
		a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
		a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
	}

	/*
	 * The merged maximums may exceed the LP0 limits even though both
	 * inputs were individually valid; fail the update in that case.
	 */
	if (!ilk_validate_pipe_wm(dev_priv, a))
		return -EINVAL;

	/*
	 * If the intermediate WMs differ from the optimal ones, a second
	 * WM pass is needed after the vblank.
	 */
	if (memcmp(a, &new_crtc_state->wm.ilk.optimal, sizeof(*a)) != 0)
		new_crtc_state->wm.need_postvbl_update = true;

	return 0;
}
3294
3295
3296
3297
/*
 * Merge one watermark level across all enabled pipes: take the max of
 * each value, and mark the merged level disabled if any contributing
 * pipe has it disabled.
 */
static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
			       int level,
			       struct intel_wm_level *ret_wm)
{
	const struct intel_crtc *crtc;

	ret_wm->enable = true;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct intel_pipe_wm *active = &crtc->wm.active.ilk;
		const struct intel_wm_level *wm = &active->wm[level];

		if (!active->pipe_enabled)
			continue;

		/*
		 * The level stays disabled overall if any pipe disables
		 * it, but the values are still merged so the caller can
		 * see what the level would need.
		 */
		if (!wm->enable)
			ret_wm->enable = false;

		ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
		ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
		ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
		ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
	}
}
3327
3328
3329
3330
/*
 * Merge all pipes' LP1+ watermarks into a single set, disabling levels
 * that don't fit the given maximums and applying multi-pipe and FBC
 * restrictions.
 */
static void ilk_wm_merge(struct drm_i915_private *dev_priv,
			 const struct intel_wm_config *config,
			 const struct ilk_wm_maximums *max,
			 struct intel_pipe_wm *merged)
{
	int level, max_level = ilk_wm_max_level(dev_priv);
	int last_enabled_level = max_level;

	/* ILK/SNB/IVB: LP1+ watermarks only with a single active pipe */
	if ((DISPLAY_VER(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
	    config->num_pipes_active > 1)
		last_enabled_level = 0;

	/* ILK: FBC WM must be disabled always (SNB+ supports it) */
	merged->fbc_wm_enabled = DISPLAY_VER(dev_priv) >= 6;

	/* merge each WM1+ level */
	for (level = 1; level <= max_level; level++) {
		struct intel_wm_level *wm = &merged->wm[level];

		ilk_merge_wm_level(dev_priv, level, wm);

		if (level > last_enabled_level)
			wm->enable = false;
		else if (!ilk_validate_wm_level(level, max, wm))
			/* make sure all following levels get disabled */
			last_enabled_level = level - 1;

		/*
		 * An over-limit FBC value only turns off the FBC watermark
		 * (for enabled levels); the rest of the level stays valid.
		 */
		if (wm->fbc_val > max->fbc) {
			if (wm->enable)
				merged->fbc_wm_enabled = false;
			wm->fbc_val = 0;
		}
	}

	/*
	 * ILK workaround: disable LP2+ while FBC is active without FBC
	 * watermarks; presumably an underrun/corruption avoidance --
	 * TODO confirm the exact erratum reference.
	 */
	if (DISPLAY_VER(dev_priv) == 5 && !merged->fbc_wm_enabled &&
	    intel_fbc_is_active(dev_priv)) {
		for (level = 2; level <= max_level; level++) {
			struct intel_wm_level *wm = &merged->wm[level];

			wm->enable = false;
		}
	}
}
3385
3386static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
3387{
3388
3389 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
3390}
3391
3392
3393static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv,
3394 int level)
3395{
3396 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
3397 return 2 * level;
3398 else
3399 return dev_priv->wm.pri_latency[level];
3400}
3401
/*
 * Pack the merged LP watermarks and each pipe's WM0 into the register
 * value layout used by ilk_write_wm_values().
 */
static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
				   const struct intel_pipe_wm *merged,
				   enum intel_ddb_partitioning partitioning,
				   struct ilk_wm_values *results)
{
	struct intel_crtc *crtc;
	int level, wm_lp;

	results->enable_fbc_wm = merged->fbc_wm_enabled;
	results->partitioning = partitioning;

	/* LP1+ register values */
	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
		const struct intel_wm_level *r;

		level = ilk_wm_lp_to_level(wm_lp, merged);

		r = &merged->wm[level];

		/*
		 * The register values are always computed, even for
		 * disabled levels; only the enable bit differs. This
		 * keeps disabled-level contents deterministic.
		 */
		results->wm_lp[wm_lp - 1] =
			(ilk_wm_lp_latency(dev_priv, level) << WM1_LP_LATENCY_SHIFT) |
			(r->pri_val << WM1_LP_SR_SHIFT) |
			r->cur_val;

		if (r->enable)
			results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;

		/* FBC watermark field position differs on BDW+ */
		if (DISPLAY_VER(dev_priv) >= 8)
			results->wm_lp[wm_lp - 1] |=
				r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
		else
			results->wm_lp[wm_lp - 1] |=
				r->fbc_val << WM1_LP_FBC_SHIFT;

		/*
		 * ILK/SNB keep the sprite WM in a separate WM1S register
		 * with its own enable bit; only LP1 should carry a
		 * non-zero sprite value there.
		 */
		if (DISPLAY_VER(dev_priv) <= 6 && r->spr_val) {
			drm_WARN_ON(&dev_priv->drm, wm_lp != 1);
			results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
		} else
			results->wm_lp_spr[wm_lp - 1] = r->spr_val;
	}

	/* LP0 register values */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		enum pipe pipe = crtc->pipe;
		const struct intel_pipe_wm *pipe_wm = &crtc->wm.active.ilk;
		const struct intel_wm_level *r = &pipe_wm->wm[0];

		/* WM0 is always expected to be enabled at this point */
		if (drm_WARN_ON(&dev_priv->drm, !r->enable))
			continue;

		results->wm_pipe[pipe] =
			(r->pri_val << WM0_PIPE_PLANE_SHIFT) |
			(r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
			r->cur_val;
	}
}
3466
3467
3468
3469static struct intel_pipe_wm *
3470ilk_find_best_result(struct drm_i915_private *dev_priv,
3471 struct intel_pipe_wm *r1,
3472 struct intel_pipe_wm *r2)
3473{
3474 int level, max_level = ilk_wm_max_level(dev_priv);
3475 int level1 = 0, level2 = 0;
3476
3477 for (level = 1; level <= max_level; level++) {
3478 if (r1->wm[level].enable)
3479 level1 = level;
3480 if (r2->wm[level].enable)
3481 level2 = level;
3482 }
3483
3484 if (level1 == level2) {
3485 if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
3486 return r2;
3487 else
3488 return r1;
3489 } else if (level1 > level2) {
3490 return r1;
3491 } else {
3492 return r2;
3493 }
3494}
3495
3496
3497#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
3498#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
3499#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
3500#define WM_DIRTY_FBC (1 << 24)
3501#define WM_DIRTY_DDB (1 << 25)
3502
/*
 * Compare old and new watermark register values and return a bitmask of
 * WM_DIRTY_* flags describing which registers need to be rewritten.
 */
static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
					 const struct ilk_wm_values *old,
					 const struct ilk_wm_values *new)
{
	unsigned int dirty = 0;
	enum pipe pipe;
	int wm_lp;

	for_each_pipe(dev_priv, pipe) {
		if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
			dirty |= WM_DIRTY_PIPE(pipe);
			/* Must disable LP watermarks for safety */
			dirty |= WM_DIRTY_LP_ALL;
		}
	}

	if (old->enable_fbc_wm != new->enable_fbc_wm) {
		dirty |= WM_DIRTY_FBC;
		/* Must disable LP watermarks for safety */
		dirty |= WM_DIRTY_LP_ALL;
	}

	if (old->partitioning != new->partitioning) {
		dirty |= WM_DIRTY_DDB;
		/* Must disable LP watermarks for safety */
		dirty |= WM_DIRTY_LP_ALL;
	}

	/* LP1+ watermarks already deemed dirty, no need to continue */
	if (dirty & WM_DIRTY_LP_ALL)
		return dirty;

	/* Find the lowest numbered LP1+ watermark in need of an update... */
	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
		if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
		    old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
			break;
	}

	/* ...and mark it and all higher levels as dirty */
	for (; wm_lp <= 3; wm_lp++)
		dirty |= WM_DIRTY_LP(wm_lp);

	return dirty;
}
3548
/*
 * Clear the enable bit of each dirty LP watermark register that is
 * currently enabled, updating the cached hw state to match. Returns
 * true if any register was actually changed.
 */
static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
			       unsigned int dirty)
{
	struct ilk_wm_values *previous = &dev_priv->wm.hw;
	bool changed = false;

	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
		previous->wm_lp[2] &= ~WM1_LP_SR_EN;
		intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, previous->wm_lp[2]);
		changed = true;
	}
	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
		previous->wm_lp[1] &= ~WM1_LP_SR_EN;
		intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, previous->wm_lp[1]);
		changed = true;
	}
	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
		previous->wm_lp[0] &= ~WM1_LP_SR_EN;
		intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, previous->wm_lp[0]);
		changed = true;
	}

	/*
	 * Only the remaining enable bits are cleared here; the rest of
	 * each register's contents is rewritten later by the caller.
	 */
	return changed;
}
3578
3579
3580
3581
3582
/*
 * Write the computed ILK watermark values to the hardware, touching only
 * the registers whose values actually changed (every write causes the
 * watermarks to be re-evaluated, expending some power), then cache the
 * new values as the current hardware state.
 */
static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
				struct ilk_wm_values *results)
{
	struct ilk_wm_values *previous = &dev_priv->wm.hw;
	unsigned int dirty;
	u32 val;

	dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
	if (!dirty)
		return;

	/* Disable dirty LP watermarks before reprogramming anything. */
	_ilk_disable_lp_wm(dev_priv, dirty);

	/* Per-pipe (WM0) watermarks. */
	if (dirty & WM_DIRTY_PIPE(PIPE_A))
		intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]);
	if (dirty & WM_DIRTY_PIPE(PIPE_B))
		intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]);
	if (dirty & WM_DIRTY_PIPE(PIPE_C))
		intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]);

	/* DDB partitioning mode: WM_MISC on HSW/BDW, DISP_ARB_CTL2 elsewhere. */
	if (dirty & WM_DIRTY_DDB) {
		if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
			val = intel_uncore_read(&dev_priv->uncore, WM_MISC);
			if (results->partitioning == INTEL_DDB_PART_1_2)
				val &= ~WM_MISC_DATA_PARTITION_5_6;
			else
				val |= WM_MISC_DATA_PARTITION_5_6;
			intel_uncore_write(&dev_priv->uncore, WM_MISC, val);
		} else {
			val = intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2);
			if (results->partitioning == INTEL_DDB_PART_1_2)
				val &= ~DISP_DATA_PARTITION_5_6;
			else
				val |= DISP_DATA_PARTITION_5_6;
			intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL2, val);
		}
	}

	/* FBC watermark enable/disable (note DISP_FBC_WM_DIS is inverted). */
	if (dirty & WM_DIRTY_FBC) {
		val = intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL);
		if (results->enable_fbc_wm)
			val &= ~DISP_FBC_WM_DIS;
		else
			val |= DISP_FBC_WM_DIS;
		intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, val);
	}

	/* Sprite LP watermarks; WM2S/WM3S only exist on display ver 7+. */
	if (dirty & WM_DIRTY_LP(1) &&
	    previous->wm_lp_spr[0] != results->wm_lp_spr[0])
		intel_uncore_write(&dev_priv->uncore, WM1S_LP_ILK, results->wm_lp_spr[0]);

	if (DISPLAY_VER(dev_priv) >= 7) {
		if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
			intel_uncore_write(&dev_priv->uncore, WM2S_LP_IVB, results->wm_lp_spr[1]);
		if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
			intel_uncore_write(&dev_priv->uncore, WM3S_LP_IVB, results->wm_lp_spr[2]);
	}

	/* Main LP watermarks, written last so the enables come up after the rest. */
	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
		intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, results->wm_lp[0]);
	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
		intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, results->wm_lp[1]);
	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
		intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, results->wm_lp[2]);

	/* Remember what we just wrote as the current hardware state. */
	dev_priv->wm.hw = *results;
}
3650
/*
 * Disable all LP watermarks; returns true if any were actually enabled
 * (and therefore got turned off) by this call.
 */
bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv)
{
	return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}
3655
3656u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv)
3657{
3658 u8 enabled_slices = 0;
3659 enum dbuf_slice slice;
3660
3661 for_each_dbuf_slice(dev_priv, slice) {
3662 if (intel_uncore_read(&dev_priv->uncore,
3663 DBUF_CTL_S(slice)) & DBUF_POWER_STATE)
3664 enabled_slices |= BIT(slice);
3665 }
3666
3667 return enabled_slices;
3668}
3669
3670
3671
3672
3673
/*
 * Display version 9 platforms (SKL/derivatives) need the memory
 * bandwidth workaround applied to their watermark calculations.
 */
static bool skl_needs_memory_bw_wa(struct drm_i915_private *dev_priv)
{
	return DISPLAY_VER(dev_priv) == 9;
}
3678
/*
 * SAGV is supported on display version 9+ non-LP platforms, unless we
 * have already determined (via pcode) that it cannot be controlled on
 * this particular system.
 */
static bool
intel_has_sagv(struct drm_i915_private *dev_priv)
{
	return DISPLAY_VER(dev_priv) >= 9 && !IS_LP(dev_priv) &&
		dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED;
}
3685
3686static void
3687skl_setup_sagv_block_time(struct drm_i915_private *dev_priv)
3688{
3689 if (DISPLAY_VER(dev_priv) >= 12) {
3690 u32 val = 0;
3691 int ret;
3692
3693 ret = sandybridge_pcode_read(dev_priv,
3694 GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
3695 &val, NULL);
3696 if (!ret) {
3697 dev_priv->sagv_block_time_us = val;
3698 return;
3699 }
3700
3701 drm_dbg(&dev_priv->drm, "Couldn't read SAGV block time!\n");
3702 } else if (DISPLAY_VER(dev_priv) == 11) {
3703 dev_priv->sagv_block_time_us = 10;
3704 return;
3705 } else if (DISPLAY_VER(dev_priv) == 10) {
3706 dev_priv->sagv_block_time_us = 20;
3707 return;
3708 } else if (DISPLAY_VER(dev_priv) == 9) {
3709 dev_priv->sagv_block_time_us = 30;
3710 return;
3711 } else {
3712 MISSING_CASE(DISPLAY_VER(dev_priv));
3713 }
3714
3715
3716 dev_priv->sagv_block_time_us = -1;
3717}
3718
3719
3720
3721
3722
3723
3724
3725
3726
3727
3728
3729
3730static int
3731intel_enable_sagv(struct drm_i915_private *dev_priv)
3732{
3733 int ret;
3734
3735 if (!intel_has_sagv(dev_priv))
3736 return 0;
3737
3738 if (dev_priv->sagv_status == I915_SAGV_ENABLED)
3739 return 0;
3740
3741 drm_dbg_kms(&dev_priv->drm, "Enabling SAGV\n");
3742 ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
3743 GEN9_SAGV_ENABLE);
3744
3745
3746
3747
3748
3749
3750
3751 if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
3752 drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n");
3753 dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
3754 return 0;
3755 } else if (ret < 0) {
3756 drm_err(&dev_priv->drm, "Failed to enable SAGV\n");
3757 return ret;
3758 }
3759
3760 dev_priv->sagv_status = I915_SAGV_ENABLED;
3761 return 0;
3762}
3763
3764static int
3765intel_disable_sagv(struct drm_i915_private *dev_priv)
3766{
3767 int ret;
3768
3769 if (!intel_has_sagv(dev_priv))
3770 return 0;
3771
3772 if (dev_priv->sagv_status == I915_SAGV_DISABLED)
3773 return 0;
3774
3775 drm_dbg_kms(&dev_priv->drm, "Disabling SAGV\n");
3776
3777 ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
3778 GEN9_SAGV_DISABLE,
3779 GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
3780 1);
3781
3782
3783
3784
3785 if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
3786 drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n");
3787 dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
3788 return 0;
3789 } else if (ret < 0) {
3790 drm_err(&dev_priv->drm, "Failed to disable SAGV (%d)\n", ret);
3791 return ret;
3792 }
3793
3794 dev_priv->sagv_status = I915_SAGV_DISABLED;
3795 return 0;
3796}
3797
/*
 * Adjust SAGV/QGV state before the planes are updated.
 *
 * Pre-ICL: if the new configuration cannot tolerate SAGV, disable it
 * now, before the plane update (the matching enable happens in
 * intel_sagv_post_plane_update()).
 *
 * ICL+: SAGV is managed by restricting the allowed QGV points. Before
 * the update we restrict to the *union* of the old and new masks, so
 * that both the outgoing and incoming configurations remain safe while
 * the transition is in flight.
 */
void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_bw_state *new_bw_state;
	const struct intel_bw_state *old_bw_state;
	u32 new_mask = 0;

	/* Nothing to do if SAGV can't be controlled on this platform. */
	if (!intel_has_sagv(dev_priv))
		return;

	/* No bw state in this commit means nothing SAGV-relevant changed. */
	new_bw_state = intel_atomic_get_new_bw_state(state);
	if (!new_bw_state)
		return;

	if (DISPLAY_VER(dev_priv) < 11 && !intel_can_enable_sagv(dev_priv, new_bw_state)) {
		intel_disable_sagv(dev_priv);
		return;
	}

	old_bw_state = intel_atomic_get_old_bw_state(state);

	/* QGV point mask unchanged - no pcode call needed. */
	if (new_bw_state->qgv_points_mask == old_bw_state->qgv_points_mask)
		return;

	new_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;

	/* An empty union would restrict away every QGV point - skip. */
	if (!new_mask)
		return;

	/*
	 * Restrict the allowed QGV points to the combined old+new mask
	 * for the duration of the plane update.
	 */
	icl_pcode_restrict_qgv_points(dev_priv, new_mask);
}
3848
/*
 * Adjust SAGV/QGV state after the planes have been updated.
 *
 * Pre-ICL: if the new configuration tolerates SAGV, enable it now that
 * the update is complete (the matching disable happened in
 * intel_sagv_pre_plane_update()).
 *
 * ICL+: drop the temporary old+new QGV restriction applied before the
 * update by restricting to just the new configuration's mask.
 */
void intel_sagv_post_plane_update(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_bw_state *new_bw_state;
	const struct intel_bw_state *old_bw_state;
	u32 new_mask = 0;

	/* Nothing to do if SAGV can't be controlled on this platform. */
	if (!intel_has_sagv(dev_priv))
		return;

	/* No bw state in this commit means nothing SAGV-relevant changed. */
	new_bw_state = intel_atomic_get_new_bw_state(state);
	if (!new_bw_state)
		return;

	if (DISPLAY_VER(dev_priv) < 11 && intel_can_enable_sagv(dev_priv, new_bw_state)) {
		intel_enable_sagv(dev_priv);
		return;
	}

	old_bw_state = intel_atomic_get_old_bw_state(state);

	/* QGV point mask unchanged - no pcode call needed. */
	if (new_bw_state->qgv_points_mask == old_bw_state->qgv_points_mask)
		return;

	new_mask = new_bw_state->qgv_points_mask;

	/*
	 * Restrict the allowed QGV points to just the new mask, releasing
	 * any points that only the old configuration needed.
	 */
	icl_pcode_restrict_qgv_points(dev_priv, new_mask);
}
3892
/*
 * Decide whether this CRTC's configuration can tolerate SAGV (pre-TGL
 * scheme): find the highest watermark level enabled on *all* enabled
 * planes, and require that every enabled plane can tolerate SAGV's
 * extra memory latency at that level.
 */
static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum plane_id plane_id;
	int max_level = INT_MAX;

	if (!intel_has_sagv(dev_priv))
		return false;

	/* An inactive CRTC never blocks SAGV. */
	if (!crtc_state->hw.active)
		return true;

	/* Interlaced modes are incompatible with SAGV. */
	if (crtc_state->hw.pipe_mode.flags & DRM_MODE_FLAG_INTERLACE)
		return false;

	for_each_plane_id_on_crtc(crtc, plane_id) {
		const struct skl_plane_wm *wm =
			&crtc_state->wm.skl.optimal.planes[plane_id];
		int level;

		/* Skip planes that aren't enabled (wm0 disabled == plane off). */
		if (!wm->wm[0].enable)
			continue;

		/* Find the highest enabled wm level for this plane. */
		for (level = ilk_wm_max_level(dev_priv);
		     !wm->wm[level].enable; --level)
		     { }

		/* Track the highest level enabled on every plane so far. */
		max_level = min(level, max_level);
	}

	/* No enabled planes at all - SAGV is fine. */
	if (max_level == INT_MAX)
		return true;

	for_each_plane_id_on_crtc(crtc, plane_id) {
		const struct skl_plane_wm *wm =
			&crtc_state->wm.skl.optimal.planes[plane_id];

		/*
		 * All enabled planes must be able to tolerate SAGV at the
		 * common highest enabled level, otherwise SAGV stays off.
		 */
		if (wm->wm[0].enable && !wm->wm[max_level].can_sagv)
			return false;
	}

	return true;
}
3945
3946static bool tgl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
3947{
3948 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3949 enum plane_id plane_id;
3950
3951 if (!crtc_state->hw.active)
3952 return true;
3953
3954 for_each_plane_id_on_crtc(crtc, plane_id) {
3955 const struct skl_plane_wm *wm =
3956 &crtc_state->wm.skl.optimal.planes[plane_id];
3957
3958 if (wm->wm[0].enable && !wm->sagv.wm0.enable)
3959 return false;
3960 }
3961
3962 return true;
3963}
3964
3965static bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
3966{
3967 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3968 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3969
3970 if (DISPLAY_VER(dev_priv) >= 12)
3971 return tgl_crtc_can_enable_sagv(crtc_state);
3972 else
3973 return skl_crtc_can_enable_sagv(crtc_state);
3974}
3975
3976bool intel_can_enable_sagv(struct drm_i915_private *dev_priv,
3977 const struct intel_bw_state *bw_state)
3978{
3979 if (DISPLAY_VER(dev_priv) < 11 &&
3980 bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes))
3981 return false;
3982
3983 return bw_state->pipe_sagv_reject == 0;
3984}
3985
/*
 * Recompute the per-pipe SAGV reject mask for the atomic state and take
 * the required global-state locks when the result affects other CRTCs.
 *
 * Returns 0 on success or a negative error code from the global-state
 * locking helpers.
 */
static int intel_compute_sagv_mask(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	int ret;
	struct intel_crtc *crtc;
	struct intel_crtc_state *new_crtc_state;
	struct intel_bw_state *new_bw_state = NULL;
	const struct intel_bw_state *old_bw_state = NULL;
	int i;

	/* Update the reject bit for every CRTC touched by this commit. */
	for_each_new_intel_crtc_in_state(state, crtc,
					 new_crtc_state, i) {
		new_bw_state = intel_atomic_get_bw_state(state);
		if (IS_ERR(new_bw_state))
			return PTR_ERR(new_bw_state);

		old_bw_state = intel_atomic_get_old_bw_state(state);

		if (intel_crtc_can_enable_sagv(new_crtc_state))
			new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe);
		else
			new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe);
	}

	/* No CRTCs in the state - nothing to recompute. */
	if (!new_bw_state)
		return 0;

	new_bw_state->active_pipes =
		intel_calc_active_pipes(state, old_bw_state->active_pipes);

	/* A changed active-pipe set must be visible to concurrent commits. */
	if (new_bw_state->active_pipes != old_bw_state->active_pipes) {
		ret = intel_atomic_lock_global_state(&new_bw_state->base);
		if (ret)
			return ret;
	}

	for_each_new_intel_crtc_in_state(state, crtc,
					 new_crtc_state, i) {
		struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;

		/*
		 * Select the SAGV watermarks only on display ver 12+
		 * without dedicated HW SAGV watermark support, and only
		 * when SAGV can actually be enabled for this state.
		 */
		pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(dev_priv) &&
			DISPLAY_VER(dev_priv) >= 12 &&
			intel_can_enable_sagv(dev_priv, new_bw_state);
	}

	/*
	 * A flip of the overall SAGV on/off decision affects every pipe,
	 * so serialize against all other commits; a mere change of the
	 * reject mask only needs the (read) lock.
	 */
	if (intel_can_enable_sagv(dev_priv, new_bw_state) !=
	    intel_can_enable_sagv(dev_priv, old_bw_state)) {
		ret = intel_atomic_serialize_global_state(&new_bw_state->base);
		if (ret)
			return ret;
	} else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
		ret = intel_atomic_lock_global_state(&new_bw_state->base);
		if (ret)
			return ret;
	}

	return 0;
}
4050
/* Size of a single DBUF slice in blocks: total DBUF size divided by slice count. */
static int intel_dbuf_slice_size(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->dbuf.size /
		hweight8(INTEL_INFO(dev_priv)->dbuf.slice_mask);
}
4056
/*
 * Convert a (contiguous) DBUF slice mask into the corresponding DDB
 * block range. An empty mask yields an empty entry; the end is
 * exclusive. Assumes the mask has no holes (first..last bit), which the
 * WARN_ONs below sanity-check indirectly.
 */
static void
skl_ddb_entry_for_slices(struct drm_i915_private *dev_priv, u8 slice_mask,
			 struct skl_ddb_entry *ddb)
{
	int slice_size = intel_dbuf_slice_size(dev_priv);

	if (!slice_mask) {
		ddb->start = 0;
		ddb->end = 0;
		return;
	}

	/* Start of the first slice in the mask ... */
	ddb->start = (ffs(slice_mask) - 1) * slice_size;
	/* ... through the end of the last one. */
	ddb->end = fls(slice_mask) * slice_size;

	WARN_ON(ddb->start >= ddb->end);
	WARN_ON(ddb->end > INTEL_INFO(dev_priv)->dbuf.size);
}
4075
/*
 * Return the DDB offset at which the MBUS group containing the given
 * slices begins: slices S1/S2 map to S1's start, S3/S4 to S3's start.
 */
static unsigned int mbus_ddb_offset(struct drm_i915_private *i915, u8 slice_mask)
{
	struct skl_ddb_entry ddb;

	/* Normalize the mask to the first slice of its MBUS group. */
	if (slice_mask & (BIT(DBUF_S1) | BIT(DBUF_S2)))
		slice_mask = BIT(DBUF_S1);
	else if (slice_mask & (BIT(DBUF_S3) | BIT(DBUF_S4)))
		slice_mask = BIT(DBUF_S3);

	skl_ddb_entry_for_slices(i915, slice_mask, &ddb);

	return ddb.start;
}
4089
4090u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *dev_priv,
4091 const struct skl_ddb_entry *entry)
4092{
4093 int slice_size = intel_dbuf_slice_size(dev_priv);
4094 enum dbuf_slice start_slice, end_slice;
4095 u8 slice_mask = 0;
4096
4097 if (!skl_ddb_entry_size(entry))
4098 return 0;
4099
4100 start_slice = entry->start / slice_size;
4101 end_slice = (entry->end - 1) / slice_size;
4102
4103
4104
4105
4106
4107 while (start_slice <= end_slice) {
4108 slice_mask |= BIT(start_slice);
4109 start_slice++;
4110 }
4111
4112 return slice_mask;
4113}
4114
/*
 * Relative DDB allocation weight of a CRTC: its horizontal active size
 * (wider pipes need proportionally more DDB). Inactive CRTCs weigh
 * nothing.
 */
static unsigned int intel_crtc_ddb_weight(const struct intel_crtc_state *crtc_state)
{
	const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
	int hdisplay, vdisplay;

	if (!crtc_state->hw.active)
		return 0;

	/*
	 * Use drm_mode_get_hv_timing() so stereo/doubled modes report the
	 * effective active size rather than the raw hdisplay field.
	 */
	drm_mode_get_hv_timing(pipe_mode, &hdisplay, &vdisplay);

	return hdisplay;
}
4132
/*
 * Sum the DDB weights of all pipes sharing for_pipe's DBUF slice set:
 * *weight_start / *weight_end delimit for_pipe's own slot within the
 * running total (pipes are ordered by enum pipe), and *weight_total is
 * the sum over all pipes on those slices.
 */
static void intel_crtc_dbuf_weights(const struct intel_dbuf_state *dbuf_state,
				    enum pipe for_pipe,
				    unsigned int *weight_start,
				    unsigned int *weight_end,
				    unsigned int *weight_total)
{
	struct drm_i915_private *dev_priv =
		to_i915(dbuf_state->base.state->base.dev);
	enum pipe pipe;

	*weight_start = 0;
	*weight_end = 0;
	*weight_total = 0;

	for_each_pipe(dev_priv, pipe) {
		int weight = dbuf_state->weight[pipe];

		/*
		 * Only pipes assigned the exact same DBUF slice set as
		 * for_pipe compete with it for that DDB range; everything
		 * else lives in a different range and is ignored here.
		 */
		if (dbuf_state->slices[pipe] != dbuf_state->slices[for_pipe])
			continue;

		*weight_total += weight;
		if (pipe < for_pipe) {
			*weight_start += weight;
			*weight_end += weight;
		} else if (pipe == for_pipe) {
			*weight_end += weight;
		}
	}
}
4169
/*
 * Allocate this CRTC's share of the DDB: take the block range of its
 * assigned DBUF slices and carve out a sub-range proportional to its
 * weight relative to the other pipes on the same slices. The dbuf
 * state stores the range relative to the MBUS group start; the crtc
 * state stores the absolute range.
 *
 * Returns 0 on success or a negative error code from the global-state
 * locking / crtc-state lookup helpers.
 */
static int
skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	unsigned int weight_total, weight_start, weight_end;
	const struct intel_dbuf_state *old_dbuf_state =
		intel_atomic_get_old_dbuf_state(state);
	struct intel_dbuf_state *new_dbuf_state =
		intel_atomic_get_new_dbuf_state(state);
	struct intel_crtc_state *crtc_state;
	struct skl_ddb_entry ddb_slices;
	enum pipe pipe = crtc->pipe;
	unsigned int mbus_offset = 0;
	u32 ddb_range_size;
	u32 dbuf_slice_mask;
	u32 start, end;
	int ret;

	/* Zero weight (e.g. inactive pipe) gets an empty DDB allocation. */
	if (new_dbuf_state->weight[pipe] == 0) {
		new_dbuf_state->ddb[pipe].start = 0;
		new_dbuf_state->ddb[pipe].end = 0;
		goto out;
	}

	dbuf_slice_mask = new_dbuf_state->slices[pipe];

	skl_ddb_entry_for_slices(dev_priv, dbuf_slice_mask, &ddb_slices);
	mbus_offset = mbus_ddb_offset(dev_priv, dbuf_slice_mask);
	ddb_range_size = skl_ddb_entry_size(&ddb_slices);

	intel_crtc_dbuf_weights(new_dbuf_state, pipe,
				&weight_start, &weight_end, &weight_total);

	/* Proportional carve-out of the slice range for this pipe. */
	start = ddb_range_size * weight_start / weight_total;
	end = ddb_range_size * weight_end / weight_total;

	/* Stored relative to the start of this pipe's MBUS group. */
	new_dbuf_state->ddb[pipe].start = ddb_slices.start - mbus_offset + start;
	new_dbuf_state->ddb[pipe].end = ddb_slices.start - mbus_offset + end;
out:
	/* Nothing changed for this pipe - no locking or logging needed. */
	if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe] &&
	    skl_ddb_entry_equal(&old_dbuf_state->ddb[pipe],
				&new_dbuf_state->ddb[pipe]))
		return 0;

	ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
	if (ret)
		return ret;

	crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	/*
	 * The crtc state keeps the absolute DDB range: add the MBUS
	 * group offset back in.
	 */
	crtc_state->wm.skl.ddb.start = mbus_offset + new_dbuf_state->ddb[pipe].start;
	crtc_state->wm.skl.ddb.end = mbus_offset + new_dbuf_state->ddb[pipe].end;

	drm_dbg_kms(&dev_priv->drm,
		    "[CRTC:%d:%s] dbuf slices 0x%x -> 0x%x, ddb (%d - %d) -> (%d - %d), active pipes 0x%x -> 0x%x\n",
		    crtc->base.base.id, crtc->base.name,
		    old_dbuf_state->slices[pipe], new_dbuf_state->slices[pipe],
		    old_dbuf_state->ddb[pipe].start, old_dbuf_state->ddb[pipe].end,
		    new_dbuf_state->ddb[pipe].start, new_dbuf_state->ddb[pipe].end,
		    old_dbuf_state->active_pipes, new_dbuf_state->active_pipes);

	return 0;
}
4239
4240static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
4241 int width, const struct drm_format_info *format,
4242 u64 modifier, unsigned int rotation,
4243 u32 plane_pixel_rate, struct skl_wm_params *wp,
4244 int color_plane);
4245static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
4246 int level,
4247 unsigned int latency,
4248 const struct skl_wm_params *wp,
4249 const struct skl_wm_level *result_prev,
4250 struct skl_wm_level *result );
4251
/*
 * Compute the minimum DDB allocation (in blocks) to reserve for the
 * cursor plane, modelled as a 256 pixel wide linear ARGB8888 surface.
 * Uses the highest watermark level that still fits (U16_MAX marks an
 * unachievable level), with a floor of 32 blocks for a single active
 * pipe and 8 otherwise.
 */
static unsigned int
skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
		      int num_active)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	int level, max_level = ilk_wm_max_level(dev_priv);
	struct skl_wm_level wm = {};
	int ret, min_ddb_alloc = 0;
	struct skl_wm_params wp;

	ret = skl_compute_wm_params(crtc_state, 256,
				    drm_format_info(DRM_FORMAT_ARGB8888),
				    DRM_FORMAT_MOD_LINEAR,
				    DRM_MODE_ROTATE_0,
				    crtc_state->pixel_rate, &wp, 0);
	drm_WARN_ON(&dev_priv->drm, ret);

	for (level = 0; level <= max_level; level++) {
		unsigned int latency = dev_priv->wm.skl_latency[level];

		skl_compute_plane_wm(crtc_state, level, latency, &wp, &wm, &wm);
		/* U16_MAX means this level cannot be satisfied - stop here. */
		if (wm.min_ddb_alloc == U16_MAX)
			break;

		min_ddb_alloc = wm.min_ddb_alloc;
	}

	return max(num_active == 1 ? 32 : 8, min_ddb_alloc);
}
4281
/*
 * Decode a DDB allocation register value into a skl_ddb_entry. The
 * hardware end field is inclusive, while skl_ddb_entry uses an
 * exclusive end, hence the +1 for non-empty entries.
 */
static void skl_ddb_entry_init_from_hw(struct drm_i915_private *dev_priv,
				       struct skl_ddb_entry *entry, u32 reg)
{
	entry->start = reg & DDB_ENTRY_MASK;
	entry->end = (reg >> DDB_ENTRY_END_SHIFT) & DDB_ENTRY_MASK;

	if (entry->end)
		entry->end += 1;
}
4291
/*
 * Read a plane's current DDB allocation(s) back from the hardware.
 * The cursor has a dedicated register and no UV allocation; on display
 * ver 11+ there is a single allocation per plane; on older platforms
 * planar (YUV semiplanar) formats swap the roles of the Y and UV
 * buffer config registers.
 */
static void
skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
			   const enum pipe pipe,
			   const enum plane_id plane_id,
			   struct skl_ddb_entry *ddb_y,
			   struct skl_ddb_entry *ddb_uv)
{
	u32 val, val2;
	u32 fourcc = 0;

	/* Cursor doesn't support NV12/planar, so no extra calculation needed */
	if (plane_id == PLANE_CURSOR) {
		val = intel_uncore_read(&dev_priv->uncore, CUR_BUF_CFG(pipe));
		skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
		return;
	}

	val = intel_uncore_read(&dev_priv->uncore, PLANE_CTL(pipe, plane_id));

	/* Derive the pixel format only if the plane is actually enabled. */
	if (val & PLANE_CTL_ENABLE)
		fourcc = skl_format_to_fourcc(val & PLANE_CTL_FORMAT_MASK,
					      val & PLANE_CTL_ORDER_RGBX,
					      val & PLANE_CTL_ALPHA_MASK);

	if (DISPLAY_VER(dev_priv) >= 11) {
		val = intel_uncore_read(&dev_priv->uncore, PLANE_BUF_CFG(pipe, plane_id));
		skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
	} else {
		val = intel_uncore_read(&dev_priv->uncore, PLANE_BUF_CFG(pipe, plane_id));
		val2 = intel_uncore_read(&dev_priv->uncore, PLANE_NV12_BUF_CFG(pipe, plane_id));

		/* For planar formats the Y/UV register roles are swapped. */
		if (fourcc &&
		    drm_format_info_is_yuv_semiplanar(drm_format_info(fourcc)))
			swap(val, val2);

		skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
		skl_ddb_entry_init_from_hw(dev_priv, ddb_uv, val2);
	}
}
4332
/*
 * Read back the DDB allocations of every plane on the given CRTC into
 * the caller-supplied per-plane arrays. Silently does nothing if the
 * pipe's power well is not enabled (registers would be unreadable).
 */
void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
			       struct skl_ddb_entry *ddb_y,
			       struct skl_ddb_entry *ddb_uv)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	enum pipe pipe = crtc->pipe;
	intel_wakeref_t wakeref;
	enum plane_id plane_id;

	power_domain = POWER_DOMAIN_PIPE(pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return;

	for_each_plane_id_on_crtc(crtc, plane_id)
		skl_ddb_get_hw_plane_state(dev_priv, pipe,
					   plane_id,
					   &ddb_y[plane_id],
					   &ddb_uv[plane_id]);

	intel_display_power_put(dev_priv, power_domain, wakeref);
}
4356
4357
4358
4359
4360
4361
4362
4363
4364
4365
4366
4367
4368
4369
4370
4371
4372
/*
 * Compute the plane's total downscale amount as a 16.16 fixed-point
 * value: (src_w/dst_w) * (src_h/dst_h), with each ratio clamped to a
 * minimum of 1 so upscaling never reduces the result. Returns 0 for a
 * plane that should not be visible (and warns, since callers are
 * expected to have checked that already).
 */
static uint_fixed_16_16_t
skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state,
			   const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	u32 src_w, src_h, dst_w, dst_h;
	uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
	uint_fixed_16_16_t downscale_h, downscale_w;

	if (drm_WARN_ON(&dev_priv->drm,
			!intel_wm_plane_visible(crtc_state, plane_state)))
		return u32_to_fixed16(0);

	/*
	 * Src coordinates are in 16.16 fixed point, dst coordinates are
	 * whole pixels.
	 */
	src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
	src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
	dst_w = drm_rect_width(&plane_state->uapi.dst);
	dst_h = drm_rect_height(&plane_state->uapi.dst);

	fp_w_ratio = div_fixed16(src_w, dst_w);
	fp_h_ratio = div_fixed16(src_h, dst_h);
	/* Clamp each axis to 1x: upscaling doesn't count as downscale. */
	downscale_w = max_fixed16(fp_w_ratio, u32_to_fixed16(1));
	downscale_h = max_fixed16(fp_h_ratio, u32_to_fixed16(1));

	return mul_fixed16(downscale_w, downscale_h);
}
4405
/*
 * One entry of a platform's allowed DBUF-slice configuration table:
 * matched against the set of active pipes.
 */
struct dbuf_slice_conf_entry {
	u8 active_pipes;	/* bitmask of active pipes this entry applies to */
	u8 dbuf_mask[I915_MAX_PIPES];	/* per-pipe mask of DBUF slices to use */
	bool join_mbus;		/* NOTE(review): presumably "join pipes into one MBUS" - confirm against bspec */
};
4411
4412
4413
4414
4415
4416
4417
4418
4419
4420
4421
/*
 * Allowed DBUF slice assignments for ICL (two slices, S1/S2), keyed by
 * the set of active pipes. Terminated by an all-zero sentinel entry.
 */
static const struct dbuf_slice_conf_entry icl_allowed_dbufs[] =

{
	{
		.active_pipes = BIT(PIPE_A),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1),
		},
	},
	{
		.active_pipes = BIT(PIPE_B),
		.dbuf_mask = {
			[PIPE_B] = BIT(DBUF_S1),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1),
			[PIPE_B] = BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_C),
		.dbuf_mask = {
			[PIPE_C] = BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1),
			[PIPE_C] = BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
		.dbuf_mask = {
			[PIPE_B] = BIT(DBUF_S1),
			[PIPE_C] = BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1),
			[PIPE_B] = BIT(DBUF_S1),
			[PIPE_C] = BIT(DBUF_S2),
		},
	},
	{}
};
4474
4475
4476
4477
4478
4479
4480
4481
4482
4483
4484
/*
 * Allowed DBUF slice assignments for TGL (two slices, S1/S2), keyed by
 * the set of active pipes. Terminated by an all-zero sentinel entry.
 */
static const struct dbuf_slice_conf_entry tgl_allowed_dbufs[] =

{
	{
		.active_pipes = BIT(PIPE_A),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_B),
		.dbuf_mask = {
			[PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S2),
			[PIPE_B] = BIT(DBUF_S1),
		},
	},
	{
		.active_pipes = BIT(PIPE_C),
		.dbuf_mask = {
			[PIPE_C] = BIT(DBUF_S2) | BIT(DBUF_S1),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1),
			[PIPE_C] = BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
		.dbuf_mask = {
			[PIPE_B] = BIT(DBUF_S1),
			[PIPE_C] = BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1),
			[PIPE_B] = BIT(DBUF_S1),
			[PIPE_C] = BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_D] = BIT(DBUF_S2) | BIT(DBUF_S1),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1),
			[PIPE_D] = BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_B] = BIT(DBUF_S1),
			[PIPE_D] = BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1),
			[PIPE_B] = BIT(DBUF_S1),
			[PIPE_D] = BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_C] = BIT(DBUF_S1),
			[PIPE_D] = BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1),
			[PIPE_C] = BIT(DBUF_S2),
			[PIPE_D] = BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_B] = BIT(DBUF_S1),
			[PIPE_C] = BIT(DBUF_S2),
			[PIPE_D] = BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1),
			[PIPE_B] = BIT(DBUF_S1),
			[PIPE_C] = BIT(DBUF_S2),
			[PIPE_D] = BIT(DBUF_S2),
		},
	},
	{}
};
4597
/*
 * Allowed DBUF slice assignments for DG2 (four slices, S1-S4), keyed by
 * the set of active pipes. Terminated by an all-zero sentinel entry.
 */
static const struct dbuf_slice_conf_entry dg2_allowed_dbufs[] = {
	{
		.active_pipes = BIT(PIPE_A),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_B),
		.dbuf_mask = {
			[PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1),
			[PIPE_B] = BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_C),
		.dbuf_mask = {
			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
		},
	},
	{
		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
		.dbuf_mask = {
			[PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1),
			[PIPE_B] = BIT(DBUF_S2),
			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
		},
	},
	{
		.active_pipes = BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
			[PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
		},
	},
	{
		.active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
			[PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1),
			[PIPE_B] = BIT(DBUF_S2),
			[PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
		},
	},
	{
		.active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_C] = BIT(DBUF_S3),
			[PIPE_D] = BIT(DBUF_S4),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
			[PIPE_C] = BIT(DBUF_S3),
			[PIPE_D] = BIT(DBUF_S4),
		},
	},
	{
		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
			[PIPE_C] = BIT(DBUF_S3),
			[PIPE_D] = BIT(DBUF_S4),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1),
			[PIPE_B] = BIT(DBUF_S2),
			[PIPE_C] = BIT(DBUF_S3),
			[PIPE_D] = BIT(DBUF_S4),
		},
	},
	{}
};
4708
/*
 * Allowed DBUF slice assignments for ADL-P (four slices, S1-S4), keyed
 * by the set of active pipes. Single-pipe A/B configurations join the
 * MBUS and get all four slices. Terminated by an all-zero sentinel.
 */
static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] = {
	{
		.active_pipes = BIT(PIPE_A),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4),
		},
		.join_mbus = true,
	},
	{
		.active_pipes = BIT(PIPE_B),
		.dbuf_mask = {
			[PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4),
		},
		.join_mbus = true,
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
		},
	},
	{
		.active_pipes = BIT(PIPE_C),
		.dbuf_mask = {
			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
		},
	},
	{
		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
		.dbuf_mask = {
			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
		},
	},
	{
		.active_pipes = BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
		},
	},
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
		},
	},
	{}

};
4822
4823static bool check_mbus_joined(u8 active_pipes,
4824 const struct dbuf_slice_conf_entry *dbuf_slices)
4825{
4826 int i;
4827
4828 for (i = 0; i < dbuf_slices[i].active_pipes; i++) {
4829 if (dbuf_slices[i].active_pipes == active_pipes)
4830 return dbuf_slices[i].join_mbus;
4831 }
4832 return false;
4833}
4834
4835static bool adlp_check_mbus_joined(u8 active_pipes)
4836{
4837 return check_mbus_joined(active_pipes, adlp_allowed_dbufs);
4838}
4839
4840static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes,
4841 const struct dbuf_slice_conf_entry *dbuf_slices)
4842{
4843 int i;
4844
4845 for (i = 0; i < dbuf_slices[i].active_pipes; i++) {
4846 if (dbuf_slices[i].active_pipes == active_pipes)
4847 return dbuf_slices[i].dbuf_mask[pipe];
4848 }
4849 return 0;
4850}
4851
4852
4853
4854
4855
4856
4857static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
4858{
4859
4860
4861
4862
4863
4864
4865
4866
4867
4868
4869
4870
4871 return compute_dbuf_slices(pipe, active_pipes, icl_allowed_dbufs);
4872}
4873
4874static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
4875{
4876 return compute_dbuf_slices(pipe, active_pipes, tgl_allowed_dbufs);
4877}
4878
4879static u32 adlp_compute_dbuf_slices(enum pipe pipe, u32 active_pipes)
4880{
4881 return compute_dbuf_slices(pipe, active_pipes, adlp_allowed_dbufs);
4882}
4883
4884static u32 dg2_compute_dbuf_slices(enum pipe pipe, u32 active_pipes)
4885{
4886 return compute_dbuf_slices(pipe, active_pipes, dg2_allowed_dbufs);
4887}
4888
4889static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes)
4890{
4891 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4892 enum pipe pipe = crtc->pipe;
4893
4894 if (IS_DG2(dev_priv))
4895 return dg2_compute_dbuf_slices(pipe, active_pipes);
4896 else if (IS_ALDERLAKE_P(dev_priv))
4897 return adlp_compute_dbuf_slices(pipe, active_pipes);
4898 else if (DISPLAY_VER(dev_priv) == 12)
4899 return tgl_compute_dbuf_slices(pipe, active_pipes);
4900 else if (DISPLAY_VER(dev_priv) == 11)
4901 return icl_compute_dbuf_slices(pipe, active_pipes);
4902
4903
4904
4905
4906 return active_pipes & BIT(pipe) ? BIT(DBUF_S1) : 0;
4907}
4908
/*
 * Compute the relative data rate consumed by one color plane of
 * @plane_state: visible pixels * downscale amount * bytes per pixel.
 * Used as the weight when distributing DDB blocks between planes.
 *
 * Returns 0 for invisible planes, for the cursor (it gets a fixed
 * allocation elsewhere), and for color plane 1 of non-planar formats.
 */
static u64
skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
			     const struct intel_plane_state *plane_state,
			     int color_plane)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	u32 data_rate;
	u32 width = 0, height = 0;
	uint_fixed_16_16_t down_scale_amount;
	u64 rate;

	if (!plane_state->uapi.visible)
		return 0;

	/* Cursor DDB is allocated separately in skl_allocate_plane_ddb(). */
	if (plane->id == PLANE_CURSOR)
		return 0;

	/* Only planar (YUV semiplanar) formats have a second color plane. */
	if (color_plane == 1 &&
	    !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
		return 0;

	/*
	 * src is in 16.16 fixed point; shift down to whole pixels.
	 * NOTE(review): rotation is presumably already accounted for in the
	 * src rectangle at this point — confirm against the plane check code.
	 */
	width = drm_rect_width(&plane_state->uapi.src) >> 16;
	height = drm_rect_height(&plane_state->uapi.src) >> 16;

	/* The UV plane is subsampled by 2 in both dimensions. */
	if (color_plane == 1) {
		width /= 2;
		height /= 2;
	}

	data_rate = width * height;

	down_scale_amount = skl_plane_downscale_amount(crtc_state, plane_state);

	rate = mul_round_up_u32_fixed16(data_rate, down_scale_amount);

	rate *= fb->format->cpp[color_plane];
	return rate;
}
4954
/*
 * Pre-icl: sum the relative data rates of all planes on @crtc, caching
 * the per-plane Y (packed) and UV rates in the crtc state for the later
 * DDB distribution.
 */
static u64
skl_get_total_relative_data_rate(struct intel_atomic_state *state,
				 struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	u64 total_data_rate = 0;
	enum plane_id plane_id;
	int i;

	/* Calculate and cache the data rate for each plane in the state. */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe != crtc->pipe)
			continue;

		plane_id = plane->id;

		/* packed/Y plane */
		crtc_state->plane_data_rate[plane_id] =
			skl_plane_relative_data_rate(crtc_state, plane_state, 0);

		/* UV plane (0 for non-planar formats) */
		crtc_state->uv_plane_data_rate[plane_id] =
			skl_plane_relative_data_rate(crtc_state, plane_state, 1);
	}

	/* Sum over all planes, including ones not in this atomic state. */
	for_each_plane_id_on_crtc(crtc, plane_id) {
		total_data_rate += crtc_state->plane_data_rate[plane_id];
		total_data_rate += crtc_state->uv_plane_data_rate[plane_id];
	}

	return total_data_rate;
}
4990
/*
 * icl+: sum the relative data rates of all planes on @crtc. Unlike the
 * pre-icl variant there is no separate UV rate: planar formats use two
 * linked hardware planes (Y on the "linked" plane, UV on the master).
 */
static u64
icl_get_total_relative_data_rate(struct intel_atomic_state *state,
				 struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	u64 total_data_rate = 0;
	enum plane_id plane_id;
	int i;

	/* Calculate and cache the data rate for each plane in the state. */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe != crtc->pipe)
			continue;

		plane_id = plane->id;

		if (!plane_state->planar_linked_plane) {
			crtc_state->plane_data_rate[plane_id] =
				skl_plane_relative_data_rate(crtc_state, plane_state, 0);
		} else {
			enum plane_id y_plane_id;

			/*
			 * Slaves carry no state of their own here; both the
			 * Y and UV rates are computed when their master is
			 * processed below.
			 */
			if (plane_state->planar_slave)
				continue;

			/* Y plane rate is stored under the linked (slave) plane's id. */
			y_plane_id = plane_state->planar_linked_plane->id;
			crtc_state->plane_data_rate[y_plane_id] =
				skl_plane_relative_data_rate(crtc_state, plane_state, 0);

			crtc_state->plane_data_rate[plane_id] =
				skl_plane_relative_data_rate(crtc_state, plane_state, 1);
		}
	}

	for_each_plane_id_on_crtc(crtc, plane_id)
		total_data_rate += crtc_state->plane_data_rate[plane_id];

	return total_data_rate;
}
5041
5042const struct skl_wm_level *
5043skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm,
5044 enum plane_id plane_id,
5045 int level)
5046{
5047 const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
5048
5049 if (level == 0 && pipe_wm->use_sagv_wm)
5050 return &wm->sagv.wm0;
5051
5052 return &wm->wm[level];
5053}
5054
5055const struct skl_wm_level *
5056skl_plane_trans_wm(const struct skl_pipe_wm *pipe_wm,
5057 enum plane_id plane_id)
5058{
5059 const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
5060
5061 if (pipe_wm->use_sagv_wm)
5062 return &wm->sagv.trans_wm;
5063
5064 return &wm->trans_wm;
5065}
5066
5067
5068
5069
5070
5071
5072
5073
5074
5075
5076
5077
5078
5079static void
5080skl_check_wm_level(struct skl_wm_level *wm, u64 total)
5081{
5082 if (wm->min_ddb_alloc > total)
5083 memset(wm, 0, sizeof(*wm));
5084}
5085
5086static void
5087skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm,
5088 u64 total, u64 uv_total)
5089{
5090 if (wm->min_ddb_alloc > total ||
5091 uv_wm->min_ddb_alloc > uv_total) {
5092 memset(wm, 0, sizeof(*wm));
5093 memset(uv_wm, 0, sizeof(*uv_wm));
5094 }
5095}
5096
/*
 * Distribute the crtc's DDB (display buffer) allocation between its
 * planes: a fixed chunk for the cursor, then for the remaining planes
 * the minimum required at the highest achievable watermark level plus a
 * share of the leftover proportional to each plane's data rate. Finally,
 * disable any watermark levels that don't fit the resulting allocation.
 *
 * Returns 0 on success, -EINVAL when not even level 0 fits the DDB.
 */
static int
skl_allocate_plane_ddb(struct intel_atomic_state *state,
		       struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_dbuf_state *dbuf_state =
		intel_atomic_get_new_dbuf_state(state);
	const struct skl_ddb_entry *alloc = &dbuf_state->ddb[crtc->pipe];
	int num_active = hweight8(dbuf_state->active_pipes);
	u16 alloc_size, start = 0;
	u16 total[I915_MAX_PLANES] = {};
	u16 uv_total[I915_MAX_PLANES] = {};
	u64 total_data_rate;
	enum plane_id plane_id;
	u32 blocks;
	int level;

	/* Clear the partitioning for disabled planes. */
	memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
	memset(crtc_state->wm.skl.plane_ddb_uv, 0, sizeof(crtc_state->wm.skl.plane_ddb_uv));

	if (!crtc_state->hw.active)
		return 0;

	if (DISPLAY_VER(dev_priv) >= 11)
		total_data_rate =
			icl_get_total_relative_data_rate(state, crtc);
	else
		total_data_rate =
			skl_get_total_relative_data_rate(state, crtc);

	alloc_size = skl_ddb_entry_size(alloc);
	if (alloc_size == 0)
		return 0;

	/* The cursor gets a fixed allocation at the top end of the range. */
	total[PLANE_CURSOR] = skl_cursor_allocation(crtc_state, num_active);
	alloc_size -= total[PLANE_CURSOR];
	crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].start =
		alloc->end - total[PLANE_CURSOR];
	crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;

	if (total_data_rate == 0)
		return 0;

	/*
	 * Find the highest watermark level for which we can satisfy the
	 * block requirement of all active planes simultaneously.
	 */
	for (level = ilk_wm_max_level(dev_priv); level >= 0; level--) {
		blocks = 0;
		for_each_plane_id_on_crtc(crtc, plane_id) {
			const struct skl_plane_wm *wm =
				&crtc_state->wm.skl.optimal.planes[plane_id];

			if (plane_id == PLANE_CURSOR) {
				/* Cursor allocation is fixed; reject levels it can't meet. */
				if (wm->wm[level].min_ddb_alloc > total[PLANE_CURSOR]) {
					drm_WARN_ON(&dev_priv->drm,
						    wm->wm[level].min_ddb_alloc != U16_MAX);
					blocks = U32_MAX;
					break;
				}
				continue;
			}

			blocks += wm->wm[level].min_ddb_alloc;
			blocks += wm->uv_wm[level].min_ddb_alloc;
		}

		if (blocks <= alloc_size) {
			alloc_size -= blocks;
			break;
		}
	}

	if (level < 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Requested display configuration exceeds system DDB limitations");
		drm_dbg_kms(&dev_priv->drm, "minimum required %d/%d\n",
			    blocks, alloc_size);
		return -EINVAL;
	}

	/*
	 * Grant each plane the blocks it requires at the highest achievable
	 * watermark level, plus an extra share of the leftover blocks
	 * proportional to its relative data rate.
	 */
	for_each_plane_id_on_crtc(crtc, plane_id) {
		const struct skl_plane_wm *wm =
			&crtc_state->wm.skl.optimal.planes[plane_id];
		u64 rate;
		u16 extra;

		if (plane_id == PLANE_CURSOR)
			continue;

		/*
		 * We've accounted for all active planes; the remaining
		 * planes are all disabled (rate 0), so stop early to avoid
		 * a division by zero below.
		 */
		if (total_data_rate == 0)
			break;

		rate = crtc_state->plane_data_rate[plane_id];
		extra = min_t(u16, alloc_size,
			      DIV64_U64_ROUND_UP(alloc_size * rate,
						 total_data_rate));
		total[plane_id] = wm->wm[level].min_ddb_alloc + extra;
		alloc_size -= extra;
		total_data_rate -= rate;

		if (total_data_rate == 0)
			break;

		rate = crtc_state->uv_plane_data_rate[plane_id];
		extra = min_t(u16, alloc_size,
			      DIV64_U64_ROUND_UP(alloc_size * rate,
						 total_data_rate));
		uv_total[plane_id] = wm->uv_wm[level].min_ddb_alloc + extra;
		alloc_size -= extra;
		total_data_rate -= rate;
	}
	/* By construction every block and all of the data rate were consumed. */
	drm_WARN_ON(&dev_priv->drm, alloc_size != 0 || total_data_rate != 0);

	/* Convert per-plane sizes into contiguous start/end ranges. */
	start = alloc->start;
	for_each_plane_id_on_crtc(crtc, plane_id) {
		struct skl_ddb_entry *plane_alloc =
			&crtc_state->wm.skl.plane_ddb_y[plane_id];
		struct skl_ddb_entry *uv_plane_alloc =
			&crtc_state->wm.skl.plane_ddb_uv[plane_id];

		if (plane_id == PLANE_CURSOR)
			continue;

		/* icl+ has no separate UV allocation — planar uses linked planes. */
		drm_WARN_ON(&dev_priv->drm,
			    DISPLAY_VER(dev_priv) >= 11 && uv_total[plane_id]);

		/* Leave disabled planes at zero (empty entry). */
		if (total[plane_id]) {
			plane_alloc->start = start;
			start += total[plane_id];
			plane_alloc->end = start;
		}

		if (uv_total[plane_id]) {
			uv_plane_alloc->start = start;
			start += uv_total[plane_id];
			uv_plane_alloc->end = start;
		}
	}

	/*
	 * When we calculated watermark values we didn't know how high the
	 * level we could actually allocate for was going to be; now that we
	 * know, disable every level above it that no longer fits the final
	 * per-plane allocation.
	 */
	for (level++; level <= ilk_wm_max_level(dev_priv); level++) {
		for_each_plane_id_on_crtc(crtc, plane_id) {
			struct skl_plane_wm *wm =
				&crtc_state->wm.skl.optimal.planes[plane_id];

			skl_check_nv12_wm_level(&wm->wm[level], &wm->uv_wm[level],
						total[plane_id], uv_total[plane_id]);

			/*
			 * Wa_1408961008:icl,ehl
			 * Underruns with WM1+ disabled — keep WM1 equal to WM0
			 * instead of disabling it.
			 */
			if (DISPLAY_VER(dev_priv) == 11 &&
			    level == 1 && wm->wm[0].enable) {
				wm->wm[level].blocks = wm->wm[0].blocks;
				wm->wm[level].lines = wm->wm[0].lines;
				wm->wm[level].ignore_lines = wm->wm[0].ignore_lines;
			}
		}
	}

	/*
	 * Go back and disable the transition and SAGV watermarks if it
	 * turns out we don't have enough DDB blocks for them.
	 */
	for_each_plane_id_on_crtc(crtc, plane_id) {
		struct skl_plane_wm *wm =
			&crtc_state->wm.skl.optimal.planes[plane_id];

		skl_check_wm_level(&wm->trans_wm, total[plane_id]);
		skl_check_wm_level(&wm->sagv.wm0, total[plane_id]);
		skl_check_wm_level(&wm->sagv.trans_wm, total[plane_id]);
	}

	return 0;
}
5295
5296
5297
5298
5299
5300
5301
/*
 * Watermark "method 1": blocks needed to cover @latency microseconds of
 * plane data fetch, in dbuf blocks (16.16 fixed point).
 *
 * NOTE(review): latency * pixel_rate * cpp is computed in 32 bits —
 * presumably the operands are small enough not to overflow; confirm
 * against the maximum pixel rates supported.
 */
static uint_fixed_16_16_t
skl_wm_method1(const struct drm_i915_private *dev_priv, u32 pixel_rate,
	       u8 cpp, u32 latency, u32 dbuf_block_size)
{
	u32 wm_intermediate_val;
	uint_fixed_16_16_t ret;

	/* Zero latency means the level is unsupported; return "infinite". */
	if (latency == 0)
		return FP_16_16_MAX;

	wm_intermediate_val = latency * pixel_rate * cpp;
	ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size);

	/* glk/cnl+ require one extra block. */
	if (DISPLAY_VER(dev_priv) >= 10)
		ret = add_fixed16_u32(ret, 1);

	return ret;
}
5320
/*
 * Watermark "method 2": number of scanlines (rounded up) covered by
 * @latency, multiplied by the blocks a single plane line occupies.
 * Result is in dbuf blocks (16.16 fixed point).
 */
static uint_fixed_16_16_t
skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
	       uint_fixed_16_16_t plane_blocks_per_line)
{
	u32 wm_intermediate_val;
	uint_fixed_16_16_t ret;

	/* Zero latency means the level is unsupported; return "infinite". */
	if (latency == 0)
		return FP_16_16_MAX;

	wm_intermediate_val = latency * pixel_rate;
	wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val,
					   pipe_htotal * 1000);
	ret = mul_u32_fixed16(wm_intermediate_val, plane_blocks_per_line);
	return ret;
}
5337
/*
 * Compute the time one scanline takes, in microseconds (16.16 fixed
 * point): htotal * 1000 / pixel_rate (pixel_rate is in kHz).
 * Returns 0 for an inactive crtc or a (warned) zero pixel rate.
 */
static uint_fixed_16_16_t
intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	u32 pixel_rate;
	u32 crtc_htotal;
	uint_fixed_16_16_t linetime_us;

	if (!crtc_state->hw.active)
		return u32_to_fixed16(0);

	pixel_rate = crtc_state->pixel_rate;

	/* An active crtc must have a non-zero pixel rate. */
	if (drm_WARN_ON(&dev_priv->drm, pixel_rate == 0))
		return u32_to_fixed16(0);

	crtc_htotal = crtc_state->hw.pipe_mode.crtc_htotal;
	linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate);

	return linetime_us;
}
5359
/*
 * Fill in @wp with all the per-plane parameters the watermark formulas
 * need: tiling classification, effective width, bytes/blocks per line,
 * minimum scanline granularity for Y-tiling, and the pipe's linetime.
 *
 * Returns 0 on success, -EINVAL for an invalid color plane / format
 * combination or an unknown cpp under 90/270 rotation.
 */
static int
skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
		      int width, const struct drm_format_info *format,
		      u64 modifier, unsigned int rotation,
		      u32 plane_pixel_rate, struct skl_wm_params *wp,
		      int color_plane)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 interm_pbpl;

	/* Only planar formats have a second color plane. */
	if (color_plane == 1 &&
	    !intel_format_info_is_yuv_semiplanar(format, modifier)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Non planar format have single plane\n");
		return -EINVAL;
	}

	wp->y_tiled = modifier == I915_FORMAT_MOD_Y_TILED ||
		      modifier == I915_FORMAT_MOD_Yf_TILED ||
		      modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
		      modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
	wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
	wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
			 modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
	wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier);

	wp->width = width;
	/* The UV plane is subsampled by 2 horizontally. */
	if (color_plane == 1 && wp->is_planar)
		wp->width /= 2;

	wp->cpp = format->cpp[color_plane];
	wp->plane_pixel_rate = plane_pixel_rate;

	/* icl+ uses smaller dbuf blocks for 8bpp Yf-tiled surfaces. */
	if (DISPLAY_VER(dev_priv) >= 11 &&
	    modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1)
		wp->dbuf_block_size = 256;
	else
		wp->dbuf_block_size = 512;

	/* Minimum scanline granularity depends on cpp when rotated 90/270. */
	if (drm_rotation_90_or_270(rotation)) {
		switch (wp->cpp) {
		case 1:
			wp->y_min_scanlines = 16;
			break;
		case 2:
			wp->y_min_scanlines = 8;
			break;
		case 4:
			wp->y_min_scanlines = 4;
			break;
		default:
			MISSING_CASE(wp->cpp);
			return -EINVAL;
		}
	} else {
		wp->y_min_scanlines = 4;
	}

	if (skl_needs_memory_bw_wa(dev_priv))
		wp->y_min_scanlines *= 2;

	wp->plane_bytes_per_line = wp->width * wp->cpp;
	if (wp->y_tiled) {
		interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line *
					   wp->y_min_scanlines,
					   wp->dbuf_block_size);

		/* glk/cnl+ add one extra block per line. */
		if (DISPLAY_VER(dev_priv) >= 10)
			interm_pbpl++;

		wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
							wp->y_min_scanlines);
	} else {
		interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
					   wp->dbuf_block_size);

		if (!wp->x_tiled || DISPLAY_VER(dev_priv) >= 10)
			interm_pbpl++;

		wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
	}

	wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines,
					     wp->plane_blocks_per_line);

	wp->linetime_us = fixed16_to_u32_round_up(
					intel_get_linetime_us(crtc_state));

	return 0;
}
5452
5453static int
5454skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
5455 const struct intel_plane_state *plane_state,
5456 struct skl_wm_params *wp, int color_plane)
5457{
5458 const struct drm_framebuffer *fb = plane_state->hw.fb;
5459 int width;
5460
5461
5462
5463
5464
5465
5466 width = drm_rect_width(&plane_state->uapi.src) >> 16;
5467
5468 return skl_compute_wm_params(crtc_state, width,
5469 fb->format, fb->modifier,
5470 plane_state->hw.rotation,
5471 intel_plane_pixel_rate(crtc_state, plane_state),
5472 wp, color_plane);
5473}
5474
5475static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level)
5476{
5477 if (DISPLAY_VER(dev_priv) >= 10)
5478 return true;
5479
5480
5481 return level > 0;
5482}
5483
/* Maximum value of the watermark "lines" field for this platform. */
static int skl_wm_max_lines(struct drm_i915_private *dev_priv)
{
	return DISPLAY_VER(dev_priv) >= 13 ? 255 : 31;
}
5491
/*
 * Compute a single plane watermark level: pick between "method 1"
 * (latency based) and "method 2" (scanline based), apply platform
 * workarounds, and fill in @result (blocks, lines, min_ddb_alloc,
 * enable). A level that can't be supported is left disabled with
 * min_ddb_alloc = U16_MAX so the DDB allocator rejects it.
 */
static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
				 int level,
				 unsigned int latency,
				 const struct skl_wm_params *wp,
				 const struct skl_wm_level *result_prev,
				 struct skl_wm_level *result /* out */)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	uint_fixed_16_16_t method1, method2;
	uint_fixed_16_16_t selected_result;
	u32 blocks, lines, min_ddb_alloc = 0;

	if (latency == 0) {
		/* reject it */
		result->min_ddb_alloc = U16_MAX;
		return;
	}

	/*
	 * WaIncreaseLatencyIPCEnabled: kbl,cfl
	 * Display WA #1141: kbl,cfl
	 */
	if ((IS_KABYLAKE(dev_priv) ||
	     IS_COFFEELAKE(dev_priv) ||
	     IS_COMETLAKE(dev_priv)) &&
	    dev_priv->ipc_enabled)
		latency += 4;

	/* Memory bandwidth workaround: extra latency for x-tiled surfaces. */
	if (skl_needs_memory_bw_wa(dev_priv) && wp->x_tiled)
		latency += 15;

	method1 = skl_wm_method1(dev_priv, wp->plane_pixel_rate,
				 wp->cpp, latency, wp->dbuf_block_size);
	method2 = skl_wm_method2(wp->plane_pixel_rate,
				 crtc_state->hw.pipe_mode.crtc_htotal,
				 latency,
				 wp->plane_blocks_per_line);

	if (wp->y_tiled) {
		selected_result = max_fixed16(method2, wp->y_tile_minimum);
	} else {
		/* Small plane lines force method 2; otherwise compare against linetime. */
		if ((wp->cpp * crtc_state->hw.pipe_mode.crtc_htotal /
		     wp->dbuf_block_size < 1) &&
		     (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
			selected_result = method2;
		} else if (latency >= wp->linetime_us) {
			if (DISPLAY_VER(dev_priv) == 9)
				selected_result = min_fixed16(method1, method2);
			else
				selected_result = method2;
		} else {
			selected_result = method1;
		}
	}

	blocks = fixed16_to_u32_round_up(selected_result) + 1;
	lines = div_round_up_fixed16(selected_result,
				     wp->plane_blocks_per_line);

	if (DISPLAY_VER(dev_priv) == 9) {
		/* Display WA #1125: skl,bxt,kbl */
		if (level == 0 && wp->rc_surface)
			blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);

		/* Display WA #1126: skl,bxt,kbl */
		if (level >= 1 && level <= 7) {
			if (wp->y_tiled) {
				blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
				lines += wp->y_min_scanlines;
			} else {
				blocks++;
			}

			/*
			 * Make sure result blocks for higher latency levels
			 * are at least as high as the level below the current
			 * one — an assumption in the DDB algorithm
			 * optimization for special cases.
			 */
			if (result_prev->blocks > blocks)
				blocks = result_prev->blocks;
		}
	}

	if (DISPLAY_VER(dev_priv) >= 11) {
		if (wp->y_tiled) {
			int extra_lines;

			/* Round up to the next y_min_scanlines multiple, plus one more chunk. */
			if (lines % wp->y_min_scanlines == 0)
				extra_lines = wp->y_min_scanlines;
			else
				extra_lines = wp->y_min_scanlines * 2 -
					lines % wp->y_min_scanlines;

			min_ddb_alloc = mul_round_up_u32_fixed16(lines + extra_lines,
								 wp->plane_blocks_per_line);
		} else {
			/* 10% headroom for linear/x-tiled. */
			min_ddb_alloc = blocks + DIV_ROUND_UP(blocks, 10);
		}
	}

	if (!skl_wm_has_lines(dev_priv, level))
		lines = 0;

	if (lines > skl_wm_max_lines(dev_priv)) {
		/* reject it */
		result->min_ddb_alloc = U16_MAX;
		return;
	}

	/*
	 * If lines is valid, assume we can use this watermark level for now.
	 * We'll come back and disable it after we calculate the DDB
	 * allocation if it turns out we don't actually have enough blocks to
	 * satisfy it.
	 */
	result->blocks = blocks;
	result->lines = lines;
	/* Bspec says: value >= plane ddb allocation -> invalid, hence the +1 here */
	result->min_ddb_alloc = max(min_ddb_alloc, blocks) + 1;
	result->enable = true;

	if (DISPLAY_VER(dev_priv) < 12)
		result->can_sagv = latency >= dev_priv->sagv_block_time_us;
}
5617
/*
 * Compute all watermark levels for one plane, feeding each level the
 * previous level's result (needed by the gen9 monotonicity workaround
 * in skl_compute_plane_wm()).
 */
static void
skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
		      const struct skl_wm_params *wm_params,
		      struct skl_wm_level *levels)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	int level, max_level = ilk_wm_max_level(dev_priv);
	struct skl_wm_level *result_prev = &levels[0];

	for (level = 0; level <= max_level; level++) {
		struct skl_wm_level *result = &levels[level];
		unsigned int latency = dev_priv->wm.skl_latency[level];

		skl_compute_plane_wm(crtc_state, level, latency,
				     wm_params, result_prev, result);

		result_prev = result;
	}
}
5637
/*
 * tgl+: compute the SAGV watermark — a level-0 watermark with the SAGV
 * block time added to the level-0 latency.
 */
static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state,
				const struct skl_wm_params *wm_params,
				struct skl_plane_wm *plane_wm)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	struct skl_wm_level *sagv_wm = &plane_wm->sagv.wm0;
	struct skl_wm_level *levels = plane_wm->wm;
	unsigned int latency = dev_priv->wm.skl_latency[0] + dev_priv->sagv_block_time_us;

	skl_compute_plane_wm(crtc_state, 0, latency,
			     wm_params, &levels[0],
			     sagv_wm);
}
5651
/*
 * Compute the transition watermark from the corresponding WM0: the WM0
 * block count (or the Y-tile minimum, whichever is larger) plus a
 * platform-dependent fixed offset.
 */
static void skl_compute_transition_wm(struct drm_i915_private *dev_priv,
				      struct skl_wm_level *trans_wm,
				      const struct skl_wm_level *wm0,
				      const struct skl_wm_params *wp)
{
	u16 trans_min, trans_amount, trans_y_tile_min;
	u16 wm0_blocks, trans_offset, blocks;

	/* Transition WM don't make any sense if ipc is disabled. */
	if (!dev_priv->ipc_enabled)
		return;

	/*
	 * WaDisableTWM:skl,kbl,cfl,bxt
	 * Transition WM are not recommended by the HW team for GEN9.
	 */
	if (DISPLAY_VER(dev_priv) == 9)
		return;

	if (DISPLAY_VER(dev_priv) >= 11)
		trans_min = 4;
	else
		trans_min = 14;

	/* Display WA #1140: glk,cnl */
	if (DISPLAY_VER(dev_priv) == 10)
		trans_amount = 0;
	else
		trans_amount = 10; /* This is configurable amount */

	trans_offset = trans_min + trans_amount;

	/*
	 * The spec asks for Selected Result Blocks for wm0 (the real value),
	 * not Result Blocks (the integer value). Pay attention to the
	 * capital letters. The value wm0->blocks is actually Result Blocks,
	 * but since Result Blocks is the ceiling of Selected Result Blocks
	 * plus 1, and since we later have to take the ceiling of the sum
	 * anyway, we can just pretend Selected Result Blocks is Result
	 * Blocks minus 1 and it works out for the current platforms.
	 */
	wm0_blocks = wm0->blocks - 1;

	if (wp->y_tiled) {
		trans_y_tile_min =
			(u16)mul_round_up_u32_fixed16(2, wp->y_tile_minimum);
		blocks = max(wm0_blocks, trans_y_tile_min) + trans_offset;
	} else {
		blocks = wm0_blocks + trans_offset;
	}
	blocks++;

	/*
	 * Just assume we can enable the transition watermark. After
	 * computing the DDB we'll come back and disable it if that assumption
	 * turns out to be false.
	 */
	trans_wm->blocks = blocks;
	trans_wm->min_ddb_alloc = max_t(u16, wm0->min_ddb_alloc, blocks + 1);
	trans_wm->enable = true;
}
5714
/*
 * Build the full watermark set (all levels, transition WM, and on tgl+
 * the SAGV WMs) for one color plane of @plane_state, storing the result
 * under @plane_id in the crtc's raw watermarks.
 *
 * Returns 0 on success or the error from parameter computation.
 */
static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
				     const struct intel_plane_state *plane_state,
				     enum plane_id plane_id, int color_plane)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
	struct skl_wm_params wm_params;
	int ret;

	ret = skl_compute_plane_wm_params(crtc_state, plane_state,
					  &wm_params, color_plane);
	if (ret)
		return ret;

	skl_compute_wm_levels(crtc_state, &wm_params, wm->wm);

	skl_compute_transition_wm(dev_priv, &wm->trans_wm,
				  &wm->wm[0], &wm_params);

	if (DISPLAY_VER(dev_priv) >= 12) {
		tgl_compute_sagv_wm(crtc_state, &wm_params, wm);

		skl_compute_transition_wm(dev_priv, &wm->sagv.trans_wm,
					  &wm->sagv.wm0, &wm_params);
	}

	return 0;
}
5744
/*
 * Build the UV watermark levels for a planar format plane (pre-icl
 * path, where one hardware plane handles both Y and UV).
 */
static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
				 const struct intel_plane_state *plane_state,
				 enum plane_id plane_id)
{
	struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
	struct skl_wm_params wm_params;
	int ret;

	wm->is_planar = true;

	/* uv plane watermarks must also be validated for NV12/Planar */
	ret = skl_compute_plane_wm_params(crtc_state, plane_state,
					  &wm_params, 1);
	if (ret)
		return ret;

	skl_compute_wm_levels(crtc_state, &wm_params, wm->uv_wm);

	return 0;
}
5765
/*
 * Pre-icl: build the watermarks for one plane — the packed/Y plane
 * always, plus the UV levels when the framebuffer is planar YUV.
 * Invisible planes get zeroed watermarks.
 */
static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	enum plane_id plane_id = plane->id;
	struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int ret;

	memset(wm, 0, sizeof(*wm));

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	ret = skl_build_plane_wm_single(crtc_state, plane_state,
					plane_id, 0);
	if (ret)
		return ret;

	if (fb->format->is_yuv && fb->format->num_planes > 1) {
		ret = skl_build_plane_wm_uv(crtc_state, plane_state,
					    plane_id);
		if (ret)
			return ret;
	}

	return 0;
}
5794
/*
 * icl+: build the watermarks for one plane. Planar YUV uses two linked
 * hardware planes: the master computes both its own (UV) watermarks and
 * the linked Y plane's; slaves are skipped here since their watermarks
 * were already calculated on the master.
 */
static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum plane_id plane_id = plane->id;
	struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
	int ret;

	/* Watermarks calculated on the master plane. */
	if (plane_state->planar_slave)
		return 0;

	memset(wm, 0, sizeof(*wm));

	if (plane_state->planar_linked_plane) {
		const struct drm_framebuffer *fb = plane_state->hw.fb;
		enum plane_id y_plane_id = plane_state->planar_linked_plane->id;

		drm_WARN_ON(&dev_priv->drm,
			    !intel_wm_plane_visible(crtc_state, plane_state));
		drm_WARN_ON(&dev_priv->drm, !fb->format->is_yuv ||
			    fb->format->num_planes == 1);

		/* Y watermarks go on the linked plane... */
		ret = skl_build_plane_wm_single(crtc_state, plane_state,
						y_plane_id, 0);
		if (ret)
			return ret;

		/* ...UV watermarks on this (master) plane. */
		ret = skl_build_plane_wm_single(crtc_state, plane_state,
						plane_id, 1);
		if (ret)
			return ret;
	} else if (intel_wm_plane_visible(crtc_state, plane_state)) {
		ret = skl_build_plane_wm_single(crtc_state, plane_state,
						plane_id, 0);
		if (ret)
			return ret;
	}

	return 0;
}
5837
/*
 * Recompute the raw watermarks for every plane of @crtc that appears in
 * the atomic state, then promote the raw set to the optimal set.
 */
static int skl_build_pipe_wm(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	int ret, i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		/*
		 * Only planes belonging to this pipe are recomputed; planes
		 * not in the state keep their previously computed raw
		 * watermarks.
		 */
		if (plane->pipe != crtc->pipe)
			continue;

		if (DISPLAY_VER(dev_priv) >= 11)
			ret = icl_build_plane_wm(crtc_state, plane_state);
		else
			ret = skl_build_plane_wm(crtc_state, plane_state);
		if (ret)
			return ret;
	}

	crtc_state->wm.skl.optimal = crtc_state->wm.skl.raw;

	return 0;
}
5869
5870static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
5871 i915_reg_t reg,
5872 const struct skl_ddb_entry *entry)
5873{
5874 if (entry->end)
5875 intel_de_write_fw(dev_priv, reg,
5876 (entry->end - 1) << 16 | entry->start);
5877 else
5878 intel_de_write_fw(dev_priv, reg, 0);
5879}
5880
5881static void skl_write_wm_level(struct drm_i915_private *dev_priv,
5882 i915_reg_t reg,
5883 const struct skl_wm_level *level)
5884{
5885 u32 val = 0;
5886
5887 if (level->enable)
5888 val |= PLANE_WM_EN;
5889 if (level->ignore_lines)
5890 val |= PLANE_WM_IGNORE_LINES;
5891 val |= level->blocks;
5892 val |= REG_FIELD_PREP(PLANE_WM_LINES_MASK, level->lines);
5893
5894 intel_de_write_fw(dev_priv, reg, val);
5895}
5896
/*
 * Write all of a plane's watermark and DDB allocation registers:
 * per-level WMs, transition WM, SAGV WMs where supported, and the
 * buffer config. On icl+ only the Y/combined DDB register exists.
 */
void skl_write_plane_wm(struct intel_plane *plane,
			const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	int level, max_level = ilk_wm_max_level(dev_priv);
	enum plane_id plane_id = plane->id;
	enum pipe pipe = plane->pipe;
	const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
	const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
	const struct skl_ddb_entry *ddb_y =
		&crtc_state->wm.skl.plane_ddb_y[plane_id];
	const struct skl_ddb_entry *ddb_uv =
		&crtc_state->wm.skl.plane_ddb_uv[plane_id];

	for (level = 0; level <= max_level; level++)
		skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
				   skl_plane_wm_level(pipe_wm, plane_id, level));

	skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
			   skl_plane_trans_wm(pipe_wm, plane_id));

	if (HAS_HW_SAGV_WM(dev_priv)) {
		skl_write_wm_level(dev_priv, PLANE_WM_SAGV(pipe, plane_id),
				   &wm->sagv.wm0);
		skl_write_wm_level(dev_priv, PLANE_WM_SAGV_TRANS(pipe, plane_id),
				   &wm->sagv.trans_wm);
	}

	/* icl+: a single buffer config register, no NV12 variant. */
	if (DISPLAY_VER(dev_priv) >= 11) {
		skl_ddb_entry_write(dev_priv,
				    PLANE_BUF_CFG(pipe, plane_id), ddb_y);
		return;
	}

	/* Pre-icl planar formats program Y into the NV12 register. */
	if (wm->is_planar)
		swap(ddb_y, ddb_uv);

	skl_ddb_entry_write(dev_priv,
			    PLANE_BUF_CFG(pipe, plane_id), ddb_y);
	skl_ddb_entry_write(dev_priv,
			    PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_uv);
}
5939
/*
 * Write the cursor plane's watermark and DDB registers — the cursor
 * has its own dedicated register set (CUR_WM etc.).
 */
void skl_write_cursor_wm(struct intel_plane *plane,
			 const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	int level, max_level = ilk_wm_max_level(dev_priv);
	enum plane_id plane_id = plane->id;
	enum pipe pipe = plane->pipe;
	const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
	const struct skl_ddb_entry *ddb =
		&crtc_state->wm.skl.plane_ddb_y[plane_id];

	for (level = 0; level <= max_level; level++)
		skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
				   skl_plane_wm_level(pipe_wm, plane_id, level));

	skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe),
			   skl_plane_trans_wm(pipe_wm, plane_id));

	if (HAS_HW_SAGV_WM(dev_priv)) {
		const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];

		skl_write_wm_level(dev_priv, CUR_WM_SAGV(pipe),
				   &wm->sagv.wm0);
		skl_write_wm_level(dev_priv, CUR_WM_SAGV_TRANS(pipe),
				   &wm->sagv.trans_wm);
	}

	skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), ddb);
}
5969
5970bool skl_wm_level_equals(const struct skl_wm_level *l1,
5971 const struct skl_wm_level *l2)
5972{
5973 return l1->enable == l2->enable &&
5974 l1->ignore_lines == l2->ignore_lines &&
5975 l1->lines == l2->lines &&
5976 l1->blocks == l2->blocks;
5977}
5978
5979static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv,
5980 const struct skl_plane_wm *wm1,
5981 const struct skl_plane_wm *wm2)
5982{
5983 int level, max_level = ilk_wm_max_level(dev_priv);
5984
5985 for (level = 0; level <= max_level; level++) {
5986
5987
5988
5989
5990
5991 if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]))
5992 return false;
5993 }
5994
5995 return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm) &&
5996 skl_wm_level_equals(&wm1->sagv.wm0, &wm2->sagv.wm0) &&
5997 skl_wm_level_equals(&wm1->sagv.trans_wm, &wm2->sagv.trans_wm);
5998}
5999
6000static bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
6001 const struct skl_ddb_entry *b)
6002{
6003 return a->start < b->end && b->start < a->end;
6004}
6005
6006static void skl_ddb_entry_union(struct skl_ddb_entry *a,
6007 const struct skl_ddb_entry *b)
6008{
6009 if (a->end && b->end) {
6010 a->start = min(a->start, b->start);
6011 a->end = max(a->end, b->end);
6012 } else if (b->end) {
6013 a->start = b->start;
6014 a->end = b->end;
6015 }
6016}
6017
6018bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
6019 const struct skl_ddb_entry *entries,
6020 int num_entries, int ignore_idx)
6021{
6022 int i;
6023
6024 for (i = 0; i < num_entries; i++) {
6025 if (i != ignore_idx &&
6026 skl_ddb_entries_overlap(ddb, &entries[i]))
6027 return true;
6028 }
6029
6030 return false;
6031}
6032
/*
 * Pull into the atomic state every plane whose DDB allocation (Y and/or
 * UV entry) changed, and flag it for reprogramming via update_planes.
 *
 * Returns 0 on success or a negative error code if acquiring a plane
 * state fails.
 */
static int
skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
			    struct intel_crtc_state *new_crtc_state)
{
	struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->uapi.state);
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		struct intel_plane_state *plane_state;
		enum plane_id plane_id = plane->id;

		/* Nothing to do if both the Y and UV entries are unchanged. */
		if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id],
					&new_crtc_state->wm.skl.plane_ddb_y[plane_id]) &&
		    skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_uv[plane_id],
					&new_crtc_state->wm.skl.plane_ddb_uv[plane_id]))
			continue;

		plane_state = intel_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);

		new_crtc_state->update_planes |= BIT(plane_id);
	}

	return 0;
}
6061
6062static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state)
6063{
6064 struct drm_i915_private *dev_priv = to_i915(dbuf_state->base.state->base.dev);
6065 u8 enabled_slices;
6066 enum pipe pipe;
6067
6068
6069
6070
6071
6072 enabled_slices = BIT(DBUF_S1);
6073
6074 for_each_pipe(dev_priv, pipe)
6075 enabled_slices |= dbuf_state->slices[pipe];
6076
6077 return enabled_slices;
6078}
6079
/*
 * Compute the DBUF/DDB configuration for an atomic commit: the active
 * pipe set, per-pipe dbuf slice masks, per-pipe DDB weights, the final
 * per-crtc DDB carve-up and each crtc's per-plane DDB allocation.
 *
 * The global dbuf state is locked only when something it covers
 * actually changes, and serialized (plus a modeset on all pipes for an
 * MBUS joining change) when the enabled slices or MBUS mode change.
 *
 * Returns 0 on success or a negative error code.
 */
static int
skl_compute_ddb(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_dbuf_state *old_dbuf_state;
	struct intel_dbuf_state *new_dbuf_state = NULL;
	const struct intel_crtc_state *old_crtc_state;
	struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int ret, i;

	/* Acquire the dbuf state only if the commit touches any crtc. */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		new_dbuf_state = intel_atomic_get_dbuf_state(state);
		if (IS_ERR(new_dbuf_state))
			return PTR_ERR(new_dbuf_state);

		old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
		break;
	}

	if (!new_dbuf_state)
		return 0;

	new_dbuf_state->active_pipes =
		intel_calc_active_pipes(state, old_dbuf_state->active_pipes);

	if (old_dbuf_state->active_pipes != new_dbuf_state->active_pipes) {
		ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
		if (ret)
			return ret;
	}

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		enum pipe pipe = crtc->pipe;

		new_dbuf_state->slices[pipe] =
			skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes);

		/* Lock the global state only when this pipe's slices change. */
		if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe])
			continue;

		ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
		if (ret)
			return ret;
	}

	new_dbuf_state->enabled_slices = intel_dbuf_enabled_slices(new_dbuf_state);

	if (IS_ALDERLAKE_P(dev_priv))
		new_dbuf_state->joined_mbus = adlp_check_mbus_joined(new_dbuf_state->active_pipes);

	if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices ||
	    old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
		ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
		if (ret)
			return ret;

		if (old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
			/* A change in MBUS joining forces a modeset everywhere. */
			ret = intel_modeset_all_pipes(state);
			if (ret)
				return ret;
		}

		drm_dbg_kms(&dev_priv->drm,
			    "Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n",
			    old_dbuf_state->enabled_slices,
			    new_dbuf_state->enabled_slices,
			    INTEL_INFO(dev_priv)->dbuf.slice_mask,
			    yesno(old_dbuf_state->joined_mbus),
			    yesno(new_dbuf_state->joined_mbus));
	}

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		new_dbuf_state->weight[pipe] = intel_crtc_ddb_weight(new_crtc_state);

		if (old_dbuf_state->weight[pipe] == new_dbuf_state->weight[pipe])
			continue;

		ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
		if (ret)
			return ret;
	}

	/* First carve the pipe-level DDB up among the crtcs... */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		ret = skl_crtc_allocate_ddb(state, crtc);
		if (ret)
			return ret;
	}

	/* ...then split each crtc's share among its planes. */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		ret = skl_allocate_plane_ddb(state, crtc);
		if (ret)
			return ret;

		ret = skl_ddb_add_affected_planes(old_crtc_state,
						  new_crtc_state);
		if (ret)
			return ret;
	}

	return 0;
}
6186
/* Render an enable flag as a single character: '*' when set, ' ' otherwise. */
static char enast(bool enable)
{
	if (enable)
		return '*';

	return ' ';
}
6191
/*
 * Dump, at KMS debug level, every per-plane difference between the old
 * and new watermark/DDB state in an atomic commit: DDB ranges, level
 * enables, line counts, block counts and minimum DDB allocations.
 * Debug-only; has no effect on the committed state.
 */
static void
skl_print_wm_changes(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state;
	const struct intel_crtc_state *new_crtc_state;
	struct intel_plane *plane;
	struct intel_crtc *crtc;
	int i;

	/* Skip all the formatting work when KMS debugging is off. */
	if (!drm_debug_enabled(DRM_UT_KMS))
		return;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		const struct skl_pipe_wm *old_pipe_wm, *new_pipe_wm;

		old_pipe_wm = &old_crtc_state->wm.skl.optimal;
		new_pipe_wm = &new_crtc_state->wm.skl.optimal;

		/* Log DDB allocation changes for each plane. */
		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
			enum plane_id plane_id = plane->id;
			const struct skl_ddb_entry *old, *new;

			old = &old_crtc_state->wm.skl.plane_ddb_y[plane_id];
			new = &new_crtc_state->wm.skl.plane_ddb_y[plane_id];

			if (skl_ddb_entry_equal(old, new))
				continue;

			drm_dbg_kms(&dev_priv->drm,
				    "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
				    plane->base.base.id, plane->base.name,
				    old->start, old->end, new->start, new->end,
				    skl_ddb_entry_size(old), skl_ddb_entry_size(new));
		}

		/* Log watermark value changes for each plane. */
		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
			enum plane_id plane_id = plane->id;
			const struct skl_plane_wm *old_wm, *new_wm;

			old_wm = &old_pipe_wm->planes[plane_id];
			new_wm = &new_pipe_wm->planes[plane_id];

			if (skl_plane_wm_equals(dev_priv, old_wm, new_wm))
				continue;

			drm_dbg_kms(&dev_priv->drm,
				    "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm"
				    " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n",
				    plane->base.base.id, plane->base.name,
				    enast(old_wm->wm[0].enable), enast(old_wm->wm[1].enable),
				    enast(old_wm->wm[2].enable), enast(old_wm->wm[3].enable),
				    enast(old_wm->wm[4].enable), enast(old_wm->wm[5].enable),
				    enast(old_wm->wm[6].enable), enast(old_wm->wm[7].enable),
				    enast(old_wm->trans_wm.enable),
				    enast(old_wm->sagv.wm0.enable),
				    enast(old_wm->sagv.trans_wm.enable),
				    enast(new_wm->wm[0].enable), enast(new_wm->wm[1].enable),
				    enast(new_wm->wm[2].enable), enast(new_wm->wm[3].enable),
				    enast(new_wm->wm[4].enable), enast(new_wm->wm[5].enable),
				    enast(new_wm->wm[6].enable), enast(new_wm->wm[7].enable),
				    enast(new_wm->trans_wm.enable),
				    enast(new_wm->sagv.wm0.enable),
				    enast(new_wm->sagv.trans_wm.enable));

			drm_dbg_kms(&dev_priv->drm,
				    "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d"
				    " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n",
				    plane->base.base.id, plane->base.name,
				    enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].lines,
				    enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].lines,
				    enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].lines,
				    enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].lines,
				    enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].lines,
				    enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].lines,
				    enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].lines,
				    enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].lines,
				    enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.lines,
				    enast(old_wm->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines,
				    enast(old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm.lines,
				    enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].lines,
				    enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].lines,
				    enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].lines,
				    enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].lines,
				    enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].lines,
				    enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].lines,
				    enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].lines,
				    enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].lines,
				    enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.lines,
				    enast(new_wm->sagv.wm0.ignore_lines), new_wm->sagv.wm0.lines,
				    enast(new_wm->sagv.trans_wm.ignore_lines), new_wm->sagv.trans_wm.lines);

			drm_dbg_kms(&dev_priv->drm,
				    "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
				    " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
				    plane->base.base.id, plane->base.name,
				    old_wm->wm[0].blocks, old_wm->wm[1].blocks,
				    old_wm->wm[2].blocks, old_wm->wm[3].blocks,
				    old_wm->wm[4].blocks, old_wm->wm[5].blocks,
				    old_wm->wm[6].blocks, old_wm->wm[7].blocks,
				    old_wm->trans_wm.blocks,
				    old_wm->sagv.wm0.blocks,
				    old_wm->sagv.trans_wm.blocks,
				    new_wm->wm[0].blocks, new_wm->wm[1].blocks,
				    new_wm->wm[2].blocks, new_wm->wm[3].blocks,
				    new_wm->wm[4].blocks, new_wm->wm[5].blocks,
				    new_wm->wm[6].blocks, new_wm->wm[7].blocks,
				    new_wm->trans_wm.blocks,
				    new_wm->sagv.wm0.blocks,
				    new_wm->sagv.trans_wm.blocks);

			drm_dbg_kms(&dev_priv->drm,
				    "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
				    " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
				    plane->base.base.id, plane->base.name,
				    old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
				    old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
				    old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
				    old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
				    old_wm->trans_wm.min_ddb_alloc,
				    old_wm->sagv.wm0.min_ddb_alloc,
				    old_wm->sagv.trans_wm.min_ddb_alloc,
				    new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
				    new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
				    new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
				    new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
				    new_wm->trans_wm.min_ddb_alloc,
				    new_wm->sagv.wm0.min_ddb_alloc,
				    new_wm->sagv.trans_wm.min_ddb_alloc);
		}
	}
}
6325
6326static bool skl_plane_selected_wm_equals(struct intel_plane *plane,
6327 const struct skl_pipe_wm *old_pipe_wm,
6328 const struct skl_pipe_wm *new_pipe_wm)
6329{
6330 struct drm_i915_private *i915 = to_i915(plane->base.dev);
6331 int level, max_level = ilk_wm_max_level(i915);
6332
6333 for (level = 0; level <= max_level; level++) {
6334
6335
6336
6337
6338
6339 if (!skl_wm_level_equals(skl_plane_wm_level(old_pipe_wm, plane->id, level),
6340 skl_plane_wm_level(new_pipe_wm, plane->id, level)))
6341 return false;
6342 }
6343
6344 if (HAS_HW_SAGV_WM(i915)) {
6345 const struct skl_plane_wm *old_wm = &old_pipe_wm->planes[plane->id];
6346 const struct skl_plane_wm *new_wm = &new_pipe_wm->planes[plane->id];
6347
6348 if (!skl_wm_level_equals(&old_wm->sagv.wm0, &new_wm->sagv.wm0) ||
6349 !skl_wm_level_equals(&old_wm->sagv.trans_wm, &new_wm->sagv.trans_wm))
6350 return false;
6351 }
6352
6353 return skl_wm_level_equals(skl_plane_trans_wm(old_pipe_wm, plane->id),
6354 skl_plane_trans_wm(new_pipe_wm, plane->id));
6355}
6356
6357
6358
6359
6360
6361
6362
6363
6364
6365
6366
6367
6368
6369
6370
6371
6372
6373
6374
6375
6376
6377
6378
/*
 * Pull into the atomic state every plane on @crtc whose effective
 * watermarks changed, flagging it via update_planes. On a modeset,
 * every plane on the crtc is included unconditionally.
 *
 * Returns 0 on success or a negative error code if acquiring a plane
 * state fails.
 */
static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		struct intel_plane_state *plane_state;
		enum plane_id plane_id = plane->id;

		/*
		 * Skip the plane only when no modeset is needed and the
		 * watermark levels it would be programmed with are
		 * unchanged.
		 */
		if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi) &&
		    skl_plane_selected_wm_equals(plane,
						 &old_crtc_state->wm.skl.optimal,
						 &new_crtc_state->wm.skl.optimal))
			continue;

		plane_state = intel_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);

		new_crtc_state->update_planes |= BIT(plane_id);
	}

	return 0;
}
6416
/*
 * Top-level SKL+ watermark computation for an atomic commit.
 *
 * Phases, in order: build each crtc's pipe watermarks, compute the
 * DBUF/DDB configuration, compute the SAGV mask, then add any planes
 * whose effective watermarks changed. Finally dumps the changes at
 * KMS debug level.
 *
 * Returns 0 on success or a negative error code.
 */
static int
skl_compute_wm(struct intel_atomic_state *state)
{
	struct intel_crtc *crtc;
	struct intel_crtc_state *new_crtc_state;
	int ret, i;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		ret = skl_build_pipe_wm(state, crtc);
		if (ret)
			return ret;
	}

	ret = skl_compute_ddb(state);
	if (ret)
		return ret;

	ret = intel_compute_sagv_mask(state);
	if (ret)
		return ret;

	/*
	 * Done after skl_compute_ddb()/intel_compute_sagv_mask() so the
	 * final watermark selection is known before deciding which
	 * planes need an update.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		ret = skl_wm_add_affected_planes(state, crtc);
		if (ret)
			return ret;
	}

	skl_print_wm_changes(state);

	return 0;
}
6453
6454static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
6455 struct intel_wm_config *config)
6456{
6457 struct intel_crtc *crtc;
6458
6459
6460 for_each_intel_crtc(&dev_priv->drm, crtc) {
6461 const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
6462
6463 if (!wm->pipe_enabled)
6464 continue;
6465
6466 config->sprites_enabled |= wm->sprites_enabled;
6467 config->sprites_scaled |= wm->sprites_scaled;
6468 config->num_pipes_active++;
6469 }
6470}
6471
/*
 * Recompute and write the ILK LP watermarks from every crtc's currently
 * active watermark state.
 *
 * Always computes the 1/2 DDB partitioning result; on display version
 * 7+ with a single active pipe using sprites it also tries the 5/6
 * partitioning and picks the better of the two before programming the
 * registers.
 */
static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
{
	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
	struct ilk_wm_maximums max;
	struct intel_wm_config config = {};
	struct ilk_wm_values results = {};
	enum intel_ddb_partitioning partitioning;

	ilk_compute_wm_config(dev_priv, &config);

	ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_1_2, &max);
	ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2);

	/* 5/6 split only when sprites are enabled on a lone pipe. */
	if (DISPLAY_VER(dev_priv) >= 7 &&
	    config.num_pipes_active == 1 && config.sprites_enabled) {
		ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max);
		ilk_wm_merge(dev_priv, &config, &max, &lp_wm_5_6);

		best_lp_wm = ilk_find_best_result(dev_priv, &lp_wm_1_2, &lp_wm_5_6);
	} else {
		best_lp_wm = &lp_wm_1_2;
	}

	partitioning = (best_lp_wm == &lp_wm_1_2) ?
		       INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;

	ilk_compute_wm_results(dev_priv, best_lp_wm, partitioning, &results);

	ilk_write_wm_values(dev_priv, &results);
}
6503
/*
 * Commit the crtc's intermediate ILK watermarks (safe for both the old
 * and new plane configuration) and reprogram the hardware, holding
 * wm_mutex to serialize against other watermark updates.
 */
static void ilk_initial_watermarks(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	mutex_lock(&dev_priv->wm.wm_mutex);
	crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate;
	ilk_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->wm.wm_mutex);
}
6516
/*
 * After vblank, switch the crtc from its intermediate to its optimal
 * ILK watermarks and reprogram the hardware. No-op unless the state
 * flagged a post-vblank update.
 */
static void ilk_optimize_watermarks(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!crtc_state->wm.need_postvbl_update)
		return;

	mutex_lock(&dev_priv->wm.wm_mutex);
	crtc->wm.active.ilk = crtc_state->wm.ilk.optimal;
	ilk_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->wm.wm_mutex);
}
6532
/* Decode a PLANE_WM/CUR_WM register value into a skl_wm_level. */
static void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level)
{
	level->enable = val & PLANE_WM_EN;
	level->ignore_lines = val & PLANE_WM_IGNORE_LINES;
	level->blocks = val & PLANE_WM_BLOCKS_MASK;
	level->lines = REG_FIELD_GET(PLANE_WM_LINES_MASK, val);
}
6540
/*
 * Read back the current SKL+ watermark programming for every plane on
 * @crtc into @out: all regular levels, the transition watermark, and
 * the SAGV watermarks. The cursor uses its own register set (CUR_WM*
 * rather than PLANE_WM*).
 */
void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
			      struct skl_pipe_wm *out)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	int level, max_level;
	enum plane_id plane_id;
	u32 val;

	max_level = ilk_wm_max_level(dev_priv);

	for_each_plane_id_on_crtc(crtc, plane_id) {
		struct skl_plane_wm *wm = &out->planes[plane_id];

		for (level = 0; level <= max_level; level++) {
			if (plane_id != PLANE_CURSOR)
				val = intel_uncore_read(&dev_priv->uncore, PLANE_WM(pipe, plane_id, level));
			else
				val = intel_uncore_read(&dev_priv->uncore, CUR_WM(pipe, level));

			skl_wm_level_from_reg_val(val, &wm->wm[level]);
		}

		if (plane_id != PLANE_CURSOR)
			val = intel_uncore_read(&dev_priv->uncore, PLANE_WM_TRANS(pipe, plane_id));
		else
			val = intel_uncore_read(&dev_priv->uncore, CUR_WM_TRANS(pipe));

		skl_wm_level_from_reg_val(val, &wm->trans_wm);

		if (HAS_HW_SAGV_WM(dev_priv)) {
			/* Dedicated SAGV WM registers exist; read them back. */
			if (plane_id != PLANE_CURSOR)
				val = intel_uncore_read(&dev_priv->uncore,
							PLANE_WM_SAGV(pipe, plane_id));
			else
				val = intel_uncore_read(&dev_priv->uncore,
							CUR_WM_SAGV(pipe));

			skl_wm_level_from_reg_val(val, &wm->sagv.wm0);

			if (plane_id != PLANE_CURSOR)
				val = intel_uncore_read(&dev_priv->uncore,
							PLANE_WM_SAGV_TRANS(pipe, plane_id));
			else
				val = intel_uncore_read(&dev_priv->uncore,
							CUR_WM_SAGV_TRANS(pipe));

			skl_wm_level_from_reg_val(val, &wm->sagv.trans_wm);
		} else if (DISPLAY_VER(dev_priv) >= 12) {
			/* No dedicated registers: mirror level 0 / trans WM. */
			wm->sagv.wm0 = wm->wm[0];
			wm->sagv.trans_wm = wm->trans_wm;
		}
	}
}
6595
/*
 * Populate the software SKL+ watermark/DBUF state from the current
 * hardware programming: per-crtc watermarks, per-plane DDB entries,
 * per-pipe dbuf slices and weights, the MBUS joining mode (ADL-P), and
 * the globally enabled slice mask.
 */
void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
	struct intel_dbuf_state *dbuf_state =
		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
	struct intel_crtc *crtc;

	if (IS_ALDERLAKE_P(dev_priv))
		dbuf_state->joined_mbus = intel_de_read(dev_priv, MBUS_CTL) & MBUS_JOIN;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		enum pipe pipe = crtc->pipe;
		unsigned int mbus_offset;
		enum plane_id plane_id;

		skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
		crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal;

		memset(&dbuf_state->ddb[pipe], 0, sizeof(dbuf_state->ddb[pipe]));

		/* Pipe DDB entry = union of all its planes' entries. */
		for_each_plane_id_on_crtc(crtc, plane_id) {
			struct skl_ddb_entry *ddb_y =
				&crtc_state->wm.skl.plane_ddb_y[plane_id];
			struct skl_ddb_entry *ddb_uv =
				&crtc_state->wm.skl.plane_ddb_uv[plane_id];

			skl_ddb_get_hw_plane_state(dev_priv, crtc->pipe,
						   plane_id, ddb_y, ddb_uv);

			skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_y);
			skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_uv);
		}

		dbuf_state->slices[pipe] =
			skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes);

		dbuf_state->weight[pipe] = intel_crtc_ddb_weight(crtc_state);

		/*
		 * Convert the slice-relative DDB range into the crtc's
		 * range by adding the MBUS offset of the pipe's slices.
		 */
		mbus_offset = mbus_ddb_offset(dev_priv, dbuf_state->slices[pipe]);
		crtc_state->wm.skl.ddb.start = mbus_offset + dbuf_state->ddb[pipe].start;
		crtc_state->wm.skl.ddb.end = mbus_offset + dbuf_state->ddb[pipe].end;

		drm_dbg_kms(&dev_priv->drm,
			    "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n",
			    crtc->base.base.id, crtc->base.name,
			    dbuf_state->slices[pipe], dbuf_state->ddb[pipe].start,
			    dbuf_state->ddb[pipe].end, dbuf_state->active_pipes,
			    yesno(dbuf_state->joined_mbus));
	}

	dbuf_state->enabled_slices = dev_priv->dbuf.enabled_slices;
}
6653
/*
 * Read back the ILK pipe watermark state for @crtc from the WM0
 * register into both the crtc state's optimal watermarks and the
 * crtc's active watermark tracking.
 */
static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct ilk_wm_values *hw = &dev_priv->wm.hw;
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
	struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal;
	enum pipe pipe = crtc->pipe;

	hw->wm_pipe[pipe] = intel_uncore_read(&dev_priv->uncore, WM0_PIPE_ILK(pipe));

	memset(active, 0, sizeof(*active));

	active->pipe_enabled = crtc->active;

	if (active->pipe_enabled) {
		u32 tmp = hw->wm_pipe[pipe];

		/*
		 * Decode the WM0 plane/sprite/cursor fields from the
		 * register value; only level 0 is read back here.
		 */
		active->wm[0].enable = true;
		active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
		active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
		active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
	} else {
		int level, max_level = ilk_wm_max_level(dev_priv);

		/*
		 * Pipe disabled: mark every level as enabled so it
		 * doesn't constrain the merged LP watermarks.
		 */
		for (level = 0; level <= max_level; level++)
			active->wm[level].enable = true;
	}

	crtc->wm.active.ilk = *active;
}
6696
/* Extract a named watermark field from a DSPFW* register value. */
#define _FW_WM(value, plane) \
	(((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
/* Same, but using the VLV/CHV-specific field mask (DSPFW_*_MASK_VLV). */
#define _FW_WM_VLV(value, plane) \
	(((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
6701
/*
 * Read the G4X watermark programming from the DSPFW1..3 registers into
 * @wm: per-pipe plane/sprite/cursor values, the self-refresh (SR) and
 * HPLL watermarks, and the FBC/HPLL enable bits.
 */
static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
			       struct g4x_wm_values *wm)
{
	u32 tmp;

	tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1);
	wm->sr.plane = _FW_WM(tmp, SR);
	wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
	wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB);
	wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA);

	tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2);
	wm->fbc_en = tmp & DSPFW_FBC_SR_EN;
	wm->sr.fbc = _FW_WM(tmp, FBC_SR);
	wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR);
	wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEB);
	wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
	wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA);

	tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3);
	wm->hpll_en = tmp & DSPFW_HPLL_SR_EN;
	wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
	wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR);
	wm->hpll.plane = _FW_WM(tmp, HPLL_SR);
}
6727
/*
 * Read the VLV/CHV watermark programming into @wm: per-pipe drain
 * latency (DDL) values, the plane/sprite/cursor watermarks from the
 * DSPFW registers, and the high-order watermark bits from DSPHOWM.
 * CHV additionally has pipe C and its own DSPFW7-9 registers.
 */
static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
			       struct vlv_wm_values *wm)
{
	enum pipe pipe;
	u32 tmp;

	for_each_pipe(dev_priv, pipe) {
		tmp = intel_uncore_read(&dev_priv->uncore, VLV_DDL(pipe));

		/* Each DDL field carries a precision bit plus latency. */
		wm->ddl[pipe].plane[PLANE_PRIMARY] =
			(tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
		wm->ddl[pipe].plane[PLANE_CURSOR] =
			(tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
		wm->ddl[pipe].plane[PLANE_SPRITE0] =
			(tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
		wm->ddl[pipe].plane[PLANE_SPRITE1] =
			(tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
	}

	tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1);
	wm->sr.plane = _FW_WM(tmp, SR);
	wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
	wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
	wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);

	tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2);
	wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
	wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
	wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);

	tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3);
	wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);

	if (IS_CHERRYVIEW(dev_priv)) {
		tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7_CHV);
		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);

		tmp = intel_uncore_read(&dev_priv->uncore, DSPFW8_CHV);
		wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
		wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);

		tmp = intel_uncore_read(&dev_priv->uncore, DSPFW9_CHV);
		wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
		wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);

		/* Merge in the high-order bits of each watermark value. */
		tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
		wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
		wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
		wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEC_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
	} else {
		tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7);
		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);

		/* Merge in the high-order bits of each watermark value. */
		tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
	}
}
6800
6801#undef _FW_WM
6802#undef _FW_WM_VLV
6803
/*
 * Populate the software G4X watermark state from the current hardware
 * programming: read the DSPFW registers, reconstruct each crtc's
 * per-level raw watermarks (NORMAL/SR/HPLL), and seed the optimal and
 * intermediate states from the active values.
 */
void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
	struct g4x_wm_values *wm = &dev_priv->wm.g4x;
	struct intel_crtc *crtc;

	g4x_read_wm_values(dev_priv, wm);

	wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct g4x_wm_state *active = &crtc->wm.active.g4x;
		struct g4x_pipe_wm *raw;
		enum pipe pipe = crtc->pipe;
		enum plane_id plane_id;
		int level, max_level;

		active->cxsr = wm->cxsr;
		active->hpll_en = wm->hpll_en;
		active->fbc_en = wm->fbc_en;

		active->sr = wm->sr;
		active->hpll = wm->hpll;

		for_each_plane_id_on_crtc(crtc, plane_id) {
			active->wm.plane[plane_id] =
				wm->pipe[pipe].plane[plane_id];
		}

		/* Deepest usable level depends on cxsr/HPLL enables. */
		if (wm->cxsr && wm->hpll_en)
			max_level = G4X_WM_LEVEL_HPLL;
		else if (wm->cxsr)
			max_level = G4X_WM_LEVEL_SR;
		else
			max_level = G4X_WM_LEVEL_NORMAL;

		level = G4X_WM_LEVEL_NORMAL;
		raw = &crtc_state->wm.g4x.raw[level];
		for_each_plane_id_on_crtc(crtc, plane_id)
			raw->plane[plane_id] = active->wm.plane[plane_id];

		level = G4X_WM_LEVEL_SR;
		if (level > max_level)
			goto out;

		raw = &crtc_state->wm.g4x.raw[level];
		raw->plane[PLANE_PRIMARY] = active->sr.plane;
		raw->plane[PLANE_CURSOR] = active->sr.cursor;
		raw->plane[PLANE_SPRITE0] = 0;
		raw->fbc = active->sr.fbc;

		level = G4X_WM_LEVEL_HPLL;
		if (level > max_level)
			goto out;

		raw = &crtc_state->wm.g4x.raw[level];
		raw->plane[PLANE_PRIMARY] = active->hpll.plane;
		raw->plane[PLANE_CURSOR] = active->hpll.cursor;
		raw->plane[PLANE_SPRITE0] = 0;
		raw->fbc = active->hpll.fbc;

		level++;
	out:
		/* Invalidate all levels beyond the deepest usable one. */
		for_each_plane_id_on_crtc(crtc, plane_id)
			g4x_raw_plane_wm_set(crtc_state, level,
					     plane_id, USHRT_MAX);
		g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);

		crtc_state->wm.g4x.optimal = *active;
		crtc_state->wm.g4x.intermediate = *active;

		drm_dbg_kms(&dev_priv->drm,
			    "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n",
			    pipe_name(pipe),
			    wm->pipe[pipe].plane[PLANE_PRIMARY],
			    wm->pipe[pipe].plane[PLANE_CURSOR],
			    wm->pipe[pipe].plane[PLANE_SPRITE0]);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n",
		    wm->sr.plane, wm->sr.cursor, wm->sr.fbc);
	drm_dbg_kms(&dev_priv->drm,
		    "Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n",
		    wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
	drm_dbg_kms(&dev_priv->drm, "Initial SR=%s HPLL=%s FBC=%s\n",
		    yesno(wm->cxsr), yesno(wm->hpll_en), yesno(wm->fbc_en));
}
6893
6894void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
6895{
6896 struct intel_plane *plane;
6897 struct intel_crtc *crtc;
6898
6899 mutex_lock(&dev_priv->wm.wm_mutex);
6900
6901 for_each_intel_plane(&dev_priv->drm, plane) {
6902 struct intel_crtc *crtc =
6903 intel_get_crtc_for_pipe(dev_priv, plane->pipe);
6904 struct intel_crtc_state *crtc_state =
6905 to_intel_crtc_state(crtc->base.state);
6906 struct intel_plane_state *plane_state =
6907 to_intel_plane_state(plane->base.state);
6908 struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
6909 enum plane_id plane_id = plane->id;
6910 int level;
6911
6912 if (plane_state->uapi.visible)
6913 continue;
6914
6915 for (level = 0; level < 3; level++) {
6916 struct g4x_pipe_wm *raw =
6917 &crtc_state->wm.g4x.raw[level];
6918
6919 raw->plane[plane_id] = 0;
6920 wm_state->wm.plane[plane_id] = 0;
6921 }
6922
6923 if (plane_id == PLANE_PRIMARY) {
6924 for (level = 0; level < 3; level++) {
6925 struct g4x_pipe_wm *raw =
6926 &crtc_state->wm.g4x.raw[level];
6927 raw->fbc = 0;
6928 }
6929
6930 wm_state->sr.fbc = 0;
6931 wm_state->hpll.fbc = 0;
6932 wm_state->fbc_en = false;
6933 }
6934 }
6935
6936 for_each_intel_crtc(&dev_priv->drm, crtc) {
6937 struct intel_crtc_state *crtc_state =
6938 to_intel_crtc_state(crtc->base.state);
6939
6940 crtc_state->wm.g4x.intermediate =
6941 crtc_state->wm.g4x.optimal;
6942 crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
6943 }
6944
6945 g4x_program_watermarks(dev_priv);
6946
6947 mutex_unlock(&dev_priv->wm.wm_mutex);
6948}
6949
/*
 * vlv_wm_get_hw_state - read out the current VLV/CHV watermark hw state
 * @dev_priv: i915 device
 *
 * Populates dev_priv->wm.vlv and every crtc's active/optimal/intermediate
 * watermark state from the hardware, and determines the deepest usable
 * watermark level (PM2, PM5 or DDR DVFS on CHV).
 */
void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
	struct vlv_wm_values *wm = &dev_priv->wm.vlv;
	struct intel_crtc *crtc;
	u32 val;

	vlv_read_wm_values(dev_priv, wm);

	wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
	wm->level = VLV_WM_LEVEL_PM2;

	if (IS_CHERRYVIEW(dev_priv)) {
		vlv_punit_get(dev_priv);

		val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
		if (val & DSP_MAXFIFO_PM5_ENABLE)
			wm->level = VLV_WM_LEVEL_PM5;

		/*
		 * Poke the Punit by setting the REQ_ACK bit and waiting for
		 * it to clear. If the Punit never acks (timeout below), DDR
		 * DVFS is assumed unavailable and max_level is capped at PM5.
		 */
		val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
		val |= FORCE_DDR_FREQ_REQ_ACK;
		vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

		if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
			      FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
			drm_dbg_kms(&dev_priv->drm,
				    "Punit not acking DDR DVFS request, "
				    "assuming DDR DVFS is disabled\n");
			dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
		} else {
			val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
			if ((val & FORCE_DDR_HIGH_FREQ) == 0)
				wm->level = VLV_WM_LEVEL_DDR_DVFS;
		}

		vlv_punit_put(dev_priv);
	}

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct vlv_wm_state *active = &crtc->wm.active.vlv;
		const struct vlv_fifo_state *fifo_state =
			&crtc_state->wm.vlv.fifo_state;
		enum pipe pipe = crtc->pipe;
		enum plane_id plane_id;
		int level;

		vlv_get_fifo_size(crtc_state);

		active->num_levels = wm->level + 1;
		active->cxsr = wm->cxsr;

		for (level = 0; level < active->num_levels; level++) {
			struct g4x_pipe_wm *raw =
				&crtc_state->wm.vlv.raw[level];

			active->sr[level].plane = wm->sr.plane;
			active->sr[level].cursor = wm->sr.cursor;

			for_each_plane_id_on_crtc(crtc, plane_id) {
				active->wm[level].plane[plane_id] =
					wm->pipe[pipe].plane[plane_id];

				raw->plane[plane_id] =
					vlv_invert_wm_value(active->wm[level].plane[plane_id],
							    fifo_state->plane[plane_id]);
			}
		}

		/*
		 * 'level' is now num_levels; presumably this maxes out the raw
		 * wms for the unused higher levels and then marks them invalid
		 * — TODO confirm against vlv_raw_plane_wm_set()/vlv_invalidate_wms().
		 */
		for_each_plane_id_on_crtc(crtc, plane_id)
			vlv_raw_plane_wm_set(crtc_state, level,
					     plane_id, USHRT_MAX);
		vlv_invalidate_wms(crtc, active, level);

		crtc_state->wm.vlv.optimal = *active;
		crtc_state->wm.vlv.intermediate = *active;

		drm_dbg_kms(&dev_priv->drm,
			    "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
			    pipe_name(pipe),
			    wm->pipe[pipe].plane[PLANE_PRIMARY],
			    wm->pipe[pipe].plane[PLANE_CURSOR],
			    wm->pipe[pipe].plane[PLANE_SPRITE0],
			    wm->pipe[pipe].plane[PLANE_SPRITE1]);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
		    wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
}
7049
7050void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
7051{
7052 struct intel_plane *plane;
7053 struct intel_crtc *crtc;
7054
7055 mutex_lock(&dev_priv->wm.wm_mutex);
7056
7057 for_each_intel_plane(&dev_priv->drm, plane) {
7058 struct intel_crtc *crtc =
7059 intel_get_crtc_for_pipe(dev_priv, plane->pipe);
7060 struct intel_crtc_state *crtc_state =
7061 to_intel_crtc_state(crtc->base.state);
7062 struct intel_plane_state *plane_state =
7063 to_intel_plane_state(plane->base.state);
7064 struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
7065 const struct vlv_fifo_state *fifo_state =
7066 &crtc_state->wm.vlv.fifo_state;
7067 enum plane_id plane_id = plane->id;
7068 int level;
7069
7070 if (plane_state->uapi.visible)
7071 continue;
7072
7073 for (level = 0; level < wm_state->num_levels; level++) {
7074 struct g4x_pipe_wm *raw =
7075 &crtc_state->wm.vlv.raw[level];
7076
7077 raw->plane[plane_id] = 0;
7078
7079 wm_state->wm[level].plane[plane_id] =
7080 vlv_invert_wm_value(raw->plane[plane_id],
7081 fifo_state->plane[plane_id]);
7082 }
7083 }
7084
7085 for_each_intel_crtc(&dev_priv->drm, crtc) {
7086 struct intel_crtc_state *crtc_state =
7087 to_intel_crtc_state(crtc->base.state);
7088
7089 crtc_state->wm.vlv.intermediate =
7090 crtc_state->wm.vlv.optimal;
7091 crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
7092 }
7093
7094 vlv_program_watermarks(dev_priv);
7095
7096 mutex_unlock(&dev_priv->wm.wm_mutex);
7097}
7098
7099
7100
7101
7102
/*
 * ilk_init_lp_watermarks - disable all LP (low power) watermark levels
 *
 * Clears the enable bit in WM3..WM1 so only the pipe watermarks remain
 * active. Written in WM3->WM1 order; keep it that way unless proven safe.
 */
static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
{
	intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK) & ~WM1_LP_SR_EN);
	intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK) & ~WM1_LP_SR_EN);
	intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK) & ~WM1_LP_SR_EN);

	/*
	 * NOTE(review): the sprite LP enable bits are deliberately left
	 * untouched here — confirm against upstream history before changing.
	 */
}
7114
/*
 * ilk_wm_get_hw_state - read out the current ILK-style watermark hw state
 * @dev_priv: i915 device
 *
 * Disables the LP watermark levels, reads out each pipe's watermarks,
 * then snapshots the LP/sprite watermark registers, the DDB partitioning
 * mode and the FBC watermark enable into dev_priv->wm.hw.
 */
void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
	struct ilk_wm_values *hw = &dev_priv->wm.hw;
	struct intel_crtc *crtc;

	ilk_init_lp_watermarks(dev_priv);

	for_each_intel_crtc(&dev_priv->drm, crtc)
		ilk_pipe_wm_get_hw_state(crtc);

	hw->wm_lp[0] = intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK);
	hw->wm_lp[1] = intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK);
	hw->wm_lp[2] = intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK);

	hw->wm_lp_spr[0] = intel_uncore_read(&dev_priv->uncore, WM1S_LP_ILK);
	/* WM2S/WM3S sprite registers are only read on display ver 7+ (IVB names) */
	if (DISPLAY_VER(dev_priv) >= 7) {
		hw->wm_lp_spr[1] = intel_uncore_read(&dev_priv->uncore, WM2S_LP_IVB);
		hw->wm_lp_spr[2] = intel_uncore_read(&dev_priv->uncore, WM3S_LP_IVB);
	}

	/* DDB partitioning mode lives in different registers per platform */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hw->partitioning = (intel_uncore_read(&dev_priv->uncore, WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
	else if (IS_IVYBRIDGE(dev_priv))
		hw->partitioning = (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;

	hw->enable_fbc_wm =
		!(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS);
}
7145
7146void intel_enable_ipc(struct drm_i915_private *dev_priv)
7147{
7148 u32 val;
7149
7150 if (!HAS_IPC(dev_priv))
7151 return;
7152
7153 val = intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2);
7154
7155 if (dev_priv->ipc_enabled)
7156 val |= DISP_IPC_ENABLE;
7157 else
7158 val &= ~DISP_IPC_ENABLE;
7159
7160 intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL2, val);
7161}
7162
7163static bool intel_can_enable_ipc(struct drm_i915_private *dev_priv)
7164{
7165
7166 if (IS_SKYLAKE(dev_priv))
7167 return false;
7168
7169
7170 if (IS_KABYLAKE(dev_priv) ||
7171 IS_COFFEELAKE(dev_priv) ||
7172 IS_COMETLAKE(dev_priv))
7173 return dev_priv->dram_info.symmetric_memory;
7174
7175 return true;
7176}
7177
/*
 * intel_init_ipc - determine and apply the initial IPC state
 * @dev_priv: i915 device
 *
 * Computes whether IPC may be enabled on this platform and programs the
 * hardware accordingly. No-op on platforms without IPC support.
 */
void intel_init_ipc(struct drm_i915_private *dev_priv)
{
	if (!HAS_IPC(dev_priv))
		return;

	dev_priv->ipc_enabled = intel_can_enable_ipc(dev_priv);

	intel_enable_ipc(dev_priv);
}
7187
/* IBX PCH (south display) clock gating setup. */
static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/*
	 * Disable DPLS unit clock gating; note this is a full write of
	 * SOUTH_DSPCLK_GATE_D, not a read-modify-write.
	 */
	intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
7197
7198static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
7199{
7200 enum pipe pipe;
7201
7202 for_each_pipe(dev_priv, pipe) {
7203 intel_uncore_write(&dev_priv->uncore, DSPCNTR(pipe),
7204 intel_uncore_read(&dev_priv->uncore, DSPCNTR(pipe)) |
7205 DISPPLANE_TRICKLE_FEED_DISABLE);
7206
7207 intel_uncore_write(&dev_priv->uncore, DSPSURF(pipe), intel_uncore_read(&dev_priv->uncore, DSPSURF(pipe)));
7208 intel_uncore_posting_read(&dev_priv->uncore, DSPSURF(pipe));
7209 }
7210}
7211
/* Ironlake display clock gating and chicken-bit workarounds. */
static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	/* DPFCR/DPFC units: gating disabled; DPFD unit: gating enabled */
	dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE;

	intel_uncore_write(&dev_priv->uncore, PCH_3DCGDIS0,
		   MARIUNIT_CLOCK_GATE_DISABLE |
		   SVSMUNIT_CLOCK_GATE_DISABLE);
	intel_uncore_write(&dev_priv->uncore, PCH_3DCGDIS1,
		   VFMUNIT_CLOCK_GATE_DISABLE);

	/* DPARB gate + VS DPFD full chicken bits, plus FBC watermark disable */
	intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
		   (intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
	dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
	intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL,
		   (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
		    DISP_FBC_WM_DIS));

	/* mobile Ironlake only: extra FBC-related chicken bits */
	if (IS_IRONLAKE_M(dev_priv)) {
		/* NOTE(review): presumably FBC errata on ILK-M — confirm via git history */
		intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1,
			   intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
		intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
			   intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE);
	}

	intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, dspclk_gate);

	intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
		   intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	g4x_disable_trickle_feed(dev_priv);

	ibx_init_clock_gating(dev_priv);
}
7272
/* CougarPoint PCH clock gating and transcoder chicken bits. */
static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;
	u32 val;

	/*
	 * Disable DPLS/DPL/CP unit clock gating on the PCH; note this is a
	 * full write of SOUTH_DSPCLK_GATE_D.
	 */
	intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
		   PCH_DPLUNIT_CLOCK_GATE_DISABLE |
		   PCH_CPUNIT_CLOCK_GATE_DISABLE);
	intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN2, intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN2) |
		   DPLS_EDP_PPS_FIX_DIS);

	/*
	 * Per-transcoder: force the timing override, apply the VBT FDI RX
	 * polarity, and clear the deep-color counter/modeswitch disables.
	 */
	for_each_pipe(dev_priv, pipe) {
		val = intel_uncore_read(&dev_priv->uncore, TRANS_CHICKEN2(pipe));
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		if (dev_priv->vbt.fdi_rx_polarity_inverted)
			val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
		intel_uncore_write(&dev_priv->uncore, TRANS_CHICKEN2(pipe), val);
	}

	/* disable DP0 unit clock gating on every transcoder (full write) */
	for_each_pipe(dev_priv, pipe) {
		intel_uncore_write(&dev_priv->uncore, TRANS_CHICKEN1(pipe),
			   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
	}
}
7307
7308static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
7309{
7310 u32 tmp;
7311
7312 tmp = intel_uncore_read(&dev_priv->uncore, MCH_SSKPD);
7313 if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
7314 drm_dbg_kms(&dev_priv->drm,
7315 "Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
7316 tmp);
7317}
7318
/* Sandybridge clock gating and chicken-bit workarounds. */
static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, dspclk_gate);

	intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
		   intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1,
		   intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) |
		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/*
	 * RCPB and RCC unit clock gating stays disabled (full write of
	 * GEN6_UCGCTL2). NOTE(review): upstream history associates these
	 * bits with GPU hangs on SNB — confirm before changing.
	 */
	intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL2,
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/* display chicken bits: FBC queue disable, PAB stretch disable,
	 * DPARB gate and VS DPFD full; then keep DPARB/DPFD units ungated */
	intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1,
		   intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
		   intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D,
		   intel_uncore_read(&dev_priv->uncore, ILK_DSPCLK_GATE_D) |
		   ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);

	g4x_disable_trickle_feed(dev_priv);

	cpt_init_clock_gating(dev_priv);

	gen6_check_mch_setup(dev_priv);
}
7379
7380static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
7381{
7382
7383
7384
7385
7386 if (HAS_PCH_LPT_LP(dev_priv))
7387 intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D,
7388 intel_uncore_read(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D) |
7389 PCH_LP_PARTITION_LEVEL_DISABLE);
7390
7391
7392 intel_uncore_write(&dev_priv->uncore, TRANS_CHICKEN1(PIPE_A),
7393 intel_uncore_read(&dev_priv->uncore, TRANS_CHICKEN1(PIPE_A)) |
7394 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
7395}
7396
7397static void lpt_suspend_hw(struct drm_i915_private *dev_priv)
7398{
7399 if (HAS_PCH_LPT_LP(dev_priv)) {
7400 u32 val = intel_uncore_read(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D);
7401
7402 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
7403 intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, val);
7404 }
7405}
7406
/*
 * gen8_set_l3sqc_credits - program the L3 SQC priority credit split
 * @dev_priv: i915 device
 * @general_prio_credits: credits for general priority requests
 * @high_prio_credits: credits for high priority requests
 *
 * DOP clock gating is disabled around the register update and restored
 * afterwards; keep that bracketing intact.
 */
static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
				   int general_prio_credits,
				   int high_prio_credits)
{
	u32 misccpctl;
	u32 val;

	/* disable DOP clock gating while the credit register is updated */
	misccpctl = intel_uncore_read(&dev_priv->uncore, GEN7_MISCCPCTL);
	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);

	val = intel_uncore_read(&dev_priv->uncore, GEN8_L3SQCREG1);
	val &= ~L3_PRIO_CREDITS_MASK;
	val |= L3_GENERAL_PRIO_CREDITS(general_prio_credits);
	val |= L3_HIGH_PRIO_CREDITS(high_prio_credits);
	intel_uncore_write(&dev_priv->uncore, GEN8_L3SQCREG1, val);

	/*
	 * Posting read plus a short delay so the credit update lands before
	 * the original MISCCPCTL (DOP clock gating) value is restored.
	 */
	intel_uncore_posting_read(&dev_priv->uncore, GEN8_L3SQCREG1);
	udelay(1);
	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);
}
7432
/* Icelake clock gating / chicken-bit setup. */
static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* FBC: emit a dummy pixel with compression (full register write) */
	intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN,
		   ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);

	/* add a delay to the PM response (per bit name) */
	intel_uncore_rmw(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1,
			 0, ICL_DELAY_PMRSP);
}
7443
/* Gen12 LP (TGL/RKL/ADL-S/DG1 family) clock gating / chicken bits. */
static void gen12lp_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* FBC dummy-pixel chicken bit on TGL/RKL/ADL-S/DG1 only */
	if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv) ||
	    IS_ALDERLAKE_S(dev_priv) || IS_DG1(dev_priv))
		intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN,
			   ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);

	/* early TGL display steppings (A0..pre-C0): keep the VRH ungated */
	if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0))
		intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_3, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_3) |
			   TGL_VRH_GATING_DIS);

	/* display ver 12: clear the CLKREQ memory-up override */
	if (DISPLAY_VER(dev_priv) == 12)
		intel_uncore_rmw(&dev_priv->uncore, CLKREQ_POLICY,
				 CLKREQ_POLICY_MEM_UP_OVRD, 0);
}
7462
/* Alderlake-P: gen12lp setup plus a DPCE gating disable. */
static void adlp_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen12lp_init_clock_gating(dev_priv);

	/* keep the DPCE unit ungated (per bit name) */
	intel_de_rmw(dev_priv, GEN9_CLKGATE_DIS_5, 0, DPCE_GATING_DIS);
}
7470
/* DG1: gen12lp setup plus an early-stepping DPT gating disable. */
static void dg1_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen12lp_init_clock_gating(dev_priv);

	/* DG1 GT stepping A0 only: keep the DPT ungated */
	if (IS_DG1_GT_STEP(dev_priv, STEP_A0, STEP_B0))
		intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_3, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_3) |
			   DPT_GATING_DIS);
}
7480
7481static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
7482{
7483 if (!HAS_PCH_CNP(dev_priv))
7484 return;
7485
7486
7487 intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, intel_uncore_read(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D) |
7488 CNP_PWM_CGE_GATING_DISABLE);
7489}
7490
/* Coffeelake/Cometlake: CNP PCH + gen9 setup plus FBC-related chicken bits. */
static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	cnp_init_clock_gating(dev_priv);
	gen9_init_clock_gating(dev_priv);

	/* open the FBC LLC read path fully (per bit name) */
	intel_uncore_write(&dev_priv->uncore, FBC_LLC_READ_CTRL, intel_uncore_read(&dev_priv->uncore, FBC_LLC_READ_CTRL) |
		   FBC_LLC_FULLY_OPEN);

	/* keep FBC watermarks disabled */
	intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
		   DISP_FBC_WM_DIS);

	/* have FBC nuke the compressed fb on any modification */
	intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) |
		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
7514
/* Kabylake: gen9 setup plus KBL-specific early-stepping workarounds. */
static void kbl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* open the FBC LLC read path fully (per bit name) */
	intel_uncore_write(&dev_priv->uncore, FBC_LLC_READ_CTRL, intel_uncore_read(&dev_priv->uncore, FBC_LLC_READ_CTRL) |
		   FBC_LLC_FULLY_OPEN);

	/* GT steppings before C0: keep the SDE unit ungated */
	if (IS_KBL_GT_STEP(dev_priv, 0, STEP_C0))
		intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
			   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* GT steppings before C0: keep the GAM unit ungated */
	if (IS_KBL_GT_STEP(dev_priv, 0, STEP_C0))
		intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1, intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) |
			   GEN6_GAMUNIT_CLOCK_GATE_DISABLE);

	/* keep FBC watermarks disabled */
	intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
		   DISP_FBC_WM_DIS);

	/* have FBC nuke the compressed fb on any modification */
	intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) |
		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
7547
/* Skylake: gen9 setup plus SKL-specific chicken bits. */
static void skl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* disable DOP clock gating */
	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, intel_uncore_read(&dev_priv->uncore, GEN7_MISCCPCTL) &
		   ~GEN7_DOP_CLOCK_GATE_ENABLE);

	/* open the FBC LLC read path fully (per bit name) */
	intel_uncore_write(&dev_priv->uncore, FBC_LLC_READ_CTRL, intel_uncore_read(&dev_priv->uncore, FBC_LLC_READ_CTRL) |
		   FBC_LLC_FULLY_OPEN);

	/* keep FBC watermarks disabled */
	intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
		   DISP_FBC_WM_DIS);

	/* have FBC nuke the compressed fb on any modification */
	intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) |
		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);

	/* separate rmw for the dummy0 disable bit (kept as two writes) */
	intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) |
		   ILK_DPFC_DISABLE_DUMMY0);
}
7581
/* Broadwell clock gating and chicken-bit workarounds. */
static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	/* disable the FBC queue on pipe A (per bit name) */
	intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A),
		   intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A)) |
		   HSW_FBCQ_DIS);

	/* arbiter priority for "sort of last" (per bit name) */
	intel_uncore_write(&dev_priv->uncore, GAM_ECOCHK, intel_uncore_read(&dev_priv->uncore, GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* mask vblank during self-refresh for display audio (DPA) */
	intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1,
		   intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);

	for_each_pipe(dev_priv, pipe) {
		/* mask vblank during self-refresh (DPRS) on each pipe */
		intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe),
			   intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe)) |
			   BDW_DPRS_MASK_VBLANK_SRD);
	}

	/* clear the DS/VS reference count FFME bits */
	intel_uncore_write(&dev_priv->uncore, GEN7_FF_THREAD_MODE,
		   intel_uncore_read(&dev_priv->uncore, GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	intel_uncore_write(&dev_priv->uncore, GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* keep the SDE unit ungated */
	intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* BDW L3 SQC credit split: 30 general / 2 high priority */
	gen8_set_l3sqc_credits(dev_priv, 30, 2);

	intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR2_1, intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR2_1)
		   | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);

	lpt_init_clock_gating(dev_priv);

	/* keep the EU TC unit ungated */
	intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1,
		   intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) | GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
}
7635
/* Haswell clock gating and chicken-bit workarounds. */
static void hsw_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* disable the FBC queue on pipe A (per bit name) */
	intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A),
		   intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A)) |
		   HSW_FBCQ_DIS);

	/* MBC unit SQ interrupt moderation (per bit name) */
	intel_uncore_write(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   intel_uncore_read(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	/* arbiter priority for "sort of last" (per bit name) */
	intel_uncore_write(&dev_priv->uncore, GAM_ECOCHK, intel_uncore_read(&dev_priv->uncore, GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	lpt_init_clock_gating(dev_priv);
}
7653
/* Ivybridge clock gating and chicken-bit workarounds. */
static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 snpcr;

	intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);

	/* disable the FBC queue (per bit name) */
	intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1,
		   intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS);

	/* DGMG request/done fix disables (full write of IVB_CHICKEN3) */
	intel_uncore_write(&dev_priv->uncore, IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* disable DOP clock gating; GT2 parts have a second ROW_CHICKEN2 */
	if (IS_IVB_GT1(dev_priv))
		intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	else {
		intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
		intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2_GT2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	}

	/* keep the RCZ unit ungated (full write of GEN6_UCGCTL2) */
	intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* MBC unit SQ interrupt moderation (per bit name) */
	intel_uncore_write(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   intel_uncore_read(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	g4x_disable_trickle_feed(dev_priv);

	/* set the MBC snoop control to "medium" */
	snpcr = intel_uncore_read(&dev_priv->uncore, GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= GEN6_MBC_SNPCR_MED;
	intel_uncore_write(&dev_priv->uncore, GEN6_MBCUNIT_SNPCR, snpcr);

	if (!HAS_PCH_NOP(dev_priv))
		cpt_init_clock_gating(dev_priv);

	gen6_check_mch_setup(dev_priv);
}
7705
/* Valleyview clock gating and chicken-bit workarounds. */
static void vlv_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* DGMG request/done fix disables (full write of IVB_CHICKEN3) */
	intel_uncore_write(&dev_priv->uncore, IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* disable DOP clock gating */
	intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2,
		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));

	/* MBC unit SQ interrupt moderation (per bit name) */
	intel_uncore_write(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   intel_uncore_read(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	/* keep the RCZ unit ungated (full write of GEN6_UCGCTL2) */
	intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* keep the L3 bank 2x clock ungated */
	intel_uncore_write(&dev_priv->uncore, GEN7_UCGCTL4,
		   intel_uncore_read(&dev_priv->uncore, GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);

	/* disable GUnit clock gating entirely (full write, GCFG_DIS) */
	intel_uncore_write(&dev_priv->uncore, VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}
7742
/* Cherryview clock gating and chicken-bit workarounds. */
static void chv_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* clear the DS/VS reference count FFME bits */
	intel_uncore_write(&dev_priv->uncore, GEN7_FF_THREAD_MODE,
		   intel_uncore_read(&dev_priv->uncore, GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	/* disable the RC semaphore idle message */
	intel_uncore_write(&dev_priv->uncore, GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* keep the CS unit ungated */
	intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1, intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* keep the SDE unit ungated */
	intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* CHV L3 SQC credit split: 38 general / 2 high priority */
	gen8_set_l3sqc_credits(dev_priv, 38, 2);
}
7770
7771static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
7772{
7773 u32 dspclk_gate;
7774
7775 intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D1, 0);
7776 intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
7777 GS_UNIT_CLOCK_GATE_DISABLE |
7778 CL_UNIT_CLOCK_GATE_DISABLE);
7779 intel_uncore_write(&dev_priv->uncore, RAMCLK_GATE_D, 0);
7780 dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
7781 OVRUNIT_CLOCK_GATE_DISABLE |
7782 OVCUNIT_CLOCK_GATE_DISABLE;
7783 if (IS_GM45(dev_priv))
7784 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
7785 intel_uncore_write(&dev_priv->uncore, DSPCLK_GATE_D, dspclk_gate);
7786
7787 g4x_disable_trickle_feed(dev_priv);
7788}
7789
7790static void i965gm_init_clock_gating(struct drm_i915_private *dev_priv)
7791{
7792 struct intel_uncore *uncore = &dev_priv->uncore;
7793
7794 intel_uncore_write(uncore, RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
7795 intel_uncore_write(uncore, RENCLK_GATE_D2, 0);
7796 intel_uncore_write(uncore, DSPCLK_GATE_D, 0);
7797 intel_uncore_write(uncore, RAMCLK_GATE_D, 0);
7798 intel_uncore_write16(uncore, DEUC, 0);
7799 intel_uncore_write(uncore,
7800 MI_ARB_STATE,
7801 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
7802}
7803
7804static void i965g_init_clock_gating(struct drm_i915_private *dev_priv)
7805{
7806 intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
7807 I965_RCC_CLOCK_GATE_DISABLE |
7808 I965_RCPB_CLOCK_GATE_DISABLE |
7809 I965_ISC_CLOCK_GATE_DISABLE |
7810 I965_FBC_CLOCK_GATE_DISABLE);
7811 intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D2, 0);
7812 intel_uncore_write(&dev_priv->uncore, MI_ARB_STATE,
7813 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
7814}
7815
/* Gen3 (915/945 class) clock gating and arbiter setup. */
static void gen3_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 dstate = intel_uncore_read(&dev_priv->uncore, D_STATE);

	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
		DSTATE_DOT_CLOCK_GATING;
	intel_uncore_write(&dev_priv->uncore, D_STATE, dstate);

	/* Pineview only: restrict ECO gating to CX */
	if (IS_PINEVIEW(dev_priv))
		intel_uncore_write(&dev_priv->uncore, ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));

	/* disable the ECO flip-done bit */
	intel_uncore_write(&dev_priv->uncore, ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));

	/* enable the AGP busy interrupt */
	intel_uncore_write(&dev_priv->uncore, INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));

	/* allow memory writes while in C3 */
	intel_uncore_write(&dev_priv->uncore, MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));

	intel_uncore_write(&dev_priv->uncore, MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}
7839
/* i85x clock gating and arbiter setup. */
static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
{
	intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);

	/* enable AGP busy interrupt, leave 830-style AGP busy mode off */
	intel_uncore_write(&dev_priv->uncore, MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
		   _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));

	intel_uncore_write(&dev_priv->uncore, MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));

	/* have FBC ignore 3D activity (per bit name) */
	intel_uncore_write(&dev_priv->uncore, SCPD0,
		   _MASKED_BIT_ENABLE(SCPD_FBC_IGNORE_3D));
}
7861
/* i830: disable trickle feed for both display planes. */
static void i830_init_clock_gating(struct drm_i915_private *dev_priv)
{
	intel_uncore_write(&dev_priv->uncore, MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
}
7868
/* Dispatch to the per-platform clock gating implementation selected in
 * intel_init_clock_gating_hooks(). */
void intel_init_clock_gating(struct drm_i915_private *dev_priv)
{
	dev_priv->clock_gating_funcs->init_clock_gating(dev_priv);
}
7873
/* Platform-specific hardware quiescing before suspend (LPT PCH only). */
void intel_suspend_hw(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_LPT(dev_priv))
		lpt_suspend_hw(dev_priv);
}
7879
/* Fallback for platforms with no clock gating hook (see MISSING_CASE below). */
static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
{
	drm_dbg_kms(&dev_priv->drm,
		    "No clock gating settings or workarounds applied.\n");
}
7885
/*
 * CG_FUNCS - instantiate a drm_i915_clock_gating_funcs vtable wired to the
 * given platform's <platform>_init_clock_gating() implementation.
 */
#define CG_FUNCS(platform) \
static const struct drm_i915_clock_gating_funcs platform##_clock_gating_funcs = { \
	.init_clock_gating = platform##_init_clock_gating, \
}

CG_FUNCS(adlp);
CG_FUNCS(dg1);
CG_FUNCS(gen12lp);
CG_FUNCS(icl);
CG_FUNCS(cfl);
CG_FUNCS(skl);
CG_FUNCS(kbl);
CG_FUNCS(bxt);
CG_FUNCS(glk);
CG_FUNCS(bdw);
CG_FUNCS(chv);
CG_FUNCS(hsw);
CG_FUNCS(ivb);
CG_FUNCS(vlv);
CG_FUNCS(gen6);
CG_FUNCS(ilk);
CG_FUNCS(g4x);
CG_FUNCS(i965gm);
CG_FUNCS(i965g);
CG_FUNCS(gen3);
CG_FUNCS(i85x);
CG_FUNCS(i830);
CG_FUNCS(nop);
#undef CG_FUNCS
7915
7916
7917
7918
7919
7920
7921
7922
7923
7924
/*
 * intel_init_clock_gating_hooks - select the per-platform clock gating vtable
 * @dev_priv: i915 device
 *
 * Branch ordering matters: platform-specific checks (ADL-P, DG1, CFL, SKL,
 * KBL, ...) must precede the generic GRAPHICS_VER checks that would also
 * match those platforms.
 */
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
	if (IS_ALDERLAKE_P(dev_priv))
		dev_priv->clock_gating_funcs = &adlp_clock_gating_funcs;
	else if (IS_DG1(dev_priv))
		dev_priv->clock_gating_funcs = &dg1_clock_gating_funcs;
	else if (GRAPHICS_VER(dev_priv) == 12)
		dev_priv->clock_gating_funcs = &gen12lp_clock_gating_funcs;
	else if (GRAPHICS_VER(dev_priv) == 11)
		dev_priv->clock_gating_funcs = &icl_clock_gating_funcs;
	else if (IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv))
		dev_priv->clock_gating_funcs = &cfl_clock_gating_funcs;
	else if (IS_SKYLAKE(dev_priv))
		dev_priv->clock_gating_funcs = &skl_clock_gating_funcs;
	else if (IS_KABYLAKE(dev_priv))
		dev_priv->clock_gating_funcs = &kbl_clock_gating_funcs;
	else if (IS_BROXTON(dev_priv))
		dev_priv->clock_gating_funcs = &bxt_clock_gating_funcs;
	else if (IS_GEMINILAKE(dev_priv))
		dev_priv->clock_gating_funcs = &glk_clock_gating_funcs;
	else if (IS_BROADWELL(dev_priv))
		dev_priv->clock_gating_funcs = &bdw_clock_gating_funcs;
	else if (IS_CHERRYVIEW(dev_priv))
		dev_priv->clock_gating_funcs = &chv_clock_gating_funcs;
	else if (IS_HASWELL(dev_priv))
		dev_priv->clock_gating_funcs = &hsw_clock_gating_funcs;
	else if (IS_IVYBRIDGE(dev_priv))
		dev_priv->clock_gating_funcs = &ivb_clock_gating_funcs;
	else if (IS_VALLEYVIEW(dev_priv))
		dev_priv->clock_gating_funcs = &vlv_clock_gating_funcs;
	else if (GRAPHICS_VER(dev_priv) == 6)
		dev_priv->clock_gating_funcs = &gen6_clock_gating_funcs;
	else if (GRAPHICS_VER(dev_priv) == 5)
		dev_priv->clock_gating_funcs = &ilk_clock_gating_funcs;
	else if (IS_G4X(dev_priv))
		dev_priv->clock_gating_funcs = &g4x_clock_gating_funcs;
	else if (IS_I965GM(dev_priv))
		dev_priv->clock_gating_funcs = &i965gm_clock_gating_funcs;
	else if (IS_I965G(dev_priv))
		dev_priv->clock_gating_funcs = &i965g_clock_gating_funcs;
	else if (GRAPHICS_VER(dev_priv) == 3)
		dev_priv->clock_gating_funcs = &gen3_clock_gating_funcs;
	else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
		dev_priv->clock_gating_funcs = &i85x_clock_gating_funcs;
	else if (GRAPHICS_VER(dev_priv) == 2)
		dev_priv->clock_gating_funcs = &i830_clock_gating_funcs;
	else {
		/* unknown platform: warn and fall back to the no-op hooks */
		MISSING_CASE(INTEL_DEVID(dev_priv));
		dev_priv->clock_gating_funcs = &nop_clock_gating_funcs;
	}
}
7976
7977static const struct drm_i915_wm_disp_funcs skl_wm_funcs = {
7978 .compute_global_watermarks = skl_compute_wm,
7979};
7980
7981static const struct drm_i915_wm_disp_funcs ilk_wm_funcs = {
7982 .compute_pipe_wm = ilk_compute_pipe_wm,
7983 .compute_intermediate_wm = ilk_compute_intermediate_wm,
7984 .initial_watermarks = ilk_initial_watermarks,
7985 .optimize_watermarks = ilk_optimize_watermarks,
7986};
7987
/*
 * VLV/CHV: like ILK-style two-phase watermarks, plus an extra hook that
 * updates the plane FIFO split during the atomic update itself.
 */
static const struct drm_i915_wm_disp_funcs vlv_wm_funcs = {
	.compute_pipe_wm = vlv_compute_pipe_wm,
	.compute_intermediate_wm = vlv_compute_intermediate_wm,
	.initial_watermarks = vlv_initial_watermarks,
	.optimize_watermarks = vlv_optimize_watermarks,
	.atomic_update_watermarks = vlv_atomic_update_fifo,
};
7995
/* G4X: two-phase (intermediate/optimal) per-pipe watermarks. */
static const struct drm_i915_wm_disp_funcs g4x_wm_funcs = {
	.compute_pipe_wm = g4x_compute_pipe_wm,
	.compute_intermediate_wm = g4x_compute_intermediate_wm,
	.initial_watermarks = g4x_initial_watermarks,
	.optimize_watermarks = g4x_optimize_watermarks,
};
8002
/* Pineview (with a known CxSR latency): legacy single-shot watermark update. */
static const struct drm_i915_wm_disp_funcs pnv_wm_funcs = {
	.update_wm = pnv_update_wm,
};
8006
/* Gen4 (display ver 4): legacy single-shot watermark update. */
static const struct drm_i915_wm_disp_funcs i965_wm_funcs = {
	.update_wm = i965_update_wm,
};
8010
/* Gen3, and multi-pipe gen2: legacy single-shot watermark update. */
static const struct drm_i915_wm_disp_funcs i9xx_wm_funcs = {
	.update_wm = i9xx_update_wm,
};
8014
/* Single-pipe gen2 (845-class): legacy single-shot watermark update. */
static const struct drm_i915_wm_disp_funcs i845_wm_funcs = {
	.update_wm = i845_update_wm,
};
8018
/*
 * Fallback vtable: no watermark handling at all. Used when plane latencies
 * could not be read, CxSR is disabled, or the platform is unrecognized.
 */
static const struct drm_i915_wm_disp_funcs nop_funcs = {
};
8021
8022
8023void intel_init_pm(struct drm_i915_private *dev_priv)
8024{
8025
8026 if (IS_PINEVIEW(dev_priv))
8027 pnv_get_mem_freq(dev_priv);
8028 else if (GRAPHICS_VER(dev_priv) == 5)
8029 ilk_get_mem_freq(dev_priv);
8030
8031 if (intel_has_sagv(dev_priv))
8032 skl_setup_sagv_block_time(dev_priv);
8033
8034
8035 if (DISPLAY_VER(dev_priv) >= 9) {
8036 skl_setup_wm_latency(dev_priv);
8037 dev_priv->wm_disp = &skl_wm_funcs;
8038 } else if (HAS_PCH_SPLIT(dev_priv)) {
8039 ilk_setup_wm_latency(dev_priv);
8040
8041 if ((DISPLAY_VER(dev_priv) == 5 && dev_priv->wm.pri_latency[1] &&
8042 dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
8043 (DISPLAY_VER(dev_priv) != 5 && dev_priv->wm.pri_latency[0] &&
8044 dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
8045 dev_priv->wm_disp = &ilk_wm_funcs;
8046 } else {
8047 drm_dbg_kms(&dev_priv->drm,
8048 "Failed to read display plane latency. "
8049 "Disable CxSR\n");
8050 dev_priv->wm_disp = &nop_funcs;
8051 }
8052 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
8053 vlv_setup_wm_latency(dev_priv);
8054 dev_priv->wm_disp = &vlv_wm_funcs;
8055 } else if (IS_G4X(dev_priv)) {
8056 g4x_setup_wm_latency(dev_priv);
8057 dev_priv->wm_disp = &g4x_wm_funcs;
8058 } else if (IS_PINEVIEW(dev_priv)) {
8059 if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
8060 dev_priv->is_ddr3,
8061 dev_priv->fsb_freq,
8062 dev_priv->mem_freq)) {
8063 drm_info(&dev_priv->drm,
8064 "failed to find known CxSR latency "
8065 "(found ddr%s fsb freq %d, mem freq %d), "
8066 "disabling CxSR\n",
8067 (dev_priv->is_ddr3 == 1) ? "3" : "2",
8068 dev_priv->fsb_freq, dev_priv->mem_freq);
8069
8070 intel_set_memory_cxsr(dev_priv, false);
8071 dev_priv->wm_disp = &nop_funcs;
8072 } else
8073 dev_priv->wm_disp = &pnv_wm_funcs;
8074 } else if (DISPLAY_VER(dev_priv) == 4) {
8075 dev_priv->wm_disp = &i965_wm_funcs;
8076 } else if (DISPLAY_VER(dev_priv) == 3) {
8077 dev_priv->wm_disp = &i9xx_wm_funcs;
8078 } else if (DISPLAY_VER(dev_priv) == 2) {
8079 if (INTEL_NUM_PIPES(dev_priv) == 1)
8080 dev_priv->wm_disp = &i845_wm_funcs;
8081 else
8082 dev_priv->wm_disp = &i9xx_wm_funcs;
8083 } else {
8084 drm_err(&dev_priv->drm,
8085 "unexpected fall-through in %s\n", __func__);
8086 dev_priv->wm_disp = &nop_funcs;
8087 }
8088}
8089
8090void intel_pm_setup(struct drm_i915_private *dev_priv)
8091{
8092 dev_priv->runtime_pm.suspended = false;
8093 atomic_set(&dev_priv->runtime_pm.wakeref_count, 0);
8094}
8095
8096static struct intel_global_state *intel_dbuf_duplicate_state(struct intel_global_obj *obj)
8097{
8098 struct intel_dbuf_state *dbuf_state;
8099
8100 dbuf_state = kmemdup(obj->state, sizeof(*dbuf_state), GFP_KERNEL);
8101 if (!dbuf_state)
8102 return NULL;
8103
8104 return &dbuf_state->base;
8105}
8106
/* Free a dbuf global state previously allocated by init/duplicate. */
static void intel_dbuf_destroy_state(struct intel_global_obj *obj,
				     struct intel_global_state *state)
{
	kfree(state);
}
8112
/* Global-state ops tying the dbuf object into the atomic state machinery. */
static const struct intel_global_state_funcs intel_dbuf_funcs = {
	.atomic_duplicate_state = intel_dbuf_duplicate_state,
	.atomic_destroy_state = intel_dbuf_destroy_state,
};
8117
8118struct intel_dbuf_state *
8119intel_atomic_get_dbuf_state(struct intel_atomic_state *state)
8120{
8121 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8122 struct intel_global_state *dbuf_state;
8123
8124 dbuf_state = intel_atomic_get_global_obj_state(state, &dev_priv->dbuf.obj);
8125 if (IS_ERR(dbuf_state))
8126 return ERR_CAST(dbuf_state);
8127
8128 return to_intel_dbuf_state(dbuf_state);
8129}
8130
8131int intel_dbuf_init(struct drm_i915_private *dev_priv)
8132{
8133 struct intel_dbuf_state *dbuf_state;
8134
8135 dbuf_state = kzalloc(sizeof(*dbuf_state), GFP_KERNEL);
8136 if (!dbuf_state)
8137 return -ENOMEM;
8138
8139 intel_atomic_global_obj_init(dev_priv, &dev_priv->dbuf.obj,
8140 &dbuf_state->base, &intel_dbuf_funcs);
8141
8142 return 0;
8143}
8144
8145
8146
8147
8148
8149static void update_mbus_pre_enable(struct intel_atomic_state *state)
8150{
8151 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8152 u32 mbus_ctl, dbuf_min_tracker_val;
8153 enum dbuf_slice slice;
8154 const struct intel_dbuf_state *dbuf_state =
8155 intel_atomic_get_new_dbuf_state(state);
8156
8157 if (!IS_ALDERLAKE_P(dev_priv))
8158 return;
8159
8160
8161
8162
8163
8164 if (dbuf_state->joined_mbus) {
8165 mbus_ctl = MBUS_HASHING_MODE_1x4 | MBUS_JOIN |
8166 MBUS_JOIN_PIPE_SELECT_NONE;
8167 dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(3);
8168 } else {
8169 mbus_ctl = MBUS_HASHING_MODE_2x2 |
8170 MBUS_JOIN_PIPE_SELECT_NONE;
8171 dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(1);
8172 }
8173
8174 intel_de_rmw(dev_priv, MBUS_CTL,
8175 MBUS_HASHING_MODE_MASK | MBUS_JOIN |
8176 MBUS_JOIN_PIPE_SELECT_MASK, mbus_ctl);
8177
8178 for_each_dbuf_slice(dev_priv, slice)
8179 intel_de_rmw(dev_priv, DBUF_CTL_S(slice),
8180 DBUF_MIN_TRACKER_STATE_SERVICE_MASK,
8181 dbuf_min_tracker_val);
8182}
8183
8184void intel_dbuf_pre_plane_update(struct intel_atomic_state *state)
8185{
8186 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8187 const struct intel_dbuf_state *new_dbuf_state =
8188 intel_atomic_get_new_dbuf_state(state);
8189 const struct intel_dbuf_state *old_dbuf_state =
8190 intel_atomic_get_old_dbuf_state(state);
8191
8192 if (!new_dbuf_state ||
8193 ((new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices)
8194 && (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus)))
8195 return;
8196
8197 WARN_ON(!new_dbuf_state->base.changed);
8198
8199 update_mbus_pre_enable(state);
8200 gen9_dbuf_slices_update(dev_priv,
8201 old_dbuf_state->enabled_slices |
8202 new_dbuf_state->enabled_slices);
8203}
8204
8205void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
8206{
8207 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8208 const struct intel_dbuf_state *new_dbuf_state =
8209 intel_atomic_get_new_dbuf_state(state);
8210 const struct intel_dbuf_state *old_dbuf_state =
8211 intel_atomic_get_old_dbuf_state(state);
8212
8213 if (!new_dbuf_state ||
8214 ((new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices)
8215 && (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus)))
8216 return;
8217
8218 WARN_ON(!new_dbuf_state->base.changed);
8219
8220 gen9_dbuf_slices_update(dev_priv,
8221 new_dbuf_state->enabled_slices);
8222}
8223