1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27#include <linux/i2c.h>
28#include <linux/input.h>
29#include <linux/intel-iommu.h>
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/dma-resv.h>
33#include <linux/slab.h>
34#include <linux/vgaarb.h>
35
36#include <drm/drm_atomic.h>
37#include <drm/drm_atomic_helper.h>
38#include <drm/drm_atomic_uapi.h>
39#include <drm/drm_dp_helper.h>
40#include <drm/drm_edid.h>
41#include <drm/drm_fourcc.h>
42#include <drm/drm_plane_helper.h>
43#include <drm/drm_probe_helper.h>
44#include <drm/drm_rect.h>
45#include <drm/i915_drm.h>
46
47#include "display/intel_crt.h"
48#include "display/intel_ddi.h"
49#include "display/intel_dp.h"
50#include "display/intel_dsi.h"
51#include "display/intel_dvo.h"
52#include "display/intel_gmbus.h"
53#include "display/intel_hdmi.h"
54#include "display/intel_lvds.h"
55#include "display/intel_sdvo.h"
56#include "display/intel_tv.h"
57#include "display/intel_vdsc.h"
58
59#include "i915_drv.h"
60#include "i915_trace.h"
61#include "intel_acpi.h"
62#include "intel_atomic.h"
63#include "intel_atomic_plane.h"
64#include "intel_bw.h"
65#include "intel_cdclk.h"
66#include "intel_color.h"
67#include "intel_display_types.h"
68#include "intel_fbc.h"
69#include "intel_fbdev.h"
70#include "intel_fifo_underrun.h"
71#include "intel_frontbuffer.h"
72#include "intel_hdcp.h"
73#include "intel_hotplug.h"
74#include "intel_overlay.h"
75#include "intel_pipe_crc.h"
76#include "intel_pm.h"
77#include "intel_psr.h"
78#include "intel_quirks.h"
79#include "intel_sideband.h"
80#include "intel_sprite.h"
81#include "intel_tc.h"
82
83
/* Primary plane pixel formats supported on i8xx (gen2/3) hardware. */
static const u32 i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_XRGB8888,
};
90
91
/* Primary plane pixel formats supported on i965+, adding 10bpc XRGB/XBGR. */
static const u32 i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};
100
/*
 * Framebuffer modifiers for i9xx primary planes; the list is terminated
 * by DRM_FORMAT_MOD_INVALID as the DRM core expects.
 */
static const u64 i9xx_format_modifiers[] = {
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};
106
107
/* Cursor planes only support pre-multiplied ARGB8888. */
static const u32 intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};
111
/* Cursor planes are always linear; INVALID terminates the list. */
static const u64 cursor_format_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};
116
/* Forward declarations for static helpers defined later in this file. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct intel_framebuffer *ifb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd);
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
static void intel_finish_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
static void intel_crtc_init_scalers(struct intel_crtc *crtc,
				    struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
					 struct drm_modeset_acquire_ctx *ctx);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
148
/*
 * Per-platform/per-output DPLL divider limits. Each {min, max} pair bounds
 * one term of the PLL equation; p2 additionally carries the dot-clock
 * threshold (dot_limit) used to choose between its slow and fast values.
 */
struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};
159
160
161int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
162{
163 int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
164
165
166 hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
167 CCK_FUSE_HPLL_FREQ_MASK;
168
169 return vco_freq[hpll_freq] * 1000;
170}
171
172int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
173 const char *name, u32 reg, int ref_freq)
174{
175 u32 val;
176 int divider;
177
178 val = vlv_cck_read(dev_priv, reg);
179 divider = val & CCK_FREQUENCY_VALUES;
180
181 WARN((val & CCK_FREQUENCY_STATUS) !=
182 (divider << CCK_FREQUENCY_STATUS_SHIFT),
183 "%s change in progress\n", name);
184
185 return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
186}
187
188int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
189 const char *name, u32 reg)
190{
191 int hpll;
192
193 vlv_cck_get(dev_priv);
194
195 if (dev_priv->hpll_freq == 0)
196 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
197
198 hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
199
200 vlv_cck_put(dev_priv);
201
202 return hpll;
203}
204
205static void intel_update_czclk(struct drm_i915_private *dev_priv)
206{
207 if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
208 return;
209
210 dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
211 CCK_CZ_CLOCK_CONTROL);
212
213 DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
214}
215
216static inline u32
217intel_fdi_link_freq(struct drm_i915_private *dev_priv,
218 const struct intel_crtc_state *pipe_config)
219{
220 if (HAS_DDI(dev_priv))
221 return pipe_config->port_clock;
222 else
223 return dev_priv->fdi_pll_freq;
224}
225
/* i8xx DPLL limits for DAC (CRT) outputs. */
static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};
238
/* i8xx DPLL limits for DVO outputs; differs from DAC only in p2_fast. */
static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};
251
/* i8xx DPLL limits for LVDS outputs. */
static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};
264
/* i9xx DPLL limits for SDVO/DAC outputs. */
static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};
277
/* i9xx DPLL limits for LVDS outputs. */
static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};
290
291
/* G4x DPLL limits for SDVO outputs. */
static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};
306
/* G4x DPLL limits for HDMI/DAC outputs. */
static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};
319
/* G4x DPLL limits for single-channel LVDS; p2 is fixed (dot_limit = 0). */
static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};
333
/* G4x DPLL limits for dual-channel LVDS; p2 is fixed (dot_limit = 0). */
static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};
347
/*
 * Pineview DPLL limits for SDVO outputs. Pineview only uses m2 as the
 * divisor (m1 is forced to 0), hence the degenerate m1 range.
 */
static const struct intel_limit intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* m1 unused on Pineview; the effective divisor is m2 alone */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};
362
/* Pineview DPLL limits for LVDS outputs (m1 unused, as for SDVO). */
static const struct intel_limit intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};
375
376
377
378
379
380
/* Ironlake DPLL limits for DAC (CRT) outputs. */
static const struct intel_limit intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};
393
/* Ironlake DPLL limits for single-channel LVDS. */
static const struct intel_limit intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};
406
/* Ironlake DPLL limits for dual-channel LVDS. */
static const struct intel_limit intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};
419
420
/*
 * Ironlake single-channel LVDS limits for the 100 MHz refclk case
 * (tighter n/m ranges than the regular single-channel table).
 */
static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};
433
/* Ironlake dual-channel LVDS limits for the 100 MHz refclk case. */
static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};
446
/*
 * Valleyview DPLL limits. The dot limits are expressed in terms of the
 * 5x fast clock (see vlv_calc_dpll_params(), which returns dot / 5),
 * which is why both bounds carry the "* 5" factor.
 */
static const struct intel_limit intel_limits_vlv = {
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 },
};
462
/*
 * Cherryview DPLL limits. As on VLV the dot limits are in 5x fast-clock
 * units. m2 is stored in 10.22 fixed point (see chv_calc_dpll_params(),
 * which divides by n << 22), hence the "<< 22" on its bounds.
 */
static const struct intel_limit intel_limits_chv = {
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};
478
/*
 * Broxton DPLL limits; dot clock is effectively unbounded here and m2
 * uses the same fixed-point encoding as CHV.
 */
static const struct intel_limit intel_limits_bxt = {
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* m2 is in 10.22 fixed point */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};
490
491
492static void
493skl_wa_827(struct drm_i915_private *dev_priv, int pipe, bool enable)
494{
495 if (enable)
496 I915_WRITE(CLKGATE_DIS_PSL(pipe),
497 I915_READ(CLKGATE_DIS_PSL(pipe)) |
498 DUPS1_GATING_DIS | DUPS2_GATING_DIS);
499 else
500 I915_WRITE(CLKGATE_DIS_PSL(pipe),
501 I915_READ(CLKGATE_DIS_PSL(pipe)) &
502 ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
503}
504
505
506static void
507icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
508 bool enable)
509{
510 if (enable)
511 I915_WRITE(CLKGATE_DIS_PSL(pipe),
512 I915_READ(CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
513 else
514 I915_WRITE(CLKGATE_DIS_PSL(pipe),
515 I915_READ(CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
516}
517
/* Whether this crtc state requires a full modeset (per the atomic core). */
static bool
needs_modeset(const struct intel_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(&state->base);
}
523
524
525
526
527
528
529
530
531
532
533static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
534{
535 clock->m = clock->m2 + 2;
536 clock->p = clock->p1 * clock->p2;
537 if (WARN_ON(clock->n == 0 || clock->p == 0))
538 return 0;
539 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
540 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
541
542 return clock->dot;
543}
544
545static u32 i9xx_dpll_compute_m(struct dpll *dpll)
546{
547 return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
548}
549
550static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
551{
552 clock->m = i9xx_dpll_compute_m(clock);
553 clock->p = clock->p1 * clock->p2;
554 if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
555 return 0;
556 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
557 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
558
559 return clock->dot;
560}
561
562static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
563{
564 clock->m = clock->m1 * clock->m2;
565 clock->p = clock->p1 * clock->p2;
566 if (WARN_ON(clock->n == 0 || clock->p == 0))
567 return 0;
568 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
569 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
570
571 return clock->dot / 5;
572}
573
/*
 * Fill in the derived fields of @clock for CHV. m2 (and hence m) is in
 * 10.22 fixed point, so the vco computation uses a 64-bit multiply and
 * divides by n << 22 to shift the fraction back out. Like VLV, the PLL
 * runs at 5x the pixel clock, so the dot clock returned is vco/p/5.
 * Returns 0 on a would-be division by zero.
 */
int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
					   clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}
586
/*
 * Abort PLL validation: ignores its (purely descriptive) string argument
 * and returns false from the *enclosing* function. Only meaningful inside
 * intel_PLL_is_valid() below.
 */
#define INTELPllInvalid(s) do { return false; } while (0)
588
589
590
591
592
/*
 * Check whether the computed divider set in @clock satisfies @limit for
 * this platform. Note each INTELPllInvalid() expands to "return false",
 * so the first failing check exits the function.
 */
static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
			       const struct intel_limit *limit,
			       const struct dpll *clock)
{
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	/* m1 > m2 is required except on PNV/VLV/CHV/BXT */
	if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	/* VLV/CHV/BXT limit tables don't populate the combined m and p ranges */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !IS_GEN9_LP(dev_priv)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");

	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}
629
630static int
631i9xx_select_p2_div(const struct intel_limit *limit,
632 const struct intel_crtc_state *crtc_state,
633 int target)
634{
635 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
636
637 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
638
639
640
641
642
643 if (intel_is_dual_link_lvds(dev_priv))
644 return limit->p2.p2_fast;
645 else
646 return limit->p2.p2_slow;
647 } else {
648 if (target < limit->p2.dot_limit)
649 return limit->p2.p2_slow;
650 else
651 return limit->p2.p2_fast;
652 }
653}
654
655
656
657
658
659
660
661
662
663
664
/*
 * Exhaustively search the i9xx divider space (m1, m2, n, p1; p2 fixed
 * by output type) for the combination whose dot clock is closest to
 * @target. The best candidate is stored in @best_clock; if @match_clock
 * is given, only candidates with the same p divisor are considered.
 * Returns true iff some valid candidate was found.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int err = target; /* best error so far; initialized to worst case */

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* m1 must exceed m2 (see intel_PLL_is_valid) */
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}
712
713
714
715
716
717
718
719
720
721
722
/*
 * Pineview variant of i9xx_find_best_dpll(): same exhaustive search but
 * using pnv_calc_dpll_params() and without the m2 < m1 restriction
 * (Pineview is exempted from that check in intel_PLL_is_valid()).
 * Returns true iff some valid candidate was found.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int err = target; /* best error so far; initialized to worst case */

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}
768
769
770
771
772
773
774
775
776
777
778
/*
 * G4x divider search. Unlike the i9xx variant this accepts any candidate
 * within a relative error budget (target/256 + target/512, i.e. ~0.59%)
 * and then keeps refining; once a candidate is found, max_n is clamped
 * to its n so smaller n values are preferred. Iterating m1/m2/p1 from
 * max to min biases the search toward larger divider values.
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
827
828
829
830
831
/*
 * Decide whether @calculated_clock beats @best_clock. On CHV the error
 * is effectively zero (m2 absorbs it via fractional division), so only
 * a larger p divisor wins. Otherwise the candidate's error in ppm is
 * computed into *error_ppm: candidates under 100 ppm prefer a bigger p
 * (reported as 0 ppm), and anything else must improve on the incumbent
 * by more than 10 ppm to win.
 */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
			       const struct dpll *calculated_clock,
			       const struct dpll *best_clock,
			       unsigned int best_error_ppm,
			       unsigned int *error_ppm)
{
	/* CHV: error is absorbed by fractional m2, so only p matters */
	if (IS_CHERRYVIEW(to_i915(dev))) {
		*error_ppm = 0;

		return calculated_clock->p > best_clock->p;
	}

	if (WARN_ON_ONCE(!target_freq))
		return false;

	*error_ppm = div_u64(1000000ULL *
			     abs(target_freq - calculated_clock->dot),
			     target_freq);
	/*
	 * Under 100 ppm of error counts as "good enough"; prefer a bigger
	 * p divisor and report the candidate as perfect.
	 */
	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
		*error_ppm = 0;

		return true;
	}

	/* otherwise require a >10 ppm improvement over the incumbent */
	return *error_ppm + 10 < best_error_ppm;
}
867
868
869
870
871
872
/*
 * VLV divider search. The target is scaled by 5 because the PLL runs at
 * 5x the pixel clock (vlv_calc_dpll_params() returns dot / 5). Rather
 * than sweeping m2, it is computed directly from the other dividers for
 * each combination, and candidates compete via vlv_PLL_is_optimal().
 * Returns true iff some valid candidate was found.
 */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct dpll clock;
	unsigned int bestppm = 1000000;
	/* caps n so that refclk / n stays >= 19200 — TODO confirm rationale */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;

				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					/* solve for m2 given the other dividers */
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}
927
928
929
930
931
932
933static bool
934chv_find_best_dpll(const struct intel_limit *limit,
935 struct intel_crtc_state *crtc_state,
936 int target, int refclk, struct dpll *match_clock,
937 struct dpll *best_clock)
938{
939 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
940 struct drm_device *dev = crtc->base.dev;
941 unsigned int best_error_ppm;
942 struct dpll clock;
943 u64 m2;
944 int found = false;
945
946 memset(best_clock, 0, sizeof(*best_clock));
947 best_error_ppm = 1000000;
948
949
950
951
952
953
954 clock.n = 1, clock.m1 = 2;
955 target *= 5;
956
957 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
958 for (clock.p2 = limit->p2.p2_fast;
959 clock.p2 >= limit->p2.p2_slow;
960 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
961 unsigned int error_ppm;
962
963 clock.p = clock.p1 * clock.p2;
964
965 m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
966 refclk * clock.m1);
967
968 if (m2 > INT_MAX/clock.m1)
969 continue;
970
971 clock.m2 = m2;
972
973 chv_calc_dpll_params(refclk, &clock);
974
975 if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
976 continue;
977
978 if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
979 best_error_ppm, &error_ppm))
980 continue;
981
982 *best_clock = clock;
983 best_error_ppm = error_ppm;
984 found = true;
985 }
986 }
987
988 return found;
989}
990
991bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
992 struct dpll *best_clock)
993{
994 int refclk = 100000;
995 const struct intel_limit *limit = &intel_limits_bxt;
996
997 return chv_find_best_dpll(limit, crtc_state,
998 crtc_state->port_clock, refclk,
999 NULL, best_clock);
1000}
1001
/*
 * Whether the crtc counts as active: it must be on, have a framebuffer
 * bound to its primary plane, and have a non-zero pixel clock.
 */
bool intel_crtc_active(struct intel_crtc *crtc)
{
	return crtc->active && crtc->base.primary->state->fb &&
		crtc->config->base.adjusted_mode.crtc_clock;
}
1020
/* Look up the CPU transcoder currently driving the given pipe. */
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
					     enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	return crtc->config->cpu_transcoder;
}
1028
1029static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
1030 enum pipe pipe)
1031{
1032 i915_reg_t reg = PIPEDSL(pipe);
1033 u32 line1, line2;
1034 u32 line_mask;
1035
1036 if (IS_GEN(dev_priv, 2))
1037 line_mask = DSL_LINEMASK_GEN2;
1038 else
1039 line_mask = DSL_LINEMASK_GEN3;
1040
1041 line1 = I915_READ(reg) & line_mask;
1042 msleep(5);
1043 line2 = I915_READ(reg) & line_mask;
1044
1045 return line1 != line2;
1046}
1047
/*
 * Wait (up to 100 ms) for the pipe's scanline to reach the requested
 * moving/stopped state; log an error on timeout.
 */
static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		DRM_ERROR("pipe %c scanline %s wait timed out\n",
			  pipe_name(pipe), onoff(state));
}
1058
/* Wait for the pipe's scanline counter to stop advancing. */
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}
1063
/* Wait for the pipe's scanline counter to start advancing. */
static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}
1068
/*
 * Wait for a disabled pipe to actually shut down. On gen4+ the hardware
 * reports this via the PIPECONF ACTIVE bit (cleared within 100 ms here);
 * older generations lack that status bit, so we instead wait for the
 * scanline counter to stop moving.
 */
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (INTEL_GEN(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, reg,
					    I965_PIPECONF_ACTIVE, 100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}
1087
1088
1089void assert_pll(struct drm_i915_private *dev_priv,
1090 enum pipe pipe, bool state)
1091{
1092 u32 val;
1093 bool cur_state;
1094
1095 val = I915_READ(DPLL(pipe));
1096 cur_state = !!(val & DPLL_VCO_ENABLE);
1097 I915_STATE_WARN(cur_state != state,
1098 "PLL state assertion failure (expected %s, current %s)\n",
1099 onoff(state), onoff(cur_state));
1100}
1101
1102
1103void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
1104{
1105 u32 val;
1106 bool cur_state;
1107
1108 vlv_cck_get(dev_priv);
1109 val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
1110 vlv_cck_put(dev_priv);
1111
1112 cur_state = val & DSI_PLL_VCO_EN;
1113 I915_STATE_WARN(cur_state != state,
1114 "DSI PLL state assertion failure (expected %s, current %s)\n",
1115 onoff(state), onoff(cur_state));
1116}
1117
1118static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1119 enum pipe pipe, bool state)
1120{
1121 bool cur_state;
1122 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1123 pipe);
1124
1125 if (HAS_DDI(dev_priv)) {
1126
1127 u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
1128 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
1129 } else {
1130 u32 val = I915_READ(FDI_TX_CTL(pipe));
1131 cur_state = !!(val & FDI_TX_ENABLE);
1132 }
1133 I915_STATE_WARN(cur_state != state,
1134 "FDI TX state assertion failure (expected %s, current %s)\n",
1135 onoff(state), onoff(cur_state));
1136}
/* Convenience wrappers asserting the FDI TX is enabled / disabled. */
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1139
1140static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1141 enum pipe pipe, bool state)
1142{
1143 u32 val;
1144 bool cur_state;
1145
1146 val = I915_READ(FDI_RX_CTL(pipe));
1147 cur_state = !!(val & FDI_RX_ENABLE);
1148 I915_STATE_WARN(cur_state != state,
1149 "FDI RX state assertion failure (expected %s, current %s)\n",
1150 onoff(state), onoff(cur_state));
1151}
/* Convenience wrappers asserting the FDI RX is enabled / disabled. */
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1154
/*
 * Assert the FDI TX PLL is enabled. Skipped on gen5 and on DDI
 * platforms, where this check does not apply — presumably the PLL is
 * either always on or managed by the DDI port there; TODO confirm.
 */
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (IS_GEN(dev_priv, 5))
		return;

	/* no FDI TX PLL bit on DDI platforms */
	if (HAS_DDI(dev_priv))
		return;

	val = I915_READ(FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
1171
1172void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1173 enum pipe pipe, bool state)
1174{
1175 u32 val;
1176 bool cur_state;
1177
1178 val = I915_READ(FDI_RX_CTL(pipe));
1179 cur_state = !!(val & FDI_RX_PLL_ENABLE);
1180 I915_STATE_WARN(cur_state != state,
1181 "FDI RX PLL assertion failure (expected %s, current %s)\n",
1182 onoff(state), onoff(cur_state));
1183}
1184
/*
 * Assert that the panel power sequencer registers for @pipe are not
 * locked while the panel is powered on. First determines which pipe the
 * panel is actually attached to (via the PPS port-select field on PCH
 * platforms, per-pipe PPS on VLV/CHV, LVDS otherwise), then checks the
 * power-on and register-unlock bits.
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	/* DDI platforms have no PPS lock to check here */
	if (WARN_ON(HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		/* map the selected panel port to the pipe driving it */
		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* VLV/CHV have a PPS instance per pipe */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		/* only LVDS panels expected on these platforms */
		WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	/* unlocked means panel off, or the unlock magic written to PP_CONTROL */
	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
			"panel assertion failure, pipe %c regs locked\n",
			pipe_name(pipe));
}
1241
/*
 * Assert that the pipe enable state for @pipe matches the expected @state.
 *
 * Reads PIPECONF of the CPU transcoder backing the pipe, but only if the
 * transcoder's power well is currently enabled; a powered-down transcoder
 * is treated as disabled.
 */
void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* I830 always has its pipes enabled; never expect "disabled" */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		/* Power well off: the pipe cannot be running */
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"pipe %c assertion failure (expected %s, current %s)\n",
			pipe_name(pipe), onoff(state), onoff(cur_state));
}
1270
/*
 * Assert that @plane's hardware enable state matches the expected @state,
 * as reported by the plane's own ->get_hw_state() hook.
 */
static void assert_plane(struct intel_plane *plane, bool state)
{
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)
1285
/* Assert that every plane attached to @crtc is disabled in hardware. */
static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}
1294
/*
 * Assert that vblank interrupts are disabled on @crtc. drm_crtc_vblank_get()
 * returns 0 (success) only when vblanks are (or can be) enabled, which is
 * the failure case here; drop the reference we just took in that case.
 */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}
1300
1301void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1302 enum pipe pipe)
1303{
1304 u32 val;
1305 bool enabled;
1306
1307 val = I915_READ(PCH_TRANSCONF(pipe));
1308 enabled = !!(val & TRANS_ENABLE);
1309 I915_STATE_WARN(enabled,
1310 "transcoder assertion failed, should be off on pipe %c but is still active\n",
1311 pipe_name(pipe));
1312}
1313
/*
 * Assert that PCH DP @port (controlled via @dp_reg) is not driving @pipe.
 * Also warns on IBX if a disabled port is still parked on transcoder B,
 * which the hardware does not allow.
 */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, enum port port,
				   i915_reg_t dp_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH DP %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH DP %c still using transcoder B\n",
			port_name(port));
}
1331
/*
 * Assert that PCH HDMI @port (controlled via @hdmi_reg) is not driving
 * @pipe. Also warns on IBX if a disabled port is still parked on
 * transcoder B. Mirrors assert_pch_dp_disabled() for SDVO/HDMI ports.
 */
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, enum port port,
				     i915_reg_t hdmi_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH HDMI %c still using transcoder B\n",
			port_name(port));
}
1349
/*
 * Assert that no PCH port (DP, VGA/CRT, LVDS, HDMI) is still driving
 * @pipe. Used before disabling the PCH transcoder.
 */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	/* PCH SDVO (multifunction) ports are checked via the HDMI helper */
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}
1374
/*
 * Write the precomputed DPLL value for @crtc and wait for the PLL to lock.
 * The 150us delay gives the PLL time to warm up before polling the lock
 * bit; lock is expected within 1ms.
 */
static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	POSTING_READ(DPLL(pipe));
	udelay(150);

	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		DRM_ERROR("DPLL %d failed to lock\n", pipe);
}
1388
/*
 * Enable the DPLL for @crtc on Valleyview. The pipe must be disabled and
 * the panel power sequencer unlocked (PPS access is needed while the PLL
 * is being programmed). The PLL proper is only spun up when the computed
 * state actually wants the VCO enabled; DPLL_MD is programmed regardless.
 */
static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));
}
1406
1407
/*
 * Spin up the DPLL for @crtc on Cherryview: first enable the DCLKP output
 * in the DPIO common lane block, wait >= 100ns (1us used for margin), then
 * write the DPLL register and poll for lock.
 */
static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	vlv_dpio_get(dev_priv);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	vlv_dpio_put(dev_priv);

	/* Give the clock a moment to settle before enabling the PLL */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);
}
1437
/*
 * Enable the DPLL for @crtc on Cherryview. The pipe must be disabled and
 * the panel power sequencer unlocked. For pipes other than A, DPLL_MD is
 * programmed through the pipe-B MD register with CBR4 steering the write
 * to the right pipe (NOTE(review): this matches the CHV pixel-repeat
 * workaround sequence — the DPLL_MD(PIPE_B) write is intentional, not a
 * copy/paste bug).
 */
static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * CBR4 selects which pipe's DPLL_MD the following
		 * DPLL_MD(PIPE_B) write is routed to; it is cleared again
		 * right after. The value is cached so state readout can
		 * cross-check it later.
		 */
		I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
		I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
		I915_WRITE(CBR4_VLV, 0);
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode must be disabled for the steered MD write
		 * to take effect; warn if that precondition does not hold.
		 */
		WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
	} else {
		I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
		POSTING_READ(DPLL_MD(pipe));
	}
}
1474
1475static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
1476{
1477 if (IS_I830(dev_priv))
1478 return false;
1479
1480 return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
1481}
1482
/*
 * Enable the gen2-4 DPLL for @crtc. The pipe must be disabled, and on
 * platforms with a PPS the panel registers must be unlocked. The PLL is
 * first enabled with VGA mode forcibly disabled, then written three more
 * times with 150us settle delays, per the programming sequence this
 * hardware requires.
 */
static void i9xx_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc_state->dpll_hw_state.dpll;
	int i;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* PLL is protected by panel, make sure we can write it */
	if (i9xx_has_pps(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/*
	 * Disable VGA mode in a first write, then apply the full value;
	 * writing both at once is not reliable on this hardware.
	 */
	I915_WRITE(reg, dpll & ~DPLL_VGA_MODE_DIS);
	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc_state->dpll_hw_state.dpll_md);
	} else {
		/*
		 * Gen2/3 have no separate MD register; rewriting DPLL
		 * latches the multiplier bits carried in the main value.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		I915_WRITE(reg, dpll);
		POSTING_READ(reg);
		udelay(150); /* wait for warmup */
	}
}
1528
/*
 * Disable the gen2-4 DPLL for the crtc in @crtc_state, leaving only VGA
 * mode disable set. I830 is skipped because both of its PLLs must stay
 * enabled at all times.
 */
static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Don't disable pipe or pipe PLLs if needed */
	if (IS_I830(dev_priv))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}
1545
/*
 * Disable the DPLL for @pipe on Valleyview. The reference clock and VGA
 * mode disable must stay asserted; pipe B additionally keeps the CRI
 * clock running, which the eDP port needs even with the PLL off.
 */
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));
}
1561
/*
 * Disable the DPLL for @pipe on Cherryview: drop the PLL to its idle
 * reference-clock configuration, then turn off the DCLKP output in the
 * DPIO common lane block (the reverse of _chv_enable_pll()).
 */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	vlv_dpio_get(dev_priv);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	vlv_dpio_put(dev_priv);
}
1587
/*
 * Wait (up to 1ms) until the PHY reports @dport ready, i.e. until the
 * port's ready bits in the DPLL/PHY status register match @expected_mask.
 * Port C shares DPLL(0) with port B but its ready bits sit 4 bits higher,
 * hence the shift of the expected mask.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dport->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
		     port_name(dport->base.port),
		     I915_READ(dpll_reg) & port_mask, expected_mask);
}
1619
/*
 * Enable the PCH transcoder for the crtc in @crtc_state. Requires the
 * shared DPLL and both FDI TX/RX to already be enabled. Copies BPC and
 * interlace configuration from the CPU pipe's PIPECONF into the PCH
 * transcoder, then enables it and waits (up to 100ms) for the enable to
 * take effect.
 */
static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		/*
		 * CPT: make the transcoder use its own timings rather than
		 * the CPU pipe's while we bring it up.
		 */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/*
		 * IBX: keep the transcoder BPC consistent with PIPECONF;
		 * HDMI is forced to 8bpc here.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		/* IBX + SDVO needs the legacy interlaced mode */
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else {
		val |= TRANS_PROGRESSIVE;
	}

	I915_WRITE(reg, val | TRANS_ENABLE);
	if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}
1676
/*
 * Enable the single LPT PCH transcoder, fed from @cpu_transcoder. FDI
 * must already be running (LPT RX is hardwired to pipe A). Timing
 * override is set so the transcoder uses its own timings, interlace mode
 * is mirrored from the CPU transcoder's PIPECONF, and the enable is
 * polled for up to 100ms.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	/* Workaround: set timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
				  TRANS_STATE_ENABLE, 100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}
1705
/*
 * Disable the PCH transcoder for @pipe. FDI and all PCH ports must be off
 * first. After clearing the enable bit we wait (up to 50ms) for the
 * transcoder to report disabled, then on CPT restore the timing override
 * chicken bit.
 */
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
					    enum pipe pipe)
{
	i915_reg_t reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);

	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}
1735
/*
 * Disable the single LPT PCH transcoder: clear the enable bit, wait (up
 * to 50ms) for the disable to complete, then clear the timing override
 * chicken bit set by lpt_enable_pch_transcoder().
 */
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);

	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
				    TRANS_STATE_ENABLE, 50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}
1753
1754enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
1755{
1756 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1757
1758 if (HAS_PCH_LPT(dev_priv))
1759 return PIPE_A;
1760 else
1761 return crtc->pipe;
1762}
1763
/*
 * Return the maximum value of the hardware frame counter for this crtc
 * configuration, or 0 when the counter cannot be used and vblank counting
 * must fall back to software.
 */
static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);

	/*
	 * On i965gm the frame counter is unusable when TV-out is active;
	 * force the software fallback in that case.
	 */
	if (IS_I965GM(dev_priv) &&
	    (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
		return 0;

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		return 0xffffffff; /* full 32 bit counter */
	else if (INTEL_GEN(dev_priv) >= 3)
		return 0xffffff; /* only 24 bits of frame count */
	else
		return 0; /* gen2: no hardware frame counter */
}
1783
/*
 * Turn vblank interrupts on for the crtc, after telling drm core how far
 * the hardware frame counter can count for this configuration.
 */
static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);

	drm_crtc_set_max_vblank_count(&crtc->base,
				      intel_crtc_max_vblank_count(crtc_state));
	drm_crtc_vblank_on(&crtc->base);
}
1792
/*
 * Enable the pipe for @new_crtc_state. All planes must be disabled, and
 * the clock feeding the pipe (DSI PLL / pipe PLL on GMCH, FDI PLLs for
 * PCH encoders) must already be running. If the pipe is found already
 * enabled we warn, except on I830 where the pipes are always on.
 */
static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * Sanity-check that the clock source the pipe depends on is
	 * actually enabled before turning the pipe on.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	trace_intel_pipe_enable(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		WARN_ON(!IS_I830(dev_priv));
		return;
	}

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	POSTING_READ(reg);

	/*
	 * When there is no hardware frame counter (max vblank count == 0)
	 * wait for the scanline to start moving so that a subsequent
	 * vblank wait does not race with the pipe still starting up.
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}
1850
/*
 * Disable the pipe for @old_crtc_state. All planes must already be off.
 * Double-wide mode is cleared along with the enable bit; on I830 the
 * enable bit itself is left set since its pipes must stay enabled. When
 * the enable bit was actually cleared we wait for the pipe to stop.
 */
static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	trace_intel_pipe_disable(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes,
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	I915_WRITE(reg, val);
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}
1890
/* Size in bytes of one GTT tile/page: 2KiB on gen2, 4KiB everywhere else. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
	if (IS_GEN(dev_priv, 2))
		return 2048;

	return 4096;
}
1895
/*
 * Return the tile width in bytes for @color_plane of @fb, based on the
 * framebuffer's tiling modifier and the plane's bytes per pixel. For
 * linear "tiles" this is the full GTT page size.
 */
static unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_tile_size(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (IS_GEN(dev_priv, 2))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		if (color_plane == 1)
			return 128;
		/* fall through - main surface behaves like plain Y tiling */
	case I915_FORMAT_MOD_Y_TILED:
		if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		if (color_plane == 1)
			return 128;
		/* fall through - main surface behaves like plain Yf tiling */
	case I915_FORMAT_MOD_Yf_TILED:
		switch (cpp) {
		case 1:
			return 64;
		case 2:
		case 4:
			return 128;
		case 8:
		case 16:
			return 256;
		default:
			MISSING_CASE(cpp);
			return cpp;
		}
		break;
	default:
		MISSING_CASE(fb->modifier);
		return cpp;
	}
}
1943
1944static unsigned int
1945intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
1946{
1947 return intel_tile_size(to_i915(fb->dev)) /
1948 intel_tile_width_bytes(fb, color_plane);
1949}
1950
1951
1952static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
1953 unsigned int *tile_width,
1954 unsigned int *tile_height)
1955{
1956 unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
1957 unsigned int cpp = fb->format->cpp[color_plane];
1958
1959 *tile_width = tile_width_bytes / cpp;
1960 *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
1961}
1962
/* Round @height up to a whole number of tile rows for @color_plane of @fb. */
unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
		      int color_plane, unsigned int height)
{
	return ALIGN(height, intel_tile_height(fb, color_plane));
}
1971
1972unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
1973{
1974 unsigned int size = 0;
1975 int i;
1976
1977 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
1978 size += rot_info->plane[i].width * rot_info->plane[i].height;
1979
1980 return size;
1981}
1982
1983unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
1984{
1985 unsigned int size = 0;
1986 int i;
1987
1988 for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
1989 size += rem_info->plane[i].width * rem_info->plane[i].height;
1990
1991 return size;
1992}
1993
1994static void
1995intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
1996 const struct drm_framebuffer *fb,
1997 unsigned int rotation)
1998{
1999 view->type = I915_GGTT_VIEW_NORMAL;
2000 if (drm_rotation_90_or_270(rotation)) {
2001 view->type = I915_GGTT_VIEW_ROTATED;
2002 view->rotated = to_intel_framebuffer(fb)->rot_info;
2003 }
2004}
2005
/* GGTT alignment (bytes) required for cursor surfaces on this platform. */
static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return 16 * 1024;

	if (IS_I85X(dev_priv))
		return 256;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
		return 32;

	return 4 * 1024;
}
2017
/*
 * GGTT alignment (bytes) required for linear scanout surfaces; 0 means no
 * particular alignment is required (pre-gen4).
 */
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return 256 * 1024;

	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return 128 * 1024;

	if (INTEL_GEN(dev_priv) >= 4)
		return 4 * 1024;

	return 0;
}
2030
/*
 * GGTT alignment (bytes) required to scan out @color_plane of @fb, based
 * on the framebuffer's tiling modifier.
 */
static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
					 int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);

	/* AUX_DIST needs only 4K alignment */
	if (color_plane == 1)
		return 4096;

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_linear_alignment(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (INTEL_GEN(dev_priv) >= 9)
			return 256 * 1024;
		return 0;
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		return 1 * 1024 * 1024;
	default:
		MISSING_CASE(fb->modifier);
		return 0;
	}
}
2057
2058static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2059{
2060 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2061 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2062
2063 return INTEL_GEN(dev_priv) < 4 ||
2064 (plane->has_fbc &&
2065 plane_state->view.type == I915_GGTT_VIEW_NORMAL);
2066}
2067
/*
 * Pin @fb's backing object into the GGTT for scanout via @view and, if
 * @uses_fence and a fence is possible, pin a fence register for it.
 *
 * Returns the pinned vma (with an extra reference taken for the caller)
 * or an ERR_PTR. On success *out_flags gets PLANE_HAS_FENCE set when a
 * fence was actually pinned. Caller must hold struct_mutex and later
 * release with intel_unpin_fb_vma().
 */
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
			   const struct i915_ggtt_view *view,
			   bool uses_fence,
			   unsigned long *out_flags)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	unsigned int pinctl;
	u32 alignment;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	alignment = intel_surf_alignment(fb, 0);

	/*
	 * NOTE(review): with an IOMMU active, scanout surfaces are bumped
	 * to at least 256KiB alignment — presumably a VT-d workaround;
	 * intel_scanout_needs_vtd_wa() is defined elsewhere.
	 */
	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Grab a runtime PM wakeref up front (and the object lock) so the
	 * pin/fence operations below don't have to wake the device
	 * themselves at a less convenient point.
	 */
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
	i915_gem_object_lock(obj);

	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

	pinctl = 0;

	/*
	 * GMCH platforms scan out through the aperture, so the vma must
	 * be mappable; later platforms can scan out from anywhere in the
	 * GGTT.
	 */
	if (HAS_GMCH(dev_priv))
		pinctl |= PIN_MAPPABLE;

	vma = i915_gem_object_pin_to_display_plane(obj,
						   alignment, view, pinctl);
	if (IS_ERR(vma))
		goto err;

	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
		int ret;

		/*
		 * Try to pin a fence for the vma. On gen4+ the fence is
		 * only an optimization (e.g. for FBC), so a failure there
		 * is ignored; on pre-gen4 scanout requires the fence, so
		 * failure unwinds the display pin and propagates.
		 */
		ret = i915_vma_pin_fence(vma);
		if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
			i915_gem_object_unpin_from_display_plane(vma);
			vma = ERR_PTR(ret);
			goto err;
		}

		if (ret == 0 && vma->fence)
			*out_flags |= PLANE_HAS_FENCE;
	}

	/* Extra reference handed to the caller; dropped in intel_unpin_fb_vma() */
	i915_vma_get(vma);
err:
	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);

	i915_gem_object_unlock(obj);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	return vma;
}
2161
/*
 * Undo intel_pin_and_fence_fb_obj(): unpin the fence (if @flags says one
 * was pinned), unpin the display mapping, and drop the caller's vma
 * reference. Caller must hold struct_mutex.
 */
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	i915_gem_object_lock(vma->obj);
	if (flags & PLANE_HAS_FENCE)
		i915_vma_unpin_fence(vma);
	i915_gem_object_unpin_from_display_plane(vma);
	i915_gem_object_unlock(vma->obj);

	i915_vma_put(vma);
}
2174
2175static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
2176 unsigned int rotation)
2177{
2178 if (drm_rotation_90_or_270(rotation))
2179 return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
2180 else
2181 return fb->pitches[color_plane];
2182}
2183
2184
2185
2186
2187
2188
2189
2190u32 intel_fb_xy_to_linear(int x, int y,
2191 const struct intel_plane_state *state,
2192 int color_plane)
2193{
2194 const struct drm_framebuffer *fb = state->base.fb;
2195 unsigned int cpp = fb->format->cpp[color_plane];
2196 unsigned int pitch = state->color_plane[color_plane].stride;
2197
2198 return y * pitch + x * cpp;
2199}
2200
2201
2202
2203
2204
2205
2206void intel_add_fb_offsets(int *x, int *y,
2207 const struct intel_plane_state *state,
2208 int color_plane)
2209
2210{
2211 *x += state->color_plane[color_plane].x;
2212 *y += state->color_plane[color_plane].y;
2213}
2214
/*
 * Re-express a tile-aligned surface offset: convert the difference between
 * @old_offset and @new_offset (both multiples of @tile_size) into an
 * (x, y) adjustment in pixels, so the same pixel is addressed from the
 * smaller base offset. Returns @new_offset.
 */
static u32 intel_adjust_tile_offset(int *x, int *y,
				    unsigned int tile_width,
				    unsigned int tile_height,
				    unsigned int tile_size,
				    unsigned int pitch_tiles,
				    u32 old_offset,
				    u32 new_offset)
{
	unsigned int pitch_pixels = pitch_tiles * tile_width;
	unsigned int tiles;

	WARN_ON(old_offset & (tile_size - 1));
	WARN_ON(new_offset & (tile_size - 1));
	WARN_ON(new_offset > old_offset);

	tiles = (old_offset - new_offset) / tile_size;

	/* Distribute the skipped tiles over full rows plus a remainder */
	*y += tiles / pitch_tiles * tile_height;
	*x += tiles % pitch_tiles * tile_width;

	/* minimize x in case it got needlessly big */
	*y += *x / pitch_pixels * tile_height;
	*x %= pitch_pixels;

	return new_offset;
}
2241
/*
 * Whether @color_plane of a surface with the given modifier is laid out
 * linearly. @color_plane is currently unused: linearity depends only on
 * the modifier.
 */
static bool is_surface_linear(u64 modifier, int color_plane)
{
	return modifier == DRM_FORMAT_MOD_LINEAR;
}
2246
/*
 * Rebase the (x, y) position from @old_offset to @new_offset for
 * @color_plane of @fb at @rotation and @pitch. For tiled surfaces this
 * delegates to intel_adjust_tile_offset(); for linear surfaces the byte
 * difference is converted directly into rows and pixels. Returns
 * @new_offset.
 */
static u32 intel_adjust_aligned_offset(int *x, int *y,
				       const struct drm_framebuffer *fb,
				       int color_plane,
				       unsigned int rotation,
				       unsigned int pitch,
				       u32 old_offset, u32 new_offset)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	WARN_ON(new_offset > old_offset);

	if (!is_surface_linear(fb->modifier, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/* Rotated view: pitch is in tile rows, tiles are transposed */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 old_offset, new_offset);
	} else {
		old_offset += *y * pitch + *x * cpp;

		*y = (old_offset - new_offset) / pitch;
		*x = ((old_offset - new_offset) - *y * pitch) / cpp;
	}

	return new_offset;
}
2285
2286
2287
2288
2289
2290static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
2291 const struct intel_plane_state *state,
2292 int color_plane,
2293 u32 old_offset, u32 new_offset)
2294{
2295 return intel_adjust_aligned_offset(x, y, state->base.fb, color_plane,
2296 state->base.rotation,
2297 state->color_plane[color_plane].stride,
2298 old_offset, new_offset);
2299}
2300
2301
2302
2303
2304
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314
/*
 * Compute the surface base offset for the pixel at (*x, *y) of
 * @color_plane, aligned down to @alignment bytes (must be 0 or a power of
 * two). On return (*x, *y) hold the residual pixel position relative to
 * the returned offset. Tiled surfaces are aligned per tile, linear
 * surfaces per byte.
 */
static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
					int *x, int *y,
					const struct drm_framebuffer *fb,
					int color_plane,
					unsigned int pitch,
					unsigned int rotation,
					u32 alignment)
{
	unsigned int cpp = fb->format->cpp[color_plane];
	u32 offset, offset_aligned;

	/* Turn the power-of-two alignment into a low-bits mask */
	if (alignment)
		alignment--;

	if (!is_surface_linear(fb->modifier, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int tile_rows, tiles, pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/* Rotated view: pitch is in tile rows, tiles are transposed */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		tile_rows = *y / tile_height;
		*y %= tile_height;

		tiles = *x / tile_width;
		*x %= tile_width;

		offset = (tile_rows * pitch_tiles + tiles) * tile_size;
		offset_aligned = offset & ~alignment;

		/* Fold the sub-alignment remainder back into (x, y) */
		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 offset, offset_aligned);
	} else {
		offset = *y * pitch + *x * cpp;
		offset_aligned = offset & ~alignment;

		*y = (offset & alignment) / pitch;
		*x = ((offset & alignment) - *y * pitch) / cpp;
	}

	return offset_aligned;
}
2365
/*
 * Plane-state wrapper around intel_compute_aligned_offset(): picks the
 * required alignment (cursor planes have their own rules) and pulls fb,
 * rotation and stride out of @state.
 */
static u32 intel_plane_compute_aligned_offset(int *x, int *y,
					      const struct intel_plane_state *state,
					      int color_plane)
{
	struct intel_plane *intel_plane = to_intel_plane(state->base.plane);
	struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
	const struct drm_framebuffer *fb = state->base.fb;
	unsigned int rotation = state->base.rotation;
	int pitch = state->color_plane[color_plane].stride;
	u32 alignment;

	if (intel_plane->id == PLANE_CURSOR)
		alignment = intel_cursor_alignment(dev_priv);
	else
		alignment = intel_surf_alignment(fb, color_plane);

	return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
					    pitch, rotation, alignment);
}
2385
2386
/*
 * Convert fb->offsets[@color_plane] into an (x, y) pixel position
 * relative to offset 0, validating the offset along the way.
 *
 * Returns 0 on success, -EINVAL if a tiled surface's offset is not
 * tile-size aligned, or -ERANGE if offset + plane size overflows u32.
 */
static int intel_fb_offset_to_xy(int *x, int *y,
				 const struct drm_framebuffer *fb,
				 int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int height;

	/* Tiled surfaces must start on a tile boundary */
	if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
	    fb->offsets[color_plane] % intel_tile_size(dev_priv)) {
		DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
			      fb->offsets[color_plane], color_plane);
		return -EINVAL;
	}

	height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
	height = ALIGN(height, intel_tile_height(fb, color_plane));

	/* Catch potential overflows early */
	if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
			    fb->offsets[color_plane])) {
		DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
			      fb->offsets[color_plane], fb->pitches[color_plane],
			      color_plane);
		return -ERANGE;
	}

	*x = 0;
	*y = 0;

	/* Express the byte offset as an (x, y) adjustment from offset 0 */
	intel_adjust_aligned_offset(x, y,
				    fb, color_plane, DRM_MODE_ROTATE_0,
				    fb->pitches[color_plane],
				    fb->offsets[color_plane], 0);

	return 0;
}
2423
2424static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
2425{
2426 switch (fb_modifier) {
2427 case I915_FORMAT_MOD_X_TILED:
2428 return I915_TILING_X;
2429 case I915_FORMAT_MOD_Y_TILED:
2430 case I915_FORMAT_MOD_Y_TILED_CCS:
2431 return I915_TILING_Y;
2432 default:
2433 return I915_TILING_NONE;
2434 }
2435}
2436
2437
2438
2439
2440
2441
2442
2443
2444
2445
2446
2447
2448
2449
2450
/*
 * Format descriptions for framebuffers using a CCS (color control
 * surface) modifier: plane 0 is the 32bpp main surface, plane 1 the
 * 1 byte-per-element AUX surface with 8x16 subsampling relative to the
 * main surface (per the hsub/vsub values below — NOTE(review): confirm
 * the exact CCS geometry against the hardware spec if modifying).
 */
static const struct drm_format_info ccs_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
};
2461
2462static const struct drm_format_info *
2463lookup_format_info(const struct drm_format_info formats[],
2464 int num_formats, u32 format)
2465{
2466 int i;
2467
2468 for (i = 0; i < num_formats; i++) {
2469 if (formats[i].format == format)
2470 return &formats[i];
2471 }
2472
2473 return NULL;
2474}
2475
2476static const struct drm_format_info *
2477intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
2478{
2479 switch (cmd->modifier[0]) {
2480 case I915_FORMAT_MOD_Y_TILED_CCS:
2481 case I915_FORMAT_MOD_Yf_TILED_CCS:
2482 return lookup_format_info(ccs_formats,
2483 ARRAY_SIZE(ccs_formats),
2484 cmd->pixel_format);
2485 default:
2486 return NULL;
2487 }
2488}
2489
2490bool is_ccs_modifier(u64 modifier)
2491{
2492 return modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
2493 modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
2494}
2495
/*
 * Report the maximum framebuffer stride for the given format+modifier
 * by querying pipe A's primary plane.
 *
 * NOTE(review): this assumes pipe A's primary plane limits are
 * representative for fb creation — confirm if per-plane limits ever
 * diverge on some platform.
 */
u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
			      u32 pixel_format, u64 modifier)
{
	struct intel_crtc *crtc;
	struct intel_plane *plane;

	/* Without pipe A there is nothing to scan out from. */
	crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
	if (!crtc)
		return 0;

	plane = to_intel_plane(crtc->base.primary);

	return plane->max_stride(plane, pixel_format, modifier,
				 DRM_MODE_ROTATE_0);
}
2515
static
u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
			u32 pixel_format, u64 modifier)
{
	/*
	 * Non-CCS fbs can be remapped through the GTT (see
	 * intel_plane_can_remap()), so they are not limited by what the
	 * planes can scan out directly; allow an arbitrary cap of
	 * 256KiB on gen7+ and 128KiB on gen4+. CCS fbs and pre-gen4
	 * fall back to the plane hardware limit.
	 */
	if (!is_ccs_modifier(modifier)) {
		if (INTEL_GEN(dev_priv) >= 7)
			return 256*1024;
		else if (INTEL_GEN(dev_priv) >= 4)
			return 128*1024;
	}

	return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
}
2535
/*
 * Required stride alignment (in bytes) for one color plane of @fb.
 */
static u32
intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);

	if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
		u32 max_stride = intel_plane_fb_max_stride(dev_priv,
							   fb->format->format,
							   fb->modifier);

		/*
		 * A linear fb whose stride exceeds the direct-scanout
		 * limit will have to be remapped via the GTT, which
		 * requires page (tile size) aligned pitches; otherwise
		 * 64 byte alignment suffices.
		 */
		if (fb->pitches[color_plane] > max_stride)
			return intel_tile_size(dev_priv);
		else
			return 64;
	} else {
		/* Tiled surfaces: stride must cover whole tile rows. */
		return intel_tile_width_bytes(fb, color_plane);
	}
}
2558
/*
 * Whether scanout for this plane can go through a remapped GGTT view
 * (used to work around plane stride limits and the like).
 */
bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int i;

	/* Cursor planes are excluded from remapping. */
	if (plane->id == PLANE_CURSOR)
		return false;

	/*
	 * Pre-gen4 display engines are excluded — presumably they rely
	 * on the fence/detiling hardware for scanout (TODO: confirm the
	 * exact reason; the original comment was lost).
	 */
	if (INTEL_GEN(dev_priv) < 4)
		return false;

	/*
	 * The CCS AUX surface is tied to the main surface layout, so
	 * CCS fbs are never remapped (see also the WARN_ON in
	 * intel_plane_remap_gtt()).
	 */
	if (is_ccs_modifier(fb->modifier))
		return false;

	/* Linear remapping requires every pitch to be tile-size aligned. */
	if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
		unsigned int alignment = intel_tile_size(dev_priv) - 1;

		for (i = 0; i < fb->format->num_planes; i++) {
			if (fb->pitches[i] & alignment)
				return false;
		}
	}

	return true;
}
2598
/*
 * Decide whether this plane state actually needs a remapped GGTT view:
 * only when the plane is visible, remapping is possible at all, and the
 * fb stride exceeds what the plane hardware can scan out directly.
 */
static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	u32 stride, max_stride;

	/* Invisible planes scan nothing out, so never need remapping. */
	if (!plane_state->base.visible)
		return false;

	if (!intel_plane_can_remap(plane_state))
		return false;

	/*
	 * Compare the fb stride (for the current rotation) against the
	 * plane's hardware limit for this format/modifier/rotation.
	 */
	stride = intel_fb_pitch(fb, 0, rotation);
	max_stride = plane->max_stride(plane, fb->format->format,
				       fb->modifier, rotation);

	return stride > max_stride;
}
2626
/*
 * Pre-compute the per-color-plane layout of a framebuffer: the x/y
 * start coordinates of each plane in the normal (unrotated) view, the
 * layout of the 90/270-degree rotated GGTT view, and a final check
 * that the fb fits inside its backing object.
 *
 * Returns 0 on success or a negative error code for a malformed fb.
 */
static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
		   struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_rotation_info *rot_info = &intel_fb->rot_info;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 gtt_offset_rotated = 0;
	unsigned int max_size = 0;
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);

	for (i = 0; i < num_planes; i++) {
		unsigned int width, height;
		unsigned int cpp, size;
		u32 offset;
		int x, y;
		int ret;

		cpp = fb->format->cpp[i];
		width = drm_framebuffer_plane_width(fb->width, fb, i);
		height = drm_framebuffer_plane_height(fb->height, fb, i);

		ret = intel_fb_offset_to_xy(&x, &y, fb, i);
		if (ret) {
			DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
				      i, fb->offsets[i]);
			return ret;
		}

		/*
		 * The CCS AUX plane must line up with the main surface:
		 * after scaling by the subsampling factors, the
		 * intra-tile x/y of both planes have to match.
		 */
		if (is_ccs_modifier(fb->modifier) && i == 1) {
			int hsub = fb->format->hsub;
			int vsub = fb->format->vsub;
			int tile_width, tile_height;
			int main_x, main_y;
			int ccs_x, ccs_y;

			intel_tile_dims(fb, i, &tile_width, &tile_height);
			tile_width *= hsub;
			tile_height *= vsub;

			ccs_x = (x * hsub) % tile_width;
			ccs_y = (y * vsub) % tile_height;
			main_x = intel_fb->normal[0].x % tile_width;
			main_y = intel_fb->normal[0].y % tile_height;

			if (main_x != ccs_x || main_y != ccs_y) {
				DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
					      main_x, main_y,
					      ccs_x, ccs_y,
					      intel_fb->normal[0].x,
					      intel_fb->normal[0].y,
					      x, y);
				return -EINVAL;
			}
		}

		/*
		 * For a tiled object the first row of the main surface
		 * must fit within the declared pitch, otherwise the fb
		 * layout is bogus.
		 */
		if (i == 0 && i915_gem_object_is_tiled(obj) &&
		    (x + width) * cpp > fb->pitches[i]) {
			DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
				      i, fb->offsets[i]);
			return -EINVAL;
		}

		/* Record where this plane starts in the normal view. */
		intel_fb->normal[i].x = x;
		intel_fb->normal[i].y = y;

		offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
						      fb->pitches[i],
						      DRM_MODE_ROTATE_0,
						      tile_size);
		offset /= tile_size;	/* from here on, offset is in tiles */

		if (!is_surface_linear(fb->modifier, i)) {
			unsigned int tile_width, tile_height;
			unsigned int pitch_tiles;
			struct drm_rect r;

			intel_tile_dims(fb, i, &tile_width, &tile_height);

			/* Describe this plane in tile units for the rotated VMA. */
			rot_info->plane[i].offset = offset;
			rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
			rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
			rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

			intel_fb->rotated[i].pitch =
				rot_info->plane[i].height * tile_height;

			/* GTT space needed by this plane, in tiles. */
			size = rot_info->plane[i].stride * rot_info->plane[i].height;

			/*
			 * A non-zero x start means the last row spills
			 * into one extra tile beyond stride * height.
			 */
			if (x != 0)
				size++;

			/* Rotate the x/y offsets into the rotated view. */
			r.x1 = x;
			r.y1 = y;
			r.x2 = x + width;
			r.y2 = y + height;
			drm_rect_rotate(&r,
					rot_info->plane[i].width * tile_width,
					rot_info->plane[i].height * tile_height,
					DRM_MODE_ROTATE_270);
			x = r.x1;
			y = r.y1;

			/* Tile dimensions swap in the rotated view. */
			pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
			swap(tile_width, tile_height);

			/*
			 * Fold this plane's tile offset into x/y,
			 * relative to the start of the rotated GTT
			 * mapping built up so far.
			 */
			intel_adjust_tile_offset(&x, &y,
						 tile_width, tile_height,
						 tile_size, pitch_tiles,
						 gtt_offset_rotated * tile_size, 0);

			gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;

			/* Starting coordinates of this plane in the rotated view. */
			intel_fb->rotated[i].x = x;
			intel_fb->rotated[i].y = y;
		} else {
			/* Linear surface: size in tiles from pitch and height. */
			size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
					    x * cpp, tile_size);
		}

		/* Track the furthest tile the fb reaches into the object. */
		max_size = max(max_size, offset + size);
	}

	if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
		DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n",
			      mul_u32_u32(max_size, tile_size), obj->base.size);
		return -EINVAL;
	}

	return 0;
}
2791
/*
 * Build a remapped/rotated GGTT view for this plane state and fill in
 * the per-color-plane stride/x/y that scanout will use. Only called
 * when intel_plane_needs_remap() says remapping is required.
 */
static void
intel_plane_remap_gtt(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	struct drm_framebuffer *fb = plane_state->base.fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_rotation_info *info = &plane_state->view.rotated;
	unsigned int rotation = plane_state->base.rotation;
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);
	unsigned int src_x, src_y;
	unsigned int src_w, src_h;
	u32 gtt_offset = 0;

	memset(&plane_state->view, 0, sizeof(plane_state->view));
	plane_state->view.type = drm_rotation_90_or_270(rotation) ?
		I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;

	src_x = plane_state->base.src.x1 >> 16;
	src_y = plane_state->base.src.y1 >> 16;
	src_w = drm_rect_width(&plane_state->base.src) >> 16;
	src_h = drm_rect_height(&plane_state->base.src) >> 16;

	/* CCS fbs must never get here (see intel_plane_can_remap()). */
	WARN_ON(is_ccs_modifier(fb->modifier));

	/* Make src coordinates relative to the viewport. */
	drm_rect_translate(&plane_state->base.src,
			   -(src_x << 16), -(src_y << 16));

	/* Rotate src coordinates to match the rotated GTT view. */
	if (drm_rotation_90_or_270(rotation))
		drm_rect_rotate(&plane_state->base.src,
				src_w << 16, src_h << 16,
				DRM_MODE_ROTATE_270);

	for (i = 0; i < num_planes; i++) {
		/* Plane 0 is never subsampled; planes >= 1 use fb hsub/vsub. */
		unsigned int hsub = i ? fb->format->hsub : 1;
		unsigned int vsub = i ? fb->format->vsub : 1;
		unsigned int cpp = fb->format->cpp[i];
		unsigned int tile_width, tile_height;
		unsigned int width, height;
		unsigned int pitch_tiles;
		unsigned int x, y;
		u32 offset;

		intel_tile_dims(fb, i, &tile_width, &tile_height);

		x = src_x / hsub;
		y = src_y / vsub;
		width = src_w / hsub;
		height = src_h / vsub;

		/*
		 * Start from the pre-computed normal-view position of
		 * this plane so the remapped view covers the right data.
		 */
		x += intel_fb->normal[i].x;
		y += intel_fb->normal[i].y;

		offset = intel_compute_aligned_offset(dev_priv, &x, &y,
						      fb, i, fb->pitches[i],
						      DRM_MODE_ROTATE_0, tile_size);
		offset /= tile_size;	/* offset is in tiles from here on */

		/* Describe the remapped plane in tile units. */
		info->plane[i].offset = offset;
		info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
						     tile_width * cpp);
		info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
		info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			struct drm_rect r;

			/* Rotate the x/y offsets to match the rotated view. */
			r.x1 = x;
			r.y1 = y;
			r.x2 = x + width;
			r.y2 = y + height;
			drm_rect_rotate(&r,
					info->plane[i].width * tile_width,
					info->plane[i].height * tile_height,
					DRM_MODE_ROTATE_270);
			x = r.x1;
			y = r.y1;

			pitch_tiles = info->plane[i].height;
			plane_state->color_plane[i].stride = pitch_tiles * tile_height;

			/* Tile dimensions swap in the rotated view. */
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = info->plane[i].width;
			plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp;
		}

		/*
		 * Fold this plane's tile offset into x/y, relative to
		 * the start of the remapped GTT mapping built so far.
		 */
		intel_adjust_tile_offset(&x, &y,
					 tile_width, tile_height,
					 tile_size, pitch_tiles,
					 gtt_offset * tile_size, 0);

		gtt_offset += info->plane[i].width * info->plane[i].height;

		/* In the remapped view every plane starts at offset 0. */
		plane_state->color_plane[i].offset = 0;
		plane_state->color_plane[i].x = x;
		plane_state->color_plane[i].y = y;
	}
}
2904
/*
 * Fill in the GGTT view and the per-color-plane stride/offset/x/y for
 * this plane state, remapping through the GTT when required, and
 * validate the resulting stride.
 */
static int
intel_plane_compute_gtt(struct intel_plane_state *plane_state)
{
	const struct intel_framebuffer *fb =
		to_intel_framebuffer(plane_state->base.fb);
	unsigned int rotation = plane_state->base.rotation;
	int i, num_planes;

	/* No fb means nothing to map. */
	if (!fb)
		return 0;

	num_planes = fb->base.format->num_planes;

	if (intel_plane_needs_remap(plane_state)) {
		intel_plane_remap_gtt(plane_state);

		/*
		 * Remapping already filled in stride/x/y; still validate
		 * the stride, since even the remapped stride may exceed
		 * the hardware limit.
		 */
		return intel_plane_check_stride(plane_state);
	}

	intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation);

	/* Use the pre-computed normal/rotated layout from the fb. */
	for (i = 0; i < num_planes; i++) {
		plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation);
		plane_state->color_plane[i].offset = 0;

		if (drm_rotation_90_or_270(rotation)) {
			plane_state->color_plane[i].x = fb->rotated[i].x;
			plane_state->color_plane[i].y = fb->rotated[i].y;
		} else {
			plane_state->color_plane[i].x = fb->normal[i].x;
			plane_state->color_plane[i].y = fb->normal[i].y;
		}
	}

	/* Rotate src coordinates to match the rotated GTT view. */
	if (drm_rotation_90_or_270(rotation))
		drm_rect_rotate(&plane_state->base.src,
				fb->base.width << 16, fb->base.height << 16,
				DRM_MODE_ROTATE_270);

	return intel_plane_check_stride(plane_state);
}
2953
2954static int i9xx_format_to_fourcc(int format)
2955{
2956 switch (format) {
2957 case DISPPLANE_8BPP:
2958 return DRM_FORMAT_C8;
2959 case DISPPLANE_BGRX555:
2960 return DRM_FORMAT_XRGB1555;
2961 case DISPPLANE_BGRX565:
2962 return DRM_FORMAT_RGB565;
2963 default:
2964 case DISPPLANE_BGRX888:
2965 return DRM_FORMAT_XRGB8888;
2966 case DISPPLANE_RGBX888:
2967 return DRM_FORMAT_XBGR8888;
2968 case DISPPLANE_BGRX101010:
2969 return DRM_FORMAT_XRGB2101010;
2970 case DISPPLANE_RGBX101010:
2971 return DRM_FORMAT_XBGR2101010;
2972 }
2973}
2974
/*
 * Translate a skl+ PLANE_CTL format field into a DRM fourcc.
 *
 * @rgb_order selects between the RGB/BGR channel orders and @alpha
 * between the X/A variants for the formats that have them. Unknown
 * values fall through to the XRGB_8888 family (note the default label
 * is deliberately placed on that case).
 */
int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
{
	switch (format) {
	case PLANE_CTL_FORMAT_RGB_565:
		return DRM_FORMAT_RGB565;
	case PLANE_CTL_FORMAT_NV12:
		return DRM_FORMAT_NV12;
	case PLANE_CTL_FORMAT_P010:
		return DRM_FORMAT_P010;
	case PLANE_CTL_FORMAT_P012:
		return DRM_FORMAT_P012;
	case PLANE_CTL_FORMAT_P016:
		return DRM_FORMAT_P016;
	case PLANE_CTL_FORMAT_Y210:
		return DRM_FORMAT_Y210;
	case PLANE_CTL_FORMAT_Y212:
		return DRM_FORMAT_Y212;
	case PLANE_CTL_FORMAT_Y216:
		return DRM_FORMAT_Y216;
	case PLANE_CTL_FORMAT_Y410:
		return DRM_FORMAT_XVYU2101010;
	case PLANE_CTL_FORMAT_Y412:
		return DRM_FORMAT_XVYU12_16161616;
	case PLANE_CTL_FORMAT_Y416:
		return DRM_FORMAT_XVYU16161616;
	default:
	case PLANE_CTL_FORMAT_XRGB_8888:
		if (rgb_order) {
			if (alpha)
				return DRM_FORMAT_ABGR8888;
			else
				return DRM_FORMAT_XBGR8888;
		} else {
			if (alpha)
				return DRM_FORMAT_ARGB8888;
			else
				return DRM_FORMAT_XRGB8888;
		}
	case PLANE_CTL_FORMAT_XRGB_2101010:
		if (rgb_order)
			return DRM_FORMAT_XBGR2101010;
		else
			return DRM_FORMAT_XRGB2101010;
	case PLANE_CTL_FORMAT_XRGB_16161616F:
		if (rgb_order) {
			if (alpha)
				return DRM_FORMAT_ABGR16161616F;
			else
				return DRM_FORMAT_XBGR16161616F;
		} else {
			if (alpha)
				return DRM_FORMAT_ARGB16161616F;
			else
				return DRM_FORMAT_XRGB16161616F;
		}
	}
}
3032
/*
 * Try to wrap the BIOS/GOP-programmed scanout buffer (preallocated in
 * stolen memory) in a GEM object and initialize @plane_config->fb on
 * top of it, so the boot framebuffer can be taken over flicker-free.
 *
 * Returns true on success, false if the preallocated buffer cannot be
 * used (in which case the caller must find another fb or disable the
 * plane).
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = &plane_config->fb->base;
	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
	u32 size_aligned = round_up(plane_config->base + plane_config->size,
				    PAGE_SIZE);
	struct drm_i915_gem_object *obj;
	bool ret = false;

	size_aligned -= base_aligned;

	if (plane_config->size == 0)
		return false;

	/*
	 * Refuse fbs that would consume more than half of usable stolen
	 * memory — presumably to leave room for other stolen users
	 * (TODO: confirm rationale; the original comment was lost).
	 */
	if (size_aligned * 2 > dev_priv->stolen_usable_size)
		return false;

	/* Only modifiers the pre-OS firmware could have used. */
	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_X_TILED:
	case I915_FORMAT_MOD_Y_TILED:
		break;
	default:
		DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n",
				 fb->modifier);
		return false;
	}

	mutex_lock(&dev->struct_mutex);
	obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
							     base_aligned,
							     base_aligned,
							     size_aligned);
	mutex_unlock(&dev->struct_mutex);
	if (!obj)
		return false;

	/* Propagate the BIOS tiling onto the new object. */
	switch (plane_config->tiling) {
	case I915_TILING_NONE:
		break;
	case I915_TILING_X:
	case I915_TILING_Y:
		obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling;
		break;
	default:
		MISSING_CASE(plane_config->tiling);
		goto out;
	}

	mode_cmd.pixel_format = fb->format->format;
	mode_cmd.width = fb->width;
	mode_cmd.height = fb->height;
	mode_cmd.pitches[0] = fb->pitches[0];
	mode_cmd.modifier[0] = fb->modifier;
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

	if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
		DRM_DEBUG_KMS("intel fb init failed\n");
		goto out;
	}

	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
	ret = true;
out:
	/* The fb (on success) holds its own reference; drop ours. */
	i915_gem_object_put(obj);
	return ret;
}
3109
3110static void
3111intel_set_plane_visible(struct intel_crtc_state *crtc_state,
3112 struct intel_plane_state *plane_state,
3113 bool visible)
3114{
3115 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
3116
3117 plane_state->base.visible = visible;
3118
3119 if (visible)
3120 crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
3121 else
3122 crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
3123}
3124
/*
 * Recompute crtc_state->active_planes from scratch, based on which
 * planes are currently in the crtc's plane_mask. Used by the noatomic
 * disable path to keep the bookkeeping consistent after a plane's
 * visibility is changed behind the atomic state's back.
 */
static void fixup_active_planes(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	struct drm_plane *plane;

	crtc_state->active_planes = 0;

	drm_for_each_plane_mask(plane, &dev_priv->drm,
				crtc_state->base.plane_mask)
		crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
}
3141
/*
 * Immediately disable @plane on @crtc outside of an atomic commit,
 * updating the software state (visibility, active_planes, data_rate)
 * to match. Used during driver takeover/sanitization.
 */
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
					 struct intel_plane *plane)
{
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		      plane->base.base.id, plane->base.name,
		      crtc->base.base.id, crtc->base.name);

	intel_set_plane_visible(crtc_state, plane_state, false);
	fixup_active_planes(crtc_state);
	crtc_state->data_rate[plane->id] = 0;

	/* Turning off the primary plane needs extra pre-disable work. */
	if (plane->id == PLANE_PRIMARY)
		intel_pre_disable_primary_noatomic(&crtc->base);

	intel_disable_plane(plane, crtc_state);
}
3163
3164static struct intel_frontbuffer *
3165to_intel_frontbuffer(struct drm_framebuffer *fb)
3166{
3167 return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
3168}
3169
/*
 * Take over the firmware-programmed framebuffer for this crtc's
 * primary plane: either wrap the preallocated stolen buffer, or share
 * an fb another crtc already scans out from the same base address.
 * If neither works, the plane is disabled.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *c;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct intel_plane_state *intel_state =
		to_intel_plane_state(plane_state);
	struct drm_framebuffer *fb;

	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		goto valid_fb;
	}

	kfree(plane_config->fb);

	/*
	 * The allocation failed; see if another active crtc scans out
	 * from the same GGTT address and share its fb (cloned config).
	 */
	for_each_crtc(dev, c) {
		struct intel_plane_state *state;

		if (c == &intel_crtc->base)
			continue;

		if (!to_intel_crtc(c)->active)
			continue;

		state = to_intel_plane_state(c->primary->state);
		if (!state->vma)
			continue;

		if (intel_plane_ggtt_offset(state) == plane_config->base) {
			fb = state->base.fb;
			drm_framebuffer_get(fb);
			goto valid_fb;
		}
	}

	/*
	 * No usable fb found: shut the plane down rather than scan out
	 * from memory we don't own.
	 */
	intel_plane_disable_noatomic(intel_crtc, intel_plane);

	return;

valid_fb:
	intel_state->base.rotation = plane_config->rotation;
	intel_fill_fb_ggtt_view(&intel_state->view, fb,
				intel_state->base.rotation);
	intel_state->color_plane[0].stride =
		intel_fb_pitch(fb, 0, intel_state->base.rotation);

	/* Pin the fb so the hardware can keep scanning out from it. */
	mutex_lock(&dev->struct_mutex);
	intel_state->vma =
		intel_pin_and_fence_fb_obj(fb,
					   &intel_state->view,
					   intel_plane_uses_fence(intel_state),
					   &intel_state->flags);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(intel_state->vma)) {
		DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
			  intel_crtc->pipe, PTR_ERR(intel_state->vma));

		intel_state->vma = NULL;
		drm_framebuffer_put(fb);
		return;
	}

	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);

	/* Full-fb src/dst rectangles (src in 16.16 fixed point). */
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	intel_state->base.src = drm_plane_state_src(plane_state);
	intel_state->base.dst = drm_plane_state_dest(plane_state);

	/* Keep the BIOS swizzle mode if the boot fb was tiled. */
	if (plane_config->tiling)
		dev_priv->preserve_bios_swizzle = true;

	plane_state->fb = fb;
	plane_state->crtc = &intel_crtc->base;

	atomic_or(to_intel_plane(primary)->frontbuffer_bit,
		  &to_intel_frontbuffer(fb)->bits);
}
3276
/*
 * skl/bxt/kbl: maximum plane source width in pixels for the given
 * fb modifier and cpp. NOTE(review): the exact per-modifier limits
 * should be confirmed against the hardware spec; the original
 * rationale comment was lost.
 */
static int skl_max_plane_width(const struct drm_framebuffer *fb,
			       int color_plane,
			       unsigned int rotation)
{
	int cpp = fb->format->cpp[color_plane];

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_X_TILED:
		/* 64bpp formats are capped lower than the rest. */
		if (cpp == 8)
			return 4096;
		else
			return 5120;
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		/* fall through — same limits as plain Y/Yf tiling */
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		if (cpp == 8)
			return 2048;
		else
			return 4096;
	default:
		MISSING_CASE(fb->modifier);
		return 2048;
	}
}
3314
/*
 * glk/cnl: maximum plane source width in pixels. Same structure as
 * skl_max_plane_width() but with a raised Y/Yf limit for non-64bpp.
 */
static int glk_max_plane_width(const struct drm_framebuffer *fb,
			       int color_plane,
			       unsigned int rotation)
{
	int cpp = fb->format->cpp[color_plane];

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_X_TILED:
		/* 64bpp formats are capped lower than the rest. */
		if (cpp == 8)
			return 4096;
		else
			return 5120;
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		/* fall through — same limits as plain Y/Yf tiling */
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		if (cpp == 8)
			return 2048;
		else
			return 5120;
	default:
		MISSING_CASE(fb->modifier);
		return 2048;
	}
}
3342
/*
 * icl+: a flat 5120 pixel source width limit, independent of format,
 * modifier and rotation (hence the unused parameters, kept for
 * signature parity with the skl/glk variants).
 */
static int icl_max_plane_width(const struct drm_framebuffer *fb,
			       int color_plane,
			       unsigned int rotation)
{
	return 5120;
}
3349
/*
 * Try to bring the AUX (CCS) surface x/y into agreement with the main
 * surface position chosen by the caller, by walking the AUX offset
 * backwards one alignment step at a time. Returns true (and updates
 * color_plane[1]) when matching coordinates were found.
 */
static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
					   int main_x, int main_y, u32 main_offset)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int hsub = fb->format->hsub;
	int vsub = fb->format->vsub;
	int aux_x = plane_state->color_plane[1].x;
	int aux_y = plane_state->color_plane[1].y;
	u32 aux_offset = plane_state->color_plane[1].offset;
	u32 alignment = intel_surf_alignment(fb, 1);

	/* Walk back while the AUX surface is still at/after the main one. */
	while (aux_offset >= main_offset && aux_y <= main_y) {
		int x, y;

		if (aux_x == main_x && aux_y == main_y)
			break;

		/* Cannot move earlier than the start of the buffer. */
		if (aux_offset == 0)
			break;

		/* Adjust in main-surface coordinates, then scale back. */
		x = aux_x / hsub;
		y = aux_y / vsub;
		aux_offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 1,
							       aux_offset, aux_offset - alignment);
		aux_x = x * hsub + aux_x % hsub;
		aux_y = y * vsub + aux_y % vsub;
	}

	if (aux_x != main_x || aux_y != main_y)
		return false;

	plane_state->color_plane[1].offset = aux_offset;
	plane_state->color_plane[1].x = aux_x;
	plane_state->color_plane[1].y = aux_y;

	return true;
}
3387
/*
 * Validate and finalize the main surface position for a skl+ plane:
 * enforce the per-platform source size limits, pick an aligned surface
 * offset compatible with the AUX surface and X-tiling constraints, and
 * write the result into color_plane[0].
 */
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int x = plane_state->base.src.x1 >> 16;
	int y = plane_state->base.src.y1 >> 16;
	int w = drm_rect_width(&plane_state->base.src) >> 16;
	int h = drm_rect_height(&plane_state->base.src) >> 16;
	int max_width;
	int max_height = 4096;
	u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset;

	if (INTEL_GEN(dev_priv) >= 11)
		max_width = icl_max_plane_width(fb, 0, rotation);
	else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		max_width = glk_max_plane_width(fb, 0, rotation);
	else
		max_width = skl_max_plane_width(fb, 0, rotation);

	if (w > max_width || h > max_height) {
		DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
			      w, h, max_width, max_height);
		return -EINVAL;
	}

	intel_add_fb_offsets(&x, &y, plane_state, 0);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
	alignment = intel_surf_alignment(fb, 0);

	/*
	 * The main surface offset must not be past the AUX surface
	 * offset; pull it back (aligned) if it is.
	 */
	if (offset > aux_offset)
		offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
							   offset, aux_offset & ~(alignment - 1));

	/*
	 * X-tiling: the fence detiling requires the first row to fit
	 * within the stride, so keep walking the offset back until the
	 * resulting x satisfies that, or give up at offset 0.
	 */
	if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
		int cpp = fb->format->cpp[0];

		while ((x + w) * cpp > plane_state->color_plane[0].stride) {
			if (offset == 0) {
				DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
				return -EINVAL;
			}

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}
	}

	/*
	 * CCS: keep walking the main offset back until AUX coordinates
	 * can be brought into agreement with the main surface.
	 */
	if (is_ccs_modifier(fb->modifier)) {
		while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
			if (offset == 0)
				break;

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}

		if (x != plane_state->color_plane[1].x || y != plane_state->color_plane[1].y) {
			DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
			return -EINVAL;
		}
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = x;
	plane_state->color_plane[0].y = y;

	/*
	 * Re-anchor the src rectangle on the final x/y so later code
	 * sees coordinates consistent with the chosen surface offset.
	 */
	drm_rect_translate(&plane_state->base.src,
			   (x << 16) - plane_state->base.src.x1,
			   (y << 16) - plane_state->base.src.y1);

	return 0;
}
3480
/*
 * Validate and position the chroma (UV) surface of a planar YUV fb.
 * The >> 17 shifts convert the 16.16 fixed-point src coordinates to
 * integers AND divide by the 2x chroma subsampling in one step.
 */
static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int max_width = skl_max_plane_width(fb, 1, rotation);
	int max_height = 4096;
	int x = plane_state->base.src.x1 >> 17;
	int y = plane_state->base.src.y1 >> 17;
	int w = drm_rect_width(&plane_state->base.src) >> 17;
	int h = drm_rect_height(&plane_state->base.src) >> 17;
	u32 offset;

	intel_add_fb_offsets(&x, &y, plane_state, 1);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);

	if (w > max_width || h > max_height) {
		DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
			      w, h, max_width, max_height);
		return -EINVAL;
	}

	plane_state->color_plane[1].offset = offset;
	plane_state->color_plane[1].x = x;
	plane_state->color_plane[1].y = y;

	return 0;
}
3509
/*
 * Compute the initial position of the CCS AUX surface: scale the src
 * coordinates down by the subsampling factors, pick an aligned offset,
 * then restore the sub-tile remainders so AUX x/y stay expressed in
 * main-surface pixels.
 */
static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int src_x = plane_state->base.src.x1 >> 16;
	int src_y = plane_state->base.src.y1 >> 16;
	int hsub = fb->format->hsub;
	int vsub = fb->format->vsub;
	int x = src_x / hsub;
	int y = src_y / vsub;
	u32 offset;

	intel_add_fb_offsets(&x, &y, plane_state, 1);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);

	plane_state->color_plane[1].offset = offset;
	plane_state->color_plane[1].x = x * hsub + src_x % hsub;
	plane_state->color_plane[1].y = y * vsub + src_y % vsub;

	return 0;
}
3530
/*
 * Full surface check for a skl+ plane: compute the GTT view, then
 * position the AUX surface (planar YUV chroma or CCS) before the main
 * surface, since the main surface constraints depend on the AUX
 * offset.
 */
int skl_check_plane_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int ret;

	ret = intel_plane_compute_gtt(plane_state);
	if (ret)
		return ret;

	/* Nothing else to do for an invisible plane. */
	if (!plane_state->base.visible)
		return 0;

	/* AUX surface first — the main surface check reads its offset. */
	if (is_planar_yuv_format(fb->format->format)) {
		ret = skl_check_nv12_aux_surface(plane_state);
		if (ret)
			return ret;
	} else if (is_ccs_modifier(fb->modifier)) {
		ret = skl_check_ccs_aux_surface(plane_state);
		if (ret)
			return ret;
	} else {
		/*
		 * No AUX surface: park the AUX offset at a sentinel
		 * (~0xfff, presumably an intentionally invalid value —
		 * NOTE(review): confirm how the hw/aux plane code
		 * consumes this).
		 */
		plane_state->color_plane[1].offset = ~0xfff;
		plane_state->color_plane[1].x = 0;
		plane_state->color_plane[1].y = 0;
	}

	ret = skl_check_main_surface(plane_state);
	if (ret)
		return ret;

	return 0;
}
3567
/*
 * Maximum scanout stride (bytes) for the gen2-4 / ilk+ primary planes,
 * by display generation and tiling. Note the first branch: non-GMCH
 * (ilk and later) platforms all get a flat 32KiB limit.
 */
unsigned int
i9xx_plane_max_stride(struct intel_plane *plane,
		      u32 pixel_format, u64 modifier,
		      unsigned int rotation)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);

	if (!HAS_GMCH(dev_priv)) {
		return 32*1024;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		if (modifier == I915_FORMAT_MOD_X_TILED)
			return 16*1024;
		else
			return 32*1024;
	} else if (INTEL_GEN(dev_priv) >= 3) {
		if (modifier == I915_FORMAT_MOD_X_TILED)
			return 8*1024;
		else
			return 16*1024;
	} else {
		/* gen2: plane C is more limited than planes A/B. */
		if (plane->i9xx_plane == PLANE_C)
			return 4*1024;
		else
			return 8*1024;
	}
}
3594
3595static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
3596{
3597 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
3598 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3599 u32 dspcntr = 0;
3600
3601 if (crtc_state->gamma_enable)
3602 dspcntr |= DISPPLANE_GAMMA_ENABLE;
3603
3604 if (crtc_state->csc_enable)
3605 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
3606
3607 if (INTEL_GEN(dev_priv) < 5)
3608 dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
3609
3610 return dspcntr;
3611}
3612
/*
 * Compute the plane-dependent bits of the DSPCNTR register value for
 * the gen2+ primary planes: pixel format, tiling, rotation/mirroring
 * and trickle-feed. Returns 0 for an unsupported pixel format.
 */
static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	u32 dspcntr;

	dspcntr = DISPLAY_PLANE_ENABLE;

	/* Trickle feed is disabled on these specific platforms. */
	if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
	    IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		MISSING_CASE(fb->format->format);
		return 0;
	}

	/* X-tiling only affects the plane control on gen4+. */
	if (INTEL_GEN(dev_priv) >= 4 &&
	    fb->modifier == I915_FORMAT_MOD_X_TILED)
		dspcntr |= DISPPLANE_TILED;

	if (rotation & DRM_MODE_ROTATE_180)
		dspcntr |= DISPPLANE_ROTATE_180;

	if (rotation & DRM_MODE_REFLECT_X)
		dspcntr |= DISPPLANE_MIRROR;

	return dspcntr;
}
3667
/*
 * Finalize the surface layout for a pre-SKL primary plane: compute
 * the GTT mapping, the (x,y) start coordinates within the fb and,
 * on gen4+, the aligned surface offset. Stores the results in
 * plane_state->color_plane[0].
 */
int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	int src_x, src_y;
	u32 offset;
	int ret;

	ret = intel_plane_compute_gtt(plane_state);
	if (ret)
		return ret;

	/* nothing more to compute for an invisible plane */
	if (!plane_state->base.visible)
		return 0;

	src_x = plane_state->base.src.x1 >> 16;
	src_y = plane_state->base.src.y1 >> 16;

	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);

	/* gen4+ programs an aligned surface offset; older hw uses 0 */
	if (INTEL_GEN(dev_priv) >= 4)
		offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
							    plane_state, 0);
	else
		offset = 0;

	/*
	 * Fold the x/y adjustment made by the offset computation back
	 * into the src rectangle so it starts at the adjusted origin.
	 */
	drm_rect_translate(&plane_state->base.src,
			   (src_x << 16) - plane_state->base.src.x1,
			   (src_y << 16) - plane_state->base.src.y1);

	/*
	 * HSW/BDW skip this start-coordinate fixup; presumably the hw
	 * handles rotated/mirrored scanout itself — TODO confirm.
	 */
	if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
		unsigned int rotation = plane_state->base.rotation;
		int src_w = drm_rect_width(&plane_state->base.src) >> 16;
		int src_h = drm_rect_height(&plane_state->base.src) >> 16;

		/* point at the last pixel for rotated/mirrored scanout */
		if (rotation & DRM_MODE_ROTATE_180) {
			src_x += src_w - 1;
			src_y += src_h - 1;
		} else if (rotation & DRM_MODE_REFLECT_X) {
			src_x += src_w - 1;
		}
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = src_x;
	plane_state->color_plane[0].y = src_y;

	return 0;
}
3722
3723static bool i9xx_plane_has_windowing(struct intel_plane *plane)
3724{
3725 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3726 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3727
3728 if (IS_CHERRYVIEW(dev_priv))
3729 return i9xx_plane == PLANE_B;
3730 else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
3731 return false;
3732 else if (IS_GEN(dev_priv, 4))
3733 return i9xx_plane == PLANE_C;
3734 else
3735 return i9xx_plane == PLANE_B ||
3736 i9xx_plane == PLANE_C;
3737}
3738
/*
 * Atomic check for pre-SKL primary planes: rotation restrictions,
 * clipping (no scaling supported, windowing only where the hw has
 * it), surface layout, then the precomputed DSPCNTR value.
 */
static int
i9xx_plane_check(struct intel_crtc_state *crtc_state,
		 struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	int ret;

	ret = chv_plane_check_rotation(plane_state);
	if (ret)
		return ret;

	/* NO_SCALING twice: neither down- nor up-scaling is possible */
	ret = drm_atomic_helper_check_plane_state(&plane_state->base,
						  &crtc_state->base,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  i9xx_plane_has_windowing(plane),
						  true);
	if (ret)
		return ret;

	ret = i9xx_check_plane_surface(plane_state);
	if (ret)
		return ret;

	/* invisible planes need no further validation */
	if (!plane_state->base.visible)
		return 0;

	ret = intel_plane_check_src_coordinates(plane_state);
	if (ret)
		return ret;

	/* cache the control register value for the commit phase */
	plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);

	return 0;
}
3774
/*
 * Program a pre-SKL primary plane from the precomputed plane state.
 * Holds the uncore lock so the register writes for this plane are
 * not interleaved with other display register access.
 */
static void i9xx_update_plane(struct intel_plane *plane,
			      const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	u32 linear_offset;
	int x = plane_state->color_plane[0].x;
	int y = plane_state->color_plane[0].y;
	int crtc_x = plane_state->base.dst.x1;
	int crtc_y = plane_state->base.dst.y1;
	int crtc_w = drm_rect_width(&plane_state->base.dst);
	int crtc_h = drm_rect_height(&plane_state->base.dst);
	unsigned long irqflags;
	u32 dspaddr_offset;
	u32 dspcntr;

	dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);

	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);

	/* gen4+ uses the aligned surface offset, older hw a linear offset */
	if (INTEL_GEN(dev_priv) >= 4)
		dspaddr_offset = plane_state->color_plane[0].offset;
	else
		dspaddr_offset = linear_offset;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);

	if (INTEL_GEN(dev_priv) < 4) {
		/*
		 * Pre-gen4 planes support windowing: program position
		 * and size (size registers are minus-one encoded).
		 */
		I915_WRITE_FW(DSPPOS(i9xx_plane), (crtc_y << 16) | crtc_x);
		I915_WRITE_FW(DSPSIZE(i9xx_plane),
			      ((crtc_h - 1) << 16) | (crtc_w - 1));
	} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
		/* CHV pipe B primary plane also supports windowing */
		I915_WRITE_FW(PRIMPOS(i9xx_plane), (crtc_y << 16) | crtc_x);
		I915_WRITE_FW(PRIMSIZE(i9xx_plane),
			      ((crtc_h - 1) << 16) | (crtc_w - 1));
		I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
	} else if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
		I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
	}

	/*
	 * DSPCNTR is written before the surface address register;
	 * NOTE(review): the write order here presumably matters for
	 * atomically latching the update — keep it as-is.
	 */
	I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
	if (INTEL_GEN(dev_priv) >= 4)
		I915_WRITE_FW(DSPSURF(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);
	else
		I915_WRITE_FW(DSPADDR(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
3845
/*
 * Disable a pre-SKL primary plane. The pipe-level DSPCNTR bits from
 * i9xx_plane_ctl_crtc() (gamma/CSC/pipe select) are kept programmed;
 * only the plane enable bit and the surface address are cleared.
 */
static void i9xx_disable_plane(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	unsigned long irqflags;
	u32 dspcntr;

	/*
	 * NOTE(review): the crtc-derived bits are deliberately written
	 * even while the plane is off — presumably some pipe features
	 * depend on them regardless of plane state; confirm before
	 * simplifying this to a plain 0 write.
	 */
	dspcntr = i9xx_plane_ctl_crtc(crtc_state);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
	/* clearing the surface/address register completes the disable */
	if (INTEL_GEN(dev_priv) >= 4)
		I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
	else
		I915_WRITE_FW(DSPADDR(i9xx_plane), 0);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
3876
/*
 * Read back whether the plane is currently enabled in hardware and
 * which pipe it is attached to. Returns false (leaving *pipe
 * untouched) when the pipe's power domain is off.
 */
static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
				    enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	intel_wakeref_t wakeref;
	bool ret;
	u32 val;

	/*
	 * NOTE(review): the power domain is derived from plane->pipe
	 * even though pre-ILK planes can be assigned to either pipe
	 * (see the DISPPLANE_SEL_PIPE readout below) — presumably fine
	 * because those platforms lack per-pipe power wells; confirm.
	 */
	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	val = I915_READ(DSPCNTR(i9xx_plane));

	ret = val & DISPLAY_PLANE_ENABLE;

	/* pre-ILK the pipe selection is stored in DSPCNTR itself */
	if (INTEL_GEN(dev_priv) >= 5)
		*pipe = plane->pipe;
	else
		*pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;

	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
3911
3912static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
3913{
3914 struct drm_device *dev = intel_crtc->base.dev;
3915 struct drm_i915_private *dev_priv = to_i915(dev);
3916
3917 I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
3918 I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
3919 I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
3920}
3921
3922
3923
3924
3925static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
3926{
3927 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
3928 const struct intel_crtc_scaler_state *scaler_state =
3929 &crtc_state->scaler_state;
3930 int i;
3931
3932
3933 for (i = 0; i < intel_crtc->num_scalers; i++) {
3934 if (!scaler_state->scalers[i].in_use)
3935 skl_detach_scaler(intel_crtc, i);
3936 }
3937}
3938
3939static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
3940 int color_plane, unsigned int rotation)
3941{
3942
3943
3944
3945
3946 if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
3947 return 64;
3948 else if (drm_rotation_90_or_270(rotation))
3949 return intel_tile_height(fb, color_plane);
3950 else
3951 return intel_tile_width_bytes(fb, color_plane);
3952}
3953
3954u32 skl_plane_stride(const struct intel_plane_state *plane_state,
3955 int color_plane)
3956{
3957 const struct drm_framebuffer *fb = plane_state->base.fb;
3958 unsigned int rotation = plane_state->base.rotation;
3959 u32 stride = plane_state->color_plane[color_plane].stride;
3960
3961 if (color_plane >= fb->format->num_planes)
3962 return 0;
3963
3964 return stride / skl_plane_stride_mult(fb, color_plane, rotation);
3965}
3966
/*
 * Translate a DRM fourcc into the PLANE_CTL format (and RGB/BGR
 * order / YUV component order) bits for SKL+ universal planes.
 * Returns 0 for an unhandled format after logging MISSING_CASE.
 */
static u32 skl_plane_ctl_format(u32 pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_C8:
		return PLANE_CTL_FORMAT_INDEXED;
	case DRM_FORMAT_RGB565:
		return PLANE_CTL_FORMAT_RGB_565;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888;
	case DRM_FORMAT_XBGR2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		return PLANE_CTL_FORMAT_XRGB_16161616F;
	case DRM_FORMAT_YUYV:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
	case DRM_FORMAT_YVYU:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
	case DRM_FORMAT_UYVY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
	case DRM_FORMAT_VYUY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
	case DRM_FORMAT_NV12:
		return PLANE_CTL_FORMAT_NV12;
	case DRM_FORMAT_P010:
		return PLANE_CTL_FORMAT_P010;
	case DRM_FORMAT_P012:
		return PLANE_CTL_FORMAT_P012;
	case DRM_FORMAT_P016:
		return PLANE_CTL_FORMAT_P016;
	case DRM_FORMAT_Y210:
		return PLANE_CTL_FORMAT_Y210;
	case DRM_FORMAT_Y212:
		return PLANE_CTL_FORMAT_Y212;
	case DRM_FORMAT_Y216:
		return PLANE_CTL_FORMAT_Y216;
	case DRM_FORMAT_XVYU2101010:
		return PLANE_CTL_FORMAT_Y410;
	case DRM_FORMAT_XVYU12_16161616:
		return PLANE_CTL_FORMAT_Y412;
	case DRM_FORMAT_XVYU16161616:
		return PLANE_CTL_FORMAT_Y416;
	default:
		MISSING_CASE(pixel_format);
	}

	return 0;
}
4024
4025static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
4026{
4027 if (!plane_state->base.fb->format->has_alpha)
4028 return PLANE_CTL_ALPHA_DISABLE;
4029
4030 switch (plane_state->base.pixel_blend_mode) {
4031 case DRM_MODE_BLEND_PIXEL_NONE:
4032 return PLANE_CTL_ALPHA_DISABLE;
4033 case DRM_MODE_BLEND_PREMULTI:
4034 return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
4035 case DRM_MODE_BLEND_COVERAGE:
4036 return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
4037 default:
4038 MISSING_CASE(plane_state->base.pixel_blend_mode);
4039 return PLANE_CTL_ALPHA_DISABLE;
4040 }
4041}
4042
4043static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
4044{
4045 if (!plane_state->base.fb->format->has_alpha)
4046 return PLANE_COLOR_ALPHA_DISABLE;
4047
4048 switch (plane_state->base.pixel_blend_mode) {
4049 case DRM_MODE_BLEND_PIXEL_NONE:
4050 return PLANE_COLOR_ALPHA_DISABLE;
4051 case DRM_MODE_BLEND_PREMULTI:
4052 return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
4053 case DRM_MODE_BLEND_COVERAGE:
4054 return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
4055 default:
4056 MISSING_CASE(plane_state->base.pixel_blend_mode);
4057 return PLANE_COLOR_ALPHA_DISABLE;
4058 }
4059}
4060
/*
 * Translate a framebuffer modifier into PLANE_CTL tiling (and CCS
 * decompression) bits. Linear maps to 0; unknown modifiers log
 * MISSING_CASE and also return 0.
 */
static u32 skl_plane_ctl_tiling(u64 fb_modifier)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		break;
	case I915_FORMAT_MOD_X_TILED:
		return PLANE_CTL_TILED_X;
	case I915_FORMAT_MOD_Y_TILED:
		return PLANE_CTL_TILED_Y;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	case I915_FORMAT_MOD_Yf_TILED:
		return PLANE_CTL_TILED_YF;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	default:
		MISSING_CASE(fb_modifier);
	}

	return 0;
}
4082
/*
 * Translate the DRM rotation property into PLANE_CTL rotation bits.
 * NOTE(review): 90 and 270 are deliberately swapped here — DRM
 * rotation and the hardware's rotation apparently count in opposite
 * directions; confirm against the hw spec before "fixing" this.
 */
static u32 skl_plane_ctl_rotate(unsigned int rotate)
{
	switch (rotate) {
	case DRM_MODE_ROTATE_0:
		break;
	case DRM_MODE_ROTATE_90:
		return PLANE_CTL_ROTATE_270;
	case DRM_MODE_ROTATE_180:
		return PLANE_CTL_ROTATE_180;
	case DRM_MODE_ROTATE_270:
		return PLANE_CTL_ROTATE_90;
	default:
		MISSING_CASE(rotate);
	}

	return 0;
}
4104
4105static u32 cnl_plane_ctl_flip(unsigned int reflect)
4106{
4107 switch (reflect) {
4108 case 0:
4109 break;
4110 case DRM_MODE_REFLECT_X:
4111 return PLANE_CTL_FLIP_HORIZONTAL;
4112 case DRM_MODE_REFLECT_Y:
4113 default:
4114 MISSING_CASE(reflect);
4115 }
4116
4117 return 0;
4118}
4119
4120u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
4121{
4122 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
4123 u32 plane_ctl = 0;
4124
4125 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
4126 return plane_ctl;
4127
4128 if (crtc_state->gamma_enable)
4129 plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
4130
4131 if (crtc_state->csc_enable)
4132 plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
4133
4134 return plane_ctl;
4135}
4136
/*
 * Compute the PLANE_CTL value for a SKL+ universal plane from the
 * plane state (format, tiling, rotation, color keying). Pipe-level
 * bits come from skl_plane_ctl_crtc().
 */
u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
		  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
	u32 plane_ctl;

	plane_ctl = PLANE_CTL_ENABLE;

	/*
	 * Pre-GLK only: alpha mode, plane gamma and the YUV->RGB CSC
	 * selection live in PLANE_CTL (on GLK/gen10+ they are in
	 * PLANE_COLOR_CTL, see glk_plane_color_ctl()).
	 */
	if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
		plane_ctl |= skl_plane_ctl_alpha(plane_state);
		plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;

		if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;

		if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
	}

	plane_ctl |= skl_plane_ctl_format(fb->format->format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
	plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);

	/* horizontal flip is only handled on gen10+ */
	if (INTEL_GEN(dev_priv) >= 10)
		plane_ctl |= cnl_plane_ctl_flip(rotation &
						DRM_MODE_REFLECT_MASK);

	if (key->flags & I915_SET_COLORKEY_DESTINATION)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
	else if (key->flags & I915_SET_COLORKEY_SOURCE)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;

	return plane_ctl;
}
4175
4176u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
4177{
4178 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
4179 u32 plane_color_ctl = 0;
4180
4181 if (INTEL_GEN(dev_priv) >= 11)
4182 return plane_color_ctl;
4183
4184 if (crtc_state->gamma_enable)
4185 plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
4186
4187 if (crtc_state->csc_enable)
4188 plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
4189
4190 return plane_color_ctl;
4191}
4192
/*
 * Compute the per-plane PLANE_COLOR_CTL value for GLK/CNL+ planes:
 * alpha mode, plane gamma disable and YUV->RGB CSC selection.
 */
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
			const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	u32 plane_color_ctl = 0;

	plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
	plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);

	/*
	 * YUV sources: non-HDR planes (per icl_is_hdr_plane()) pick a
	 * fixed-function CSC mode; HDR planes enable the programmable
	 * input CSC instead — NOTE(review): inferred from the helper
	 * name, confirm against the hw spec.
	 */
	if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
		if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
		else
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;

		if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
	} else if (fb->format->is_yuv) {
		plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
	}

	return plane_color_ctl;
}
4219
/*
 * Re-commit a previously duplicated atomic state after the hardware
 * has been reset or reinitialized. A full modeset is forced on every
 * crtc in the state. Returns the commit result (never -EDEADLK,
 * since all locks were taken up front).
 */
static int
__intel_display_resume(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	intel_modeset_setup_hw_state(dev, ctx);
	i915_redisable_vga(to_i915(dev));

	if (!state)
		return 0;

	/*
	 * Force a modeset on every crtc so the full hardware state is
	 * reprogrammed from scratch rather than fast-pathed.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		crtc_state->mode_changed = true;
	}

	/* skip intermediate watermark programming on non-GMCH hw */
	if (!HAS_GMCH(to_i915(dev)))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	/* -EDEADLK would mean a lock we should already hold */
	WARN_ON(ret == -EDEADLK);
	return ret;
}
4258
4259static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
4260{
4261 return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
4262 intel_has_gpu_reset(dev_priv));
4263}
4264
/*
 * Called before a GPU reset. When the reset will clobber the display
 * (or force_reset_modeset_test is set): flag the reset, take all
 * modeset locks, duplicate the current display state for later
 * restore, and disable all crtcs. intel_finish_reset() restores the
 * state and releases everything taken here.
 */
void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display on this platform */
	if (!i915_modparams.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* announce the modeset-during-reset and wake any waiters */
	set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
	smp_mb__after_atomic();
	wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);

	/* a pinned fb pin would deadlock against us; wedge to unbreak */
	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
		intel_gt_set_wedged(&dev_priv->gt);
	}

	/*
	 * Grab mode_config.mutex plus every modeset lock, retrying on
	 * -EDEADLK via the acquire-context backoff protocol.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}

	/*
	 * Snapshot the display state so intel_finish_reset() can
	 * restore it. NOTE(review): on the error returns below the
	 * locks and ctx stay held — intel_finish_reset()'s unlock
	 * path is responsible for dropping them.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		DRM_ERROR("Duplicating state failed with %i\n", ret);
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
		drm_atomic_state_put(state);
		return;
	}

	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}
4321
/*
 * Called after a GPU reset: restore the display state saved by
 * intel_prepare_reset() and release the locks/flags taken there.
 * No-op if intel_prepare_reset() decided the reset doesn't touch
 * the display.
 */
void intel_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* intel_prepare_reset() didn't run (reset doesn't touch display) */
	if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
		return;

	state = fetch_and_zero(&dev_priv->modeset_restore_state);
	if (!state)
		goto unlock;

	if (!gpu_reset_clobbers_display(dev_priv)) {
		/* force_reset_modeset_test path: just recommit the state */
		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display hardware was reset as well, so a full
		 * re-init of hw state is needed before restoring.
		 */
		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev);
		intel_init_clock_gating(dev_priv);

		/* re-arm hotplug interrupts before restoring the state */
		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->display.hpd_irq_setup)
			dev_priv->display.hpd_irq_setup(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);

		intel_hpd_init(dev_priv);
	}

	drm_atomic_state_put(state);
unlock:
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);

	clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
}
4372
/*
 * Set the gen11+ PIPE_CHICKEN workaround bits for this pipe.
 * Read-modify-write so other chicken bits are preserved.
 */
static void icl_set_pipe_chicken(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 tmp;

	tmp = I915_READ(PIPE_CHICKEN(pipe));

	/*
	 * Bypass per-pixel alpha in the pipe.
	 * NOTE(review): the specific display workaround this
	 * implements isn't visible from here — check the bspec/W/A
	 * database before changing.
	 */
	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;

	/*
	 * Truncate pixel rounding for framebuffer passthrough —
	 * same caveat as above regarding the underlying workaround.
	 */
	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
	I915_WRITE(PIPE_CHICKEN(pipe), tmp);
}
4396
/*
 * Apply a fastset pipe config update without a full modeset: pipe
 * source size, panel fitter state, scaler detach and (gen11+) the
 * pipe chicken bits.
 */
static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_state,
				     const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* keep the legacy mode in sync since no full modeset runs */
	crtc->base.mode = new_crtc_state->base.mode;

	/* PIPESRC is minus-one encoded: (w-1) << 16 | (h-1) */
	I915_WRITE(PIPESRC(crtc->pipe),
		   ((new_crtc_state->pipe_src_w - 1) << 16) |
		   (new_crtc_state->pipe_src_h - 1));

	/* update the panel fitter / scaler state to match */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_detach_scalers(new_crtc_state);

		if (new_crtc_state->pch_pfit.enabled)
			skylake_pfit_enable(new_crtc_state);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		if (new_crtc_state->pch_pfit.enabled)
			ironlake_pfit_enable(new_crtc_state);
		else if (old_crtc_state->pch_pfit.enabled)
			ironlake_pfit_disable(old_crtc_state);
	}

	if (INTEL_GEN(dev_priv) >= 11)
		icl_set_pipe_chicken(crtc);
}
4435
/*
 * Switch the FDI TX/RX pair from a training pattern to normal pixel
 * output once link training has completed.
 */
static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train on the CPU FDI TX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	/* and on the PCH FDI RX (CPT uses its own pattern field) */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* let the link settle before declaring normal operation */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants FS/FE error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
4476
4477
/*
 * ILK FDI link training: run training pattern 1 until bit lock, then
 * pattern 2 until symbol lock, polling FDI_RX_IIR for the results.
 * Errors are logged but not returned; callers proceed regardless.
 */
static void ironlake_fdi_link_train(struct intel_crtc *crtc,
				    const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from the pipe, so it must be running first */
	assert_pipe_enabled(dev_priv, pipe);

	/*
	 * Unmask the bit lock / symbol lock interrupt status so we
	 * can poll for the training results in FDI_RX_IIR.
	 */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX with training pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* enable the RX phase sync pointer override after FDI enable */
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	/* poll for bit lock (train 1 done); ack by writing it back */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* switch both sides to training pattern 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* poll for symbol lock (train 2 done); ack by writing it back */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}
4570
/*
 * Voltage-swing / pre-emphasis combinations tried in order during
 * SNB/IVB FDI link training (see gen6_fdi_link_train() and
 * the IVB manual training loop).
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
4577
4578
/*
 * SNB FDI link training: like ILK, but each training pattern is
 * retried across the snb_b_fdi_train_param[] voltage-swing /
 * pre-emphasis settings until lock is achieved (5 polls per
 * setting). Errors are logged but not returned.
 */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* unmask bit/symbol lock status so training can be polled */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX with pattern 1 and the initial vswing */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* start from 400mV, 0dB de-emphasis */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	/* enable PCH FDI RX with pattern 1 (CPT has its own field) */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* train 1: try each vswing/emphasis setting until bit lock */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				/* ack the status bit by writing it back */
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* switch to train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN(dev_priv, 6)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B: reset back to 400mV, 0dB for pattern 2 */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* train 2: same retry scheme, now waiting for symbol lock */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				/* ack the status bit by writing it back */
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
4710
4711
4712static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
4713 const struct intel_crtc_state *crtc_state)
4714{
4715 struct drm_device *dev = crtc->base.dev;
4716 struct drm_i915_private *dev_priv = to_i915(dev);
4717 int pipe = crtc->pipe;
4718 i915_reg_t reg;
4719 u32 temp, i, j;
4720
4721
4722
4723 reg = FDI_RX_IMR(pipe);
4724 temp = I915_READ(reg);
4725 temp &= ~FDI_RX_SYMBOL_LOCK;
4726 temp &= ~FDI_RX_BIT_LOCK;
4727 I915_WRITE(reg, temp);
4728
4729 POSTING_READ(reg);
4730 udelay(150);
4731
4732 DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
4733 I915_READ(FDI_RX_IIR(pipe)));
4734
4735
4736 for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
4737
4738 reg = FDI_TX_CTL(pipe);
4739 temp = I915_READ(reg);
4740 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
4741 temp &= ~FDI_TX_ENABLE;
4742 I915_WRITE(reg, temp);
4743
4744 reg = FDI_RX_CTL(pipe);
4745 temp = I915_READ(reg);
4746 temp &= ~FDI_LINK_TRAIN_AUTO;
4747 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4748 temp &= ~FDI_RX_ENABLE;
4749 I915_WRITE(reg, temp);
4750
4751
4752 reg = FDI_TX_CTL(pipe);
4753 temp = I915_READ(reg);
4754 temp &= ~FDI_DP_PORT_WIDTH_MASK;
4755 temp |= FDI_DP_PORT_WIDTH(