/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */
#include "display/intel_crt.h"
#include "display/intel_dp.h"

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_cdclk.h"
#include "intel_combo_phy.h"
#include "intel_csr.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dpio_phy.h"
#include "intel_hotplug.h"
#include "intel_pm.h"
#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vga.h"

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 enum i915_power_well_id power_well_id);
25
26const char *
27intel_display_power_domain_str(enum intel_display_power_domain domain)
28{
29 switch (domain) {
30 case POWER_DOMAIN_DISPLAY_CORE:
31 return "DISPLAY_CORE";
32 case POWER_DOMAIN_PIPE_A:
33 return "PIPE_A";
34 case POWER_DOMAIN_PIPE_B:
35 return "PIPE_B";
36 case POWER_DOMAIN_PIPE_C:
37 return "PIPE_C";
38 case POWER_DOMAIN_PIPE_D:
39 return "PIPE_D";
40 case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
41 return "PIPE_A_PANEL_FITTER";
42 case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
43 return "PIPE_B_PANEL_FITTER";
44 case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
45 return "PIPE_C_PANEL_FITTER";
46 case POWER_DOMAIN_PIPE_D_PANEL_FITTER:
47 return "PIPE_D_PANEL_FITTER";
48 case POWER_DOMAIN_TRANSCODER_A:
49 return "TRANSCODER_A";
50 case POWER_DOMAIN_TRANSCODER_B:
51 return "TRANSCODER_B";
52 case POWER_DOMAIN_TRANSCODER_C:
53 return "TRANSCODER_C";
54 case POWER_DOMAIN_TRANSCODER_D:
55 return "TRANSCODER_D";
56 case POWER_DOMAIN_TRANSCODER_EDP:
57 return "TRANSCODER_EDP";
58 case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
59 return "TRANSCODER_VDSC_PW2";
60 case POWER_DOMAIN_TRANSCODER_DSI_A:
61 return "TRANSCODER_DSI_A";
62 case POWER_DOMAIN_TRANSCODER_DSI_C:
63 return "TRANSCODER_DSI_C";
64 case POWER_DOMAIN_PORT_DDI_A_LANES:
65 return "PORT_DDI_A_LANES";
66 case POWER_DOMAIN_PORT_DDI_B_LANES:
67 return "PORT_DDI_B_LANES";
68 case POWER_DOMAIN_PORT_DDI_C_LANES:
69 return "PORT_DDI_C_LANES";
70 case POWER_DOMAIN_PORT_DDI_D_LANES:
71 return "PORT_DDI_D_LANES";
72 case POWER_DOMAIN_PORT_DDI_E_LANES:
73 return "PORT_DDI_E_LANES";
74 case POWER_DOMAIN_PORT_DDI_F_LANES:
75 return "PORT_DDI_F_LANES";
76 case POWER_DOMAIN_PORT_DDI_G_LANES:
77 return "PORT_DDI_G_LANES";
78 case POWER_DOMAIN_PORT_DDI_H_LANES:
79 return "PORT_DDI_H_LANES";
80 case POWER_DOMAIN_PORT_DDI_I_LANES:
81 return "PORT_DDI_I_LANES";
82 case POWER_DOMAIN_PORT_DDI_A_IO:
83 return "PORT_DDI_A_IO";
84 case POWER_DOMAIN_PORT_DDI_B_IO:
85 return "PORT_DDI_B_IO";
86 case POWER_DOMAIN_PORT_DDI_C_IO:
87 return "PORT_DDI_C_IO";
88 case POWER_DOMAIN_PORT_DDI_D_IO:
89 return "PORT_DDI_D_IO";
90 case POWER_DOMAIN_PORT_DDI_E_IO:
91 return "PORT_DDI_E_IO";
92 case POWER_DOMAIN_PORT_DDI_F_IO:
93 return "PORT_DDI_F_IO";
94 case POWER_DOMAIN_PORT_DDI_G_IO:
95 return "PORT_DDI_G_IO";
96 case POWER_DOMAIN_PORT_DDI_H_IO:
97 return "PORT_DDI_H_IO";
98 case POWER_DOMAIN_PORT_DDI_I_IO:
99 return "PORT_DDI_I_IO";
100 case POWER_DOMAIN_PORT_DSI:
101 return "PORT_DSI";
102 case POWER_DOMAIN_PORT_CRT:
103 return "PORT_CRT";
104 case POWER_DOMAIN_PORT_OTHER:
105 return "PORT_OTHER";
106 case POWER_DOMAIN_VGA:
107 return "VGA";
108 case POWER_DOMAIN_AUDIO:
109 return "AUDIO";
110 case POWER_DOMAIN_AUX_A:
111 return "AUX_A";
112 case POWER_DOMAIN_AUX_B:
113 return "AUX_B";
114 case POWER_DOMAIN_AUX_C:
115 return "AUX_C";
116 case POWER_DOMAIN_AUX_D:
117 return "AUX_D";
118 case POWER_DOMAIN_AUX_E:
119 return "AUX_E";
120 case POWER_DOMAIN_AUX_F:
121 return "AUX_F";
122 case POWER_DOMAIN_AUX_G:
123 return "AUX_G";
124 case POWER_DOMAIN_AUX_H:
125 return "AUX_H";
126 case POWER_DOMAIN_AUX_I:
127 return "AUX_I";
128 case POWER_DOMAIN_AUX_IO_A:
129 return "AUX_IO_A";
130 case POWER_DOMAIN_AUX_C_TBT:
131 return "AUX_C_TBT";
132 case POWER_DOMAIN_AUX_D_TBT:
133 return "AUX_D_TBT";
134 case POWER_DOMAIN_AUX_E_TBT:
135 return "AUX_E_TBT";
136 case POWER_DOMAIN_AUX_F_TBT:
137 return "AUX_F_TBT";
138 case POWER_DOMAIN_AUX_G_TBT:
139 return "AUX_G_TBT";
140 case POWER_DOMAIN_AUX_H_TBT:
141 return "AUX_H_TBT";
142 case POWER_DOMAIN_AUX_I_TBT:
143 return "AUX_I_TBT";
144 case POWER_DOMAIN_GMBUS:
145 return "GMBUS";
146 case POWER_DOMAIN_INIT:
147 return "INIT";
148 case POWER_DOMAIN_MODESET:
149 return "MODESET";
150 case POWER_DOMAIN_GT_IRQ:
151 return "GT_IRQ";
152 case POWER_DOMAIN_DPLL_DC_OFF:
153 return "DPLL_DC_OFF";
154 case POWER_DOMAIN_TC_COLD_OFF:
155 return "TC_COLD_OFF";
156 default:
157 MISSING_CASE(domain);
158 return "?";
159 }
160}
161
static void intel_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	drm_dbg_kms(&dev_priv->drm, "enabling %s\n", power_well->desc->name);
	power_well->desc->ops->enable(dev_priv, power_well);
	power_well->hw_enabled = true;
}

static void intel_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	drm_dbg_kms(&dev_priv->drm, "disabling %s\n", power_well->desc->name);
	power_well->hw_enabled = false;
	power_well->desc->ops->disable(dev_priv, power_well);
}

static void intel_power_well_get(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	if (!power_well->count++)
		intel_power_well_enable(dev_priv, power_well);
}

static void intel_power_well_put(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	drm_WARN(&dev_priv->drm, !power_well->count,
		 "Use count on power well %s is already zero",
		 power_well->desc->name);

	if (!--power_well->count)
		intel_power_well_disable(dev_priv, power_well);
}
195
/**
 * __intel_display_power_is_enabled - unlocked check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This is the unlocked version of intel_display_power_is_enabled() and should
 * only be used from error capture and recovery code where deadlocks are
 * possible.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_well *power_well;
	bool is_enabled;

	if (dev_priv->runtime_pm.suspended)
		return false;

	is_enabled = true;

	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
		if (power_well->desc->always_on)
			continue;

		if (!power_well->hw_enabled) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}
231
/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant power domain lock to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	ret = __intel_display_power_is_enabled(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}
263
/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when it is not needed anymore. We have 4 registers that can request the
 * power well to be enabled, and it will only be disabled if none of the
 * registers is requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask, bool has_vga)
{
	if (has_vga)
		intel_vga_reset_io_mem(dev_priv);

	if (irq_pipe_mask)
		gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
}

static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask)
{
	if (irq_pipe_mask)
		gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
}
286
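/*
 * Map an AUX power well's control index to the AUX channel it serves: combo
 * PHY AUX wells start at AUX_CH_A, Thunderbolt AUX wells at AUX_CH_C.
 */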
287#define ICL_AUX_PW_TO_CH(pw_idx) \
288 ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
289
290#define ICL_TBT_AUX_PW_TO_CH(pw_idx) \
291 ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
292
293static enum aux_ch icl_tc_phy_aux_ch(struct drm_i915_private *dev_priv,
294 struct i915_power_well *power_well)
295{
296 int pw_idx = power_well->desc->hsw.idx;
297
298 return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
299 ICL_AUX_PW_TO_CH(pw_idx);
300}
301
302static struct intel_digital_port *
303aux_ch_to_digital_port(struct drm_i915_private *dev_priv,
304 enum aux_ch aux_ch)
305{
306 struct intel_digital_port *dig_port = NULL;
307 struct intel_encoder *encoder;
308
309 for_each_intel_encoder(&dev_priv->drm, encoder) {
310
311 if (encoder->type == INTEL_OUTPUT_DP_MST)
312 continue;
313
314 dig_port = enc_to_dig_port(encoder);
315 if (!dig_port)
316 continue;
317
318 if (dig_port->aux_ch != aux_ch) {
319 dig_port = NULL;
320 continue;
321 }
322
323 break;
324 }
325
326 return dig_port;
327}
328
329static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
330 struct i915_power_well *power_well,
331 bool timeout_expected)
332{
333 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
334 int pw_idx = power_well->desc->hsw.idx;
335
336
337 if (intel_de_wait_for_set(dev_priv, regs->driver,
338 HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
339 drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",
340 power_well->desc->name);
341
342 drm_WARN_ON(&dev_priv->drm, !timeout_expected);
343
344 }
345}
346
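/*
 * Collect the request bits for a power well from all requester registers
 * into a bitmask: bit 0 = BIOS, bit 1 = driver, bit 2 = KVMR (if present),
 * bit 3 = DEBUG.
 */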
347static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
348 const struct i915_power_well_regs *regs,
349 int pw_idx)
350{
351 u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
352 u32 ret;
353
354 ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0;
355 ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0;
356 if (regs->kvmr.reg)
357 ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0;
358 ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0;
359
360 return ret;
361}
362
363static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
364 struct i915_power_well *power_well)
365{
366 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
367 int pw_idx = power_well->desc->hsw.idx;
368 bool disabled;
369 u32 reqs;

	/*
	 * Bspec doesn't require waiting for PWs to get disabled, but still do
	 * this for paranoia. The known cases where a PW will be forced on:
	 * - a KVMR request on any power well via the KVMR request register
	 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
	 *   DEBUG request registers
	 * Skip the wait in case any of the request bits are set and print a
	 * diagnostic message.
	 */
380 wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) &
381 HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
382 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
383 if (disabled)
384 return;
385
386 drm_dbg_kms(&dev_priv->drm,
387 "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
388 power_well->desc->name,
389 !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
390}
391
392static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
393 enum skl_power_gate pg)
394{
	/* Timeout 5us for PG#0, for other PGs 1us */
396 drm_WARN_ON(&dev_priv->drm,
397 intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
398 SKL_FUSE_PG_DIST_STATUS(pg), 1));
399}
400
401static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
402 struct i915_power_well *power_well)
403{
404 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
405 int pw_idx = power_well->desc->hsw.idx;
406 u32 val;
407
408 if (power_well->desc->hsw.has_fuses) {
409 enum skl_power_gate pg;
410
411 pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
412 SKL_PW_CTL_IDX_TO_PG(pw_idx);
		/*
		 * For PW1 we have to wait both for the PW0/PG0 fuse state
		 * before enabling the power well and PW1/PG1's own fuse
		 * state after the enabling. For all other power wells with
		 * fuses we only have to wait for that PW/PG's fuse state
		 * after the enabling.
		 */
420 if (pg == SKL_PG1)
421 gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
422 }
423
424 val = intel_de_read(dev_priv, regs->driver);
425 intel_de_write(dev_priv, regs->driver,
426 val | HSW_PWR_WELL_CTL_REQ(pw_idx));
427
428 hsw_wait_for_power_well_enable(dev_priv, power_well, false);
429
	/* Display WA #1178: cnl */
431 if (IS_CANNONLAKE(dev_priv) &&
432 pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
433 pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
434 u32 val;
435
436 val = intel_de_read(dev_priv, CNL_AUX_ANAOVRD1(pw_idx));
437 val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
438 intel_de_write(dev_priv, CNL_AUX_ANAOVRD1(pw_idx), val);
439 }
440
441 if (power_well->desc->hsw.has_fuses) {
442 enum skl_power_gate pg;
443
444 pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
445 SKL_PW_CTL_IDX_TO_PG(pw_idx);
446 gen9_wait_for_power_well_fuses(dev_priv, pg);
447 }
448
449 hsw_power_well_post_enable(dev_priv,
450 power_well->desc->hsw.irq_pipe_mask,
451 power_well->desc->hsw.has_vga);
452}
453
454static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
455 struct i915_power_well *power_well)
456{
457 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
458 int pw_idx = power_well->desc->hsw.idx;
459 u32 val;
460
461 hsw_power_well_pre_disable(dev_priv,
462 power_well->desc->hsw.irq_pipe_mask);
463
464 val = intel_de_read(dev_priv, regs->driver);
465 intel_de_write(dev_priv, regs->driver,
466 val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
467 hsw_wait_for_power_well_disable(dev_priv, power_well);
468}
469
470#define ICL_AUX_PW_TO_PHY(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
471
472static void
473icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
474 struct i915_power_well *power_well)
475{
476 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
477 int pw_idx = power_well->desc->hsw.idx;
478 enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
479 u32 val;
480
481 drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
482
483 val = intel_de_read(dev_priv, regs->driver);
484 intel_de_write(dev_priv, regs->driver,
485 val | HSW_PWR_WELL_CTL_REQ(pw_idx));
486
487 if (INTEL_GEN(dev_priv) < 12) {
488 val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
489 intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
490 val | ICL_LANE_ENABLE_AUX);
491 }
492
493 hsw_wait_for_power_well_enable(dev_priv, power_well, false);
494
	/* Display WA #1178: icl */
496 if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
497 !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
498 val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx));
499 val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
500 intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val);
501 }
502}
503
504static void
505icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
506 struct i915_power_well *power_well)
507{
508 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
509 int pw_idx = power_well->desc->hsw.idx;
510 enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
511 u32 val;
512
513 drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
514
515 val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
516 intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
517 val & ~ICL_LANE_ENABLE_AUX);
518
519 val = intel_de_read(dev_priv, regs->driver);
520 intel_de_write(dev_priv, regs->driver,
521 val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
522
523 hsw_wait_for_power_well_disable(dev_priv, power_well);
524}
525
526#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
527
528static u64 async_put_domains_mask(struct i915_power_domains *power_domains);
529
530static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
531 struct i915_power_well *power_well)
532{
533 int refs = hweight64(power_well->desc->domains &
534 async_put_domains_mask(&dev_priv->power_domains));
535
536 drm_WARN_ON(&dev_priv->drm, refs > power_well->count);
537
538 return refs;
539}
540
541static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
542 struct i915_power_well *power_well,
543 struct intel_digital_port *dig_port)
544{
	/* Bypass the check if all references are released asynchronously */
546 if (power_well_async_ref_count(dev_priv, power_well) ==
547 power_well->count)
548 return;
549
550 if (drm_WARN_ON(&dev_priv->drm, !dig_port))
551 return;
552
553 if (INTEL_GEN(dev_priv) == 11 && dig_port->tc_legacy_port)
554 return;
555
556 drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
557}
558
559#else
560
561static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
562 struct i915_power_well *power_well,
563 struct intel_digital_port *dig_port)
564{
565}
566
567#endif
568
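/* Map a TGL Type-C AUX power well's control index to its TC port number. */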
569#define TGL_AUX_PW_TO_TC_PORT(pw_idx) ((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)
570
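/*
 * Ask pcode to exit the TC-cold power state, retrying a few times while the
 * mailbox is busy (-EAGAIN) and allowing up to 1 ms for the exit to complete.
 */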
571static void icl_tc_cold_exit(struct drm_i915_private *i915)
572{
573 int ret, tries = 0;
574
575 while (1) {
576 ret = sandybridge_pcode_write_timeout(i915,
577 ICL_PCODE_EXIT_TCCOLD,
578 0, 250, 1);
579 if (ret != -EAGAIN || ++tries == 3)
580 break;
581 msleep(1);
582 }
583
	/* Spec states that TC cold exit can take up to 1ms to complete */
585 if (!ret)
586 msleep(1);
587
	/* TODO: turn failure into an error as soon as i915 CI updates the ICL IFWI */
589 drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" :
590 "succeeded");
591}
592
593static void
594icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
595 struct i915_power_well *power_well)
596{
597 enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
598 struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
599 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
600 bool is_tbt = power_well->desc->hsw.is_tc_tbt;
601 bool timeout_expected;
602 u32 val;
603
604 icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);
605
606 val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch));
607 val &= ~DP_AUX_CH_CTL_TBT_IO;
608 if (is_tbt)
609 val |= DP_AUX_CH_CTL_TBT_IO;
610 intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val);
611
612 val = intel_de_read(dev_priv, regs->driver);
613 intel_de_write(dev_priv, regs->driver,
614 val | HSW_PWR_WELL_CTL_REQ(power_well->desc->hsw.idx));
615
	/*
	 * An AUX timeout is expected if the TBT DP tunnel is down, or if AUX
	 * needs to be enabled on a legacy TypeC port as part of the TC-cold
	 * exit sequence.
	 */
621 timeout_expected = is_tbt;
622 if (INTEL_GEN(dev_priv) == 11 && dig_port->tc_legacy_port) {
623 icl_tc_cold_exit(dev_priv);
624 timeout_expected = true;
625 }
626
627 hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected);
628
629 if (INTEL_GEN(dev_priv) >= 12 && !is_tbt) {
630 enum tc_port tc_port;
631
632 tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx);
633 intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
634 HIP_INDEX_VAL(tc_port, 0x2));
635
636 if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port),
637 DKL_CMN_UC_DW27_UC_HEALTH, 1))
638 drm_warn(&dev_priv->drm,
639 "Timeout waiting TC uC health\n");
640 }
641}
642
643static void
644icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
645 struct i915_power_well *power_well)
646{
647 enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
648 struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
649
650 icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);
651
652 hsw_power_well_disable(dev_priv, power_well);
653}
654
655static void
656icl_aux_power_well_enable(struct drm_i915_private *dev_priv,
657 struct i915_power_well *power_well)
658{
659 int pw_idx = power_well->desc->hsw.idx;
660 enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
661 bool is_tbt = power_well->desc->hsw.is_tc_tbt;
662
663 if (is_tbt || intel_phy_is_tc(dev_priv, phy))
664 return icl_tc_phy_aux_power_well_enable(dev_priv, power_well);
665 else if (IS_ICELAKE(dev_priv))
666 return icl_combo_phy_aux_power_well_enable(dev_priv,
667 power_well);
668 else
669 return hsw_power_well_enable(dev_priv, power_well);
670}
671
672static void
673icl_aux_power_well_disable(struct drm_i915_private *dev_priv,
674 struct i915_power_well *power_well)
675{
676 int pw_idx = power_well->desc->hsw.idx;
677 enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
678 bool is_tbt = power_well->desc->hsw.is_tc_tbt;
679
680 if (is_tbt || intel_phy_is_tc(dev_priv, phy))
681 return icl_tc_phy_aux_power_well_disable(dev_priv, power_well);
682 else if (IS_ICELAKE(dev_priv))
683 return icl_combo_phy_aux_power_well_disable(dev_priv,
684 power_well);
685 else
686 return hsw_power_well_disable(dev_priv, power_well);
687}
688
/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
694static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
695 struct i915_power_well *power_well)
696{
697 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
698 enum i915_power_well_id id = power_well->desc->id;
699 int pw_idx = power_well->desc->hsw.idx;
700 u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
701 HSW_PWR_WELL_CTL_STATE(pw_idx);
702 u32 val;
703
704 val = intel_de_read(dev_priv, regs->driver);

	/*
	 * On GEN9 big core due to a DMC bug the driver's request bits for PW1
	 * and the MISC_IO PW will not be restored, so check instead for the
	 * BIOS's own request bits, which are forced-on for these power wells
	 * when exiting DC5/6.
	 */
712 if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
713 (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
714 val |= intel_de_read(dev_priv, regs->bios);
715
716 return (val & mask) == mask;
717}
718
719static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
720{
721 drm_WARN_ONCE(&dev_priv->drm,
722 (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9),
723 "DC9 already programmed to be enabled.\n");
724 drm_WARN_ONCE(&dev_priv->drm,
725 intel_de_read(dev_priv, DC_STATE_EN) &
726 DC_STATE_EN_UPTO_DC5,
727 "DC5 still not disabled to enable DC9.\n");
728 drm_WARN_ONCE(&dev_priv->drm,
729 intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) &
730 HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
731 "Power well 2 on.\n");
732 drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
733 "Interrupts not disabled yet.\n");
734
	/*
	 * TODO: check for the following to verify the conditions to enter DC9
	 * state are satisfied:
	 * 1] Check relevant display engine registers to verify if mode set
	 * disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
742}
743
744static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
745{
746 drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
747 "Interrupts not disabled yet.\n");
748 drm_WARN_ONCE(&dev_priv->drm,
749 intel_de_read(dev_priv, DC_STATE_EN) &
750 DC_STATE_EN_UPTO_DC5,
751 "DC5 still not disabled.\n");
752
	/*
	 * TODO: check for the following to verify DC9 state was indeed
	 * entered before programming to disable it:
	 * 1] Check relevant display engine registers to verify if mode set
	 * disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
760}
761
762static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
763 u32 state)
764{
765 int rewrites = 0;
766 int rereads = 0;
767 u32 v;
768
769 intel_de_write(dev_priv, DC_STATE_EN, state);
770
	/*
	 * It has been observed that disabling the dc6 state sometimes
	 * doesn't stick and dmc keeps returning the old value. Make sure
	 * the write really sticks enough times and also force a rewrite
	 * until we are confident that state is exactly what we want.
	 */
776 do {
777 v = intel_de_read(dev_priv, DC_STATE_EN);
778
779 if (v != state) {
780 intel_de_write(dev_priv, DC_STATE_EN, state);
781 rewrites++;
782 rereads = 0;
783 } else if (rereads++ > 5) {
784 break;
785 }
786
787 } while (rewrites < 100);
788
789 if (v != state)
790 drm_err(&dev_priv->drm,
791 "Writing dc state to 0x%x failed, now 0x%x\n",
792 state, v);
793
	/* Most of the time one retry is enough, avoid spamming the log */
795 if (rewrites > 1)
796 drm_dbg_kms(&dev_priv->drm,
797 "Rewrote dc state to 0x%x %d times\n",
798 state, rewrites);
799}
800
801static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
802{
803 u32 mask;
804
805 mask = DC_STATE_EN_UPTO_DC5;
806
807 if (INTEL_GEN(dev_priv) >= 12)
808 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6
809 | DC_STATE_EN_DC9;
810 else if (IS_GEN(dev_priv, 11))
811 mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
812 else if (IS_GEN9_LP(dev_priv))
813 mask |= DC_STATE_EN_DC9;
814 else
815 mask |= DC_STATE_EN_UPTO_DC6;
816
817 return mask;
818}
819
820static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
821{
822 u32 val;
823
824 val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv);
825
826 drm_dbg_kms(&dev_priv->drm,
827 "Resetting DC state tracking from %02x to %02x\n",
828 dev_priv->csr.dc_state, val);
829 dev_priv->csr.dc_state = val;
830}
831
/**
 * gen9_set_dc_state - set target display C state
 * @dev_priv: i915 device instance
 * @state: target DC state
 * - DC_STATE_DISABLE
 * - DC_STATE_EN_UPTO_DC5
 * - DC_STATE_EN_UPTO_DC6
 * - DC_STATE_EN_DC9
 *
 * Signal to DMC firmware/HW the target DC power state passed in @state. The
 * firmware/HW can turn off individual display clocks and power rails when
 * entering a deeper DC power state (higher in number) and turns these back
 * on when exiting that state.
 *
 * The requested state is clamped to the platform's allowed DC mask and any
 * mismatch with the software-tracked dc_state is reported.
 */
855static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
856{
857 u32 val;
858 u32 mask;
859
860 if (drm_WARN_ON_ONCE(&dev_priv->drm,
861 state & ~dev_priv->csr.allowed_dc_mask))
862 state &= dev_priv->csr.allowed_dc_mask;
863
864 val = intel_de_read(dev_priv, DC_STATE_EN);
865 mask = gen9_dc_mask(dev_priv);
866 drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n",
867 val & mask, state);
868
	/* Check if DMC is ignoring our DC state requests */
870 if ((val & mask) != dev_priv->csr.dc_state)
871 drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
872 dev_priv->csr.dc_state, val & mask);
873
874 val &= ~mask;
875 val |= state;
876
877 gen9_write_dc_state(dev_priv, val);
878
879 dev_priv->csr.dc_state = val & mask;
880}
881
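/*
 * Clamp the requested DC state to one the platform supports: walk down the
 * list DC6 -> DC5 -> DC3CO -> disabled until a state present in the allowed
 * DC mask is found.
 */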
882static u32
883sanitize_target_dc_state(struct drm_i915_private *dev_priv,
884 u32 target_dc_state)
885{
886 u32 states[] = {
887 DC_STATE_EN_UPTO_DC6,
888 DC_STATE_EN_UPTO_DC5,
889 DC_STATE_EN_DC3CO,
890 DC_STATE_DISABLE,
891 };
892 int i;
893
894 for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
895 if (target_dc_state != states[i])
896 continue;
897
898 if (dev_priv->csr.allowed_dc_mask & target_dc_state)
899 break;
900
901 target_dc_state = states[i + 1];
902 }
903
904 return target_dc_state;
905}
906
907static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
908{
909 drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n");
910 gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO);
911}
912
913static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
914{
915 u32 val;
916
917 drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n");
918 val = intel_de_read(dev_priv, DC_STATE_EN);
919 val &= ~DC_STATE_DC3CO_STATUS;
920 intel_de_write(dev_priv, DC_STATE_EN, val);
921 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
	/*
	 * Delay of 200us for DC3CO exit time, per Bspec 49196.
	 */
925 usleep_range(200, 210);
926}
927
928static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
929{
930 assert_can_enable_dc9(dev_priv);
931
932 drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n");
	/*
	 * Power sequencer reset is not needed on
	 * platforms with South Display Engine on PCH,
	 * because PPS registers are always on.
	 */
938 if (!HAS_PCH_SPLIT(dev_priv))
939 intel_power_sequencer_reset(dev_priv);
940 gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
941}
942
943static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
944{
945 assert_can_disable_dc9(dev_priv);
946
947 drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n");
948
949 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
950
951 intel_pps_unlock_regs_wa(dev_priv);
952}
953
954static void assert_csr_loaded(struct drm_i915_private *dev_priv)
955{
956 drm_WARN_ONCE(&dev_priv->drm,
957 !intel_de_read(dev_priv, CSR_PROGRAM(0)),
958 "CSR program storage start is NULL\n");
959 drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, CSR_SSP_BASE),
960 "CSR SSP Base Not fine\n");
961 drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, CSR_HTP_SKL),
962 "CSR HTP Not fine\n");
963}
964
965static struct i915_power_well *
966lookup_power_well(struct drm_i915_private *dev_priv,
967 enum i915_power_well_id power_well_id)
968{
969 struct i915_power_well *power_well;
970
971 for_each_power_well(dev_priv, power_well)
972 if (power_well->desc->id == power_well_id)
973 return power_well;
974
	/*
	 * It's not feasible to add error checking code to the callers since
	 * this condition really shouldn't happen and it doesn't even make sense
	 * to abort things like display initialization sequences. Just return
	 * the first power well and hope the WARN gets reported so we can fix
	 * our driver.
	 */
982 drm_WARN(&dev_priv->drm, 1,
983 "Power well %d not defined for this platform\n",
984 power_well_id);
985 return &dev_priv->power_domains.power_wells[0];
986}
987
/**
 * intel_display_power_set_target_dc_state - Set target dc state.
 * @dev_priv: i915 device
 * @state: state which needs to be set as target_dc_state.
 *
 * This function sets the "DC off" power well target_dc_state;
 * based upon this target_dc_state, the "DC off" power well will
 * enable the desired DC state.
 */
997void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
998 u32 state)
999{
1000 struct i915_power_well *power_well;
1001 bool dc_off_enabled;
1002 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1003
1004 mutex_lock(&power_domains->lock);
1005 power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);
1006
1007 if (drm_WARN_ON(&dev_priv->drm, !power_well))
1008 goto unlock;
1009
1010 state = sanitize_target_dc_state(dev_priv, state);
1011
1012 if (state == dev_priv->csr.target_dc_state)
1013 goto unlock;
1014
1015 dc_off_enabled = power_well->desc->ops->is_enabled(dev_priv,
1016 power_well);
	/*
	 * If the DC off power well is disabled, toggle it (enable and
	 * disable) so that the new target DC state takes effect.
	 */
1021 if (!dc_off_enabled)
1022 power_well->desc->ops->enable(dev_priv, power_well);
1023
1024 dev_priv->csr.target_dc_state = state;
1025
1026 if (!dc_off_enabled)
1027 power_well->desc->ops->disable(dev_priv, power_well);
1028
1029unlock:
1030 mutex_unlock(&power_domains->lock);
1031}
1032
1033static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
1034{
1035 enum i915_power_well_id high_pg;
1036
	/* Power wells at this level and above must be disabled for DC5 entry */
1038 if (INTEL_GEN(dev_priv) >= 12)
1039 high_pg = ICL_DISP_PW_3;
1040 else
1041 high_pg = SKL_DISP_PW_2;
1042
1043 drm_WARN_ONCE(&dev_priv->drm,
1044 intel_display_power_well_is_enabled(dev_priv, high_pg),
1045 "Power wells above platform's DC5 limit still enabled.\n");
1046
1047 drm_WARN_ONCE(&dev_priv->drm,
1048 (intel_de_read(dev_priv, DC_STATE_EN) &
1049 DC_STATE_EN_UPTO_DC5),
1050 "DC5 already programmed to be enabled.\n");
1051 assert_rpm_wakelock_held(&dev_priv->runtime_pm);
1052
1053 assert_csr_loaded(dev_priv);
1054}
1055
1056static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
1057{
1058 assert_can_enable_dc5(dev_priv);
1059
1060 drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n");
1061
	/* Wa Display #1183: skl,kbl,cfl */
1063 if (IS_GEN9_BC(dev_priv))
1064 intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
1065 intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
1066
1067 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
1068}
1069
1070static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
1071{
1072 drm_WARN_ONCE(&dev_priv->drm,
1073 intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
1074 "Backlight is not disabled.\n");
1075 drm_WARN_ONCE(&dev_priv->drm,
1076 (intel_de_read(dev_priv, DC_STATE_EN) &
1077 DC_STATE_EN_UPTO_DC6),
1078 "DC6 already programmed to be enabled.\n");
1079
1080 assert_csr_loaded(dev_priv);
1081}
1082
1083static void skl_enable_dc6(struct drm_i915_private *dev_priv)
1084{
1085 assert_can_enable_dc6(dev_priv);
1086
1087 drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n");
1088
	/* Wa Display #1183: skl,kbl,cfl */
1090 if (IS_GEN9_BC(dev_priv))
1091 intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
1092 intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
1093
1094 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
1095}
1096
1097static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
1098 struct i915_power_well *power_well)
1099{
1100 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
1101 int pw_idx = power_well->desc->hsw.idx;
1102 u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
1103 u32 bios_req = intel_de_read(dev_priv, regs->bios);
1104
	/* Take over the request bit if set by BIOS. */
1106 if (bios_req & mask) {
1107 u32 drv_req = intel_de_read(dev_priv, regs->driver);
1108
1109 if (!(drv_req & mask))
1110 intel_de_write(dev_priv, regs->driver, drv_req | mask);
1111 intel_de_write(dev_priv, regs->bios, bios_req & ~mask);
1112 }
1113}
1114
1115static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1116 struct i915_power_well *power_well)
1117{
1118 bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
1119}
1120
1121static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1122 struct i915_power_well *power_well)
1123{
1124 bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
1125}
1126
1127static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
1128 struct i915_power_well *power_well)
1129{
1130 return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
1131}
1132
1133static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
1134{
1135 struct i915_power_well *power_well;
1136
1137 power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
1138 if (power_well->count > 0)
1139 bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
1140
1141 power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1142 if (power_well->count > 0)
1143 bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
1144
1145 if (IS_GEMINILAKE(dev_priv)) {
1146 power_well = lookup_power_well(dev_priv,
1147 GLK_DISP_PW_DPIO_CMN_C);
1148 if (power_well->count > 0)
1149 bxt_ddi_phy_verify_state(dev_priv,
1150 power_well->desc->bxt.phy);
1151 }
1152}
1153
1154static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
1155 struct i915_power_well *power_well)
1156{
1157 return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
1158 (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
1159}
1160
1161static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
1162{
1163 u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
1164 u8 enabled_dbuf_slices = dev_priv->enabled_dbuf_slices_mask;
1165
1166 drm_WARN(&dev_priv->drm,
1167 hw_enabled_dbuf_slices != enabled_dbuf_slices,
1168 "Unexpected DBuf power power state (0x%08x, expected 0x%08x)\n",
1169 hw_enabled_dbuf_slices,
1170 enabled_dbuf_slices);
1171}
1172
1173static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
1174{
1175 struct intel_cdclk_config cdclk_config = {};
1176
1177 if (dev_priv->csr.target_dc_state == DC_STATE_EN_DC3CO) {
1178 tgl_disable_dc3co(dev_priv);
1179 return;
1180 }
1181
1182 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
1183
1184 dev_priv->display.get_cdclk(dev_priv, &cdclk_config);
1185
1186 drm_WARN_ON(&dev_priv->drm,
1187 intel_cdclk_needs_modeset(&dev_priv->cdclk.hw,
1188 &cdclk_config));
1189
1190 gen9_assert_dbuf_enabled(dev_priv);
1191
1192 if (IS_GEN9_LP(dev_priv))
1193 bxt_verify_ddi_phy_power_wells(dev_priv);
1194
1195 if (INTEL_GEN(dev_priv) >= 11)
		/*
		 * DMC retains HW context only for port A, the other combo
		 * PHY's HW context for port B is lost after DC transitions,
		 * so we need to restore the GRC registers.
		 */
1201 intel_combo_phy_init(dev_priv);
1202}
1203
1204static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
1205 struct i915_power_well *power_well)
1206{
1207 gen9_disable_dc_states(dev_priv);
1208}
1209
1210static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
1211 struct i915_power_well *power_well)
1212{
1213 if (!dev_priv->csr.dmc_payload)
1214 return;
1215
1216 switch (dev_priv->csr.target_dc_state) {
1217 case DC_STATE_EN_DC3CO:
1218 tgl_enable_dc3co(dev_priv);
1219 break;
1220 case DC_STATE_EN_UPTO_DC6:
1221 skl_enable_dc6(dev_priv);
1222 break;
1223 case DC_STATE_EN_UPTO_DC5:
1224 gen9_enable_dc5(dev_priv);
1225 break;
1226 }
1227}
1228
1229static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
1230 struct i915_power_well *power_well)
1231{
1232}
1233
1234static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
1235 struct i915_power_well *power_well)
1236{
1237}
1238
1239static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
1240 struct i915_power_well *power_well)
1241{
1242 return true;
1243}
1244
1245static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
1246 struct i915_power_well *power_well)
1247{
1248 if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
1249 i830_enable_pipe(dev_priv, PIPE_A);
1250 if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
1251 i830_enable_pipe(dev_priv, PIPE_B);
1252}
1253
1254static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
1255 struct i915_power_well *power_well)
1256{
1257 i830_disable_pipe(dev_priv, PIPE_B);
1258 i830_disable_pipe(dev_priv, PIPE_A);
1259}
1260
1261static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
1262 struct i915_power_well *power_well)
1263{
1264 return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
1265 intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
1266}
1267
1268static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
1269 struct i915_power_well *power_well)
1270{
1271 if (power_well->count > 0)
1272 i830_pipes_power_well_enable(dev_priv, power_well);
1273 else
1274 i830_pipes_power_well_disable(dev_priv, power_well);
1275}
1276
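/*
 * Toggle a VLV/CHV power well via the Punit PWRGT control register and poll
 * the status register until the requested power state is reflected there.
 */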
1277static void vlv_set_power_well(struct drm_i915_private *dev_priv,
1278 struct i915_power_well *power_well, bool enable)
1279{
1280 int pw_idx = power_well->desc->vlv.idx;
1281 u32 mask;
1282 u32 state;
1283 u32 ctrl;
1284
1285 mask = PUNIT_PWRGT_MASK(pw_idx);
1286 state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
1287 PUNIT_PWRGT_PWR_GATE(pw_idx);
1288
1289 vlv_punit_get(dev_priv);
1290
1291#define COND \
1292 ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
1293
1294 if (COND)
1295 goto out;
1296
1297 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
1298 ctrl &= ~mask;
1299 ctrl |= state;
1300 vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
1301
1302 if (wait_for(COND, 100))
1303 drm_err(&dev_priv->drm,
1304 "timeout setting power well state %08x (%08x)\n",
1305 state,
1306 vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
1307
1308#undef COND
1309
1310out:
1311 vlv_punit_put(dev_priv);
1312}
1313
1314static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
1315 struct i915_power_well *power_well)
1316{
1317 vlv_set_power_well(dev_priv, power_well, true);
1318}
1319
1320static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
1321 struct i915_power_well *power_well)
1322{
1323 vlv_set_power_well(dev_priv, power_well, false);
1324}
1325
1326static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
1327 struct i915_power_well *power_well)
1328{
1329 int pw_idx = power_well->desc->vlv.idx;
1330 bool enabled = false;
1331 u32 mask;
1332 u32 state;
1333 u32 ctrl;
1334
1335 mask = PUNIT_PWRGT_MASK(pw_idx);
1336 ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);
1337
1338 vlv_punit_get(dev_priv);
1339
1340 state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
1345 drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
1346 state != PUNIT_PWRGT_PWR_GATE(pw_idx));
1347 if (state == ctrl)
1348 enabled = true;
1349
	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
1354 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
1355 drm_WARN_ON(&dev_priv->drm, ctrl != state);
1356
1357 vlv_punit_put(dev_priv);
1358
1359 return enabled;
1360}
1361
1362static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
1363{
1364 u32 val;
1365
	/*
	 * On driver load, a pipe may be active and driving a DSI display.
	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
	 * (and never recovering) in this case. intel_dsi_post_disable() will
	 * clear it when we turn off the display.
	 */
1372 val = intel_de_read(dev_priv, DSPCLK_GATE_D);
1373 val &= DPOUNIT_CLOCK_GATE_DISABLE;
1374 val |= VRHUNIT_CLOCK_GATE_DISABLE;
1375 intel_de_write(dev_priv, DSPCLK_GATE_D, val);
1376
	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
1380 intel_de_write(dev_priv, MI_ARB_VLV,
1381 MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
1382 intel_de_write(dev_priv, CBR1_VLV, 0);
1383
1384 drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
1385 intel_de_write(dev_priv, RAWCLK_FREQ_VLV,
1386 DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq,
1387 1000));
1388}
1389
1390static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
1391{
1392 struct intel_encoder *encoder;
1393 enum pipe pipe;
1394
	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
1403 for_each_pipe(dev_priv, pipe) {
1404 u32 val = intel_de_read(dev_priv, DPLL(pipe));
1405
1406 val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1407 if (pipe != PIPE_A)
1408 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1409
1410 intel_de_write(dev_priv, DPLL(pipe), val);
1411 }
1412
1413 vlv_init_display_clock_gating(dev_priv);
1414
1415 spin_lock_irq(&dev_priv->irq_lock);
1416 valleyview_enable_display_irqs(dev_priv);
1417 spin_unlock_irq(&dev_priv->irq_lock);
1418
	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
1423 if (dev_priv->power_domains.initializing)
1424 return;
1425
1426 intel_hpd_init(dev_priv);
1427
	/* Re-enable the ADPA, if we have one */
1429 for_each_intel_encoder(&dev_priv->drm, encoder) {
1430 if (encoder->type == INTEL_OUTPUT_ANALOG)
1431 intel_crt_reset(&encoder->base);
1432 }
1433
1434 intel_vga_redisable_power_on(dev_priv);
1435
1436 intel_pps_unlock_regs_wa(dev_priv);
1437}
1438
1439static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
1440{
1441 spin_lock_irq(&dev_priv->irq_lock);
1442 valleyview_disable_display_irqs(dev_priv);
1443 spin_unlock_irq(&dev_priv->irq_lock);
1444
	/* make sure we're done processing display irqs */
1446 intel_synchronize_irq(dev_priv);
1447
1448 intel_power_sequencer_reset(dev_priv);
1449
	/* Prevent us from re-enabling polling on accident in late suspend */
1451 if (!dev_priv->drm.dev->power.is_suspended)
1452 intel_hpd_poll_init(dev_priv);
1453}
1454
1455static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
1456 struct i915_power_well *power_well)
1457{
1458 vlv_set_power_well(dev_priv, power_well, true);
1459
1460 vlv_display_power_well_init(dev_priv);
1461}
1462
1463static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
1464 struct i915_power_well *power_well)
1465{
1466 vlv_display_power_well_deinit(dev_priv);
1467
1468 vlv_set_power_well(dev_priv, power_well, false);
1469}
1470
1471static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1472 struct i915_power_well *power_well)
1473{
1474
1475 udelay(1);
1476
1477 vlv_set_power_well(dev_priv, power_well, true);
1478
	/*
	 * De-assert the common lane reset (cmn_reset) for the PHY now that
	 * the common lane power well is back up.
	 */
1490 intel_de_write(dev_priv, DPIO_CTL,
1491 intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST);
1492}
1493
1494static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1495 struct i915_power_well *power_well)
1496{
1497 enum pipe pipe;
1498
1499 for_each_pipe(dev_priv, pipe)
1500 assert_pll_disabled(dev_priv, pipe);
1501
	/* Assert common reset */
1503 intel_de_write(dev_priv, DPIO_CTL,
1504 intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST);
1505
1506 vlv_set_power_well(dev_priv, power_well, false);
1507}
1508
1509#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
1510
1511#define BITS_SET(val, bits) (((val) & (bits)) == (bits))
1512
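/*
 * Reconstruct the PHY_STATUS value we expect from the current PHY_CONTROL
 * value and the state of the common lane power wells, then verify that the
 * hardware reports a matching status.
 */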
1513static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
1514{
1515 struct i915_power_well *cmn_bc =
1516 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1517 struct i915_power_well *cmn_d =
1518 lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
1519 u32 phy_control = dev_priv->chv_phy_control;
1520 u32 phy_status = 0;
1521 u32 phy_status_mask = 0xffffffff;
1522
	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
1530 if (!dev_priv->chv_phy_assert[DPIO_PHY0])
1531 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
1532 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
1533 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
1534 PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
1535 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
1536 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
1537
1538 if (!dev_priv->chv_phy_assert[DPIO_PHY1])
1539 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
1540 PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
1541 PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
1542
1543 if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
1544 phy_status |= PHY_POWERGOOD(DPIO_PHY0);
1545
		/* this assumes override is only used to enable lanes */
1547 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
1548 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);
1549
1550 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
1551 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);
1552
		/* CL1 is on whenever anything is on in either channel */
1554 if (BITS_SET(phy_control,
1555 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
1556 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
1557 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);
1558
		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
1564 if (BITS_SET(phy_control,
1565 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
1566 (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
1567 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
1568
1569 if (BITS_SET(phy_control,
1570 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
1571 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
1572 if (BITS_SET(phy_control,
1573 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
1574 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);
1575
1576 if (BITS_SET(phy_control,
1577 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
1578 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
1579 if (BITS_SET(phy_control,
1580 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
1581 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
1582 }
1583
1584 if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
1585 phy_status |= PHY_POWERGOOD(DPIO_PHY1);
1586
		/* this assumes override is only used to enable lanes */
1588 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
1589 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);
1590
1591 if (BITS_SET(phy_control,
1592 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
1593 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);
1594
1595 if (BITS_SET(phy_control,
1596 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
1597 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
1598 if (BITS_SET(phy_control,
1599 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
1600 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
1601 }
1602
1603 phy_status &= phy_status_mask;
1604
	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
1609 if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
1610 phy_status_mask, phy_status, 10))
1611 drm_err(&dev_priv->drm,
1612 "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
1613 intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask,
1614 phy_status, dev_priv->chv_phy_control);
1615}
1616
1617#undef BITS_SET
1618
1619static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1620 struct i915_power_well *power_well)
1621{
1622 enum dpio_phy phy;
1623 enum pipe pipe;
1624 u32 tmp;
1625
1626 drm_WARN_ON_ONCE(&dev_priv->drm,
1627 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
1628 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
1629
1630 if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1631 pipe = PIPE_A;
1632 phy = DPIO_PHY0;
1633 } else {
1634 pipe = PIPE_C;
1635 phy = DPIO_PHY1;
1636 }
1637
1638
1639 udelay(1);
1640 vlv_set_power_well(dev_priv, power_well, true);
1641
1642
1643 if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
1644 PHY_POWERGOOD(phy), 1))
1645 drm_err(&dev_priv->drm, "Display PHY %d is not power up\n",
1646 phy);
1647
1648 vlv_dpio_get(dev_priv);
1649
	/* Enable dynamic power down */
1651 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
1652 tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
1653 DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
1654 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
1655
1656 if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1657 tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
1658 tmp |= DPIO_DYNPWRDOWNEN_CH1;
1659 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
1660 } else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist here.
		 */
1666 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
1667 tmp |= DPIO_CL2_LDOFUSE_PWRENB;
1668 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
1669 }
1670
1671 vlv_dpio_put(dev_priv);
1672
1673 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
1674 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1675 dev_priv->chv_phy_control);
1676
1677 drm_dbg_kms(&dev_priv->drm,
1678 "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1679 phy, dev_priv->chv_phy_control);
1680
1681 assert_chv_phy_status(dev_priv);
1682}
1683
1684static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1685 struct i915_power_well *power_well)
1686{
1687 enum dpio_phy phy;
1688
1689 drm_WARN_ON_ONCE(&dev_priv->drm,
1690 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
1691 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
1692
1693 if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1694 phy = DPIO_PHY0;
1695 assert_pll_disabled(dev_priv, PIPE_A);
1696 assert_pll_disabled(dev_priv, PIPE_B);
1697 } else {
1698 phy = DPIO_PHY1;
1699 assert_pll_disabled(dev_priv, PIPE_C);
1700 }
1701
1702 dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
1703 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1704 dev_priv->chv_phy_control);
1705
1706 vlv_set_power_well(dev_priv, power_well, false);
1707
1708 drm_dbg_kms(&dev_priv->drm,
1709 "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1710 phy, dev_priv->chv_phy_control);
1711
	/* PHY is fully reset now, so we can enable the PHY state asserts */
1713 dev_priv->chv_phy_assert[phy] = true;
1714
1715 assert_chv_phy_status(dev_priv);
1716}
1717
1718static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1719 enum dpio_channel ch, bool override, unsigned int mask)
1720{
1721 enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
1722 u32 reg, val, expected, actual;
1723
	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
1731 if (!dev_priv->chv_phy_assert[phy])
1732 return;
1733
1734 if (ch == DPIO_CH0)
1735 reg = _CHV_CMN_DW0_CH0;
1736 else
1737 reg = _CHV_CMN_DW6_CH1;
1738
1739 vlv_dpio_get(dev_priv);
1740 val = vlv_dpio_read(dev_priv, pipe, reg);
1741 vlv_dpio_put(dev_priv);
1742
	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
1748 if (!override || mask == 0xf) {
1749 expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

		/*
		 * If CH1 common lane is not active anymore (e.g. for pipe B
		 * DPLL) the entire channel will shut down, which causes the
		 * common lane registers to read as 0. That means we can't
		 * actually check the lane power down status bits, but as the
		 * entire register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
1759 if (ch == DPIO_CH1 && val == 0)
1760 expected = 0;
1761 } else if (mask != 0x0) {
1762 expected = DPIO_ANYDL_POWERDOWN;
1763 } else {
1764 expected = 0;
1765 }
1766
1767 if (ch == DPIO_CH0)
1768 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
1769 else
1770 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
1771 actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1772
1773 drm_WARN(&dev_priv->drm, actual != expected,
1774 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
1775 !!(actual & DPIO_ALLDL_POWERDOWN),
1776 !!(actual & DPIO_ANYDL_POWERDOWN),
1777 !!(expected & DPIO_ALLDL_POWERDOWN),
1778 !!(expected & DPIO_ANYDL_POWERDOWN),
1779 reg, val);
1780}
1781
1782bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1783 enum dpio_channel ch, bool override)
1784{
1785 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1786 bool was_override;
1787
1788 mutex_lock(&power_domains->lock);
1789
1790 was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1791
1792 if (override == was_override)
1793 goto out;
1794
1795 if (override)
1796 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1797 else
1798 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1799
1800 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1801 dev_priv->chv_phy_control);
1802
1803 drm_dbg_kms(&dev_priv->drm,
1804 "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
1805 phy, ch, dev_priv->chv_phy_control);
1806
1807 assert_chv_phy_status(dev_priv);
1808
1809out:
1810 mutex_unlock(&power_domains->lock);
1811
1812 return was_override;
1813}
1814
1815void chv_phy_powergate_lanes(struct intel_encoder *encoder,
1816 bool override, unsigned int mask)
1817{
1818 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1819 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1820 enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(encoder));
1821 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(encoder));
1822
1823 mutex_lock(&power_domains->lock);
1824
1825 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
1826 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
1827
1828 if (override)
1829 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1830 else
1831 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1832
1833 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1834 dev_priv->chv_phy_control);
1835
1836 drm_dbg_kms(&dev_priv->drm,
1837 "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
1838 phy, ch, mask, dev_priv->chv_phy_control);
1839
1840 assert_chv_phy_status(dev_priv);
1841
1842 assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
1843
1844 mutex_unlock(&power_domains->lock);
1845}
1846
1847static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
1848 struct i915_power_well *power_well)
1849{
1850 enum pipe pipe = PIPE_A;
1851 bool enabled;
1852 u32 state, ctrl;
1853
1854 vlv_punit_get(dev_priv);
1855
1856 state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
1861 drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) &&
1862 state != DP_SSS_PWR_GATE(pipe));
1863 enabled = state == DP_SSS_PWR_ON(pipe);
1864
	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
1869 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
1870 drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state);
1871
1872 vlv_punit_put(dev_priv);
1873
1874 return enabled;
1875}
1876
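/*
 * Toggle the CHV display/pipe-A power well through the Punit DSPSSPM
 * register and wait for the status field to reflect the requested state.
 */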
1877static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
1878 struct i915_power_well *power_well,
1879 bool enable)
1880{
1881 enum pipe pipe = PIPE_A;
1882 u32 state;
1883 u32 ctrl;
1884
1885 state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
1886
1887 vlv_punit_get(dev_priv);
1888
1889#define COND \
1890 ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
1891
1892 if (COND)
1893 goto out;
1894
1895 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
1896 ctrl &= ~DP_SSC_MASK(pipe);
1897 ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
1898 vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
1899
1900 if (wait_for(COND, 100))
1901 drm_err(&dev_priv->drm,
1902 "timeout setting power well state %08x (%08x)\n",
1903 state,
1904 vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
1905
1906#undef COND
1907
1908out:
1909 vlv_punit_put(dev_priv);
1910}
1911
1912static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
1913 struct i915_power_well *power_well)
1914{
1915 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1916 dev_priv->chv_phy_control);
1917}
1918
1919static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
1920 struct i915_power_well *power_well)
1921{
1922 chv_set_pipe_power_well(dev_priv, power_well, true);
1923
1924 vlv_display_power_well_init(dev_priv);
1925}
1926
1927static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
1928 struct i915_power_well *power_well)
1929{
1930 vlv_display_power_well_deinit(dev_priv);
1931
1932 chv_set_pipe_power_well(dev_priv, power_well, false);
1933}
1934
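/*
 * Power domain references that were put with a delayed (async) release are
 * tracked in the two async_put_domains[] bitmasks; the helpers below operate
 * on their union.
 */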
1935static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
1936{
1937 return power_domains->async_put_domains[0] |
1938 power_domains->async_put_domains[1];
1939}
1940
1941#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
1942
1943static bool
1944assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
1945{
1946 return !WARN_ON(power_domains->async_put_domains[0] &
1947 power_domains->async_put_domains[1]);
1948}
1949
1950static bool
1951__async_put_domains_state_ok(struct i915_power_domains *power_domains)
1952{
1953 enum intel_display_power_domain domain;
1954 bool err = false;
1955
1956 err |= !assert_async_put_domain_masks_disjoint(power_domains);
1957 err |= WARN_ON(!!power_domains->async_put_wakeref !=
1958 !!__async_put_domains_mask(power_domains));
1959
1960 for_each_power_domain(domain, __async_put_domains_mask(power_domains))
1961 err |= WARN_ON(power_domains->domain_use_count[domain] != 1);
1962
1963 return !err;
1964}
1965
1966static void print_power_domains(struct i915_power_domains *power_domains,
1967 const char *prefix, u64 mask)
1968{
1969 struct drm_i915_private *i915 = container_of(power_domains,
1970 struct drm_i915_private,
1971 power_domains);
1972 enum intel_display_power_domain domain;
1973
1974 drm_dbg(&i915->drm, "%s (%lu):\n", prefix, hweight64(mask));
1975 for_each_power_domain(domain, mask)
1976 drm_dbg(&i915->drm, "%s use_count %d\n",
1977 intel_display_power_domain_str(domain),
1978 power_domains->domain_use_count[domain]);
1979}
1980
1981static void
1982print_async_put_domains_state(struct i915_power_domains *power_domains)
1983{
1984 struct drm_i915_private *i915 = container_of(power_domains,
1985 struct drm_i915_private,
1986 power_domains);
1987
1988 drm_dbg(&i915->drm, "async_put_wakeref %u\n",
1989 power_domains->async_put_wakeref);
1990
1991 print_power_domains(power_domains, "async_put_domains[0]",
1992 power_domains->async_put_domains[0]);
1993 print_power_domains(power_domains, "async_put_domains[1]",
1994 power_domains->async_put_domains[1]);
1995}
1996
1997static void
1998verify_async_put_domains_state(struct i915_power_domains *power_domains)
1999{
2000 if (!__async_put_domains_state_ok(power_domains))
2001 print_async_put_domains_state(power_domains);
2002}
2003
2004#else
2005
2006static void
2007assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
2008{
2009}
2010
2011static void
2012verify_async_put_domains_state(struct i915_power_domains *power_domains)
2013{
2014}
2015
2016#endif
2017
2018static u64 async_put_domains_mask(struct i915_power_domains *power_domains)
2019{
2020 assert_async_put_domain_masks_disjoint(power_domains);
2021
2022 return __async_put_domains_mask(power_domains);
2023}
2024
2025static void
2026async_put_domains_clear_domain(struct i915_power_domains *power_domains,
2027 enum intel_display_power_domain domain)
2028{
2029 assert_async_put_domain_masks_disjoint(power_domains);
2030
2031 power_domains->async_put_domains[0] &= ~BIT_ULL(domain);
2032 power_domains->async_put_domains[1] &= ~BIT_ULL(domain);
2033}
2034
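/*
 * If a reference for @domain is pending an async release, reuse it: clear
 * the domain from the async-put masks and, once no async puts remain, cancel
 * the delayed work and drop the wakeref it was holding.
 */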
2035static bool
2036intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
2037 enum intel_display_power_domain domain)
2038{
2039 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2040 bool ret = false;
2041
2042 if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain)))
2043 goto out_verify;
2044
2045 async_put_domains_clear_domain(power_domains, domain);
2046
2047 ret = true;
2048
2049 if (async_put_domains_mask(power_domains))
2050 goto out_verify;
2051
2052 cancel_delayed_work(&power_domains->async_put_work);
2053 intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
2054 fetch_and_zero(&power_domains->async_put_wakeref));
2055out_verify:
2056 verify_async_put_domains_state(power_domains);
2057
2058 return ret;
2059}
2060
2061static void
2062__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
2063 enum intel_display_power_domain domain)
2064{
2065 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2066 struct i915_power_well *power_well;
2067
2068 if (intel_display_power_grab_async_put_ref(dev_priv, domain))
2069 return;
2070
2071 for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
2072 intel_power_well_get(dev_priv, power_well);
2073
2074 power_domains->domain_use_count[domain]++;
2075}
2076
/**
 * intel_display_power_get - grab a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that
 * the power domain and all its parents are powered up. Therefore users should
 * only grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
2089intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
2090 enum intel_display_power_domain domain)
2091{
2092 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2093 intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2094
2095 mutex_lock(&power_domains->lock);
2096 __intel_display_power_get_domain(dev_priv, domain);
2097 mutex_unlock(&power_domains->lock);
2098
2099 return wakeref;
2100}
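
/*
 * Illustrative usage sketch (not part of the driver flow, domain chosen
 * arbitrarily): a caller that needs to touch AUX A hardware would typically
 * bracket the access with a get/put pair and keep the returned wakeref:
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_AUX_A);
 *	... program the AUX A registers ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_AUX_A, wakeref);
 */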

/**
 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain only if the domain
 * (and the device as a whole) is already enabled, without waking up the
 * hardware otherwise.
 *
 * It returns a wakeref cookie when the reference was taken and 0 when the
 * domain was not enabled, in which case no reference is held afterwards. A
 * successful call must be balanced by a call to intel_display_power_put().
 */
intel_wakeref_t
intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
				   enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	intel_wakeref_t wakeref;
	bool is_enabled;

	wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
	if (!wakeref)
		return false;

	mutex_lock(&power_domains->lock);

	if (__intel_display_power_is_enabled(dev_priv, domain)) {
		__intel_display_power_get_domain(dev_priv, domain);
		is_enabled = true;
	} else {
		is_enabled = false;
	}

	mutex_unlock(&power_domains->lock);

	if (!is_enabled) {
		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
		wakeref = 0;
	}

	return wakeref;
}
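
/*
 * Illustrative usage sketch (hypothetical caller): the conditional variant
 * suits state readout style code that must not wake up powered-down hardware:
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_display_power_get_if_enabled(dev_priv,
 *						     POWER_DOMAIN_PIPE_A);
 *	if (!wakeref)
 *		return;		(the pipe is powered down, nothing to read)
 *	... read the pipe registers ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
 */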

static void
__intel_display_power_put_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	const char *name = intel_display_power_domain_str(domain);

	power_domains = &dev_priv->power_domains;

	drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
		 "Use count on domain %s is already zero\n",
		 name);
	drm_WARN(&dev_priv->drm,
		 async_put_domains_mask(power_domains) & BIT_ULL(domain),
		 "Async disabling of domain %s is pending\n",
		 name);

	power_domains->domain_use_count[domain]--;

	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
		intel_power_well_put(dev_priv, power_well);
}

static void __intel_display_power_put(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	__intel_display_power_put_domain(dev_priv, domain);
	mutex_unlock(&power_domains->lock);
}

/**
 * intel_display_power_put_unchecked - release an unchecked power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 *
 * This variant skips wakeref tracking, so the correctness of its use cannot
 * be checked. Prefer the wakeref-tracking intel_display_power_put() where the
 * wakeref cookie is available.
 */
void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
				       enum intel_display_power_domain domain)
{
	__intel_display_power_put(dev_priv, domain);
	intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
}
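
/*
 * Illustrative pairing (hedged sketch): callers that cannot store the wakeref
 * cookie across the get/put, for instance because the reference is handed
 * around in an opaque way, drop it with the unchecked variant:
 *
 *	intel_display_power_get(dev_priv, POWER_DOMAIN_VGA);
 *	... the returned wakeref is intentionally discarded ...
 *	intel_display_power_put_unchecked(dev_priv, POWER_DOMAIN_VGA);
 */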

static void
queue_async_put_domains_work(struct i915_power_domains *power_domains,
			     intel_wakeref_t wakeref)
{
	WARN_ON(power_domains->async_put_wakeref);
	power_domains->async_put_wakeref = wakeref;
	WARN_ON(!queue_delayed_work(system_unbound_wq,
				    &power_domains->async_put_work,
				    msecs_to_jiffies(100)));
}

static void
release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
{
	struct drm_i915_private *dev_priv =
		container_of(power_domains, struct drm_i915_private,
			     power_domains);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	enum intel_display_power_domain domain;
	intel_wakeref_t wakeref;

	/*
	 * The caller must already hold a raw wakeref (asserted below); take a
	 * full wakeref on top of it so the HW access done while disabling the
	 * power wells is accounted for properly.
	 */
	assert_rpm_raw_wakeref_held(rpm);
	wakeref = intel_runtime_pm_get(rpm);

	for_each_power_domain(domain, mask) {
		/* Clear before put, so put's sanity check is happy. */
		async_put_domains_clear_domain(power_domains, domain);
		__intel_display_power_put_domain(dev_priv, domain);
	}

	intel_runtime_pm_put(rpm, wakeref);
}

static void
intel_display_power_put_async_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     power_domains.async_put_work.work);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
	intel_wakeref_t old_work_wakeref = 0;

	mutex_lock(&power_domains->lock);

	/*
	 * Bail out if all the domain refs pending to be released were grabbed
	 * by subsequent gets or a flush_work.
	 */
	old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
	if (!old_work_wakeref)
		goto out_verify;

	release_async_put_domains(power_domains,
				  power_domains->async_put_domains[0]);

	/* Requeue the work if more domains were async put meanwhile. */
	if (power_domains->async_put_domains[1]) {
		power_domains->async_put_domains[0] =
			fetch_and_zero(&power_domains->async_put_domains[1]);
		queue_async_put_domains_work(power_domains,
					     fetch_and_zero(&new_work_wakeref));
	}

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	if (old_work_wakeref)
		intel_runtime_pm_put_raw(rpm, old_work_wakeref);
	if (new_work_wakeref)
		intel_runtime_pm_put_raw(rpm, new_work_wakeref);
}

/**
 * __intel_display_power_put_async - release a power domain reference asynchronously
 * @i915: i915 device instance
 * @domain: power domain to reference
 * @wakeref: wakeref acquired for the reference that is being released
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get*() and schedules a work to power down the
 * corresponding hardware block if this is the last reference.
 */
void __intel_display_power_put_async(struct drm_i915_private *i915,
				     enum intel_display_power_domain domain,
				     intel_wakeref_t wakeref)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);

	mutex_lock(&power_domains->lock);

	if (power_domains->domain_use_count[domain] > 1) {
		__intel_display_power_put_domain(i915, domain);

		goto out_verify;
	}

	drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1);

	/* Let a pending work requeue itself or queue a new one. */
	if (power_domains->async_put_wakeref) {
		power_domains->async_put_domains[1] |= BIT_ULL(domain);
	} else {
		power_domains->async_put_domains[0] |= BIT_ULL(domain);
		queue_async_put_domains_work(power_domains,
					     fetch_and_zero(&work_wakeref));
	}

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	if (work_wakeref)
		intel_runtime_pm_put_raw(rpm, work_wakeref);

	intel_runtime_pm_put(rpm, wakeref);
}
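
/*
 * Illustrative usage sketch (hypothetical caller, going through the
 * put_async wrapper in intel_display_power.h, which ends up in the function
 * above): the async variant suits frequently toggled references where an
 * immediate power-down would just be undone moments later, e.g. around a
 * short AUX transfer:
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_display_power_get(i915, POWER_DOMAIN_AUX_A);
 *	... do the transfer ...
 *	intel_display_power_put_async(i915, POWER_DOMAIN_AUX_A, wakeref);
 *
 * The actual power-down then happens from the delayed work roughly 100 ms
 * later, unless the domain is re-acquired in the meantime.
 */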

/**
 * intel_display_power_flush_work - flushes the async display power disabling work
 * @i915: i915 device instance
 *
 * Flushes any pending work that was scheduled by a preceding
 * intel_display_power_put_async() call, completing the disabling of the
 * corresponding power domains.
 *
 * Note that the work handler function may still be running after this
 * function returns.
 */
void intel_display_power_flush_work(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	intel_wakeref_t work_wakeref;

	mutex_lock(&power_domains->lock);

	work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
	if (!work_wakeref)
		goto out_verify;

	release_async_put_domains(power_domains,
				  async_put_domains_mask(power_domains));
	cancel_delayed_work(&power_domains->async_put_work);

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	if (work_wakeref)
		intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
}
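
/*
 * Note (summary, not new behaviour): callers that need the domains to be
 * physically powered down at a specific point, for instance before verifying
 * the resulting hardware state, flush the deferred work explicitly with
 * intel_display_power_flush_work() or the _sync variant below instead of
 * waiting out the 100 ms delay.
 */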

/*
 * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
 * @i915: i915 device instance
 *
 * Like intel_display_power_flush_work(), but also ensure that the work
 * handler function is not running any more when this function returns.
 */
static void
intel_display_power_flush_work_sync(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	intel_display_power_flush_work(i915);
	cancel_delayed_work_sync(&power_domains->async_put_work);

	verify_async_put_domains_state(power_domains);

	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/**
 * intel_display_power_put - release a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 * @wakeref: wakeref acquired for the reference that is being released
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain,
			     intel_wakeref_t wakeref)
{
	__intel_display_power_put(dev_priv, domain);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}
#endif
2403
2404#define I830_PIPES_POWER_DOMAINS ( \
2405 BIT_ULL(POWER_DOMAIN_PIPE_A) | \
2406 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2407 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
2408 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2409 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2410 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2411 BIT_ULL(POWER_DOMAIN_INIT))
2412
2413#define VLV_DISPLAY_POWER_DOMAINS ( \
2414 BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \
2415 BIT_ULL(POWER_DOMAIN_PIPE_A) | \
2416 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2417 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
2418 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2419 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2420 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2421 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2422 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2423 BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
2424 BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
2425 BIT_ULL(POWER_DOMAIN_VGA) | \
2426 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2427 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2428 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2429 BIT_ULL(POWER_DOMAIN_GMBUS) | \
2430 BIT_ULL(POWER_DOMAIN_INIT))
2431
2432#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
2433 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2434 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2435 BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
2436 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2437 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2438 BIT_ULL(POWER_DOMAIN_INIT))
2439
2440#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
2441 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2442 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2443 BIT_ULL(POWER_DOMAIN_INIT))
2444
2445#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
2446 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2447 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2448 BIT_ULL(POWER_DOMAIN_INIT))
2449
2450#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
2451 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2452 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2453 BIT_ULL(POWER_DOMAIN_INIT))
2454
2455#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
2456 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2457 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2458 BIT_ULL(POWER_DOMAIN_INIT))
2459
2460#define CHV_DISPLAY_POWER_DOMAINS ( \
2461 BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \
2462 BIT_ULL(POWER_DOMAIN_PIPE_A) | \
2463 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2464 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2465 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
2466 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2467 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2468 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2469 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2470 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2471 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2472 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2473 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2474 BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
2475 BIT_ULL(POWER_DOMAIN_VGA) | \
2476 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2477 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2478 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2479 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2480 BIT_ULL(POWER_DOMAIN_GMBUS) | \
2481 BIT_ULL(POWER_DOMAIN_INIT))
2482
2483#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
2484 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2485 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2486 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2487 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2488 BIT_ULL(POWER_DOMAIN_INIT))
2489
2490#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
2491 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2492 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2493 BIT_ULL(POWER_DOMAIN_INIT))
2494
2495#define HSW_DISPLAY_POWER_DOMAINS ( \
2496 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2497 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2498 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
2499 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2500 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2501 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2502 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2503 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2504 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2505 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2506 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2507 BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
2508 BIT_ULL(POWER_DOMAIN_VGA) | \
2509 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2510 BIT_ULL(POWER_DOMAIN_INIT))
2511
2512#define BDW_DISPLAY_POWER_DOMAINS ( \
2513 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2514 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2515 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2516 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2517 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2518 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2519 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2520 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2521 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2522 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2523 BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
2524 BIT_ULL(POWER_DOMAIN_VGA) | \
2525 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2526 BIT_ULL(POWER_DOMAIN_INIT))
2527
2528#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
2529 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2530 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2531 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2532 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2533 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2534 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2535 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2536 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2537 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2538 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2539 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
2540 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2541 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2542 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2543 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2544 BIT_ULL(POWER_DOMAIN_VGA) | \
2545 BIT_ULL(POWER_DOMAIN_INIT))
2546#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS ( \
2547 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \
2548 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \
2549 BIT_ULL(POWER_DOMAIN_INIT))
2550#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
2551 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
2552 BIT_ULL(POWER_DOMAIN_INIT))
2553#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
2554 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
2555 BIT_ULL(POWER_DOMAIN_INIT))
2556#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS ( \
2557 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
2558 BIT_ULL(POWER_DOMAIN_INIT))
2559#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
2560 SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
2561 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
2562 BIT_ULL(POWER_DOMAIN_MODESET) | \
2563 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2564 BIT_ULL(POWER_DOMAIN_INIT))
2565
2566#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
2567 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2568 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2569 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2570 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2571 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2572 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2573 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2574 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2575 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2576 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2577 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2578 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2579 BIT_ULL(POWER_DOMAIN_VGA) | \
2580 BIT_ULL(POWER_DOMAIN_INIT))
2581#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \
2582 BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
2583 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
2584 BIT_ULL(POWER_DOMAIN_MODESET) | \
2585 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2586 BIT_ULL(POWER_DOMAIN_GMBUS) | \
2587 BIT_ULL(POWER_DOMAIN_INIT))
2588#define BXT_DPIO_CMN_A_POWER_DOMAINS ( \
2589 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
2590 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2591 BIT_ULL(POWER_DOMAIN_INIT))
2592#define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \
2593 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2594 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2595 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2596 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2597 BIT_ULL(POWER_DOMAIN_INIT))
2598
2599#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
2600 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2601 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2602 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2603 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2604 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2605 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2606 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2607 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2608 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2609 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2610 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2611 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2612 BIT_ULL(POWER_DOMAIN_VGA) | \
2613 BIT_ULL(POWER_DOMAIN_INIT))
2614#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS ( \
2615 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2616#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
2617 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2618#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
2619 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2620#define GLK_DPIO_CMN_A_POWER_DOMAINS ( \
2621 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
2622 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2623 BIT_ULL(POWER_DOMAIN_INIT))
2624#define GLK_DPIO_CMN_B_POWER_DOMAINS ( \
2625 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2626 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2627 BIT_ULL(POWER_DOMAIN_INIT))
2628#define GLK_DPIO_CMN_C_POWER_DOMAINS ( \
2629 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2630 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2631 BIT_ULL(POWER_DOMAIN_INIT))
2632#define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \
2633 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2634 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
2635 BIT_ULL(POWER_DOMAIN_INIT))
2636#define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \
2637 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2638 BIT_ULL(POWER_DOMAIN_INIT))
2639#define GLK_DISPLAY_AUX_C_POWER_DOMAINS ( \
2640 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2641 BIT_ULL(POWER_DOMAIN_INIT))
2642#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \
2643 GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
2644 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
2645 BIT_ULL(POWER_DOMAIN_MODESET) | \
2646 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2647 BIT_ULL(POWER_DOMAIN_GMBUS) | \
2648 BIT_ULL(POWER_DOMAIN_INIT))
2649
2650#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
2651 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2652 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2653 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2654 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2655 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2656 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2657 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2658 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2659 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2660 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2661 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
2662 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2663 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2664 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2665 BIT_ULL(POWER_DOMAIN_AUX_F) | \
2666 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2667 BIT_ULL(POWER_DOMAIN_VGA) | \
2668 BIT_ULL(POWER_DOMAIN_INIT))
2669#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS ( \
2670 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \
2671 BIT_ULL(POWER_DOMAIN_INIT))
2672#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS ( \
2673 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
2674 BIT_ULL(POWER_DOMAIN_INIT))
2675#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS ( \
2676 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
2677 BIT_ULL(POWER_DOMAIN_INIT))
2678#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS ( \
2679 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
2680 BIT_ULL(POWER_DOMAIN_INIT))
2681#define CNL_DISPLAY_AUX_A_POWER_DOMAINS ( \
2682 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2683 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
2684 BIT_ULL(POWER_DOMAIN_INIT))
2685#define CNL_DISPLAY_AUX_B_POWER_DOMAINS ( \
2686 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2687 BIT_ULL(POWER_DOMAIN_INIT))
2688#define CNL_DISPLAY_AUX_C_POWER_DOMAINS ( \
2689 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2690 BIT_ULL(POWER_DOMAIN_INIT))
2691#define CNL_DISPLAY_AUX_D_POWER_DOMAINS ( \
2692 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2693 BIT_ULL(POWER_DOMAIN_INIT))
2694#define CNL_DISPLAY_AUX_F_POWER_DOMAINS ( \
2695 BIT_ULL(POWER_DOMAIN_AUX_F) | \
2696 BIT_ULL(POWER_DOMAIN_INIT))
2697#define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS ( \
2698 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) | \
2699 BIT_ULL(POWER_DOMAIN_INIT))
2700#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
2701 CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
2702 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
2703 BIT_ULL(POWER_DOMAIN_MODESET) | \
2704 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2705 BIT_ULL(POWER_DOMAIN_INIT))
2706
2707
2708
2709
2710
2711
2712
2713
2714
2715
2716
2717
2718
2719
2720
2721#define ICL_PW_4_POWER_DOMAINS ( \
2722 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2723 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2724 BIT_ULL(POWER_DOMAIN_INIT))
2725
2726#define ICL_PW_3_POWER_DOMAINS ( \
2727 ICL_PW_4_POWER_DOMAINS | \
2728 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2729 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2730 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2731 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2732 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2733 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2734 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2735 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2736 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
2737 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
2738 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2739 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2740 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2741 BIT_ULL(POWER_DOMAIN_AUX_E) | \
2742 BIT_ULL(POWER_DOMAIN_AUX_F) | \
2743 BIT_ULL(POWER_DOMAIN_AUX_C_TBT) | \
2744 BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \
2745 BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \
2746 BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \
2747 BIT_ULL(POWER_DOMAIN_VGA) | \
2748 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2749 BIT_ULL(POWER_DOMAIN_INIT))
2750
2751
2752
2753
2754#define ICL_PW_2_POWER_DOMAINS ( \
2755 ICL_PW_3_POWER_DOMAINS | \
2756 BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \
2757 BIT_ULL(POWER_DOMAIN_INIT))
2758
2759
2760
2761#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
2762 ICL_PW_2_POWER_DOMAINS | \
2763 BIT_ULL(POWER_DOMAIN_MODESET) | \
2764 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2765 BIT_ULL(POWER_DOMAIN_DPLL_DC_OFF) | \
2766 BIT_ULL(POWER_DOMAIN_INIT))
2767
2768#define ICL_DDI_IO_A_POWER_DOMAINS ( \
2769 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2770#define ICL_DDI_IO_B_POWER_DOMAINS ( \
2771 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2772#define ICL_DDI_IO_C_POWER_DOMAINS ( \
2773 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2774#define ICL_DDI_IO_D_POWER_DOMAINS ( \
2775 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
2776#define ICL_DDI_IO_E_POWER_DOMAINS ( \
2777 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
2778#define ICL_DDI_IO_F_POWER_DOMAINS ( \
2779 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
2780
2781#define ICL_AUX_A_IO_POWER_DOMAINS ( \
2782 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
2783 BIT_ULL(POWER_DOMAIN_AUX_A))
2784#define ICL_AUX_B_IO_POWER_DOMAINS ( \
2785 BIT_ULL(POWER_DOMAIN_AUX_B))
2786#define ICL_AUX_C_TC1_IO_POWER_DOMAINS ( \
2787 BIT_ULL(POWER_DOMAIN_AUX_C))
2788#define ICL_AUX_D_TC2_IO_POWER_DOMAINS ( \
2789 BIT_ULL(POWER_DOMAIN_AUX_D))
2790#define ICL_AUX_E_TC3_IO_POWER_DOMAINS ( \
2791 BIT_ULL(POWER_DOMAIN_AUX_E))
2792#define ICL_AUX_F_TC4_IO_POWER_DOMAINS ( \
2793 BIT_ULL(POWER_DOMAIN_AUX_F))
2794#define ICL_AUX_C_TBT1_IO_POWER_DOMAINS ( \
2795 BIT_ULL(POWER_DOMAIN_AUX_C_TBT))
2796#define ICL_AUX_D_TBT2_IO_POWER_DOMAINS ( \
2797 BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
2798#define ICL_AUX_E_TBT3_IO_POWER_DOMAINS ( \
2799 BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
2800#define ICL_AUX_F_TBT4_IO_POWER_DOMAINS ( \
2801 BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
2802
2803#define TGL_PW_5_POWER_DOMAINS ( \
2804 BIT_ULL(POWER_DOMAIN_PIPE_D) | \
2805 BIT_ULL(POWER_DOMAIN_TRANSCODER_D) | \
2806 BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) | \
2807 BIT_ULL(POWER_DOMAIN_INIT))
2808
2809#define TGL_PW_4_POWER_DOMAINS ( \
2810 TGL_PW_5_POWER_DOMAINS | \
2811 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2812 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2813 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2814 BIT_ULL(POWER_DOMAIN_INIT))
2815
2816#define TGL_PW_3_POWER_DOMAINS ( \
2817 TGL_PW_4_POWER_DOMAINS | \
2818 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2819 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2820 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2821 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2822 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
2823 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
2824 BIT_ULL(POWER_DOMAIN_PORT_DDI_G_LANES) | \
2825 BIT_ULL(POWER_DOMAIN_PORT_DDI_H_LANES) | \
2826 BIT_ULL(POWER_DOMAIN_PORT_DDI_I_LANES) | \
2827 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2828 BIT_ULL(POWER_DOMAIN_AUX_E) | \
2829 BIT_ULL(POWER_DOMAIN_AUX_F) | \
2830 BIT_ULL(POWER_DOMAIN_AUX_G) | \
2831 BIT_ULL(POWER_DOMAIN_AUX_H) | \
2832 BIT_ULL(POWER_DOMAIN_AUX_I) | \
2833 BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \
2834 BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \
2835 BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \
2836 BIT_ULL(POWER_DOMAIN_AUX_G_TBT) | \
2837 BIT_ULL(POWER_DOMAIN_AUX_H_TBT) | \
2838 BIT_ULL(POWER_DOMAIN_AUX_I_TBT) | \
2839 BIT_ULL(POWER_DOMAIN_VGA) | \
2840 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2841 BIT_ULL(POWER_DOMAIN_INIT))
2842
2843#define TGL_PW_2_POWER_DOMAINS ( \
2844 TGL_PW_3_POWER_DOMAINS | \
2845 BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \
2846 BIT_ULL(POWER_DOMAIN_INIT))
2847
2848#define TGL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
2849 TGL_PW_3_POWER_DOMAINS | \
2850 BIT_ULL(POWER_DOMAIN_MODESET) | \
2851 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2852 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2853 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2854 BIT_ULL(POWER_DOMAIN_INIT))
2855
2856#define TGL_DDI_IO_D_TC1_POWER_DOMAINS ( \
2857 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
2858#define TGL_DDI_IO_E_TC2_POWER_DOMAINS ( \
2859 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
2860#define TGL_DDI_IO_F_TC3_POWER_DOMAINS ( \
2861 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
2862#define TGL_DDI_IO_G_TC4_POWER_DOMAINS ( \
2863 BIT_ULL(POWER_DOMAIN_PORT_DDI_G_IO))
2864#define TGL_DDI_IO_H_TC5_POWER_DOMAINS ( \
2865 BIT_ULL(POWER_DOMAIN_PORT_DDI_H_IO))
2866#define TGL_DDI_IO_I_TC6_POWER_DOMAINS ( \
2867 BIT_ULL(POWER_DOMAIN_PORT_DDI_I_IO))
2868
2869#define TGL_AUX_A_IO_POWER_DOMAINS ( \
2870 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
2871 BIT_ULL(POWER_DOMAIN_AUX_A))
2872#define TGL_AUX_B_IO_POWER_DOMAINS ( \
2873 BIT_ULL(POWER_DOMAIN_AUX_B))
2874#define TGL_AUX_C_IO_POWER_DOMAINS ( \
2875 BIT_ULL(POWER_DOMAIN_AUX_C))
2876#define TGL_AUX_D_TC1_IO_POWER_DOMAINS ( \
2877 BIT_ULL(POWER_DOMAIN_AUX_D))
2878#define TGL_AUX_E_TC2_IO_POWER_DOMAINS ( \
2879 BIT_ULL(POWER_DOMAIN_AUX_E))
2880#define TGL_AUX_F_TC3_IO_POWER_DOMAINS ( \
2881 BIT_ULL(POWER_DOMAIN_AUX_F))
2882#define TGL_AUX_G_TC4_IO_POWER_DOMAINS ( \
2883 BIT_ULL(POWER_DOMAIN_AUX_G))
2884#define TGL_AUX_H_TC5_IO_POWER_DOMAINS ( \
2885 BIT_ULL(POWER_DOMAIN_AUX_H))
2886#define TGL_AUX_I_TC6_IO_POWER_DOMAINS ( \
2887 BIT_ULL(POWER_DOMAIN_AUX_I))
2888#define TGL_AUX_D_TBT1_IO_POWER_DOMAINS ( \
2889 BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
2890#define TGL_AUX_E_TBT2_IO_POWER_DOMAINS ( \
2891 BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
2892#define TGL_AUX_F_TBT3_IO_POWER_DOMAINS ( \
2893 BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
2894#define TGL_AUX_G_TBT4_IO_POWER_DOMAINS ( \
2895 BIT_ULL(POWER_DOMAIN_AUX_G_TBT))
2896#define TGL_AUX_H_TBT5_IO_POWER_DOMAINS ( \
2897 BIT_ULL(POWER_DOMAIN_AUX_H_TBT))
2898#define TGL_AUX_I_TBT6_IO_POWER_DOMAINS ( \
2899 BIT_ULL(POWER_DOMAIN_AUX_I_TBT))
2900
2901#define TGL_TC_COLD_OFF_POWER_DOMAINS ( \
2902 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2903 BIT_ULL(POWER_DOMAIN_AUX_E) | \
2904 BIT_ULL(POWER_DOMAIN_AUX_F) | \
2905 BIT_ULL(POWER_DOMAIN_AUX_G) | \
2906 BIT_ULL(POWER_DOMAIN_AUX_H) | \
2907 BIT_ULL(POWER_DOMAIN_AUX_I) | \
2908 BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \
2909 BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \
2910 BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \
2911 BIT_ULL(POWER_DOMAIN_AUX_G_TBT) | \
2912 BIT_ULL(POWER_DOMAIN_AUX_H_TBT) | \
2913 BIT_ULL(POWER_DOMAIN_AUX_I_TBT) | \
2914 BIT_ULL(POWER_DOMAIN_TC_COLD_OFF))
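
/*
 * Note on the masks above and the well descriptors below (summary, not new
 * behaviour): each power well lists in .domains the u64 bitmask of display
 * power domains it backs. Grabbing a reference on a domain enables every well
 * whose mask contains BIT_ULL(domain) (see __intel_display_power_get_domain()
 * above), so a domain appearing in several masks keeps all of those wells
 * powered. A quick membership check is e.g.
 * TGL_TC_COLD_OFF_POWER_DOMAINS & BIT_ULL(POWER_DOMAIN_AUX_D), which is
 * non-zero.
 */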
2915
2916static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
2917 .sync_hw = i9xx_power_well_sync_hw_noop,
2918 .enable = i9xx_always_on_power_well_noop,
2919 .disable = i9xx_always_on_power_well_noop,
2920 .is_enabled = i9xx_always_on_power_well_enabled,
2921};
2922
2923static const struct i915_power_well_ops chv_pipe_power_well_ops = {
2924 .sync_hw = chv_pipe_power_well_sync_hw,
2925 .enable = chv_pipe_power_well_enable,
2926 .disable = chv_pipe_power_well_disable,
2927 .is_enabled = chv_pipe_power_well_enabled,
2928};
2929
2930static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
2931 .sync_hw = i9xx_power_well_sync_hw_noop,
2932 .enable = chv_dpio_cmn_power_well_enable,
2933 .disable = chv_dpio_cmn_power_well_disable,
2934 .is_enabled = vlv_power_well_enabled,
2935};
2936
2937static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
2938 {
2939 .name = "always-on",
2940 .always_on = true,
2941 .domains = POWER_DOMAIN_MASK,
2942 .ops = &i9xx_always_on_power_well_ops,
2943 .id = DISP_PW_ID_NONE,
2944 },
2945};
2946
2947static const struct i915_power_well_ops i830_pipes_power_well_ops = {
2948 .sync_hw = i830_pipes_power_well_sync_hw,
2949 .enable = i830_pipes_power_well_enable,
2950 .disable = i830_pipes_power_well_disable,
2951 .is_enabled = i830_pipes_power_well_enabled,
2952};
2953
2954static const struct i915_power_well_desc i830_power_wells[] = {
2955 {
2956 .name = "always-on",
2957 .always_on = true,
2958 .domains = POWER_DOMAIN_MASK,
2959 .ops = &i9xx_always_on_power_well_ops,
2960 .id = DISP_PW_ID_NONE,
2961 },
2962 {
2963 .name = "pipes",
2964 .domains = I830_PIPES_POWER_DOMAINS,
2965 .ops = &i830_pipes_power_well_ops,
2966 .id = DISP_PW_ID_NONE,
2967 },
2968};
2969
2970static const struct i915_power_well_ops hsw_power_well_ops = {
2971 .sync_hw = hsw_power_well_sync_hw,
2972 .enable = hsw_power_well_enable,
2973 .disable = hsw_power_well_disable,
2974 .is_enabled = hsw_power_well_enabled,
2975};
2976
2977static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
2978 .sync_hw = i9xx_power_well_sync_hw_noop,
2979 .enable = gen9_dc_off_power_well_enable,
2980 .disable = gen9_dc_off_power_well_disable,
2981 .is_enabled = gen9_dc_off_power_well_enabled,
2982};
2983
2984static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
2985 .sync_hw = i9xx_power_well_sync_hw_noop,
2986 .enable = bxt_dpio_cmn_power_well_enable,
2987 .disable = bxt_dpio_cmn_power_well_disable,
2988 .is_enabled = bxt_dpio_cmn_power_well_enabled,
2989};
2990
2991static const struct i915_power_well_regs hsw_power_well_regs = {
2992 .bios = HSW_PWR_WELL_CTL1,
2993 .driver = HSW_PWR_WELL_CTL2,
2994 .kvmr = HSW_PWR_WELL_CTL3,
2995 .debug = HSW_PWR_WELL_CTL4,
2996};
2997
2998static const struct i915_power_well_desc hsw_power_wells[] = {
2999 {
3000 .name = "always-on",
3001 .always_on = true,
3002 .domains = POWER_DOMAIN_MASK,
3003 .ops = &i9xx_always_on_power_well_ops,
3004 .id = DISP_PW_ID_NONE,
3005 },
3006 {
3007 .name = "display",
3008 .domains = HSW_DISPLAY_POWER_DOMAINS,
3009 .ops = &hsw_power_well_ops,
3010 .id = HSW_DISP_PW_GLOBAL,
3011 {
3012 .hsw.regs = &hsw_power_well_regs,
3013 .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
3014 .hsw.has_vga = true,
3015 },
3016 },
3017};
3018
3019static const struct i915_power_well_desc bdw_power_wells[] = {
3020 {
3021 .name = "always-on",
3022 .always_on = true,
3023 .domains = POWER_DOMAIN_MASK,
3024 .ops = &i9xx_always_on_power_well_ops,
3025 .id = DISP_PW_ID_NONE,
3026 },
3027 {
3028 .name = "display",
3029 .domains = BDW_DISPLAY_POWER_DOMAINS,
3030 .ops = &hsw_power_well_ops,
3031 .id = HSW_DISP_PW_GLOBAL,
3032 {
3033 .hsw.regs = &hsw_power_well_regs,
3034 .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
3035 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3036 .hsw.has_vga = true,
3037 },
3038 },
3039};
3040
3041static const struct i915_power_well_ops vlv_display_power_well_ops = {
3042 .sync_hw = i9xx_power_well_sync_hw_noop,
3043 .enable = vlv_display_power_well_enable,
3044 .disable = vlv_display_power_well_disable,
3045 .is_enabled = vlv_power_well_enabled,
3046};
3047
3048static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
3049 .sync_hw = i9xx_power_well_sync_hw_noop,
3050 .enable = vlv_dpio_cmn_power_well_enable,
3051 .disable = vlv_dpio_cmn_power_well_disable,
3052 .is_enabled = vlv_power_well_enabled,
3053};
3054
3055static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
3056 .sync_hw = i9xx_power_well_sync_hw_noop,
3057 .enable = vlv_power_well_enable,
3058 .disable = vlv_power_well_disable,
3059 .is_enabled = vlv_power_well_enabled,
3060};
3061
3062static const struct i915_power_well_desc vlv_power_wells[] = {
3063 {
3064 .name = "always-on",
3065 .always_on = true,
3066 .domains = POWER_DOMAIN_MASK,
3067 .ops = &i9xx_always_on_power_well_ops,
3068 .id = DISP_PW_ID_NONE,
3069 },
3070 {
3071 .name = "display",
3072 .domains = VLV_DISPLAY_POWER_DOMAINS,
3073 .ops = &vlv_display_power_well_ops,
3074 .id = VLV_DISP_PW_DISP2D,
3075 {
3076 .vlv.idx = PUNIT_PWGT_IDX_DISP2D,
3077 },
3078 },
3079 {
3080 .name = "dpio-tx-b-01",
3081 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3082 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3083 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3084 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3085 .ops = &vlv_dpio_power_well_ops,
3086 .id = DISP_PW_ID_NONE,
3087 {
3088 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
3089 },
3090 },
3091 {
3092 .name = "dpio-tx-b-23",
3093 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3094 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3095 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3096 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3097 .ops = &vlv_dpio_power_well_ops,
3098 .id = DISP_PW_ID_NONE,
3099 {
3100 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
3101 },
3102 },
3103 {
3104 .name = "dpio-tx-c-01",
3105 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3106 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3107 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3108 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3109 .ops = &vlv_dpio_power_well_ops,
3110 .id = DISP_PW_ID_NONE,
3111 {
3112 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
3113 },
3114 },
3115 {
3116 .name = "dpio-tx-c-23",
3117 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3118 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3119 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3120 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3121 .ops = &vlv_dpio_power_well_ops,
3122 .id = DISP_PW_ID_NONE,
3123 {
3124 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
3125 },
3126 },
3127 {
3128 .name = "dpio-common",
3129 .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
3130 .ops = &vlv_dpio_cmn_power_well_ops,
3131 .id = VLV_DISP_PW_DPIO_CMN_BC,
3132 {
3133 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
3134 },
3135 },
3136};
3137
3138static const struct i915_power_well_desc chv_power_wells[] = {
3139 {
3140 .name = "always-on",
3141 .always_on = true,
3142 .domains = POWER_DOMAIN_MASK,
3143 .ops = &i9xx_always_on_power_well_ops,
3144 .id = DISP_PW_ID_NONE,
3145 },
3146 {
3147 .name = "display",
		/*
		 * Pipe A power well is the new disp2d well. Pipe B and C
		 * power wells don't actually exist. Pipe A power well is
		 * required for any pipe to work.
		 */
3153 .domains = CHV_DISPLAY_POWER_DOMAINS,
3154 .ops = &chv_pipe_power_well_ops,
3155 .id = DISP_PW_ID_NONE,
3156 },
3157 {
3158 .name = "dpio-common-bc",
3159 .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
3160 .ops = &chv_dpio_cmn_power_well_ops,
3161 .id = VLV_DISP_PW_DPIO_CMN_BC,
3162 {
3163 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
3164 },
3165 },
3166 {
3167 .name = "dpio-common-d",
3168 .domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
3169 .ops = &chv_dpio_cmn_power_well_ops,
3170 .id = CHV_DISP_PW_DPIO_CMN_D,
3171 {
3172 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
3173 },
3174 },
3175};
3176
3177bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
3178 enum i915_power_well_id power_well_id)
3179{
3180 struct i915_power_well *power_well;
3181 bool ret;
3182
3183 power_well = lookup_power_well(dev_priv, power_well_id);
3184 ret = power_well->desc->ops->is_enabled(dev_priv, power_well);
3185
3186 return ret;
3187}
3188
3189static const struct i915_power_well_desc skl_power_wells[] = {
3190 {
3191 .name = "always-on",
3192 .always_on = true,
3193 .domains = POWER_DOMAIN_MASK,
3194 .ops = &i9xx_always_on_power_well_ops,
3195 .id = DISP_PW_ID_NONE,
3196 },
3197 {
3198 .name = "power well 1",
		/* Handled by the DMC firmware */
3200 .always_on = true,
3201 .domains = 0,
3202 .ops = &hsw_power_well_ops,
3203 .id = SKL_DISP_PW_1,
3204 {
3205 .hsw.regs = &hsw_power_well_regs,
3206 .hsw.idx = SKL_PW_CTL_IDX_PW_1,
3207 .hsw.has_fuses = true,
3208 },
3209 },
3210 {
3211 .name = "MISC IO power well",
		/* Handled by the DMC firmware */
3213 .always_on = true,
3214 .domains = 0,
3215 .ops = &hsw_power_well_ops,
3216 .id = SKL_DISP_PW_MISC_IO,
3217 {
3218 .hsw.regs = &hsw_power_well_regs,
3219 .hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
3220 },
3221 },
3222 {
3223 .name = "DC off",
3224 .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
3225 .ops = &gen9_dc_off_power_well_ops,
3226 .id = SKL_DISP_DC_OFF,
3227 },
3228 {
3229 .name = "power well 2",
3230 .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3231 .ops = &hsw_power_well_ops,
3232 .id = SKL_DISP_PW_2,
3233 {
3234 .hsw.regs = &hsw_power_well_regs,
3235 .hsw.idx = SKL_PW_CTL_IDX_PW_2,
3236 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3237 .hsw.has_vga = true,
3238 .hsw.has_fuses = true,
3239 },
3240 },
3241 {
3242 .name = "DDI A/E IO power well",
3243 .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
3244 .ops = &hsw_power_well_ops,
3245 .id = DISP_PW_ID_NONE,
3246 {
3247 .hsw.regs = &hsw_power_well_regs,
3248 .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
3249 },
3250 },
3251 {
3252 .name = "DDI B IO power well",
3253 .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
3254 .ops = &hsw_power_well_ops,
3255 .id = DISP_PW_ID_NONE,
3256 {
3257 .hsw.regs = &hsw_power_well_regs,
3258 .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3259 },
3260 },
3261 {
3262 .name = "DDI C IO power well",
3263 .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
3264 .ops = &hsw_power_well_ops,
3265 .id = DISP_PW_ID_NONE,
3266 {
3267 .hsw.regs = &hsw_power_well_regs,
3268 .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3269 },
3270 },
3271 {
3272 .name = "DDI D IO power well",
3273 .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
3274 .ops = &hsw_power_well_ops,
3275 .id = DISP_PW_ID_NONE,
3276 {
3277 .hsw.regs = &hsw_power_well_regs,
3278 .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
3279 },
3280 },
3281};
3282
3283static const struct i915_power_well_desc bxt_power_wells[] = {
3284 {
3285 .name = "always-on",
3286 .always_on = true,
3287 .domains = POWER_DOMAIN_MASK,
3288 .ops = &i9xx_always_on_power_well_ops,
3289 .id = DISP_PW_ID_NONE,
3290 },
3291 {
3292 .name = "power well 1",
		/* Handled by the DMC firmware */
3294 .always_on = true,
3295 .domains = 0,
3296 .ops = &hsw_power_well_ops,
3297 .id = SKL_DISP_PW_1,
3298 {
3299 .hsw.regs = &hsw_power_well_regs,
3300 .hsw.idx = SKL_PW_CTL_IDX_PW_1,
3301 .hsw.has_fuses = true,
3302 },
3303 },
3304 {
3305 .name = "DC off",
3306 .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
3307 .ops = &gen9_dc_off_power_well_ops,
3308 .id = SKL_DISP_DC_OFF,
3309 },
3310 {
3311 .name = "power well 2",
3312 .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3313 .ops = &hsw_power_well_ops,
3314 .id = SKL_DISP_PW_2,
3315 {
3316 .hsw.regs = &hsw_power_well_regs,
3317 .hsw.idx = SKL_PW_CTL_IDX_PW_2,
3318 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3319 .hsw.has_vga = true,
3320 .hsw.has_fuses = true,
3321 },
3322 },
3323 {
3324 .name = "dpio-common-a",
3325 .domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
3326 .ops = &bxt_dpio_cmn_power_well_ops,
3327 .id = BXT_DISP_PW_DPIO_CMN_A,
3328 {
3329 .bxt.phy = DPIO_PHY1,
3330 },
3331 },
3332 {
3333 .name = "dpio-common-bc",
3334 .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
3335 .ops = &bxt_dpio_cmn_power_well_ops,
3336 .id = VLV_DISP_PW_DPIO_CMN_BC,
3337 {
3338 .bxt.phy = DPIO_PHY0,
3339 },
3340 },
3341};
3342
3343static const struct i915_power_well_desc glk_power_wells[] = {
3344 {
3345 .name = "always-on",
3346 .always_on = true,
3347 .domains = POWER_DOMAIN_MASK,
3348 .ops = &i9xx_always_on_power_well_ops,
3349 .id = DISP_PW_ID_NONE,
3350 },
3351 {
		/* Handled by the DMC firmware */
3353
3354 .always_on = true,
3355 .domains = 0,
3356 .ops = &hsw_power_well_ops,
3357 .id = SKL_DISP_PW_1,
3358 {
3359 .hsw.regs = &hsw_power_well_regs,
3360 .hsw.idx = SKL_PW_CTL_IDX_PW_1,
3361 .hsw.has_fuses = true,
3362 },
3363 },
3364 {
3365 .name = "DC off",
3366 .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
3367 .ops = &gen9_dc_off_power_well_ops,
3368 .id = SKL_DISP_DC_OFF,
3369 },
3370 {
3371 .name = "power well 2",
3372 .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3373 .ops = &hsw_power_well_ops,
3374 .id = SKL_DISP_PW_2,
3375 {
3376 .hsw.regs = &hsw_power_well_regs,
3377 .hsw.idx = SKL_PW_CTL_IDX_PW_2,
3378 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3379 .hsw.has_vga = true,
3380 .hsw.has_fuses = true,
3381 },
3382 },
3383 {
3384 .name = "dpio-common-a",
3385 .domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
3386 .ops = &bxt_dpio_cmn_power_well_ops,
3387 .id = BXT_DISP_PW_DPIO_CMN_A,
3388 {
3389 .bxt.phy = DPIO_PHY1,
3390 },
3391 },
3392 {
3393 .name = "dpio-common-b",
3394 .domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
3395 .ops = &bxt_dpio_cmn_power_well_ops,
3396 .id = VLV_DISP_PW_DPIO_CMN_BC,
3397 {
3398 .bxt.phy = DPIO_PHY0,
3399 },
3400 },
3401 {
3402 .name = "dpio-common-c",
3403 .domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
3404 .ops = &bxt_dpio_cmn_power_well_ops,
3405 .id = GLK_DISP_PW_DPIO_CMN_C,
3406 {
3407 .bxt.phy = DPIO_PHY2,
3408 },
3409 },
3410 {
3411 .name = "AUX A",
3412 .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
3413 .ops = &hsw_power_well_ops,
3414 .id = DISP_PW_ID_NONE,
3415 {
3416 .hsw.regs = &hsw_power_well_regs,
3417 .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
3418 },
3419 },
3420 {
3421 .name = "AUX B",
3422 .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
3423 .ops = &hsw_power_well_ops,
3424 .id = DISP_PW_ID_NONE,
3425 {
3426 .hsw.regs = &hsw_power_well_regs,
3427 .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
3428 },
3429 },
3430 {
3431 .name = "AUX C",
3432 .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
3433 .ops = &hsw_power_well_ops,
3434 .id = DISP_PW_ID_NONE,
3435 {
3436 .hsw.regs = &hsw_power_well_regs,
3437 .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
3438 },
3439 },
3440 {
3441 .name = "DDI A IO power well",
3442 .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
3443 .ops = &hsw_power_well_ops,
3444 .id = DISP_PW_ID_NONE,
3445 {
3446 .hsw.regs = &hsw_power_well_regs,
3447 .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
3448 },
3449 },
3450 {
3451 .name = "DDI B IO power well",
3452 .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
3453 .ops = &hsw_power_well_ops,
3454 .id = DISP_PW_ID_NONE,
3455 {
3456 .hsw.regs = &hsw_power_well_regs,
3457 .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3458 },
3459 },
3460 {
3461 .name = "DDI C IO power well",
3462 .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
3463 .ops = &hsw_power_well_ops,
3464 .id = DISP_PW_ID_NONE,
3465 {
3466 .hsw.regs = &hsw_power_well_regs,
3467 .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3468 },
3469 },
3470};
3471
3472static const struct i915_power_well_desc cnl_power_wells[] = {
3473 {
3474 .name = "always-on",
3475 .always_on = true,
3476 .domains = POWER_DOMAIN_MASK,
3477 .ops = &i9xx_always_on_power_well_ops,
3478 .id = DISP_PW_ID_NONE,
3479 },
3480 {
3481 .name = "power well 1",
		/* Handled by the DMC firmware */
3483 .always_on = true,
3484 .domains = 0,
3485 .ops = &hsw_power_well_ops,
3486 .id = SKL_DISP_PW_1,
3487 {
3488 .hsw.regs = &hsw_power_well_regs,
3489 .hsw.idx = SKL_PW_CTL_IDX_PW_1,
3490 .hsw.has_fuses = true,
3491 },
3492 },
3493 {
3494 .name = "AUX A",
3495 .domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
3496 .ops = &hsw_power_well_ops,
3497 .id = DISP_PW_ID_NONE,
3498 {
3499 .hsw.regs = &hsw_power_well_regs,
3500 .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
3501 },
3502 },
3503 {
3504 .name = "AUX B",
3505 .domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
3506 .ops = &hsw_power_well_ops,
3507 .id = DISP_PW_ID_NONE,
3508 {
3509 .hsw.regs = &hsw_power_well_regs,
3510 .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
3511 },
3512 },
3513 {
3514 .name = "AUX C",
3515 .domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
3516 .ops = &hsw_power_well_ops,
3517 .id = DISP_PW_ID_NONE,
3518 {
3519 .hsw.regs = &hsw_power_well_regs,
3520 .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
3521 },
3522 },
3523 {
3524 .name = "AUX D",
3525 .domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
3526 .ops = &hsw_power_well_ops,
3527 .id = DISP_PW_ID_NONE,
3528 {
3529 .hsw.regs = &hsw_power_well_regs,
3530 .hsw.idx = CNL_PW_CTL_IDX_AUX_D,
3531 },
3532 },
3533 {
3534 .name = "DC off",
3535 .domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
3536 .ops = &gen9_dc_off_power_well_ops,
3537 .id = SKL_DISP_DC_OFF,
3538 },
3539 {
3540 .name = "power well 2",
3541 .domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3542 .ops = &hsw_power_well_ops,
3543 .id = SKL_DISP_PW_2,
3544 {
3545 .hsw.regs = &hsw_power_well_regs,
3546 .hsw.idx = SKL_PW_CTL_IDX_PW_2,
3547 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3548 .hsw.has_vga = true,
3549 .hsw.has_fuses = true,
3550 },
3551 },
3552 {
3553 .name = "DDI A IO power well",
3554 .domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
3555 .ops = &hsw_power_well_ops,
3556 .id = DISP_PW_ID_NONE,
3557 {
3558 .hsw.regs = &hsw_power_well_regs,
3559 .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
3560 },
3561 },
3562 {
3563 .name = "DDI B IO power well",
3564 .domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
3565 .ops = &hsw_power_well_ops,
3566 .id = DISP_PW_ID_NONE,
3567 {
3568 .hsw.regs = &hsw_power_well_regs,
3569 .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3570 },
3571 },
3572 {
3573 .name = "DDI C IO power well",
3574 .domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
3575 .ops = &hsw_power_well_ops,
3576 .id = DISP_PW_ID_NONE,
3577 {
3578 .hsw.regs = &hsw_power_well_regs,
3579 .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3580 },
3581 },
3582 {
3583 .name = "DDI D IO power well",
3584 .domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
3585 .ops = &hsw_power_well_ops,
3586 .id = DISP_PW_ID_NONE,
3587 {
3588 .hsw.regs = &hsw_power_well_regs,
3589 .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
3590 },
3591 },
3592 {
3593 .name = "DDI F IO power well",
3594 .domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
3595 .ops = &hsw_power_well_ops,
3596 .id = DISP_PW_ID_NONE,
3597 {
3598 .hsw.regs = &hsw_power_well_regs,
3599 .hsw.idx = CNL_PW_CTL_IDX_DDI_F,
3600 },
3601 },
3602 {
3603 .name = "AUX F",
3604 .domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
3605 .ops = &hsw_power_well_ops,
3606 .id = DISP_PW_ID_NONE,
3607 {
3608 .hsw.regs = &hsw_power_well_regs,
3609 .hsw.idx = CNL_PW_CTL_IDX_AUX_F,
3610 },
3611 },
3612};
3613
3614static const struct i915_power_well_ops icl_aux_power_well_ops = {
3615 .sync_hw = hsw_power_well_sync_hw,
3616 .enable = icl_aux_power_well_enable,
3617 .disable = icl_aux_power_well_disable,
3618 .is_enabled = hsw_power_well_enabled,
3619};
3620
3621static const struct i915_power_well_regs icl_aux_power_well_regs = {
3622 .bios = ICL_PWR_WELL_CTL_AUX1,
3623 .driver = ICL_PWR_WELL_CTL_AUX2,
3624 .debug = ICL_PWR_WELL_CTL_AUX4,
3625};
3626
3627static const struct i915_power_well_regs icl_ddi_power_well_regs = {
3628 .bios = ICL_PWR_WELL_CTL_DDI1,
3629 .driver = ICL_PWR_WELL_CTL_DDI2,
3630 .debug = ICL_PWR_WELL_CTL_DDI4,
3631};
3632
3633static const struct i915_power_well_desc icl_power_wells[] = {
3634 {
3635 .name = "always-on",
3636 .always_on = true,
3637 .domains = POWER_DOMAIN_MASK,
3638 .ops = &i9xx_always_on_power_well_ops,
3639 .id = DISP_PW_ID_NONE,
3640 },
3641 {
3642 .name = "power well 1",
		/* Handled by the DMC firmware */
3644 .always_on = true,
3645 .domains = 0,
3646 .ops = &hsw_power_well_ops,
3647 .id = SKL_DISP_PW_1,
3648 {
3649 .hsw.regs = &hsw_power_well_regs,
3650 .hsw.idx = ICL_PW_CTL_IDX_PW_1,
3651 .hsw.has_fuses = true,
3652 },
3653 },
3654 {
3655 .name = "DC off",
3656 .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
3657 .ops = &gen9_dc_off_power_well_ops,
3658 .id = SKL_DISP_DC_OFF,
3659 },
3660 {
3661 .name = "power well 2",
3662 .domains = ICL_PW_2_POWER_DOMAINS,
3663 .ops = &hsw_power_well_ops,
3664 .id = SKL_DISP_PW_2,
3665 {
3666 .hsw.regs = &hsw_power_well_regs,
3667 .hsw.idx = ICL_PW_CTL_IDX_PW_2,
3668 .hsw.has_fuses = true,
3669 },
3670 },
3671 {
3672 .name = "power well 3",
3673 .domains = ICL_PW_3_POWER_DOMAINS,
3674 .ops = &hsw_power_well_ops,
3675 .id = ICL_DISP_PW_3,
3676 {
3677 .hsw.regs = &hsw_power_well_regs,
3678 .hsw.idx = ICL_PW_CTL_IDX_PW_3,
3679 .hsw.irq_pipe_mask = BIT(PIPE_B),
3680 .hsw.has_vga = true,
3681 .hsw.has_fuses = true,
3682 },
3683 },
3684 {
3685 .name = "DDI A IO",
3686 .domains = ICL_DDI_IO_A_POWER_DOMAINS,
3687 .ops = &hsw_power_well_ops,
3688 .id = DISP_PW_ID_NONE,
3689 {
3690 .hsw.regs = &icl_ddi_power_well_regs,
3691 .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
3692 },
3693 },
3694 {
3695 .name = "DDI B IO",
3696 .domains = ICL_DDI_IO_B_POWER_DOMAINS,
3697 .ops = &hsw_power_well_ops,
3698 .id = DISP_PW_ID_NONE,
3699 {
3700 .hsw.regs = &icl_ddi_power_well_regs,
3701 .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
3702 },
3703 },
3704 {
3705 .name = "DDI C IO",
3706 .domains = ICL_DDI_IO_C_POWER_DOMAINS,
3707 .ops = &hsw_power_well_ops,
3708 .id = DISP_PW_ID_NONE,
3709 {
3710 .hsw.regs = &icl_ddi_power_well_regs,
3711 .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
3712 },
3713 },
3714 {
3715 .name = "DDI D IO",
3716 .domains = ICL_DDI_IO_D_POWER_DOMAINS,
3717 .ops = &hsw_power_well_ops,
3718 .id = DISP_PW_ID_NONE,
3719 {
3720 .hsw.regs = &icl_ddi_power_well_regs,
3721 .hsw.idx = ICL_PW_CTL_IDX_DDI_D,
3722 },
3723 },
3724 {
3725 .name = "DDI E IO",
3726 .domains = ICL_DDI_IO_E_POWER_DOMAINS,
3727 .ops = &hsw_power_well_ops,
3728 .id = DISP_PW_ID_NONE,
3729 {
3730 .hsw.regs = &icl_ddi_power_well_regs,
3731 .hsw.idx = ICL_PW_CTL_IDX_DDI_E,
3732 },
3733 },
3734 {
3735 .name = "DDI F IO",
3736 .domains = ICL_DDI_IO_F_POWER_DOMAINS,
3737 .ops = &hsw_power_well_ops,
3738 .id = DISP_PW_ID_NONE,
3739 {
3740 .hsw.regs = &icl_ddi_power_well_regs,
3741 .hsw.idx = ICL_PW_CTL_IDX_DDI_F,
3742 },
3743 },
3744 {
3745 .name = "AUX A",
3746 .domains = ICL_AUX_A_IO_POWER_DOMAINS,
3747 .ops = &icl_aux_power_well_ops,
3748 .id = DISP_PW_ID_NONE,
3749 {
3750 .hsw.regs = &icl_aux_power_well_regs,
3751 .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
3752 },
3753 },
3754 {
3755 .name = "AUX B",
3756 .domains = ICL_AUX_B_IO_POWER_DOMAINS,
3757 .ops = &icl_aux_power_well_ops,
3758 .id = DISP_PW_ID_NONE,
3759 {
3760 .hsw.regs = &icl_aux_power_well_regs,
3761 .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
3762 },
3763 },
3764 {
3765 .name = "AUX C TC1",
3766 .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
3767 .ops = &icl_aux_power_well_ops,
3768 .id = DISP_PW_ID_NONE,
3769 {
3770 .hsw.regs = &icl_aux_power_well_regs,
3771 .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
3772 .hsw.is_tc_tbt = false,
3773 },
3774 },
3775 {
3776 .name = "AUX D TC2",
3777 .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
3778 .ops = &icl_aux_power_well_ops,
3779 .id = DISP_PW_ID_NONE,
3780 {
3781 .hsw.regs = &icl_aux_power_well_regs,
3782 .hsw.idx = ICL_PW_CTL_IDX_AUX_D,
3783 .hsw.is_tc_tbt = false,
3784 },
3785 },
3786 {
3787 .name = "AUX E TC3",
3788 .domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS,
3789 .ops = &icl_aux_power_well_ops,
3790 .id = DISP_PW_ID_NONE,
3791 {
3792 .hsw.regs = &icl_aux_power_well_regs,
3793 .hsw.idx = ICL_PW_CTL_IDX_AUX_E,
3794 .hsw.is_tc_tbt = false,
3795 },
3796 },
3797 {
3798 .name = "AUX F TC4",
3799 .domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS,
3800 .ops = &icl_aux_power_well_ops,
3801 .id = DISP_PW_ID_NONE,
3802 {
3803 .hsw.regs = &icl_aux_power_well_regs,
3804 .hsw.idx = ICL_PW_CTL_IDX_AUX_F,
3805 .hsw.is_tc_tbt = false,
3806 },
3807 },
3808 {
3809 .name = "AUX C TBT1",
3810 .domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS,
3811 .ops = &icl_aux_power_well_ops,
3812 .id = DISP_PW_ID_NONE,
3813 {
3814 .hsw.regs = &icl_aux_power_well_regs,
3815 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
3816 .hsw.is_tc_tbt = true,
3817 },
3818 },
3819 {
3820 .name = "AUX D TBT2",
3821 .domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS,
3822 .ops = &icl_aux_power_well_ops,
3823 .id = DISP_PW_ID_NONE,
3824 {
3825 .hsw.regs = &icl_aux_power_well_regs,
3826 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
3827 .hsw.is_tc_tbt = true,
3828 },
3829 },
3830 {
3831 .name = "AUX E TBT3",
3832 .domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS,
3833 .ops = &icl_aux_power_well_ops,
3834 .id = DISP_PW_ID_NONE,
3835 {
3836 .hsw.regs = &icl_aux_power_well_regs,
3837 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
3838 .hsw.is_tc_tbt = true,
3839 },
3840 },
3841 {
3842 .name = "AUX F TBT4",
3843 .domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS,
3844 .ops = &icl_aux_power_well_ops,
3845 .id = DISP_PW_ID_NONE,
3846 {
3847 .hsw.regs = &icl_aux_power_well_regs,
3848 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
3849 .hsw.is_tc_tbt = true,
3850 },
3851 },
3852 {
3853 .name = "power well 4",
3854 .domains = ICL_PW_4_POWER_DOMAINS,
3855 .ops = &hsw_power_well_ops,
3856 .id = DISP_PW_ID_NONE,
3857 {
3858 .hsw.regs = &hsw_power_well_regs,
3859 .hsw.idx = ICL_PW_CTL_IDX_PW_4,
3860 .hsw.has_fuses = true,
3861 .hsw.irq_pipe_mask = BIT(PIPE_C),
3862 },
3863 },
3864};
3865
static void
tgl_tc_cold_request(struct drm_i915_private *i915, bool block)
{
	u8 tries = 0;
	int ret;

	while (1) {
		u32 low_val = 0, high_val;

		if (block)
			high_val = TGL_PCODE_EXIT_TCCOLD_DATA_H_BLOCK_REQ;
		else
			high_val = TGL_PCODE_EXIT_TCCOLD_DATA_H_UNBLOCK_REQ;

		/*
		 * Issue the request through the TCCOLD pcode mailbox: the
		 * request type goes in the high dword, the failure status
		 * comes back in the low dword.
		 */
		ret = sandybridge_pcode_read(i915, TGL_PCODE_TCCOLD, &low_val,
					     &high_val);
		if (ret == 0) {
			if (block &&
			    (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED))
				ret = -EIO;
			else
				break;
		}

		if (++tries == 3)
			break;

		if (ret == -EAGAIN)
			msleep(1);
	}

	if (ret)
		drm_err(&i915->drm, "TC cold %sblock failed\n",
			block ? "" : "un");
	else
		drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n",
			    block ? "" : "un");
}

static void
tgl_tc_cold_off_power_well_enable(struct drm_i915_private *i915,
				  struct i915_power_well *power_well)
{
	tgl_tc_cold_request(i915, true);
}

static void
tgl_tc_cold_off_power_well_disable(struct drm_i915_private *i915,
				   struct i915_power_well *power_well)
{
	tgl_tc_cold_request(i915, false);
}

static void
tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private *i915,
				   struct i915_power_well *power_well)
{
	if (power_well->count > 0)
		tgl_tc_cold_off_power_well_enable(i915, power_well);
	else
		tgl_tc_cold_off_power_well_disable(i915, power_well);
}

static bool
tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv,
				      struct i915_power_well *power_well)
{
	/*
	 * This is a virtual power well with no register state to read back;
	 * report its state based on the software reference count alone.
	 */
	return power_well->count;
}

static const struct i915_power_well_ops tgl_tc_cold_off_ops = {
	.sync_hw = tgl_tc_cold_off_power_well_sync_hw,
	.enable = tgl_tc_cold_off_power_well_enable,
	.disable = tgl_tc_cold_off_power_well_disable,
	.is_enabled = tgl_tc_cold_off_power_well_is_enabled,
};
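
/*
 * Note (hedged): wells using tgl_tc_cold_off_ops are expected to list
 * TGL_TC_COLD_OFF_POWER_DOMAINS, so that grabbing any Type-C AUX or TBT
 * domain blocks TC cold entry via the pcode handshake above before the port
 * hardware is accessed.
 */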
3950
3951static const struct i915_power_well_desc tgl_power_wells[] = {
3952 {
3953 .name = "always-on",
3954 .always_on = true,
3955 .domains = POWER_DOMAIN_MASK,
3956 .ops = &i9xx_always_on_power_well_ops,
3957 .id = DISP_PW_ID_NONE,
3958 },
3959 {
3960 .name = "power well 1",
		/* Handled by the DMC firmware */
3962 .always_on = true,
3963 .domains = 0,
3964 .ops = &hsw_power_well_ops,
3965 .id = SKL_DISP_PW_1,
3966 {
3967 .hsw.regs = &hsw_power_well_regs,
3968 .hsw.idx = ICL_PW_CTL_IDX_PW_1,
3969 .hsw.has_fuses = true,
3970 },
3971 },
3972 {
3973 .name = "DC off",
3974 .domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS,
3975 .ops = &gen9_dc_off_power_well_ops,
3976 .id = SKL_DISP_DC_OFF,
3977 },
3978 {
3979 .name = "power well 2",
3980 .domains = TGL_PW_2_POWER_DOMAINS,
3981 .ops = &hsw_power_well_ops,
3982 .id = SKL_DISP_PW_2,
3983 {
3984 .hsw.regs = &hsw_power_well_regs,
3985 .hsw.idx = ICL_PW_CTL_IDX_PW_2,
3986 .hsw.has_fuses = true,
3987 },
3988 },
3989 {
3990 .name = "power well 3",
3991 .domains = TGL_PW_3_POWER_DOMAINS,
3992 .ops = &hsw_power_well_ops,
3993 .id = ICL_DISP_PW_3,
3994 {
3995 .hsw.regs = &hsw_power_well_regs,
3996 .hsw.idx = ICL_PW_CTL_IDX_PW_3,
3997 .hsw.irq_pipe_mask = BIT(PIPE_B),
3998 .hsw.has_vga = true,
3999 .hsw.has_fuses = true,
4000 },
4001 },
4002 {
4003 .name = "DDI A IO",
4004 .domains = ICL_DDI_IO_A_POWER_DOMAINS,
4005 .ops = &hsw_power_well_ops,
4006 .id = DISP_PW_ID_NONE,
4007 {
4008 .hsw.regs = &icl_ddi_power_well_regs,
4009 .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
4010 }
4011 },
4012 {
4013 .name = "DDI B IO",
4014 .domains = ICL_DDI_IO_B_POWER_DOMAINS,
4015 .ops = &hsw_power_well_ops,
4016 .id = DISP_PW_ID_NONE,
4017 {
4018 .hsw.regs = &icl_ddi_power_well_regs,
4019 .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
4020 }
4021 },
4022 {
4023 .name = "DDI C IO",
4024 .domains = ICL_DDI_IO_C_POWER_DOMAINS,
4025 .ops = &hsw_power_well_ops,
4026 .id = DISP_PW_ID_NONE,
4027 {
4028 .hsw.regs = &icl_ddi_power_well_regs,
4029 .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
4030 }
4031 },
4032 {
4033 .name = "DDI D TC1 IO",
4034 .domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS,
4035 .ops = &hsw_power_well_ops,
4036 .id = DISP_PW_ID_NONE,
4037 {
4038 .hsw.regs = &icl_ddi_power_well_regs,
4039 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
4040 },
4041 },
4042 {
4043 .name = "DDI E TC2 IO",
4044 .domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS,
4045 .ops = &hsw_power_well_ops,
4046 .id = DISP_PW_ID_NONE,
4047 {
4048 .hsw.regs = &icl_ddi_power_well_regs,
4049 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
4050 },
4051 },
4052 {
4053 .name = "DDI F TC3 IO",
4054 .domains = TGL_DDI_IO_F_TC3_POWER_DOMAINS,
4055 .ops = &hsw_power_well_ops,
4056 .id = DISP_PW_ID_NONE,
4057 {
4058 .hsw.regs = &icl_ddi_power_well_regs,
4059 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3,
4060 },
4061 },
4062 {
4063 .name = "DDI G TC4 IO",
4064 .domains = TGL_DDI_IO_G_TC4_POWER_DOMAINS,
4065 .ops = &hsw_power_well_ops,
4066 .id = DISP_PW_ID_NONE,
4067 {
4068 .hsw.regs = &icl_ddi_power_well_regs,
4069 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4,
4070 },
4071 },
4072 {
4073 .name = "DDI H TC5 IO",
4074 .domains = TGL_DDI_IO_H_TC5_POWER_DOMAINS,
4075 .ops = &hsw_power_well_ops,
4076 .id = DISP_PW_ID_NONE,
4077 {
4078 .hsw.regs = &icl_ddi_power_well_regs,
4079 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC5,
4080 },
4081 },
4082 {
4083 .name = "DDI I TC6 IO",
4084 .domains = TGL_DDI_IO_I_TC6_POWER_DOMAINS,
4085 .ops = &hsw_power_well_ops,
4086 .id = DISP_PW_ID_NONE,
4087 {
4088 .hsw.regs = &icl_ddi_power_well_regs,
4089 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC6,
4090 },
4091 },
4092 {
4093 .name = "AUX A",
4094 .domains = TGL_AUX_A_IO_POWER_DOMAINS,
4095 .ops = &icl_aux_power_well_ops,
4096 .id = DISP_PW_ID_NONE,
4097 {
4098 .hsw.regs = &icl_aux_power_well_regs,
4099 .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
4100 },
4101 },
4102 {
4103 .name = "AUX B",
4104 .domains = TGL_AUX_B_IO_POWER_DOMAINS,
4105 .ops = &icl_aux_power_well_ops,
4106 .id = DISP_PW_ID_NONE,
4107 {
4108 .hsw.regs = &icl_aux_power_well_regs,
4109 .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
4110 },
4111 },
4112 {
4113 .name = "AUX C",
4114 .domains = TGL_AUX_C_IO_POWER_DOMAINS,
4115 .ops = &icl_aux_power_well_ops,
4116 .id = DISP_PW_ID_NONE,
4117 {
4118 .hsw.regs = &icl_aux_power_well_regs,
4119 .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
4120 },
4121 },
4122 {
4123 .name = "AUX D TC1",
4124 .domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS,
4125 .ops = &icl_aux_power_well_ops,
4126 .id = DISP_PW_ID_NONE,
4127 {
4128 .hsw.regs = &icl_aux_power_well_regs,
4129 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
4130 .hsw.is_tc_tbt = false,
4131 },
4132 },
4133 {
4134 .name = "AUX E TC2",
4135 .domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS,
4136 .ops = &icl_aux_power_well_ops,
4137 .id = DISP_PW_ID_NONE,
4138 {
4139 .hsw.regs = &icl_aux_power_well_regs,
4140 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
4141 .hsw.is_tc_tbt = false,
4142 },
4143 },
4144 {
4145 .name = "AUX F TC3",
4146 .domains = TGL_AUX_F_TC3_IO_POWER_DOMAINS,
4147 .ops = &icl_aux_power_well_ops,
4148 .id = DISP_PW_ID_NONE,
4149 {
4150 .hsw.regs = &icl_aux_power_well_regs,
4151 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3,
4152 .hsw.is_tc_tbt = false,
4153 },
4154 },
4155 {
4156 .name = "AUX G TC4",
4157 .domains = TGL_AUX_G_TC4_IO_POWER_DOMAINS,
4158 .ops = &icl_aux_power_well_ops,
4159 .id = DISP_PW_ID_NONE,
4160 {
4161 .hsw.regs = &icl_aux_power_well_regs,
4162 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4,
4163 .hsw.is_tc_tbt = false,
4164 },
4165 },
4166 {
4167 .name = "AUX H TC5",
4168 .domains = TGL_AUX_H_TC5_IO_POWER_DOMAINS,
4169 .ops = &icl_aux_power_well_ops,
4170 .id = DISP_PW_ID_NONE,
4171 {
4172 .hsw.regs = &icl_aux_power_well_regs,
4173 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC5,
4174 .hsw.is_tc_tbt = false,
4175 },
4176 },
4177 {
4178 .name = "AUX I TC6",
4179 .domains = TGL_AUX_I_TC6_IO_POWER_DOMAINS,
4180 .ops = &icl_aux_power_well_ops,
4181 .id = DISP_PW_ID_NONE,
4182 {
4183 .hsw.regs = &icl_aux_power_well_regs,
4184 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC6,
4185 .hsw.is_tc_tbt = false,
4186 },
4187 },
4188 {
4189 .name = "AUX D TBT1",
4190 .domains = TGL_AUX_D_TBT1_IO_POWER_DOMAINS,
4191 .ops = &icl_aux_power_well_ops,
4192 .id = DISP_PW_ID_NONE,
4193 {
4194 .hsw.regs = &icl_aux_power_well_regs,
4195 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1,
4196 .hsw.is_tc_tbt = true,
4197 },
4198 },
4199 {
4200 .name = "AUX E TBT2",
4201 .domains = TGL_AUX_E_TBT2_IO_POWER_DOMAINS,
4202 .ops = &icl_aux_power_well_ops,
4203 .id = DISP_PW_ID_NONE,
4204 {
4205 .hsw.regs = &icl_aux_power_well_regs,
4206 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2,
4207 .hsw.is_tc_tbt = true,
4208 },
4209 },
4210 {
4211 .name = "AUX F TBT3",
4212 .domains = TGL_AUX_F_TBT3_IO_POWER_DOMAINS,
4213 .ops = &icl_aux_power_well_ops,
4214 .id = DISP_PW_ID_NONE,
4215 {
4216 .hsw.regs = &icl_aux_power_well_regs,
4217 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3,
4218 .hsw.is_tc_tbt = true,
4219 },
4220 },
4221 {
4222 .name = "AUX G TBT4",
4223 .domains = TGL_AUX_G_TBT4_IO_POWER_DOMAINS,
4224 .ops = &icl_aux_power_well_ops,
4225 .id = DISP_PW_ID_NONE,
4226 {
4227 .hsw.regs = &icl_aux_power_well_regs,
4228 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4,
4229 .hsw.is_tc_tbt = true,
4230 },
4231 },
4232 {
4233 .name = "AUX H TBT5",
4234 .domains = TGL_AUX_H_TBT5_IO_POWER_DOMAINS,
4235 .ops = &icl_aux_power_well_ops,
4236 .id = DISP_PW_ID_NONE,
4237 {
4238 .hsw.regs = &icl_aux_power_well_regs,
4239 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5,
4240 .hsw.is_tc_tbt = true,
4241 },
4242 },
4243 {
4244 .name = "AUX I TBT6",
4245 .domains = TGL_AUX_I_TBT6_IO_POWER_DOMAINS,
4246 .ops = &icl_aux_power_well_ops,
4247 .id = DISP_PW_ID_NONE,
4248 {
4249 .hsw.regs = &icl_aux_power_well_regs,
4250 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6,
4251 .hsw.is_tc_tbt = true,
4252 },
4253 },
4254 {
4255 .name = "power well 4",
4256 .domains = TGL_PW_4_POWER_DOMAINS,
4257 .ops = &hsw_power_well_ops,
4258 .id = DISP_PW_ID_NONE,
4259 {
4260 .hsw.regs = &hsw_power_well_regs,
4261 .hsw.idx = ICL_PW_CTL_IDX_PW_4,
4262 .hsw.has_fuses = true,
4263 .hsw.irq_pipe_mask = BIT(PIPE_C),
4264 }
4265 },
4266 {
4267 .name = "power well 5",
4268 .domains = TGL_PW_5_POWER_DOMAINS,
4269 .ops = &hsw_power_well_ops,
4270 .id = DISP_PW_ID_NONE,
4271 {
4272 .hsw.regs = &hsw_power_well_regs,
4273 .hsw.idx = TGL_PW_CTL_IDX_PW_5,
4274 .hsw.has_fuses = true,
4275 .hsw.irq_pipe_mask = BIT(PIPE_D),
4276 },
4277 },
4278 {
4279 .name = "TC cold off",
4280 .domains = TGL_TC_COLD_OFF_POWER_DOMAINS,
4281 .ops = &tgl_tc_cold_off_ops,
4282 .id = DISP_PW_ID_NONE,
4283 },
4284};
4285
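/*
 * Normalize the i915.disable_power_well module parameter: any non-negative
 * value is used as a boolean, while the default of -1 (auto) allows unused
 * power wells to be disabled.
 */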
4286static int
4287sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
4288 int disable_power_well)
4289{
4290 if (disable_power_well >= 0)
4291 return !!disable_power_well;
4292
4293 return 1;
4294}
4295
4296static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
4297 int enable_dc)
4298{
4299 u32 mask;
4300 int requested_dc;
4301 int max_dc;
4302
4303 if (INTEL_GEN(dev_priv) >= 12) {
4304 max_dc = 4;
/*
 * DC9 has a separate HW flow from the rest of the DC states,
 * not depending on the DMC firmware. It's needed by system
 * suspend/resume, so allow it unconditionally.
 */
4310 mask = DC_STATE_EN_DC9;
4311 } else if (IS_GEN(dev_priv, 11)) {
4312 max_dc = 2;
4313 mask = DC_STATE_EN_DC9;
4314 } else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) {
4315 max_dc = 2;
4316 mask = 0;
4317 } else if (IS_GEN9_LP(dev_priv)) {
4318 max_dc = 1;
4319 mask = DC_STATE_EN_DC9;
4320 } else {
4321 max_dc = 0;
4322 mask = 0;
4323 }
4324
4325 if (!i915_modparams.disable_power_well)
4326 max_dc = 0;
4327
4328 if (enable_dc >= 0 && enable_dc <= max_dc) {
4329 requested_dc = enable_dc;
4330 } else if (enable_dc == -1) {
4331 requested_dc = max_dc;
4332 } else if (enable_dc > max_dc && enable_dc <= 4) {
4333 drm_dbg_kms(&dev_priv->drm,
4334 "Adjusting requested max DC state (%d->%d)\n",
4335 enable_dc, max_dc);
4336 requested_dc = max_dc;
4337 } else {
4338 drm_err(&dev_priv->drm,
4339 "Unexpected value for enable_dc (%d)\n", enable_dc);
4340 requested_dc = max_dc;
4341 }
4342
4343 switch (requested_dc) {
4344 case 4:
4345 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6;
4346 break;
4347 case 3:
4348 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5;
4349 break;
4350 case 2:
4351 mask |= DC_STATE_EN_UPTO_DC6;
4352 break;
4353 case 1:
4354 mask |= DC_STATE_EN_UPTO_DC5;
4355 break;
4356 }
4357
4358 drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask);
4359
4360 return mask;
4361}
4362
4363static int
4364__set_power_wells(struct i915_power_domains *power_domains,
4365 const struct i915_power_well_desc *power_well_descs,
4366 int power_well_count)
4367{
4368 u64 power_well_ids = 0;
4369 int i;
4370
4371 power_domains->power_well_count = power_well_count;
4372 power_domains->power_wells =
4373 kcalloc(power_well_count,
4374 sizeof(*power_domains->power_wells),
4375 GFP_KERNEL);
4376 if (!power_domains->power_wells)
4377 return -ENOMEM;
4378
4379 for (i = 0; i < power_well_count; i++) {
4380 enum i915_power_well_id id = power_well_descs[i].id;
4381
4382 power_domains->power_wells[i].desc = &power_well_descs[i];
4383
4384 if (id == DISP_PW_ID_NONE)
4385 continue;
4386
4387 WARN_ON(id >= sizeof(power_well_ids) * 8);
4388 WARN_ON(power_well_ids & BIT_ULL(id));
4389 power_well_ids |= BIT_ULL(id);
4390 }
4391
4392 return 0;
4393}
4394
4395#define set_power_wells(power_domains, __power_well_descs) \
4396 __set_power_wells(power_domains, __power_well_descs, \
4397 ARRAY_SIZE(__power_well_descs))
4398
/**
 * intel_power_domains_init - initializes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Initializes the power domain structures for @dev_priv depending upon the
 * supported platform.
 */
4406int intel_power_domains_init(struct drm_i915_private *dev_priv)
4407{
4408 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4409 int err;
4410
4411 i915_modparams.disable_power_well =
4412 sanitize_disable_power_well_option(dev_priv,
4413 i915_modparams.disable_power_well);
4414 dev_priv->csr.allowed_dc_mask =
4415 get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);
4416
4417 dev_priv->csr.target_dc_state =
4418 sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
4419
4420 BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
4421
4422 mutex_init(&power_domains->lock);
4423
4424 INIT_DELAYED_WORK(&power_domains->async_put_work,
4425 intel_display_power_put_async_work);
4426
/*
 * The enabling order will be from lower to higher indexed wells,
 * the disabling order is reversed.
 */
4431 if (IS_GEN(dev_priv, 12)) {
4432 err = set_power_wells(power_domains, tgl_power_wells);
4433 } else if (IS_GEN(dev_priv, 11)) {
4434 err = set_power_wells(power_domains, icl_power_wells);
4435 } else if (IS_CANNONLAKE(dev_priv)) {
4436 err = set_power_wells(power_domains, cnl_power_wells);
4437
/*
 * DDI and AUX IO power wells are listed for all ports regardless
 * of their presence or use. To avoid timeouts on the wells behind
 * the missing port, drop the last two entries (DDI F and AUX F)
 * for the SKUs without port F.
 */
4444 if (!IS_CNL_WITH_PORT_F(dev_priv))
4445 power_domains->power_well_count -= 2;
4446 } else if (IS_GEMINILAKE(dev_priv)) {
4447 err = set_power_wells(power_domains, glk_power_wells);
4448 } else if (IS_BROXTON(dev_priv)) {
4449 err = set_power_wells(power_domains, bxt_power_wells);
4450 } else if (IS_GEN9_BC(dev_priv)) {
4451 err = set_power_wells(power_domains, skl_power_wells);
4452 } else if (IS_CHERRYVIEW(dev_priv)) {
4453 err = set_power_wells(power_domains, chv_power_wells);
4454 } else if (IS_BROADWELL(dev_priv)) {
4455 err = set_power_wells(power_domains, bdw_power_wells);
4456 } else if (IS_HASWELL(dev_priv)) {
4457 err = set_power_wells(power_domains, hsw_power_wells);
4458 } else if (IS_VALLEYVIEW(dev_priv)) {
4459 err = set_power_wells(power_domains, vlv_power_wells);
4460 } else if (IS_I830(dev_priv)) {
4461 err = set_power_wells(power_domains, i830_power_wells);
4462 } else {
4463 err = set_power_wells(power_domains, i9xx_always_on_power_well);
4464 }
4465
4466 return err;
4467}
4468
/**
 * intel_power_domains_cleanup - clean up power domains resources
 * @dev_priv: i915 device instance
 *
 * Release any resources acquired by intel_power_domains_init()
 */
4475void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
4476{
4477 kfree(dev_priv->power_domains.power_wells);
4478}
4479
4480static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
4481{
4482 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4483 struct i915_power_well *power_well;
4484
4485 mutex_lock(&power_domains->lock);
4486 for_each_power_well(dev_priv, power_well) {
4487 power_well->desc->ops->sync_hw(dev_priv, power_well);
4488 power_well->hw_enabled =
4489 power_well->desc->ops->is_enabled(dev_priv, power_well);
4490 }
4491 mutex_unlock(&power_domains->lock);
4492}
4493
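/*
 * Request a power state change for a single DBUF slice and poll its status
 * bit. Returns false if the hardware did not reach the requested state.
 */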
4494static bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
4495 i915_reg_t reg, bool enable)
4496{
4497 u32 val, status;
4498
4499 val = intel_de_read(dev_priv, reg);
4500 val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
4501 intel_de_write(dev_priv, reg, val);
4502 intel_de_posting_read(dev_priv, reg);
4503 udelay(10);
4504
4505 status = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
4506 if ((enable && !status) || (!enable && status)) {
4507 drm_err(&dev_priv->drm, "DBus power %s timeout!\n",
4508 enable ? "enable" : "disable");
4509 return false;
4510 }
4511 return true;
4512}
4513
4514static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
4515{
4516 icl_dbuf_slices_update(dev_priv, BIT(DBUF_S1));
4517}
4518
4519static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
4520{
4521 icl_dbuf_slices_update(dev_priv, 0);
4522}
4523
4524void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
4525 u8 req_slices)
4526{
4527 int i;
4528 int max_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
4529 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4530
4531 drm_WARN(&dev_priv->drm, hweight8(req_slices) > max_slices,
4532 "Invalid number of dbuf slices requested\n");
4533
4534 drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
4535 req_slices);
4536
/*
 * Might be running this in parallel to gen9_dc_off_power_well_enable
 * being called from intel_dp_detect for instance,
 * which causes assertion triggered by race condition,
 * as gen9_assert_dbuf_enabled might preempt this when registers
 * were already updated, while dev_priv was not.
 */
4544 mutex_lock(&power_domains->lock);
4545
4546 for (i = 0; i < max_slices; i++) {
4547 intel_dbuf_slice_set(dev_priv,
4548 DBUF_CTL_S(i),
4549 (req_slices & BIT(i)) != 0);
4550 }
4551
4552 dev_priv->enabled_dbuf_slices_mask = req_slices;
4553
4554 mutex_unlock(&power_domains->lock);
4555}
4556
4557static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
4558{
4559 skl_ddb_get_hw_state(dev_priv);
4560
/*
 * Just power up at least 1 slice, we will
 * figure out later which slices we have and what we need.
 */
4564 icl_dbuf_slices_update(dev_priv, dev_priv->enabled_dbuf_slices_mask |
4565 BIT(DBUF_S1));
4566}
4567
4568static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
4569{
4570 icl_dbuf_slices_update(dev_priv, 0);
4571}
4572
4573static void icl_mbus_init(struct drm_i915_private *dev_priv)
4574{
4575 u32 mask, val;
4576
4577 mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
4578 MBUS_ABOX_BT_CREDIT_POOL2_MASK |
4579 MBUS_ABOX_B_CREDIT_MASK |
4580 MBUS_ABOX_BW_CREDIT_MASK;
4581 val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
4582 MBUS_ABOX_BT_CREDIT_POOL2(16) |
4583 MBUS_ABOX_B_CREDIT(1) |
4584 MBUS_ABOX_BW_CREDIT(1);
4585
4586 intel_de_rmw(dev_priv, MBUS_ABOX_CTL, mask, val);
4587 if (INTEL_GEN(dev_priv) >= 12) {
4588 intel_de_rmw(dev_priv, MBUS_ABOX1_CTL, mask, val);
4589 intel_de_rmw(dev_priv, MBUS_ABOX2_CTL, mask, val);
4590 }
4591}
4592
4593static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
4594{
4595 u32 val = intel_de_read(dev_priv, LCPLL_CTL);
4596
/*
 * The LCPLL register should be turned on by the BIOS. For now
 * let's just check its state and print errors in case
 * something is wrong. Don't even try to turn it on.
 */
4603 if (val & LCPLL_CD_SOURCE_FCLK)
4604 drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n");
4605
4606 if (val & LCPLL_PLL_DISABLE)
4607 drm_err(&dev_priv->drm, "LCPLL is disabled\n");
4608
4609 if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
4610 drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n");
4611}
4612
4613static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
4614{
4615 struct drm_device *dev = &dev_priv->drm;
4616 struct intel_crtc *crtc;
4617
4618 for_each_intel_crtc(dev, crtc)
4619 I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
4620 pipe_name(crtc->pipe));
4621
4622 I915_STATE_WARN(intel_de_read(dev_priv, HSW_PWR_WELL_CTL2),
4623 "Display power well on\n");
4624 I915_STATE_WARN(intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE,
4625 "SPLL enabled\n");
4626 I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
4627 "WRPLL1 enabled\n");
4628 I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
4629 "WRPLL2 enabled\n");
4630 I915_STATE_WARN(intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON,
4631 "Panel power on\n");
4632 I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
4633 "CPU PWM1 enabled\n");
4634 if (IS_HASWELL(dev_priv))
4635 I915_STATE_WARN(intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
4636 "CPU PWM2 enabled\n");
4637 I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
4638 "PCH PWM1 enabled\n");
4639 I915_STATE_WARN(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
4640 "Utility pin enabled\n");
4641 I915_STATE_WARN(intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE,
4642 "PCH GTC enabled\n");
4643
/*
 * Interrupt handlers can touch display registers, so all interrupts
 * must already be off before LCPLL (and with it the display) is shut
 * down; checking the top level IRQ enable covers every source at once.
 */
4650 I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
4651}
4652
4653static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
4654{
4655 if (IS_HASWELL(dev_priv))
4656 return intel_de_read(dev_priv, D_COMP_HSW);
4657 else
4658 return intel_de_read(dev_priv, D_COMP_BDW);
4659}
4660
4661static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
4662{
4663 if (IS_HASWELL(dev_priv)) {
4664 if (sandybridge_pcode_write(dev_priv,
4665 GEN6_PCODE_WRITE_D_COMP, val))
4666 drm_dbg_kms(&dev_priv->drm,
4667 "Failed to write to D_COMP\n");
4668 } else {
4669 intel_de_write(dev_priv, D_COMP_BDW, val);
4670 intel_de_posting_read(dev_priv, D_COMP_BDW);
4671 }
4672}
4673
/*
 * This function implements pieces of two sequences from BSpec:
 * - Sequence for display software to disable LCPLL
 * - Sequence for display software to allow package C8+
 * The steps implemented here are just the steps that actually touch the
 * LCPLL register. Callers should take care of disabling all the display
 * engine functions, doing the mode unset, fixing interrupts, etc.
 */
4682static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
4683 bool switch_to_fclk, bool allow_power_down)
4684{
4685 u32 val;
4686
4687 assert_can_disable_lcpll(dev_priv);
4688
4689 val = intel_de_read(dev_priv, LCPLL_CTL);
4690
4691 if (switch_to_fclk) {
4692 val |= LCPLL_CD_SOURCE_FCLK;
4693 intel_de_write(dev_priv, LCPLL_CTL, val);
4694
4695 if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
4696 LCPLL_CD_SOURCE_FCLK_DONE, 1))
4697 drm_err(&dev_priv->drm, "Switching to FCLK failed\n");
4698
4699 val = intel_de_read(dev_priv, LCPLL_CTL);
4700 }
4701
4702 val |= LCPLL_PLL_DISABLE;
4703 intel_de_write(dev_priv, LCPLL_CTL, val);
4704 intel_de_posting_read(dev_priv, LCPLL_CTL);
4705
4706 if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
4707 drm_err(&dev_priv->drm, "LCPLL still locked\n");
4708
4709 val = hsw_read_dcomp(dev_priv);
4710 val |= D_COMP_COMP_DISABLE;
4711 hsw_write_dcomp(dev_priv, val);
4712 ndelay(100);
4713
4714 if (wait_for((hsw_read_dcomp(dev_priv) &
4715 D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
4716 drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n");
4717
4718 if (allow_power_down) {
4719 val = intel_de_read(dev_priv, LCPLL_CTL);
4720 val |= LCPLL_POWER_DOWN_ALLOW;
4721 intel_de_write(dev_priv, LCPLL_CTL, val);
4722 intel_de_posting_read(dev_priv, LCPLL_CTL);
4723 }
4724}
4725
/*
 * Fully restores LCPLL, disallowing power down and switching back to
 * LCPLL as the CDCLK clock source.
 */
4730static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
4731{
4732 u32 val;
4733
4734 val = intel_de_read(dev_priv, LCPLL_CTL);
4735
4736 if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
4737 LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
4738 return;
4739
/*
 * Make sure we're not on PC8 state before disabling PC8, otherwise
 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
 */
4744 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
4745
4746 if (val & LCPLL_POWER_DOWN_ALLOW) {
4747 val &= ~LCPLL_POWER_DOWN_ALLOW;
4748 intel_de_write(dev_priv, LCPLL_CTL, val);
4749 intel_de_posting_read(dev_priv, LCPLL_CTL);
4750 }
4751
4752 val = hsw_read_dcomp(dev_priv);
4753 val |= D_COMP_COMP_FORCE;
4754 val &= ~D_COMP_COMP_DISABLE;
4755 hsw_write_dcomp(dev_priv, val);
4756
4757 val = intel_de_read(dev_priv, LCPLL_CTL);
4758 val &= ~LCPLL_PLL_DISABLE;
4759 intel_de_write(dev_priv, LCPLL_CTL, val);
4760
4761 if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
4762 drm_err(&dev_priv->drm, "LCPLL not locked yet\n");
4763
4764 if (val & LCPLL_CD_SOURCE_FCLK) {
4765 val = intel_de_read(dev_priv, LCPLL_CTL);
4766 val &= ~LCPLL_CD_SOURCE_FCLK;
4767 intel_de_write(dev_priv, LCPLL_CTL, val);
4768
4769 if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
4770 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
4771 drm_err(&dev_priv->drm,
4772 "Switching back to LCPLL failed\n");
4773 }
4774
4775 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
4776
4777 intel_update_cdclk(dev_priv);
4778 intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK");
4779}
4780
/*
 * Package states C8 and deeper are really deep PC states that can only be
 * reached when all the devices on the system allow it, so even if the
 * graphics device allows PC8+, it doesn't mean the system will actually
 * get to these states. Our driver only allows PC8+ when going into runtime
 * PM.
 *
 * The requirements for PC8+ are that all the outputs are disabled, the
 * power well is disabled and most interrupts are disabled, and these are
 * also requirements for runtime PM. When these conditions are met, we
 * manually do the other conditions: disable the interrupts, clocks and
 * switch LCPLL refclk to Fclk. If we're in PC8+ and we get a non-hotplug
 * interrupt, we can hard hang the machine.
 *
 * When we really reach PC8 or deeper states (not just when we allow it) we
 * lose the state of some registers, so when we come back from PC8+ we need
 * to restore this state. We don't get into PC8+ if we're not in RC6, so we
 * don't need to take care of the registers kept by RC6.
 *
 * For more, read "Display Sequences for Package C8" in the hardware
 * documentation.
 */
4804static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
4805{
4806 u32 val;
4807
4808 drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n");
4809
4810 if (HAS_PCH_LPT_LP(dev_priv)) {
4811 val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
4812 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
4813 intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
4814 }
4815
4816 lpt_disable_clkout_dp(dev_priv);
4817 hsw_disable_lcpll(dev_priv, true, true);
4818}
4819
4820static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
4821{
4822 u32 val;
4823
4824 drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n");
4825
4826 hsw_restore_lcpll(dev_priv);
4827 intel_init_pch_refclk(dev_priv);
4828
4829 if (HAS_PCH_LPT_LP(dev_priv)) {
4830 val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
4831 val |= PCH_LP_PARTITION_LEVEL_DISABLE;
4832 intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
4833 }
4834}
4835
4836static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
4837 bool enable)
4838{
4839 i915_reg_t reg;
4840 u32 reset_bits, val;
4841
4842 if (IS_IVYBRIDGE(dev_priv)) {
4843 reg = GEN7_MSG_CTL;
4844 reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
4845 } else {
4846 reg = HSW_NDE_RSTWRN_OPT;
4847 reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
4848 }
4849
4850 val = intel_de_read(dev_priv, reg);
4851
4852 if (enable)
4853 val |= reset_bits;
4854 else
4855 val &= ~reset_bits;
4856
4857 intel_de_write(dev_priv, reg, val);
4858}
4859
4860static void skl_display_core_init(struct drm_i915_private *dev_priv,
4861 bool resume)
4862{
4863 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4864 struct i915_power_well *well;
4865
4866 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4867
/* enable PCH reset handshake */
4869 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
4870
/* enable PG1 and Misc I/O */
4872 mutex_lock(&power_domains->lock);
4873
4874 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4875 intel_power_well_enable(dev_priv, well);
4876
4877 well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
4878 intel_power_well_enable(dev_priv, well);
4879
4880 mutex_unlock(&power_domains->lock);
4881
4882 intel_cdclk_init_hw(dev_priv);
4883
4884 gen9_dbuf_enable(dev_priv);
4885
4886 if (resume && dev_priv->csr.dmc_payload)
4887 intel_csr_load_program(dev_priv);
4888}
4889
4890static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
4891{
4892 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4893 struct i915_power_well *well;
4894
4895 gen9_disable_dc_states(dev_priv);
4896
4897 gen9_dbuf_disable(dev_priv);
4898
4899 intel_cdclk_uninit_hw(dev_priv);
4900
/* The spec doesn't call for removing the reset handshake flag */

/* Disable PG1 */
4904 mutex_lock(&power_domains->lock);
4905
/*
 * BSpec says to keep the MISC IO power well enabled here, only
 * remove our request for power well 1. Note that even though the
 * driver's request is removed power well 1 may stay enabled after
 * this due to the DMC firmware's own request on it.
 */
4912 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4913 intel_power_well_disable(dev_priv, well);
4914
4915 mutex_unlock(&power_domains->lock);
4916
4917 usleep_range(10, 30);
4918}
4919
4920static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
4921{
4922 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4923 struct i915_power_well *well;
4924
4925 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4926
/*
 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
 * or else the reset will hang because there is no PCH to respond.
 */
4933 intel_pch_reset_handshake(dev_priv, false);
4934
/* Enable PG1 */
4936 mutex_lock(&power_domains->lock);
4937
4938 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4939 intel_power_well_enable(dev_priv, well);
4940
4941 mutex_unlock(&power_domains->lock);
4942
4943 intel_cdclk_init_hw(dev_priv);
4944
4945 gen9_dbuf_enable(dev_priv);
4946
4947 if (resume && dev_priv->csr.dmc_payload)
4948 intel_csr_load_program(dev_priv);
4949}
4950
4951static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
4952{
4953 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4954 struct i915_power_well *well;
4955
4956 gen9_disable_dc_states(dev_priv);
4957
4958 gen9_dbuf_disable(dev_priv);
4959
4960 intel_cdclk_uninit_hw(dev_priv);
4961
/* The spec doesn't call for removing the reset handshake flag */

/*
 * Disable PW1 (PG1).
 * Note that even though the driver's request is removed power well 1
 * may stay enabled after this due to the DMC firmware's own request on it.
 */
4969 mutex_lock(&power_domains->lock);
4970
4971 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4972 intel_power_well_disable(dev_priv, well);
4973
4974 mutex_unlock(&power_domains->lock);
4975
4976 usleep_range(10, 30);
4977}
4978
4979static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
4980{
4981 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4982 struct i915_power_well *well;
4983
4984 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4985
/* 1. Enable PCH Reset Handshake. */
4987 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
4988
/* 2-3. Initialize all combo phys */
4990 intel_combo_phy_init(dev_priv);
4991
/*
 * 4. Enable Power Well 1 (PG1).
 *    The AUX IO power wells will be enabled on demand.
 */
4996 mutex_lock(&power_domains->lock);
4997 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4998 intel_power_well_enable(dev_priv, well);
4999 mutex_unlock(&power_domains->lock);
5000
/* 5. Enable CD clock */
5002 intel_cdclk_init_hw(dev_priv);
5003
/* 6. Enable DBUF */
5005 gen9_dbuf_enable(dev_priv);
5006
5007 if (resume && dev_priv->csr.dmc_payload)
5008 intel_csr_load_program(dev_priv);
5009}
5010
5011static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
5012{
5013 struct i915_power_domains *power_domains = &dev_priv->power_domains;
5014 struct i915_power_well *well;
5015
5016 gen9_disable_dc_states(dev_priv);
5017
/* 1. Disable all display engine functions -> already done */

/* 2. Disable DBUF */
5021 gen9_dbuf_disable(dev_priv);
5022
/* 3. Disable CD clock */
5024 intel_cdclk_uninit_hw(dev_priv);
5025
/*
 * 4. Disable Power Well 1 (PG1).
 *    The AUX IO power wells are toggled on demand, so they are already
 *    disabled at this point.
 */
5031 mutex_lock(&power_domains->lock);
5032 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5033 intel_power_well_disable(dev_priv, well);
5034 mutex_unlock(&power_domains->lock);
5035
5036 usleep_range(10, 30);
5037
/* 5. Uninitialize all combo phys */
5039 intel_combo_phy_uninit(dev_priv);
5040}
5041
5042struct buddy_page_mask {
5043 u32 page_mask;
5044 u8 type;
5045 u8 num_channels;
5046};
5047
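/*
 * BW_BUDDY page masks, selected by DRAM type and channel count. The
 * zero-terminated tables below cover the regular TGL settings and the
 * restricted settings used by the Wa_1409767108 workaround path.
 */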
5048static const struct buddy_page_mask tgl_buddy_page_masks[] = {
5049 { .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0xE },
5050 { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0xF },
5051 { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
5052 { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x1F },
5053 {}
5054};
5055
5056static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
5057 { .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
5058 { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0x1 },
5059 { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
5060 { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x3 },
5061 {}
5062};
5063
5064static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
5065{
5066 enum intel_dram_type type = dev_priv->dram_info.type;
5067 u8 num_channels = dev_priv->dram_info.num_channels;
5068 const struct buddy_page_mask *table;
5069 int i;
5070
5071 if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B0))
/* Wa_1409767108: tgl */
5073 table = wa_1409767108_buddy_page_masks;
5074 else
5075 table = tgl_buddy_page_masks;
5076
5077 for (i = 0; table[i].page_mask != 0; i++)
5078 if (table[i].num_channels == num_channels &&
5079 table[i].type == type)
5080 break;
5081
5082 if (table[i].page_mask == 0) {
5083 drm_dbg(&dev_priv->drm,
5084 "Unknown memory configuration; disabling address buddy logic.\n");
5085 intel_de_write(dev_priv, BW_BUDDY1_CTL, BW_BUDDY_DISABLE);
5086 intel_de_write(dev_priv, BW_BUDDY2_CTL, BW_BUDDY_DISABLE);
5087 } else {
5088 intel_de_write(dev_priv, BW_BUDDY1_PAGE_MASK,
5089 table[i].page_mask);
5090 intel_de_write(dev_priv, BW_BUDDY2_PAGE_MASK,
5091 table[i].page_mask);
5092
/* Wa_22010178259:tgl */
5094 intel_de_rmw(dev_priv, BW_BUDDY1_CTL,
5095 BW_BUDDY_TLB_REQ_TIMER_MASK,
5096 REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8));
5097 intel_de_rmw(dev_priv, BW_BUDDY2_CTL,
5098 BW_BUDDY_TLB_REQ_TIMER_MASK,
5099 REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8));
5100 }
5101}
5102
5103static void icl_display_core_init(struct drm_i915_private *dev_priv,
5104 bool resume)
5105{
5106 struct i915_power_domains *power_domains = &dev_priv->power_domains;
5107 struct i915_power_well *well;
5108
5109 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
5110
/* 1. Enable PCH reset handshake. */
5112 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
5113
/* 2. Initialize all combo phys */
5115 intel_combo_phy_init(dev_priv);
5116
/*
 * 3. Enable Power Well 1 (PG1).
 *    The AUX IO power wells will be enabled on demand.
 */
5121 mutex_lock(&power_domains->lock);
5122 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5123 intel_power_well_enable(dev_priv, well);
5124 mutex_unlock(&power_domains->lock);
5125
/* 4. Enable CDCLK. */
5127 intel_cdclk_init_hw(dev_priv);
5128
/* 5. Enable DBUF. */
5130 icl_dbuf_enable(dev_priv);
5131
/* 6. Setup MBUS. */
5133 icl_mbus_init(dev_priv);
5134
/* 7. Program arbiter BW_BUDDY registers */
5136 if (INTEL_GEN(dev_priv) >= 12)
5137 tgl_bw_buddy_init(dev_priv);
5138
5139 if (resume && dev_priv->csr.dmc_payload)
5140 intel_csr_load_program(dev_priv);
5141}
5142
5143static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
5144{
5145 struct i915_power_domains *power_domains = &dev_priv->power_domains;
5146 struct i915_power_well *well;
5147
5148 gen9_disable_dc_states(dev_priv);
5149
/* 1. Disable all display engine functions -> already done */

/* 2. Disable DBUF */
5153 icl_dbuf_disable(dev_priv);
5154
/* 3. Disable CD clock */
5156 intel_cdclk_uninit_hw(dev_priv);
5157
/*
 * 4. Disable Power Well 1 (PG1).
 *    The AUX IO power wells are toggled on demand, so they are already
 *    disabled at this point.
 */
5163 mutex_lock(&power_domains->lock);
5164 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5165 intel_power_well_disable(dev_priv, well);
5166 mutex_unlock(&power_domains->lock);
5167
/* 5. Uninitialize all combo phys */
5169 intel_combo_phy_uninit(dev_priv);
5170}
5171
5172static void chv_phy_control_init(struct drm_i915_private *dev_priv)
5173{
5174 struct i915_power_well *cmn_bc =
5175 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
5176 struct i915_power_well *cmn_d =
5177 lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
5178
/*
 * DISPLAY_PHY_CONTROL can get corrupted if read, so never read it
 * back; instead maintain a shadow copy in dev_priv->chv_phy_control.
 * The initial value programs the LDO sequencer delays and defaults
 * every channel to the deep-PSR power down mode.
 */
5186 dev_priv->chv_phy_control =
5187 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
5188 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
5189 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
5190 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
5191 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
5192
/*
 * If all lanes are disabled we leave the override disabled
 * with all power down bits cleared to match the state we
 * would use after disabling the port. Otherwise enable the
 * override and set the lane powerdown bits according to the
 * current lane status.
 */
5200 if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
5201 u32 status = intel_de_read(dev_priv, DPLL(PIPE_A));
5202 unsigned int mask;
5203
5204 mask = status & DPLL_PORTB_READY_MASK;
5205 if (mask == 0xf)
5206 mask = 0x0;
5207 else
5208 dev_priv->chv_phy_control |=
5209 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
5210
5211 dev_priv->chv_phy_control |=
5212 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
5213
5214 mask = (status & DPLL_PORTC_READY_MASK) >> 4;
5215 if (mask == 0xf)
5216 mask = 0x0;
5217 else
5218 dev_priv->chv_phy_control |=
5219 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
5220
5221 dev_priv->chv_phy_control |=
5222 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
5223
5224 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
5225
5226 dev_priv->chv_phy_assert[DPIO_PHY0] = false;
5227 } else {
5228 dev_priv->chv_phy_assert[DPIO_PHY0] = true;
5229 }
5230
5231 if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
5232 u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS);
5233 unsigned int mask;
5234
5235 mask = status & DPLL_PORTD_READY_MASK;
5236
5237 if (mask == 0xf)
5238 mask = 0x0;
5239 else
5240 dev_priv->chv_phy_control |=
5241 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
5242
5243 dev_priv->chv_phy_control |=
5244 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
5245
5246 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
5247
5248 dev_priv->chv_phy_assert[DPIO_PHY1] = false;
5249 } else {
5250 dev_priv->chv_phy_assert[DPIO_PHY1] = true;
5251 }
5252
5253 drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n",
5254 dev_priv->chv_phy_control);
5255
/* Defer applying the initial phy_control until the power wells are enabled. */
5257}
5258
5259static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
5260{
5261 struct i915_power_well *cmn =
5262 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
5263 struct i915_power_well *disp2d =
5264 lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);
5265
/* If the display might be already active skip this */
5267 if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
5268 disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
5269 intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST)
5270 return;
5271
5272 drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n");
5273
/* cmnlane needs DPLL registers */
5275 disp2d->desc->ops->enable(dev_priv, disp2d);
5276
/*
 * Need to assert and de-assert PHY SB reset by gating the
 * common lane power, then un-gating it.
 * Simply ungating isn't enough to reset the PHY enough to get
 * ports and lanes running.
 */
5284 cmn->desc->ops->disable(dev_priv, cmn);
5285}
5286
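/* Check with the Punit whether the subsystem behind @reg0 is fully power gated. */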
5287static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
5288{
5289 bool ret;
5290
5291 vlv_punit_get(dev_priv);
5292 ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
5293 vlv_punit_put(dev_priv);
5294
5295 return ret;
5296}
5297
5298static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
5299{
5300 drm_WARN(&dev_priv->drm,
5301 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
5302 "VED not power gated\n");
5303}
5304
5305static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
5306{
5307 static const struct pci_device_id isp_ids[] = {
5308 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
5309 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
5310 {}
5311 };
5312
5313 drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) &&
5314 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
5315 "ISP not power gated\n");
5316}
5317
5318static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
5319
/**
 * intel_power_domains_init_hw - initialize hardware power domain state
 * @i915: i915 device instance
 * @resume: Called from resume code paths or not
 *
 * This function initializes the hardware power domain state and enables all
 * power wells belonging to the INIT power domain. Power wells in other
 * domains (and not in the INIT domain) are referenced or disabled by the
 * modeset state HW readout. After that the reference count of each power
 * well must match its HW enabled state, see
 * intel_power_domains_verify_state().
 *
 * It will return with power domains disabled (to be enabled later by
 * intel_power_domains_enable()) and must be paired with
 * intel_power_domains_driver_remove().
 */
5336void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
5337{
5338 struct i915_power_domains *power_domains = &i915->power_domains;
5339
5340 power_domains->initializing = true;
5341
5342 if (INTEL_GEN(i915) >= 11) {
5343 icl_display_core_init(i915, resume);
5344 } else if (IS_CANNONLAKE(i915)) {
5345 cnl_display_core_init(i915, resume);
5346 } else if (IS_GEN9_BC(i915)) {
5347 skl_display_core_init(i915, resume);
5348 } else if (IS_GEN9_LP(i915)) {
5349 bxt_display_core_init(i915, resume);
5350 } else if (IS_CHERRYVIEW(i915)) {
5351 mutex_lock(&power_domains->lock);
5352 chv_phy_control_init(i915);
5353 mutex_unlock(&power_domains->lock);
5354 assert_isp_power_gated(i915);
5355 } else if (IS_VALLEYVIEW(i915)) {
5356 mutex_lock(&power_domains->lock);
5357 vlv_cmnlane_wa(i915);
5358 mutex_unlock(&power_domains->lock);
5359 assert_ved_power_gated(i915);
5360 assert_isp_power_gated(i915);
5361 } else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
5362 hsw_assert_cdclk(i915);
5363 intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
5364 } else if (IS_IVYBRIDGE(i915)) {
5365 intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
5366 }
5367
/*
 * Keep all power wells enabled for any dependent HW access during
 * initialization and to make sure we keep BIOS enabled display HW
 * resources powered until display HW readout is complete. We drop
 * this reference in intel_power_domains_enable().
 */
5374 power_domains->wakeref =
5375 intel_display_power_get(i915, POWER_DOMAIN_INIT);
5376
/* Disable power support if the user asked so. */
5378 if (!i915_modparams.disable_power_well)
5379 intel_display_power_get(i915, POWER_DOMAIN_INIT);
5380 intel_power_domains_sync_hw(i915);
5381
5382 power_domains->initializing = false;
5383}
5384
/**
 * intel_power_domains_driver_remove - deinitialize hw power domain state
 * @i915: i915 device instance
 *
 * De-initializes the display power domain HW state. It also ensures that the
 * device stays powered up so that the driver can be reloaded.
 *
 * It must be called with power domains already disabled (after a call to
 * intel_power_domains_disable()) and must be paired with
 * intel_power_domains_init_hw().
 */
5396void intel_power_domains_driver_remove(struct drm_i915_private *i915)
5397{
5398 intel_wakeref_t wakeref __maybe_unused =
5399 fetch_and_zero(&i915->power_domains.wakeref);
5400
/* Remove the refcount we took to keep power well support disabled. */
5402 if (!i915_modparams.disable_power_well)
5403 intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
5404
5405 intel_display_power_flush_work_sync(i915);
5406
5407 intel_power_domains_verify_state(i915);
5408
/* Keep the power well enabled, but cancel its rpm wakeref. */
5410 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
5411}
5412
/**
 * intel_power_domains_enable - enable toggling of display power wells
 * @i915: i915 device instance
 *
 * Enable the ondemand enabling/disabling of the display power wells. Note
 * that power wells not belonging to POWER_DOMAIN_INIT are allowed to be
 * toggled only at specific points of the display modeset sequence, thus they
 * are not affected by this function. It releases the reference taken by
 * intel_power_domains_init_hw() or intel_power_domains_disable() and must be
 * paired with intel_power_domains_disable().
 */
5425void intel_power_domains_enable(struct drm_i915_private *i915)
5426{
5427 intel_wakeref_t wakeref __maybe_unused =
5428 fetch_and_zero(&i915->power_domains.wakeref);
5429
5430 intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
5431 intel_power_domains_verify_state(i915);
5432}
5433
/**
 * intel_power_domains_disable - disable toggling of display power wells
 * @i915: i915 device instance
 *
 * Disable the ondemand enabling/disabling of the display power wells. See
 * intel_power_domains_enable() for which power wells this call controls.
 */
5441void intel_power_domains_disable(struct drm_i915_private *i915)
5442{
5443 struct i915_power_domains *power_domains = &i915->power_domains;
5444
5445 drm_WARN_ON(&i915->drm, power_domains->wakeref);
5446 power_domains->wakeref =
5447 intel_display_power_get(i915, POWER_DOMAIN_INIT);
5448
5449 intel_power_domains_verify_state(i915);
5450}
5451
/**
 * intel_power_domains_suspend - suspend power domain state
 * @i915: i915 device instance
 * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
 *
 * This function prepares the hardware power domain state before entering
 * system suspend.
 *
 * It must be called with power domains already disabled (after a call to
 * intel_power_domains_disable()) and paired with intel_power_domains_resume().
 */
5463void intel_power_domains_suspend(struct drm_i915_private *i915,
5464 enum i915_drm_suspend_mode suspend_mode)
5465{
5466 struct i915_power_domains *power_domains = &i915->power_domains;
5467 intel_wakeref_t wakeref __maybe_unused =
5468 fetch_and_zero(&power_domains->wakeref);
5469
5470 intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
5471
/*
 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
 * support don't manually deinit the power domains. This also means the
 * CSR/DMC firmware will stay active, it will power down any HW
 * resources as required and also enable deeper system power states
 * that would be blocked if the firmware was inactive.
 */
5479 if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
5480 suspend_mode == I915_DRM_SUSPEND_IDLE &&
5481 i915->csr.dmc_payload) {
5482 intel_display_power_flush_work(i915);
5483 intel_power_domains_verify_state(i915);
5484 return;
5485 }
5486
/*
 * Even if power well support was disabled we still want to disable
 * power wells if power domains must be deinitialized for suspend.
 */
5491 if (!i915_modparams.disable_power_well)
5492 intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
5493
5494 intel_display_power_flush_work(i915);
5495 intel_power_domains_verify_state(i915);
5496
5497 if (INTEL_GEN(i915) >= 11)
5498 icl_display_core_uninit(i915);
5499 else if (IS_CANNONLAKE(i915))
5500 cnl_display_core_uninit(i915);
5501 else if (IS_GEN9_BC(i915))
5502 skl_display_core_uninit(i915);
5503 else if (IS_GEN9_LP(i915))
5504 bxt_display_core_uninit(i915);
5505
5506 power_domains->display_core_suspended = true;
5507}
5508
/**
 * intel_power_domains_resume - resume power domain state
 * @i915: i915 device instance
 *
 * This function resumes the hardware power domain state during system resume.
 *
 * It will return with power domain support disabled (to be enabled later by
 * intel_power_domains_enable()) and must be paired with
 * intel_power_domains_suspend().
 */
5519void intel_power_domains_resume(struct drm_i915_private *i915)
5520{
5521 struct i915_power_domains *power_domains = &i915->power_domains;
5522
5523 if (power_domains->display_core_suspended) {
5524 intel_power_domains_init_hw(i915, true);
5525 power_domains->display_core_suspended = false;
5526 } else {
5527 drm_WARN_ON(&i915->drm, power_domains->wakeref);
5528 power_domains->wakeref =
5529 intel_display_power_get(i915, POWER_DOMAIN_INIT);
5530 }
5531
5532 intel_power_domains_verify_state(i915);
5533}
5534
5535#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
5536
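/* Dump the refcount of every power well and of each domain it serves. */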
5537static void intel_power_domains_dump_info(struct drm_i915_private *i915)
5538{
5539 struct i915_power_domains *power_domains = &i915->power_domains;
5540 struct i915_power_well *power_well;
5541
5542 for_each_power_well(i915, power_well) {
5543 enum intel_display_power_domain domain;
5544
5545 drm_dbg(&i915->drm, "%-25s %d\n",
5546 power_well->desc->name, power_well->count);
5547
5548 for_each_power_domain(domain, power_well->desc->domains)
5549 drm_dbg(&i915->drm, " %-23s %d\n",
5550 intel_display_power_domain_str(domain),
5551 power_domains->domain_use_count[domain]);
5552 }
5553}
5554
5555
/**
 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
 * @i915: i915 device instance
 *
 * Verify if the reference count of each power well matches its HW enabled
 * state and the total refcount of the domains it belongs to. A mismatch is
 * reported as an error and, on the first occurrence, the full domain state
 * is dumped for debugging.
 */
5565static void intel_power_domains_verify_state(struct drm_i915_private *i915)
5566{
5567 struct i915_power_domains *power_domains = &i915->power_domains;
5568 struct i915_power_well *power_well;
5569 bool dump_domain_info;
5570
5571 mutex_lock(&power_domains->lock);
5572
5573 verify_async_put_domains_state(power_domains);
5574
5575 dump_domain_info = false;
5576 for_each_power_well(i915, power_well) {
5577 enum intel_display_power_domain domain;
5578 int domains_count;
5579 bool enabled;
5580
5581 enabled = power_well->desc->ops->is_enabled(i915, power_well);
5582 if ((power_well->count || power_well->desc->always_on) !=
5583 enabled)
5584 drm_err(&i915->drm,
5585 "power well %s state mismatch (refcount %d/enabled %d)",
5586 power_well->desc->name,
5587 power_well->count, enabled);
5588
5589 domains_count = 0;
5590 for_each_power_domain(domain, power_well->desc->domains)
5591 domains_count += power_domains->domain_use_count[domain];
5592
5593 if (power_well->count != domains_count) {
5594 drm_err(&i915->drm,
5595 "power well %s refcount/domain refcount mismatch "
5596 "(refcount %d/domains refcount %d)\n",
5597 power_well->desc->name, power_well->count,
5598 domains_count);
5599 dump_domain_info = true;
5600 }
5601 }
5602
5603 if (dump_domain_info) {
5604 static bool dumped;
5605
5606 if (!dumped) {
5607 intel_power_domains_dump_info(i915);
5608 dumped = true;
5609 }
5610 }
5611
5612 mutex_unlock(&power_domains->lock);
5613}
5614
5615#else
5616
5617static void intel_power_domains_verify_state(struct drm_i915_private *i915)
5618{
5619}
5620
5621#endif
5622
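/*
 * The helpers below enter/exit the deepest platform specific display power
 * state around system suspend: DC9 on BXT/GLK and gen11+, package C8+ on
 * HSW/BDW.
 */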
5623void intel_display_power_suspend_late(struct drm_i915_private *i915)
5624{
5625 if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915))
5626 bxt_enable_dc9(i915);
5627 else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
5628 hsw_enable_pc8(i915);
5629}
5630
5631void intel_display_power_resume_early(struct drm_i915_private *i915)
5632{
5633 if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915)) {
5634 gen9_sanitize_dc_state(i915);
5635 bxt_disable_dc9(i915);
5636 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
5637 hsw_disable_pc8(i915);
5638 }
5639}
5640
5641void intel_display_power_suspend(struct drm_i915_private *i915)
5642{
5643 if (INTEL_GEN(i915) >= 11) {
5644 icl_display_core_uninit(i915);
5645 bxt_enable_dc9(i915);
5646 } else if (IS_GEN9_LP(i915)) {
5647 bxt_display_core_uninit(i915);
5648 bxt_enable_dc9(i915);
5649 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
5650 hsw_enable_pc8(i915);
5651 }
5652}
5653
5654void intel_display_power_resume(struct drm_i915_private *i915)
5655{
5656 if (INTEL_GEN(i915) >= 11) {
5657 bxt_disable_dc9(i915);
5658 icl_display_core_init(i915, true);
5659 if (i915->csr.dmc_payload) {
5660 if (i915->csr.allowed_dc_mask &
5661 DC_STATE_EN_UPTO_DC6)
5662 skl_enable_dc6(i915);
5663 else if (i915->csr.allowed_dc_mask &
5664 DC_STATE_EN_UPTO_DC5)
5665 gen9_enable_dc5(i915);
5666 }
5667 } else if (IS_GEN9_LP(i915)) {
5668 bxt_disable_dc9(i915);
5669 bxt_display_core_init(i915, true);
5670 if (i915->csr.dmc_payload &&
5671 (i915->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
5672 gen9_enable_dc5(i915);
5673 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
5674 hsw_disable_pc8(i915);
5675 }
5676}
5677