#include <linux/cpufreq.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
#include <linux/vgaarb.h>
#include <drm/i915_powerwell.h>
#include <linux/pm_runtime.h>
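/*
 * RC6 family of render-standby power states: RC6, deep RC6 (RC6p) and
 * deepest RC6 (RC6pp).  The flags below are combined into a bitmask that
 * describes which of these states the GPU is allowed to enter.
 */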
#define INTEL_RC6_ENABLE	(1<<0)
#define INTEL_RC6p_ENABLE	(1<<1)
#define INTEL_RC6pp_ENABLE	(1<<2)
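/*
 * Framebuffer compression (FBC) keeps a compressed copy of the primary
 * plane in stolen memory so that scanout consumes less memory bandwidth.
 * The hooks below implement the per-platform enable/disable/query paths.
 */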
static void i8xx_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 fbc_ctl;

	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}

	DRM_DEBUG_KMS("disabled FBC\n");
}

static void i8xx_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int cfb_pitch;
	int i;
	u32 fbc_ctl;

	cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
	if (fb->pitches[0] < cfb_pitch)
		cfb_pitch = fb->pitches[0];

	if (IS_GEN2(dev))
		cfb_pitch = (cfb_pitch / 32) - 1;
	else
		cfb_pitch = (cfb_pitch / 64) - 1;

	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG + (i * 4), 0);

	if (IS_GEN4(dev)) {
		u32 fbc_ctl2;

		fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
		fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane);
		I915_WRITE(FBC_CONTROL2, fbc_ctl2);
		I915_WRITE(FBC_FENCE_OFF, crtc->y);
	}

	fbc_ctl = I915_READ(FBC_CONTROL);
	fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev))
		fbc_ctl |= FBC_CTL_C3_IDLE;
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	fbc_ctl |= obj->fence_reg;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
		      cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
}

static bool i8xx_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}

static void g4x_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;

	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

	I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}

static void g4x_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	dpfc_ctl = I915_READ(DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}

static bool g4x_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
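/*
 * Tell the blitter to notify FBC about writes by setting
 * GEN6_BLITTER_FBC_NOTIFY in GEN6_BLITTER_ECOSKPD; the register is
 * lock-protected, so the matching lock bit is toggled around the update,
 * all under forcewake so the writes actually land.
 */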
static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 blt_ecoskpd;

	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_MEDIA);

	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
		GEN6_BLITTER_LOCK_SHIFT;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
			 GEN6_BLITTER_LOCK_SHIFT);
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	POSTING_READ(GEN6_BLITTER_ECOSKPD);

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA);
}

static void ironlake_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dev_priv->fbc.threshold++;

	switch (dev_priv->fbc.threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}
	dpfc_ctl |= DPFC_CTL_FENCE_EN;
	if (IS_GEN5(dev))
		dpfc_ctl |= obj->fence_reg;

	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
	I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);

	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_GEN6(dev)) {
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
		sandybridge_blit_fbc_update(dev);
	}

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}

static void ironlake_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}

static bool ironlake_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}

static void gen7_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dev_priv->fbc.threshold++;

	switch (dev_priv->fbc.threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}

	dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;

	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_IVYBRIDGE(dev)) {
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
	} else {
		I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe),
			   I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) |
			   HSW_FBCQ_DIS);
	}

	I915_WRITE(SNB_DPFC_CTL_SA,
		   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
	I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);

	sandybridge_blit_fbc_update(dev);

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}

bool intel_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.fbc_enabled)
		return false;

	return dev_priv->display.fbc_enabled(dev);
}
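/*
 * FBC is enabled asynchronously: intel_enable_fbc() queues an
 * intel_fbc_work item and the function below performs the actual hardware
 * enable, but only if the request is still the most recent one and the
 * CRTC is still scanning out the same framebuffer.
 */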
static void intel_fbc_work_fn(struct work_struct *__work)
{
	struct intel_fbc_work *work =
		container_of(to_delayed_work(__work),
			     struct intel_fbc_work, work);
	struct drm_device *dev = work->crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	if (work == dev_priv->fbc.fbc_work) {
		if (work->crtc->primary->fb == work->fb) {
			dev_priv->display.enable_fbc(work->crtc);

			dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
			dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
			dev_priv->fbc.y = work->crtc->y;
		}

		dev_priv->fbc.fbc_work = NULL;
	}
	mutex_unlock(&dev->struct_mutex);

	kfree(work);
}

static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
	if (dev_priv->fbc.fbc_work == NULL)
		return;

	DRM_DEBUG_KMS("cancelling pending FBC enable\n");

	if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
		kfree(dev_priv->fbc.fbc_work);

	dev_priv->fbc.fbc_work = NULL;
}
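/*
 * intel_enable_fbc - request that FBC be enabled on @crtc.  The hardware
 * enable is deferred to a delayed work item (50 ms) so that page flips and
 * mode changes can settle first; if the work structure cannot be
 * allocated, FBC is enabled immediately instead.
 */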
static void intel_enable_fbc(struct drm_crtc *crtc)
{
	struct intel_fbc_work *work;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.enable_fbc)
		return;

	intel_cancel_fbc_work(dev_priv);

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL) {
		DRM_ERROR("Failed to allocate FBC work structure\n");
		dev_priv->display.enable_fbc(crtc);
		return;
	}

	work->crtc = crtc;
	work->fb = crtc->primary->fb;
	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);

	dev_priv->fbc.fbc_work = work;

	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}

void intel_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_cancel_fbc_work(dev_priv);

	if (!dev_priv->display.disable_fbc)
		return;

	dev_priv->display.disable_fbc(dev);
	dev_priv->fbc.plane = -1;
}

static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
			      enum no_fbc_reason reason)
{
	if (dev_priv->fbc.no_fbc_reason == reason)
		return false;

	dev_priv->fbc.no_fbc_reason = reason;
	return true;
}
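/*
 * intel_update_fbc - enable or disable FBC to match the current display
 * configuration.  FBC can compress only a single plane, so it is kept on
 * only when exactly one pipe is active and the plane, mode, tiling, fence
 * and stolen-memory constraints below are all met; otherwise it is turned
 * off and the reason is recorded in dev_priv->fbc.no_fbc_reason.
 */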
478void intel_update_fbc(struct drm_device *dev)
479{
480 struct drm_i915_private *dev_priv = dev->dev_private;
481 struct drm_crtc *crtc = NULL, *tmp_crtc;
482 struct intel_crtc *intel_crtc;
483 struct drm_framebuffer *fb;
484 struct drm_i915_gem_object *obj;
485 const struct drm_display_mode *adjusted_mode;
486 unsigned int max_width, max_height;
487
488 if (!HAS_FBC(dev)) {
489 set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
490 return;
491 }
492
493 if (!i915.powersave) {
494 if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
495 DRM_DEBUG_KMS("fbc disabled per module param\n");
496 return;
497 }
498
508 for_each_crtc(dev, tmp_crtc) {
509 if (intel_crtc_active(tmp_crtc) &&
510 to_intel_crtc(tmp_crtc)->primary_enabled) {
511 if (crtc) {
512 if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
513 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
514 goto out_disable;
515 }
516 crtc = tmp_crtc;
517 }
518 }
519
520 if (!crtc || crtc->primary->fb == NULL) {
521 if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
522 DRM_DEBUG_KMS("no output, disabling\n");
523 goto out_disable;
524 }
525
526 intel_crtc = to_intel_crtc(crtc);
527 fb = crtc->primary->fb;
528 obj = intel_fb_obj(fb);
529 adjusted_mode = &intel_crtc->config.adjusted_mode;
530
531 if (i915.enable_fbc < 0) {
532 if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
533 DRM_DEBUG_KMS("disabled per chip default\n");
534 goto out_disable;
535 }
536 if (!i915.enable_fbc) {
537 if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
538 DRM_DEBUG_KMS("fbc disabled per module param\n");
539 goto out_disable;
540 }
541 if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
542 (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
543 if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
544 DRM_DEBUG_KMS("mode incompatible with compression, "
545 "disabling\n");
546 goto out_disable;
547 }
548
549 if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) {
550 max_width = 4096;
551 max_height = 4096;
552 } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
553 max_width = 4096;
554 max_height = 2048;
555 } else {
556 max_width = 2048;
557 max_height = 1536;
558 }
559 if (intel_crtc->config.pipe_src_w > max_width ||
560 intel_crtc->config.pipe_src_h > max_height) {
561 if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
562 DRM_DEBUG_KMS("mode too large for compression, disabling\n");
563 goto out_disable;
564 }
565 if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) &&
566 intel_crtc->plane != PLANE_A) {
567 if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
568 DRM_DEBUG_KMS("plane not A, disabling compression\n");
569 goto out_disable;
570 }
571
575 if (obj->tiling_mode != I915_TILING_X ||
576 obj->fence_reg == I915_FENCE_REG_NONE) {
577 if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
578 DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
579 goto out_disable;
580 }
581
582
583 if (in_dbg_master())
584 goto out_disable;
585
586 if (i915_gem_stolen_setup_compression(dev, obj->base.size,
587 drm_format_plane_cpp(fb->pixel_format, 0))) {
588 if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
589 DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
590 goto out_disable;
591 }
592
598 if (dev_priv->fbc.plane == intel_crtc->plane &&
599 dev_priv->fbc.fb_id == fb->base.id &&
600 dev_priv->fbc.y == crtc->y)
601 return;
602
603 if (intel_fbc_enabled(dev)) {
627 DRM_DEBUG_KMS("disabling active FBC for update\n");
628 intel_disable_fbc(dev);
629 }
630
631 intel_enable_fbc(crtc);
632 dev_priv->fbc.no_fbc_reason = FBC_OK;
633 return;
634
635out_disable:
636
637 if (intel_fbc_enabled(dev)) {
638 DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
639 intel_disable_fbc(dev);
640 }
641 i915_gem_stolen_cleanup_compression(dev);
642}
643
644static void i915_pineview_get_mem_freq(struct drm_device *dev)
645{
646 struct drm_i915_private *dev_priv = dev->dev_private;
647 u32 tmp;
648
649 tmp = I915_READ(CLKCFG);
650
651 switch (tmp & CLKCFG_FSB_MASK) {
652 case CLKCFG_FSB_533:
653 dev_priv->fsb_freq = 533;
654 break;
655 case CLKCFG_FSB_800:
656 dev_priv->fsb_freq = 800;
657 break;
658 case CLKCFG_FSB_667:
659 dev_priv->fsb_freq = 667;
660 break;
661 case CLKCFG_FSB_400:
662 dev_priv->fsb_freq = 400;
663 break;
664 }
665
666 switch (tmp & CLKCFG_MEM_MASK) {
667 case CLKCFG_MEM_533:
668 dev_priv->mem_freq = 533;
669 break;
670 case CLKCFG_MEM_667:
671 dev_priv->mem_freq = 667;
672 break;
673 case CLKCFG_MEM_800:
674 dev_priv->mem_freq = 800;
675 break;
676 }
677
678
679 tmp = I915_READ(CSHRDDR3CTL);
680 dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
681}
682
683static void i915_ironlake_get_mem_freq(struct drm_device *dev)
684{
685 struct drm_i915_private *dev_priv = dev->dev_private;
686 u16 ddrpll, csipll;
687
688 ddrpll = I915_READ16(DDRMPLL1);
689 csipll = I915_READ16(CSIPLL0);
690
691 switch (ddrpll & 0xff) {
692 case 0xc:
693 dev_priv->mem_freq = 800;
694 break;
695 case 0x10:
696 dev_priv->mem_freq = 1066;
697 break;
698 case 0x14:
699 dev_priv->mem_freq = 1333;
700 break;
701 case 0x18:
702 dev_priv->mem_freq = 1600;
703 break;
704 default:
705 DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
706 ddrpll & 0xff);
707 dev_priv->mem_freq = 0;
708 break;
709 }
710
711 dev_priv->ips.r_t = dev_priv->mem_freq;
712
713 switch (csipll & 0x3ff) {
714 case 0x00c:
715 dev_priv->fsb_freq = 3200;
716 break;
717 case 0x00e:
718 dev_priv->fsb_freq = 3733;
719 break;
720 case 0x010:
721 dev_priv->fsb_freq = 4266;
722 break;
723 case 0x012:
724 dev_priv->fsb_freq = 4800;
725 break;
726 case 0x014:
727 dev_priv->fsb_freq = 5333;
728 break;
729 case 0x016:
730 dev_priv->fsb_freq = 5866;
731 break;
732 case 0x018:
733 dev_priv->fsb_freq = 6400;
734 break;
735 default:
736 DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
737 csipll & 0x3ff);
738 dev_priv->fsb_freq = 0;
739 break;
740 }
741
742 if (dev_priv->fsb_freq == 3200) {
743 dev_priv->ips.c_m = 0;
744 } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
745 dev_priv->ips.c_m = 1;
746 } else {
747 dev_priv->ips.c_m = 2;
748 }
749}
750
751static const struct cxsr_latency cxsr_latency_table[] = {
752 {1, 0, 800, 400, 3382, 33382, 3983, 33983},
753 {1, 0, 800, 667, 3354, 33354, 3807, 33807},
754 {1, 0, 800, 800, 3347, 33347, 3763, 33763},
755 {1, 1, 800, 667, 6420, 36420, 6873, 36873},
756 {1, 1, 800, 800, 5902, 35902, 6318, 36318},
757
758 {1, 0, 667, 400, 3400, 33400, 4021, 34021},
759 {1, 0, 667, 667, 3372, 33372, 3845, 33845},
760 {1, 0, 667, 800, 3386, 33386, 3822, 33822},
761 {1, 1, 667, 667, 6438, 36438, 6911, 36911},
762 {1, 1, 667, 800, 5941, 35941, 6377, 36377},
763
764 {1, 0, 400, 400, 3472, 33472, 4173, 34173},
765 {1, 0, 400, 667, 3443, 33443, 3996, 33996},
766 {1, 0, 400, 800, 3430, 33430, 3946, 33946},
767 {1, 1, 400, 667, 6509, 36509, 7062, 37062},
768 {1, 1, 400, 800, 5985, 35985, 6501, 36501},
769
770 {0, 0, 800, 400, 3438, 33438, 4065, 34065},
771 {0, 0, 800, 667, 3410, 33410, 3889, 33889},
772 {0, 0, 800, 800, 3403, 33403, 3845, 33845},
773 {0, 1, 800, 667, 6476, 36476, 6955, 36955},
774 {0, 1, 800, 800, 5958, 35958, 6400, 36400},
775
776 {0, 0, 667, 400, 3456, 33456, 4103, 34106},
777 {0, 0, 667, 667, 3428, 33428, 3927, 33927},
778 {0, 0, 667, 800, 3443, 33443, 3905, 33905},
779 {0, 1, 667, 667, 6494, 36494, 6993, 36993},
780 {0, 1, 667, 800, 5998, 35998, 6460, 36460},
781
782 {0, 0, 400, 400, 3528, 33528, 4255, 34255},
783 {0, 0, 400, 667, 3500, 33500, 4079, 34079},
784 {0, 0, 400, 800, 3487, 33487, 4029, 34029},
785 {0, 1, 400, 667, 6566, 36566, 7145, 37145},
786 {0, 1, 400, 800, 6042, 36042, 6584, 36584},
787};
788
789static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
790 int is_ddr3,
791 int fsb,
792 int mem)
793{
794 const struct cxsr_latency *latency;
795 int i;
796
797 if (fsb == 0 || mem == 0)
798 return NULL;
799
800 for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
801 latency = &cxsr_latency_table[i];
802 if (is_desktop == latency->is_desktop &&
803 is_ddr3 == latency->is_ddr3 &&
804 fsb == latency->fsb_freq && mem == latency->mem_freq)
805 return latency;
806 }
807
808 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
809
810 return NULL;
811}
812
813void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
814{
815 struct drm_device *dev = dev_priv->dev;
816 u32 val;
817
818 if (IS_VALLEYVIEW(dev)) {
819 I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
820 } else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
821 I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
822 } else if (IS_PINEVIEW(dev)) {
823 val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
824 val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
825 I915_WRITE(DSPFW3, val);
826 } else if (IS_I945G(dev) || IS_I945GM(dev)) {
827 val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
828 _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
829 I915_WRITE(FW_BLC_SELF, val);
830 } else if (IS_I915GM(dev)) {
831 val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
832 _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
833 I915_WRITE(INSTPM, val);
834 } else {
835 return;
836 }
837
838 DRM_DEBUG_KMS("memory self-refresh is %s\n",
839 enable ? "enabled" : "disabled");
840}
841
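/*
 * Fixed memory-latency assumption (in nanoseconds) used by the legacy
 * single-latency watermark calculations below; the real latency depends on
 * the memory configuration, so this is a conservative default.
 */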
856static const int latency_ns = 5000;
857
858static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
859{
860 struct drm_i915_private *dev_priv = dev->dev_private;
861 uint32_t dsparb = I915_READ(DSPARB);
862 int size;
863
864 size = dsparb & 0x7f;
865 if (plane)
866 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
867
868 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
869 plane ? "B" : "A", size);
870
871 return size;
872}
873
874static int i830_get_fifo_size(struct drm_device *dev, int plane)
875{
876 struct drm_i915_private *dev_priv = dev->dev_private;
877 uint32_t dsparb = I915_READ(DSPARB);
878 int size;
879
880 size = dsparb & 0x1ff;
881 if (plane)
882 size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
883 size >>= 1;
884
885 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
886 plane ? "B" : "A", size);
887
888 return size;
889}
890
891static int i845_get_fifo_size(struct drm_device *dev, int plane)
892{
893 struct drm_i915_private *dev_priv = dev->dev_private;
894 uint32_t dsparb = I915_READ(DSPARB);
895 int size;
896
897 size = dsparb & 0x7f;
898 size >>= 2;
899
900 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
901 plane ? "B" : "A",
902 size);
903
904 return size;
905}
906
907
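/* Per-platform FIFO and watermark parameters used by the legacy wm code. */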
908static const struct intel_watermark_params pineview_display_wm = {
909 .fifo_size = PINEVIEW_DISPLAY_FIFO,
910 .max_wm = PINEVIEW_MAX_WM,
911 .default_wm = PINEVIEW_DFT_WM,
912 .guard_size = PINEVIEW_GUARD_WM,
913 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
914};
915static const struct intel_watermark_params pineview_display_hplloff_wm = {
916 .fifo_size = PINEVIEW_DISPLAY_FIFO,
917 .max_wm = PINEVIEW_MAX_WM,
918 .default_wm = PINEVIEW_DFT_HPLLOFF_WM,
919 .guard_size = PINEVIEW_GUARD_WM,
920 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
921};
922static const struct intel_watermark_params pineview_cursor_wm = {
923 .fifo_size = PINEVIEW_CURSOR_FIFO,
924 .max_wm = PINEVIEW_CURSOR_MAX_WM,
925 .default_wm = PINEVIEW_CURSOR_DFT_WM,
926 .guard_size = PINEVIEW_CURSOR_GUARD_WM,
927 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
928};
929static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
930 .fifo_size = PINEVIEW_CURSOR_FIFO,
931 .max_wm = PINEVIEW_CURSOR_MAX_WM,
932 .default_wm = PINEVIEW_CURSOR_DFT_WM,
933 .guard_size = PINEVIEW_CURSOR_GUARD_WM,
934 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
935};
936static const struct intel_watermark_params g4x_wm_info = {
937 .fifo_size = G4X_FIFO_SIZE,
938 .max_wm = G4X_MAX_WM,
939 .default_wm = G4X_MAX_WM,
940 .guard_size = 2,
941 .cacheline_size = G4X_FIFO_LINE_SIZE,
942};
943static const struct intel_watermark_params g4x_cursor_wm_info = {
944 .fifo_size = I965_CURSOR_FIFO,
945 .max_wm = I965_CURSOR_MAX_WM,
946 .default_wm = I965_CURSOR_DFT_WM,
947 .guard_size = 2,
948 .cacheline_size = G4X_FIFO_LINE_SIZE,
949};
950static const struct intel_watermark_params valleyview_wm_info = {
951 .fifo_size = VALLEYVIEW_FIFO_SIZE,
952 .max_wm = VALLEYVIEW_MAX_WM,
953 .default_wm = VALLEYVIEW_MAX_WM,
954 .guard_size = 2,
955 .cacheline_size = G4X_FIFO_LINE_SIZE,
956};
957static const struct intel_watermark_params valleyview_cursor_wm_info = {
958 .fifo_size = I965_CURSOR_FIFO,
959 .max_wm = VALLEYVIEW_CURSOR_MAX_WM,
960 .default_wm = I965_CURSOR_DFT_WM,
961 .guard_size = 2,
962 .cacheline_size = G4X_FIFO_LINE_SIZE,
963};
964static const struct intel_watermark_params i965_cursor_wm_info = {
965 .fifo_size = I965_CURSOR_FIFO,
966 .max_wm = I965_CURSOR_MAX_WM,
967 .default_wm = I965_CURSOR_DFT_WM,
968 .guard_size = 2,
969 .cacheline_size = I915_FIFO_LINE_SIZE,
970};
971static const struct intel_watermark_params i945_wm_info = {
972 .fifo_size = I945_FIFO_SIZE,
973 .max_wm = I915_MAX_WM,
974 .default_wm = 1,
975 .guard_size = 2,
976 .cacheline_size = I915_FIFO_LINE_SIZE,
977};
978static const struct intel_watermark_params i915_wm_info = {
979 .fifo_size = I915_FIFO_SIZE,
980 .max_wm = I915_MAX_WM,
981 .default_wm = 1,
982 .guard_size = 2,
983 .cacheline_size = I915_FIFO_LINE_SIZE,
984};
985static const struct intel_watermark_params i830_wm_info = {
986 .fifo_size = I855GM_FIFO_SIZE,
987 .max_wm = I915_MAX_WM,
988 .default_wm = 1,
989 .guard_size = 2,
990 .cacheline_size = I830_FIFO_LINE_SIZE,
991};
992static const struct intel_watermark_params i845_wm_info = {
993 .fifo_size = I830_FIFO_SIZE,
994 .max_wm = I915_MAX_WM,
995 .default_wm = 1,
996 .guard_size = 2,
997 .cacheline_size = I830_FIFO_LINE_SIZE,
998};
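/*
 * intel_calculate_wm - compute a watermark value: convert the bytes fetched
 * during the assumed latency window into FIFO cachelines, subtract that
 * plus the guard band from the FIFO size, clamp to the hardware maximum and
 * fall back to the default watermark if the FIFO would be exhausted.
 */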
1018static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
1019 const struct intel_watermark_params *wm,
1020 int fifo_size,
1021 int pixel_size,
1022 unsigned long latency_ns)
1023{
1024 long entries_required, wm_size;
1025
1026
1027
1028
1029
1030
1031
1032 entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
1033 1000;
1034 entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
1035
1036 DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
1037
1038 wm_size = fifo_size - (entries_required + wm->guard_size);
1039
1040 DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
1041
1042
1043 if (wm_size > (long)wm->max_wm)
1044 wm_size = wm->max_wm;
1045 if (wm_size <= 0)
1046 wm_size = wm->default_wm;
1047 return wm_size;
1048}
1049
1050static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
1051{
1052 struct drm_crtc *crtc, *enabled = NULL;
1053
1054 for_each_crtc(dev, crtc) {
1055 if (intel_crtc_active(crtc)) {
1056 if (enabled)
1057 return NULL;
1058 enabled = crtc;
1059 }
1060 }
1061
1062 return enabled;
1063}
1064
1065static void pineview_update_wm(struct drm_crtc *unused_crtc)
1066{
1067 struct drm_device *dev = unused_crtc->dev;
1068 struct drm_i915_private *dev_priv = dev->dev_private;
1069 struct drm_crtc *crtc;
1070 const struct cxsr_latency *latency;
1071 u32 reg;
1072 unsigned long wm;
1073
1074 latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
1075 dev_priv->fsb_freq, dev_priv->mem_freq);
1076 if (!latency) {
1077 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
1078 intel_set_memory_cxsr(dev_priv, false);
1079 return;
1080 }
1081
1082 crtc = single_enabled_crtc(dev);
1083 if (crtc) {
1084 const struct drm_display_mode *adjusted_mode;
1085 int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
1086 int clock;
1087
1088 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1089 clock = adjusted_mode->crtc_clock;
1090
1091
1092 wm = intel_calculate_wm(clock, &pineview_display_wm,
1093 pineview_display_wm.fifo_size,
1094 pixel_size, latency->display_sr);
1095 reg = I915_READ(DSPFW1);
1096 reg &= ~DSPFW_SR_MASK;
1097 reg |= wm << DSPFW_SR_SHIFT;
1098 I915_WRITE(DSPFW1, reg);
1099 DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
1100
1101
1102 wm = intel_calculate_wm(clock, &pineview_cursor_wm,
1103 pineview_display_wm.fifo_size,
1104 pixel_size, latency->cursor_sr);
1105 reg = I915_READ(DSPFW3);
1106 reg &= ~DSPFW_CURSOR_SR_MASK;
1107 reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
1108 I915_WRITE(DSPFW3, reg);
1109
1110
1111 wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
1112 pineview_display_hplloff_wm.fifo_size,
1113 pixel_size, latency->display_hpll_disable);
1114 reg = I915_READ(DSPFW3);
1115 reg &= ~DSPFW_HPLL_SR_MASK;
1116 reg |= wm & DSPFW_HPLL_SR_MASK;
1117 I915_WRITE(DSPFW3, reg);
1118
1119
1120 wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
1121 pineview_display_hplloff_wm.fifo_size,
1122 pixel_size, latency->cursor_hpll_disable);
1123 reg = I915_READ(DSPFW3);
1124 reg &= ~DSPFW_HPLL_CURSOR_MASK;
1125 reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
1126 I915_WRITE(DSPFW3, reg);
1127 DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
1128
1129 intel_set_memory_cxsr(dev_priv, true);
1130 } else {
1131 intel_set_memory_cxsr(dev_priv, false);
1132 }
1133}
1134
1135static bool g4x_compute_wm0(struct drm_device *dev,
1136 int plane,
1137 const struct intel_watermark_params *display,
1138 int display_latency_ns,
1139 const struct intel_watermark_params *cursor,
1140 int cursor_latency_ns,
1141 int *plane_wm,
1142 int *cursor_wm)
1143{
1144 struct drm_crtc *crtc;
1145 const struct drm_display_mode *adjusted_mode;
1146 int htotal, hdisplay, clock, pixel_size;
1147 int line_time_us, line_count;
1148 int entries, tlb_miss;
1149
1150 crtc = intel_get_crtc_for_plane(dev, plane);
1151 if (!intel_crtc_active(crtc)) {
1152 *cursor_wm = cursor->guard_size;
1153 *plane_wm = display->guard_size;
1154 return false;
1155 }
1156
1157 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1158 clock = adjusted_mode->crtc_clock;
1159 htotal = adjusted_mode->crtc_htotal;
1160 hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1161 pixel_size = crtc->primary->fb->bits_per_pixel / 8;
1162
1163
1164 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
1165 tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
1166 if (tlb_miss > 0)
1167 entries += tlb_miss;
1168 entries = DIV_ROUND_UP(entries, display->cacheline_size);
1169 *plane_wm = entries + display->guard_size;
1170 if (*plane_wm > (int)display->max_wm)
1171 *plane_wm = display->max_wm;
1172
1173
1174 line_time_us = max(htotal * 1000 / clock, 1);
1175 line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
1176 entries = line_count * to_intel_crtc(crtc)->cursor_width * pixel_size;
1177 tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
1178 if (tlb_miss > 0)
1179 entries += tlb_miss;
1180 entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
1181 *cursor_wm = entries + cursor->guard_size;
1182 if (*cursor_wm > (int)cursor->max_wm)
1183 *cursor_wm = (int)cursor->max_wm;
1184
1185 return true;
1186}
1187
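/*
 * Check that the computed self-refresh watermarks fit in their register
 * fields; if either the display or the cursor value is too large,
 * self-refresh has to stay disabled for this configuration.
 */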
1195static bool g4x_check_srwm(struct drm_device *dev,
1196 int display_wm, int cursor_wm,
1197 const struct intel_watermark_params *display,
1198 const struct intel_watermark_params *cursor)
1199{
1200 DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
1201 display_wm, cursor_wm);
1202
1203 if (display_wm > display->max_wm) {
1204 DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
1205 display_wm, display->max_wm);
1206 return false;
1207 }
1208
1209 if (cursor_wm > cursor->max_wm) {
1210 DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
1211 cursor_wm, cursor->max_wm);
1212 return false;
1213 }
1214
1215 if (!(display_wm || cursor_wm)) {
1216 DRM_DEBUG_KMS("SR latency is 0, disabling\n");
1217 return false;
1218 }
1219
1220 return true;
1221}
1222
1223static bool g4x_compute_srwm(struct drm_device *dev,
1224 int plane,
1225 int latency_ns,
1226 const struct intel_watermark_params *display,
1227 const struct intel_watermark_params *cursor,
1228 int *display_wm, int *cursor_wm)
1229{
1230 struct drm_crtc *crtc;
1231 const struct drm_display_mode *adjusted_mode;
1232 int hdisplay, htotal, pixel_size, clock;
1233 unsigned long line_time_us;
1234 int line_count, line_size;
1235 int small, large;
1236 int entries;
1237
1238 if (!latency_ns) {
1239 *display_wm = *cursor_wm = 0;
1240 return false;
1241 }
1242
1243 crtc = intel_get_crtc_for_plane(dev, plane);
1244 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1245 clock = adjusted_mode->crtc_clock;
1246 htotal = adjusted_mode->crtc_htotal;
1247 hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1248 pixel_size = crtc->primary->fb->bits_per_pixel / 8;
1249
1250 line_time_us = max(htotal * 1000 / clock, 1);
1251 line_count = (latency_ns / line_time_us + 1000) / 1000;
1252 line_size = hdisplay * pixel_size;
1253
1254
1255 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
1256 large = line_count * line_size;
1257
1258 entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
1259 *display_wm = entries + display->guard_size;
1260
1261
1262 entries = line_count * pixel_size * to_intel_crtc(crtc)->cursor_width;
1263 entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
1264 *cursor_wm = entries + cursor->guard_size;
1265
1266 return g4x_check_srwm(dev,
1267 *display_wm, *cursor_wm,
1268 display, cursor);
1269}
1270
1271static bool vlv_compute_drain_latency(struct drm_device *dev,
1272 int plane,
1273 int *plane_prec_mult,
1274 int *plane_dl,
1275 int *cursor_prec_mult,
1276 int *cursor_dl)
1277{
1278 struct drm_crtc *crtc;
1279 int clock, pixel_size;
1280 int entries;
1281
1282 crtc = intel_get_crtc_for_plane(dev, plane);
1283 if (!intel_crtc_active(crtc))
1284 return false;
1285
1286 clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
1287 pixel_size = crtc->primary->fb->bits_per_pixel / 8;
1288
1289 entries = (clock / 1000) * pixel_size;
1290 *plane_prec_mult = (entries > 128) ?
1291 DRAIN_LATENCY_PRECISION_64 : DRAIN_LATENCY_PRECISION_32;
1292 *plane_dl = (64 * (*plane_prec_mult) * 4) / entries;
1293
1294 entries = (clock / 1000) * 4;
1295 *cursor_prec_mult = (entries > 128) ?
1296 DRAIN_LATENCY_PRECISION_64 : DRAIN_LATENCY_PRECISION_32;
1297 *cursor_dl = (64 * (*cursor_prec_mult) * 4) / entries;
1298
1299 return true;
1300}
1301
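/*
 * Program the VLV memory-arbiter drain-latency (DDL) registers for both
 * pipes, choosing the 32x or 64x precision multiplier from the plane's and
 * cursor's byte rate.
 */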
1310static void vlv_update_drain_latency(struct drm_device *dev)
1311{
1312 struct drm_i915_private *dev_priv = dev->dev_private;
1313 int planea_prec, planea_dl, planeb_prec, planeb_dl;
1314 int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl;
1315 int plane_prec_mult, cursor_prec_mult;
1316
1317
1318
1319 if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
1320 &cursor_prec_mult, &cursora_dl)) {
1321 cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1322 DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_64;
1323 planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1324 DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_64;
1325
1326 I915_WRITE(VLV_DDL1, cursora_prec |
1327 (cursora_dl << DDL_CURSORA_SHIFT) |
1328 planea_prec | planea_dl);
1329 }
1330
1331
1332 if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
1333 &cursor_prec_mult, &cursorb_dl)) {
1334 cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1335 DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_64;
1336 planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1337 DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_64;
1338
1339 I915_WRITE(VLV_DDL2, cursorb_prec |
1340 (cursorb_dl << DDL_CURSORB_SHIFT) |
1341 planeb_prec | planeb_dl);
1342 }
1343}
1344
1345#define single_plane_enabled(mask) is_power_of_2(mask)
1346
1347static void valleyview_update_wm(struct drm_crtc *crtc)
1348{
1349 struct drm_device *dev = crtc->dev;
1350 static const int sr_latency_ns = 12000;
1351 struct drm_i915_private *dev_priv = dev->dev_private;
1352 int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
1353 int plane_sr, cursor_sr;
1354 int ignore_plane_sr, ignore_cursor_sr;
1355 unsigned int enabled = 0;
1356 bool cxsr_enabled;
1357
1358 vlv_update_drain_latency(dev);
1359
1360 if (g4x_compute_wm0(dev, PIPE_A,
1361 &valleyview_wm_info, latency_ns,
1362 &valleyview_cursor_wm_info, latency_ns,
1363 &planea_wm, &cursora_wm))
1364 enabled |= 1 << PIPE_A;
1365
1366 if (g4x_compute_wm0(dev, PIPE_B,
1367 &valleyview_wm_info, latency_ns,
1368 &valleyview_cursor_wm_info, latency_ns,
1369 &planeb_wm, &cursorb_wm))
1370 enabled |= 1 << PIPE_B;
1371
1372 if (single_plane_enabled(enabled) &&
1373 g4x_compute_srwm(dev, ffs(enabled) - 1,
1374 sr_latency_ns,
1375 &valleyview_wm_info,
1376 &valleyview_cursor_wm_info,
1377 &plane_sr, &ignore_cursor_sr) &&
1378 g4x_compute_srwm(dev, ffs(enabled) - 1,
1379 2*sr_latency_ns,
1380 &valleyview_wm_info,
1381 &valleyview_cursor_wm_info,
1382 &ignore_plane_sr, &cursor_sr)) {
1383 cxsr_enabled = true;
1384 } else {
1385 cxsr_enabled = false;
1386 intel_set_memory_cxsr(dev_priv, false);
1387 plane_sr = cursor_sr = 0;
1388 }
1389
1390 DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
1391 planea_wm, cursora_wm,
1392 planeb_wm, cursorb_wm,
1393 plane_sr, cursor_sr);
1394
1395 I915_WRITE(DSPFW1,
1396 (plane_sr << DSPFW_SR_SHIFT) |
1397 (cursorb_wm << DSPFW_CURSORB_SHIFT) |
1398 (planeb_wm << DSPFW_PLANEB_SHIFT) |
1399 planea_wm);
1400 I915_WRITE(DSPFW2,
1401 (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
1402 (cursora_wm << DSPFW_CURSORA_SHIFT));
1403 I915_WRITE(DSPFW3,
1404 (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
1405 (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1406
1407 if (cxsr_enabled)
1408 intel_set_memory_cxsr(dev_priv, true);
1409}
1410
1411static void g4x_update_wm(struct drm_crtc *crtc)
1412{
1413 struct drm_device *dev = crtc->dev;
1414 static const int sr_latency_ns = 12000;
1415 struct drm_i915_private *dev_priv = dev->dev_private;
1416 int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
1417 int plane_sr, cursor_sr;
1418 unsigned int enabled = 0;
1419 bool cxsr_enabled;
1420
1421 if (g4x_compute_wm0(dev, PIPE_A,
1422 &g4x_wm_info, latency_ns,
1423 &g4x_cursor_wm_info, latency_ns,
1424 &planea_wm, &cursora_wm))
1425 enabled |= 1 << PIPE_A;
1426
1427 if (g4x_compute_wm0(dev, PIPE_B,
1428 &g4x_wm_info, latency_ns,
1429 &g4x_cursor_wm_info, latency_ns,
1430 &planeb_wm, &cursorb_wm))
1431 enabled |= 1 << PIPE_B;
1432
1433 if (single_plane_enabled(enabled) &&
1434 g4x_compute_srwm(dev, ffs(enabled) - 1,
1435 sr_latency_ns,
1436 &g4x_wm_info,
1437 &g4x_cursor_wm_info,
1438 &plane_sr, &cursor_sr)) {
1439 cxsr_enabled = true;
1440 } else {
1441 cxsr_enabled = false;
1442 intel_set_memory_cxsr(dev_priv, false);
1443 plane_sr = cursor_sr = 0;
1444 }
1445
1446 DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
1447 planea_wm, cursora_wm,
1448 planeb_wm, cursorb_wm,
1449 plane_sr, cursor_sr);
1450
1451 I915_WRITE(DSPFW1,
1452 (plane_sr << DSPFW_SR_SHIFT) |
1453 (cursorb_wm << DSPFW_CURSORB_SHIFT) |
1454 (planeb_wm << DSPFW_PLANEB_SHIFT) |
1455 planea_wm);
1456 I915_WRITE(DSPFW2,
1457 (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
1458 (cursora_wm << DSPFW_CURSORA_SHIFT));
1459
1460 I915_WRITE(DSPFW3,
1461 (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
1462 (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1463
1464 if (cxsr_enabled)
1465 intel_set_memory_cxsr(dev_priv, true);
1466}
1467
1468static void i965_update_wm(struct drm_crtc *unused_crtc)
1469{
1470 struct drm_device *dev = unused_crtc->dev;
1471 struct drm_i915_private *dev_priv = dev->dev_private;
1472 struct drm_crtc *crtc;
1473 int srwm = 1;
1474 int cursor_sr = 16;
1475 bool cxsr_enabled;
1476
1477
1478 crtc = single_enabled_crtc(dev);
1479 if (crtc) {
1480
1481 static const int sr_latency_ns = 12000;
1482 const struct drm_display_mode *adjusted_mode =
1483 &to_intel_crtc(crtc)->config.adjusted_mode;
1484 int clock = adjusted_mode->crtc_clock;
1485 int htotal = adjusted_mode->crtc_htotal;
1486 int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1487 int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
1488 unsigned long line_time_us;
1489 int entries;
1490
1491 line_time_us = max(htotal * 1000 / clock, 1);
1492
1493
1494 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1495 pixel_size * hdisplay;
1496 entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
1497 srwm = I965_FIFO_SIZE - entries;
1498 if (srwm < 0)
1499 srwm = 1;
1500 srwm &= 0x1ff;
1501 DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
1502 entries, srwm);
1503
1504 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1505 pixel_size * to_intel_crtc(crtc)->cursor_width;
1506 entries = DIV_ROUND_UP(entries,
1507 i965_cursor_wm_info.cacheline_size);
1508 cursor_sr = i965_cursor_wm_info.fifo_size -
1509 (entries + i965_cursor_wm_info.guard_size);
1510
1511 if (cursor_sr > i965_cursor_wm_info.max_wm)
1512 cursor_sr = i965_cursor_wm_info.max_wm;
1513
1514 DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
1515 "cursor %d\n", srwm, cursor_sr);
1516
1517 cxsr_enabled = true;
1518 } else {
1519 cxsr_enabled = false;
1520
1521 intel_set_memory_cxsr(dev_priv, false);
1522 }
1523
1524 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
1525 srwm);
1526
1527
1528 I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
1529 (8 << 16) | (8 << 8) | (8 << 0));
1530 I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
1531
1532 I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1533
1534 if (cxsr_enabled)
1535 intel_set_memory_cxsr(dev_priv, true);
1536}
1537
1538static void i9xx_update_wm(struct drm_crtc *unused_crtc)
1539{
1540 struct drm_device *dev = unused_crtc->dev;
1541 struct drm_i915_private *dev_priv = dev->dev_private;
1542 const struct intel_watermark_params *wm_info;
1543 uint32_t fwater_lo;
1544 uint32_t fwater_hi;
1545 int cwm, srwm = 1;
1546 int fifo_size;
1547 int planea_wm, planeb_wm;
1548 struct drm_crtc *crtc, *enabled = NULL;
1549
1550 if (IS_I945GM(dev))
1551 wm_info = &i945_wm_info;
1552 else if (!IS_GEN2(dev))
1553 wm_info = &i915_wm_info;
1554 else
1555 wm_info = &i830_wm_info;
1556
1557 fifo_size = dev_priv->display.get_fifo_size(dev, 0);
1558 crtc = intel_get_crtc_for_plane(dev, 0);
1559 if (intel_crtc_active(crtc)) {
1560 const struct drm_display_mode *adjusted_mode;
1561 int cpp = crtc->primary->fb->bits_per_pixel / 8;
1562 if (IS_GEN2(dev))
1563 cpp = 4;
1564
1565 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1566 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
1567 wm_info, fifo_size, cpp,
1568 latency_ns);
1569 enabled = crtc;
1570 } else
1571 planea_wm = fifo_size - wm_info->guard_size;
1572
1573 fifo_size = dev_priv->display.get_fifo_size(dev, 1);
1574 crtc = intel_get_crtc_for_plane(dev, 1);
1575 if (intel_crtc_active(crtc)) {
1576 const struct drm_display_mode *adjusted_mode;
1577 int cpp = crtc->primary->fb->bits_per_pixel / 8;
1578 if (IS_GEN2(dev))
1579 cpp = 4;
1580
1581 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1582 planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
1583 wm_info, fifo_size, cpp,
1584 latency_ns);
1585 if (enabled == NULL)
1586 enabled = crtc;
1587 else
1588 enabled = NULL;
1589 } else
1590 planeb_wm = fifo_size - wm_info->guard_size;
1591
1592 DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
1593
1594 if (IS_I915GM(dev) && enabled) {
1595 struct drm_i915_gem_object *obj;
1596
1597 obj = intel_fb_obj(enabled->primary->fb);
1598
1599
1600 if (obj->tiling_mode == I915_TILING_NONE)
1601 enabled = NULL;
1602 }
1603
1607 cwm = 2;
1608
1609
1610 intel_set_memory_cxsr(dev_priv, false);
1611
1612
1613 if (HAS_FW_BLC(dev) && enabled) {
1614
1615 static const int sr_latency_ns = 6000;
1616 const struct drm_display_mode *adjusted_mode =
1617 &to_intel_crtc(enabled)->config.adjusted_mode;
1618 int clock = adjusted_mode->crtc_clock;
1619 int htotal = adjusted_mode->crtc_htotal;
1620 int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w;
1621 int pixel_size = enabled->primary->fb->bits_per_pixel / 8;
1622 unsigned long line_time_us;
1623 int entries;
1624
1625 line_time_us = max(htotal * 1000 / clock, 1);
1626
1627
1628 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1629 pixel_size * hdisplay;
1630 entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
1631 DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
1632 srwm = wm_info->fifo_size - entries;
1633 if (srwm < 0)
1634 srwm = 1;
1635
1636 if (IS_I945G(dev) || IS_I945GM(dev))
1637 I915_WRITE(FW_BLC_SELF,
1638 FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
1639 else if (IS_I915GM(dev))
1640 I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
1641 }
1642
1643 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
1644 planea_wm, planeb_wm, cwm, srwm);
1645
1646 fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
1647 fwater_hi = (cwm & 0x1f);
1648
1649
1650 fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
1651 fwater_hi = fwater_hi | (1 << 8);
1652
1653 I915_WRITE(FW_BLC, fwater_lo);
1654 I915_WRITE(FW_BLC2, fwater_hi);
1655
1656 if (enabled)
1657 intel_set_memory_cxsr(dev_priv, true);
1658}
1659
1660static void i845_update_wm(struct drm_crtc *unused_crtc)
1661{
1662 struct drm_device *dev = unused_crtc->dev;
1663 struct drm_i915_private *dev_priv = dev->dev_private;
1664 struct drm_crtc *crtc;
1665 const struct drm_display_mode *adjusted_mode;
1666 uint32_t fwater_lo;
1667 int planea_wm;
1668
1669 crtc = single_enabled_crtc(dev);
1670 if (crtc == NULL)
1671 return;
1672
1673 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1674 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
1675 &i845_wm_info,
1676 dev_priv->display.get_fifo_size(dev, 0),
1677 4, latency_ns);
1678 fwater_lo = I915_READ(FW_BLC) & ~0xfff;
1679 fwater_lo |= (3<<8) | planea_wm;
1680
1681 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
1682
1683 I915_WRITE(FW_BLC, fwater_lo);
1684}
1685
1686static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
1687 struct drm_crtc *crtc)
1688{
1689 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1690 uint32_t pixel_rate;
1691
1692 pixel_rate = intel_crtc->config.adjusted_mode.crtc_clock;
1693
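	/*
	 * Account for the PCH panel fitter: when the pipe is downscaled the
	 * display engine has to fetch pixels faster, so scale the pixel rate
	 * by the ratio of source size to panel-fitter output size.
	 */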
1697 if (intel_crtc->config.pch_pfit.enabled) {
1698 uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
1699 uint32_t pfit_size = intel_crtc->config.pch_pfit.size;
1700
1701 pipe_w = intel_crtc->config.pipe_src_w;
1702 pipe_h = intel_crtc->config.pipe_src_h;
1703 pfit_w = (pfit_size >> 16) & 0xFFFF;
1704 pfit_h = pfit_size & 0xFFFF;
1705 if (pipe_w < pfit_w)
1706 pipe_w = pfit_w;
1707 if (pipe_h < pfit_h)
1708 pipe_h = pfit_h;
1709
1710 pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
1711 pfit_w * pfit_h);
1712 }
1713
1714 return pixel_rate;
1715}
1716
1717
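/*
 * ILK+ watermark "method 1": FIFO space needed to cover the memory latency
 * at the given pixel rate.  The pixel rate is in kHz and the latency in
 * 0.1 us units, so dividing by 10000 and by the 64-byte block size yields
 * the number of FIFO blocks, plus a 2-block guard.
 */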
1718static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
1719 uint32_t latency)
1720{
1721 uint64_t ret;
1722
1723 if (WARN(latency == 0, "Latency value missing\n"))
1724 return UINT_MAX;
1725
1726 ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
1727 ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;
1728
1729 return ret;
1730}
1731
1732
1733static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
1734 uint32_t horiz_pixels, uint8_t bytes_per_pixel,
1735 uint32_t latency)
1736{
1737 uint32_t ret;
1738
1739 if (WARN(latency == 0, "Latency value missing\n"))
1740 return UINT_MAX;
1741
1742 ret = (latency * pixel_rate) / (pipe_htotal * 10000);
1743 ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
1744 ret = DIV_ROUND_UP(ret, 64) + 2;
1745 return ret;
1746}
1747
1748static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
1749 uint8_t bytes_per_pixel)
1750{
1751 return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
1752}
1753
1754struct ilk_pipe_wm_parameters {
1755 bool active;
1756 uint32_t pipe_htotal;
1757 uint32_t pixel_rate;
1758 struct intel_plane_wm_parameters pri;
1759 struct intel_plane_wm_parameters spr;
1760 struct intel_plane_wm_parameters cur;
1761};
1762
1763struct ilk_wm_maximums {
1764 uint16_t pri;
1765 uint16_t spr;
1766 uint16_t cur;
1767 uint16_t fbc;
1768};
1769
1770
1771struct intel_wm_config {
1772 unsigned int num_pipes_active;
1773 bool sprites_enabled;
1774 bool sprites_scaled;
1775};
1776
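/*
 * Primary plane watermark.  mem_value is the memory latency in 0.1 us
 * units; only the LP (low-power) watermark levels use the line-based
 * "method 2" and take the smaller of the two results.
 */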
1781static uint32_t ilk_compute_pri_wm(const struct ilk_pipe_wm_parameters *params,
1782 uint32_t mem_value,
1783 bool is_lp)
1784{
1785 uint32_t method1, method2;
1786
1787 if (!params->active || !params->pri.enabled)
1788 return 0;
1789
1790 method1 = ilk_wm_method1(params->pixel_rate,
1791 params->pri.bytes_per_pixel,
1792 mem_value);
1793
1794 if (!is_lp)
1795 return method1;
1796
1797 method2 = ilk_wm_method2(params->pixel_rate,
1798 params->pipe_htotal,
1799 params->pri.horiz_pixels,
1800 params->pri.bytes_per_pixel,
1801 mem_value);
1802
1803 return min(method1, method2);
1804}
1805
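/* Sprite plane watermark, mem_value again in 0.1 us units. */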
1810static uint32_t ilk_compute_spr_wm(const struct ilk_pipe_wm_parameters *params,
1811 uint32_t mem_value)
1812{
1813 uint32_t method1, method2;
1814
1815 if (!params->active || !params->spr.enabled)
1816 return 0;
1817
1818 method1 = ilk_wm_method1(params->pixel_rate,
1819 params->spr.bytes_per_pixel,
1820 mem_value);
1821 method2 = ilk_wm_method2(params->pixel_rate,
1822 params->pipe_htotal,
1823 params->spr.horiz_pixels,
1824 params->spr.bytes_per_pixel,
1825 mem_value);
1826 return min(method1, method2);
1827}
1828
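/* Cursor watermark: only the line-based method applies here. */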
1833static uint32_t ilk_compute_cur_wm(const struct ilk_pipe_wm_parameters *params,
1834 uint32_t mem_value)
1835{
1836 if (!params->active || !params->cur.enabled)
1837 return 0;
1838
1839 return ilk_wm_method2(params->pixel_rate,
1840 params->pipe_htotal,
1841 params->cur.horiz_pixels,
1842 params->cur.bytes_per_pixel,
1843 mem_value);
1844}
1845
1846
1847static uint32_t ilk_compute_fbc_wm(const struct ilk_pipe_wm_parameters *params,
1848 uint32_t pri_val)
1849{
1850 if (!params->active || !params->pri.enabled)
1851 return 0;
1852
1853 return ilk_wm_fbc(pri_val,
1854 params->pri.horiz_pixels,
1855 params->pri.bytes_per_pixel);
1856}
1857
1858static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
1859{
1860 if (INTEL_INFO(dev)->gen >= 8)
1861 return 3072;
1862 else if (INTEL_INFO(dev)->gen >= 7)
1863 return 768;
1864 else
1865 return 512;
1866}
1867
1868static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
1869 int level, bool is_sprite)
1870{
1871 if (INTEL_INFO(dev)->gen >= 8)
1872
1873 return level == 0 ? 255 : 2047;
1874 else if (INTEL_INFO(dev)->gen >= 7)
1875
1876 return level == 0 ? 127 : 1023;
1877 else if (!is_sprite)
1878
1879 return level == 0 ? 127 : 511;
1880 else
1881
1882 return level == 0 ? 63 : 255;
1883}
1884
1885static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
1886 int level)
1887{
1888 if (INTEL_INFO(dev)->gen >= 7)
1889 return level == 0 ? 63 : 255;
1890 else
1891 return level == 0 ? 31 : 63;
1892}
1893
1894static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
1895{
1896 if (INTEL_INFO(dev)->gen >= 8)
1897 return 31;
1898 else
1899 return 15;
1900}
1901
1902
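/*
 * Maximum FIFO space a primary or sprite plane may use at the given
 * watermark level, accounting for the number of active pipes and the 1/2
 * vs 5/6 DDB partitioning, clamped to what the register field can hold.
 */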
1903static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
1904 int level,
1905 const struct intel_wm_config *config,
1906 enum intel_ddb_partitioning ddb_partitioning,
1907 bool is_sprite)
1908{
1909 unsigned int fifo_size = ilk_display_fifo_size(dev);
1910
1911
1912 if (is_sprite && !config->sprites_enabled)
1913 return 0;
1914
1915
1916 if (level == 0 || config->num_pipes_active > 1) {
1917 fifo_size /= INTEL_INFO(dev)->num_pipes;
1924 if (INTEL_INFO(dev)->gen <= 6)
1925 fifo_size /= 2;
1926 }
1927
1928 if (config->sprites_enabled) {
1929
1930 if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
1931 if (is_sprite)
1932 fifo_size *= 5;
1933 fifo_size /= 6;
1934 } else {
1935 fifo_size /= 2;
1936 }
1937 }
1938
1939
1940 return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
1941}
1942
1943
1944static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
1945 int level,
1946 const struct intel_wm_config *config)
1947{
1948
1949 if (level > 0 && config->num_pipes_active > 1)
1950 return 64;
1951
1952
1953 return ilk_cursor_wm_reg_max(dev, level);
1954}
1955
1956static void ilk_compute_wm_maximums(const struct drm_device *dev,
1957 int level,
1958 const struct intel_wm_config *config,
1959 enum intel_ddb_partitioning ddb_partitioning,
1960 struct ilk_wm_maximums *max)
1961{
1962 max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
1963 max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
1964 max->cur = ilk_cursor_wm_max(dev, level, config);
1965 max->fbc = ilk_fbc_wm_reg_max(dev);
1966}
1967
1968static void ilk_compute_wm_reg_maximums(struct drm_device *dev,
1969 int level,
1970 struct ilk_wm_maximums *max)
1971{
1972 max->pri = ilk_plane_wm_reg_max(dev, level, false);
1973 max->spr = ilk_plane_wm_reg_max(dev, level, true);
1974 max->cur = ilk_cursor_wm_reg_max(dev, level);
1975 max->fbc = ilk_fbc_wm_reg_max(dev);
1976}
1977
1978static bool ilk_validate_wm_level(int level,
1979 const struct ilk_wm_maximums *max,
1980 struct intel_wm_level *result)
1981{
1982 bool ret;
1983
1984
1985 if (!result->enable)
1986 return false;
1987
1988 result->enable = result->pri_val <= max->pri &&
1989 result->spr_val <= max->spr &&
1990 result->cur_val <= max->cur;
1991
1992 ret = result->enable;
1993
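	/*
	 * Level 0 must always remain enabled: if its values exceed the
	 * register maximums, clamp them instead of disabling the level.
	 */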
1999 if (level == 0 && !result->enable) {
2000 if (result->pri_val > max->pri)
2001 DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
2002 level, result->pri_val, max->pri);
2003 if (result->spr_val > max->spr)
2004 DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
2005 level, result->spr_val, max->spr);
2006 if (result->cur_val > max->cur)
2007 DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
2008 level, result->cur_val, max->cur);
2009
2010 result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
2011 result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
2012 result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
2013 result->enable = true;
2014 }
2015
2016 return ret;
2017}
2018
2019static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
2020 int level,
2021 const struct ilk_pipe_wm_parameters *p,
2022 struct intel_wm_level *result)
2023{
2024 uint16_t pri_latency = dev_priv->wm.pri_latency[level];
2025 uint16_t spr_latency = dev_priv->wm.spr_latency[level];
2026 uint16_t cur_latency = dev_priv->wm.cur_latency[level];
2027
2028
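	/* WM1+ latencies are stored in 0.5 us units; scale them to 0.1 us. */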
2029 if (level > 0) {
2030 pri_latency *= 5;
2031 spr_latency *= 5;
2032 cur_latency *= 5;
2033 }
2034
2035 result->pri_val = ilk_compute_pri_wm(p, pri_latency, level);
2036 result->spr_val = ilk_compute_spr_wm(p, spr_latency);
2037 result->cur_val = ilk_compute_cur_wm(p, cur_latency);
2038 result->fbc_val = ilk_compute_fbc_wm(p, result->pri_val);
2039 result->enable = true;
2040}
2041
2042static uint32_t
2043hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
2044{
2045 struct drm_i915_private *dev_priv = dev->dev_private;
2046 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2047 struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
2048 u32 linetime, ips_linetime;
2049
2050 if (!intel_crtc_active(crtc))
2051 return 0;
2052
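	/*
	 * Line time in 1/8 us units: how long it takes to scan out one line
	 * at the pixel clock, and likewise at the IPS (cdclk) rate.
	 */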
2056 linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
2057 mode->crtc_clock);
2058 ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
2059 intel_ddi_get_cdclk_freq(dev_priv));
2060
2061 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
2062 PIPE_WM_LINETIME_TIME(linetime);
2063}
2064
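/*
 * Read the raw per-level memory latencies the watermark code works with:
 * from the 64-bit MCH_SSKPD layout on HSW/BDW, the 32-bit MCH_SSKPD fields
 * on SNB/IVB, or MLTR_ILK on ILK (where WM0 has no register field and a
 * fixed value is used).
 */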
2065static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[5])
2066{
2067 struct drm_i915_private *dev_priv = dev->dev_private;
2068
2069 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2070 uint64_t sskpd = I915_READ64(MCH_SSKPD);
2071
2072 wm[0] = (sskpd >> 56) & 0xFF;
2073 if (wm[0] == 0)
2074 wm[0] = sskpd & 0xF;
2075 wm[1] = (sskpd >> 4) & 0xFF;
2076 wm[2] = (sskpd >> 12) & 0xFF;
2077 wm[3] = (sskpd >> 20) & 0x1FF;
2078 wm[4] = (sskpd >> 32) & 0x1FF;
2079 } else if (INTEL_INFO(dev)->gen >= 6) {
2080 uint32_t sskpd = I915_READ(MCH_SSKPD);
2081
2082 wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
2083 wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
2084 wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
2085 wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
2086 } else if (INTEL_INFO(dev)->gen >= 5) {
2087 uint32_t mltr = I915_READ(MLTR_ILK);
2088
2089
2090 wm[0] = 7;
2091 wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
2092 wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
2093 }
2094}
2095
2096static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
2097{
2098
2099 if (INTEL_INFO(dev)->gen == 5)
2100 wm[0] = 13;
2101}
2102
2103static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
2104{
2105
2106 if (INTEL_INFO(dev)->gen == 5)
2107 wm[0] = 13;
2108
2109
2110 if (IS_IVYBRIDGE(dev))
2111 wm[3] *= 2;
2112}
2113
2114int ilk_wm_max_level(const struct drm_device *dev)
2115{
2116
2117 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2118 return 4;
2119 else if (INTEL_INFO(dev)->gen >= 6)
2120 return 3;
2121 else
2122 return 2;
2123}
2124
2125static void intel_print_wm_latency(struct drm_device *dev,
2126 const char *name,
2127 const uint16_t wm[5])
2128{
2129 int level, max_level = ilk_wm_max_level(dev);
2130
2131 for (level = 0; level <= max_level; level++) {
2132 unsigned int latency = wm[level];
2133
2134 if (latency == 0) {
2135 DRM_ERROR("%s WM%d latency not provided\n",
2136 name, level);
2137 continue;
2138 }
2139
2140
2141 if (level > 0)
2142 latency *= 5;
2143
2144 DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
2145 name, level, wm[level],
2146 latency / 10, latency % 10);
2147 }
2148}
2149
2150static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
2151 uint16_t wm[5], uint16_t min)
2152{
2153 int level, max_level = ilk_wm_max_level(dev_priv->dev);
2154
2155 if (wm[0] >= min)
2156 return false;
2157
2158 wm[0] = max(wm[0], min);
2159 for (level = 1; level <= max_level; level++)
2160 wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));
2161
2162 return true;
2163}
2164
2165static void snb_wm_latency_quirk(struct drm_device *dev)
2166{
2167 struct drm_i915_private *dev_priv = dev->dev_private;
2168 bool changed;
2169
	/*
	 * The latency values read back from SSKPD can be too low on SNB and
	 * lead to underruns, so bump all of them to at least 1.2 usec
	 * (12 in 0.1 usec units).
	 */
2174 changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
2175 ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
2176 ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
2177
2178 if (!changed)
2179 return;
2180
2181 DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
2182 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2183 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2184 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
2185}
2186
2187static void ilk_setup_wm_latency(struct drm_device *dev)
2188{
2189 struct drm_i915_private *dev_priv = dev->dev_private;
2190
2191 intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
2192
2193 memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
2194 sizeof(dev_priv->wm.pri_latency));
2195 memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
2196 sizeof(dev_priv->wm.pri_latency));
2197
2198 intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
2199 intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);
2200
2201 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2202 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2203 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
2204
2205 if (IS_GEN6(dev))
2206 snb_wm_latency_quirk(dev);
2207}
2208
2209static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
2210 struct ilk_pipe_wm_parameters *p)
2211{
2212 struct drm_device *dev = crtc->dev;
2213 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2214 enum pipe pipe = intel_crtc->pipe;
2215 struct drm_plane *plane;
2216
2217 if (!intel_crtc_active(crtc))
2218 return;
2219
2220 p->active = true;
2221 p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
2222 p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
2223 p->pri.bytes_per_pixel = crtc->primary->fb->bits_per_pixel / 8;
2224 p->cur.bytes_per_pixel = 4;
2225 p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
2226 p->cur.horiz_pixels = intel_crtc->cursor_width;
2227
2228 p->pri.enabled = true;
2229 p->cur.enabled = true;
2230
2231 drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
2232 struct intel_plane *intel_plane = to_intel_plane(plane);
2233
2234 if (intel_plane->pipe == pipe) {
2235 p->spr = intel_plane->wm;
2236 break;
2237 }
2238 }
2239}
2240
2241static void ilk_compute_wm_config(struct drm_device *dev,
2242 struct intel_wm_config *config)
2243{
2244 struct intel_crtc *intel_crtc;
2245
2246
2247 for_each_intel_crtc(dev, intel_crtc) {
2248 const struct intel_pipe_wm *wm = &intel_crtc->wm.active;
2249
2250 if (!wm->pipe_enabled)
2251 continue;
2252
2253 config->sprites_enabled |= wm->sprites_enabled;
2254 config->sprites_scaled |= wm->sprites_scaled;
2255 config->num_pipes_active++;
2256 }
2257}
2258
2259
2260static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
2261 const struct ilk_pipe_wm_parameters *params,
2262 struct intel_pipe_wm *pipe_wm)
2263{
2264 struct drm_device *dev = crtc->dev;
2265 const struct drm_i915_private *dev_priv = dev->dev_private;
2266 int level, max_level = ilk_wm_max_level(dev);
2267
2268 struct intel_wm_config config = {
2269 .num_pipes_active = 1,
2270 .sprites_enabled = params->spr.enabled,
2271 .sprites_scaled = params->spr.scaled,
2272 };
2273 struct ilk_wm_maximums max;
2274
2275 pipe_wm->pipe_enabled = params->active;
2276 pipe_wm->sprites_enabled = params->spr.enabled;
2277 pipe_wm->sprites_scaled = params->spr.scaled;
2278
2279
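	/* ILK/SNB: LP2+ watermarks are only available without sprites */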
2280 if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled)
2281 max_level = 1;
2282
2283
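	/* LP1+ watermarks are disabled when sprite scaling is in use */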
2284 if (params->spr.scaled)
2285 max_level = 0;
2286
2287 ilk_compute_wm_level(dev_priv, 0, params, &pipe_wm->wm[0]);
2288
2289 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2290 pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
2291
2292
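	/* LP0 watermarks always use 1/2 DDB partitioning */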
2293 ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
2294
2295
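	/* The level 0 (WM0) watermark must always be valid */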
2296 if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]))
2297 return false;
2298
2299 ilk_compute_wm_reg_maximums(dev, 1, &max);
2300
2301 for (level = 1; level <= max_level; level++) {
2302 struct intel_wm_level wm = {};
2303
2304 ilk_compute_wm_level(dev_priv, level, params, &wm);
2305
		/*
		 * Stop at the first level that exceeds the register
		 * maximums; this and all higher levels are left disabled.
		 */
2311 if (!ilk_validate_wm_level(level, &max, &wm))
2312 break;
2313
2314 pipe_wm->wm[level] = wm;
2315 }
2316
2317 return true;
2318}
2319
2320
2321
2322
2323static void ilk_merge_wm_level(struct drm_device *dev,
2324 int level,
2325 struct intel_wm_level *ret_wm)
2326{
2327 const struct intel_crtc *intel_crtc;
2328
2329 ret_wm->enable = true;
2330
2331 for_each_intel_crtc(dev, intel_crtc) {
2332 const struct intel_pipe_wm *active = &intel_crtc->wm.active;
2333 const struct intel_wm_level *wm = &active->wm[level];
2334
2335 if (!active->pipe_enabled)
2336 continue;
2337
		/*
		 * The merged level is only usable if every active pipe can
		 * support it, but still merge the raw values so the
		 * registers keep sensible contents.
		 */
2343 if (!wm->enable)
2344 ret_wm->enable = false;
2345
2346 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
2347 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
2348 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
2349 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
2350 }
2351}
2352
2353
2354
2355
2356static void ilk_wm_merge(struct drm_device *dev,
2357 const struct intel_wm_config *config,
2358 const struct ilk_wm_maximums *max,
2359 struct intel_pipe_wm *merged)
2360{
2361 int level, max_level = ilk_wm_max_level(dev);
2362 int last_enabled_level = max_level;
2363
2364
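	/* ILK/SNB/IVB: LP1+ watermarks are only used with a single active pipe */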
2365 if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
2366 config->num_pipes_active > 1)
2367 return;
2368
2369
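	/* ILK: the FBC watermark must always be disabled */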
2370 merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;
2371
2372
2373 for (level = 1; level <= max_level; level++) {
2374 struct intel_wm_level *wm = &merged->wm[level];
2375
2376 ilk_merge_wm_level(dev, level, wm);
2377
2378 if (level > last_enabled_level)
2379 wm->enable = false;
2380 else if (!ilk_validate_wm_level(level, max, wm))
			/* make sure all following levels get disabled */
2382 last_enabled_level = level - 1;
2383
		/*
		 * If the FBC watermark exceeds its maximum, disable just the
		 * FBC watermark rather than the whole level.
		 */
2388 if (wm->fbc_val > max->fbc) {
2389 if (wm->enable)
2390 merged->fbc_wm_enabled = false;
2391 wm->fbc_val = 0;
2392 }
2393 }
2394
	/*
	 * ILK: LP2+ watermarks must be disabled when the FBC watermark is
	 * disabled but FBC itself is enabled.
	 */
2401 if (IS_GEN5(dev) && !merged->fbc_wm_enabled && intel_fbc_enabled(dev)) {
2402 for (level = 2; level <= max_level; level++) {
2403 struct intel_wm_level *wm = &merged->wm[level];
2404
2405 wm->enable = false;
2406 }
2407 }
2408}
2409
2410static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
2411{
2412
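	/* WM_LP1/2/3 map to levels 1/2/3, or 1/3/4 when level 4 is in use */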
2413 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
2414}
2415
2416
2417static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
2418{
2419 struct drm_i915_private *dev_priv = dev->dev_private;
2420
2421 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2422 return 2 * level;
2423 else
2424 return dev_priv->wm.pri_latency[level];
2425}
2426
2427static void ilk_compute_wm_results(struct drm_device *dev,
2428 const struct intel_pipe_wm *merged,
2429 enum intel_ddb_partitioning partitioning,
2430 struct ilk_wm_values *results)
2431{
2432 struct intel_crtc *intel_crtc;
2433 int level, wm_lp;
2434
2435 results->enable_fbc_wm = merged->fbc_wm_enabled;
2436 results->partitioning = partitioning;
2437
2438
2439 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2440 const struct intel_wm_level *r;
2441
2442 level = ilk_wm_lp_to_level(wm_lp, merged);
2443
2444 r = &merged->wm[level];
2445
2446
2447
2448
2449
2450 results->wm_lp[wm_lp - 1] =
2451 (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
2452 (r->pri_val << WM1_LP_SR_SHIFT) |
2453 r->cur_val;
2454
2455 if (r->enable)
2456 results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
2457
2458 if (INTEL_INFO(dev)->gen >= 8)
2459 results->wm_lp[wm_lp - 1] |=
2460 r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
2461 else
2462 results->wm_lp[wm_lp - 1] |=
2463 r->fbc_val << WM1_LP_FBC_SHIFT;
2464
2465
2466
2467
2468
2469 if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
2470 WARN_ON(wm_lp != 1);
2471 results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
2472 } else
2473 results->wm_lp_spr[wm_lp - 1] = r->spr_val;
2474 }
2475
2476
2477 for_each_intel_crtc(dev, intel_crtc) {
2478 enum pipe pipe = intel_crtc->pipe;
2479 const struct intel_wm_level *r =
2480 &intel_crtc->wm.active.wm[0];
2481
2482 if (WARN_ON(!r->enable))
2483 continue;
2484
2485 results->wm_linetime[pipe] = intel_crtc->wm.active.linetime;
2486
2487 results->wm_pipe[pipe] =
2488 (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
2489 (r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
2490 r->cur_val;
2491 }
2492}
2493
2494
2495
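/*
 * Pick whichever result has more enabled levels; on a tie prefer the one
 * with the FBC watermark enabled, and r1 if both (or neither) have it.
 */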
2496static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
2497 struct intel_pipe_wm *r1,
2498 struct intel_pipe_wm *r2)
2499{
2500 int level, max_level = ilk_wm_max_level(dev);
2501 int level1 = 0, level2 = 0;
2502
2503 for (level = 1; level <= max_level; level++) {
2504 if (r1->wm[level].enable)
2505 level1 = level;
2506 if (r2->wm[level].enable)
2507 level2 = level;
2508 }
2509
2510 if (level1 == level2) {
2511 if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
2512 return r2;
2513 else
2514 return r1;
2515 } else if (level1 > level2) {
2516 return r1;
2517 } else {
2518 return r2;
2519 }
2520}
2521
2522
2523#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
2524#define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
2525#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
2526#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
2527#define WM_DIRTY_FBC (1 << 24)
2528#define WM_DIRTY_DDB (1 << 25)
2529
2530static unsigned int ilk_compute_wm_dirty(struct drm_device *dev,
2531 const struct ilk_wm_values *old,
2532 const struct ilk_wm_values *new)
2533{
2534 unsigned int dirty = 0;
2535 enum pipe pipe;
2536 int wm_lp;
2537
2538 for_each_pipe(pipe) {
2539 if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
2540 dirty |= WM_DIRTY_LINETIME(pipe);
2541
2542 dirty |= WM_DIRTY_LP_ALL;
2543 }
2544
2545 if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
2546 dirty |= WM_DIRTY_PIPE(pipe);
2547
2548 dirty |= WM_DIRTY_LP_ALL;
2549 }
2550 }
2551
2552 if (old->enable_fbc_wm != new->enable_fbc_wm) {
2553 dirty |= WM_DIRTY_FBC;
2554
2555 dirty |= WM_DIRTY_LP_ALL;
2556 }
2557
2558 if (old->partitioning != new->partitioning) {
2559 dirty |= WM_DIRTY_DDB;
2560
2561 dirty |= WM_DIRTY_LP_ALL;
2562 }
2563
2564
2565 if (dirty & WM_DIRTY_LP_ALL)
2566 return dirty;
2567
2568
2569 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2570 if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
2571 old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
2572 break;
2573 }
2574
2575
2576 for (; wm_lp <= 3; wm_lp++)
2577 dirty |= WM_DIRTY_LP(wm_lp);
2578
2579 return dirty;
2580}
2581
2582static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
2583 unsigned int dirty)
2584{
2585 struct ilk_wm_values *previous = &dev_priv->wm.hw;
2586 bool changed = false;
2587
2588 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
2589 previous->wm_lp[2] &= ~WM1_LP_SR_EN;
2590 I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
2591 changed = true;
2592 }
2593 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
2594 previous->wm_lp[1] &= ~WM1_LP_SR_EN;
2595 I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
2596 changed = true;
2597 }
2598 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
2599 previous->wm_lp[0] &= ~WM1_LP_SR_EN;
2600 I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
2601 changed = true;
2602 }
2603
	/*
	 * Only the WM_LP enable bits are cleared here; the sprite
	 * (WM*S_LP) registers are left untouched.
	 */
2609 return changed;
2610}
2611
2612
2613
2614
2615
2616static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
2617 struct ilk_wm_values *results)
2618{
2619 struct drm_device *dev = dev_priv->dev;
2620 struct ilk_wm_values *previous = &dev_priv->wm.hw;
2621 unsigned int dirty;
2622 uint32_t val;
2623
2624 dirty = ilk_compute_wm_dirty(dev, previous, results);
2625 if (!dirty)
2626 return;
2627
2628 _ilk_disable_lp_wm(dev_priv, dirty);
2629
2630 if (dirty & WM_DIRTY_PIPE(PIPE_A))
2631 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
2632 if (dirty & WM_DIRTY_PIPE(PIPE_B))
2633 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
2634 if (dirty & WM_DIRTY_PIPE(PIPE_C))
2635 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
2636
2637 if (dirty & WM_DIRTY_LINETIME(PIPE_A))
2638 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
2639 if (dirty & WM_DIRTY_LINETIME(PIPE_B))
2640 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
2641 if (dirty & WM_DIRTY_LINETIME(PIPE_C))
2642 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
2643
2644 if (dirty & WM_DIRTY_DDB) {
2645 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2646 val = I915_READ(WM_MISC);
2647 if (results->partitioning == INTEL_DDB_PART_1_2)
2648 val &= ~WM_MISC_DATA_PARTITION_5_6;
2649 else
2650 val |= WM_MISC_DATA_PARTITION_5_6;
2651 I915_WRITE(WM_MISC, val);
2652 } else {
2653 val = I915_READ(DISP_ARB_CTL2);
2654 if (results->partitioning == INTEL_DDB_PART_1_2)
2655 val &= ~DISP_DATA_PARTITION_5_6;
2656 else
2657 val |= DISP_DATA_PARTITION_5_6;
2658 I915_WRITE(DISP_ARB_CTL2, val);
2659 }
2660 }
2661
2662 if (dirty & WM_DIRTY_FBC) {
2663 val = I915_READ(DISP_ARB_CTL);
2664 if (results->enable_fbc_wm)
2665 val &= ~DISP_FBC_WM_DIS;
2666 else
2667 val |= DISP_FBC_WM_DIS;
2668 I915_WRITE(DISP_ARB_CTL, val);
2669 }
2670
2671 if (dirty & WM_DIRTY_LP(1) &&
2672 previous->wm_lp_spr[0] != results->wm_lp_spr[0])
2673 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
2674
2675 if (INTEL_INFO(dev)->gen >= 7) {
2676 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
2677 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
2678 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
2679 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
2680 }
2681
2682 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
2683 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
2684 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
2685 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
2686 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
2687 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
2688
2689 dev_priv->wm.hw = *results;
2690}
2691
2692static bool ilk_disable_lp_wm(struct drm_device *dev)
2693{
2694 struct drm_i915_private *dev_priv = dev->dev_private;
2695
2696 return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
2697}
2698
2699static void ilk_update_wm(struct drm_crtc *crtc)
2700{
2701 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2702 struct drm_device *dev = crtc->dev;
2703 struct drm_i915_private *dev_priv = dev->dev_private;
2704 struct ilk_wm_maximums max;
2705 struct ilk_pipe_wm_parameters params = {};
2706 struct ilk_wm_values results = {};
2707 enum intel_ddb_partitioning partitioning;
2708 struct intel_pipe_wm pipe_wm = {};
2709 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
2710 struct intel_wm_config config = {};
2711
	ilk_compute_wm_parameters(crtc, &params);
2713
	intel_compute_pipe_wm(crtc, &params, &pipe_wm);
2715
2716 if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
2717 return;
2718
2719 intel_crtc->wm.active = pipe_wm;
2720
2721 ilk_compute_wm_config(dev, &config);
2722
2723 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
2724 ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
2725
2726
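	/* 5/6 DDB split is only considered for a single pipe with sprites (gen7+) */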
2727 if (INTEL_INFO(dev)->gen >= 7 &&
2728 config.num_pipes_active == 1 && config.sprites_enabled) {
2729 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
2730 ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
2731
2732 best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
2733 } else {
2734 best_lp_wm = &lp_wm_1_2;
2735 }
2736
2737 partitioning = (best_lp_wm == &lp_wm_1_2) ?
2738 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
2739
2740 ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);
2741
2742 ilk_write_wm_values(dev_priv, &results);
2743}
2744
2745static void
2746ilk_update_sprite_wm(struct drm_plane *plane,
2747 struct drm_crtc *crtc,
2748 uint32_t sprite_width, uint32_t sprite_height,
2749 int pixel_size, bool enabled, bool scaled)
2750{
2751 struct drm_device *dev = plane->dev;
2752 struct intel_plane *intel_plane = to_intel_plane(plane);
2753
2754 intel_plane->wm.enabled = enabled;
2755 intel_plane->wm.scaled = scaled;
2756 intel_plane->wm.horiz_pixels = sprite_width;
	intel_plane->wm.vert_pixels = sprite_height;
2758 intel_plane->wm.bytes_per_pixel = pixel_size;
2759
	/*
	 * IVB: low power watermarks must be off for (at least) one frame
	 * before sprite scaling is enabled, so wait for a vblank after
	 * disabling them.
	 */
2767 if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev))
2768 intel_wait_for_vblank(dev, intel_plane->pipe);
2769
2770 ilk_update_wm(crtc);
2771}
2772
2773static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
2774{
2775 struct drm_device *dev = crtc->dev;
2776 struct drm_i915_private *dev_priv = dev->dev_private;
2777 struct ilk_wm_values *hw = &dev_priv->wm.hw;
2778 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2779 struct intel_pipe_wm *active = &intel_crtc->wm.active;
2780 enum pipe pipe = intel_crtc->pipe;
2781 static const unsigned int wm0_pipe_reg[] = {
2782 [PIPE_A] = WM0_PIPEA_ILK,
2783 [PIPE_B] = WM0_PIPEB_ILK,
2784 [PIPE_C] = WM0_PIPEC_IVB,
2785 };
2786
2787 hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
2788 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2789 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
2790
2791 active->pipe_enabled = intel_crtc_active(crtc);
2792
2793 if (active->pipe_enabled) {
2794 u32 tmp = hw->wm_pipe[pipe];
2795
		/*
		 * Only the WM0 values can be read back reliably here, so
		 * mark level 0 enabled and leave LP1+ disabled for active
		 * pipes.
		 */
2802 active->wm[0].enable = true;
2803 active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
2804 active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
2805 active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
2806 active->linetime = hw->wm_linetime[pipe];
2807 } else {
2808 int level, max_level = ilk_wm_max_level(dev);
2809
		/*
		 * For inactive pipes mark every level as enabled; the zeroed
		 * values match what we would compute for them.
		 */
2815 for (level = 0; level <= max_level; level++)
2816 active->wm[level].enable = true;
2817 }
2818}
2819
2820void ilk_wm_get_hw_state(struct drm_device *dev)
2821{
2822 struct drm_i915_private *dev_priv = dev->dev_private;
2823 struct ilk_wm_values *hw = &dev_priv->wm.hw;
2824 struct drm_crtc *crtc;
2825
2826 for_each_crtc(dev, crtc)
2827 ilk_pipe_wm_get_hw_state(crtc);
2828
2829 hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
2830 hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
2831 hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
2832
2833 hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
2834 if (INTEL_INFO(dev)->gen >= 7) {
2835 hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
2836 hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
2837 }
2838
2839 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2840 hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
2841 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
2842 else if (IS_IVYBRIDGE(dev))
2843 hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
2844 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
2845
2846 hw->enable_fbc_wm =
2847 !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
2848}
2849
/**
 * intel_update_watermarks - update FIFO watermark values based on current modes
 *
 * Recompute and program the display FIFO watermarks for @crtc through the
 * per-platform update_wm hook.  Watermarks determine when the display engine
 * starts fetching more data from memory: they must be large enough to cover
 * the memory latency (otherwise the pipe underruns), while overly large
 * values cost memory bandwidth and power, so they are recalculated whenever
 * the mode or plane configuration changes.
 */
2882void intel_update_watermarks(struct drm_crtc *crtc)
2883{
2884 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
2885
2886 if (dev_priv->display.update_wm)
2887 dev_priv->display.update_wm(crtc);
2888}
2889
2890void intel_update_sprite_watermarks(struct drm_plane *plane,
2891 struct drm_crtc *crtc,
2892 uint32_t sprite_width,
2893 uint32_t sprite_height,
2894 int pixel_size,
2895 bool enabled, bool scaled)
2896{
2897 struct drm_i915_private *dev_priv = plane->dev->dev_private;
2898
2899 if (dev_priv->display.update_sprite_wm)
2900 dev_priv->display.update_sprite_wm(plane, crtc,
2901 sprite_width, sprite_height,
2902 pixel_size, enabled, scaled);
2903}
2904
2905static struct drm_i915_gem_object *
2906intel_alloc_context_page(struct drm_device *dev)
2907{
2908 struct drm_i915_gem_object *ctx;
2909 int ret;
2910
2911 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2912
2913 ctx = i915_gem_alloc_object(dev, 4096);
2914 if (!ctx) {
2915 DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
2916 return NULL;
2917 }
2918
2919 ret = i915_gem_obj_ggtt_pin(ctx, 4096, 0);
2920 if (ret) {
2921 DRM_ERROR("failed to pin power context: %d\n", ret);
2922 goto err_unref;
2923 }
2924
2925 ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
2926 if (ret) {
2927 DRM_ERROR("failed to set-domain on power context: %d\n", ret);
2928 goto err_unpin;
2929 }
2930
2931 return ctx;
2932
2933err_unpin:
2934 i915_gem_object_ggtt_unpin(ctx);
2935err_unref:
2936 drm_gem_object_unreference(&ctx->base);
2937 return NULL;
2938}
2939
2940
2941
2942
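/* Protects the DRPS/IPS state in dev_priv->ips and the i915_mch_dev pointer below */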
2943DEFINE_SPINLOCK(mchdev_lock);
2944
2945
2946
2947static struct drm_i915_private *i915_mch_dev;
2948
2949bool ironlake_set_drps(struct drm_device *dev, u8 val)
2950{
2951 struct drm_i915_private *dev_priv = dev->dev_private;
2952 u16 rgvswctl;
2953
2954 assert_spin_locked(&mchdev_lock);
2955
2956 rgvswctl = I915_READ16(MEMSWCTL);
2957 if (rgvswctl & MEMCTL_CMD_STS) {
2958 DRM_DEBUG("gpu busy, RCS change rejected\n");
2959 return false;
2960 }
2961
2962 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
2963 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
2964 I915_WRITE16(MEMSWCTL, rgvswctl);
2965 POSTING_READ16(MEMSWCTL);
2966
2967 rgvswctl |= MEMCTL_CMD_STS;
2968 I915_WRITE16(MEMSWCTL, rgvswctl);
2969
2970 return true;
2971}
2972
2973static void ironlake_enable_drps(struct drm_device *dev)
2974{
2975 struct drm_i915_private *dev_priv = dev->dev_private;
2976 u32 rgvmodectl = I915_READ(MEMMODECTL);
2977 u8 fmax, fmin, fstart, vstart;
2978
2979 spin_lock_irq(&mchdev_lock);
2980
2981
2982 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
2983 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
2984
2985
2986 I915_WRITE(RCUPEI, 100000);
2987 I915_WRITE(RCDNEI, 100000);
2988
2989
2990 I915_WRITE(RCBMAXAVG, 90000);
2991 I915_WRITE(RCBMINAVG, 80000);
2992
2993 I915_WRITE(MEMIHYST, 1);
2994
2995
2996 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
2997 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
2998 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
2999 MEMMODE_FSTART_SHIFT;
3000
3001 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
3002 PXVFREQ_PX_SHIFT;
3003
3004 dev_priv->ips.fmax = fmax;
3005 dev_priv->ips.fstart = fstart;
3006
3007 dev_priv->ips.max_delay = fstart;
3008 dev_priv->ips.min_delay = fmin;
3009 dev_priv->ips.cur_delay = fstart;
3010
3011 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
3012 fmax, fmin, fstart);
3013
3014 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
3015
3016
3017
3018
3019
3020 I915_WRITE(VIDSTART, vstart);
3021 POSTING_READ(VIDSTART);
3022
3023 rgvmodectl |= MEMMODE_SWMODE_EN;
3024 I915_WRITE(MEMMODECTL, rgvmodectl);
3025
3026 if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
3027 DRM_ERROR("stuck trying to change perf mode\n");
3028 mdelay(1);
3029
3030 ironlake_set_drps(dev, fstart);
3031
3032 dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
3033 I915_READ(0x112e0);
3034 dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
3035 dev_priv->ips.last_count2 = I915_READ(0x112f4);
3036 dev_priv->ips.last_time2 = ktime_get_raw_ns();
3037
3038 spin_unlock_irq(&mchdev_lock);
3039}
3040
3041static void ironlake_disable_drps(struct drm_device *dev)
3042{
3043 struct drm_i915_private *dev_priv = dev->dev_private;
3044 u16 rgvswctl;
3045
3046 spin_lock_irq(&mchdev_lock);
3047
3048 rgvswctl = I915_READ16(MEMSWCTL);
3049
3050
3051 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
3052 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
3053 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
3054 I915_WRITE(DEIIR, DE_PCU_EVENT);
3055 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
3056
3057
3058 ironlake_set_drps(dev, dev_priv->ips.fstart);
3059 mdelay(1);
3060 rgvswctl |= MEMCTL_CMD_STS;
3061 I915_WRITE(MEMSWCTL, rgvswctl);
3062 mdelay(1);
3063
3064 spin_unlock_irq(&mchdev_lock);
3065}
3066
3067
3068
3069
3070
3071
3072static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
3073{
3074 u32 limits;
3075
	/*
	 * Always program the up limit; the down limit is only set once the
	 * requested frequency has reached the soft minimum.
	 */
3082 limits = dev_priv->rps.max_freq_softlimit << 24;
3083 if (val <= dev_priv->rps.min_freq_softlimit)
3084 limits |= dev_priv->rps.min_freq_softlimit << 16;
3085
3086 return limits;
3087}
3088
3089static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
3090{
3091 int new_power;
3092
3093 new_power = dev_priv->rps.power;
3094 switch (dev_priv->rps.power) {
3095 case LOW_POWER:
3096 if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
3097 new_power = BETWEEN;
3098 break;
3099
3100 case BETWEEN:
3101 if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
3102 new_power = LOW_POWER;
3103 else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
3104 new_power = HIGH_POWER;
3105 break;
3106
3107 case HIGH_POWER:
3108 if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
3109 new_power = BETWEEN;
3110 break;
3111 }
3112
3113 if (val == dev_priv->rps.min_freq_softlimit)
3114 new_power = LOW_POWER;
3115 if (val == dev_priv->rps.max_freq_softlimit)
3116 new_power = HIGH_POWER;
3117 if (new_power == dev_priv->rps.power)
3118 return;
3119
3120
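	/*
	 * Program the up/down evaluation intervals and busy thresholds for
	 * the new power state, e.g. in LOW_POWER upclock above ~94% busy
	 * (11800/12500) and downclock below 85% busy (21250/25000).
	 */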
3121 switch (new_power) {
3122 case LOW_POWER:
3123
3124 I915_WRITE(GEN6_RP_UP_EI, 12500);
3125 I915_WRITE(GEN6_RP_UP_THRESHOLD, 11800);
3126
3127
3128 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
3129 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 21250);
3130
3131 I915_WRITE(GEN6_RP_CONTROL,
3132 GEN6_RP_MEDIA_TURBO |
3133 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3134 GEN6_RP_MEDIA_IS_GFX |
3135 GEN6_RP_ENABLE |
3136 GEN6_RP_UP_BUSY_AVG |
3137 GEN6_RP_DOWN_IDLE_AVG);
3138 break;
3139
3140 case BETWEEN:
3141
3142 I915_WRITE(GEN6_RP_UP_EI, 10250);
3143 I915_WRITE(GEN6_RP_UP_THRESHOLD, 9225);
3144
3145
3146 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
3147 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 18750);
3148
3149 I915_WRITE(GEN6_RP_CONTROL,
3150 GEN6_RP_MEDIA_TURBO |
3151 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3152 GEN6_RP_MEDIA_IS_GFX |
3153 GEN6_RP_ENABLE |
3154 GEN6_RP_UP_BUSY_AVG |
3155 GEN6_RP_DOWN_IDLE_AVG);
3156 break;
3157
3158 case HIGH_POWER:
3159
3160 I915_WRITE(GEN6_RP_UP_EI, 8000);
3161 I915_WRITE(GEN6_RP_UP_THRESHOLD, 6800);
3162
3163
3164 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
3165 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 15000);
3166
3167 I915_WRITE(GEN6_RP_CONTROL,
3168 GEN6_RP_MEDIA_TURBO |
3169 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3170 GEN6_RP_MEDIA_IS_GFX |
3171 GEN6_RP_ENABLE |
3172 GEN6_RP_UP_BUSY_AVG |
3173 GEN6_RP_DOWN_IDLE_AVG);
3174 break;
3175 }
3176
3177 dev_priv->rps.power = new_power;
3178 dev_priv->rps.last_adj = 0;
3179}
3180
3181static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
3182{
3183 u32 mask = 0;
3184
3185 if (val > dev_priv->rps.min_freq_softlimit)
3186 mask |= GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
3187 if (val < dev_priv->rps.max_freq_softlimit)
3188 mask |= GEN6_PM_RP_UP_THRESHOLD;
3189
3190 mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED);
3191 mask &= dev_priv->pm_rps_events;
3192
3193
3194
3195
3196 if (INTEL_INFO(dev_priv->dev)->gen <= 7 && !IS_HASWELL(dev_priv->dev))
3197 mask |= GEN6_PM_RP_UP_EI_EXPIRED;
3198
3199 if (IS_GEN8(dev_priv->dev))
3200 mask |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;
3201
3202 return ~mask;
3203}
3204
3205
3206
3207
3208void gen6_set_rps(struct drm_device *dev, u8 val)
3209{
3210 struct drm_i915_private *dev_priv = dev->dev_private;
3211
3212 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3213 WARN_ON(val > dev_priv->rps.max_freq_softlimit);
3214 WARN_ON(val < dev_priv->rps.min_freq_softlimit);
3215
3216
3217
3218
3219 if (val != dev_priv->rps.cur_freq) {
3220 gen6_set_rps_thresholds(dev_priv, val);
3221
3222 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
3223 I915_WRITE(GEN6_RPNSWREQ,
3224 HSW_FREQUENCY(val));
3225 else
3226 I915_WRITE(GEN6_RPNSWREQ,
3227 GEN6_FREQUENCY(val) |
3228 GEN6_OFFSET(0) |
3229 GEN6_AGGRESSIVE_TURBO);
3230 }
3231
	/*
	 * Always refresh the interrupt limits and mask so we keep getting
	 * up/down interrupts until the soft minimum or maximum is reached.
	 */
3235 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, gen6_rps_limits(dev_priv, val));
3236 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
3237
3238 POSTING_READ(GEN6_RPNSWREQ);
3239
3240 dev_priv->rps.cur_freq = val;
3241 trace_intel_gpu_freq_change(val * 50);
3242}
3243
/*
 * vlv_set_rps_idle: drop to the minimum frequency while the GPU is idle.
 *
 * Recent VLV revisions (0xd and later) can simply request the new frequency;
 * older ones need the graphics clock forced on first: mask the RPS
 * interrupts, force the clock, write the minimum frequency to the punit and
 * wait for it to take effect, then release the clock and restore the
 * interrupt mask.
 */
3253static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
3254{
3255 struct drm_device *dev = dev_priv->dev;
3256
3257
3258 if (dev->pdev->revision >= 0xd) {
3259 valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
3260 return;
3261 }
3262
3263
3264
3265
3266
3267 if (dev_priv->rps.cur_freq <= dev_priv->rps.min_freq_softlimit)
3268 return;
3269
3270
3271 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
3272
3273 vlv_force_gfx_clock(dev_priv, true);
3274
3275 dev_priv->rps.cur_freq = dev_priv->rps.min_freq_softlimit;
3276
3277 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ,
3278 dev_priv->rps.min_freq_softlimit);
3279
3280 if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
3281 & GENFREQSTATUS) == 0, 5))
3282 DRM_ERROR("timed out waiting for Punit\n");
3283
3284 vlv_force_gfx_clock(dev_priv, false);
3285
3286 I915_WRITE(GEN6_PMINTRMSK,
3287 gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
3288}
3289
3290void gen6_rps_idle(struct drm_i915_private *dev_priv)
3291{
3292 struct drm_device *dev = dev_priv->dev;
3293
3294 mutex_lock(&dev_priv->rps.hw_lock);
3295 if (dev_priv->rps.enabled) {
3296 if (IS_CHERRYVIEW(dev))
3297 valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
3298 else if (IS_VALLEYVIEW(dev))
3299 vlv_set_rps_idle(dev_priv);
3300 else
3301 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
3302 dev_priv->rps.last_adj = 0;
3303 }
3304 mutex_unlock(&dev_priv->rps.hw_lock);
3305}
3306
3307void gen6_rps_boost(struct drm_i915_private *dev_priv)
3308{
3309 struct drm_device *dev = dev_priv->dev;
3310
3311 mutex_lock(&dev_priv->rps.hw_lock);
3312 if (dev_priv->rps.enabled) {
3313 if (IS_VALLEYVIEW(dev))
3314 valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
3315 else
3316 gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
3317 dev_priv->rps.last_adj = 0;
3318 }
3319 mutex_unlock(&dev_priv->rps.hw_lock);
3320}
3321
3322void valleyview_set_rps(struct drm_device *dev, u8 val)
3323{
3324 struct drm_i915_private *dev_priv = dev->dev_private;
3325
3326 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3327 WARN_ON(val > dev_priv->rps.max_freq_softlimit);
3328 WARN_ON(val < dev_priv->rps.min_freq_softlimit);
3329
3330 DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
3331 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
3332 dev_priv->rps.cur_freq,
3333 vlv_gpu_freq(dev_priv, val), val);
3334
3335 if (val != dev_priv->rps.cur_freq)
3336 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
3337
3338 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
3339
3340 dev_priv->rps.cur_freq = val;
3341 trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
3342}
3343
3344static void gen8_disable_rps_interrupts(struct drm_device *dev)
3345{
3346 struct drm_i915_private *dev_priv = dev->dev_private;
3347
3348 I915_WRITE(GEN6_PMINTRMSK, ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
3349 I915_WRITE(GEN8_GT_IER(2), I915_READ(GEN8_GT_IER(2)) &
3350 ~dev_priv->pm_rps_events);
3351
3357 spin_lock_irq(&dev_priv->irq_lock);
3358 dev_priv->rps.pm_iir = 0;
3359 spin_unlock_irq(&dev_priv->irq_lock);
3360
3361 I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
3362}
3363
3364static void gen6_disable_rps_interrupts(struct drm_device *dev)
3365{
3366 struct drm_i915_private *dev_priv = dev->dev_private;
3367
3368 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
3369 I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) &
3370 ~dev_priv->pm_rps_events);
3371
3376 spin_lock_irq(&dev_priv->irq_lock);
3377 dev_priv->rps.pm_iir = 0;
3378 spin_unlock_irq(&dev_priv->irq_lock);
3379
3380 I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
3381}
3382
3383static void gen6_disable_rps(struct drm_device *dev)
3384{
3385 struct drm_i915_private *dev_priv = dev->dev_private;
3386
3387 I915_WRITE(GEN6_RC_CONTROL, 0);
3388 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
3389
3390 if (IS_BROADWELL(dev))
3391 gen8_disable_rps_interrupts(dev);
3392 else
3393 gen6_disable_rps_interrupts(dev);
3394}
3395
3396static void cherryview_disable_rps(struct drm_device *dev)
3397{
3398 struct drm_i915_private *dev_priv = dev->dev_private;
3399
3400 I915_WRITE(GEN6_RC_CONTROL, 0);
3401
3402 gen8_disable_rps_interrupts(dev);
3403}
3404
3405static void valleyview_disable_rps(struct drm_device *dev)
3406{
3407 struct drm_i915_private *dev_priv = dev->dev_private;
3408
3409 I915_WRITE(GEN6_RC_CONTROL, 0);
3410
3411 gen6_disable_rps_interrupts(dev);
3412}
3413
3414static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
3415{
3416 if (IS_VALLEYVIEW(dev)) {
3417 if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
3418 mode = GEN6_RC_CTL_RC6_ENABLE;
3419 else
3420 mode = 0;
3421 }
3422 DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
3423 (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
3424 (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
3425 (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
3426}
3427
3428static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
3429{
3430
3431 if (INTEL_INFO(dev)->gen < 5)
3432 return 0;
3433
3434
3435 if (INTEL_INFO(dev)->gen == 5 && !IS_IRONLAKE_M(dev))
3436 return 0;
3437
3438
3439 if (enable_rc6 >= 0) {
3440 int mask;
3441
3442 if (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
3443 mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
3444 INTEL_RC6pp_ENABLE;
3445 else
3446 mask = INTEL_RC6_ENABLE;
3447
3448 if ((enable_rc6 & mask) != enable_rc6)
3449 DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
3450 enable_rc6 & mask, enable_rc6, mask);
3451
3452 return enable_rc6 & mask;
3453 }
3454
3455
3456 if (INTEL_INFO(dev)->gen == 5)
3457 return 0;
3458
3459 if (IS_IVYBRIDGE(dev))
3460 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
3461
3462 return INTEL_RC6_ENABLE;
3463}
3464
3465int intel_enable_rc6(const struct drm_device *dev)
3466{
3467 return i915.enable_rc6;
3468}
3469
3470static void gen8_enable_rps_interrupts(struct drm_device *dev)
3471{
3472 struct drm_i915_private *dev_priv = dev->dev_private;
3473
3474 spin_lock_irq(&dev_priv->irq_lock);
3475 WARN_ON(dev_priv->rps.pm_iir);
3476 gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
3477 I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
3478 spin_unlock_irq(&dev_priv->irq_lock);
3479}
3480
3481static void gen6_enable_rps_interrupts(struct drm_device *dev)
3482{
3483 struct drm_i915_private *dev_priv = dev->dev_private;
3484
3485 spin_lock_irq(&dev_priv->irq_lock);
3486 WARN_ON(dev_priv->rps.pm_iir);
3487 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
3488 I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
3489 spin_unlock_irq(&dev_priv->irq_lock);
3490}
3491
3492static void parse_rp_state_cap(struct drm_i915_private *dev_priv, u32 rp_state_cap)
3493{
3494
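	/* All of these frequency values are in units of 50 MHz */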
3495 dev_priv->rps.cur_freq = 0;
3496
3497 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
3498 dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
3499 dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
3500
3501 dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
3502
3503 dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
3504
3505
3506 if (dev_priv->rps.max_freq_softlimit == 0)
3507 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
3508
3509 if (dev_priv->rps.min_freq_softlimit == 0)
3510 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
3511}
3512
3513static void gen8_enable_rps(struct drm_device *dev)
3514{
3515 struct drm_i915_private *dev_priv = dev->dev_private;
3516 struct intel_engine_cs *ring;
3517 uint32_t rc6_mask = 0, rp_state_cap;
3518 int unused;
3519
3520
3521 I915_WRITE(GEN6_RC_STATE, 0);
3522
3523
3524
3525 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
3526
3527
3528 I915_WRITE(GEN6_RC_CONTROL, 0);
3529
3530 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
3531 parse_rp_state_cap(dev_priv, rp_state_cap);
3532
3533
3534 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
3535 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
3536 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
3537 for_each_ring(ring, dev_priv, unused)
3538 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
3539 I915_WRITE(GEN6_RC_SLEEP, 0);
3540 if (IS_BROADWELL(dev))
3541 I915_WRITE(GEN6_RC6_THRESHOLD, 625);
3542 else
3543 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
3544
3545
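	/* Enable RC6 if requested via the module parameter */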
3546 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
3547 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
3548 intel_print_rc6_info(dev, rc6_mask);
3549 if (IS_BROADWELL(dev))
3550 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
3551 GEN7_RC_CTL_TO_MODE |
3552 rc6_mask);
3553 else
3554 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
3555 GEN6_RC_CTL_EI_MODE(1) |
3556 rc6_mask);
3557
3558
3559 I915_WRITE(GEN6_RPNSWREQ,
3560 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
3561 I915_WRITE(GEN6_RC_VIDEO_FREQ,
3562 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
3563
3564 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128);
3565
3566
3567 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
3568 dev_priv->rps.max_freq_softlimit << 24 |
3569 dev_priv->rps.min_freq_softlimit << 16);
3570
3571 I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128);
3572 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128);
3573 I915_WRITE(GEN6_RP_UP_EI, 66000);
3574 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
3575
3576 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3577
3578
3579 I915_WRITE(GEN6_RP_CONTROL,
3580 GEN6_RP_MEDIA_TURBO |
3581 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3582 GEN6_RP_MEDIA_IS_GFX |
3583 GEN6_RP_ENABLE |
3584 GEN6_RP_UP_BUSY_AVG |
3585 GEN6_RP_DOWN_IDLE_AVG);
3586
3587
3588
3589 gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);
3590
3591 gen8_enable_rps_interrupts(dev);
3592
3593 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
3594}
3595
3596static void gen6_enable_rps(struct drm_device *dev)
3597{
3598 struct drm_i915_private *dev_priv = dev->dev_private;
3599 struct intel_engine_cs *ring;
3600 u32 rp_state_cap;
3601 u32 gt_perf_status;
3602 u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
3603 u32 gtfifodbg;
3604 int rc6_mode;
3605 int i, ret;
3606
3607 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3608
3615 I915_WRITE(GEN6_RC_STATE, 0);
3616
3617
3618 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
3619 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
3620 I915_WRITE(GTFIFODBG, gtfifodbg);
3621 }
3622
3623 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
3624
3625 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
3626 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
3627
3628 parse_rp_state_cap(dev_priv, rp_state_cap);
3629
3630
3631 I915_WRITE(GEN6_RC_CONTROL, 0);
3632
3633 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
3634 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
3635 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
3636 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
3637 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
3638
3639 for_each_ring(ring, dev_priv, i)
3640 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
3641
3642 I915_WRITE(GEN6_RC_SLEEP, 0);
3643 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
3644 if (IS_IVYBRIDGE(dev))
3645 I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
3646 else
3647 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
3648 I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
3649 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000);
3650
3651
3652 rc6_mode = intel_enable_rc6(dev_priv->dev);
3653 if (rc6_mode & INTEL_RC6_ENABLE)
3654 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
3655
3656
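	/* Deeper RC6 states (RC6p/RC6pp) are not used on Haswell */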
3657 if (!IS_HASWELL(dev)) {
3658 if (rc6_mode & INTEL_RC6p_ENABLE)
3659 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
3660
3661 if (rc6_mode & INTEL_RC6pp_ENABLE)
3662 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
3663 }
3664
3665 intel_print_rc6_info(dev, rc6_mask);
3666
3667 I915_WRITE(GEN6_RC_CONTROL,
3668 rc6_mask |
3669 GEN6_RC_CTL_EI_MODE(1) |
3670 GEN6_RC_CTL_HW_ENABLE);
3671
3672
3673 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
3674 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3675
3676 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
3677 if (ret)
3678 DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
3679
3680 ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
3681 if (!ret && (pcu_mbox & (1<<31))) {
3682 DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
3683 (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
3684 (pcu_mbox & 0xff) * 50);
3685 dev_priv->rps.max_freq = pcu_mbox & 0xff;
3686 }
3687
3688 dev_priv->rps.power = HIGH_POWER;
3689 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
3690
3691 gen6_enable_rps_interrupts(dev);
3692
3693 rc6vids = 0;
3694 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
3695 if (IS_GEN6(dev) && ret) {
3696 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
3697 } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
3698 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
3699 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
3700 rc6vids &= 0xffff00;
3701 rc6vids |= GEN6_ENCODE_RC6_VID(450);
3702 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
3703 if (ret)
3704 DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
3705 }
3706
3707 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
3708}
3709
3710static void __gen6_update_ring_freq(struct drm_device *dev)
3711{
3712 struct drm_i915_private *dev_priv = dev->dev_private;
3713 int min_freq = 15;
3714 unsigned int gpu_freq;
3715 unsigned int max_ia_freq, min_ring_freq;
3716 int scaling_factor = 180;
3717 struct cpufreq_policy *policy;
3718
3719 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3720
3721 policy = cpufreq_cpu_get(0);
3722 if (policy) {
3723 max_ia_freq = policy->cpuinfo.max_freq;
3724 cpufreq_cpu_put(policy);
3725 } else {
		/*
		 * No cpufreq policy available; fall back to the measured TSC
		 * frequency as the maximum IA frequency.
		 */
3730 max_ia_freq = tsc_khz;
3731 }
3732
3733
3734 max_ia_freq /= 1000;
3735
3736 min_ring_freq = I915_READ(DCLK) & 0xf;
3737
3738 min_ring_freq = mult_frac(min_ring_freq, 8, 3);
3739
	/*
	 * For each GPU frequency between the soft limits, program the PCU's
	 * min-freq table with the IA and ring frequencies to pair with it.
	 */
3745 for (gpu_freq = dev_priv->rps.max_freq_softlimit; gpu_freq >= dev_priv->rps.min_freq_softlimit;
3746 gpu_freq--) {
3747 int diff = dev_priv->rps.max_freq_softlimit - gpu_freq;
3748 unsigned int ia_freq = 0, ring_freq = 0;
3749
3750 if (INTEL_INFO(dev)->gen >= 8) {
3751
3752 ring_freq = max(min_ring_freq, gpu_freq);
3753 } else if (IS_HASWELL(dev)) {
3754 ring_freq = mult_frac(gpu_freq, 5, 4);
3755 ring_freq = max(min_ring_freq, ring_freq);
3756
3757 } else {
			/*
			 * SNB/IVB have no independent ring clock domain, so
			 * raise the IA frequency instead: scale it down from
			 * the CPU maximum as the GPU frequency drops, with a
			 * fixed 800 MHz request below min_freq (ia_freq is
			 * handed to the PCU in 100 MHz units).
			 */
3765 if (gpu_freq < min_freq)
3766 ia_freq = 800;
3767 else
3768 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
3769 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
3770 }
3771
3772 sandybridge_pcode_write(dev_priv,
3773 GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
3774 ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
3775 ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
3776 gpu_freq);
3777 }
3778}
3779
3780void gen6_update_ring_freq(struct drm_device *dev)
3781{
3782 struct drm_i915_private *dev_priv = dev->dev_private;
3783
3784 if (INTEL_INFO(dev)->gen < 6 || IS_VALLEYVIEW(dev))
3785 return;
3786
3787 mutex_lock(&dev_priv->rps.hw_lock);
3788 __gen6_update_ring_freq(dev);
3789 mutex_unlock(&dev_priv->rps.hw_lock);
3790}
3791
3792static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
3793{
3794 u32 val, rp0;
3795
3796 val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
3797 rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
3798
3799 return rp0;
3800}
3801
3802static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
3803{
3804 u32 val, rpe;
3805
3806 val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
3807 rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
3808
3809 return rpe;
3810}
3811
3812static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
3813{
3814 u32 val, rp1;
3815
3816 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
3817 rp1 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
3818
3819 return rp1;
3820}
3821
3822static int cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
3823{
3824 u32 val, rpn;
3825
3826 val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
3827 rpn = (val >> PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT) & PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK;
3828 return rpn;
3829}
3830
3831static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
3832{
3833 u32 val, rp1;
3834
3835 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
3836
3837 rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
3838
3839 return rp1;
3840}
3841
3842static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
3843{
3844 u32 val, rp0;
3845
3846 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
3847
3848 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
3849
3850 rp0 = min_t(u32, rp0, 0xea);
3851
3852 return rp0;
3853}
3854
3855static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
3856{
3857 u32 val, rpe;
3858
3859 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
3860 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
3861 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
3862 rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
3863
3864 return rpe;
3865}
3866
3867static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
3868{
3869 return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
3870}
3871
3872
3873static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
3874{
3875 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
3876
3877 WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
3878 dev_priv->vlv_pctx->stolen->start);
3879}
3880
3881
3882
3883static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
3884{
3885 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
3886
3887 WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
3888}
3889
3890static void cherryview_setup_pctx(struct drm_device *dev)
3891{
3892 struct drm_i915_private *dev_priv = dev->dev_private;
3893 unsigned long pctx_paddr, paddr;
3894 struct i915_gtt *gtt = &dev_priv->gtt;
3895 u32 pcbr;
3896 int pctx_size = 32*1024;
3897
3898 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
3899
3900 pcbr = I915_READ(VLV_PCBR);
3901 if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
3902 paddr = (dev_priv->mm.stolen_base +
3903 (gtt->stolen_size - pctx_size));
3904
3905 pctx_paddr = (paddr & (~4095));
3906 I915_WRITE(VLV_PCBR, pctx_paddr);
3907 }
3908}
3909
3910static void valleyview_setup_pctx(struct drm_device *dev)
3911{
3912 struct drm_i915_private *dev_priv = dev->dev_private;
3913 struct drm_i915_gem_object *pctx;
3914 unsigned long pctx_paddr;
3915 u32 pcbr;
3916 int pctx_size = 24*1024;
3917
3918 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
3919
3920 pcbr = I915_READ(VLV_PCBR);
3921 if (pcbr) {
3922
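		/* BIOS already set up a power context; reuse the preallocated stolen memory */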
3923 int pcbr_offset;
3924
3925 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
3926 pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
3927 pcbr_offset,
3928 I915_GTT_OFFSET_NONE,
3929 pctx_size);
3930 goto out;
3931 }
3932
	/*
	 * No pre-programmed power context: allocate one from stolen memory
	 * and point PCBR at it.
	 */
3941 pctx = i915_gem_object_create_stolen(dev, pctx_size);
3942 if (!pctx) {
3943 DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
3944 return;
3945 }
3946
3947 pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
3948 I915_WRITE(VLV_PCBR, pctx_paddr);
3949
3950out:
3951 dev_priv->vlv_pctx = pctx;
3952}
3953
3954static void valleyview_cleanup_pctx(struct drm_device *dev)
3955{
3956 struct drm_i915_private *dev_priv = dev->dev_private;
3957
3958 if (WARN_ON(!dev_priv->vlv_pctx))
3959 return;
3960
3961 drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
3962 dev_priv->vlv_pctx = NULL;
3963}
3964
3965static void valleyview_init_gt_powersave(struct drm_device *dev)
3966{
3967 struct drm_i915_private *dev_priv = dev->dev_private;
3968
3969 valleyview_setup_pctx(dev);
3970
3971 mutex_lock(&dev_priv->rps.hw_lock);
3972
3973 dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
3974 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
3975 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
3976 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
3977 dev_priv->rps.max_freq);
3978
3979 dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
3980 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
3981 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
3982 dev_priv->rps.efficient_freq);
3983
3984 dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
3985 DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
3986 vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
3987 dev_priv->rps.rp1_freq);
3988
3989 dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
3990 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
3991 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
3992 dev_priv->rps.min_freq);
3993
3994
3995 if (dev_priv->rps.max_freq_softlimit == 0)
3996 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
3997
3998 if (dev_priv->rps.min_freq_softlimit == 0)
3999 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
4000
4001 mutex_unlock(&dev_priv->rps.hw_lock);
4002}
4003
4004static void cherryview_init_gt_powersave(struct drm_device *dev)
4005{
4006 struct drm_i915_private *dev_priv = dev->dev_private;
4007
4008 cherryview_setup_pctx(dev);
4009
4010 mutex_lock(&dev_priv->rps.hw_lock);
4011
4012 dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
4013 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
4014 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
4015 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
4016 dev_priv->rps.max_freq);
4017
4018 dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
4019 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
4020 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
4021 dev_priv->rps.efficient_freq);
4022
4023 dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
4024 DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
4025 vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
4026 dev_priv->rps.rp1_freq);
4027
4028 dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
4029 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
4030 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
4031 dev_priv->rps.min_freq);
4032
4033
4034 if (dev_priv->rps.max_freq_softlimit == 0)
4035 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
4036
4037 if (dev_priv->rps.min_freq_softlimit == 0)
4038 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
4039
4040 mutex_unlock(&dev_priv->rps.hw_lock);
4041}
4042
4043static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
4044{
4045 valleyview_cleanup_pctx(dev);
4046}
4047
4048static void cherryview_enable_rps(struct drm_device *dev)
4049{
4050 struct drm_i915_private *dev_priv = dev->dev_private;
4051 struct intel_engine_cs *ring;
4052 u32 gtfifodbg, val, rc6_mode = 0, pcbr;
4053 int i;
4054
4055 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4056
4057 gtfifodbg = I915_READ(GTFIFODBG);
4058 if (gtfifodbg) {
4059 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
4060 gtfifodbg);
4061 I915_WRITE(GTFIFODBG, gtfifodbg);
4062 }
4063
4064 cherryview_check_pctx(dev_priv);
4065
4066
4067
4068 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
4069
4070
4071 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
4072 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
4073 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
4074
4075 for_each_ring(ring, dev_priv, i)
4076 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
4077 I915_WRITE(GEN6_RC_SLEEP, 0);
4078
4079 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
4080
4081
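	/* Enable the RC6 residency counters (high range, render and media) */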
4082 I915_WRITE(VLV_COUNTER_CONTROL,
4083 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
4084 VLV_MEDIA_RC6_COUNT_EN |
4085 VLV_RENDER_RC6_COUNT_EN));
4086
4087
4088 pcbr = I915_READ(VLV_PCBR);
4089
4090 DRM_DEBUG_DRIVER("PCBR offset : 0x%x\n", pcbr);
4091
4092
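	/* Enable RC6 (EI mode) only if requested and a power context base has been set */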
4093 if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
4094 (pcbr >> VLV_PCBR_ADDR_SHIFT))
4095 rc6_mode = GEN6_RC_CTL_EI_MODE(1);
4096
4097 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
4098
4099
4100 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
4101 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
4102 I915_WRITE(GEN6_RP_UP_EI, 66000);
4103 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
4104
4105 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
4106
4107
4108 I915_WRITE(0xA80C, I915_READ(0xA80C) & 0x00ffffff);
4109 I915_WRITE(0xA810, I915_READ(0xA810) & 0xffffff00);
4110
4111
4112 I915_WRITE(GEN6_RP_CONTROL,
4113 GEN6_RP_MEDIA_HW_NORMAL_MODE |
4114 GEN6_RP_MEDIA_IS_GFX |
4115 GEN6_RP_ENABLE |
4116 GEN6_RP_UP_BUSY_AVG |
4117 GEN6_RP_DOWN_IDLE_AVG);
4118
4119 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
4120
4121 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
4122 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
4123
4124 dev_priv->rps.cur_freq = (val >> 8) & 0xff;
4125 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
4126 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
4127 dev_priv->rps.cur_freq);
4128
4129 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
4130 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
4131 dev_priv->rps.efficient_freq);
4132
4133 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
4134
4135 gen8_enable_rps_interrupts(dev);
4136
4137 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
4138}
4139
4140static void valleyview_enable_rps(struct drm_device *dev)
4141{
4142 struct drm_i915_private *dev_priv = dev->dev_private;
4143 struct intel_engine_cs *ring;
4144 u32 gtfifodbg, val, rc6_mode = 0;
4145 int i;
4146
4147 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4148
4149 valleyview_check_pctx(dev_priv);
4150
4151 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
4152 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
4153 gtfifodbg);
4154 I915_WRITE(GTFIFODBG, gtfifodbg);
4155 }
4156
4157
4158 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
4159
4160 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
4161 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
4162 I915_WRITE(GEN6_RP_UP_EI, 66000);
4163 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
4164
4165 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
4166 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 0xf4240);
4167
4168 I915_WRITE(GEN6_RP_CONTROL,
4169 GEN6_RP_MEDIA_TURBO |
4170 GEN6_RP_MEDIA_HW_NORMAL_MODE |
4171 GEN6_RP_MEDIA_IS_GFX |
4172 GEN6_RP_ENABLE |
4173 GEN6_RP_UP_BUSY_AVG |
4174 GEN6_RP_DOWN_IDLE_CONT);
4175
4176 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
4177 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
4178 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
4179
4180 for_each_ring(ring, dev_priv, i)
4181 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
4182
4183 I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
4184
4185
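	/* Enable the RC0/RC6 residency counters for render and media */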
4186 I915_WRITE(VLV_COUNTER_CONTROL,
4187 _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
4188 VLV_RENDER_RC0_COUNT_EN |
4189 VLV_MEDIA_RC6_COUNT_EN |
4190 VLV_RENDER_RC6_COUNT_EN));
4191
4192 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
4193 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
4194
4195 intel_print_rc6_info(dev, rc6_mode);
4196
4197 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
4198
4199 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
4200
4201 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
4202 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
4203
4204 dev_priv->rps.cur_freq = (val >> 8) & 0xff;
4205 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
4206 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
4207 dev_priv->rps.cur_freq);
4208
4209 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
4210 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
4211 dev_priv->rps.efficient_freq);
4212
4213 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
4214
4215 gen6_enable_rps_interrupts(dev);
4216
4217 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
4218}
4219
4220void ironlake_teardown_rc6(struct drm_device *dev)
4221{
4222 struct drm_i915_private *dev_priv = dev->dev_private;
4223
4224 if (dev_priv->ips.renderctx) {
4225 i915_gem_object_ggtt_unpin(dev_priv->ips.renderctx);
4226 drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
4227 dev_priv->ips.renderctx = NULL;
4228 }
4229
4230 if (dev_priv->ips.pwrctx) {
4231 i915_gem_object_ggtt_unpin(dev_priv->ips.pwrctx);
4232 drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
4233 dev_priv->ips.pwrctx = NULL;
4234 }
4235}
4236
4237static void ironlake_disable_rc6(struct drm_device *dev)
4238{
4239 struct drm_i915_private *dev_priv = dev->dev_private;
4240
4241 if (I915_READ(PWRCTXA)) {
4242
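 /* Wake the GPU, prevent further RC6 entry, then restore RSTDBYCTL. */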
4243 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
4244 wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
4245 50);
4246
4247 I915_WRITE(PWRCTXA, 0);
4248 POSTING_READ(PWRCTXA);
4249
4250 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
4251 POSTING_READ(RSTDBYCTL);
4252 }
4253}
4254
4255static int ironlake_setup_rc6(struct drm_device *dev)
4256{
4257 struct drm_i915_private *dev_priv = dev->dev_private;
4258
4259 if (dev_priv->ips.renderctx == NULL)
4260 dev_priv->ips.renderctx = intel_alloc_context_page(dev);
4261 if (!dev_priv->ips.renderctx)
4262 return -ENOMEM;
4263
4264 if (dev_priv->ips.pwrctx == NULL)
4265 dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
4266 if (!dev_priv->ips.pwrctx) {
4267 ironlake_teardown_rc6(dev);
4268 return -ENOMEM;
4269 }
4270
4271 return 0;
4272}
4273
4274static void ironlake_enable_rc6(struct drm_device *dev)
4275{
4276 struct drm_i915_private *dev_priv = dev->dev_private;
4277 struct intel_engine_cs *ring = &dev_priv->ring[RCS];
4278 bool was_interruptible;
4279 int ret;
4280

 /* RC6 is opt-in here: bail out early unless it was enabled via the
  * module parameter. */
4284 if (!intel_enable_rc6(dev))
4285 return;
4286
4287 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
4288
4289 ret = ironlake_setup_rc6(dev);
4290 if (ret)
4291 return;
4292
4293 was_interruptible = dev_priv->mm.interruptible;
4294 dev_priv->mm.interruptible = false;
4295

 /*
  * The GPU can power the render unit down on its own once it has a
  * context page to save state into, so emit MI_SET_CONTEXT pointing at
  * the render context allocated above.
  */
4300 ret = intel_ring_begin(ring, 6);
4301 if (ret) {
4302 ironlake_teardown_rc6(dev);
4303 dev_priv->mm.interruptible = was_interruptible;
4304 return;
4305 }
4306
4307 intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
4308 intel_ring_emit(ring, MI_SET_CONTEXT);
4309 intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
4310 MI_MM_SPACE_GTT |
4311 MI_SAVE_EXT_STATE_EN |
4312 MI_RESTORE_EXT_STATE_EN |
4313 MI_RESTORE_INHIBIT);
4314 intel_ring_emit(ring, MI_SUSPEND_FLUSH);
4315 intel_ring_emit(ring, MI_NOOP);
4316 intel_ring_emit(ring, MI_FLUSH);
4317 intel_ring_advance(ring);
4318

 /*
  * Wait for the ring to go idle so that MI_SET_CONTEXT (together with
  * the MI_FLUSH above) has completed and the render context is valid
  * before we point the hardware at the power context below.
  */
4324 ret = intel_ring_idle(ring);
4325 dev_priv->mm.interruptible = was_interruptible;
4326 if (ret) {
4327 DRM_ERROR("failed to enable ironlake power savings\n");
4328 ironlake_teardown_rc6(dev);
4329 return;
4330 }
4331
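 /* Point the hardware at the power context and allow it to enter render standby. */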
4332 I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
4333 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
4334
4335 intel_print_rc6_info(dev, GEN6_RC_CTL_RC6_ENABLE);
4336}
4337
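/* Decode a PXVFREQ register value into a frequency: 133 MHz reference scaled by the div/post/pre fields. */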
4338static unsigned long intel_pxfreq(u32 vidfreq)
4339{
4340 unsigned long freq;
4341 int div = (vidfreq & 0x3f0000) >> 16;
4342 int post = (vidfreq & 0x3000) >> 12;
4343 int pre = (vidfreq & 0x7);
4344
4345 if (!pre)
4346 return 0;
4347
4348 freq = ((div * 133333) / ((1<<post) * pre));
4349
4350 return freq;
4351}
4352
4353static const struct cparams {
4354 u16 i;
4355 u16 t;
4356 u16 m;
4357 u16 c;
4358} cparams[] = {
4359 { 1, 1333, 301, 28664 },
4360 { 1, 1066, 294, 24460 },
4361 { 1, 800, 294, 25192 },
4362 { 0, 1333, 276, 27605 },
4363 { 0, 1066, 276, 27605 },
4364 { 0, 800, 231, 23784 },
4365};
4366
4367static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
4368{
4369 u64 total_count, diff, ret;
4370 u32 count1, count2, count3, m = 0, c = 0;
4371 unsigned long now = jiffies_to_msecs(jiffies), diff1;
4372 int i;
4373
4374 assert_spin_locked(&mchdev_lock);
4375
4376 diff1 = now - dev_priv->ips.last_time1;
4377

 /*
  * Prevent division-by-zero if we are asking too fast. Also, polling
  * more often than once every 10 ms doesn't give interesting results,
  * so just return the cached value in that case.
  */
4383 if (diff1 <= 10)
4384 return dev_priv->ips.chipset_power;
4385
4386 count1 = I915_READ(DMIEC);
4387 count2 = I915_READ(DDREC);
4388 count3 = I915_READ(CSIEC);
4389
4390 total_count = count1 + count2 + count3;
4391
4392
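 /* Handle wrap-around of the hardware energy counters. */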
4393 if (total_count < dev_priv->ips.last_count1) {
4394 diff = ~0UL - dev_priv->ips.last_count1;
4395 diff += total_count;
4396 } else {
4397 diff = total_count - dev_priv->ips.last_count1;
4398 }
4399
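 /* Look up the (m, c) coefficients matching the current memory configuration. */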
4400 for (i = 0; i < ARRAY_SIZE(cparams); i++) {
4401 if (cparams[i].i == dev_priv->ips.c_m &&
4402 cparams[i].t == dev_priv->ips.r_t) {
4403 m = cparams[i].m;
4404 c = cparams[i].c;
4405 break;
4406 }
4407 }
4408
4409 diff = div_u64(diff, diff1);
4410 ret = ((m * diff) + c);
4411 ret = div_u64(ret, 10);
4412
4413 dev_priv->ips.last_count1 = total_count;
4414 dev_priv->ips.last_time1 = now;
4415
4416 dev_priv->ips.chipset_power = ret;
4417
4418 return ret;
4419}
4420
4421unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
4422{
4423 struct drm_device *dev = dev_priv->dev;
4424 unsigned long val;
4425
4426 if (INTEL_INFO(dev)->gen != 5)
4427 return 0;
4428
4429 spin_lock_irq(&mchdev_lock);
4430
4431 val = __i915_chipset_val(dev_priv);
4432
4433 spin_unlock_irq(&mchdev_lock);
4434
4435 return val;
4436}
4437
4438unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
4439{
4440 unsigned long m, x, b;
4441 u32 tsfs;
4442
4443 tsfs = I915_READ(TSFS);
4444
4445 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
4446 x = I915_READ8(TR1);
4447
4448 b = tsfs & TSFS_INTR_MASK;
4449
4450 return ((m * x) / 127) - b;
4451}
4452
4453static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
4454{
4455 struct drm_device *dev = dev_priv->dev;
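 /* Extended-VID lookup table indexed by the PXVID field; mobile parts use the vm column, others vd. */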
4456 static const struct v_table {
4457 u16 vd;
4458 u16 vm;
4459 } v_table[] = {
4460 { 0, 0, },
4461 { 375, 0, },
4462 { 500, 0, },
4463 { 625, 0, },
4464 { 750, 0, },
4465 { 875, 0, },
4466 { 1000, 0, },
4467 { 1125, 0, },
4468 { 4125, 3000, },
4469 { 4125, 3000, },
4470 { 4125, 3000, },
4471 { 4125, 3000, },
4472 { 4125, 3000, },
4473 { 4125, 3000, },
4474 { 4125, 3000, },
4475 { 4125, 3000, },
4476 { 4125, 3000, },
4477 { 4125, 3000, },
4478 { 4125, 3000, },
4479 { 4125, 3000, },
4480 { 4125, 3000, },
4481 { 4125, 3000, },
4482 { 4125, 3000, },
4483 { 4125, 3000, },
4484 { 4125, 3000, },
4485 { 4125, 3000, },
4486 { 4125, 3000, },
4487 { 4125, 3000, },
4488 { 4125, 3000, },
4489 { 4125, 3000, },
4490 { 4125, 3000, },
4491 { 4125, 3000, },
4492 { 4250, 3125, },
4493 { 4375, 3250, },
4494 { 4500, 3375, },
4495 { 4625, 3500, },
4496 { 4750, 3625, },
4497 { 4875, 3750, },
4498 { 5000, 3875, },
4499 { 5125, 4000, },
4500 { 5250, 4125, },
4501 { 5375, 4250, },
4502 { 5500, 4375, },
4503 { 5625, 4500, },
4504 { 5750, 4625, },
4505 { 5875, 4750, },
4506 { 6000, 4875, },
4507 { 6125, 5000, },
4508 { 6250, 5125, },
4509 { 6375, 5250, },
4510 { 6500, 5375, },
4511 { 6625, 5500, },
4512 { 6750, 5625, },
4513 { 6875, 5750, },
4514 { 7000, 5875, },
4515 { 7125, 6000, },
4516 { 7250, 6125, },
4517 { 7375, 6250, },
4518 { 7500, 6375, },
4519 { 7625, 6500, },
4520 { 7750, 6625, },
4521 { 7875, 6750, },
4522 { 8000, 6875, },
4523 { 8125, 7000, },
4524 { 8250, 7125, },
4525 { 8375, 7250, },
4526 { 8500, 7375, },
4527 { 8625, 7500, },
4528 { 8750, 7625, },
4529 { 8875, 7750, },
4530 { 9000, 7875, },
4531 { 9125, 8000, },
4532 { 9250, 8125, },
4533 { 9375, 8250, },
4534 { 9500, 8375, },
4535 { 9625, 8500, },
4536 { 9750, 8625, },
4537 { 9875, 8750, },
4538 { 10000, 8875, },
4539 { 10125, 9000, },
4540 { 10250, 9125, },
4541 { 10375, 9250, },
4542 { 10500, 9375, },
4543 { 10625, 9500, },
4544 { 10750, 9625, },
4545 { 10875, 9750, },
4546 { 11000, 9875, },
4547 { 11125, 10000, },
4548 { 11250, 10125, },
4549 { 11375, 10250, },
4550 { 11500, 10375, },
4551 { 11625, 10500, },
4552 { 11750, 10625, },
4553 { 11875, 10750, },
4554 { 12000, 10875, },
4555 { 12125, 11000, },
4556 { 12250, 11125, },
4557 { 12375, 11250, },
4558 { 12500, 11375, },
4559 { 12625, 11500, },
4560 { 12750, 11625, },
4561 { 12875, 11750, },
4562 { 13000, 11875, },
4563 { 13125, 12000, },
4564 { 13250, 12125, },
4565 { 13375, 12250, },
4566 { 13500, 12375, },
4567 { 13625, 12500, },
4568 { 13750, 12625, },
4569 { 13875, 12750, },
4570 { 14000, 12875, },
4571 { 14125, 13000, },
4572 { 14250, 13125, },
4573 { 14375, 13250, },
4574 { 14500, 13375, },
4575 { 14625, 13500, },
4576 { 14750, 13625, },
4577 { 14875, 13750, },
4578 { 15000, 13875, },
4579 { 15125, 14000, },
4580 { 15250, 14125, },
4581 { 15375, 14250, },
4582 { 15500, 14375, },
4583 { 15625, 14500, },
4584 { 15750, 14625, },
4585 { 15875, 14750, },
4586 { 16000, 14875, },
4587 { 16125, 15000, },
4588 };
4589 if (INTEL_INFO(dev)->is_mobile)
4590 return v_table[pxvid].vm;
4591 else
4592 return v_table[pxvid].vd;
4593}
4594
4595static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
4596{
4597 u64 now, diff, diffms;
4598 u32 count;
4599
4600 assert_spin_locked(&mchdev_lock);
4601
4602 now = ktime_get_raw_ns();
4603 diffms = now - dev_priv->ips.last_time2;
4604 do_div(diffms, NSEC_PER_MSEC);
4605
4606
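 /* Don't divide by zero if we get called again within the same millisecond. */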
4607 if (!diffms)
4608 return;
4609
4610 count = I915_READ(GFXEC);
4611
4612 if (count < dev_priv->ips.last_count2) {
4613 diff = ~0UL - dev_priv->ips.last_count2;
4614 diff += count;
4615 } else {
4616 diff = count - dev_priv->ips.last_count2;
4617 }
4618
4619 dev_priv->ips.last_count2 = count;
4620 dev_priv->ips.last_time2 = now;
4621
4622
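 /* Scale the count delta by the magic 1181/10 factor per millisecond elapsed. */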
4623 diff = diff * 1181;
4624 diff = div_u64(diff, diffms * 10);
4625 dev_priv->ips.gfx_power = diff;
4626}
4627
4628void i915_update_gfx_val(struct drm_i915_private *dev_priv)
4629{
4630 struct drm_device *dev = dev_priv->dev;
4631
4632 if (INTEL_INFO(dev)->gen != 5)
4633 return;
4634
4635 spin_lock_irq(&mchdev_lock);
4636
4637 __i915_update_gfx_val(dev_priv);
4638
4639 spin_unlock_irq(&mchdev_lock);
4640}
4641
4642static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
4643{
4644 unsigned long t, corr, state1, corr2, state2;
4645 u32 pxvid, ext_v;
4646
4647 assert_spin_locked(&mchdev_lock);
4648
4649 pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4));
4650 pxvid = (pxvid >> 24) & 0x7f;
4651 ext_v = pvid_to_extvid(dev_priv, pxvid);
4652
4653 state1 = ext_v;
4654
4655 t = i915_mch_val(dev_priv);

 /* Correction factor in 1/100000 units; the constants are empirically
  * derived and depend on the current temperature reading. */

4660 if (t > 80)
4661 corr = ((t * 2349) + 135940);
4662 else if (t >= 50)
4663 corr = ((t * 964) + 29317);
4664 else
4665 corr = ((t * 301) + 1004);
4666
4667 corr = corr * ((150142 * state1) / 10000 - 78642);
4668 corr /= 100000;
4669 corr2 = (corr * dev_priv->ips.corr);
4670
4671 state2 = (corr2 * state1) / 10000;
4672 state2 /= 100;
4673
4674 __i915_update_gfx_val(dev_priv);
4675
4676 return dev_priv->ips.gfx_power + state2;
4677}
4678
4679unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
4680{
4681 struct drm_device *dev = dev_priv->dev;
4682 unsigned long val;
4683
4684 if (INTEL_INFO(dev)->gen != 5)
4685 return 0;
4686
4687 spin_lock_irq(&mchdev_lock);
4688
4689 val = __i915_gfx_val(dev_priv);
4690
4691 spin_unlock_irq(&mchdev_lock);
4692
4693 return val;
4694}
4695

/**
 * i915_read_mch_val - return value for IPS use
 *
 * Calculate and return a value for the IPS driver to use when deciding
 * whether we have thermal and power headroom to increase CPU or GPU power
 * budget.
 */
4702unsigned long i915_read_mch_val(void)
4703{
4704 struct drm_i915_private *dev_priv;
4705 unsigned long chipset_val, graphics_val, ret = 0;
4706
4707 spin_lock_irq(&mchdev_lock);
4708 if (!i915_mch_dev)
4709 goto out_unlock;
4710 dev_priv = i915_mch_dev;
4711
4712 chipset_val = __i915_chipset_val(dev_priv);
4713 graphics_val = __i915_gfx_val(dev_priv);
4714
4715 ret = chipset_val + graphics_val;
4716
4717out_unlock:
4718 spin_unlock_irq(&mchdev_lock);
4719
4720 return ret;
4721}
4722EXPORT_SYMBOL_GPL(i915_read_mch_val);
4723

/**
 * i915_gpu_raise - raise GPU frequency limit
 *
 * Raise the limit; IPS indicates we have thermal headroom.
 */
4729bool i915_gpu_raise(void)
4730{
4731 struct drm_i915_private *dev_priv;
4732 bool ret = true;
4733
4734 spin_lock_irq(&mchdev_lock);
4735 if (!i915_mch_dev) {
4736 ret = false;
4737 goto out_unlock;
4738 }
4739 dev_priv = i915_mch_dev;
4740
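 /* Delay values are inverse to frequency: decreasing max_delay raises the frequency ceiling. */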
4741 if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
4742 dev_priv->ips.max_delay--;
4743
4744out_unlock:
4745 spin_unlock_irq(&mchdev_lock);
4746
4747 return ret;
4748}
4749EXPORT_SYMBOL_GPL(i915_gpu_raise);
4750

/**
 * i915_gpu_lower - lower GPU frequency limit
 *
 * IPS indicates we're close to a thermal limit, so throttle back the GPU
 * frequency to prevent further problems.
 */
4757bool i915_gpu_lower(void)
4758{
4759 struct drm_i915_private *dev_priv;
4760 bool ret = true;
4761
4762 spin_lock_irq(&mchdev_lock);
4763 if (!i915_mch_dev) {
4764 ret = false;
4765 goto out_unlock;
4766 }
4767 dev_priv = i915_mch_dev;
4768
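 /* Increasing max_delay lowers the maximum GPU frequency. */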
4769 if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
4770 dev_priv->ips.max_delay++;
4771
4772out_unlock:
4773 spin_unlock_irq(&mchdev_lock);
4774
4775 return ret;
4776}
4777EXPORT_SYMBOL_GPL(i915_gpu_lower);
4778

/**
 * i915_gpu_busy - indicate GPU business to IPS
 *
 * Tell the IPS driver whether or not the GPU is busy.
 */
4784bool i915_gpu_busy(void)
4785{
4786 struct drm_i915_private *dev_priv;
4787 struct intel_engine_cs *ring;
4788 bool ret = false;
4789 int i;
4790
4791 spin_lock_irq(&mchdev_lock);
4792 if (!i915_mch_dev)
4793 goto out_unlock;
4794 dev_priv = i915_mch_dev;
4795
4796 for_each_ring(ring, dev_priv, i)
4797 ret |= !list_empty(&ring->request_list);
4798
4799out_unlock:
4800 spin_unlock_irq(&mchdev_lock);
4801
4802 return ret;
4803}
4804EXPORT_SYMBOL_GPL(i915_gpu_busy);
4805

/**
 * i915_gpu_turbo_disable - disable graphics turbo
 *
 * Disable graphics turbo by resetting the max frequency and setting the
 * current frequency to the default.
 */
4812bool i915_gpu_turbo_disable(void)
4813{
4814 struct drm_i915_private *dev_priv;
4815 bool ret = true;
4816
4817 spin_lock_irq(&mchdev_lock);
4818 if (!i915_mch_dev) {
4819 ret = false;
4820 goto out_unlock;
4821 }
4822 dev_priv = i915_mch_dev;
4823
4824 dev_priv->ips.max_delay = dev_priv->ips.fstart;
4825
4826 if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
4827 ret = false;
4828
4829out_unlock:
4830 spin_unlock_irq(&mchdev_lock);
4831
4832 return ret;
4833}
4834EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
4835

/*
 * Tell the intel_ips driver that the i915 driver is now loaded, if IPS got
 * loaded first.
 *
 * This awkward dance is needed so that neither module has to depend on the
 * other in order for IPS to do the appropriate communication of GPU turbo
 * limits to i915.
 */
4844static void
4845ips_ping_for_i915_load(void)
4846{
4847 void (*link)(void);
4848
4849 link = symbol_get(ips_link_to_i915_driver);
4850 if (link) {
4851 link();
4852 symbol_put(ips_link_to_i915_driver);
4853 }
4854}
4855
4856void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
4857{
4858
4859
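 /* Register with intel-ips only once everything is set up, so it never reads half-initialized values. */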
4860 spin_lock_irq(&mchdev_lock);
4861 i915_mch_dev = dev_priv;
4862 spin_unlock_irq(&mchdev_lock);
4863
4864 ips_ping_for_i915_load();
4865}
4866
4867void intel_gpu_ips_teardown(void)
4868{
4869 spin_lock_irq(&mchdev_lock);
4870 i915_mch_dev = NULL;
4871 spin_unlock_irq(&mchdev_lock);
4872}
4873
4874static void intel_init_emon(struct drm_device *dev)
4875{
4876 struct drm_i915_private *dev_priv = dev->dev_private;
4877 u32 lcfuse;
4878 u8 pxw[16];
4879 int i;
4880
4881
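 /* Disable energy event counting while the weights are programmed. */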
4882 I915_WRITE(ECR, 0);
4883 POSTING_READ(ECR);
4884
4885
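 /* Program energy weights for the various events. */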
4886 I915_WRITE(SDEW, 0x15040d00);
4887 I915_WRITE(CSIEW0, 0x007f0000);
4888 I915_WRITE(CSIEW1, 0x1e220004);
4889 I915_WRITE(CSIEW2, 0x04000004);
4890
4891 for (i = 0; i < 5; i++)
4892 I915_WRITE(PEW + (i * 4), 0);
4893 for (i = 0; i < 3; i++)
4894 I915_WRITE(DEW + (i * 4), 0);
4895
4896
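 /* Program P-state weights to account for frequency/voltage power adjustment. */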
4897 for (i = 0; i < 16; i++) {
4898 u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
4899 unsigned long freq = intel_pxfreq(pxvidfreq);
4900 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
4901 PXVFREQ_PX_SHIFT;
4902 unsigned long val;
4903
4904 val = vid * vid;
4905 val *= (freq / 1000);
4906 val *= 255;
4907 val /= (127*127*900);
4908 if (val > 0xff)
4909 DRM_ERROR("bad pxval: %ld\n", val);
4910 pxw[i] = val;
4911 }
4912
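 /* Render standby states get zero weight. */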
4913 pxw[14] = 0;
4914 pxw[15] = 0;
4915
4916 for (i = 0; i < 4; i++) {
4917 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
4918 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
4919 I915_WRITE(PXW + (i * 4), val);
4920 }
4921
4922
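 /* The remaining energy monitor registers take fixed magic values. */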
4923 I915_WRITE(OGW0, 0);
4924 I915_WRITE(OGW1, 0);
4925 I915_WRITE(EG0, 0x00007f00);
4926 I915_WRITE(EG1, 0x0000000e);
4927 I915_WRITE(EG2, 0x000e0000);
4928 I915_WRITE(EG3, 0x68000300);
4929 I915_WRITE(EG4, 0x42000000);
4930 I915_WRITE(EG5, 0x00140031);
4931 I915_WRITE(EG6, 0);
4932 I915_WRITE(EG7, 0);
4933
4934 for (i = 0; i < 8; i++)
4935 I915_WRITE(PXWL + (i * 4), 0);
4936
4937
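 /* Enable the power monitor and select the events to count. */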
4938 I915_WRITE(ECR, 0x80000019);
4939
4940 lcfuse = I915_READ(LCFUSE02);
4941
4942 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
4943}
4944
4945void intel_init_gt_powersave(struct drm_device *dev)
4946{
4947 i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
4948
4949 if (IS_CHERRYVIEW(dev))
4950 cherryview_init_gt_powersave(dev);
4951 else if (IS_VALLEYVIEW(dev))
4952 valleyview_init_gt_powersave(dev);
4953}
4954
4955void intel_cleanup_gt_powersave(struct drm_device *dev)
4956{
4957 if (IS_CHERRYVIEW(dev))
4958 return;
4959 else if (IS_VALLEYVIEW(dev))
4960 valleyview_cleanup_gt_powersave(dev);
4961}
4962

/**
 * intel_suspend_gt_powersave - suspend PM work and helper threads
 * @dev: drm device
 *
 * We don't want to disable RC6 or other features here, we just want to make
 * sure any work we've queued has finished and won't bother us while we're
 * suspended.
 */
4971void intel_suspend_gt_powersave(struct drm_device *dev)
4972{
4973 struct drm_i915_private *dev_priv = dev->dev_private;
4974
4975
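 /* Interrupts should be disabled already to avoid re-arming the queued work. */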
4976 WARN_ON(intel_irqs_enabled(dev_priv));
4977
4978 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
4979
4980 cancel_work_sync(&dev_priv->rps.work);
4981
4982
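 /* Park the GPU at its minimum frequency while we are suspended. */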
4983 gen6_rps_idle(dev_priv);
4984}
4985
4986void intel_disable_gt_powersave(struct drm_device *dev)
4987{
4988 struct drm_i915_private *dev_priv = dev->dev_private;
4989
4990
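 /* Interrupts should be disabled already to avoid re-arming the queued work. */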
4991 WARN_ON(intel_irqs_enabled(dev_priv));
4992
4993 if (IS_IRONLAKE_M(dev)) {
4994 ironlake_disable_drps(dev);
4995 ironlake_disable_rc6(dev);
4996 } else if (INTEL_INFO(dev)->gen >= 6) {
4997 intel_suspend_gt_powersave(dev);
4998
4999 mutex_lock(&dev_priv->rps.hw_lock);
5000 if (IS_CHERRYVIEW(dev))
5001 cherryview_disable_rps(dev);
5002 else if (IS_VALLEYVIEW(dev))
5003 valleyview_disable_rps(dev);
5004 else
5005 gen6_disable_rps(dev);
5006 dev_priv->rps.enabled = false;
5007 mutex_unlock(&dev_priv->rps.hw_lock);
5008 }
5009}
5010
5011static void intel_gen6_powersave_work(struct work_struct *work)
5012{
5013 struct drm_i915_private *dev_priv =
5014 container_of(work, struct drm_i915_private,
5015 rps.delayed_resume_work.work);
5016 struct drm_device *dev = dev_priv->dev;
5017
5018 mutex_lock(&dev_priv->rps.hw_lock);
5019
5020 if (IS_CHERRYVIEW(dev)) {
5021 cherryview_enable_rps(dev);
5022 } else if (IS_VALLEYVIEW(dev)) {
5023 valleyview_enable_rps(dev);
5024 } else if (IS_BROADWELL(dev)) {
5025 gen8_enable_rps(dev);
5026 __gen6_update_ring_freq(dev);
5027 } else {
5028 gen6_enable_rps(dev);
5029 __gen6_update_ring_freq(dev);
5030 }
5031 dev_priv->rps.enabled = true;
5032 mutex_unlock(&dev_priv->rps.hw_lock);
5033
5034 intel_runtime_pm_put(dev_priv);
5035}
5036
5037void intel_enable_gt_powersave(struct drm_device *dev)
5038{
5039 struct drm_i915_private *dev_priv = dev->dev_private;
5040
5041 if (IS_IRONLAKE_M(dev)) {
5042 mutex_lock(&dev->struct_mutex);
5043 ironlake_enable_drps(dev);
5044 ironlake_enable_rc6(dev);
5045 intel_init_emon(dev);
5046 mutex_unlock(&dev->struct_mutex);
5047 } else if (INTEL_INFO(dev)->gen >= 6) {

 /*
  * PCU communication is slow and this doesn't need to be done at any
  * specific time, so do it out of our fast path to make resume and
  * init faster.
  *
  * We rely on the hardware RC6 power context save/restore mechanism
  * when entering D3 through runtime PM suspend, so disable runtime PM
  * until RPS/RC6 is properly set up. We can only get here via the
  * driver load, system resume or runtime resume paths, so the
  * _noresume variant is sufficient (and necessary for runtime resume).
  */
5060 if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
5061 round_jiffies_up_relative(HZ)))
5062 intel_runtime_pm_get_noresume(dev_priv);
5063 }
5064}
5065
5066void intel_reset_gt_powersave(struct drm_device *dev)
5067{
5068 struct drm_i915_private *dev_priv = dev->dev_private;
5069
5070 dev_priv->rps.enabled = false;
5071 intel_enable_gt_powersave(dev);
5072}
5073
5074static void ibx_init_clock_gating(struct drm_device *dev)
5075{
5076 struct drm_i915_private *dev_priv = dev->dev_private;
5077

 /*
  * On Ibex Peak and Cougar Point, we need to disable clock gating for
  * the panel power sequencer or it will fail to start up when no ports
  * are active.
  */
5083 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
5084}
5085
5086static void g4x_disable_trickle_feed(struct drm_device *dev)
5087{
5088 struct drm_i915_private *dev_priv = dev->dev_private;
5089 int pipe;
5090
5091 for_each_pipe(pipe) {
5092 I915_WRITE(DSPCNTR(pipe),
5093 I915_READ(DSPCNTR(pipe)) |
5094 DISPPLANE_TRICKLE_FEED_DISABLE);
5095 intel_flush_primary_plane(dev_priv, pipe);
5096 }
5097}
5098
5099static void ilk_init_lp_watermarks(struct drm_device *dev)
5100{
5101 struct drm_i915_private *dev_priv = dev->dev_private;
5102
5103 I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
5104 I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
5105 I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
5106

 /*
  * Don't touch WM1S_LP_EN here.
  * Doing so could cause underruns.
  */
5111}
5112
5113static void ironlake_init_clock_gating(struct drm_device *dev)
5114{
5115 struct drm_i915_private *dev_priv = dev->dev_private;
5116 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
5117

 /*
  * Required for FBC: keep the DPFC units out of clock gating while FBC
  * is in use.
  */
5122 dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
5123 ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
5124 ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
5125
5126 I915_WRITE(PCH_3DCGDIS0,
5127 MARIUNIT_CLOCK_GATE_DISABLE |
5128 SVSMUNIT_CLOCK_GATE_DISABLE);
5129 I915_WRITE(PCH_3DCGDIS1,
5130 VFMUNIT_CLOCK_GATE_DISABLE);
5131
5132
5133
5134
5135
5136
5137
5138
5139 I915_WRITE(ILK_DISPLAY_CHICKEN2,
5140 (I915_READ(ILK_DISPLAY_CHICKEN2) |
5141 ILK_DPARB_GATE | ILK_VSDPFD_FULL));
5142 dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
5143 I915_WRITE(DISP_ARB_CTL,
5144 (I915_READ(DISP_ARB_CTL) |
5145 DISP_FBC_WM_DIS));
5146
5147 ilk_init_lp_watermarks(dev);
5148
5149
5150
5151
5152
5153
5154
5155
5156 if (IS_IRONLAKE_M(dev)) {
5157
5158 I915_WRITE(ILK_DISPLAY_CHICKEN1,
5159 I915_READ(ILK_DISPLAY_CHICKEN1) |
5160 ILK_FBCQ_DIS);
5161 I915_WRITE(ILK_DISPLAY_CHICKEN2,
5162 I915_READ(ILK_DISPLAY_CHICKEN2) |
5163 ILK_DPARB_GATE);
5164 }
5165
5166 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
5167
5168 I915_WRITE(ILK_DISPLAY_CHICKEN2,
5169 I915_READ(ILK_DISPLAY_CHICKEN2) |
5170 ILK_ELPIN_409_SELECT);
5171 I915_WRITE(_3D_CHICKEN2,
5172 _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
5173 _3D_CHICKEN2_WM_READ_PIPELINED);
5174
5175
5176 I915_WRITE(CACHE_MODE_0,
5177 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
5178
5179
5180 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5181
5182 g4x_disable_trickle_feed(dev);
5183
5184 ibx_init_clock_gating(dev);
5185}
5186
5187static void cpt_init_clock_gating(struct drm_device *dev)
5188{
5189 struct drm_i915_private *dev_priv = dev->dev_private;
5190 int pipe;
5191 uint32_t val;
5192
5193
5194
5195
5196
5197
5198 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
5199 PCH_DPLUNIT_CLOCK_GATE_DISABLE |
5200 PCH_CPUNIT_CLOCK_GATE_DISABLE);
5201 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
5202 DPLS_EDP_PPS_FIX_DIS);
5203
5204
5205
5206 for_each_pipe(pipe) {
5207 val = I915_READ(TRANS_CHICKEN2(pipe));
5208 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
5209 val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
5210 if (dev_priv->vbt.fdi_rx_polarity_inverted)
5211 val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
5212 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
5213 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
5214 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
5215 I915_WRITE(TRANS_CHICKEN2(pipe), val);
5216 }
5217
5218 for_each_pipe(pipe) {
5219 I915_WRITE(TRANS_CHICKEN1(pipe),
5220 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
5221 }
5222}
5223
5224static void gen6_check_mch_setup(struct drm_device *dev)
5225{
5226 struct drm_i915_private *dev_priv = dev->dev_private;
5227 uint32_t tmp;
5228
5229 tmp = I915_READ(MCH_SSKPD);
5230 if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
5231 DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
5232 tmp);
5233}
5234
5235static void gen6_init_clock_gating(struct drm_device *dev)
5236{
5237 struct drm_i915_private *dev_priv = dev->dev_private;
5238 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
5239
5240 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
5241
5242 I915_WRITE(ILK_DISPLAY_CHICKEN2,
5243 I915_READ(ILK_DISPLAY_CHICKEN2) |
5244 ILK_ELPIN_409_SELECT);
5245
5246
5247 I915_WRITE(_3D_CHICKEN,
5248 _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
5249
5250
5251 if (IS_SNB_GT1(dev))
5252 I915_WRITE(GEN6_GT_MODE,
5253 _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
5254
5255
5256 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5257

 /*
  * BSpec recommends 8x4 WIZ hashing when MSAA is used, but in practice
  * 16x4 seems fastest.
  *
  * Note that PS/WM thread counts depend on the WIZ hashing disable bit,
  * which we don't touch here, but it's good to keep in mind.
  */
5266 I915_WRITE(GEN6_GT_MODE,
5267 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
5268
5269 ilk_init_lp_watermarks(dev);
5270
5271 I915_WRITE(CACHE_MODE_0,
5272 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
5273
5274 I915_WRITE(GEN6_UCGCTL1,
5275 I915_READ(GEN6_UCGCTL1) |
5276 GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
5277 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
5278

 /*
  * Clock gating on the RCPB unit must be disabled; leaving it enabled
  * causes pixel corruption (Z write ordering failures) in workloads
  * using alpha test or pixel discard. The spec asks for the RCC unit
  * bit as well, so disable gating on both.
  */
5292 I915_WRITE(GEN6_UCGCTL2,
5293 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
5294 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
5295
5296
5297 I915_WRITE(_3D_CHICKEN3,
5298 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));
5299
5300
5301
5302
5303
5304
5305 I915_WRITE(_3D_CHICKEN3,
5306 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));
5307
5308
5309
5310
5311
5312
5313
5314
5315
5316
5317
5318
5319 I915_WRITE(ILK_DISPLAY_CHICKEN1,
5320 I915_READ(ILK_DISPLAY_CHICKEN1) |
5321 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
5322 I915_WRITE(ILK_DISPLAY_CHICKEN2,
5323 I915_READ(ILK_DISPLAY_CHICKEN2) |
5324 ILK_DPARB_GATE | ILK_VSDPFD_FULL);
5325 I915_WRITE(ILK_DSPCLK_GATE_D,
5326 I915_READ(ILK_DSPCLK_GATE_D) |
5327 ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
5328 ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
5329
5330 g4x_disable_trickle_feed(dev);
5331
5332 cpt_init_clock_gating(dev);
5333
5334 gen6_check_mch_setup(dev);
5335}
5336
5337static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
5338{
5339 uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
5340
5341
5342
5343
5344
5345
5346
5347 reg &= ~GEN7_FF_SCHED_MASK;
5348 reg |= GEN7_FF_TS_SCHED_HW;
5349 reg |= GEN7_FF_VS_SCHED_HW;
5350 reg |= GEN7_FF_DS_SCHED_HW;
5351
5352 I915_WRITE(GEN7_FF_THREAD_MODE, reg);
5353}
5354
5355static void lpt_init_clock_gating(struct drm_device *dev)
5356{
5357 struct drm_i915_private *dev_priv = dev->dev_private;
5358
5359
5360
5361
5362
5363 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
5364 I915_WRITE(SOUTH_DSPCLK_GATE_D,
5365 I915_READ(SOUTH_DSPCLK_GATE_D) |
5366 PCH_LP_PARTITION_LEVEL_DISABLE);
5367
5368
5369 I915_WRITE(_TRANSA_CHICKEN1,
5370 I915_READ(_TRANSA_CHICKEN1) |
5371 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
5372}
5373
5374static void lpt_suspend_hw(struct drm_device *dev)
5375{
5376 struct drm_i915_private *dev_priv = dev->dev_private;
5377
5378 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
5379 uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
5380
5381 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
5382 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
5383 }
5384}
5385
5386static void gen8_init_clock_gating(struct drm_device *dev)
5387{
5388 struct drm_i915_private *dev_priv = dev->dev_private;
5389 enum pipe pipe;
5390
5391 I915_WRITE(WM3_LP_ILK, 0);
5392 I915_WRITE(WM2_LP_ILK, 0);
5393 I915_WRITE(WM1_LP_ILK, 0);
5394
5395
5396
5397
5398
5399 I915_WRITE(GEN8_ROW_CHICKEN,
5400 _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));
5401
5402
5403
5404 I915_WRITE(GEN8_ROW_CHICKEN,
5405 _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
5406
5407
5408
5409
5410
5411 I915_WRITE(HALF_SLICE_CHICKEN3,
5412 _MASKED_BIT_ENABLE(GEN8_CENTROID_PIXEL_OPT_DIS));
5413 I915_WRITE(HALF_SLICE_CHICKEN3,
5414 _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
5415 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));
5416
5417 I915_WRITE(_3D_CHICKEN3,
5418 _MASKED_BIT_ENABLE(_3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2)));
5419
5420 I915_WRITE(COMMON_SLICE_CHICKEN2,
5421 _MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE));
5422
5423 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
5424 _MASKED_BIT_ENABLE(GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE));
5425
5426
5427 I915_WRITE(GEN7_ROW_CHICKEN2,
5428 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5429
5430
5431 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
5432
5433
5434 I915_WRITE(CHICKEN_PAR1_1,
5435 I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
5436
5437
5438 for_each_pipe(pipe) {
5439 I915_WRITE(CHICKEN_PIPESL_1(pipe),
5440 I915_READ(CHICKEN_PIPESL_1(pipe)) |
5441 BDW_DPRS_MASK_VBLANK_SRD);
5442 }
5443
5444
5445
5446
5447
5448 I915_WRITE(HDC_CHICKEN0,
5449 I915_READ(HDC_CHICKEN0) |
5450 _MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT));
5451
5452
5453
5454 I915_WRITE(GEN7_FF_THREAD_MODE,
5455 I915_READ(GEN7_FF_THREAD_MODE) &
5456 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
5457
5458
5459
5460
5461
5462
5463
5464
5465
5466 I915_WRITE(GEN7_GT_MODE,
5467 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
5468
5469 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
5470 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
5471
5472
5473 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
5474 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
5475
5476
5477 I915_WRITE(CACHE_MODE_1,
5478 _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
5479}
5480
5481static void haswell_init_clock_gating(struct drm_device *dev)
5482{
5483 struct drm_i915_private *dev_priv = dev->dev_private;
5484
5485 ilk_init_lp_watermarks(dev);
5486
5487
5488 I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
5489 I915_WRITE(HSW_ROW_CHICKEN3,
5490 _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));
5491
5492
5493 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
5494 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
5495 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
5496
5497
5498 I915_WRITE(GEN7_FF_THREAD_MODE,
5499 I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);
5500
5501
5502 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5503
5504
5505 I915_WRITE(CACHE_MODE_0_GEN7,
5506 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
5507
5508
5509 I915_WRITE(CACHE_MODE_1,
5510 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
5511
5512
5513
5514
5515
5516
5517
5518
5519
5520 I915_WRITE(GEN7_GT_MODE,
5521 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
5522
5523
5524 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
5525
5526
5527 I915_WRITE(CHICKEN_PAR1_1,
5528 I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
5529
5530 lpt_init_clock_gating(dev);
5531}
5532
5533static void ivybridge_init_clock_gating(struct drm_device *dev)
5534{
5535 struct drm_i915_private *dev_priv = dev->dev_private;
5536 uint32_t snpcr;
5537
5538 ilk_init_lp_watermarks(dev);
5539
5540 I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
5541
5542
5543 I915_WRITE(_3D_CHICKEN3,
5544 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
5545
5546
5547 I915_WRITE(IVB_CHICKEN3,
5548 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
5549 CHICKEN3_DGMG_DONE_FIX_DISABLE);
5550
5551
5552 if (IS_IVB_GT1(dev))
5553 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
5554 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
5555
5556
5557 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5558
5559
5560 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
5561 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
5562
5563
5564 I915_WRITE(GEN7_L3CNTLREG1,
5565 GEN7_WA_FOR_GEN7_L3_CONTROL);
5566 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
5567 GEN7_WA_L3_CHICKEN_MODE);
5568 if (IS_IVB_GT1(dev))
5569 I915_WRITE(GEN7_ROW_CHICKEN2,
5570 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5571 else {
5572
5573 I915_WRITE(GEN7_ROW_CHICKEN2,
5574 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5575 I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
5576 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5577 }
5578
5579
5580 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
5581 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
5582
5583
5584
5585
5586
5587 I915_WRITE(GEN6_UCGCTL2,
5588 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
5589
5590
5591 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
5592 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
5593 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
5594
5595 g4x_disable_trickle_feed(dev);
5596
5597 gen7_setup_fixed_func_scheduler(dev_priv);
5598
5599 if (0) {
5600
5601 I915_WRITE(CACHE_MODE_0_GEN7,
5602 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
5603 }
5604
5605
5606 I915_WRITE(CACHE_MODE_1,
5607 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
5608
5609
5610
5611
5612
5613
5614
5615
5616
5617 I915_WRITE(GEN7_GT_MODE,
5618 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
5619
5620 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
5621 snpcr &= ~GEN6_MBC_SNPCR_MASK;
5622 snpcr |= GEN6_MBC_SNPCR_MED;
5623 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
5624
5625 if (!HAS_PCH_NOP(dev))
5626 cpt_init_clock_gating(dev);
5627
5628 gen6_check_mch_setup(dev);
5629}
5630
5631static void valleyview_init_clock_gating(struct drm_device *dev)
5632{
5633 struct drm_i915_private *dev_priv = dev->dev_private;
5634 u32 val;
5635
5636 mutex_lock(&dev_priv->rps.hw_lock);
5637 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
5638 mutex_unlock(&dev_priv->rps.hw_lock);
5639 switch ((val >> 6) & 3) {
5640 case 0:
5641 case 1:
5642 dev_priv->mem_freq = 800;
5643 break;
5644 case 2:
5645 dev_priv->mem_freq = 1066;
5646 break;
5647 case 3:
5648 dev_priv->mem_freq = 1333;
5649 break;
5650 }
 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
5652
5653 I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
5654
5655
5656 I915_WRITE(_3D_CHICKEN3,
5657 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
5658
5659
5660 I915_WRITE(IVB_CHICKEN3,
5661 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
5662 CHICKEN3_DGMG_DONE_FIX_DISABLE);
5663
5664
5665
5666 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
5667 _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
5668 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
5669
5670
5671 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5672
5673
5674 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
5675 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
5676
5677
5678 I915_WRITE(GEN7_ROW_CHICKEN2,
5679 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5680
5681
5682 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
5683 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
5684 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
5685
5686 gen7_setup_fixed_func_scheduler(dev_priv);
5687
5688
5689
5690
5691
5692 I915_WRITE(GEN6_UCGCTL2,
5693 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
5694
5695
5696
5697
5698 I915_WRITE(GEN7_UCGCTL4,
5699 I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
5700
5701 I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
5702
5703
5704
5705
5706
5707 I915_WRITE(CACHE_MODE_1,
5708 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
5709
5710
5711
5712
5713
5714 I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
5715
5716
5717
5718
5719
5720
5721 I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
5722}
5723
5724static void cherryview_init_clock_gating(struct drm_device *dev)
5725{
5726 struct drm_i915_private *dev_priv = dev->dev_private;
5727 u32 val;
5728
5729 mutex_lock(&dev_priv->rps.hw_lock);
5730 val = vlv_punit_read(dev_priv, CCK_FUSE_REG);
5731 mutex_unlock(&dev_priv->rps.hw_lock);
5732 switch ((val >> 2) & 0x7) {
5733 case 0:
5734 case 1:
5735 dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_200;
5736 dev_priv->mem_freq = 1600;
5737 break;
5738 case 2:
5739 dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_267;
5740 dev_priv->mem_freq = 1600;
5741 break;
5742 case 3:
5743 dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_333;
5744 dev_priv->mem_freq = 2000;
5745 break;
5746 case 4:
5747 dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_320;
5748 dev_priv->mem_freq = 1600;
5749 break;
5750 case 5:
5751 dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_400;
5752 dev_priv->mem_freq = 1600;
5753 break;
5754 }
 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
5756
5757 I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
5758
5759 I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
5760
5761
5762 I915_WRITE(GEN8_ROW_CHICKEN,
5763 _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));
5764
5765
5766 I915_WRITE(GEN8_ROW_CHICKEN,
5767 _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
5768
5769
5770
5771 I915_WRITE(GEN7_FF_THREAD_MODE,
5772 I915_READ(GEN7_FF_THREAD_MODE) &
5773 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
5774
5775
5776 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
5777 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
5778
5779
5780 I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
5781 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
5782
5783
5784 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
5785 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
5786
5787
5788 I915_WRITE(HALF_SLICE_CHICKEN3,
5789 _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
5790
5791
5792 I915_WRITE(VLV_GUNIT_CLOCK_GATE, I915_READ(VLV_GUNIT_CLOCK_GATE) |
5793 GINT_DIS);
5794
5795
5796 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
5797 _MASKED_BIT_ENABLE(GEN8_FF_DOP_CLOCK_GATE_DISABLE));
5798
5799
5800 I915_WRITE(GEN7_ROW_CHICKEN2,
5801 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5802 I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
5803 GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
5804}
5805
5806static void g4x_init_clock_gating(struct drm_device *dev)
5807{
5808 struct drm_i915_private *dev_priv = dev->dev_private;
5809 uint32_t dspclk_gate;
5810
5811 I915_WRITE(RENCLK_GATE_D1, 0);
5812 I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
5813 GS_UNIT_CLOCK_GATE_DISABLE |
5814 CL_UNIT_CLOCK_GATE_DISABLE);
5815 I915_WRITE(RAMCLK_GATE_D, 0);
5816 dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
5817 OVRUNIT_CLOCK_GATE_DISABLE |
5818 OVCUNIT_CLOCK_GATE_DISABLE;
5819 if (IS_GM45(dev))
5820 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
5821 I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
5822
5823
5824 I915_WRITE(CACHE_MODE_0,
5825 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
5826
5827
5828 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5829
5830 g4x_disable_trickle_feed(dev);
5831}
5832
5833static void crestline_init_clock_gating(struct drm_device *dev)
5834{
5835 struct drm_i915_private *dev_priv = dev->dev_private;
5836
5837 I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
5838 I915_WRITE(RENCLK_GATE_D2, 0);
5839 I915_WRITE(DSPCLK_GATE_D, 0);
5840 I915_WRITE(RAMCLK_GATE_D, 0);
5841 I915_WRITE16(DEUC, 0);
5842 I915_WRITE(MI_ARB_STATE,
5843 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
5844
5845
5846 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5847}
5848
5849static void broadwater_init_clock_gating(struct drm_device *dev)
5850{
5851 struct drm_i915_private *dev_priv = dev->dev_private;
5852
5853 I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
5854 I965_RCC_CLOCK_GATE_DISABLE |
5855 I965_RCPB_CLOCK_GATE_DISABLE |
5856 I965_ISC_CLOCK_GATE_DISABLE |
5857 I965_FBC_CLOCK_GATE_DISABLE);
5858 I915_WRITE(RENCLK_GATE_D2, 0);
5859 I915_WRITE(MI_ARB_STATE,
5860 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
5861
5862
5863 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5864}
5865
5866static void gen3_init_clock_gating(struct drm_device *dev)
5867{
5868 struct drm_i915_private *dev_priv = dev->dev_private;
5869 u32 dstate = I915_READ(D_STATE);
5870
5871 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
5872 DSTATE_DOT_CLOCK_GATING;
5873 I915_WRITE(D_STATE, dstate);
5874
5875 if (IS_PINEVIEW(dev))
5876 I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
5877
5878
5879 I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
5880
5881
5882 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
5883
5884
5885 I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
5886}
5887
5888static void i85x_init_clock_gating(struct drm_device *dev)
5889{
5890 struct drm_i915_private *dev_priv = dev->dev_private;
5891
5892 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
5893
5894
5895 I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
5896 _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
5897}
5898
5899static void i830_init_clock_gating(struct drm_device *dev)
5900{
5901 struct drm_i915_private *dev_priv = dev->dev_private;
5902
5903 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
5904}
5905
5906void intel_init_clock_gating(struct drm_device *dev)
5907{
5908 struct drm_i915_private *dev_priv = dev->dev_private;
5909
5910 dev_priv->display.init_clock_gating(dev);
5911}
5912
5913void intel_suspend_hw(struct drm_device *dev)
5914{
5915 if (HAS_PCH_LPT(dev))
5916 lpt_suspend_hw(dev);
5917}
5918
5919#define for_each_power_well(i, power_well, domain_mask, power_domains) \
5920 for (i = 0; \
5921 i < (power_domains)->power_well_count && \
5922 ((power_well) = &(power_domains)->power_wells[i]); \
5923 i++) \
5924 if ((power_well)->domains & (domain_mask))
5925
5926#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
5927 for (i = (power_domains)->power_well_count - 1; \
5928 i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
5929 i--) \
5930 if ((power_well)->domains & (domain_mask))
5931

/*
 * Only report the power well as enabled if we both requested it through the
 * driver register and the hardware reports the enabled state.
 */
5937static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
5938 struct i915_power_well *power_well)
5939{
5940 return I915_READ(HSW_PWR_WELL_DRIVER) ==
5941 (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
5942}
5943
5944bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
5945 enum intel_display_power_domain domain)
5946{
5947 struct i915_power_domains *power_domains;
5948 struct i915_power_well *power_well;
5949 bool is_enabled;
5950 int i;
5951
5952 if (dev_priv->pm.suspended)
5953 return false;
5954
5955 power_domains = &dev_priv->power_domains;
5956
5957 is_enabled = true;
5958
5959 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
5960 if (power_well->always_on)
5961 continue;
5962
5963 if (!power_well->hw_enabled) {
5964 is_enabled = false;
5965 break;
5966 }
5967 }
5968
5969 return is_enabled;
5970}
5971
5972bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
5973 enum intel_display_power_domain domain)
5974{
5975 struct i915_power_domains *power_domains;
5976 bool ret;
5977
5978 power_domains = &dev_priv->power_domains;
5979
5980 mutex_lock(&power_domains->lock);
5981 ret = intel_display_power_enabled_unlocked(dev_priv, domain);
5982 mutex_unlock(&power_domains->lock);
5983
5984 return ret;
5985}
5986

/*
 * Starting with Haswell, the display "power well" can be turned off when it
 * is not needed. Several request registers (BIOS, driver, KVMr, debug) can
 * ask for it to be enabled, and it is only powered down once none of them
 * requests it.
 */
5993static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
5994{
5995 struct drm_device *dev = dev_priv->dev;
5996

 /*
  * After the power well comes back up, poke the VGA MSR (read it and
  * write the value back) while holding the legacy VGA I/O resource;
  * this brings the VGA registers back into a sane state before anything
  * else touches them.
  */
6007 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
6008 outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
6009 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
6010
6011 if (IS_BROADWELL(dev))
6012 gen8_irq_power_well_post_enable(dev_priv);
6013}
6014
6015static void hsw_set_power_well(struct drm_i915_private *dev_priv,
6016 struct i915_power_well *power_well, bool enable)
6017{
6018 bool is_enabled, enable_requested;
6019 uint32_t tmp;
6020
6021 tmp = I915_READ(HSW_PWR_WELL_DRIVER);
6022 is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
6023 enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
6024
6025 if (enable) {
6026 if (!enable_requested)
6027 I915_WRITE(HSW_PWR_WELL_DRIVER,
6028 HSW_PWR_WELL_ENABLE_REQUEST);
6029
6030 if (!is_enabled) {
6031 DRM_DEBUG_KMS("Enabling power well\n");
6032 if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
6033 HSW_PWR_WELL_STATE_ENABLED), 20))
6034 DRM_ERROR("Timeout enabling power well\n");
6035 }
6036
6037 hsw_power_well_post_enable(dev_priv);
6038 } else {
6039 if (enable_requested) {
6040 I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
6041 POSTING_READ(HSW_PWR_WELL_DRIVER);
6042 DRM_DEBUG_KMS("Requesting to disable the power well\n");
6043 }
6044 }
6045}
6046
6047static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
6048 struct i915_power_well *power_well)
6049{
6050 hsw_set_power_well(dev_priv, power_well, power_well->count > 0);
6051

 /*
  * We're taking over control from the BIOS: if it left a request for
  * the power well behind, clear it so that only the driver's request
  * register decides the power well state.
  */
6056 if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
6057 I915_WRITE(HSW_PWR_WELL_BIOS, 0);
6058}
6059
6060static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
6061 struct i915_power_well *power_well)
6062{
6063 hsw_set_power_well(dev_priv, power_well, true);
6064}
6065
6066static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
6067 struct i915_power_well *power_well)
6068{
6069 hsw_set_power_well(dev_priv, power_well, false);
6070}
6071
6072static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
6073 struct i915_power_well *power_well)
6074{
6075}
6076
6077static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
6078 struct i915_power_well *power_well)
6079{
6080 return true;
6081}
6082
6083static void vlv_set_power_well(struct drm_i915_private *dev_priv,
6084 struct i915_power_well *power_well, bool enable)
6085{
6086 enum punit_power_well power_well_id = power_well->data;
6087 u32 mask;
6088 u32 state;
6089 u32 ctrl;
6090
6091 mask = PUNIT_PWRGT_MASK(power_well_id);
6092 state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
6093 PUNIT_PWRGT_PWR_GATE(power_well_id);
6094
6095 mutex_lock(&dev_priv->rps.hw_lock);
6096
6097#define COND \
6098 ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
6099
6100 if (COND)
6101 goto out;
6102
6103 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
6104 ctrl &= ~mask;
6105 ctrl |= state;
6106 vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
6107
6108 if (wait_for(COND, 100))
 DRM_ERROR("timeout setting power well state %08x (%08x)\n",
6110 state,
6111 vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
6112
6113#undef COND
6114
6115out:
6116 mutex_unlock(&dev_priv->rps.hw_lock);
6117}
6118
6119static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
6120 struct i915_power_well *power_well)
6121{
6122 vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
6123}
6124
6125static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
6126 struct i915_power_well *power_well)
6127{
6128 vlv_set_power_well(dev_priv, power_well, true);
6129}
6130
6131static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
6132 struct i915_power_well *power_well)
6133{
6134 vlv_set_power_well(dev_priv, power_well, false);
6135}
6136
6137static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
6138 struct i915_power_well *power_well)
6139{
6140 int power_well_id = power_well->data;
6141 bool enabled = false;
6142 u32 mask;
6143 u32 state;
6144 u32 ctrl;
6145
6146 mask = PUNIT_PWRGT_MASK(power_well_id);
6147 ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
6148
6149 mutex_lock(&dev_priv->rps.hw_lock);
6150
6151 state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
6152
6153
6154
6155
6156 WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
6157 state != PUNIT_PWRGT_PWR_GATE(power_well_id));
6158 if (state == ctrl)
6159 enabled = true;
6160
6161
6162
6163
6164
6165 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
6166 WARN_ON(ctrl != state);
6167
6168 mutex_unlock(&dev_priv->rps.hw_lock);
6169
6170 return enabled;
6171}
6172
6173static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
6174 struct i915_power_well *power_well)
6175{
6176 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
6177
6178 vlv_set_power_well(dev_priv, power_well, true);
6179
6180 spin_lock_irq(&dev_priv->irq_lock);
6181 valleyview_enable_display_irqs(dev_priv);
6182 spin_unlock_irq(&dev_priv->irq_lock);
6183

 /*
  * During driver initialization/resume we can avoid restoring the parts
  * of the HW/SW state that will be re-initialized explicitly anyway.
  */
6188 if (dev_priv->power_domains.initializing)
6189 return;
6190
6191 intel_hpd_init(dev_priv->dev);
6192
6193 i915_redisable_vga_power_on(dev_priv->dev);
6194}
6195
6196static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
6197 struct i915_power_well *power_well)
6198{
6199 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
6200
6201 spin_lock_irq(&dev_priv->irq_lock);
6202 valleyview_disable_display_irqs(dev_priv);
6203 spin_unlock_irq(&dev_priv->irq_lock);
6204
6205 vlv_set_power_well(dev_priv, power_well, false);
6206}
6207
6208static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
6209 struct i915_power_well *power_well)
6210{
6211 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
6212

 /*
  * Enable the CRI clock source so we can get at the display and the
  * reference clock for VGA hotplug / manual detection.
  */
6218 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
6219 DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
6220 udelay(1);
6221
6222 vlv_set_power_well(dev_priv, power_well, true);
6223

 /*
  * De-assert the common lane reset. Per the VLV DPIO programming notes
  * this should only be done on init and on resume from S3 with both
  * PLLs disabled, or we risk losing DPIO and PLL synchronization.
  */
6235 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
6236}
6237
6238static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
6239 struct i915_power_well *power_well)
6240{
6241 struct drm_device *dev = dev_priv->dev;
6242 enum pipe pipe;
6243
6244 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
6245
6246 for_each_pipe(pipe)
6247 assert_pll_disabled(dev_priv, pipe);
6248
6249
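 /* Assert the common lane reset again before turning the well off. */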
6250 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
6251
6252 vlv_set_power_well(dev_priv, power_well, false);
6253}
6254
6255static void check_power_well_state(struct drm_i915_private *dev_priv,
6256 struct i915_power_well *power_well)
6257{
6258 bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
6259
6260 if (power_well->always_on || !i915.disable_power_well) {
6261 if (!enabled)
6262 goto mismatch;
6263
6264 return;
6265 }
6266
6267 if (enabled != (power_well->count > 0))
6268 goto mismatch;
6269
6270 return;
6271
6272mismatch:
6273 WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
6274 power_well->name, power_well->always_on, enabled,
6275 power_well->count, i915.disable_power_well);
6276}
6277
6278void intel_display_power_get(struct drm_i915_private *dev_priv,
6279 enum intel_display_power_domain domain)
6280{
6281 struct i915_power_domains *power_domains;
6282 struct i915_power_well *power_well;
6283 int i;
6284
6285 intel_runtime_pm_get(dev_priv);
6286
6287 power_domains = &dev_priv->power_domains;
6288
6289 mutex_lock(&power_domains->lock);
6290
6291 for_each_power_well(i, power_well, BIT(domain), power_domains) {
6292 if (!power_well->count++) {
6293 DRM_DEBUG_KMS("enabling %s\n", power_well->name);
6294 power_well->ops->enable(dev_priv, power_well);
6295 power_well->hw_enabled = true;
6296 }
6297
6298 check_power_well_state(dev_priv, power_well);
6299 }
6300
6301 power_domains->domain_use_count[domain]++;
6302
6303 mutex_unlock(&power_domains->lock);
6304}
6305
6306void intel_display_power_put(struct drm_i915_private *dev_priv,
6307 enum intel_display_power_domain domain)
6308{
6309 struct i915_power_domains *power_domains;
6310 struct i915_power_well *power_well;
6311 int i;
6312
6313 power_domains = &dev_priv->power_domains;
6314
6315 mutex_lock(&power_domains->lock);
6316
6317 WARN_ON(!power_domains->domain_use_count[domain]);
6318 power_domains->domain_use_count[domain]--;
6319
6320 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
6321 WARN_ON(!power_well->count);
6322
6323 if (!--power_well->count && i915.disable_power_well) {
6324 DRM_DEBUG_KMS("disabling %s\n", power_well->name);
6325 power_well->hw_enabled = false;
6326 power_well->ops->disable(dev_priv, power_well);
6327 }
6328
6329 check_power_well_state(dev_priv, power_well);
6330 }
6331
6332 mutex_unlock(&power_domains->lock);
6333
6334 intel_runtime_pm_put(dev_priv);
6335}
6336
6337static struct i915_power_domains *hsw_pwr;
6338
6339
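/* Exported for the HD-audio driver: grab a reference on the audio power domain. */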
6340int i915_request_power_well(void)
6341{
6342 struct drm_i915_private *dev_priv;
6343
6344 if (!hsw_pwr)
6345 return -ENODEV;
6346
6347 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
6348 power_domains);
6349 intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
6350 return 0;
6351}
6352EXPORT_SYMBOL_GPL(i915_request_power_well);
6353
6354
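/* Exported for the HD-audio driver: drop the audio power domain reference. */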
6355int i915_release_power_well(void)
6356{
6357 struct drm_i915_private *dev_priv;
6358
6359 if (!hsw_pwr)
6360 return -ENODEV;
6361
6362 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
6363 power_domains);
6364 intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
6365 return 0;
6366}
6367EXPORT_SYMBOL_GPL(i915_release_power_well);
6368

/*
 * Private interface for the audio driver to get CDCLK in kHz.
 *
 * The caller must hold a power well reference obtained with
 * i915_request_power_well() before making this call.
 */
6375int i915_get_cdclk_freq(void)
6376{
6377 struct drm_i915_private *dev_priv;
6378
6379 if (!hsw_pwr)
6380 return -ENODEV;
6381
6382 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
6383 power_domains);
6384
6385 return intel_ddi_get_cdclk_freq(dev_priv);
6386}
6387EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
6388
6389
6390#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
6391
6392#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
6393 BIT(POWER_DOMAIN_PIPE_A) | \
6394 BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
6395 BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
6396 BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
6397 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
6398 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
6399 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
6400 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
6401 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
6402 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
6403 BIT(POWER_DOMAIN_PORT_CRT) | \
6404 BIT(POWER_DOMAIN_PLLS) | \
6405 BIT(POWER_DOMAIN_INIT))
6406#define HSW_DISPLAY_POWER_DOMAINS ( \
6407 (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \
6408 BIT(POWER_DOMAIN_INIT))
6409
6410#define BDW_ALWAYS_ON_POWER_DOMAINS ( \
6411 HSW_ALWAYS_ON_POWER_DOMAINS | \
6412 BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
6413#define BDW_DISPLAY_POWER_DOMAINS ( \
6414 (POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) | \
6415 BIT(POWER_DOMAIN_INIT))
6416
6417#define VLV_ALWAYS_ON_POWER_DOMAINS BIT(POWER_DOMAIN_INIT)
6418#define VLV_DISPLAY_POWER_DOMAINS POWER_DOMAIN_MASK
6419
6420#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
6421 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
6422 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
6423 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
6424 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
6425 BIT(POWER_DOMAIN_PORT_CRT) | \
6426 BIT(POWER_DOMAIN_INIT))
6427
6428#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
6429 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
6430 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
6431 BIT(POWER_DOMAIN_INIT))
6432
6433#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
6434 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
6435 BIT(POWER_DOMAIN_INIT))
6436
6437#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
6438 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
6439 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
6440 BIT(POWER_DOMAIN_INIT))
6441
6442#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
6443 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
6444 BIT(POWER_DOMAIN_INIT))
6445
6446static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
6447 .sync_hw = i9xx_always_on_power_well_noop,
6448 .enable = i9xx_always_on_power_well_noop,
6449 .disable = i9xx_always_on_power_well_noop,
6450 .is_enabled = i9xx_always_on_power_well_enabled,
6451};
6452
6453static struct i915_power_well i9xx_always_on_power_well[] = {
6454 {
6455 .name = "always-on",
6456 .always_on = 1,
6457 .domains = POWER_DOMAIN_MASK,
6458 .ops = &i9xx_always_on_power_well_ops,
6459 },
6460};
6461
6462static const struct i915_power_well_ops hsw_power_well_ops = {
6463 .sync_hw = hsw_power_well_sync_hw,
6464 .enable = hsw_power_well_enable,
6465 .disable = hsw_power_well_disable,
6466 .is_enabled = hsw_power_well_enabled,
6467};
6468
6469static struct i915_power_well hsw_power_wells[] = {
6470 {
6471 .name = "always-on",
6472 .always_on = 1,
6473 .domains = HSW_ALWAYS_ON_POWER_DOMAINS,
6474 .ops = &i9xx_always_on_power_well_ops,
6475 },
6476 {
6477 .name = "display",
6478 .domains = HSW_DISPLAY_POWER_DOMAINS,
6479 .ops = &hsw_power_well_ops,
6480 },
6481};
6482
6483static struct i915_power_well bdw_power_wells[] = {
6484 {
6485 .name = "always-on",
6486 .always_on = 1,
6487 .domains = BDW_ALWAYS_ON_POWER_DOMAINS,
6488 .ops = &i9xx_always_on_power_well_ops,
6489 },
6490 {
6491 .name = "display",
6492 .domains = BDW_DISPLAY_POWER_DOMAINS,
6493 .ops = &hsw_power_well_ops,
6494 },
6495};
6496
6497static const struct i915_power_well_ops vlv_display_power_well_ops = {
6498 .sync_hw = vlv_power_well_sync_hw,
6499 .enable = vlv_display_power_well_enable,
6500 .disable = vlv_display_power_well_disable,
6501 .is_enabled = vlv_power_well_enabled,
6502};
6503
6504static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
6505 .sync_hw = vlv_power_well_sync_hw,
6506 .enable = vlv_dpio_cmn_power_well_enable,
6507 .disable = vlv_dpio_cmn_power_well_disable,
6508 .is_enabled = vlv_power_well_enabled,
6509};
6510
6511static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
6512 .sync_hw = vlv_power_well_sync_hw,
6513 .enable = vlv_power_well_enable,
6514 .disable = vlv_power_well_disable,
6515 .is_enabled = vlv_power_well_enabled,
6516};
6517
6518static struct i915_power_well vlv_power_wells[] = {
6519 {
6520 .name = "always-on",
6521 .always_on = 1,
6522 .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
6523 .ops = &i9xx_always_on_power_well_ops,
6524 },
6525 {
6526 .name = "display",
6527 .domains = VLV_DISPLAY_POWER_DOMAINS,
6528 .data = PUNIT_POWER_WELL_DISP2D,
6529 .ops = &vlv_display_power_well_ops,
6530 },
6531 {
6532 .name = "dpio-tx-b-01",
6533 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
6534 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
6535 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
6536 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
6537 .ops = &vlv_dpio_power_well_ops,
6538 .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
6539 },
6540 {
6541 .name = "dpio-tx-b-23",
6542 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
6543 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
6544 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
6545 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
6546 .ops = &vlv_dpio_power_well_ops,
6547 .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
6548 },
6549 {
6550 .name = "dpio-tx-c-01",
6551 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
6552 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
6553 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
6554 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
6555 .ops = &vlv_dpio_power_well_ops,
6556 .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
6557 },
6558 {
6559 .name = "dpio-tx-c-23",
6560 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
6561 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
6562 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
6563 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
6564 .ops = &vlv_dpio_power_well_ops,
6565 .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
6566 },
6567 {
6568 .name = "dpio-common",
6569 .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
6570 .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
6571 .ops = &vlv_dpio_cmn_power_well_ops,
6572 },
6573};
6574
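/* Look up a power well by its punit id; returns NULL if it does not exist. */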
6575static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
6576 enum punit_power_well power_well_id)
6577{
6578 struct i915_power_domains *power_domains = &dev_priv->power_domains;
6579 struct i915_power_well *power_well;
6580 int i;
6581
6582 for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
6583 if (power_well->data == power_well_id)
6584 return power_well;
6585 }
6586
6587 return NULL;
6588}
6589
6590#define set_power_wells(power_domains, __power_wells) ({ \
6591 (power_domains)->power_wells = (__power_wells); \
6592 (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
6593})
6594
6595int intel_power_domains_init(struct drm_i915_private *dev_priv)
6596{
6597 struct i915_power_domains *power_domains = &dev_priv->power_domains;
6598
6599 mutex_init(&power_domains->lock);
6600
6601 /*
6602  * The enabling order is from lower to higher indexed wells;
6603  * the disabling order is the reverse.
6604  */
6605 if (IS_HASWELL(dev_priv->dev)) {
6606 set_power_wells(power_domains, hsw_power_wells);
6607 hsw_pwr = power_domains;
6608 } else if (IS_BROADWELL(dev_priv->dev)) {
6609 set_power_wells(power_domains, bdw_power_wells);
6610 hsw_pwr = power_domains;
6611 } else if (IS_VALLEYVIEW(dev_priv->dev)) {
6612 set_power_wells(power_domains, vlv_power_wells);
6613 } else {
6614 set_power_wells(power_domains, i9xx_always_on_power_well);
6615 }
6616
6617 return 0;
6618}
6619
6620void intel_power_domains_remove(struct drm_i915_private *dev_priv)
6621{
6622 hsw_pwr = NULL;
6623}
6624
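/*
 * Re-apply the expected state to every power well and refresh the cached
 * hw_enabled flag from the hardware.
 */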
6625static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
6626{
6627 struct i915_power_domains *power_domains = &dev_priv->power_domains;
6628 struct i915_power_well *power_well;
6629 int i;
6630
6631 mutex_lock(&power_domains->lock);
6632 for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
6633 power_well->ops->sync_hw(dev_priv, power_well);
6634 power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
6635 power_well);
6636 }
6637 mutex_unlock(&power_domains->lock);
6638}
6639
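/*
 * VLV workaround: if the DPIO common lane well was left enabled without the
 * PHY side reset being properly de-asserted, power cycle the well so the PHY
 * comes back up in a known state.
 */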
6640static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
6641{
6642 struct i915_power_well *cmn =
6643 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
6644 struct i915_power_well *disp2d =
6645 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
6646
6647 /* Nothing to do if the common lane well is already off. */
6648 if (!cmn->ops->is_enabled(dev_priv, cmn))
6649 return;
6650
6651 /* If the display is probably active already, skip the reset. */
6652 if (disp2d->ops->is_enabled(dev_priv, disp2d) &&
6653 I915_READ(DPIO_CTL) & DPIO_CMNRST)
6654 return;
6655
6656 DRM_DEBUG_KMS("toggling display PHY side reset\n");
6657
6658 /* cmnlane needs the DPLL registers, so power up the display well first. */
6659 disp2d->ops->enable(dev_priv, disp2d);
6660
6661 /*
6662  * Assert and de-assert the PHY sideband reset by gating the
6663  * common lane power well and letting it be re-enabled later
6664  * through the normal power domain handling.  Simply un-gating
6665  * the well is not enough to reset the PHY far enough to bring
6666  * the ports and lanes back up.
6667  */
6668 cmn->ops->disable(dev_priv, cmn);
6669}
6670
6671void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
6672{
6673 struct drm_device *dev = dev_priv->dev;
6674 struct i915_power_domains *power_domains = &dev_priv->power_domains;
6675
6676 power_domains->initializing = true;
6677
6678 if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
6679 mutex_lock(&power_domains->lock);
6680 vlv_cmnlane_wa(dev_priv);
6681 mutex_unlock(&power_domains->lock);
6682 }
6683
6684 /* For now, we need the power well to be always enabled. */
6685 intel_display_set_init_power(dev_priv, true);
6686 intel_power_domains_resume(dev_priv);
6687 power_domains->initializing = false;
6688}
6689
6690void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
6691{
6692 intel_runtime_pm_get(dev_priv);
6693}
6694
6695void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
6696{
6697 intel_runtime_pm_put(dev_priv);
6698}
6699
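/*
 * Thin wrappers around the core runtime PM API; they turn into no-ops on
 * platforms without runtime PM support.
 */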
6700void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
6701{
6702 struct drm_device *dev = dev_priv->dev;
6703 struct device *device = &dev->pdev->dev;
6704
6705 if (!HAS_RUNTIME_PM(dev))
6706 return;
6707
6708 pm_runtime_get_sync(device);
6709 WARN(dev_priv->pm.suspended, "Device still suspended.\n");
6710}
6711
6712void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
6713{
6714 struct drm_device *dev = dev_priv->dev;
6715 struct device *device = &dev->pdev->dev;
6716
6717 if (!HAS_RUNTIME_PM(dev))
6718 return;
6719
6720 WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
6721 pm_runtime_get_noresume(device);
6722}
6723
6724void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
6725{
6726 struct drm_device *dev = dev_priv->dev;
6727 struct device *device = &dev->pdev->dev;
6728
6729 if (!HAS_RUNTIME_PM(dev))
6730 return;
6731
6732 pm_runtime_mark_last_busy(device);
6733 pm_runtime_put_autosuspend(device);
6734}
6735
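/*
 * Enable runtime PM: mark the device active, configure a 10 second
 * autosuspend delay and drop a reference so the device can suspend once
 * it goes idle.
 */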
6736void intel_init_runtime_pm(struct drm_i915_private *dev_priv)
6737{
6738 struct drm_device *dev = dev_priv->dev;
6739 struct device *device = &dev->pdev->dev;
6740
6741 if (!HAS_RUNTIME_PM(dev))
6742 return;
6743
6744 pm_runtime_set_active(device);
6745
6746 /*
6747  * RC6 is a prerequisite for runtime PM: if it is disabled the GPU
6748  * cannot reach a low enough power state, so leave runtime PM off.
6749  */
6750 if (!intel_enable_rc6(dev)) {
6751 DRM_INFO("RC6 disabled, disabling runtime PM support\n");
6752 return;
6753 }
6754
6755 pm_runtime_set_autosuspend_delay(device, 10000);
6756 pm_runtime_mark_last_busy(device);
6757 pm_runtime_use_autosuspend(device);
6758
6759 pm_runtime_put_autosuspend(device);
6760}
6761
6762void intel_fini_runtime_pm(struct drm_i915_private *dev_priv)
6763{
6764 struct drm_device *dev = dev_priv->dev;
6765 struct device *device = &dev->pdev->dev;
6766
6767 if (!HAS_RUNTIME_PM(dev))
6768 return;
6769
6770 if (!intel_enable_rc6(dev))
6771 return;
6772
6773 /* Make sure the device is not suspended before disabling runtime PM. */
6774 pm_runtime_get_sync(device);
6775 pm_runtime_disable(device);
6776}
6777
6778/* Set up chip specific power management-related functions */
6779void intel_init_pm(struct drm_device *dev)
6780{
6781 struct drm_i915_private *dev_priv = dev->dev_private;
6782
6783 if (HAS_FBC(dev)) {
6784 if (INTEL_INFO(dev)->gen >= 7) {
6785 dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
6786 dev_priv->display.enable_fbc = gen7_enable_fbc;
6787 dev_priv->display.disable_fbc = ironlake_disable_fbc;
6788 } else if (INTEL_INFO(dev)->gen >= 5) {
6789 dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
6790 dev_priv->display.enable_fbc = ironlake_enable_fbc;
6791 dev_priv->display.disable_fbc = ironlake_disable_fbc;
6792 } else if (IS_GM45(dev)) {
6793 dev_priv->display.fbc_enabled = g4x_fbc_enabled;
6794 dev_priv->display.enable_fbc = g4x_enable_fbc;
6795 dev_priv->display.disable_fbc = g4x_disable_fbc;
6796 } else {
6797 dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
6798 dev_priv->display.enable_fbc = i8xx_enable_fbc;
6799 dev_priv->display.disable_fbc = i8xx_disable_fbc;
6800
6801 /* Program a default compression interval; the exact value is historical. */
6802 I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
6803 }
6804 }
6805
6806 /* For CxSR: look up memory frequencies. */
6807 if (IS_PINEVIEW(dev))
6808 i915_pineview_get_mem_freq(dev);
6809 else if (IS_GEN5(dev))
6810 i915_ironlake_get_mem_freq(dev);
6811
6812 /* For FIFO watermark updates. */
6813 if (HAS_PCH_SPLIT(dev)) {
6814 ilk_setup_wm_latency(dev);
6815
6816 if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
6817 dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
6818 (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
6819 dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
6820 dev_priv->display.update_wm = ilk_update_wm;
6821 dev_priv->display.update_sprite_wm = ilk_update_sprite_wm;
6822 } else {
6823 DRM_DEBUG_KMS("Failed to read display plane latency. "
6824 "Disable CxSR\n");
6825 }
6826
6827 if (IS_GEN5(dev))
6828 dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
6829 else if (IS_GEN6(dev))
6830 dev_priv->display.init_clock_gating = gen6_init_clock_gating;
6831 else if (IS_IVYBRIDGE(dev))
6832 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
6833 else if (IS_HASWELL(dev))
6834 dev_priv->display.init_clock_gating = haswell_init_clock_gating;
6835 else if (INTEL_INFO(dev)->gen == 8)
6836 dev_priv->display.init_clock_gating = gen8_init_clock_gating;
6837 } else if (IS_CHERRYVIEW(dev)) {
6838 dev_priv->display.update_wm = valleyview_update_wm;
6839 dev_priv->display.init_clock_gating =
6840 cherryview_init_clock_gating;
6841 } else if (IS_VALLEYVIEW(dev)) {
6842 dev_priv->display.update_wm = valleyview_update_wm;
6843 dev_priv->display.init_clock_gating =
6844 valleyview_init_clock_gating;
6845 } else if (IS_PINEVIEW(dev)) {
6846 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
6847 dev_priv->is_ddr3,
6848 dev_priv->fsb_freq,
6849 dev_priv->mem_freq)) {
6850 DRM_INFO("failed to find known CxSR latency "
6851 "(found ddr%s fsb freq %d, mem freq %d), "
6852 "disabling CxSR\n",
6853 (dev_priv->is_ddr3 == 1) ? "3" : "2",
6854 dev_priv->fsb_freq, dev_priv->mem_freq);
6855 /* Disable CxSR and never update its watermark again. */
6856 intel_set_memory_cxsr(dev_priv, false);
6857 dev_priv->display.update_wm = NULL;
6858 } else
6859 dev_priv->display.update_wm = pineview_update_wm;
6860 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
6861 } else if (IS_G4X(dev)) {
6862 dev_priv->display.update_wm = g4x_update_wm;
6863 dev_priv->display.init_clock_gating = g4x_init_clock_gating;
6864 } else if (IS_GEN4(dev)) {
6865 dev_priv->display.update_wm = i965_update_wm;
6866 if (IS_CRESTLINE(dev))
6867 dev_priv->display.init_clock_gating = crestline_init_clock_gating;
6868 else if (IS_BROADWATER(dev))
6869 dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
6870 } else if (IS_GEN3(dev)) {
6871 dev_priv->display.update_wm = i9xx_update_wm;
6872 dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
6873 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
6874 } else if (IS_GEN2(dev)) {
6875 if (INTEL_INFO(dev)->num_pipes == 1) {
6876 dev_priv->display.update_wm = i845_update_wm;
6877 dev_priv->display.get_fifo_size = i845_get_fifo_size;
6878 } else {
6879 dev_priv->display.update_wm = i9xx_update_wm;
6880 dev_priv->display.get_fifo_size = i830_get_fifo_size;
6881 }
6882
6883 if (IS_I85X(dev) || IS_I865G(dev))
6884 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
6885 else
6886 dev_priv->display.init_clock_gating = i830_init_clock_gating;
6887 } else {
6888 DRM_ERROR("unexpected fall-through in intel_init_pm\n");
6889 }
6890}
6891
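/*
 * Pcode mailbox protocol: write the argument to GEN6_PCODE_DATA, start the
 * command by writing GEN6_PCODE_READY | mbox to GEN6_PCODE_MAILBOX, poll
 * until the READY bit clears, then read the result from GEN6_PCODE_DATA.
 * Callers must hold rps.hw_lock.
 */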
6892int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
6893{
6894 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
6895
6896 if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
6897 DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
6898 return -EAGAIN;
6899 }
6900
6901 I915_WRITE(GEN6_PCODE_DATA, *val);
6902 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
6903
6904 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
6905 500)) {
6906 DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
6907 return -ETIMEDOUT;
6908 }
6909
6910 *val = I915_READ(GEN6_PCODE_DATA);
6911 I915_WRITE(GEN6_PCODE_DATA, 0);
6912
6913 return 0;
6914}
6915
6916int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
6917{
6918 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
6919
6920 if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
6921 DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
6922 return -EAGAIN;
6923 }
6924
6925 I915_WRITE(GEN6_PCODE_DATA, val);
6926 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
6927
6928 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
6929 500)) {
6930 DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
6931 return -ETIMEDOUT;
6932 }
6933
6934 I915_WRITE(GEN6_PCODE_DATA, 0);
6935
6936 return 0;
6937}
6938
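/*
 * Convert a ValleyView PUNIT frequency opcode to MHz.  The conversion
 * factor depends on the reported memory frequency.
 */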
6939static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
6940{
6941 int div;
6942
6943 /* The divider depends on the memory frequency. */
6944 switch (dev_priv->mem_freq) {
6945 case 800:
6946 div = 10;
6947 break;
6948 case 1066:
6949 div = 12;
6950 break;
6951 case 1333:
6952 div = 16;
6953 break;
6954 default:
6955 return -1;
6956 }
6957
6958 return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div);
6959}
6960
6961static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
6962{
6963 int mul;
6964
6965 /* The multiplier depends on the memory frequency. */
6966 switch (dev_priv->mem_freq) {
6967 case 800:
6968 mul = 10;
6969 break;
6970 case 1066:
6971 mul = 12;
6972 break;
6973 case 1333:
6974 mul = 16;
6975 break;
6976 default:
6977 return -1;
6978 }
6979
6980 return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
6981}
6982
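/*
 * CherryView uses the CZ clock frequency rather than the memory frequency
 * for the opcode <-> MHz conversion.
 */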
6983static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
6984{
6985 int div, freq;
6986
6987 switch (dev_priv->rps.cz_freq) {
6988 case 200:
6989 div = 5;
6990 break;
6991 case 267:
6992 div = 6;
6993 break;
6994 case 320:
6995 case 333:
6996 case 400:
6997 div = 8;
6998 break;
6999 default:
7000 return -1;
7001 }
7002
7003 freq = (DIV_ROUND_CLOSEST((dev_priv->rps.cz_freq * val), 2 * div) / 2);
7004
7005 return freq;
7006}
7007
7008static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
7009{
7010 int mul, opcode;
7011
7012 switch (dev_priv->rps.cz_freq) {
7013 case 200:
7014 mul = 5;
7015 break;
7016 case 267:
7017 mul = 6;
7018 break;
7019 case 320:
7020 case 333:
7021 case 400:
7022 mul = 8;
7023 break;
7024 default:
7025 return -1;
7026 }
7027
7028 opcode = (DIV_ROUND_CLOSEST((val * 2 * mul), dev_priv->rps.cz_freq) * 2);
7029
7030 return opcode;
7031}
7032
7033int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
7034{
7035 int ret = -1;
7036
7037 if (IS_CHERRYVIEW(dev_priv->dev))
7038 ret = chv_gpu_freq(dev_priv, val);
7039 else if (IS_VALLEYVIEW(dev_priv->dev))
7040 ret = byt_gpu_freq(dev_priv, val);
7041
7042 return ret;
7043}
7044
7045int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
7046{
7047 int ret = -1;
7048
7049 if (IS_CHERRYVIEW(dev_priv->dev))
7050 ret = chv_freq_opcode(dev_priv, val);
7051 else if (IS_VALLEYVIEW(dev_priv->dev))
7052 ret = byt_freq_opcode(dev_priv, val);
7053
7054 return ret;
7055}
7056
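/*
 * One-time PM setup: initialize the RPS lock, the deferred powersave work
 * and the runtime PM state flags.
 */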
7057void intel_pm_setup(struct drm_device *dev)
7058{
7059 struct drm_i915_private *dev_priv = dev->dev_private;
7060
7061 mutex_init(&dev_priv->rps.hw_lock);
7062
7063 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
7064 intel_gen6_powersave_work);
7065
7066 dev_priv->pm.suspended = false;
7067 dev_priv->pm._irqs_disabled = false;
7068}
7069