1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28#include <linux/cpufreq.h>
29#include "i915_drv.h"
30#include "intel_drv.h"
31#include "../../../platform/x86/intel_ips.h"
32#include <linux/module.h>
33
34
35
36
37
38
39
40
41
42
43
44
45static void i8xx_disable_fbc(struct drm_device *dev)
46{
47 struct drm_i915_private *dev_priv = dev->dev_private;
48 u32 fbc_ctl;
49
50
51 fbc_ctl = I915_READ(FBC_CONTROL);
52 if ((fbc_ctl & FBC_CTL_EN) == 0)
53 return;
54
55 fbc_ctl &= ~FBC_CTL_EN;
56 I915_WRITE(FBC_CONTROL, fbc_ctl);
57
58
59 if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
60 DRM_DEBUG_KMS("FBC idle timed out\n");
61 return;
62 }
63
64 DRM_DEBUG_KMS("disabled FBC\n");
65}
66
/* Program and enable legacy (8xx/9xx-class) frame buffer compression for
 * the framebuffer currently bound to @crtc. @interval is the periodic
 * recompression interval written into FBC_CONTROL. */
static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int cfb_pitch;
	int plane, i;
	u32 fbc_ctl, fbc_ctl2;

	/* Use the smaller of the compressed-buffer pitch and the fb pitch. */
	cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
	if (fb->pitches[0] < cfb_pitch)
		cfb_pitch = fb->pitches[0];

	/* FBC_CTL wants the pitch in 64B units, minus one. */
	cfb_pitch = (cfb_pitch / 64) - 1;
	plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG + (i * 4), 0);

	/* Set it up... */
	fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
	fbc_ctl2 |= plane;
	I915_WRITE(FBC_CONTROL2, fbc_ctl2);
	I915_WRITE(FBC_FENCE_OFF, crtc->y);

	/* Enable it */
	fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special idle handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	/* NOTE(review): mask 0x2fff looks odd (0x3fff would be a contiguous
	 * field) — matches the value shipped here, but confirm against the
	 * hardware register description. */
	fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= obj->fence_reg;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
		      cfb_pitch, crtc->y, intel_crtc->plane);
}
109
110static bool i8xx_fbc_enabled(struct drm_device *dev)
111{
112 struct drm_i915_private *dev_priv = dev->dev_private;
113
114 return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
115}
116
/* Program and enable G4X-style (DPFC) frame buffer compression for the
 * framebuffer bound to @crtc. */
static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
	unsigned long stall_watermark = 200;
	u32 dpfc_ctl;

	/* NOTE(review): dpfc_ctl is assembled here (plane, SR, limit, fence)
	 * but is never written out — the enable below does a read-modify-write
	 * of DPFC_CONTROL instead. Confirm against upstream whether a
	 * I915_WRITE(DPFC_CONTROL, dpfc_ctl) is missing. */
	dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
	I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);

	I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

	/* Enable it */
	I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);

	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
143
144static void g4x_disable_fbc(struct drm_device *dev)
145{
146 struct drm_i915_private *dev_priv = dev->dev_private;
147 u32 dpfc_ctl;
148
149
150 dpfc_ctl = I915_READ(DPFC_CONTROL);
151 if (dpfc_ctl & DPFC_CTL_EN) {
152 dpfc_ctl &= ~DPFC_CTL_EN;
153 I915_WRITE(DPFC_CONTROL, dpfc_ctl);
154
155 DRM_DEBUG_KMS("disabled FBC\n");
156 }
157}
158
159static bool g4x_fbc_enabled(struct drm_device *dev)
160{
161 struct drm_i915_private *dev_priv = dev->dev_private;
162
163 return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
164}
165
/* Make sure the blitter notifies FBC of front-buffer writes on SNB.
 * The ECOSKPD register is protected by a per-bit lock: each write must
 * carry the lock bit (FBC_NOTIFY << LOCK_SHIFT) for the payload bit to
 * take effect, hence the three-step lock/set/unlock sequence below. */
static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 blt_ecoskpd;

	/* Hold forcewake so the GT register writes actually land. */
	gen6_gt_force_wake_get(dev_priv);
	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
	/* Take the write-lock for the FBC_NOTIFY bit... */
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
		GEN6_BLITTER_LOCK_SHIFT;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	/* ...set the bit while the lock is held... */
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	/* ...and release the lock again. */
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
			 GEN6_BLITTER_LOCK_SHIFT);
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	POSTING_READ(GEN6_BLITTER_ECOSKPD);
	gen6_gt_force_wake_put(dev_priv);
}
185
/* Program and enable Ironlake/Sandybridge (ILK_DPFC) frame buffer
 * compression for the framebuffer bound to @crtc. */
static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
	unsigned long stall_watermark = 200;
	u32 dpfc_ctl;

	/* Keep only the reserved bits of the current control value, then
	 * rebuild plane/limit/fence configuration from scratch. */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	dpfc_ctl &= DPFC_RESERVED;
	dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
	/* Set persistent mode for front-buffer rendering, ala X. */
	dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
	dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
	I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);

	I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
	I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);

	/* Enable it */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_GEN6(dev)) {
		/* SNB additionally needs the CPU fence programmed and the
		 * blitter told to send FBC notifications. */
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
		sandybridge_blit_fbc_update(dev);
	}

	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
223
224static void ironlake_disable_fbc(struct drm_device *dev)
225{
226 struct drm_i915_private *dev_priv = dev->dev_private;
227 u32 dpfc_ctl;
228
229
230 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
231 if (dpfc_ctl & DPFC_CTL_EN) {
232 dpfc_ctl &= ~DPFC_CTL_EN;
233 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
234
235 DRM_DEBUG_KMS("disabled FBC\n");
236 }
237}
238
239static bool ironlake_fbc_enabled(struct drm_device *dev)
240{
241 struct drm_i915_private *dev_priv = dev->dev_private;
242
243 return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
244}
245
246bool intel_fbc_enabled(struct drm_device *dev)
247{
248 struct drm_i915_private *dev_priv = dev->dev_private;
249
250 if (!dev_priv->display.fbc_enabled)
251 return false;
252
253 return dev_priv->display.fbc_enabled(dev);
254}
255
/* Delayed-work handler for the deferred FBC enable scheduled by
 * intel_enable_fbc(). Runs under struct_mutex and only acts if this work
 * item is still the one dev_priv->fbc_work points at (i.e. it was not
 * cancelled or superseded in the meantime). */
static void intel_fbc_work_fn(struct work_struct *__work)
{
	struct intel_fbc_work *work =
		container_of(to_delayed_work(__work),
			     struct intel_fbc_work, work);
	struct drm_device *dev = work->crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	if (work == dev_priv->fbc_work) {
		/* Double check that we haven't switched fb without
		 * cancelling the prior work. */
		if (work->crtc->fb == work->fb) {
			dev_priv->display.enable_fbc(work->crtc,
						     work->interval);

			/* Record what is now being compressed so
			 * intel_update_fbc() can detect no-op updates. */
			dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
			dev_priv->cfb_fb = work->crtc->fb->base.id;
			dev_priv->cfb_y = work->crtc->y;
		}

		dev_priv->fbc_work = NULL;
	}
	mutex_unlock(&dev->struct_mutex);

	kfree(work);
}
284
/* Cancel any pending deferred FBC enable. Callers hold struct_mutex
 * (the same lock intel_fbc_work_fn() takes), which is what makes the
 * check-and-free below safe. */
static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
	if (dev_priv->fbc_work == NULL)
		return;

	DRM_DEBUG_KMS("cancelling pending FBC enable\n");

	/* If cancel_delayed_work() succeeds the work never ran, so the
	 * allocation is still ours to free. If it fails the work is
	 * already running (or has run); intel_fbc_work_fn() frees it. */
	if (cancel_delayed_work(&dev_priv->fbc_work->work))
		/* tasklet was killed before being run, clean up */
		kfree(dev_priv->fbc_work);

	/* Mark the work as no longer wanted so that if it does wake up
	 * after this point, intel_fbc_work_fn() sees the mismatch and
	 * discards itself without enabling FBC. */
	dev_priv->fbc_work = NULL;
}
307
/* Request FBC be enabled on @crtc with recompression @interval.
 * The actual hardware enable is deferred by 50ms via a delayed work item;
 * any previously pending request is cancelled first. */
void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct intel_fbc_work *work;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.enable_fbc)
		return;

	intel_cancel_fbc_work(dev_priv);

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (work == NULL) {
		/* Allocation failed: fall back to enabling immediately,
		 * without the settling delay. */
		dev_priv->display.enable_fbc(crtc, interval);
		return;
	}

	work->crtc = crtc;
	work->fb = crtc->fb;
	work->interval = interval;
	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);

	dev_priv->fbc_work = work;

	DRM_DEBUG_KMS("scheduling delayed FBC enable\n");

	/* Delay the actual enabling to let pageflipping cease and the
	 * display to settle before starting the compression. Note that
	 * this delay also serves a second purpose: it allows for a
	 * vblank to pass after disabling the FBC before we attempt
	 * to modify the control registers.
	 *
	 * A more complicated solution would involve tracking vblanks
	 * following the termination of the page-flipping sequence
	 * and indeed performing the enable as a co-routine and not
	 * waiting synchronously upon the vblank.
	 */
	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}
347
348void intel_disable_fbc(struct drm_device *dev)
349{
350 struct drm_i915_private *dev_priv = dev->dev_private;
351
352 intel_cancel_fbc_work(dev_priv);
353
354 if (!dev_priv->display.disable_fbc)
355 return;
356
357 dev_priv->display.disable_fbc(dev);
358 dev_priv->cfb_plane = -1;
359}
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
/*
 * intel_update_fbc - re-evaluate and apply the FBC policy
 * @dev: the drm_device
 *
 * Decides whether frame buffer compression can be used with the current
 * output configuration and either (re)schedules an enable or disables it,
 * recording the reason for refusal in dev_priv->no_fbc_reason.
 */
void intel_update_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = NULL, *tmp_crtc;
	struct intel_crtc *intel_crtc;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int enable_fbc;

	DRM_DEBUG_KMS("\n");

	if (!i915_powersave)
		return;

	if (!I915_HAS_FBC(dev))
		return;

	/* FBC can only compress a single pipe at a time: find the one
	 * active crtc, and bail if there is more than one. */
	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
		if (tmp_crtc->enabled && tmp_crtc->fb) {
			if (crtc) {
				DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
				dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
				goto out_disable;
			}
			crtc = tmp_crtc;
		}
	}

	if (!crtc || crtc->fb == NULL) {
		DRM_DEBUG_KMS("no output, disabling\n");
		dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
		goto out_disable;
	}

	intel_crtc = to_intel_crtc(crtc);
	fb = crtc->fb;
	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	/* A negative module parameter means "use the per-chip default". */
	enable_fbc = i915_enable_fbc;
	if (enable_fbc < 0) {
		DRM_DEBUG_KMS("fbc set to per-chip default\n");
		enable_fbc = 1;
		if (INTEL_INFO(dev)->gen <= 6)
			enable_fbc = 0;
	}
	if (!enable_fbc) {
		DRM_DEBUG_KMS("fbc disabled per module param\n");
		dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
		goto out_disable;
	}
	/* The framebuffer must fit in the compressed (stolen) buffer. */
	if (intel_fb->obj->base.size > dev_priv->cfb_size) {
		DRM_DEBUG_KMS("framebuffer too large, disabling "
			      "compression\n");
		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
		goto out_disable;
	}
	/* Interlaced and doublescan modes cannot be compressed. */
	if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
	    (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
		DRM_DEBUG_KMS("mode incompatible with compression, "
			      "disabling\n");
		dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
		goto out_disable;
	}
	/* Hardware limit on the compressible surface dimensions. */
	if ((crtc->mode.hdisplay > 2048) ||
	    (crtc->mode.vdisplay > 1536)) {
		DRM_DEBUG_KMS("mode too large for compression, disabling\n");
		dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
		goto out_disable;
	}
	/* i915GM/i945GM can only compress plane A. */
	if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
		DRM_DEBUG_KMS("plane not 0, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_BAD_PLANE;
		goto out_disable;
	}

	/* The surface must be X-tiled and fenced so the hardware can
	 * track front-buffer writes. */
	if (obj->tiling_mode != I915_TILING_X ||
	    obj->fence_reg == I915_FENCE_REG_NONE) {
		DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_NOT_TILED;
		goto out_disable;
	}

	/* If the kernel debugger is active, always disable compression */
	if (in_dbg_master())
		goto out_disable;

	/* Nothing changed since the last update: leave the current state
	 * (possibly a still-pending deferred enable) alone. */
	if (dev_priv->cfb_plane == intel_crtc->plane &&
	    dev_priv->cfb_fb == fb->base.id &&
	    dev_priv->cfb_y == crtc->y)
		return;

	if (intel_fbc_enabled(dev)) {
		/* The configuration changed while FBC was active: disable
		 * first, then re-enable below (the deferred enable in
		 * intel_enable_fbc() gives the disable a vblank to land). */
		DRM_DEBUG_KMS("disabling active FBC for update\n");
		intel_disable_fbc(dev);
	}

	intel_enable_fbc(crtc, 500);
	return;

out_disable:
	/* Multiple disables should be harmless */
	if (intel_fbc_enabled(dev)) {
		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
		intel_disable_fbc(dev);
	}
}
528
529static void i915_pineview_get_mem_freq(struct drm_device *dev)
530{
531 drm_i915_private_t *dev_priv = dev->dev_private;
532 u32 tmp;
533
534 tmp = I915_READ(CLKCFG);
535
536 switch (tmp & CLKCFG_FSB_MASK) {
537 case CLKCFG_FSB_533:
538 dev_priv->fsb_freq = 533;
539 break;
540 case CLKCFG_FSB_800:
541 dev_priv->fsb_freq = 800;
542 break;
543 case CLKCFG_FSB_667:
544 dev_priv->fsb_freq = 667;
545 break;
546 case CLKCFG_FSB_400:
547 dev_priv->fsb_freq = 400;
548 break;
549 }
550
551 switch (tmp & CLKCFG_MEM_MASK) {
552 case CLKCFG_MEM_533:
553 dev_priv->mem_freq = 533;
554 break;
555 case CLKCFG_MEM_667:
556 dev_priv->mem_freq = 667;
557 break;
558 case CLKCFG_MEM_800:
559 dev_priv->mem_freq = 800;
560 break;
561 }
562
563
564 tmp = I915_READ(CSHRDDR3CTL);
565 dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
566}
567
/* Decode the DDR and CSI PLL registers into memory/FSB frequencies (MHz)
 * and derive the c_m coefficient index used by the IPS code. */
static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	/* Low byte of DDRMPLL1 encodes the memory clock. */
	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->r_t = dev_priv->mem_freq;

	/* Low 10 bits of CSIPLL0 encode the FSB clock. */
	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	/* Pick the c_m bucket from the FSB frequency. */
	if (dev_priv->fsb_freq == 3200) {
		dev_priv->c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->c_m = 1;
	} else {
		dev_priv->c_m = 2;
	}
}
635
/*
 * Pineview CxSR (self-refresh) latency table, indexed by platform type,
 * memory type and clocking. Matched in intel_get_cxsr_latency() on the
 * first four columns: is_desktop, is_ddr3, fsb_freq (MHz), mem_freq (MHz).
 * The remaining four values are latencies consumed via latency->display_sr,
 * display_hpll_disable, cursor_sr and cursor_hpll_disable in
 * pineview_update_wm() — exact column order assumed from the struct
 * declaration elsewhere; confirm against struct cxsr_latency.
 */
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
673
674static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
675 int is_ddr3,
676 int fsb,
677 int mem)
678{
679 const struct cxsr_latency *latency;
680 int i;
681
682 if (fsb == 0 || mem == 0)
683 return NULL;
684
685 for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
686 latency = &cxsr_latency_table[i];
687 if (is_desktop == latency->is_desktop &&
688 is_ddr3 == latency->is_ddr3 &&
689 fsb == latency->fsb_freq && mem == latency->mem_freq)
690 return latency;
691 }
692
693 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
694
695 return NULL;
696}
697
698static void pineview_disable_cxsr(struct drm_device *dev)
699{
700 struct drm_i915_private *dev_priv = dev->dev_private;
701
702
703 I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
704}
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
/* Default memory latency (ns) assumed for non-self-refresh watermark
 * calculations below — presumably a conservative platform-wide figure;
 * confirm against the original comment block this value carried. */
static const int latency_ns = 5000;
721
722static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
723{
724 struct drm_i915_private *dev_priv = dev->dev_private;
725 uint32_t dsparb = I915_READ(DSPARB);
726 int size;
727
728 size = dsparb & 0x7f;
729 if (plane)
730 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
731
732 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
733 plane ? "B" : "A", size);
734
735 return size;
736}
737
738static int i85x_get_fifo_size(struct drm_device *dev, int plane)
739{
740 struct drm_i915_private *dev_priv = dev->dev_private;
741 uint32_t dsparb = I915_READ(DSPARB);
742 int size;
743
744 size = dsparb & 0x1ff;
745 if (plane)
746 size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
747 size >>= 1;
748
749 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
750 plane ? "B" : "A", size);
751
752 return size;
753}
754
755static int i845_get_fifo_size(struct drm_device *dev, int plane)
756{
757 struct drm_i915_private *dev_priv = dev->dev_private;
758 uint32_t dsparb = I915_READ(DSPARB);
759 int size;
760
761 size = dsparb & 0x7f;
762 size >>= 2;
763
764 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
765 plane ? "B" : "A",
766 size);
767
768 return size;
769}
770
771static int i830_get_fifo_size(struct drm_device *dev, int plane)
772{
773 struct drm_i915_private *dev_priv = dev->dev_private;
774 uint32_t dsparb = I915_READ(DSPARB);
775 int size;
776
777 size = dsparb & 0x7f;
778 size >>= 1;
779
780 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
781 plane ? "B" : "A", size);
782
783 return size;
784}
785
786
/*
 * Per-platform watermark parameter tables consumed by intel_calculate_wm()
 * and the g4x/ironlake watermark helpers. Positional initializer order is
 * taken to be: fifo_size, max_wm, default_wm, guard_size, cacheline_size —
 * inferred from the macro names and the struct field uses below; confirm
 * against the struct intel_watermark_params declaration.
 */
static const struct intel_watermark_params pineview_display_wm = {
	PINEVIEW_DISPLAY_FIFO,
	PINEVIEW_MAX_WM,
	PINEVIEW_DFT_WM,
	PINEVIEW_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
	PINEVIEW_DISPLAY_FIFO,
	PINEVIEW_MAX_WM,
	PINEVIEW_DFT_HPLLOFF_WM,
	PINEVIEW_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_cursor_wm = {
	PINEVIEW_CURSOR_FIFO,
	PINEVIEW_CURSOR_MAX_WM,
	PINEVIEW_CURSOR_DFT_WM,
	PINEVIEW_CURSOR_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	PINEVIEW_CURSOR_FIFO,
	PINEVIEW_CURSOR_MAX_WM,
	PINEVIEW_CURSOR_DFT_WM,
	PINEVIEW_CURSOR_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params g4x_wm_info = {
	G4X_FIFO_SIZE,
	G4X_MAX_WM,
	G4X_MAX_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
	I965_CURSOR_FIFO,
	I965_CURSOR_MAX_WM,
	I965_CURSOR_DFT_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_wm_info = {
	VALLEYVIEW_FIFO_SIZE,
	VALLEYVIEW_MAX_WM,
	VALLEYVIEW_MAX_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_cursor_wm_info = {
	I965_CURSOR_FIFO,
	VALLEYVIEW_CURSOR_MAX_WM,
	I965_CURSOR_DFT_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
	I965_CURSOR_FIFO,
	I965_CURSOR_MAX_WM,
	I965_CURSOR_DFT_WM,
	2,
	I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
	I945_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i915_wm_info = {
	I915_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i855_wm_info = {
	I855GM_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I830_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i830_wm_info = {
	I830_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I830_FIFO_LINE_SIZE
};

/* Ironlake normal and self-refresh (SR) watermark parameters. */
static const struct intel_watermark_params ironlake_display_wm_info = {
	ILK_DISPLAY_FIFO,
	ILK_DISPLAY_MAXWM,
	ILK_DISPLAY_DFTWM,
	2,
	ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_cursor_wm_info = {
	ILK_CURSOR_FIFO,
	ILK_CURSOR_MAXWM,
	ILK_CURSOR_DFTWM,
	2,
	ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_display_srwm_info = {
	ILK_DISPLAY_SR_FIFO,
	ILK_DISPLAY_MAX_SRWM,
	ILK_DISPLAY_DFT_SRWM,
	2,
	ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_cursor_srwm_info = {
	ILK_CURSOR_SR_FIFO,
	ILK_CURSOR_MAX_SRWM,
	ILK_CURSOR_DFT_SRWM,
	2,
	ILK_FIFO_LINE_SIZE
};

/* Sandybridge normal and self-refresh (SR) watermark parameters. */
static const struct intel_watermark_params sandybridge_display_wm_info = {
	SNB_DISPLAY_FIFO,
	SNB_DISPLAY_MAXWM,
	SNB_DISPLAY_DFTWM,
	2,
	SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_cursor_wm_info = {
	SNB_CURSOR_FIFO,
	SNB_CURSOR_MAXWM,
	SNB_CURSOR_DFTWM,
	2,
	SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_display_srwm_info = {
	SNB_DISPLAY_SR_FIFO,
	SNB_DISPLAY_MAX_SRWM,
	SNB_DISPLAY_DFT_SRWM,
	2,
	SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
	SNB_CURSOR_SR_FIFO,
	SNB_CURSOR_MAX_SRWM,
	SNB_CURSOR_DFT_SRWM,
	2,
	SNB_FIFO_LINE_SIZE
};
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
/**
 * intel_calculate_wm - calculate a FIFO watermark level
 * @clock_in_khz: pixel clock of the active mode
 * @wm: per-chip watermark parameters (guard size, cacheline, limits)
 * @fifo_size: FIFO allocation for this plane, in cachelines
 * @pixel_size: bytes per pixel of the framebuffer
 * @latency_ns: memory fetch latency to hide, in nanoseconds
 *
 * Returns the number of FIFO entries that may drain during @latency_ns
 * before the plane must start refilling, clamped to [1..max_wm] (falling
 * back to wm->default_wm when the FIFO is too small).
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size,
					int pixel_size,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Bytes consumed during the latency window:
	 * (pixels/us) * (bytes/pixel) * (latency in ns) / 1000,
	 * split across two divisions by 1000 to avoid overflow for
	 * large clock/latency values.
	 */
	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Clamp to the chip maximum; cast avoids a signed/unsigned compare. */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	/* A non-positive result means the FIFO is too small: fall back. */
	if (wm_size <= 0)
		wm_size = wm->default_wm;
	return wm_size;
}
987
988static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
989{
990 struct drm_crtc *crtc, *enabled = NULL;
991
992 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
993 if (crtc->enabled && crtc->fb) {
994 if (enabled)
995 return NULL;
996 enabled = crtc;
997 }
998 }
999
1000 return enabled;
1001}
1002
/* Update Pineview watermarks and enable self-refresh (CxSR) when exactly
 * one crtc is active; otherwise disable CxSR. */
static void pineview_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		pineview_disable_cxsr(dev);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		int clock = crtc->mode.clock;
		int pixel_size = crtc->fb->bits_per_pixel / 8;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= wm << DSPFW_SR_SHIFT;
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		/* NOTE(review): fifo_size is taken from the *display* wm
		 * params here (and in the two HPLL cases below) rather than
		 * the cursor params — matches the code as shipped, but
		 * worth confirming intent. */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= wm & DSPFW_HPLL_SR_MASK;
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		/* activate cxsr */
		I915_WRITE(DSPFW3,
			   I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
		DRM_DEBUG_KMS("Self-refresh is enabled\n");
	} else {
		pineview_disable_cxsr(dev);
		DRM_DEBUG_KMS("Self-refresh is disabled\n");
	}
}
1071
/* Compute the normal (non-self-refresh) plane and cursor watermarks for
 * @plane. Returns false (with guard-size fallbacks stored) when the plane's
 * crtc is inactive, true when real values were computed. */
static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	int htotal, hdisplay, clock, pixel_size;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (crtc->fb == NULL || !crtc->enabled) {
		/* Inactive pipe: report minimal (guard-only) watermarks. */
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	htotal = crtc->mode.htotal;
	hdisplay = crtc->mode.hdisplay;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	/* Extra FIFO entries to cover a TLB miss, when the FIFO is larger
	 * than the displayed width needs. */
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = ((htotal * 1000) / clock);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * 64 * pixel_size; /* 64-wide cursor assumed */
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}
1122
1123
1124
1125
1126
1127
1128
1129
1130static bool g4x_check_srwm(struct drm_device *dev,
1131 int display_wm, int cursor_wm,
1132 const struct intel_watermark_params *display,
1133 const struct intel_watermark_params *cursor)
1134{
1135 DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
1136 display_wm, cursor_wm);
1137
1138 if (display_wm > display->max_wm) {
1139 DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
1140 display_wm, display->max_wm);
1141 return false;
1142 }
1143
1144 if (cursor_wm > cursor->max_wm) {
1145 DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
1146 cursor_wm, cursor->max_wm);
1147 return false;
1148 }
1149
1150 if (!(display_wm || cursor_wm)) {
1151 DRM_DEBUG_KMS("SR latency is 0, disabling\n");
1152 return false;
1153 }
1154
1155 return true;
1156}
1157
/* Compute self-refresh watermarks for @plane over @latency_ns, writing
 * results through @display_wm/@cursor_wm and validating them with
 * g4x_check_srwm(). Returns false (zero watermarks) when latency is 0. */
static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	int hdisplay, htotal, pixel_size, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	hdisplay = crtc->mode.hdisplay;
	htotal = crtc->mode.htotal;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	line_time_us = (htotal * 1000) / clock;
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor
	 * (64-wide cursor assumed) */
	entries = line_count * pixel_size * 64;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}
1203
/* Compute ValleyView drain-latency values and precision multipliers for
 * the given plane and its cursor. Returns false when the plane's crtc is
 * inactive (outputs left untouched). */
static bool vlv_compute_drain_latency(struct drm_device *dev,
				      int plane,
				      int *plane_prec_mult,
				      int *plane_dl,
				      int *cursor_prec_mult,
				      int *cursor_dl)
{
	struct drm_crtc *crtc;
	int clock, pixel_size;
	int entries;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (crtc->fb == NULL || !crtc->enabled)
		return false;

	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	/* Pick precision based on bytes fetched per microsecond; the drain
	 * latency is then 64 precision-units of FIFO divided by that rate. */
	entries = (clock / 1000) * pixel_size;
	*plane_prec_mult = (entries > 256) ?
		DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
	*plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) *
						     pixel_size);

	/* Cursor: fixed 4 bytes per pixel. */
	entries = (clock / 1000) * 4;
	*cursor_prec_mult = (entries > 256) ?
		DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
	*cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4);

	return true;
}
1235
1236
1237
1238
1239
1240
1241
1242
1243
/* Program the ValleyView VLV_DDL1/VLV_DDL2 drain-latency registers for
 * plane/cursor A and B, skipping inactive pipes. */
static void vlv_update_drain_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_prec, planea_dl, planeb_prec, planeb_dl;
	int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl;
	int plane_prec_mult, cursor_prec_mult;

	/* For plane A */
	if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
				      &cursor_prec_mult, &cursora_dl)) {
		/* Translate the precision multiplier into the register's
		 * precision-select bits. */
		cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16;
		planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16;

		I915_WRITE(VLV_DDL1, cursora_prec |
			   (cursora_dl << DDL_CURSORA_SHIFT) |
			   planea_prec | planea_dl);
	}

	/* For plane B */
	if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
				      &cursor_prec_mult, &cursorb_dl)) {
		cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16;
		planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16;

		I915_WRITE(VLV_DDL2, cursorb_prec |
			   (cursorb_dl << DDL_CURSORB_SHIFT) |
			   planeb_prec | planeb_dl);
	}
}
1278
/* True iff exactly one bit is set in the enabled-planes mask. */
#define single_plane_enabled(mask) is_power_of_2(mask)
1280
/* Recompute and program all ValleyView watermarks (planes A/B, cursors,
 * and self-refresh), enabling SR power-down only when a single plane is
 * active and its SR watermarks validate. */
static void valleyview_update_wm(struct drm_device *dev)
{
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;

	vlv_update_drain_latency(dev);

	if (g4x_compute_wm0(dev, 0,
			    &valleyview_wm_info, latency_ns,
			    &valleyview_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1;

	if (g4x_compute_wm0(dev, 1,
			    &valleyview_wm_info, latency_ns,
			    &valleyview_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 2;

	/* Self-refresh only makes sense with exactly one active plane. */
	plane_sr = cursor_sr = 0;
	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &plane_sr, &cursor_sr))
		I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
	else
		I915_WRITE(FW_BLC_SELF_VLV,
			   I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)));
}
1331
/*
 * Compute and program FIFO watermarks for both pipes on G4X, enabling
 * self-refresh (FW_BLC_SELF) only when exactly one plane is active and
 * its self-refresh watermarks fit.
 */
static void g4x_update_wm(struct drm_device *dev)
{
	/* self-refresh has much higher latency than normal operation */
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;

	if (g4x_compute_wm0(dev, 0,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1;

	if (g4x_compute_wm0(dev, 1,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 2;

	plane_sr = cursor_sr = 0;
	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	else
		I915_WRITE(FW_BLC_SELF,
			   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
1382
/*
 * Update watermarks on i965 (gen4).  The normal pipe watermarks are
 * fixed at 8 cachelines; only the self-refresh plane and cursor
 * watermarks are computed, and only when exactly one CRTC is enabled.
 */
static void i965_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		int clock = crtc->mode.clock;
		int htotal = crtc->mode.htotal;
		int hdisplay = crtc->mode.hdisplay;
		int pixel_size = crtc->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		/* assumes clock != 0 for an enabled CRTC — TODO confirm */
		line_time_us = ((htotal * 1000) / clock);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		/* cursor is fixed at 64 bytes wide */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * 64;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	} else {
		/* Turn off self refresh if both pipes are enabled */
		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
				   & ~FW_BLC_SELF_EN);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 pipe watermarks are hardwired to 8 cachelines */
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
		   (8 << 16) | (8 << 8) | (8 << 0));
	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
1447
/*
 * Compute and program FIFO watermarks for gen2/gen3 parts (i855/i915/i945
 * families), and manage memory self-refresh: self-refresh is first
 * disabled while watermarks are adjusted, then re-enabled only when a
 * single CRTC is active.
 */
static void i9xx_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	/* Pick the watermark parameters matching the chipset generation */
	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i855_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (crtc->enabled && crtc->fb) {
		planea_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size,
					       crtc->fb->bits_per_pixel / 8,
					       latency_ns);
		enabled = crtc;
	} else
		planea_wm = fifo_size - wm_info->guard_size;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (crtc->enabled && crtc->fb) {
		planeb_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size,
					       crtc->fb->bits_per_pixel / 8,
					       latency_ns);
		/* enabled stays non-NULL only when exactly one pipe is on */
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else
		planeb_wm = fifo_size - wm_info->guard_size;

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	if (IS_I945G(dev) || IS_I945GM(dev))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
	else if (IS_I915GM(dev))
		I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		int clock = enabled->mode.clock;
		int htotal = enabled->mode.htotal;
		int hdisplay = enabled->mode.hdisplay;
		int pixel_size = enabled->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		/* assumes clock != 0 for an enabled CRTC — TODO confirm */
		line_time_us = (htotal * 1000) / clock;

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	/* Re-enable self-refresh, but only for single-pipe configs */
	if (HAS_FW_BLC(dev)) {
		if (enabled) {
			if (IS_I945G(dev) || IS_I945GM(dev))
				I915_WRITE(FW_BLC_SELF,
					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
			else if (IS_I915GM(dev))
				I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
			DRM_DEBUG_KMS("memory self refresh enabled\n");
		} else
			DRM_DEBUG_KMS("memory self refresh disabled\n");
	}
}
1558
1559static void i830_update_wm(struct drm_device *dev)
1560{
1561 struct drm_i915_private *dev_priv = dev->dev_private;
1562 struct drm_crtc *crtc;
1563 uint32_t fwater_lo;
1564 int planea_wm;
1565
1566 crtc = single_enabled_crtc(dev);
1567 if (crtc == NULL)
1568 return;
1569
1570 planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
1571 dev_priv->display.get_fifo_size(dev, 0),
1572 crtc->fb->bits_per_pixel / 8,
1573 latency_ns);
1574 fwater_lo = I915_READ(FW_BLC) & ~0xfff;
1575 fwater_lo |= (3<<8) | planea_wm;
1576
1577 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
1578
1579 I915_WRITE(FW_BLC, fwater_lo);
1580}
1581
/* Ironlake LP0 (WM0) memory latencies, in 100ns units */
#define ILK_LP0_PLANE_LATENCY 700
#define ILK_LP0_CURSOR_LATENCY 1300
1584
1585
1586
1587
1588
1589
1590
1591
1592static bool ironlake_check_srwm(struct drm_device *dev, int level,
1593 int fbc_wm, int display_wm, int cursor_wm,
1594 const struct intel_watermark_params *display,
1595 const struct intel_watermark_params *cursor)
1596{
1597 struct drm_i915_private *dev_priv = dev->dev_private;
1598
1599 DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
1600 " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
1601
1602 if (fbc_wm > SNB_FBC_MAX_SRWM) {
1603 DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
1604 fbc_wm, SNB_FBC_MAX_SRWM, level);
1605
1606
1607 I915_WRITE(DISP_ARB_CTL,
1608 I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
1609 return false;
1610 }
1611
1612 if (display_wm > display->max_wm) {
1613 DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
1614 display_wm, SNB_DISPLAY_MAX_SRWM, level);
1615 return false;
1616 }
1617
1618 if (cursor_wm > cursor->max_wm) {
1619 DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
1620 cursor_wm, SNB_CURSOR_MAX_SRWM, level);
1621 return false;
1622 }
1623
1624 if (!(fbc_wm || display_wm || cursor_wm)) {
1625 DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
1626 return false;
1627 }
1628
1629 return true;
1630}
1631
1632
1633
1634
/*
 * Compute the self-refresh watermarks (display, FBC and cursor) for
 * the given LP level and plane, then validate them with
 * ironlake_check_srwm().  Returns false (with all outputs zeroed) when
 * latency is 0, otherwise the check result.
 */
static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
				  int latency_ns,
				  const struct intel_watermark_params *display,
				  const struct intel_watermark_params *cursor,
				  int *fbc_wm, int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	unsigned long line_time_us;
	int hdisplay, htotal, pixel_size, clock;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*fbc_wm = *display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	hdisplay = crtc->mode.hdisplay;
	htotal = crtc->mode.htotal;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	/* assumes clock != 0 for the chosen plane — TODO confirm callers
	 * only pass enabled planes */
	line_time_us = (htotal * 1000) / clock;
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/*
	 * The FBC watermark is expressed in compressed lines:
	 * display wm cachelines (64 bytes each) / line_size, plus 2 spare.
	 */
	*fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * 64;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return ironlake_check_srwm(dev, level,
				   *fbc_wm, *display_wm, *cursor_wm,
				   display, cursor);
}
1685
/*
 * Compute and program the pipe (WM0) and self-refresh (WM1/WM2)
 * watermarks on Ironlake.  The LP watermarks are first disabled, then
 * re-enabled only when a single plane is active and each level's
 * watermarks validate.
 */
static void ironlake_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int fbc_wm, plane_wm, cursor_wm;
	unsigned int enabled;

	enabled = 0;
	if (g4x_compute_wm0(dev, 0,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEA_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
			      " plane %d, " "cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 1;
	}

	if (g4x_compute_wm0(dev, 1,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEB_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 2;
	}

	/*
	 * Calculate and update the self-refresh watermark only when one
	 * display plane is used.  Disable all LP levels first.
	 */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	if (!single_plane_enabled(enabled))
		return;
	enabled = ffs(enabled) - 1;

	/* WM1 */
	if (!ironlake_compute_srwm(dev, 1, enabled,
				   ILK_READ_WM1_LATENCY() * 500,
				   &ironlake_display_srwm_info,
				   &ironlake_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM1_LP_ILK,
		   WM1_LP_SR_EN |
		   (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM2 */
	if (!ironlake_compute_srwm(dev, 2, enabled,
				   ILK_READ_WM2_LATENCY() * 500,
				   &ironlake_display_srwm_info,
				   &ironlake_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM2_LP_ILK,
		   WM2_LP_EN |
		   (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM3 is intentionally not programmed here (left disabled above). */
}
1768
1769static void sandybridge_update_wm(struct drm_device *dev)
1770{
1771 struct drm_i915_private *dev_priv = dev->dev_private;
1772 int latency = SNB_READ_WM0_LATENCY() * 100;
1773 u32 val;
1774 int fbc_wm, plane_wm, cursor_wm;
1775 unsigned int enabled;
1776
1777 enabled = 0;
1778 if (g4x_compute_wm0(dev, 0,
1779 &sandybridge_display_wm_info, latency,
1780 &sandybridge_cursor_wm_info, latency,
1781 &plane_wm, &cursor_wm)) {
1782 val = I915_READ(WM0_PIPEA_ILK);
1783 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1784 I915_WRITE(WM0_PIPEA_ILK, val |
1785 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1786 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
1787 " plane %d, " "cursor: %d\n",
1788 plane_wm, cursor_wm);
1789 enabled |= 1;
1790 }
1791
1792 if (g4x_compute_wm0(dev, 1,
1793 &sandybridge_display_wm_info, latency,
1794 &sandybridge_cursor_wm_info, latency,
1795 &plane_wm, &cursor_wm)) {
1796 val = I915_READ(WM0_PIPEB_ILK);
1797 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1798 I915_WRITE(WM0_PIPEB_ILK, val |
1799 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1800 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
1801 " plane %d, cursor: %d\n",
1802 plane_wm, cursor_wm);
1803 enabled |= 2;
1804 }
1805
1806 if ((dev_priv->num_pipe == 3) &&
1807 g4x_compute_wm0(dev, 2,
1808 &sandybridge_display_wm_info, latency,
1809 &sandybridge_cursor_wm_info, latency,
1810 &plane_wm, &cursor_wm)) {
1811 val = I915_READ(WM0_PIPEC_IVB);
1812 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1813 I915_WRITE(WM0_PIPEC_IVB, val |
1814 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1815 DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
1816 " plane %d, cursor: %d\n",
1817 plane_wm, cursor_wm);
1818 enabled |= 3;
1819 }
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831 I915_WRITE(WM3_LP_ILK, 0);
1832 I915_WRITE(WM2_LP_ILK, 0);
1833 I915_WRITE(WM1_LP_ILK, 0);
1834
1835 if (!single_plane_enabled(enabled) ||
1836 dev_priv->sprite_scaling_enabled)
1837 return;
1838 enabled = ffs(enabled) - 1;
1839
1840
1841 if (!ironlake_compute_srwm(dev, 1, enabled,
1842 SNB_READ_WM1_LATENCY() * 500,
1843 &sandybridge_display_srwm_info,
1844 &sandybridge_cursor_srwm_info,
1845 &fbc_wm, &plane_wm, &cursor_wm))
1846 return;
1847
1848 I915_WRITE(WM1_LP_ILK,
1849 WM1_LP_SR_EN |
1850 (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1851 (fbc_wm << WM1_LP_FBC_SHIFT) |
1852 (plane_wm << WM1_LP_SR_SHIFT) |
1853 cursor_wm);
1854
1855
1856 if (!ironlake_compute_srwm(dev, 2, enabled,
1857 SNB_READ_WM2_LATENCY() * 500,
1858 &sandybridge_display_srwm_info,
1859 &sandybridge_cursor_srwm_info,
1860 &fbc_wm, &plane_wm, &cursor_wm))
1861 return;
1862
1863 I915_WRITE(WM2_LP_ILK,
1864 WM2_LP_EN |
1865 (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1866 (fbc_wm << WM1_LP_FBC_SHIFT) |
1867 (plane_wm << WM1_LP_SR_SHIFT) |
1868 cursor_wm);
1869
1870
1871 if (!ironlake_compute_srwm(dev, 3, enabled,
1872 SNB_READ_WM3_LATENCY() * 500,
1873 &sandybridge_display_srwm_info,
1874 &sandybridge_cursor_srwm_info,
1875 &fbc_wm, &plane_wm, &cursor_wm))
1876 return;
1877
1878 I915_WRITE(WM3_LP_ILK,
1879 WM3_LP_EN |
1880 (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1881 (fbc_wm << WM1_LP_FBC_SHIFT) |
1882 (plane_wm << WM1_LP_SR_SHIFT) |
1883 cursor_wm);
1884}
1885
/*
 * Program the per-pipe line-time field used by the Haswell watermark
 * hardware: the time to scan out one line at the current pixel clock,
 * scaled by 8 (i.e. expressed in 1/8 us units).
 */
static void
haswell_update_linetime_wm(struct drm_device *dev, int pipe,
			   struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 temp;

	temp = I915_READ(PIPE_WM_LINETIME(pipe));
	temp &= ~PIPE_WM_LINETIME_MASK;

	/* linetime = hdisplay / clock, multiplied by 8 for the register's
	 * fixed-point format.
	 * assumes mode->clock != 0 — TODO confirm callers guarantee this */
	temp |= PIPE_WM_LINETIME_TIME(
		((mode->crtc_hdisplay * 1000) / mode->clock) * 8);

	/* The IPS linetime field of this register is deliberately left
	 * untouched here. */

	I915_WRITE(PIPE_WM_LINETIME(pipe), temp);
}
1912
1913static bool
1914sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
1915 uint32_t sprite_width, int pixel_size,
1916 const struct intel_watermark_params *display,
1917 int display_latency_ns, int *sprite_wm)
1918{
1919 struct drm_crtc *crtc;
1920 int clock;
1921 int entries, tlb_miss;
1922
1923 crtc = intel_get_crtc_for_plane(dev, plane);
1924 if (crtc->fb == NULL || !crtc->enabled) {
1925 *sprite_wm = display->guard_size;
1926 return false;
1927 }
1928
1929 clock = crtc->mode.clock;
1930
1931
1932 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
1933 tlb_miss = display->fifo_size*display->cacheline_size -
1934 sprite_width * 8;
1935 if (tlb_miss > 0)
1936 entries += tlb_miss;
1937 entries = DIV_ROUND_UP(entries, display->cacheline_size);
1938 *sprite_wm = entries + display->guard_size;
1939 if (*sprite_wm > (int)display->max_wm)
1940 *sprite_wm = display->max_wm;
1941
1942 return true;
1943}
1944
1945static bool
1946sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
1947 uint32_t sprite_width, int pixel_size,
1948 const struct intel_watermark_params *display,
1949 int latency_ns, int *sprite_wm)
1950{
1951 struct drm_crtc *crtc;
1952 unsigned long line_time_us;
1953 int clock;
1954 int line_count, line_size;
1955 int small, large;
1956 int entries;
1957
1958 if (!latency_ns) {
1959 *sprite_wm = 0;
1960 return false;
1961 }
1962
1963 crtc = intel_get_crtc_for_plane(dev, plane);
1964 clock = crtc->mode.clock;
1965 if (!clock) {
1966 *sprite_wm = 0;
1967 return false;
1968 }
1969
1970 line_time_us = (sprite_width * 1000) / clock;
1971 if (!line_time_us) {
1972 *sprite_wm = 0;
1973 return false;
1974 }
1975
1976 line_count = (latency_ns / line_time_us + 1000) / 1000;
1977 line_size = sprite_width * pixel_size;
1978
1979
1980 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
1981 large = line_count * line_size;
1982
1983 entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
1984 *sprite_wm = entries + display->guard_size;
1985
1986 return *sprite_wm > 0x3ff ? false : true;
1987}
1988
/*
 * Update the sprite watermarks (WM0 plus the LP self-refresh levels)
 * for the given pipe on Sandybridge/Ivybridge.  LP2/LP3 sprite
 * watermarks only exist on Ivybridge.
 */
static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
					 uint32_t sprite_width, int pixel_size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int latency = SNB_READ_WM0_LATENCY() * 100;	/* in 0.1us units */
	u32 val;
	int sprite_wm, reg;
	int ret;

	switch (pipe) {
	case 0:
		reg = WM0_PIPEA_ILK;
		break;
	case 1:
		reg = WM0_PIPEB_ILK;
		break;
	case 2:
		reg = WM0_PIPEC_IVB;
		break;
	default:
		return; /* bad pipe */
	}

	ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
					    &sandybridge_display_wm_info,
					    latency, &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
			      pipe);
		return;
	}

	val = I915_READ(reg);
	val &= ~WM0_PIPE_SPRITE_MASK;
	I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
	DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);

	/* LP1 sprite self-refresh watermark */
	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
					      pixel_size,
					      &sandybridge_display_srwm_info,
					      SNB_READ_WM1_LATENCY() * 500,
					      &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
			      pipe);
		return;
	}
	I915_WRITE(WM1S_LP_ILK, sprite_wm);

	/* Only IVB has two more LP watermarks for sprite */
	if (!IS_IVYBRIDGE(dev))
		return;

	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
					      pixel_size,
					      &sandybridge_display_srwm_info,
					      SNB_READ_WM2_LATENCY() * 500,
					      &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
			      pipe);
		return;
	}
	I915_WRITE(WM2S_LP_IVB, sprite_wm);

	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
					      pixel_size,
					      &sandybridge_display_srwm_info,
					      SNB_READ_WM3_LATENCY() * 500,
					      &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
			      pipe);
		return;
	}
	I915_WRITE(WM3S_LP_IVB, sprite_wm);
}
2067
2068
2069
2070
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096
2097
2098
2099
2100void intel_update_watermarks(struct drm_device *dev)
2101{
2102 struct drm_i915_private *dev_priv = dev->dev_private;
2103
2104 if (dev_priv->display.update_wm)
2105 dev_priv->display.update_wm(dev);
2106}
2107
2108void intel_update_linetime_watermarks(struct drm_device *dev,
2109 int pipe, struct drm_display_mode *mode)
2110{
2111 struct drm_i915_private *dev_priv = dev->dev_private;
2112
2113 if (dev_priv->display.update_linetime_wm)
2114 dev_priv->display.update_linetime_wm(dev, pipe, mode);
2115}
2116
2117void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
2118 uint32_t sprite_width, int pixel_size)
2119{
2120 struct drm_i915_private *dev_priv = dev->dev_private;
2121
2122 if (dev_priv->display.update_sprite_wm)
2123 dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
2124 pixel_size);
2125}
2126
2127static struct drm_i915_gem_object *
2128intel_alloc_context_page(struct drm_device *dev)
2129{
2130 struct drm_i915_gem_object *ctx;
2131 int ret;
2132
2133 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2134
2135 ctx = i915_gem_alloc_object(dev, 4096);
2136 if (!ctx) {
2137 DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
2138 return NULL;
2139 }
2140
2141 ret = i915_gem_object_pin(ctx, 4096, true);
2142 if (ret) {
2143 DRM_ERROR("failed to pin power context: %d\n", ret);
2144 goto err_unref;
2145 }
2146
2147 ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
2148 if (ret) {
2149 DRM_ERROR("failed to set-domain on power context: %d\n", ret);
2150 goto err_unpin;
2151 }
2152
2153 return ctx;
2154
2155err_unpin:
2156 i915_gem_object_unpin(ctx);
2157err_unref:
2158 drm_gem_object_unreference(&ctx->base);
2159 mutex_unlock(&dev->struct_mutex);
2160 return NULL;
2161}
2162
/*
 * ironlake_set_drps - request a new render P-state via MEMSWCTL
 * @val: target frequency index
 *
 * Returns false without changing anything if the previous command is
 * still pending (MEMCTL_CMD_STS set); otherwise issues the frequency
 * change command and re-arms the status bit.
 */
bool ironlake_set_drps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl;

	rgvswctl = I915_READ16(MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		DRM_DEBUG("gpu busy, RCS change rejected\n");
		return false; /* still busy with another command */
	}

	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	I915_WRITE16(MEMSWCTL, rgvswctl);
	POSTING_READ16(MEMSWCTL);

	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE16(MEMSWCTL, rgvswctl);

	return true;
}
2184
/*
 * ironlake_enable_drps - enable dynamic render P-state switching
 *
 * Reads the frequency limits from MEMMODECTL, programs the averaging
 * intervals and thresholds, enables software frequency control, and
 * sets the starting frequency.  Also snapshots the counters used
 * later for power accounting (last_count1/2, last_time1/2).
 */
void ironlake_enable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rgvmodectl = I915_READ(MEMMODECTL);
	u8 fmax, fmin, fstart, vstart;

	/* Enable temp reporting.
	 * NOTE(review): 32-bit I915_READ paired with 16-bit I915_WRITE16
	 * on PMMISC/TSC1 — presumably intentional for these registers,
	 * but verify the register widths. */
	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);

	/* 100ms RC evaluation intervals */
	I915_WRITE(RCUPEI, 100000);
	I915_WRITE(RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	I915_WRITE(RCBMAXAVG, 90000);
	I915_WRITE(RCBMINAVG, 80000);

	I915_WRITE(MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;

	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
		PXVFREQ_PX_SHIFT;

	dev_priv->fmax = fmax; /* IPS callback will increase this */
	dev_priv->fstart = fstart;

	dev_priv->max_delay = fstart;
	dev_priv->min_delay = fmin;
	dev_priv->cur_delay = fstart;

	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
			 fmax, fmin, fstart);

	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	/*
	 * Interrupts will be enabled in irq_postinstall
	 */
	I915_WRITE(VIDSTART, vstart);
	POSTING_READ(VIDSTART);

	rgvmodectl |= MEMMODE_SWMODE_EN;
	I915_WRITE(MEMMODECTL, rgvmodectl);

	if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
		DRM_ERROR("stuck trying to change perf mode\n");
	msleep(1);

	ironlake_set_drps(dev, fstart);

	/* Snapshot the energy/busy counters for later delta accounting */
	dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
		I915_READ(0x112e0);
	dev_priv->last_time1 = jiffies_to_msecs(jiffies);
	dev_priv->last_count2 = I915_READ(0x112f4);
	getrawmonotonic(&dev_priv->last_time2);
}
2248
/*
 * ironlake_disable_drps - tear down dynamic render P-state switching
 *
 * Masks and acks the DRPS interrupts, then steps the GPU back to the
 * starting frequency before handing control back to the hardware.
 */
void ironlake_disable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl = I915_READ16(MEMSWCTL);

	/* Ack interrupts, disable EFC interrupt */
	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
	I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
	I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
	I915_WRITE(DEIIR, DE_PCU_EVENT);
	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);

	/* Go back to the starting frequency */
	ironlake_set_drps(dev, dev_priv->fstart);
	msleep(1);
	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE(MEMSWCTL, rgvswctl);
	msleep(1);

}
2269
/*
 * gen6_set_rps - request a new GPU frequency (RPS)
 * @val: requested frequency in hardware units
 *
 * Clamps @val into [min_delay, max_delay], programs the frequency
 * request, and sets the interrupt soft limits so we keep getting
 * up/down notifications until a hard limit is reached.  No-op when
 * already at the requested frequency.
 */
void gen6_set_rps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 limits;

	limits = 0;
	if (val >= dev_priv->max_delay)
		val = dev_priv->max_delay;
	else
		limits |= dev_priv->max_delay << 24;

	if (val <= dev_priv->min_delay)
		val = dev_priv->min_delay;
	else
		limits |= dev_priv->min_delay << 16;

	if (val == dev_priv->cur_delay)
		return;

	I915_WRITE(GEN6_RPNSWREQ,
		   GEN6_FREQUENCY(val) |
		   GEN6_OFFSET(0) |
		   GEN6_AGGRESSIVE_TURBO);

	/* Make sure we continue to get interrupts
	 * until we hit the minimum or maximum frequencies.
	 */
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);

	dev_priv->cur_delay = val;
}
2301
/*
 * gen6_disable_rps - disable RPS frequency scaling and its interrupts
 *
 * Parks the frequency request, masks every PM interrupt, and clears
 * any pending interrupt state (both the cached pm_iir and the IIR
 * register) so nothing fires after teardown.
 */
void gen6_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
	I915_WRITE(GEN6_PMIER, 0);

	/* Complete PM interrupt masking here doesn't race with the rps work
	 * item again unmasking PM interrupts because that is using a different
	 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
	 * stale bits in PMIIR and pm_iir, which we clear below. */
	spin_lock_irq(&dev_priv->rps_lock);
	dev_priv->pm_iir = 0;
	spin_unlock_irq(&dev_priv->rps_lock);

	I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
}
2320
2321int intel_enable_rc6(const struct drm_device *dev)
2322{
2323
2324
2325
2326 if (i915_enable_rc6 >= 0)
2327 return i915_enable_rc6;
2328
2329
2330
2331
2332 if (INTEL_INFO(dev)->gen == 5)
2333 return 0;
2334
2335
2336 if (IS_HASWELL(dev))
2337 return 0;
2338
2339
2340
2341
2342 if (INTEL_INFO(dev)->gen == 6) {
2343 DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
2344 return INTEL_RC6_ENABLE;
2345 }
2346 DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
2347 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
2348}
2349
/*
 * gen6_enable_rps - enable RC6 power states and RPS frequency scaling
 *
 * Clears RC state, programs the RC6 thresholds and wake limits, hands
 * the minimum-frequency table to the punit firmware over the PCODE
 * mailbox, checks for overclocking support, and finally unmasks the
 * PM interrupts used by the RPS work handler.  Holds struct_mutex and
 * a forcewake reference for the whole sequence.
 */
void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
	struct intel_ring_buffer *ring;
	u32 rp_state_cap;
	u32 gt_perf_status;
	u32 pcu_mbox, rc6_mask = 0;
	u32 gtfifodbg;
	int rc6_mode;
	int i;

	/* Here begins a magic sequence of register writes to enable
	 * auto-downclocking. */
	I915_WRITE(GEN6_RC_STATE, 0);
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* Clear any stale GT FIFO errors before touching RC6 */
	if ((gtfifodbg = I915_READ(GTFIFODBG))) {
		DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	gen6_gt_force_wake_get(dev_priv);

	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
	gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);

	/* Hardware min/max frequency capabilities */
	dev_priv->max_delay = rp_state_cap & 0xff;
	dev_priv->min_delay = (rp_state_cap & 0xff0000) >> 16;
	dev_priv->cur_delay = 0;

	/* disable the counters and set deterministic thresholds */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
	I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	for_each_ring(ring, dev_priv, i)
		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);

	I915_WRITE(GEN6_RC_SLEEP, 0);
	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
	I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
	I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000);

	/* Which RC6 states does the platform/modparam allow? */
	rc6_mode = intel_enable_rc6(dev_priv->dev);
	if (rc6_mode & INTEL_RC6_ENABLE)
		rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;

	if (rc6_mode & INTEL_RC6p_ENABLE)
		rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;

	if (rc6_mode & INTEL_RC6pp_ENABLE)
		rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;

	DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
		 (rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off",
		 (rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off",
		 (rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off");

	I915_WRITE(GEN6_RC_CONTROL,
		   rc6_mask |
		   GEN6_RC_CTL_EI_MODE(1) |
		   GEN6_RC_CTL_HW_ENABLE);

	I915_WRITE(GEN6_RPNSWREQ,
		   GEN6_FREQUENCY(10) |
		   GEN6_OFFSET(0) |
		   GEN6_AGGRESSIVE_TURBO);
	I915_WRITE(GEN6_RC_VIDEO_FREQ,
		   GEN6_FREQUENCY(12));

	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
		   dev_priv->max_delay << 24 |
		   dev_priv->min_delay << 16);
	I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
	I915_WRITE(GEN6_RP_UP_EI, 100000);
	I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_CONT);

	/* Hand the min-frequency table to the punit via the mailbox */
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");

	I915_WRITE(GEN6_PCODE_DATA, 0);
	I915_WRITE(GEN6_PCODE_MAILBOX,
		   GEN6_PCODE_READY |
		   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");

	/* Check for overclock support */
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
	pcu_mbox = I915_READ(GEN6_PCODE_DATA);
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
	if (pcu_mbox & (1<<31)) { /* OC supported */
		dev_priv->max_delay = pcu_mbox & 0xff;
		DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
	}

	/* Start at the current hardware frequency */
	gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);

	/* Enable the PM interrupts driving the RPS work handler */
	I915_WRITE(GEN6_PMIER,
		   GEN6_PM_MBOX_EVENT |
		   GEN6_PM_THERMAL_EVENT |
		   GEN6_PM_RP_DOWN_TIMEOUT |
		   GEN6_PM_RP_UP_THRESHOLD |
		   GEN6_PM_RP_DOWN_THRESHOLD |
		   GEN6_PM_RP_UP_EI_EXPIRED |
		   GEN6_PM_RP_DOWN_EI_EXPIRED);
	spin_lock_irq(&dev_priv->rps_lock);
	WARN_ON(dev_priv->pm_iir != 0);
	I915_WRITE(GEN6_PMIMR, 0);
	spin_unlock_irq(&dev_priv->rps_lock);
	/* unmask all PM interrupts */
	I915_WRITE(GEN6_PMINTRMSK, 0);

	gen6_gt_force_wake_put(dev_priv);
	mutex_unlock(&dev_priv->dev->struct_mutex);
}
2494
/*
 * gen6_update_ring_freq - program the GPU-to-IA frequency table
 * @dev_priv: i915 device private
 *
 * For every GPU frequency step between max_delay and min_delay, tell the
 * PCU (via the GEN6 pcode mailbox) which IA (CPU) frequency ratio it
 * should pair with that GPU frequency.  Holds struct_mutex across the
 * mailbox writes.
 */
void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
{
	int min_freq = 15;
	int gpu_freq, ia_freq, max_ia_freq;
	int scaling_factor = 180;

	max_ia_freq = cpufreq_quick_get_max(0);
	/*
	 * Default to measured freq if cpufreq has no policy registered
	 * (cpufreq_quick_get_max() returns 0 in that case).
	 */
	if (!max_ia_freq)
		max_ia_freq = tsc_khz;

	/* Convert from kHz to MHz */
	max_ia_freq /= 1000;

	mutex_lock(&dev_priv->dev->struct_mutex);

	/*
	 * For each potential GPU frequency, load a ring frequency we'd like
	 * to use for memory access.  We do this by specifying the IA
	 * frequency the PCU should use as a reference to determine the ring
	 * frequency.
	 */
	for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
	     gpu_freq--) {
		int diff = dev_priv->max_delay - gpu_freq;

		/*
		 * Below min_freq, pin the IA ratio to a fixed floor;
		 * otherwise scale it down linearly from max_ia_freq.
		 * NOTE(review): 15/800/180 look empirically chosen — confirm
		 * against the PM documentation.
		 */
		if (gpu_freq < min_freq)
			ia_freq = 800;
		else
			ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
		ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);

		I915_WRITE(GEN6_PCODE_DATA,
			   (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
			   gpu_freq);
		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
			   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
		/* Wait for the mailbox READY bit to clear (command consumed). */
		if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
			      GEN6_PCODE_READY) == 0, 10)) {
			DRM_ERROR("pcode write of freq table timed out\n");
			continue;
		}
	}

	mutex_unlock(&dev_priv->dev->struct_mutex);
}
2547
2548static void ironlake_teardown_rc6(struct drm_device *dev)
2549{
2550 struct drm_i915_private *dev_priv = dev->dev_private;
2551
2552 if (dev_priv->renderctx) {
2553 i915_gem_object_unpin(dev_priv->renderctx);
2554 drm_gem_object_unreference(&dev_priv->renderctx->base);
2555 dev_priv->renderctx = NULL;
2556 }
2557
2558 if (dev_priv->pwrctx) {
2559 i915_gem_object_unpin(dev_priv->pwrctx);
2560 drm_gem_object_unreference(&dev_priv->pwrctx->base);
2561 dev_priv->pwrctx = NULL;
2562 }
2563}
2564
/*
 * ironlake_disable_rc6 - disable render standby (RC6) on Ironlake
 * @dev: drm device
 *
 * If a power context is programmed (PWRCTXA non-zero), force the render
 * unit out of RC6, clear the power context register and restore
 * RSTDBYCTL, then free the context pages.
 */
void ironlake_disable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (I915_READ(PWRCTXA)) {
		/* Wake the GPU, prevent RC6, then do the teardown */
		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
		wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
			 50);
		/* Timeout is only logged implicitly; disable proceeds anyway. */
		I915_WRITE(PWRCTXA, 0);
		POSTING_READ(PWRCTXA);

		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
		POSTING_READ(RSTDBYCTL);
	}

	ironlake_teardown_rc6(dev);
}
2584
2585static int ironlake_setup_rc6(struct drm_device *dev)
2586{
2587 struct drm_i915_private *dev_priv = dev->dev_private;
2588
2589 if (dev_priv->renderctx == NULL)
2590 dev_priv->renderctx = intel_alloc_context_page(dev);
2591 if (!dev_priv->renderctx)
2592 return -ENOMEM;
2593
2594 if (dev_priv->pwrctx == NULL)
2595 dev_priv->pwrctx = intel_alloc_context_page(dev);
2596 if (!dev_priv->pwrctx) {
2597 ironlake_teardown_rc6(dev);
2598 return -ENOMEM;
2599 }
2600
2601 return 0;
2602}
2603
/*
 * ironlake_enable_rc6 - enable render standby (RC6) on Ironlake
 * @dev: drm device
 *
 * Allocates the RC6 context pages, emits an MI_SET_CONTEXT sequence so
 * the GPU can save/restore render state, waits for it to complete, then
 * arms the power context and clears the software RC6-exit override.
 * Bails out silently (with cleanup) on any failure.
 */
void ironlake_enable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
	int ret;

	/* Honor the module/chipset RC6 policy; do nothing when disabled. */
	if (!intel_enable_rc6(dev))
		return;

	mutex_lock(&dev->struct_mutex);
	ret = ironlake_setup_rc6(dev);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return;
	}

	/*
	 * GPU can automatically power down the render unit if given a page
	 * to save state.  Program that via a ring command sequence.
	 */
	ret = intel_ring_begin(ring, 6);
	if (ret) {
		ironlake_teardown_rc6(dev);
		mutex_unlock(&dev->struct_mutex);
		return;
	}

	intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
	intel_ring_emit(ring, MI_SET_CONTEXT);
	intel_ring_emit(ring, dev_priv->renderctx->gtt_offset |
			MI_MM_SPACE_GTT |
			MI_SAVE_EXT_STATE_EN |
			MI_RESTORE_EXT_STATE_EN |
			MI_RESTORE_INHIBIT);
	intel_ring_emit(ring, MI_SUSPEND_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_advance(ring);

	/*
	 * Wait for the command parser to advance past MI_SET_CONTEXT before
	 * enabling the power context, so renderctx is known-valid to the HW.
	 */
	ret = intel_wait_ring_idle(ring);
	if (ret) {
		DRM_ERROR("failed to enable ironlake power power savings\n");
		ironlake_teardown_rc6(dev);
		mutex_unlock(&dev->struct_mutex);
		return;
	}

	I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
	mutex_unlock(&dev->struct_mutex);
}
2663
2664static unsigned long intel_pxfreq(u32 vidfreq)
2665{
2666 unsigned long freq;
2667 int div = (vidfreq & 0x3f0000) >> 16;
2668 int post = (vidfreq & 0x3000) >> 12;
2669 int pre = (vidfreq & 0x7);
2670
2671 if (!pre)
2672 return 0;
2673
2674 freq = ((div * 133333) / ((1<<post) * pre));
2675
2676 return freq;
2677}
2678
/*
 * Chipset power coefficient table, matched against dev_priv->c_m (i) and
 * dev_priv->r_t (t) in i915_chipset_val(); m and c are the slope/offset
 * used to convert the energy-counter delta into a power value.
 * NOTE(review): t values look like memory clocks in MHz — confirm.
 */
static const struct cparams {
	u16 i;
	u16 t;
	u16 m;
	u16 c;
} cparams[] = {
	{ 1, 1333, 301, 28664 },
	{ 1, 1066, 294, 24460 },
	{ 1, 800, 294, 25192 },
	{ 0, 1333, 276, 27605 },
	{ 0, 1066, 276, 27605 },
	{ 0, 800, 231, 23784 },
};
2692
/*
 * i915_chipset_val - estimate current chipset power draw
 * @dev_priv: i915 device private
 *
 * Sums the DMIEC/DDREC/CSIEC energy counters, converts the delta since
 * the last sample into a power estimate using the cparams coefficients,
 * and caches the result in dev_priv->chipset_power.  Rate-limited: if
 * called again within 10ms it returns the cached value.
 */
unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
	u64 total_count, diff, ret;
	u32 count1, count2, count3, m = 0, c = 0;
	unsigned long now = jiffies_to_msecs(jiffies), diff1;
	int i;

	diff1 = now - dev_priv->last_time1;

	/*
	 * Sampling too often also risks diff1 == 0 and a divide-by-zero
	 * below, so short intervals return the cached value.
	 */
	if (diff1 <= 10)
		return dev_priv->chipset_power;

	count1 = I915_READ(DMIEC);
	count2 = I915_READ(DDREC);
	count3 = I915_READ(CSIEC);

	total_count = count1 + count2 + count3;

	/*
	 * Handle counter wraparound.
	 * NOTE(review): ~0UL is 2^64-1 on 64-bit kernels while the three HW
	 * counters are 32-bit each; verify the width of last_count1 — this
	 * wrap estimate looks suspect there.
	 */
	if (total_count < dev_priv->last_count1) {
		diff = ~0UL - dev_priv->last_count1;
		diff += total_count;
	} else {
		diff = total_count - dev_priv->last_count1;
	}

	/* Pick the slope/offset pair matching the current memory config. */
	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
		if (cparams[i].i == dev_priv->c_m &&
		    cparams[i].t == dev_priv->r_t) {
			m = cparams[i].m;
			c = cparams[i].c;
			break;
		}
	}

	/* (m * counts-per-ms + c) / 10; m,c stay 0 if no table entry matched */
	diff = div_u64(diff, diff1);
	ret = ((m * diff) + c);
	ret = div_u64(ret, 10);

	dev_priv->last_count1 = total_count;
	dev_priv->last_time1 = now;

	dev_priv->chipset_power = ret;

	return ret;
}
2744
2745unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
2746{
2747 unsigned long m, x, b;
2748 u32 tsfs;
2749
2750 tsfs = I915_READ(TSFS);
2751
2752 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
2753 x = I915_READ8(TR1);
2754
2755 b = tsfs & TSFS_INTR_MASK;
2756
2757 return ((m * x) / 127) - b;
2758}
2759
/*
 * Translate a 7-bit P-state voltage ID into an external voltage value.
 * Two columns: vd for desktop parts, vm for mobile parts, selected by
 * dev_priv->info->is_mobile.  The table has 128 entries, matching the
 * caller's (pxvid & 0x7f) masking in i915_gfx_val(); there is no bounds
 * check here, so callers must mask.  Units are not stated in this file;
 * the value is only used as a scale factor in i915_gfx_val().
 */
static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
	static const struct v_table {
		u16 vd;	/* desktop voltage value */
		u16 vm;	/* mobile voltage value */
	} v_table[] = {
		{ 0, 0, },
		{ 375, 0, },
		{ 500, 0, },
		{ 625, 0, },
		{ 750, 0, },
		{ 875, 0, },
		{ 1000, 0, },
		{ 1125, 0, },
		/* VIDs 8-31 all map to a single plateau */
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		/* VIDs 32-127 step linearly by 125 per step */
		{ 4250, 3125, },
		{ 4375, 3250, },
		{ 4500, 3375, },
		{ 4625, 3500, },
		{ 4750, 3625, },
		{ 4875, 3750, },
		{ 5000, 3875, },
		{ 5125, 4000, },
		{ 5250, 4125, },
		{ 5375, 4250, },
		{ 5500, 4375, },
		{ 5625, 4500, },
		{ 5750, 4625, },
		{ 5875, 4750, },
		{ 6000, 4875, },
		{ 6125, 5000, },
		{ 6250, 5125, },
		{ 6375, 5250, },
		{ 6500, 5375, },
		{ 6625, 5500, },
		{ 6750, 5625, },
		{ 6875, 5750, },
		{ 7000, 5875, },
		{ 7125, 6000, },
		{ 7250, 6125, },
		{ 7375, 6250, },
		{ 7500, 6375, },
		{ 7625, 6500, },
		{ 7750, 6625, },
		{ 7875, 6750, },
		{ 8000, 6875, },
		{ 8125, 7000, },
		{ 8250, 7125, },
		{ 8375, 7250, },
		{ 8500, 7375, },
		{ 8625, 7500, },
		{ 8750, 7625, },
		{ 8875, 7750, },
		{ 9000, 7875, },
		{ 9125, 8000, },
		{ 9250, 8125, },
		{ 9375, 8250, },
		{ 9500, 8375, },
		{ 9625, 8500, },
		{ 9750, 8625, },
		{ 9875, 8750, },
		{ 10000, 8875, },
		{ 10125, 9000, },
		{ 10250, 9125, },
		{ 10375, 9250, },
		{ 10500, 9375, },
		{ 10625, 9500, },
		{ 10750, 9625, },
		{ 10875, 9750, },
		{ 11000, 9875, },
		{ 11125, 10000, },
		{ 11250, 10125, },
		{ 11375, 10250, },
		{ 11500, 10375, },
		{ 11625, 10500, },
		{ 11750, 10625, },
		{ 11875, 10750, },
		{ 12000, 10875, },
		{ 12125, 11000, },
		{ 12250, 11125, },
		{ 12375, 11250, },
		{ 12500, 11375, },
		{ 12625, 11500, },
		{ 12750, 11625, },
		{ 12875, 11750, },
		{ 13000, 11875, },
		{ 13125, 12000, },
		{ 13250, 12125, },
		{ 13375, 12250, },
		{ 13500, 12375, },
		{ 13625, 12500, },
		{ 13750, 12625, },
		{ 13875, 12750, },
		{ 14000, 12875, },
		{ 14125, 13000, },
		{ 14250, 13125, },
		{ 14375, 13250, },
		{ 14500, 13375, },
		{ 14625, 13500, },
		{ 14750, 13625, },
		{ 14875, 13750, },
		{ 15000, 13875, },
		{ 15125, 14000, },
		{ 15250, 14125, },
		{ 15375, 14250, },
		{ 15500, 14375, },
		{ 15625, 14500, },
		{ 15750, 14625, },
		{ 15875, 14750, },
		{ 16000, 14875, },
		{ 16125, 15000, },
	};
	if (dev_priv->info->is_mobile)
		return v_table[pxvid].vm;
	else
		return v_table[pxvid].vd;
}
2900
/*
 * i915_update_gfx_val - refresh the cached graphics power estimate
 * @dev_priv: i915 device private
 *
 * Samples the GFXEC energy counter, converts the delta since the last
 * sample into a rate and stores it in dev_priv->gfx_power.  Only
 * implemented for gen5; a no-op elsewhere.  Returns early (keeping the
 * old value) if no measurable time has elapsed.
 */
void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	struct timespec now, diff1;
	u64 diff;
	unsigned long diffms;
	u32 count;

	if (dev_priv->info->gen != 5)
		return;

	getrawmonotonic(&now);
	diff1 = timespec_sub(now, dev_priv->last_time2);

	/* Don't divide by 0 below */
	diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
	if (!diffms)
		return;

	count = I915_READ(GFXEC);

	/*
	 * Counter wraparound handling.
	 * NOTE(review): ~0UL is 2^64-1 on 64-bit kernels but GFXEC is a
	 * 32-bit register; if last_count2 is u32 this produces a huge diff
	 * on wrap — confirm field width.
	 */
	if (count < dev_priv->last_count2) {
		diff = ~0UL - dev_priv->last_count2;
		diff += count;
	} else {
		diff = count - dev_priv->last_count2;
	}

	dev_priv->last_count2 = count;
	dev_priv->last_time2 = now;

	/* Empirically derived scaling: counts * 1181 / (ms * 10). */
	diff = diff * 1181;
	diff = div_u64(diff, diffms * 10);
	dev_priv->gfx_power = diff;
}
2936
/*
 * i915_gfx_val - estimate current graphics power draw
 * @dev_priv: i915 device private
 *
 * Combines the cached gfx_power rate with a voltage/temperature based
 * correction derived from the current P-state's VID and the fused
 * correction factor (dev_priv->corr).  The constants below are
 * empirical; treat them as opaque calibration data.
 */
unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
	unsigned long t, corr, state1, corr2, state2;
	u32 pxvid, ext_v;

	/* VID of the current P-state, converted to an external voltage. */
	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
	pxvid = (pxvid >> 24) & 0x7f;
	ext_v = pvid_to_extvid(dev_priv, pxvid);

	state1 = ext_v;

	t = i915_mch_val(dev_priv);

	/* Piecewise-linear temperature correction (empirical constants). */
	if (t > 80)
		corr = ((t * 2349) + 135940);
	else if (t >= 50)
		corr = ((t * 964) + 29317);
	else /* t < 50 */
		corr = ((t * 301) + 1004);

	corr = corr * ((150142 * state1) / 10000 - 78642);
	corr /= 100000;
	corr2 = (corr * dev_priv->corr);

	state2 = (corr2 * state1) / 10000;
	state2 /= 100; /* presumably converts to the final power unit */

	/* Refresh the counter-based component before summing. */
	i915_update_gfx_val(dev_priv);

	return dev_priv->gfx_power + state2;
}
2971
2972
/*
 * Device currently exposed to the IPS driver via the i915_gpu_*() /
 * i915_read_mch_val() exported hooks below; NULL when no device is
 * registered.  Protected by mchdev_lock.
 */
static struct drm_i915_private *i915_mch_dev;

/*
 * Lock protecting the IPS interface state:
 *   - i915_mch_dev
 *   - dev_priv->max_delay (adjusted by i915_gpu_raise/lower/turbo_disable)
 * A pointer to this lock is also published in dev_priv->mchdev_lock.
 */
static DEFINE_SPINLOCK(mchdev_lock);
2983
2984
2985
2986
2987
2988
2989
2990unsigned long i915_read_mch_val(void)
2991{
2992 struct drm_i915_private *dev_priv;
2993 unsigned long chipset_val, graphics_val, ret = 0;
2994
2995 spin_lock(&mchdev_lock);
2996 if (!i915_mch_dev)
2997 goto out_unlock;
2998 dev_priv = i915_mch_dev;
2999
3000 chipset_val = i915_chipset_val(dev_priv);
3001 graphics_val = i915_gfx_val(dev_priv);
3002
3003 ret = chipset_val + graphics_val;
3004
3005out_unlock:
3006 spin_unlock(&mchdev_lock);
3007
3008 return ret;
3009}
3010EXPORT_SYMBOL_GPL(i915_read_mch_val);
3011
3012
3013
3014
3015
3016
3017bool i915_gpu_raise(void)
3018{
3019 struct drm_i915_private *dev_priv;
3020 bool ret = true;
3021
3022 spin_lock(&mchdev_lock);
3023 if (!i915_mch_dev) {
3024 ret = false;
3025 goto out_unlock;
3026 }
3027 dev_priv = i915_mch_dev;
3028
3029 if (dev_priv->max_delay > dev_priv->fmax)
3030 dev_priv->max_delay--;
3031
3032out_unlock:
3033 spin_unlock(&mchdev_lock);
3034
3035 return ret;
3036}
3037EXPORT_SYMBOL_GPL(i915_gpu_raise);
3038
3039
3040
3041
3042
3043
3044
3045bool i915_gpu_lower(void)
3046{
3047 struct drm_i915_private *dev_priv;
3048 bool ret = true;
3049
3050 spin_lock(&mchdev_lock);
3051 if (!i915_mch_dev) {
3052 ret = false;
3053 goto out_unlock;
3054 }
3055 dev_priv = i915_mch_dev;
3056
3057 if (dev_priv->max_delay < dev_priv->min_delay)
3058 dev_priv->max_delay++;
3059
3060out_unlock:
3061 spin_unlock(&mchdev_lock);
3062
3063 return ret;
3064}
3065EXPORT_SYMBOL_GPL(i915_gpu_lower);
3066
3067
3068
3069
3070
3071
3072bool i915_gpu_busy(void)
3073{
3074 struct drm_i915_private *dev_priv;
3075 bool ret = false;
3076
3077 spin_lock(&mchdev_lock);
3078 if (!i915_mch_dev)
3079 goto out_unlock;
3080 dev_priv = i915_mch_dev;
3081
3082 ret = dev_priv->busy;
3083
3084out_unlock:
3085 spin_unlock(&mchdev_lock);
3086
3087 return ret;
3088}
3089EXPORT_SYMBOL_GPL(i915_gpu_busy);
3090
3091
3092
3093
3094
3095
3096
3097bool i915_gpu_turbo_disable(void)
3098{
3099 struct drm_i915_private *dev_priv;
3100 bool ret = true;
3101
3102 spin_lock(&mchdev_lock);
3103 if (!i915_mch_dev) {
3104 ret = false;
3105 goto out_unlock;
3106 }
3107 dev_priv = i915_mch_dev;
3108
3109 dev_priv->max_delay = dev_priv->fstart;
3110
3111 if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
3112 ret = false;
3113
3114out_unlock:
3115 spin_unlock(&mchdev_lock);
3116
3117 return ret;
3118}
3119EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
3120
3121
3122
3123
3124
3125
3126
3127
3128
3129static void
3130ips_ping_for_i915_load(void)
3131{
3132 void (*link)(void);
3133
3134 link = symbol_get(ips_link_to_i915_driver);
3135 if (link) {
3136 link();
3137 symbol_put(ips_link_to_i915_driver);
3138 }
3139}
3140
/*
 * Publish @dev_priv as the device backing the exported IPS hooks, share
 * the lock pointer with it, then poke the IPS module (if loaded).
 */
void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
{
	spin_lock(&mchdev_lock);
	i915_mch_dev = dev_priv;
	dev_priv->mchdev_lock = &mchdev_lock;
	spin_unlock(&mchdev_lock);

	ips_ping_for_i915_load();
}
3150
/*
 * Unpublish the device; subsequent calls to the exported IPS hooks will
 * see i915_mch_dev == NULL and bail out.
 */
void intel_gpu_ips_teardown(void)
{
	spin_lock(&mchdev_lock);
	i915_mch_dev = NULL;
	spin_unlock(&mchdev_lock);
}
3157
/*
 * intel_init_emon - program the energy monitoring (EMON) hardware
 * @dev: drm device
 *
 * Writes the event/energy weight tables, computes per-P-state weights
 * from the PXVFREQ registers, enables the counters, and caches the fused
 * correction factor in dev_priv->corr for use by i915_gfx_val().
 */
void intel_init_emon(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 lcfuse;
	u8 pxw[16];
	int i;

	/* Disable the counters while (re)programming */
	I915_WRITE(ECR, 0);
	POSTING_READ(ECR);

	/* Program energy weights for various events */
	I915_WRITE(SDEW, 0x15040d00);
	I915_WRITE(CSIEW0, 0x007f0000);
	I915_WRITE(CSIEW1, 0x1e220004);
	I915_WRITE(CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		I915_WRITE(PEW + (i * 4), 0);
	for (i = 0; i < 3; i++)
		I915_WRITE(DEW + (i * 4), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
		unsigned long freq = intel_pxfreq(pxvidfreq);
		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
			PXVFREQ_PX_SHIFT;
		unsigned long val;

		val = vid * vid;
		val *= (freq / 1000);
		val *= 255;
		val /= (127*127*900);
		if (val > 0xff)
			DRM_ERROR("bad pxval: %ld\n", val);
		/* NOTE(review): out-of-range values are logged but still
		 * stored truncated to 8 bits here. */
		pxw[i] = val;
	}
	/* The two standby states get a weight of zero */
	pxw[14] = 0;
	pxw[15] = 0;

	/* Pack the 16 byte weights into four 32-bit PXW registers */
	for (i = 0; i < 4; i++) {
		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
		I915_WRITE(PXW + (i * 4), val);
	}

	/* Remaining thresholds/guardbands: opaque calibration values */
	I915_WRITE(OGW0, 0);
	I915_WRITE(OGW1, 0);
	I915_WRITE(EG0, 0x00007f00);
	I915_WRITE(EG1, 0x0000000e);
	I915_WRITE(EG2, 0x000e0000);
	I915_WRITE(EG3, 0x68000300);
	I915_WRITE(EG4, 0x42000000);
	I915_WRITE(EG5, 0x00140031);
	I915_WRITE(EG6, 0);
	I915_WRITE(EG7, 0);

	for (i = 0; i < 8; i++)
		I915_WRITE(PXWL + (i * 4), 0);

	/* Re-enable the counters */
	I915_WRITE(ECR, 0x80000019);

	lcfuse = I915_READ(LCFUSE02);

	dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
}
3228
/*
 * Ironlake (gen5) clock gating setup and chicken-bit workarounds,
 * applied once at init.  Register values are hardware workarounds; do
 * not reorder without consulting the relevant errata.
 */
static void ironlake_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	/* Keep the FBC-related display units out of clock gating */
	dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
		   DPFCRUNIT_CLOCK_GATE_DISABLE |
		   DPFDUNIT_CLOCK_GATE_DISABLE;
	/* ...and the display arbitration unit */
	dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_3DCGDIS0,
		   MARIUNIT_CLOCK_GATE_DISABLE |
		   SVSMUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(PCH_3DCGDIS1,
		   VFMUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	/*
	 * Memory self-refresh / watermark related chicken bits; the WM*
	 * registers are cleared so watermark code starts from scratch.
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
	I915_WRITE(ILK_DSPCLK_GATE,
		   (I915_READ(ILK_DSPCLK_GATE) |
		    ILK_DPARB_CLK_GATE));
	I915_WRITE(DISP_ARB_CTL,
		   (I915_READ(DISP_ARB_CTL) |
		    DISP_FBC_WM_DIS));
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/*
	 * Mobile parts (Ironlake-M) need extra FBC-related gating disables
	 * — presumably FBC workarounds; confirm against the errata list.
	 */
	if (IS_IRONLAKE_M(dev)) {
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
		I915_WRITE(ILK_DISPLAY_CHICKEN2,
			   I915_READ(ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE);
		I915_WRITE(ILK_DSPCLK_GATE,
			   I915_READ(ILK_DSPCLK_GATE) |
			   ILK_DPFC_DIS1 |
			   ILK_DPFC_DIS2 |
			   ILK_CLK_FBC);
	}

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);
	/* Masked-bit register: high half selects which bits to update */
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);
}
3297
/*
 * Sandybridge (gen6) clock gating setup and chicken-bit workarounds,
 * applied once at init.
 */
static void gen6_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	/* Clear the LP watermarks; watermark code reprograms them */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));

	I915_WRITE(GEN6_UCGCTL1,
		   I915_READ(GEN6_UCGCTL1) |
		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/*
	 * Disable RCPB/RCC unit clock gating — hardware workaround;
	 * presumably required to avoid hangs (confirm against the SNB
	 * workaround list before changing).
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/* Masked-bit write: update all 16 low bits, set fastclip-cull bit */
	I915_WRITE(_3D_CHICKEN, (0xFFFF << 16) |
		   _3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL);

	/*
	 * Display chicken bits mirroring the Ironlake-M FBC workarounds,
	 * applied unconditionally on gen6.
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE,
		   I915_READ(ILK_DSPCLK_GATE) |
		   ILK_DPARB_CLK_GATE |
		   ILK_DPFD_CLK_GATE);

	/* Disable trickle feed on every plane and flush the change */
	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}
}
3367
3368static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
3369{
3370 uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
3371
3372 reg &= ~GEN7_FF_SCHED_MASK;
3373 reg |= GEN7_FF_TS_SCHED_HW;
3374 reg |= GEN7_FF_VS_SCHED_HW;
3375 reg |= GEN7_FF_DS_SCHED_HW;
3376
3377 I915_WRITE(GEN7_FF_THREAD_MODE, reg);
3378}
3379
/*
 * Ivybridge (gen7) clock gating setup and chicken-bit workarounds,
 * applied once at init.
 */
static void ivybridge_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	/* Clear the LP watermarks; watermark code reprograms them */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/* Disable RCZ unit clock gating — gen7 hardware workaround */
	I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);

	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* Disable the RHWO optimization in RCC (render hang workaround) */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* L3 control/chicken workaround values */
	I915_WRITE(GEN7_L3CNTLREG1,
			GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
			GEN7_WA_L3_CHICKEN_MODE);

	/* MBC unit SQ intermittent-behavior chicken bit */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	/* Disable trickle feed on every plane and flush the change */
	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}

	gen7_setup_fixed_func_scheduler(dev_priv);

	/* Masked-bit write: disable the pixel subspan collect optimization */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
}
3431
/*
 * Valleyview clock gating setup — largely mirrors the Ivybridge
 * sequence (same gen7 workarounds) minus the fixed-function scheduler
 * setup.
 */
static void valleyview_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	/* Clear the LP watermarks; watermark code reprograms them */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/* Disable RCZ unit clock gating — gen7 hardware workaround */
	I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);

	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* Disable the RHWO optimization in RCC (render hang workaround) */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* L3 control/chicken workaround values */
	I915_WRITE(GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);

	/* MBC unit SQ intermittent-behavior chicken bit */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	/* Disable trickle feed on every plane and flush the change */
	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}

	/* Masked-bit write: disable the pixel subspan collect optimization */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
}
3478
/*
 * G4x clock gating setup: disable gating for the render VF/GS/CL units
 * and selected display units; GM45 additionally keeps the DSS unit
 * ungated.
 */
static void g4x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate;

	I915_WRITE(RENCLK_GATE_D1, 0);
	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
		   GS_UNIT_CLOCK_GATE_DISABLE |
		   CL_UNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(RAMCLK_GATE_D, 0);
	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
		OVRUNIT_CLOCK_GATE_DISABLE |
		OVCUNIT_CLOCK_GATE_DISABLE;
	if (IS_GM45(dev))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
}
3496
/*
 * Crestline (965GM) clock gating: disable RCC gating, enable gating
 * everywhere else by zeroing the remaining gate registers.
 */
static void crestline_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	I915_WRITE16(DEUC, 0);
}
3507
/*
 * Broadwater (965G) clock gating: keep several render units (RCZ, RCC,
 * RCPB, ISC, FBC) out of clock gating.
 */
static void broadwater_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
}
3519
/*
 * Gen3 clock gating: enable gfx and dot-clock gating and allow the PLL
 * to power down in D3.  Pineview additionally restricts ECO gating to
 * the CX domain (masked-bit write).
 */
static void gen3_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dstate = I915_READ(D_STATE);

	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
		DSTATE_DOT_CLOCK_GATING;
	I915_WRITE(D_STATE, dstate);

	if (IS_PINEVIEW(dev))
		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
}
3532
/* i85x clock gating: keep the SV unit out of clock gating. */
static void i85x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
}
3539
/* i830 clock gating: keep the overlay render unit out of clock gating. */
static void i830_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
}
3546
/*
 * Ibex Peak PCH clock gating: keep the DPLS unit ungated — presumably
 * required for the panel power sequencer to start; confirm against the
 * PCH errata.
 */
static void ibx_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
3558
/*
 * Cougar Point PCH clock gating: keep the DPLS unit ungated (same note
 * as ibx_init_clock_gating), apply the eDP PPS fix, and disable the
 * auto-train generated stall on every transcoder.
 */
static void cpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
		   DPLS_EDP_PPS_FIX_DIS);

	for_each_pipe(pipe)
		I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
}
3576
/*
 * Apply the platform clock gating hook, then the optional PCH-specific
 * hook.  ->init_clock_gating is assumed to be set for every platform
 * (it is assigned unconditionally in intel_init_pm()).
 */
void intel_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->display.init_clock_gating(dev);

	if (dev_priv->display.init_pch_clock_gating)
		dev_priv->display.init_pch_clock_gating(dev);
}
3586
/*
 * Verify GEN6_RP_INTERRUPT_LIMITS matches the software min/max frequency
 * limits (a previous driver instance or the BIOS may have left a stale
 * value) and rewrite it if not.  Takes/releases forcewake around the
 * register access.
 */
static void gen6_sanitize_pm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 limits, delay, old;

	gen6_gt_force_wake_get(dev_priv);

	old = limits = I915_READ(GEN6_RP_INTERRUPT_LIMITS);
	/*
	 * Rebuild the two 6-bit limit fields: only set a limit when the
	 * current delay has not yet reached it, so we keep getting
	 * interrupts until we hit the minimum or maximum frequency.
	 */
	limits &= ~(0x3f << 16 | 0x3f << 24);
	delay = dev_priv->cur_delay;
	if (delay < dev_priv->max_delay)
		limits |= (dev_priv->max_delay & 0x3f) << 24;
	if (delay > dev_priv->min_delay)
		limits |= (dev_priv->min_delay & 0x3f) << 16;

	if (old != limits) {
		DRM_DEBUG_DRIVER("Power management discrepancy: GEN6_RP_INTERRUPT_LIMITS "
				 "expected %08x, was %08x\n", limits, old);
		I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
	}

	gen6_gt_force_wake_put(dev_priv);
}
3614
/* Invoke the platform PM sanitize hook, if one is installed. */
void intel_sanitize_pm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->display.sanitize_pm)
		dev_priv->display.sanitize_pm(dev);
}
3622
3623
3624
3625
3626void intel_init_power_wells(struct drm_device *dev)
3627{
3628 struct drm_i915_private *dev_priv = dev->dev_private;
3629 unsigned long power_wells[] = {
3630 HSW_PWR_WELL_CTL1,
3631 HSW_PWR_WELL_CTL2,
3632 HSW_PWR_WELL_CTL4
3633 };
3634 int i;
3635
3636 if (!IS_HASWELL(dev))
3637 return;
3638
3639 mutex_lock(&dev->struct_mutex);
3640
3641 for (i = 0; i < ARRAY_SIZE(power_wells); i++) {
3642 int well = I915_READ(power_wells[i]);
3643
3644 if ((well & HSW_PWR_WELL_STATE) == 0) {
3645 I915_WRITE(power_wells[i], well & HSW_PWR_WELL_ENABLE);
3646 if (wait_for(I915_READ(power_wells[i] & HSW_PWR_WELL_STATE), 20))
3647 DRM_ERROR("Error enabling power well %lx\n", power_wells[i]);
3648 }
3649 }
3650
3651 mutex_unlock(&dev->struct_mutex);
3652}
3653
3654
3655void intel_init_pm(struct drm_device *dev)
3656{
3657 struct drm_i915_private *dev_priv = dev->dev_private;
3658
3659 if (I915_HAS_FBC(dev)) {
3660 if (HAS_PCH_SPLIT(dev)) {
3661 dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
3662 dev_priv->display.enable_fbc = ironlake_enable_fbc;
3663 dev_priv->display.disable_fbc = ironlake_disable_fbc;
3664 } else if (IS_GM45(dev)) {
3665 dev_priv->display.fbc_enabled = g4x_fbc_enabled;
3666 dev_priv->display.enable_fbc = g4x_enable_fbc;
3667 dev_priv->display.disable_fbc = g4x_disable_fbc;
3668 } else if (IS_CRESTLINE(dev)) {
3669 dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
3670 dev_priv->display.enable_fbc = i8xx_enable_fbc;
3671 dev_priv->display.disable_fbc = i8xx_disable_fbc;
3672 }
3673
3674 }
3675
3676
3677 if (IS_PINEVIEW(dev))
3678 i915_pineview_get_mem_freq(dev);
3679 else if (IS_GEN5(dev))
3680 i915_ironlake_get_mem_freq(dev);
3681
3682
3683 if (HAS_PCH_SPLIT(dev)) {
3684 dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
3685 dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;
3686
3687
3688 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
3689 u32 ecobus;
3690
3691
3692
3693
3694
3695
3696
3697 mutex_lock(&dev->struct_mutex);
3698 __gen6_gt_force_wake_mt_get(dev_priv);
3699 ecobus = I915_READ_NOTRACE(ECOBUS);
3700 __gen6_gt_force_wake_mt_put(dev_priv);
3701 mutex_unlock(&dev->struct_mutex);
3702
3703 if (ecobus & FORCEWAKE_MT_ENABLE) {
3704 DRM_DEBUG_KMS("Using MT version of forcewake\n");
3705 dev_priv->display.force_wake_get =
3706 __gen6_gt_force_wake_mt_get;
3707 dev_priv->display.force_wake_put =
3708 __gen6_gt_force_wake_mt_put;
3709 }
3710 }
3711
3712 if (HAS_PCH_IBX(dev))
3713 dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
3714 else if (HAS_PCH_CPT(dev))
3715 dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;
3716
3717 if (IS_GEN5(dev)) {
3718 if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
3719 dev_priv->display.update_wm = ironlake_update_wm;
3720 else {
3721 DRM_DEBUG_KMS("Failed to get proper latency. "
3722 "Disable CxSR\n");
3723 dev_priv->display.update_wm = NULL;
3724 }
3725 dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
3726 } else if (IS_GEN6(dev)) {
3727 if (SNB_READ_WM0_LATENCY()) {
3728 dev_priv->display.update_wm = sandybridge_update_wm;
3729 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
3730 } else {
3731 DRM_DEBUG_KMS("Failed to read display plane latency. "
3732 "Disable CxSR\n");
3733 dev_priv->display.update_wm = NULL;
3734 }
3735 dev_priv->display.init_clock_gating = gen6_init_clock_gating;
3736 dev_priv->display.sanitize_pm = gen6_sanitize_pm;
3737 } else if (IS_IVYBRIDGE(dev)) {
3738
3739 if (SNB_READ_WM0_LATENCY()) {
3740 dev_priv->display.update_wm = sandybridge_update_wm;
3741 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
3742 } else {
3743 DRM_DEBUG_KMS("Failed to read display plane latency. "
3744 "Disable CxSR\n");
3745 dev_priv->display.update_wm = NULL;
3746 }
3747 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
3748 dev_priv->display.sanitize_pm = gen6_sanitize_pm;
3749 } else if (IS_HASWELL(dev)) {
3750 if (SNB_READ_WM0_LATENCY()) {
3751 dev_priv->display.update_wm = sandybridge_update_wm;
3752 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
3753 dev_priv->display.update_linetime_wm = haswell_update_linetime_wm;
3754 } else {
3755 DRM_DEBUG_KMS("Failed to read display plane latency. "
3756 "Disable CxSR\n");
3757 dev_priv->display.update_wm = NULL;
3758 }
3759 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
3760 dev_priv->display.sanitize_pm = gen6_sanitize_pm;
3761 } else
3762 dev_priv->display.update_wm = NULL;
3763 } else if (IS_VALLEYVIEW(dev)) {
3764 dev_priv->display.update_wm = valleyview_update_wm;
3765 dev_priv->display.init_clock_gating =
3766 valleyview_init_clock_gating;
3767 dev_priv->display.force_wake_get = vlv_force_wake_get;
3768 dev_priv->display.force_wake_put = vlv_force_wake_put;
3769 } else if (IS_PINEVIEW(dev)) {
3770 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
3771 dev_priv->is_ddr3,
3772 dev_priv->fsb_freq,
3773 dev_priv->mem_freq)) {
3774 DRM_INFO("failed to find known CxSR latency "
3775 "(found ddr%s fsb freq %d, mem freq %d), "
3776 "disabling CxSR\n",
3777 (dev_priv->is_ddr3 == 1) ? "3" : "2",
3778 dev_priv->fsb_freq, dev_priv->mem_freq);
3779
3780 pineview_disable_cxsr(dev);
3781 dev_priv->display.update_wm = NULL;
3782 } else
3783 dev_priv->display.update_wm = pineview_update_wm;
3784 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
3785 } else if (IS_G4X(dev)) {
3786 dev_priv->display.update_wm = g4x_update_wm;
3787 dev_priv->display.init_clock_gating = g4x_init_clock_gating;
3788 } else if (IS_GEN4(dev)) {
3789 dev_priv->display.update_wm = i965_update_wm;
3790 if (IS_CRESTLINE(dev))
3791 dev_priv->display.init_clock_gating = crestline_init_clock_gating;
3792 else if (IS_BROADWATER(dev))
3793 dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
3794 } else if (IS_GEN3(dev)) {
3795 dev_priv->display.update_wm = i9xx_update_wm;
3796 dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
3797 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
3798 } else if (IS_I865G(dev)) {
3799 dev_priv->display.update_wm = i830_update_wm;
3800 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
3801 dev_priv->display.get_fifo_size = i830_get_fifo_size;
3802 } else if (IS_I85X(dev)) {
3803 dev_priv->display.update_wm = i9xx_update_wm;
3804 dev_priv->display.get_fifo_size = i85x_get_fifo_size;
3805 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
3806 } else {
3807 dev_priv->display.update_wm = i830_update_wm;
3808 dev_priv->display.init_clock_gating = i830_init_clock_gating;
3809 if (IS_845G(dev))
3810 dev_priv->display.get_fifo_size = i845_get_fifo_size;
3811 else
3812 dev_priv->display.get_fifo_size = i830_get_fifo_size;
3813 }
3814
3815
3816
3817
3818 intel_init_power_wells(dev);
3819}
3820
3821