/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OF OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_vgpu.h"

#include <linux/pm_runtime.h>

#define FORCEWAKE_ACK_TIMEOUT_MS 50

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))

static const char * const forcewake_domain_names[] = {
	"render",
	"blitter",
	"media",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}

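/*
 * Each forcewake domain drives one GT power well through a pair of
 * registers: reg_set requests the well to stay awake, and reg_ack reports
 * the hardware's acknowledgement. The helpers below implement the raw
 * set/clear/wait steps on a single domain.
 */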
static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
	WARN_ON(!i915_mmio_reg_valid(d->reg_set));
	__raw_i915_write32(d->i915, d->reg_set, d->val_reset);
}

static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	mod_timer_pinned(&d->timer, jiffies + 1);
}

static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_set);
}

static inline void
fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_clear);
}

static inline void
fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
{
	/* something from same cacheline, but not from the set register */
	if (i915_mmio_reg_valid(d->reg_post))
		__raw_posting_read(d->i915, d->reg_post);
}

static void
fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
		fw_domain_wait_ack_clear(d);
		fw_domain_get(d);
		fw_domain_wait_ack(d);
	}
}

static void
fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
		fw_domain_put(d);
		fw_domain_posting_read(d);
	}
}

static void
fw_domains_posting_read(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	/* No need to do for all, just do for first found */
	for_each_fw_domain(d, dev_priv, id) {
		fw_domain_posting_read(d);
		break;
	}
}

static void
fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	if (dev_priv->uncore.fw_domains == 0)
		return;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id)
		fw_domain_reset(d);

	fw_domains_posting_read(dev_priv);
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get(dev_priv, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	fw_domains_put(dev_priv, fw_domains);
	gen6_gt_check_fifodbg(dev_priv);
}

static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
{
	u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}

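/*
 * On gen6/gen7 the GT keeps a shared FIFO of posted register writes; if
 * it fills up, writes can be lost (GTFIFODBG flags the error). Before
 * letting another write through, wait until at least
 * GT_FIFO_NUM_RESERVED_ENTRIES slots are free.
 */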
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/* On VLV, FIFO will be shared by both SW and HW.
	 * So, we need to read the FREE_ENTRIES every time. */
	if (IS_VALLEYVIEW(dev_priv->dev))
		dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = fifo_free_entries(dev_priv);

		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = fifo_free_entries(dev_priv);
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}

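/*
 * Forcewake releases are deferred: instead of dropping the reference
 * immediately, __intel_uncore_forcewake_put() keeps it and arms a
 * one-jiffy timer, so back-to-back register accesses do not bounce the
 * power well. This timer callback performs the actual release.
 */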
static void intel_uncore_fw_release_timer(unsigned long arg)
{
	struct intel_uncore_forcewake_domain *domain = (void *)arg;
	unsigned long irqflags;

	assert_rpm_device_not_suspended(domain->i915);

	spin_lock_irqsave(&domain->i915->uncore.lock, irqflags);
	if (WARN_ON(domain->wake_count == 0))
		domain->wake_count++;

	if (--domain->wake_count == 0)
		domain->i915->uncore.funcs.force_wake_put(domain->i915,
							  1 << domain->id);

	spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags);
}

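/*
 * Flush all pending release timers, force every forcewake register back
 * to its off state, and (with @restore set) re-acquire whatever domains
 * were still held by software before the reset.
 */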
void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domain_id id;
	enum forcewake_domains fw = 0, active_domains;

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		active_domains = 0;

		for_each_fw_domain(domain, dev_priv, id) {
			if (del_timer_sync(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer((unsigned long)domain);
		}

		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

		for_each_fw_domain(domain, dev_priv, id) {
			if (timer_pending(&domain->timer))
				active_domains |= (1 << id);
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
		cond_resched();
	}

	WARN_ON(active_domains);

	for_each_fw_domain(domain, dev_priv, id)
		if (domain->wake_count)
			fw |= 1 << id;

	if (fw)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);

	fw_domains_reset(dev_priv, FORCEWAKE_ALL);

	if (restore) { /* If reset with a user forcewake, try to restore */
		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev) || IS_GEN7(dev))
			dev_priv->uncore.fifo_count =
				fifo_free_entries(dev_priv);
	}

	if (!restore)
		assert_forcewakes_inactive(dev_priv);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void intel_uncore_ellc_detect(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if ((IS_HASWELL(dev) || IS_BROADWELL(dev) ||
	     INTEL_INFO(dev)->gen >= 9) &&
	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) & EDRAM_ENABLED)) {
		/* The docs do not explain exactly how the calculation can be
		 * made. It is somewhat guessable, but for now, it's always
		 * 128MB.
		 * NB: We can't write IDICR yet because we do not have gt funcs
		 * set up. */
		dev_priv->ellc_size = 128;
		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
	}
}

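/*
 * Unclaimed MMIO detection: accesses that no hardware unit claims set a
 * sticky error indication (FPGA_DBG_RM_NOCLAIM on HSW+, the CLAIM_ER
 * counter on VLV/CHV). These helpers report and clear that state so the
 * mmio_debug machinery can flag bogus register accesses.
 */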
static bool
fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 dbg;

	dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
		return false;

	__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	return true;
}

static bool
vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 cer;

	cer = __raw_i915_read32(dev_priv, CLAIM_ER);
	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
		return false;

	__raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);

	return true;
}

static bool
check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
		return fpga_check_for_unclaimed_mmio(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return vlv_check_for_unclaimed_mmio(dev_priv);

	return false;
}

static void __intel_uncore_early_sanitize(struct drm_device *dev,
					  bool restore_forcewake)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* clear out unclaimed mmio detection bit */
	if (check_for_unclaimed_mmio(dev_priv))
		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev) || IS_GEN7(dev))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(dev)) {
		__raw_i915_write32(dev_priv, GTFIFOCTL,
				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				   GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	intel_uncore_forcewake_reset(dev, restore_forcewake);
}

void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
{
	__intel_uncore_early_sanitize(dev, restore_forcewake);
	i915_check_and_clear_faults(dev);
}

void intel_uncore_sanitize(struct drm_device *dev)
{
	i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);

	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_disable_gt_powersave(dev);
}

static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (domain->wake_count++)
			fw_domains &= ~(1 << id);
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However, if some sequence requires the GT to not power down particular
 * forcewake domains, this function should be called at the beginning of
 * the sequence, and the reference should subsequently be dropped by a
 * symmetric call to intel_uncore_forcewake_put(). Usually the caller wants
 * all domains to be kept awake, in which case @fw_domains is FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_get(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	assert_spin_locked(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	__intel_uncore_forcewake_get(dev_priv, fw_domains);
}

static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (WARN_ON(domain->wake_count == 0))
			continue;

		if (--domain->wake_count)
			continue;

		domain->wake_count++;
		fw_domain_arm_timer(domain);
	}
}

/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references on
 *
 * This function drops the device-level forcewakes for specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_put(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references on
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	assert_spin_locked(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	__intel_uncore_forcewake_put(dev_priv, fw_domains);
}

void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	for_each_fw_domain(domain, dev_priv, id)
		WARN_ON(domain->wake_count);
}

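/*
 * Which forcewake domain(s) a register access needs depends purely on its
 * offset. The per-platform range macros below encode which MMIO ranges
 * sit behind the render, media, common or blitter power wells; the
 * generated accessors consult them to decide what to wake.
 */
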
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)

#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))

#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5000, 0x8000) || \
	 REG_RANGE((reg), 0xB000, 0x12000) || \
	 REG_RANGE((reg), 0x2E000, 0x30000))

#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x22000, 0x24000) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5200, 0x8000) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0xB000, 0xB480) || \
	 REG_RANGE((reg), 0xE000, 0xE800))

#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8800, 0x8900) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1C000) || \
	 REG_RANGE((reg), 0x1E800, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x38000))

#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x4000, 0x5000) || \
	 REG_RANGE((reg), 0x8000, 0x8300) || \
	 REG_RANGE((reg), 0x8500, 0x8600) || \
	 REG_RANGE((reg), 0x9000, 0xB000) || \
	 REG_RANGE((reg), 0xF000, 0x10000))

#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
	REG_RANGE((reg), 0xB00, 0x2000)

#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x2700) || \
	 REG_RANGE((reg), 0x3000, 0x4000) || \
	 REG_RANGE((reg), 0x5200, 0x8000) || \
	 REG_RANGE((reg), 0x8140, 0x8160) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0x8C00, 0x8D00) || \
	 REG_RANGE((reg), 0xB000, 0xB480) || \
	 REG_RANGE((reg), 0xE000, 0xE900) || \
	 REG_RANGE((reg), 0x24400, 0x24800))

#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8130, 0x8140) || \
	 REG_RANGE((reg), 0x8800, 0x8A00) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
	REG_RANGE((reg), 0x9400, 0x9800)

#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
	((reg) < 0x40000 && \
	 !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
__unclaimed_reg_debug(struct drm_i915_private *dev_priv,
		      const i915_reg_t reg,
		      const bool read,
		      const bool before)
{
	/* XXX. We limit the auto arming traces for mmio
	 * debug on these platforms. There are just too many
	 * revealed by these and CI/Bat suffers from the noise.
	 * Please fix and then re-enable the automatic traces.
	 */
	if (i915.mmio_debug < 2 &&
	    (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	if (WARN(check_for_unclaimed_mmio(dev_priv),
		 "Unclaimed register detected %s %s register 0x%x\n",
		 before ? "before" : "after",
		 read ? "reading" : "writing to",
		 i915_mmio_reg_offset(reg)))
		i915.mmio_debug--;
}

static inline void
unclaimed_reg_debug(struct drm_i915_private *dev_priv,
		    const i915_reg_t reg,
		    const bool read,
		    const bool before)
{
	if (likely(!i915.mmio_debug))
		return;

	__unclaimed_reg_debug(dev_priv, reg, read, before);
}

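/*
 * The MMIO accessors below are stamped out per generation by the
 * GEN*_READ/WRITE_HEADER and _FOOTER macros: one read8/16/32/64 (and
 * write) variant per platform, later wired into the uncore.funcs vtable
 * by intel_uncore_init().
 */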
#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER

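/*
 * Gen6+ reads take the uncore spinlock, bracket the raw access with the
 * unclaimed-register debug checks, and first wake whatever forcewake
 * domain the register offset requires via __force_wake_get().
 */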
#define GEN6_READ_HEADER(x) \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, true, true)

#define GEN6_READ_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, true, false); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

static inline void __force_wake_get(struct drm_i915_private *dev_priv,
				    enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (WARN_ON(!fw_domains))
		return;

	/* Ideally GCC would constant-fold and eliminate all this loop */
	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (domain->wake_count) {
			fw_domains &= ~(1 << id);
			continue;
		}

		domain->wake_count++;
		fw_domain_arm_timer(domain);
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	if (NEEDS_FORCE_WAKE(offset)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine = 0; \
	GEN6_READ_HEADER(x); \
	if (!NEEDS_FORCE_WAKE(offset)) \
		fw_engine = 0; \
	else if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_MEDIA; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define __chv_read(x) \
static u##x \
chv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine = 0; \
	GEN6_READ_HEADER(x); \
	if (!NEEDS_FORCE_WAKE(offset)) \
		fw_engine = 0; \
	else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define SKL_NEEDS_FORCE_WAKE(reg) \
	((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))

#define __gen9_read(x) \
static u##x \
gen9_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	if (!SKL_NEEDS_FORCE_WAKE(offset)) \
		fw_engine = 0; \
	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	else \
		fw_engine = FORCEWAKE_BLITTER; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

__gen9_read(8)
__gen9_read(16)
__gen9_read(32)
__gen9_read(64)
__chv_read(8)
__chv_read(16)
__chv_read(32)
__chv_read(64)
__vlv_read(8)
__vlv_read(16)
__vlv_read(32)
__vlv_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)

#undef __gen9_read
#undef __chv_read
#undef __vlv_read
#undef __gen6_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

#define VGPU_READ_HEADER(x) \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define VGPU_READ_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __vgpu_read(x) \
static u##x \
vgpu_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	VGPU_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	VGPU_READ_FOOTER; \
}

__vgpu_read(8)
__vgpu_read(16)
__vgpu_read(32)
__vgpu_read(64)

#undef __vgpu_read
#undef VGPU_READ_FOOTER
#undef VGPU_READ_HEADER

#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen5_write(64)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)
__gen2_write(64)

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, false, true)

#define GEN6_WRITE_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, false, false); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	GEN6_WRITE_FOOTER; \
}

#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	GEN6_WRITE_FOOTER; \
}

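/*
 * Writes to shadowed registers are saved/restored by the hardware itself
 * and take effect without the GT having to be awake, so the gen8+ write
 * paths skip forcewake for them.
 */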
static const i915_reg_t gen8_shadowed_regs[] = {
	FORCEWAKE_MT,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	/* TODO: Other registers are not yet used */
};

static bool is_gen8_shadowed(struct drm_i915_private *dev_priv,
			     i915_reg_t reg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
		if (i915_mmio_reg_equal(reg, gen8_shadowed_regs[i]))
			return true;

	return false;
}

#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(dev_priv, reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

#define __chv_write(x) \
static void \
chv_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine = 0; \
	GEN6_WRITE_HEADER; \
	if (!NEEDS_FORCE_WAKE(offset) || \
	    is_gen8_shadowed(dev_priv, reg)) \
		fw_engine = 0; \
	else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

static const i915_reg_t gen9_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	FORCEWAKE_BLITTER_GEN9,
	FORCEWAKE_RENDER_GEN9,
	FORCEWAKE_MEDIA_GEN9,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	/* TODO: Other registers are not yet used */
};

static bool is_gen9_shadowed(struct drm_i915_private *dev_priv,
			     i915_reg_t reg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
		if (i915_mmio_reg_equal(reg, gen9_shadowed_regs[i]))
			return true;

	return false;
}

#define __gen9_write(x) \
static void \
gen9_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, \
		bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	if (!SKL_NEEDS_FORCE_WAKE(offset) || \
	    is_gen9_shadowed(dev_priv, reg)) \
		fw_engine = 0; \
	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	else \
		fw_engine = FORCEWAKE_BLITTER; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

__gen9_write(8)
__gen9_write(16)
__gen9_write(32)
__gen9_write(64)
__chv_write(8)
__chv_write(16)
__chv_write(32)
__chv_write(64)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen8_write(64)
__hsw_write(8)
__hsw_write(16)
__hsw_write(32)
__hsw_write(64)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)

#undef __gen9_write
#undef __chv_write
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

#define VGPU_WRITE_HEADER \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define VGPU_WRITE_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __vgpu_write(x) \
static void vgpu_write##x(struct drm_i915_private *dev_priv, \
			  i915_reg_t reg, u##x val, bool trace) { \
	VGPU_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	VGPU_WRITE_FOOTER; \
}

__vgpu_write(8)
__vgpu_write(16)
__vgpu_write(32)
__vgpu_write(64)

#undef __vgpu_write
#undef VGPU_WRITE_FOOTER
#undef VGPU_WRITE_HEADER

#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
	dev_priv->uncore.funcs.mmio_writew = x##_write16; \
	dev_priv->uncore.funcs.mmio_writel = x##_write32; \
	dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_readb = x##_read8; \
	dev_priv->uncore.funcs.mmio_readw = x##_read16; \
	dev_priv->uncore.funcs.mmio_readl = x##_read32; \
	dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)

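/*
 * Set up a single forcewake domain: the set/ack registers, the
 * platform-specific set/clear/reset values, the posting-read register and
 * the deferred-release timer, then mark the domain as available in
 * dev_priv->uncore.fw_domains.
 */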
static void fw_domain_init(struct drm_i915_private *dev_priv,
			   enum forcewake_domain_id domain_id,
			   i915_reg_t reg_set,
			   i915_reg_t reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
		return;

	d = &dev_priv->uncore.fw_domain[domain_id];

	WARN_ON(d->wake_count);

	d->wake_count = 0;
	d->reg_set = reg_set;
	d->reg_ack = reg_ack;

	if (IS_GEN6(dev_priv)) {
		d->val_reset = 0;
		d->val_set = FORCEWAKE_KERNEL;
		d->val_clear = 0;
	} else {
		/* WaRsClearFWBitsAtReset:bdw,skl */
		d->val_reset = _MASKED_BIT_DISABLE(0xffff);
		d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
		d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		d->reg_post = FORCEWAKE_ACK_VLV;
	else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
		d->reg_post = ECOBUS;

	d->i915 = dev_priv;
	d->id = domain_id;

	setup_timer(&d->timer, intel_uncore_fw_release_timer, (unsigned long)d);

	dev_priv->uncore.fw_domains |= (1 << domain_id);

	fw_domain_reset(d);
}

static void intel_uncore_fw_domains_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev_priv->dev)->gen <= 5)
		return;

	if (IS_GEN9(dev)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		if (!IS_CHERRYVIEW(dev))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		if (IS_HASWELL(dev))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case MT access is
		 * not working. At this stage we don't know which flavour this
		 * platform is, so reset force-wake as a global fallback:
		 * clear the legacy FORCEWAKE register and post the write
		 * through ECOBUS before probing for MT support.
		 */
		__raw_i915_write32(dev_priv, FORCEWAKE, 0);
		__raw_posting_read(dev_priv, ECOBUS);

		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);

		mutex_lock(&dev->struct_mutex);
		fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

	/* All future platforms are expected to require complex power gating */
	WARN_ON(dev_priv->uncore.fw_domains == 0);
}

void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	i915_check_vgpu(dev);

	intel_uncore_ellc_detect(dev);
	intel_uncore_fw_domains_init(dev);
	__intel_uncore_early_sanitize(dev, false);

	dev_priv->uncore.unclaimed_mmio_check = 1;

	switch (INTEL_INFO(dev)->gen) {
	default:
	case 9:
		ASSIGN_WRITE_MMIO_VFUNCS(gen9);
		ASSIGN_READ_MMIO_VFUNCS(gen9);
		break;
	case 8:
		if (IS_CHERRYVIEW(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(chv);
			ASSIGN_READ_MMIO_VFUNCS(chv);

		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen8);
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 7:
	case 6:
		if (IS_HASWELL(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(hsw);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen6);
		}

		if (IS_VALLEYVIEW(dev)) {
			ASSIGN_READ_MMIO_VFUNCS(vlv);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 5:
		ASSIGN_WRITE_MMIO_VFUNCS(gen5);
		ASSIGN_READ_MMIO_VFUNCS(gen5);
		break;
	case 4:
	case 3:
	case 2:
		ASSIGN_WRITE_MMIO_VFUNCS(gen2);
		ASSIGN_READ_MMIO_VFUNCS(gen2);
		break;
	}

	if (intel_vgpu_active(dev)) {
		ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
		ASSIGN_READ_MMIO_VFUNCS(vgpu);
	}

	i915_check_and_clear_faults(dev);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
#undef ASSIGN_READ_MMIO_VFUNCS

void intel_uncore_fini(struct drm_device *dev)
{
	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev);
	intel_uncore_forcewake_reset(dev, false);
}

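/*
 * Registers userspace may read through the i915_reg_read ioctl: each
 * whitelist entry gives the register (low/high dwords for 64-bit reads),
 * its size in bytes and the mask of generations on which it is valid.
 */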
#define GEN_RANGE(l, h) GENMASK(h, l)

static const struct register_whitelist {
	i915_reg_t offset_ldw, offset_udw;
	uint32_t size;
	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
	uint32_t gen_bitmask;
} whitelist[] = {
	{ .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
	  .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
	  .size = 8, .gen_bitmask = GEN_RANGE(4, 9) },
};

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	unsigned size;
	i915_reg_t offset_ldw, offset_udw;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	/* We use the low bits to encode extra flags as the register should
	 * be naturally aligned (and those that are not so aligned merely
	 * limit the available flags for that register).
	 */
	offset_ldw = entry->offset_ldw;
	offset_udw = entry->offset_udw;
	size = entry->size;
	size |= reg->offset ^ i915_mmio_reg_offset(offset_ldw);

	intel_runtime_pm_get(dev_priv);

	switch (size) {
	case 8 | 1:
		reg->val = I915_READ64_2x32(offset_ldw, offset_udw);
		break;
	case 8:
		reg->val = I915_READ64(offset_ldw);
		break;
	case 4:
		reg->val = I915_READ(offset_ldw);
		break;
	case 2:
		reg->val = I915_READ16(offset_ldw);
		break;
	case 1:
		reg->val = I915_READ8(offset_ldw);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

int i915_get_reset_stats_ioctl(struct drm_device *dev,
			       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reset_stats *args = data;
	struct i915_ctx_hang_stats *hs;
	struct intel_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}
	hs = &ctx->hang_stats;

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = hs->batch_active;
	args->batch_pending = hs->batch_pending;

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

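/*
 * GPU reset back-ends, oldest first: pre-gen5 parts reset through the
 * GDRST byte in PCI config space, gen5 through the ILK_GDSR register,
 * and gen6+ through GEN6_GDRST, with gen8 adding a per-engine
 * ready-for-reset handshake on top.
 */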
static int i915_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_STATUS) == 0;
}

static int i915_do_reset(struct drm_device *dev)
{
	/* assert reset for at least 20 usec */
	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	udelay(20);
	pci_write_config_byte(dev->pdev, I915_GDRST, 0);

	return wait_for(i915_reset_complete(dev), 500);
}

static int g4x_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct drm_device *dev)
{
	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for(g4x_reset_complete(dev), 500);
}

static int g4x_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	pci_write_config_byte(dev->pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I915_GDRST, 0);

	return 0;
}

static int ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(ILK_GDSR, 0);

	return 0;
}

static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset requests */
	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	intel_uncore_forcewake_reset(dev, true);

	return ret;
}

static int wait_for_register(struct drm_i915_private *dev_priv,
			     i915_reg_t reg,
			     const u32 mask,
			     const u32 value,
			     const unsigned long timeout_ms)
{
	return wait_for((I915_READ(reg) & mask) == value, timeout_ms);
}

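/*
 * Gen8+ engines must first acknowledge that they are ready to be reset
 * (RESET_CTL_READY_TO_RESET) before the full gen6-style soft reset is
 * issued; if any engine never becomes ready, the requests are withdrawn
 * and -EIO is returned.
 */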
static int gen8_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	int i;

	for_each_ring(engine, dev_priv, i) {
		I915_WRITE(RING_RESET_CTL(engine->mmio_base),
			   _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));

		if (wait_for_register(dev_priv,
				      RING_RESET_CTL(engine->mmio_base),
				      RESET_CTL_READY_TO_RESET,
				      RESET_CTL_READY_TO_RESET,
				      700)) {
			DRM_ERROR("%s: reset request timeout\n", engine->name);
			goto not_ready;
		}
	}

	return gen6_do_reset(dev);

not_ready:
	for_each_ring(engine, dev_priv, i)
		I915_WRITE(RING_RESET_CTL(engine->mmio_base),
			   _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));

	return -EIO;
}

static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *)
{
	if (!i915.reset)
		return NULL;

	if (INTEL_INFO(dev)->gen >= 8)
		return gen8_do_reset;
	else if (INTEL_INFO(dev)->gen >= 6)
		return gen6_do_reset;
	else if (IS_GEN5(dev))
		return ironlake_do_reset;
	else if (IS_G4X(dev))
		return g4x_do_reset;
	else if (IS_G33(dev))
		return g33_do_reset;
	else if (INTEL_INFO(dev)->gen >= 3)
		return i915_do_reset;
	else
		return NULL;
}

int intel_gpu_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int (*reset)(struct drm_device *);
	int ret;

	reset = intel_get_gpu_reset(dev);
	if (reset == NULL)
		return -ENODEV;

	/* If the power well sleeps during the reset, the reset
	 * request may be dropped and never completes (causing -EIO).
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	ret = reset(dev);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

bool intel_has_gpu_reset(struct drm_device *dev)
{
	return intel_get_gpu_reset(dev) != NULL;
}

bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	return check_for_unclaimed_mmio(dev_priv);
}

bool
intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
{
	if (unlikely(i915.mmio_debug ||
		     dev_priv->uncore.unclaimed_mmio_check <= 0))
		return false;

	if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
		DRM_DEBUG("Unclaimed register detected, "
			  "enabling oneshot unclaimed register reporting. "
			  "Please use i915.mmio_debug=N for more information.\n");
		i915.mmio_debug++;
		dev_priv->uncore.unclaimed_mmio_check--;
		return true;
	}

	return false;
}