1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24#include <linux/pm_runtime.h>
25#include <asm/iosf_mbi.h>
26
27#include "i915_drv.h"
28#include "i915_vgpu.h"
29#include "intel_drv.h"
30#include "intel_pm.h"
31
/* Timeouts (in ms) for forcewake ack waits and GT FIFO free-entry waits. */
#define FORCEWAKE_ACK_TIMEOUT_MS 50
#define GT_FIFO_TIMEOUT_MS 10

/* Read-back used purely for its posting side effect; the value is discarded. */
#define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))
36
/*
 * Human-readable domain names, indexed by enum forcewake_domain_id.
 * Size is asserted against FW_DOMAIN_ID_COUNT in
 * intel_uncore_forcewake_domain_to_str().
 */
static const char * const forcewake_domain_names[] = {
	"render",
	"blitter",
	"media",
	"vdbox0",
	"vdbox1",
	"vdbox2",
	"vdbox3",
	"vebox0",
	"vebox1",
};
48
49const char *
50intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
51{
52 BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);
53
54 if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
55 return forcewake_domain_names[id];
56
57 WARN_ON(id);
58
59 return "unknown";
60}
61
/*
 * Raw accessors for a domain's forcewake registers. fw_set()/fw_clear()
 * use masked writes (enable bits in the upper half select which bits in
 * the lower half take effect).
 */
#define fw_ack(d) readl((d)->reg_ack)
#define fw_set(d, val) writel(_MASKED_BIT_ENABLE((val)), (d)->reg_set)
#define fw_clear(d, val) writel(_MASKED_BIT_DISABLE((val)), (d)->reg_set)
65
static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
	/*
	 * Clear every request bit in the domain's forcewake register,
	 * without waiting for any ack — the domain's power well may not
	 * even exist at this point.
	 */
	fw_clear(d, 0xffff);
}
77
static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	/* Take a wake reference and schedule its release roughly 1ms out. */
	d->wake_count++;
	hrtimer_start_range_ns(&d->timer,
			       NSEC_PER_MSEC,
			       NSEC_PER_MSEC,
			       HRTIMER_MODE_REL);
}
87
/*
 * Poll the domain's ack register until (ack & mask) == value, or the
 * FORCEWAKE_ACK_TIMEOUT_MS deadline expires. Returns 0 or -ETIMEDOUT
 * (per wait_for_atomic()).
 */
static inline int
__wait_for_ack(const struct intel_uncore_forcewake_domain *d,
	       const u32 ack,
	       const u32 value)
{
	return wait_for_atomic((fw_ack(d) & ack) == value,
			       FORCEWAKE_ACK_TIMEOUT_MS);
}
96
/* Wait for the given ack bit(s) to read back as 0. */
static inline int
wait_ack_clear(const struct intel_uncore_forcewake_domain *d,
	       const u32 ack)
{
	return __wait_for_ack(d, ack, 0);
}
103
/* Wait for the given ack bit(s) to read back as set. */
static inline int
wait_ack_set(const struct intel_uncore_forcewake_domain *d,
	     const u32 ack)
{
	return __wait_for_ack(d, ack, ack);
}
110
/* Wait for FORCEWAKE_KERNEL ack to clear; taint the kernel on timeout. */
static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_ack_clear(d, FORCEWAKE_KERNEL)) {
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
		add_taint_for_CI(TAINT_WARN); /* CI now unreliable */
	}
}
120
/* Which ack transition the fallback path should wait for. */
enum ack_type {
	ACK_CLEAR = 0,
	ACK_SET
};
125
static int
fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d,
				 const enum ack_type type)
{
	const u32 ack_bit = FORCEWAKE_KERNEL;
	const u32 value = type == ACK_SET ? ack_bit : 0;
	unsigned int pass;
	bool ack_detected;

	/*
	 * The driver's wake request can collide with the hardware's own
	 * wake requests, in which case the hardware may fail to deliver
	 * the driver's ack.
	 *
	 * Toggle the FALLBACK bit to kick the GPU's state machine, in
	 * the hope that the original ack arrives along with the fallback
	 * ack. The delay grows with each pass to back off progressively.
	 */
	pass = 1;
	do {
		wait_ack_clear(d, FORCEWAKE_KERNEL_FALLBACK);

		fw_set(d, FORCEWAKE_KERNEL_FALLBACK);
		/* Give the GT some time to relax before polling again. */
		udelay(10 * pass);
		wait_ack_set(d, FORCEWAKE_KERNEL_FALLBACK);

		/* Did the original ack show up in the meantime? */
		ack_detected = (fw_ack(d) & ack_bit) == value;

		fw_clear(d, FORCEWAKE_KERNEL_FALLBACK);
	} while (!ack_detected && pass++ < 10);

	DRM_DEBUG_DRIVER("%s had to use fallback to %s ack, 0x%x (passes %u)\n",
			 intel_uncore_forcewake_domain_to_str(d->id),
			 type == ACK_SET ? "set" : "clear",
			 fw_ack(d),
			 pass);

	return ack_detected ? 0 : -ETIMEDOUT;
}
171
/*
 * Wait for the kernel forcewake ack to clear, retrying via the fallback
 * toggle; on total failure, fall through to the error-reporting wait.
 */
static inline void
fw_domain_wait_ack_clear_fallback(const struct intel_uncore_forcewake_domain *d)
{
	if (likely(!wait_ack_clear(d, FORCEWAKE_KERNEL)))
		return;

	if (fw_domain_wait_ack_with_fallback(d, ACK_CLEAR))
		fw_domain_wait_ack_clear(d); /* logs the error and taints */
}
181
/* Request the kernel forcewake bit for this domain (no ack wait here). */
static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
	fw_set(d, FORCEWAKE_KERNEL);
}
187
/* Wait for FORCEWAKE_KERNEL ack to assert; taint the kernel on timeout. */
static inline void
fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_ack_set(d, FORCEWAKE_KERNEL)) {
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
		add_taint_for_CI(TAINT_WARN); /* CI now unreliable */
	}
}
197
/*
 * Wait for the kernel forcewake ack to assert, retrying via the fallback
 * toggle; on total failure, fall through to the error-reporting wait.
 */
static inline void
fw_domain_wait_ack_set_fallback(const struct intel_uncore_forcewake_domain *d)
{
	if (likely(!wait_ack_set(d, FORCEWAKE_KERNEL)))
		return;

	if (fw_domain_wait_ack_with_fallback(d, ACK_SET))
		fw_domain_wait_ack_set(d); /* logs the error and taints */
}
207
/* Release the kernel forcewake bit for this domain. */
static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
	fw_clear(d, FORCEWAKE_KERNEL);
}
213
/*
 * Wake the requested domains: issue all wake requests first, then wait
 * for all the acks, so the per-domain ack latencies overlap.
 * Caller holds uncore->lock.
 */
static void
fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
		fw_domain_wait_ack_clear(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_wait_ack_set(d);

	uncore->fw_domains_active |= fw_domains;
}
232
/*
 * Same as fw_domains_get(), but using the fallback-toggle ack waiters
 * for platforms that need the FORCEWAKE_KERNEL_FALLBACK workaround.
 * Caller holds uncore->lock.
 */
static void
fw_domains_get_with_fallback(struct intel_uncore *uncore,
			     enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
		fw_domain_wait_ack_clear_fallback(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_wait_ack_set_fallback(d);

	uncore->fw_domains_active |= fw_domains;
}
252
/*
 * Release the requested domains' kernel forcewake bits (no ack wait on
 * release). Caller holds uncore->lock.
 */
static void
fw_domains_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_put(d);

	uncore->fw_domains_active &= ~fw_domains;
}
266
/* Force-clear all request bits in every listed domain (see fw_domain_reset). */
static void
fw_domains_reset(struct intel_uncore *uncore,
		 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	if (!fw_domains)
		return;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_reset(d);
}
282
283static inline u32 gt_thread_status(struct intel_uncore *uncore)
284{
285 u32 val;
286
287 val = __raw_uncore_read32(uncore, GEN6_GT_THREAD_STATUS_REG);
288 val &= GEN6_GT_THREAD_STATUS_CORE_MASK;
289
290 return val;
291}
292
static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
{
	/*
	 * Spin (up to 5ms) until the GT thread status reads back as idle
	 * (all core-status bits clear); only complain once on timeout.
	 */
	WARN_ONCE(wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
		  "GT thread status wait timed out\n");
}
302
/* Gen6-style get: after waking the domains, wait for the GT thread to idle. */
static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get(uncore, fw_domains);

	/* WaRsForcewakeWaitTC0 — TODO confirm which platforms still need this */
	__gen6_gt_wait_for_thread_c0(uncore);
}
311
312static inline u32 fifo_free_entries(struct intel_uncore *uncore)
313{
314 u32 count = __raw_uncore_read32(uncore, GTFIFOCTL);
315
316 return count & GT_FIFO_FREE_ENTRIES_MASK;
317}
318
/*
 * Ensure there is at least one GT FIFO entry free before a register write;
 * updates uncore->fifo_count with the remaining budget.
 */
static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
{
	u32 n;

	/*
	 * On VLV the FIFO is shared by both SW and HW, so the cached count
	 * can't be trusted — re-read the free space every time.
	 */
	if (IS_VALLEYVIEW(uncore_to_i915(uncore)))
		n = fifo_free_entries(uncore);
	else
		n = uncore->fifo_count;

	if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
		if (wait_for_atomic((n = fifo_free_entries(uncore)) >
				    GT_FIFO_NUM_RESERVED_ENTRIES,
				    GT_FIFO_TIMEOUT_MS)) {
			DRM_DEBUG("GT_FIFO timeout, entries: %u\n", n);
			return; /* leave the stale count; next write re-polls */
		}
	}

	/* Account for the entry the caller is about to consume. */
	uncore->fifo_count = n - 1;
}
341
/*
 * hrtimer callback that drops the auto-acquired forcewake reference.
 * If the domain was touched since the timer was armed (domain->active),
 * restart the timer instead of releasing.
 */
static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)
{
	struct intel_uncore_forcewake_domain *domain =
	       container_of(timer, struct intel_uncore_forcewake_domain, timer);
	struct intel_uncore *uncore = forcewake_domain_to_uncore(domain);
	unsigned long irqflags;

	assert_rpm_device_not_suspended(uncore->rpm);

	/* Consume the "recently used" flag; keep the wake alive if set. */
	if (xchg(&domain->active, false))
		return HRTIMER_RESTART;

	spin_lock_irqsave(&uncore->lock, irqflags);
	if (WARN_ON(domain->wake_count == 0))
		domain->wake_count++; /* repair underflow before decrementing */

	if (--domain->wake_count == 0)
		uncore->funcs.force_wake_put(uncore, domain->mask);

	spin_unlock_irqrestore(&uncore->lock, irqflags);

	return HRTIMER_NORESTART;
}
366
367
368static unsigned int
369intel_uncore_forcewake_reset(struct intel_uncore *uncore)
370{
371 unsigned long irqflags;
372 struct intel_uncore_forcewake_domain *domain;
373 int retry_count = 100;
374 enum forcewake_domains fw, active_domains;
375
376 iosf_mbi_assert_punit_acquired();
377
378
379
380
381
382 while (1) {
383 unsigned int tmp;
384
385 active_domains = 0;
386
387 for_each_fw_domain(domain, uncore, tmp) {
388 smp_store_mb(domain->active, false);
389 if (hrtimer_cancel(&domain->timer) == 0)
390 continue;
391
392 intel_uncore_fw_release_timer(&domain->timer);
393 }
394
395 spin_lock_irqsave(&uncore->lock, irqflags);
396
397 for_each_fw_domain(domain, uncore, tmp) {
398 if (hrtimer_active(&domain->timer))
399 active_domains |= domain->mask;
400 }
401
402 if (active_domains == 0)
403 break;
404
405 if (--retry_count == 0) {
406 DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
407 break;
408 }
409
410 spin_unlock_irqrestore(&uncore->lock, irqflags);
411 cond_resched();
412 }
413
414 WARN_ON(active_domains);
415
416 fw = uncore->fw_domains_active;
417 if (fw)
418 uncore->funcs.force_wake_put(uncore, fw);
419
420 fw_domains_reset(uncore, uncore->fw_domains);
421 assert_forcewakes_inactive(uncore);
422
423 spin_unlock_irqrestore(&uncore->lock, irqflags);
424
425 return fw;
426}
427
428static bool
429fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore)
430{
431 u32 dbg;
432
433 dbg = __raw_uncore_read32(uncore, FPGA_DBG);
434 if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
435 return false;
436
437 __raw_uncore_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
438
439 return true;
440}
441
442static bool
443vlv_check_for_unclaimed_mmio(struct intel_uncore *uncore)
444{
445 u32 cer;
446
447 cer = __raw_uncore_read32(uncore, CLAIM_ER);
448 if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
449 return false;
450
451 __raw_uncore_write32(uncore, CLAIM_ER, CLAIM_ER_CLR);
452
453 return true;
454}
455
456static bool
457gen6_check_for_fifo_debug(struct intel_uncore *uncore)
458{
459 u32 fifodbg;
460
461 fifodbg = __raw_uncore_read32(uncore, GTFIFODBG);
462
463 if (unlikely(fifodbg)) {
464 DRM_DEBUG_DRIVER("GTFIFODBG = 0x08%x\n", fifodbg);
465 __raw_uncore_write32(uncore, GTFIFODBG, fifodbg);
466 }
467
468 return fifodbg;
469}
470
471static bool
472check_for_unclaimed_mmio(struct intel_uncore *uncore)
473{
474 bool ret = false;
475
476 if (intel_uncore_has_fpga_dbg_unclaimed(uncore))
477 ret |= fpga_check_for_unclaimed_mmio(uncore);
478
479 if (intel_uncore_has_dbg_unclaimed(uncore))
480 ret |= vlv_check_for_unclaimed_mmio(uncore);
481
482 if (intel_uncore_has_fifo(uncore))
483 ret |= gen6_check_for_fifo_debug(uncore);
484
485 return ret;
486}
487
/*
 * Early uncore sanitize: clear stale unclaimed-mmio state, apply CHV
 * FIFO policy, reset forcewake, and optionally re-acquire the domains
 * in @restore_forcewake (used across suspend/resume).
 */
static void __intel_uncore_early_sanitize(struct intel_uncore *uncore,
					  unsigned int restore_forcewake)
{
	/* Clear out any leftover unclaimed-access detection bits. */
	if (check_for_unclaimed_mmio(uncore))
		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(uncore_to_i915(uncore))) {
		__raw_uncore_write32(uncore, GTFIFOCTL,
				     __raw_uncore_read32(uncore, GTFIFOCTL) |
				     GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				     GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	iosf_mbi_punit_acquire();
	intel_uncore_forcewake_reset(uncore);
	if (restore_forcewake) {
		spin_lock_irq(&uncore->lock);
		uncore->funcs.force_wake_get(uncore, restore_forcewake);

		if (intel_uncore_has_fifo(uncore))
			uncore->fifo_count = fifo_free_entries(uncore);
		spin_unlock_irq(&uncore->lock);
	}
	iosf_mbi_punit_release();
}
515
/*
 * Suspend path: stop listening for PMIC bus access and drop all
 * forcewake, remembering which domains were held so resume can
 * restore them.
 */
void intel_uncore_suspend(struct intel_uncore *uncore)
{
	iosf_mbi_punit_acquire();
	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
		&uncore->pmic_bus_access_nb);
	uncore->fw_domains_saved = intel_uncore_forcewake_reset(uncore);
	iosf_mbi_punit_release();
}
524
/* Early resume: re-sanitize and re-acquire the domains saved at suspend. */
void intel_uncore_resume_early(struct intel_uncore *uncore)
{
	unsigned int restore_forcewake;

	restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved);
	__intel_uncore_early_sanitize(uncore, restore_forcewake);

	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}
534
/* Runtime resume only needs the PMIC bus access notifier re-registered. */
void intel_uncore_runtime_resume(struct intel_uncore *uncore)
{
	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}
539
void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
{
	/* BIOS often leaves RC6 enabled; sanitize GT powersave state. */
	intel_sanitize_gt_powersave(dev_priv);
}
545
/*
 * Core of forcewake get: bump each domain's refcount, and only issue
 * hardware wake requests for domains going 0 -> 1. Caller holds
 * uncore->lock.
 */
static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	/* Ignore domains this platform doesn't have. */
	fw_domains &= uncore->fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		if (domain->wake_count++) {
			/* Already awake: just mark it recently used. */
			fw_domains &= ~domain->mask;
			domain->active = true;
		}
	}

	if (fw_domains)
		uncore->funcs.force_wake_get(uncore, fw_domains);
}
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578void intel_uncore_forcewake_get(struct intel_uncore *uncore,
579 enum forcewake_domains fw_domains)
580{
581 unsigned long irqflags;
582
583 if (!uncore->funcs.force_wake_get)
584 return;
585
586 assert_rpm_wakelock_held(uncore->rpm);
587
588 spin_lock_irqsave(&uncore->lock, irqflags);
589 __intel_uncore_forcewake_get(uncore, fw_domains);
590 spin_unlock_irqrestore(&uncore->lock, irqflags);
591}
592
593
594
595
596
597
598
599
600
601void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
602{
603 spin_lock_irq(&uncore->lock);
604 if (!uncore->user_forcewake.count++) {
605 intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
606
607
608 uncore->user_forcewake.saved_mmio_check =
609 uncore->unclaimed_mmio_check;
610 uncore->user_forcewake.saved_mmio_debug =
611 i915_modparams.mmio_debug;
612
613 uncore->unclaimed_mmio_check = 0;
614 i915_modparams.mmio_debug = 0;
615 }
616 spin_unlock_irq(&uncore->lock);
617}
618
619
620
621
622
623
624
625
626void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
627{
628 spin_lock_irq(&uncore->lock);
629 if (!--uncore->user_forcewake.count) {
630 if (intel_uncore_unclaimed_mmio(uncore))
631 dev_info(uncore_to_i915(uncore)->drm.dev,
632 "Invalid mmio detected during user access\n");
633
634 uncore->unclaimed_mmio_check =
635 uncore->user_forcewake.saved_mmio_check;
636 i915_modparams.mmio_debug =
637 uncore->user_forcewake.saved_mmio_debug;
638
639 intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
640 }
641 spin_unlock_irq(&uncore->lock);
642}
643
644
645
646
647
648
649
650
651
652void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
653 enum forcewake_domains fw_domains)
654{
655 lockdep_assert_held(&uncore->lock);
656
657 if (!uncore->funcs.force_wake_get)
658 return;
659
660 __intel_uncore_forcewake_get(uncore, fw_domains);
661}
662
/*
 * Core of forcewake put: drop each domain's refcount; the last reference
 * arms the release timer rather than dropping the hardware wake
 * immediately. Caller holds uncore->lock.
 */
static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	/* Ignore domains this platform doesn't have. */
	fw_domains &= uncore->fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		if (WARN_ON(domain->wake_count == 0))
			continue; /* unbalanced put; don't underflow */

		if (--domain->wake_count) {
			domain->active = true;
			continue;
		}

		/* Defer the actual release to the hrtimer. */
		fw_domain_arm_timer(domain);
	}
}
683
684
685
686
687
688
689
690
691
692void intel_uncore_forcewake_put(struct intel_uncore *uncore,
693 enum forcewake_domains fw_domains)
694{
695 unsigned long irqflags;
696
697 if (!uncore->funcs.force_wake_put)
698 return;
699
700 spin_lock_irqsave(&uncore->lock, irqflags);
701 __intel_uncore_forcewake_put(uncore, fw_domains);
702 spin_unlock_irqrestore(&uncore->lock, irqflags);
703}
704
705
706
707
708
709
710
711
712
713void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
714 enum forcewake_domains fw_domains)
715{
716 lockdep_assert_held(&uncore->lock);
717
718 if (!uncore->funcs.force_wake_put)
719 return;
720
721 __intel_uncore_forcewake_put(uncore, fw_domains);
722}
723
/* Debug assert: no forcewake domain should currently be held. */
void assert_forcewakes_inactive(struct intel_uncore *uncore)
{
	if (!uncore->funcs.force_wake_get)
		return;

	WARN(uncore->fw_domains_active,
	     "Expected all fw_domains to be inactive, but %08x are still on\n",
	     uncore->fw_domains_active);
}
733
/* Debug assert: all of @fw_domains (that exist) should currently be held. */
void assert_forcewakes_active(struct intel_uncore *uncore,
			      enum forcewake_domains fw_domains)
{
	if (!uncore->funcs.force_wake_get)
		return;

	assert_rpm_wakelock_held(uncore->rpm);

	/* Only check domains this platform actually has. */
	fw_domains &= uncore->fw_domains;
	WARN(fw_domains & ~uncore->fw_domains_active,
	     "Expected %08x fw_domains to be active, but %08x are off\n",
	     fw_domains, fw_domains & ~uncore->fw_domains_active);
}
747
748
749#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)
750
751#define GEN11_NEEDS_FORCE_WAKE(reg) \
752 ((reg) < 0x40000 || ((reg) >= 0x1c0000 && (reg) < 0x1dc000))
753
754#define __gen6_reg_read_fw_domains(uncore, offset) \
755({ \
756 enum forcewake_domains __fwd; \
757 if (NEEDS_FORCE_WAKE(offset)) \
758 __fwd = FORCEWAKE_RENDER; \
759 else \
760 __fwd = 0; \
761 __fwd; \
762})
763
764static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
765{
766 if (offset < entry->start)
767 return -1;
768 else if (offset > entry->end)
769 return 1;
770 else
771 return 0;
772}
773
774
775#define BSEARCH(key, base, num, cmp) ({ \
776 unsigned int start__ = 0, end__ = (num); \
777 typeof(base) result__ = NULL; \
778 while (start__ < end__) { \
779 unsigned int mid__ = start__ + (end__ - start__) / 2; \
780 int ret__ = (cmp)((key), (base) + mid__); \
781 if (ret__ < 0) { \
782 end__ = mid__; \
783 } else if (ret__ > 0) { \
784 start__ = mid__ + 1; \
785 } else { \
786 result__ = (base) + mid__; \
787 break; \
788 } \
789 } \
790 result__; \
791})
792
/*
 * Look up the forcewake domains needed for register @offset in the
 * platform's sorted range table. Returns 0 when no domain is needed.
 */
static enum forcewake_domains
find_fw_domain(struct intel_uncore *uncore, u32 offset)
{
	const struct intel_forcewake_range *entry;

	entry = BSEARCH(offset,
			uncore->fw_domains_table,
			uncore->fw_domains_table_entries,
			fw_range_cmp);

	if (!entry)
		return 0;

	/*
	 * The list of FW domains depends on the HW platform, so
	 * FORCEWAKE_ALL in a table entry is translated here into the set
	 * of domains this particular uncore actually has.
	 */
	if (entry->domains == FORCEWAKE_ALL)
		return uncore->fw_domains;

	WARN(entry->domains & ~uncore->fw_domains,
	     "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
	     entry->domains & ~uncore->fw_domains, offset);

	return entry->domains;
}
820
/* Initializer for one sorted entry of a forcewake range table. */
#define GEN_FW_RANGE(s, e, d) \
	{ .start = (s), .end = (e), .domains = (d) }

/* Platforms that use a range table instead of computed fw domains. */
#define HAS_FWTABLE(dev_priv) \
	(INTEL_GEN(dev_priv) >= 9 || \
	 IS_CHERRYVIEW(dev_priv) || \
	 IS_VALLEYVIEW(dev_priv))
828
829
830static const struct intel_forcewake_range __vlv_fw_ranges[] = {
831 GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
832 GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
833 GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
834 GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
835 GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
836 GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
837 GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
838};
839
/* Table-based read-domain lookup (gen9/vlv/chv window). */
#define __fwtable_reg_read_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset))) \
		__fwd = find_fw_domain(uncore, offset); \
	__fwd; \
})

/* Same lookup, but with the gen11 extended offset window. */
#define __gen11_fwtable_reg_read_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (GEN11_NEEDS_FORCE_WAKE((offset))) \
		__fwd = find_fw_domain(uncore, offset); \
	__fwd; \
})
855
856
857static const i915_reg_t gen8_shadowed_regs[] = {
858 RING_TAIL(RENDER_RING_BASE),
859 GEN6_RPNSWREQ,
860 GEN6_RC_VIDEO_FREQ,
861 RING_TAIL(GEN6_BSD_RING_BASE),
862 RING_TAIL(VEBOX_RING_BASE),
863 RING_TAIL(BLT_RING_BASE),
864
865};
866
/*
 * Registers shadowed by hardware on gen11: writes don't need forcewake.
 * *Must* be sorted by offset (binary-searched via mmio_reg_cmp).
 */
static const i915_reg_t gen11_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	RING_TAIL(BLT_RING_BASE),
	RING_TAIL(GEN11_BSD_RING_BASE),
	RING_TAIL(GEN11_BSD2_RING_BASE),
	RING_TAIL(GEN11_VEBOX_RING_BASE),
	RING_TAIL(GEN11_BSD3_RING_BASE),
	RING_TAIL(GEN11_BSD4_RING_BASE),
	RING_TAIL(GEN11_VEBOX2_RING_BASE),
	/* TODO: other registers may be shadowed too */
};
880
881static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
882{
883 u32 offset = i915_mmio_reg_offset(*reg);
884
885 if (key < offset)
886 return -1;
887 else if (key > offset)
888 return 1;
889 else
890 return 0;
891}
892
/* Generate is_genN_shadowed(): binary search of the genN shadow table. */
#define __is_genX_shadowed(x) \
static bool is_gen##x##_shadowed(u32 offset) \
{ \
	const i915_reg_t *regs = gen##x##_shadowed_regs; \
	return BSEARCH(offset, regs, ARRAY_SIZE(gen##x##_shadowed_regs), \
		       mmio_reg_cmp); \
}

__is_genX_shadowed(8)
__is_genX_shadowed(11)
903
/* Gen8 writes need the render domain, unless the register is shadowed. */
#define __gen8_reg_write_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})
913
914
915static const struct intel_forcewake_range __chv_fw_ranges[] = {
916 GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
917 GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
918 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
919 GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
920 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
921 GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
922 GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
923 GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
924 GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
925 GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
926 GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
927 GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
928 GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
929 GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
930 GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
931 GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
932};
933
/* Table-based write-domain lookup; shadowed registers need no forcewake. */
#define __fwtable_reg_write_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
		__fwd = find_fw_domain(uncore, offset); \
	__fwd; \
})

/* Gen11 variant: extended offset window and the gen11 shadow table. */
#define __gen11_fwtable_reg_write_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (GEN11_NEEDS_FORCE_WAKE((offset)) && !is_gen11_shadowed(offset)) \
		__fwd = find_fw_domain(uncore, offset); \
	__fwd; \
})
949
950
951static const struct intel_forcewake_range __gen9_fw_ranges[] = {
952 GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
953 GEN_FW_RANGE(0xb00, 0x1fff, 0),
954 GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
955 GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
956 GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
957 GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
958 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
959 GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER),
960 GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
961 GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
962 GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
963 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
964 GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
965 GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
966 GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER),
967 GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
968 GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
969 GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
970 GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
971 GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
972 GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER),
973 GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
974 GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER),
975 GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
976 GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER),
977 GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
978 GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER),
979 GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
980 GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER),
981 GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
982 GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER),
983 GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
984};
985
986
987static const struct intel_forcewake_range __gen11_fw_ranges[] = {
988 GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
989 GEN_FW_RANGE(0xb00, 0x1fff, 0),
990 GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
991 GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
992 GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
993 GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
994 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
995 GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_BLITTER),
996 GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
997 GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
998 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
999 GEN_FW_RANGE(0x8500, 0x8bff, FORCEWAKE_BLITTER),
1000 GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
1001 GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
1002 GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_ALL),
1003 GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
1004 GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
1005 GEN_FW_RANGE(0xb480, 0xdfff, FORCEWAKE_BLITTER),
1006 GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
1007 GEN_FW_RANGE(0xe900, 0x243ff, FORCEWAKE_BLITTER),
1008 GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
1009 GEN_FW_RANGE(0x24800, 0x3ffff, FORCEWAKE_BLITTER),
1010 GEN_FW_RANGE(0x40000, 0x1bffff, 0),
1011 GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
1012 GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1),
1013 GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0),
1014 GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_BLITTER),
1015 GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
1016 GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3),
1017 GEN_FW_RANGE(0x1d8000, 0x1dbfff, FORCEWAKE_MEDIA_VEBOX1)
1018};
1019
static void
ilk_dummy_write(struct intel_uncore *uncore)
{
	/*
	 * WaIssueDummyWriteToWakeupFromRC6:ilk — issue a dummy write to
	 * wake the chip from rc6 before touching its registers.
	 */
	__raw_uncore_write32(uncore, MI_MODE, 0);
}
1028
/*
 * Slow path of unclaimed_reg_debug(): run the unclaimed-mmio checks
 * around an access and warn if the access itself tripped them (only the
 * "after" check can attribute the failure to this access).
 */
static void
__unclaimed_reg_debug(struct intel_uncore *uncore,
		      const i915_reg_t reg,
		      const bool read,
		      const bool before)
{
	if (WARN(check_for_unclaimed_mmio(uncore) && !before,
		 "Unclaimed %s register 0x%x\n",
		 read ? "read from" : "write to",
		 i915_mmio_reg_offset(reg)))
		/* Only report the first N failures */
		i915_modparams.mmio_debug--;
}
1042
1043static inline void
1044unclaimed_reg_debug(struct intel_uncore *uncore,
1045 const i915_reg_t reg,
1046 const bool read,
1047 const bool before)
1048{
1049 if (likely(!i915_modparams.mmio_debug))
1050 return;
1051
1052 __unclaimed_reg_debug(uncore, reg, read, before);
1053}
1054
/* Common prologue/epilogue for pre-gen6 mmio reads (no forcewake, no lock). */
#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_rpm_wakelock_held(uncore->rpm);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

/* Plain gen2 read: just the raw access plus tracing. */
#define __gen2_read(x) \
static u##x \
gen2_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_uncore_read##x(uncore, reg); \
	GEN2_READ_FOOTER; \
}

/* Gen5 (ILK) read: precede with the rc6 dummy-write workaround. */
#define __gen5_read(x) \
static u##x \
gen5_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(uncore); \
	val = __raw_uncore_read##x(uncore, reg); \
	GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER
1094
/*
 * Common prologue/epilogue for gen6+ mmio reads: take uncore->lock and
 * bracket the access with the unclaimed-mmio debug checks.
 */
#define GEN6_READ_HEADER(x) \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_wakelock_held(uncore->rpm); \
	spin_lock_irqsave(&uncore->lock, irqflags); \
	unclaimed_reg_debug(uncore, reg, true, true)

#define GEN6_READ_FOOTER \
	unclaimed_reg_debug(uncore, reg, true, false); \
	spin_unlock_irqrestore(&uncore->lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val
1108
/*
 * Slow path of __force_wake_auto(): wake the needed domains and arm
 * their auto-release timers. noinline keeps it off the hot read/write
 * path. Caller holds uncore->lock.
 */
static noinline void ___force_wake_auto(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp)
		fw_domain_arm_timer(domain);

	uncore->funcs.force_wake_get(uncore, fw_domains);
}
1122
/*
 * Automatically wake whichever of @fw_domains exist on this platform
 * and are not already active. Caller holds uncore->lock.
 */
static inline void __force_wake_auto(struct intel_uncore *uncore,
				     enum forcewake_domains fw_domains)
{
	if (WARN_ON(!fw_domains))
		return;

	/* Turn on all requested but inactive supported forcewake domains. */
	fw_domains &= uncore->fw_domains;
	fw_domains &= ~uncore->fw_domains_active;

	if (fw_domains)
		___force_wake_auto(uncore, fw_domains);
}
1136
/*
 * Gen6+ read template: look up the forcewake domains for the offset via
 * the per-platform __<func>_reg_read_fw_domains() macro, auto-wake them,
 * then perform the raw read.
 */
#define __gen_read(func, x) \
static u##x \
func##_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __##func##_reg_read_fw_domains(uncore, offset); \
	if (fw_engine) \
		__force_wake_auto(uncore, fw_engine); \
	val = __raw_uncore_read##x(uncore, reg); \
	GEN6_READ_FOOTER; \
}
#define __gen6_read(x) __gen_read(gen6, x)
#define __fwtable_read(x) __gen_read(fwtable, x)
#define __gen11_fwtable_read(x) __gen_read(gen11_fwtable, x)

__gen11_fwtable_read(8)
__gen11_fwtable_read(16)
__gen11_fwtable_read(32)
__gen11_fwtable_read(64)
__fwtable_read(8)
__fwtable_read(16)
__fwtable_read(32)
__fwtable_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)

#undef __gen11_fwtable_read
#undef __fwtable_read
#undef __gen6_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER
1170
/* Common prologue/epilogue for pre-gen6 mmio writes (no forcewake, no lock). */
#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(uncore->rpm); \

#define GEN2_WRITE_FOOTER

/* Plain gen2 write: just the raw access plus tracing. */
#define __gen2_write(x) \
static void \
gen2_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN2_WRITE_FOOTER; \
}

/* Gen5 (ILK) write: precede with the rc6 dummy-write workaround. */
#define __gen5_write(x) \
static void \
gen5_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(uncore); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER
1206
/*
 * Common prologue/epilogue for gen6+ mmio writes: take uncore->lock and
 * bracket the access with the unclaimed-mmio debug checks.
 */
#define GEN6_WRITE_HEADER \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(uncore->rpm); \
	spin_lock_irqsave(&uncore->lock, irqflags); \
	unclaimed_reg_debug(uncore, reg, false, true)

#define GEN6_WRITE_FOOTER \
	unclaimed_reg_debug(uncore, reg, false, false); \
	spin_unlock_irqrestore(&uncore->lock, irqflags)

/* Gen6 write: wait for a free GT FIFO entry instead of using forcewake. */
#define __gen6_write(x) \
static void \
gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__gen6_gt_wait_for_fifo(uncore); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN6_WRITE_FOOTER; \
}

/*
 * Gen8+ write template: look up the forcewake domains for the offset via
 * the per-platform __<func>_reg_write_fw_domains() macro, auto-wake them,
 * then perform the raw write.
 */
#define __gen_write(func, x) \
static void \
func##_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __##func##_reg_write_fw_domains(uncore, offset); \
	if (fw_engine) \
		__force_wake_auto(uncore, fw_engine); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN6_WRITE_FOOTER; \
}
#define __gen8_write(x) __gen_write(gen8, x)
#define __fwtable_write(x) __gen_write(fwtable, x)
#define __gen11_fwtable_write(x) __gen_write(gen11_fwtable, x)

__gen11_fwtable_write(8)
__gen11_fwtable_write(16)
__gen11_fwtable_write(32)
__fwtable_write(8)
__fwtable_write(16)
__fwtable_write(32)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)

#undef __gen11_fwtable_write
#undef __fwtable_write
#undef __gen8_write
#undef __gen6_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER
1263
/*
 * Wire up the uncore mmio_write* vfuncs to the x##_write{8,16,32}
 * accessors generated above.  do { } while (0) so the macro behaves as
 * a single statement.
 */
#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
do { \
	(uncore)->funcs.mmio_writeb = x##_write8; \
	(uncore)->funcs.mmio_writew = x##_write16; \
	(uncore)->funcs.mmio_writel = x##_write32; \
} while (0)

/* Same for the read vfuncs; reads additionally provide a 64-bit accessor. */
#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
do { \
	(uncore)->funcs.mmio_readb = x##_read8; \
	(uncore)->funcs.mmio_readw = x##_read16; \
	(uncore)->funcs.mmio_readl = x##_read32; \
	(uncore)->funcs.mmio_readq = x##_read64; \
} while (0)
1278
1279
/*
 * Initialise a single forcewake domain: record its set/ack register
 * addresses, install the auto-release timer and mark the domain present
 * in uncore->fw_domains.  May be called a second time for the same id to
 * re-point the registers (see the IVB legacy-FORCEWAKE fallback in
 * intel_uncore_fw_domains_init); the later call overwrites the earlier
 * state.
 */
static void fw_domain_init(struct intel_uncore *uncore,
			   enum forcewake_domain_id domain_id,
			   i915_reg_t reg_set,
			   i915_reg_t reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
		return;

	d = &uncore->fw_domain[domain_id];

	/* Re-initialising a domain that still holds references is a bug. */
	WARN_ON(d->wake_count);

	WARN_ON(!i915_mmio_reg_valid(reg_set));
	WARN_ON(!i915_mmio_reg_valid(reg_ack));

	d->wake_count = 0;
	/* Precompute the iomem addresses so the hot path avoids the lookup. */
	d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set);
	d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack);

	d->id = domain_id;

	/*
	 * d->mask = BIT(id) below relies on each FORCEWAKE_* flag being
	 * exactly 1 << its FW_DOMAIN_ID_*; enforce that at compile time.
	 */
	BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
	BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
	BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX0));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX1));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX2));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX3));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1));

	d->mask = BIT(domain_id);

	/* Timer used to auto-release the domain shortly after last use. */
	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	d->timer.function = intel_uncore_fw_release_timer;

	uncore->fw_domains |= BIT(domain_id);

	/* Start from a known state: clear any stale forcewake request. */
	fw_domain_reset(d);
}
1323
1324static void fw_domain_fini(struct intel_uncore *uncore,
1325 enum forcewake_domain_id domain_id)
1326{
1327 struct intel_uncore_forcewake_domain *d;
1328
1329 if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
1330 return;
1331
1332 d = &uncore->fw_domain[domain_id];
1333
1334 WARN_ON(d->wake_count);
1335 WARN_ON(hrtimer_cancel(&d->timer));
1336 memset(d, 0, sizeof(*d));
1337
1338 uncore->fw_domains &= ~BIT(domain_id);
1339}
1340
/*
 * Set up the per-platform forcewake domains and the get/put vfuncs used
 * to operate them.  Each branch covers one generation range, newest
 * first; platforms without forcewake bail out early.
 */
static void intel_uncore_fw_domains_init(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore_to_i915(uncore);

	if (!intel_uncore_has_forcewake(uncore))
		return;

	if (INTEL_GEN(i915) >= 11) {
		int i;

		uncore->funcs.force_wake_get =
			fw_domains_get_with_fallback;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		/*
		 * Gen11 splits media forcewake per engine; only register
		 * domains for video/video-enhance engines that exist.
		 */
		for (i = 0; i < I915_MAX_VCS; i++) {
			if (!HAS_ENGINE(i915, _VCS(i)))
				continue;

			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
				       FORCEWAKE_MEDIA_VDBOX_GEN11(i),
				       FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i));
		}
		for (i = 0; i < I915_MAX_VECS; i++) {
			if (!HAS_ENGINE(i915, _VECS(i)))
				continue;

			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
				       FORCEWAKE_MEDIA_VEBOX_GEN11(i),
				       FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
		}
	} else if (IS_GEN_RANGE(i915, 9, 10)) {
		uncore->funcs.force_wake_get =
			fw_domains_get_with_fallback;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		uncore->funcs.force_wake_get = fw_domains_get;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		uncore->funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(i915)) {
		u32 ecobus;

		/*
		 * IVB may have either the legacy FORCEWAKE or the
		 * multi-threaded FORCEWAKE_MT mechanism depending on the
		 * part; probe which one works by exercising MT forcewake
		 * and checking whether ECOBUS reports it enabled.
		 * NOTE(review): this matches the probing sequence below —
		 * confirm against PRM/bspec if touching it.
		 */
		uncore->funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		uncore->funcs.force_wake_put = fw_domains_put;

		/*
		 * Clear any stale legacy forcewake request before probing;
		 * the ECOBUS posting read flushes the write.
		 */
		__raw_uncore_write32(uncore, FORCEWAKE, 0);
		__raw_posting_read(uncore, ECOBUS);

		/* Tentatively assume MT forcewake works... */
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);

		spin_lock_irq(&uncore->lock);
		fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER);
		ecobus = __raw_uncore_read32(uncore, ECOBUS);
		fw_domains_put(uncore, FORCEWAKE_RENDER);
		spin_unlock_irq(&uncore->lock);

		/* ...and fall back to the legacy registers if it didn't. */
		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (IS_GEN(i915, 6)) {
		uncore->funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

	/* Every forcewake-capable platform must register at least one domain. */
	WARN_ON(uncore->fw_domains == 0);
}
1453
/*
 * Install a forcewake range table on the uncore.  Wrapped in
 * do { } while (0) so the multi-statement macro behaves as a single
 * statement (consistent with ASSIGN_{READ,WRITE}_MMIO_VFUNCS above);
 * the previous bare { } block could mis-parse when used unbraced in
 * an if/else.  ARRAY_SIZE must see the actual array, hence a macro
 * rather than a function.
 */
#define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \
do { \
	(uncore)->fw_domains_table = \
			(struct intel_forcewake_range *)(d); \
	(uncore)->fw_domains_table_entries = ARRAY_SIZE((d)); \
} while (0)
1460
/*
 * iosf_mbi notifier: hold all forcewake domains for the duration of a
 * PMIC bus access (MBI_PMIC_BUS_ACCESS_BEGIN ... _END), releasing them
 * again when the access completes.
 *
 * NOTE(review): the rpm wakeref asserts are suppressed around the
 * forcewake get — presumably because this notifier can be invoked from
 * a context where the device wakeref bookkeeping would otherwise trip
 * (e.g. during a runtime-pm transition); confirm against the iosf_mbi
 * caller before changing.
 */
static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
					 unsigned long action, void *data)
{
	struct intel_uncore *uncore = container_of(nb,
			struct intel_uncore, pmic_bus_access_nb);

	switch (action) {
	case MBI_PMIC_BUS_ACCESS_BEGIN:
		/*
		 * Grab every forcewake domain; the matching put happens in
		 * the _END case below.
		 */
		disable_rpm_wakeref_asserts(uncore->rpm);
		intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
		enable_rpm_wakeref_asserts(uncore->rpm);
		break;
	case MBI_PMIC_BUS_ACCESS_END:
		intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
		break;
	}

	return NOTIFY_OK;
}
1493
1494static int uncore_mmio_setup(struct intel_uncore *uncore)
1495{
1496 struct drm_i915_private *i915 = uncore_to_i915(uncore);
1497 struct pci_dev *pdev = i915->drm.pdev;
1498 int mmio_bar;
1499 int mmio_size;
1500
1501 mmio_bar = IS_GEN(i915, 2) ? 1 : 0;
1502
1503
1504
1505
1506
1507
1508
1509
1510 if (INTEL_GEN(i915) < 5)
1511 mmio_size = 512 * 1024;
1512 else
1513 mmio_size = 2 * 1024 * 1024;
1514 uncore->regs = pci_iomap(pdev, mmio_bar, mmio_size);
1515 if (uncore->regs == NULL) {
1516 DRM_ERROR("failed to map registers\n");
1517
1518 return -EIO;
1519 }
1520
1521 return 0;
1522}
1523
1524static void uncore_mmio_cleanup(struct intel_uncore *uncore)
1525{
1526 struct drm_i915_private *i915 = uncore_to_i915(uncore);
1527 struct pci_dev *pdev = i915->drm.pdev;
1528
1529 pci_iounmap(pdev, uncore->regs);
1530}
1531
/*
 * Early (pre-MMIO) uncore init: only sets up the spinlock that
 * serialises register access and forcewake state.
 */
void intel_uncore_init_early(struct intel_uncore *uncore)
{
	spin_lock_init(&uncore->lock);
}
1536
/*
 * Main uncore MMIO init: map the register BAR, detect forcewake
 * capability, initialise the forcewake domains, and select the per-gen
 * read/write accessors plus the matching forcewake range table.
 * Returns 0 on success or a negative errno from the BAR mapping.
 */
int intel_uncore_init_mmio(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore_to_i915(uncore);
	int ret;

	ret = uncore_mmio_setup(uncore);
	if (ret)
		return ret;

	i915_check_vgpu(i915);

	/* Gen6+ has forcewake, but not when running as a vGPU guest. */
	if (INTEL_GEN(i915) > 5 && !intel_vgpu_active(i915))
		uncore->flags |= UNCORE_HAS_FORCEWAKE;

	intel_uncore_fw_domains_init(uncore);
	__intel_uncore_early_sanitize(uncore, 0);

	/* Allow one escalation of the unclaimed-mmio debug reporting. */
	uncore->unclaimed_mmio_check = 1;
	uncore->pmic_bus_access_nb.notifier_call =
		i915_pmic_bus_access_notifier;

	uncore->rpm = &i915->runtime_pm;

	/* Pick accessors: non-forcewake gens first, then newest to oldest. */
	if (!intel_uncore_has_forcewake(uncore)) {
		if (IS_GEN(i915, 5)) {
			ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen5);
			ASSIGN_READ_MMIO_VFUNCS(uncore, gen5);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen2);
			ASSIGN_READ_MMIO_VFUNCS(uncore, gen2);
		}
	} else if (IS_GEN_RANGE(i915, 6, 7)) {
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);

		/* VLV reads need a range table; other gen6/7 parts do not. */
		if (IS_VALLEYVIEW(i915)) {
			ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);
			ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
		}
	} else if (IS_GEN(i915, 8)) {
		if (IS_CHERRYVIEW(i915)) {
			ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
			ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
			ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);

		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen8);
			ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
		}
	} else if (IS_GEN_RANGE(i915, 9, 10)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
		ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
	} else {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen11_fwtable);
		ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable);
	}

	/* Record which flavours of unclaimed-access detection exist. */
	if (HAS_FPGA_DBG_UNCLAIMED(i915))
		uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		uncore->flags |= UNCORE_HAS_DBG_UNCLAIMED;

	if (IS_GEN_RANGE(i915, 6, 7))
		uncore->flags |= UNCORE_HAS_FIFO;

	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);

	return 0;
}
1610
1611
1612
1613
1614
1615
1616void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore)
1617{
1618 struct drm_i915_private *i915 = uncore_to_i915(uncore);
1619
1620 if (INTEL_GEN(i915) >= 11) {
1621 enum forcewake_domains fw_domains = uncore->fw_domains;
1622 enum forcewake_domain_id domain_id;
1623 int i;
1624
1625 for (i = 0; i < I915_MAX_VCS; i++) {
1626 domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;
1627
1628 if (HAS_ENGINE(i915, _VCS(i)))
1629 continue;
1630
1631 if (fw_domains & BIT(domain_id))
1632 fw_domain_fini(uncore, domain_id);
1633 }
1634
1635 for (i = 0; i < I915_MAX_VECS; i++) {
1636 domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;
1637
1638 if (HAS_ENGINE(i915, _VECS(i)))
1639 continue;
1640
1641 if (fw_domains & BIT(domain_id))
1642 fw_domain_fini(uncore, domain_id);
1643 }
1644 }
1645}
1646
/*
 * Uncore teardown.  Order matters: sanitize first, then unregister the
 * PMIC notifier and reset forcewake while holding the punit lock (so no
 * PMIC-bus notification can race the reset), and only then unmap the
 * register BAR.
 */
void intel_uncore_fini_mmio(struct intel_uncore *uncore)
{
	intel_uncore_sanitize(uncore_to_i915(uncore));

	iosf_mbi_punit_acquire();
	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
		&uncore->pmic_bus_access_nb);
	intel_uncore_forcewake_reset(uncore);
	iosf_mbi_punit_release();
	uncore_mmio_cleanup(uncore);
}
1659
/*
 * Whitelist of registers userspace may read through the
 * i915_reg_read_ioctl below.  Currently only the render ring timestamp
 * (low + high dwords) is exposed, on gen4-11.
 */
static const struct reg_whitelist {
	i915_reg_t offset_ldw;	/* lower dword of the register */
	i915_reg_t offset_udw;	/* upper dword, used for 8-byte reads */
	u16 gen_mask;		/* generations on which the entry is valid */
	u8 size;		/* access size in bytes; power of two, <= 8 */
} reg_read_whitelist[] = { {
	.offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
	.offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
	.gen_mask = INTEL_GEN_MASK(4, 11),
	.size = 8
} };
1671
/*
 * DRM_IOCTL_I915_REG_READ: let userspace read a whitelisted register.
 * The low bits of reg->offset (below the entry's access size) are
 * repurposed as flags, e.g. I915_REG_READ_8B_WA selects the 2x32 read
 * workaround for 8-byte registers.  Returns -EINVAL for offsets not on
 * the whitelist or unsupported size/flag combinations.
 */
int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct intel_uncore *uncore = &i915->uncore;
	struct drm_i915_reg_read *reg = data;
	struct reg_whitelist const *entry;
	intel_wakeref_t wakeref;
	unsigned int flags;
	int remain;
	int ret = 0;

	/* Scan the whitelist for an entry valid on this gen and offset. */
	entry = reg_read_whitelist;
	remain = ARRAY_SIZE(reg_read_whitelist);
	while (remain) {
		u32 entry_offset = i915_mmio_reg_offset(entry->offset_ldw);

		GEM_BUG_ON(!is_power_of_2(entry->size));
		GEM_BUG_ON(entry->size > 8);
		GEM_BUG_ON(entry_offset & (entry->size - 1));

		/* Mask off the flag bits before comparing offsets. */
		if (INTEL_INFO(i915)->gen_mask & entry->gen_mask &&
		    entry_offset == (reg->offset & -entry->size))
			break;
		entry++;
		remain--;
	}

	if (!remain)
		return -EINVAL;

	flags = reg->offset & (entry->size - 1);

	/* Dispatch on access size + flags, with the device awake. */
	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		if (entry->size == 8 && flags == I915_REG_READ_8B_WA)
			reg->val = intel_uncore_read64_2x32(uncore,
							   entry->offset_ldw,
							   entry->offset_udw);
		else if (entry->size == 8 && flags == 0)
			reg->val = intel_uncore_read64(uncore,
						       entry->offset_ldw);
		else if (entry->size == 4 && flags == 0)
			reg->val = intel_uncore_read(uncore, entry->offset_ldw);
		else if (entry->size == 2 && flags == 0)
			reg->val = intel_uncore_read16(uncore,
						       entry->offset_ldw);
		else if (entry->size == 1 && flags == 0)
			reg->val = intel_uncore_read8(uncore,
						      entry->offset_ldw);
		else
			ret = -EINVAL;
	}

	return ret;
}
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
/**
 * __intel_wait_for_register_fw - wait until a register matches a value
 * @uncore: the struct intel_uncore
 * @reg: the register to read
 * @mask: mask to apply to the register value
 * @value: value to wait for, under @mask
 * @fast_timeout_us: busy-wait (atomic) timeout in microseconds, <= 20000
 * @slow_timeout_ms: optional sleeping timeout in milliseconds
 * @out_value: optional placeholder for the last register value read
 *
 * Polls until (read_fw(@reg) & @mask) == @value.  The caller is
 * responsible for holding the required forcewake ("_fw" accessors are
 * used, which skip the forcewake lookup).  The fast phase busy-waits
 * atomically; if it times out and @slow_timeout_ms is non-zero, a
 * sleeping wait follows.
 *
 * Returns 0 on success, -ETIMEDOUT otherwise.
 */
int __intel_wait_for_register_fw(struct intel_uncore *uncore,
				 i915_reg_t reg,
				 u32 mask,
				 u32 value,
				 unsigned int fast_timeout_us,
				 unsigned int slow_timeout_ms,
				 u32 *out_value)
{
	u32 uninitialized_var(reg_value);
#define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value)
	int ret;

	/* Only sleep if the caller allows it via a slow timeout. */
	might_sleep_if(slow_timeout_ms);
	/* Catch callers who would busy-wait for longer than 20ms. */
	GEM_BUG_ON(fast_timeout_us > 20000);

	ret = -ETIMEDOUT;
	if (fast_timeout_us && fast_timeout_us <= 20000)
		ret = _wait_for_atomic(done, fast_timeout_us, 0);
	if (ret && slow_timeout_ms)
		ret = wait_for(done, slow_timeout_ms);

	if (out_value)
		*out_value = reg_value;

	return ret;
#undef done
}
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
/**
 * __intel_wait_for_register - wait until a register matches a value
 * @uncore: the struct intel_uncore
 * @reg: the register to read
 * @mask: mask to apply to the register value
 * @value: value to wait for, under @mask
 * @fast_timeout_us: busy-wait timeout in microseconds (forcewake held)
 * @slow_timeout_ms: optional sleeping timeout in milliseconds
 * @out_value: optional placeholder for the last register value read
 *
 * Like __intel_wait_for_register_fw(), but takes the required forcewake
 * itself: the fast phase runs under the uncore lock with forcewake held;
 * the optional slow phase polls without forcewake using the normal
 * (tracing-free) read accessor.
 *
 * Returns 0 on success, -ETIMEDOUT otherwise.
 */
int __intel_wait_for_register(struct intel_uncore *uncore,
			      i915_reg_t reg,
			      u32 mask,
			      u32 value,
			      unsigned int fast_timeout_us,
			      unsigned int slow_timeout_ms,
			      u32 *out_value)
{
	unsigned fw =
		intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
	u32 reg_value;
	int ret;

	might_sleep_if(slow_timeout_ms);

	spin_lock_irq(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, fw);

	/* Fast phase: atomic wait with forcewake held; no slow timeout. */
	ret = __intel_wait_for_register_fw(uncore,
					   reg, mask, value,
					   fast_timeout_us, 0, &reg_value);

	intel_uncore_forcewake_put__locked(uncore, fw);
	spin_unlock_irq(&uncore->lock);

	/* Slow phase: sleeping poll, forcewake dropped between reads. */
	if (ret && slow_timeout_ms)
		ret = __wait_for(reg_value = intel_uncore_read_notrace(uncore,
								       reg),
				 (reg_value & mask) == value,
				 slow_timeout_ms * 1000, 10, 1000);

	/* Emit a single trace event for the final value observed. */
	trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);

	if (out_value)
		*out_value = reg_value;

	return ret;
}
1841
/* Report (and clear, per the helper) any pending unclaimed MMIO access. */
bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
{
	return check_for_unclaimed_mmio(uncore);
}
1846
/*
 * Check for unclaimed MMIO accesses and, the first time one is seen,
 * bump i915.mmio_debug so subsequent accesses get full reporting.
 * unclaimed_mmio_check bounds how many times this oneshot escalation
 * may fire.  Returns true if an unclaimed access was detected.
 */
bool
intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
{
	bool ret = false;

	spin_lock_irq(&uncore->lock);

	/* Escalation budget exhausted (or detection disabled). */
	if (unlikely(uncore->unclaimed_mmio_check <= 0))
		goto out;

	if (unlikely(intel_uncore_unclaimed_mmio(uncore))) {
		if (!i915_modparams.mmio_debug) {
			DRM_DEBUG("Unclaimed register detected, "
				  "enabling oneshot unclaimed register reporting. "
				  "Please use i915.mmio_debug=N for more information.\n");
			i915_modparams.mmio_debug++;
		}
		uncore->unclaimed_mmio_check--;
		ret = true;
	}

out:
	spin_unlock_irq(&uncore->lock);

	return ret;
}
1873
1874static enum forcewake_domains
1875intel_uncore_forcewake_for_read(struct intel_uncore *uncore,
1876 i915_reg_t reg)
1877{
1878 struct drm_i915_private *i915 = uncore_to_i915(uncore);
1879 u32 offset = i915_mmio_reg_offset(reg);
1880 enum forcewake_domains fw_domains;
1881
1882 if (INTEL_GEN(i915) >= 11) {
1883 fw_domains = __gen11_fwtable_reg_read_fw_domains(uncore, offset);
1884 } else if (HAS_FWTABLE(i915)) {
1885 fw_domains = __fwtable_reg_read_fw_domains(uncore, offset);
1886 } else if (INTEL_GEN(i915) >= 6) {
1887 fw_domains = __gen6_reg_read_fw_domains(uncore, offset);
1888 } else {
1889
1890 if (intel_uncore_has_forcewake(uncore))
1891 MISSING_CASE(INTEL_GEN(i915));
1892
1893 fw_domains = 0;
1894 }
1895
1896 WARN_ON(fw_domains & ~uncore->fw_domains);
1897
1898 return fw_domains;
1899}
1900
1901static enum forcewake_domains
1902intel_uncore_forcewake_for_write(struct intel_uncore *uncore,
1903 i915_reg_t reg)
1904{
1905 struct drm_i915_private *i915 = uncore_to_i915(uncore);
1906 u32 offset = i915_mmio_reg_offset(reg);
1907 enum forcewake_domains fw_domains;
1908
1909 if (INTEL_GEN(i915) >= 11) {
1910 fw_domains = __gen11_fwtable_reg_write_fw_domains(uncore, offset);
1911 } else if (HAS_FWTABLE(i915) && !IS_VALLEYVIEW(i915)) {
1912 fw_domains = __fwtable_reg_write_fw_domains(uncore, offset);
1913 } else if (IS_GEN(i915, 8)) {
1914 fw_domains = __gen8_reg_write_fw_domains(uncore, offset);
1915 } else if (IS_GEN_RANGE(i915, 6, 7)) {
1916 fw_domains = FORCEWAKE_RENDER;
1917 } else {
1918
1919 if (intel_uncore_has_forcewake(uncore))
1920 MISSING_CASE(INTEL_GEN(i915));
1921
1922 fw_domains = 0;
1923 }
1924
1925 WARN_ON(fw_domains & ~uncore->fw_domains);
1926
1927 return fw_domains;
1928}
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
/**
 * intel_uncore_forcewake_for_reg - which forcewake domains does a
 *				    register access need?
 * @uncore: the struct intel_uncore
 * @reg: register in question
 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
 *
 * Returns the union of forcewake domains required for the requested
 * read and/or write access to @reg on this platform; 0 when the
 * hardware has no forcewake at all.
 */
enum forcewake_domains
intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
			       i915_reg_t reg, unsigned int op)
{
	enum forcewake_domains fw_domains = 0;

	/* At least one of FW_REG_READ / FW_REG_WRITE must be requested. */
	WARN_ON(!op);

	if (!intel_uncore_has_forcewake(uncore))
		return 0;

	if (op & FW_REG_READ)
		fw_domains = intel_uncore_forcewake_for_read(uncore, reg);

	if (op & FW_REG_WRITE)
		fw_domains |= intel_uncore_forcewake_for_write(uncore, reg);

	return fw_domains;
}
1963
1964#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1965#include "selftests/mock_uncore.c"
1966#include "selftests/intel_uncore.c"
1967#endif
1968