/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "../i915_selftest.h"

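/*
 * Sanity checks for the forcewake and shadowed-register tables used by
 * intel_uncore.c. Like the other i915 selftests, this file is not built
 * standalone: intel_uncore.c #includes it at the end of the file when
 * CONFIG_DRM_I915_SELFTEST is enabled, which is what gives it access to
 * the static __*_fw_ranges and *_shadowed_regs tables referenced below.
 */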
static int intel_fw_table_check(const struct intel_forcewake_range *ranges,
                                unsigned int num_ranges,
                                bool is_watertight)
{
        unsigned int i;
        s32 prev;

        for (i = 0, prev = -1; i < num_ranges; i++, ranges++) {
                /* Check that the table is watertight */
                if (is_watertight && (prev + 1) != (s32)ranges->start) {
                        pr_err("%s: entry[%d]:(%x, %x) is not watertight to previous (%x)\n",
                               __func__, i, ranges->start, ranges->end, prev);
                        return -EINVAL;
                }

                /* Check that the table never goes backwards */
                if (prev >= (s32)ranges->start) {
                        pr_err("%s: entry[%d]:(%x, %x) is less than the previous (%x)\n",
                               __func__, i, ranges->start, ranges->end, prev);
                        return -EINVAL;
                }

                /* Check that the entry is valid */
                if (ranges->start >= ranges->end) {
                        pr_err("%s: entry[%d]:(%x, %x) has negative length\n",
                               __func__, i, ranges->start, ranges->end);
                        return -EINVAL;
                }

                prev = ranges->end;
        }

        return 0;
}

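/*
 * The shadowed-register tables must be sorted by ascending offset: the
 * driver looks them up with a binary search when deciding whether a
 * register write needs special handling, so an out-of-order entry would
 * silently be missed at runtime.
 */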
static int intel_shadow_table_check(void)
{
        struct {
                const i915_reg_t *regs;
                unsigned int size;
        } reg_lists[] = {
                { gen8_shadowed_regs, ARRAY_SIZE(gen8_shadowed_regs) },
                { gen11_shadowed_regs, ARRAY_SIZE(gen11_shadowed_regs) },
        };
        const i915_reg_t *reg;
        unsigned int i, j;
        s32 prev;

        for (j = 0; j < ARRAY_SIZE(reg_lists); ++j) {
                reg = reg_lists[j].regs;
                for (i = 0, prev = -1; i < reg_lists[j].size; i++, reg++) {
                        u32 offset = i915_mmio_reg_offset(*reg);

                        if (prev >= (s32)offset) {
                                pr_err("%s: entry[%d]:(%x) is before previous (%x)\n",
                                       __func__, i, offset, prev);
                                return -EINVAL;
                        }

                        prev = offset;
                }
        }

        return 0;
}

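/*
 * Mock selftests: purely static checks on the hand-written tables,
 * run without touching any hardware.
 */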
int intel_uncore_mock_selftests(void)
{
        struct {
                const struct intel_forcewake_range *ranges;
                unsigned int num_ranges;
                bool is_watertight;
        } fw[] = {
                { __vlv_fw_ranges, ARRAY_SIZE(__vlv_fw_ranges), false },
                { __chv_fw_ranges, ARRAY_SIZE(__chv_fw_ranges), false },
                { __gen9_fw_ranges, ARRAY_SIZE(__gen9_fw_ranges), true },
                { __gen11_fw_ranges, ARRAY_SIZE(__gen11_fw_ranges), true },
        };
        int err, i;

        for (i = 0; i < ARRAY_SIZE(fw); i++) {
                err = intel_fw_table_check(fw[i].ranges,
                                           fw[i].num_ranges,
                                           fw[i].is_watertight);
                if (err)
                        return err;
        }

        err = intel_shadow_table_check();
        if (err)
                return err;

        return 0;
}

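/*
 * Hardware test: take every forcewake domain, record each mmio offset
 * that can be read without tripping the unclaimed-access detector, then
 * re-read those offsets through the normal (auto-forcewake) path and
 * check that none is reported as unclaimed, i.e. that the forcewake
 * tables cover every register that needs a wakeref.
 */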
static int intel_uncore_check_forcewake_domains(struct drm_i915_private *dev_priv)
{
#define FW_RANGE 0x40000
        unsigned long *valid;
        u32 offset;
        int err;

        /* The test relies on the hardware's unclaimed-mmio detection */
        if (!HAS_FPGA_DBG_UNCLAIMED(dev_priv) &&
            !IS_VALLEYVIEW(dev_priv) &&
            !IS_CHERRYVIEW(dev_priv))
                return 0;

        /*
         * Blindly reading 256KiB of mmio space can lock up some machines,
         * so only run this sweep when the known-dangerous selftests are
         * explicitly enabled.
         */
        if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
                return 0;

        valid = kcalloc(BITS_TO_LONGS(FW_RANGE), sizeof(*valid),
                        GFP_KERNEL);
        if (!valid)
                return -ENOMEM;

        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

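        /*
         * First pass: with every forcewake domain explicitly held, clear
         * any stale unclaimed-mmio flag, then read each dword in the range
         * and note which offsets respond without an unclaimed-access
         * error; only those are worth checking in the second pass.
         */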
        check_for_unclaimed_mmio(dev_priv);
        for (offset = 0; offset < FW_RANGE; offset += 4) {
                i915_reg_t reg = { offset };

                (void)I915_READ_FW(reg);
                if (!check_for_unclaimed_mmio(dev_priv))
                        set_bit(offset, valid);
        }

        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

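        /*
         * Second pass: release all forcewake and read each valid offset
         * through I915_READ(), which consults the forcewake tables. An
         * unclaimed access here means the tables failed to wake the
         * domain that the register actually lives in.
         */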
        err = 0;
        for_each_set_bit(offset, valid, FW_RANGE) {
                i915_reg_t reg = { offset };

                iosf_mbi_punit_acquire();
                intel_uncore_forcewake_reset(dev_priv);
                iosf_mbi_punit_release();

                check_for_unclaimed_mmio(dev_priv);

                (void)I915_READ(reg);
                if (check_for_unclaimed_mmio(dev_priv)) {
                        pr_err("Unclaimed mmio read to register 0x%04x\n",
                               offset);
                        err = -EINVAL;
                }
        }

        kfree(valid);
        return err;
}

int intel_uncore_live_selftests(struct drm_i915_private *i915)
{
        int err;

        /* Confirm the table we load is still valid */
        err = intel_fw_table_check(i915->uncore.fw_domains_table,
                                   i915->uncore.fw_domains_table_entries,
                                   INTEL_GEN(i915) >= 9);
        if (err)
                return err;

        err = intel_uncore_check_forcewake_domains(i915);
        if (err)
                return err;

        return 0;
}