/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu.h"

bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
        /* When the hypervisor blocks MMIO access for this VF, every
         * register read returns all ones. Probe a scratch register
         * (0xc040) to detect that state.
         */
        return RREG32_NO_KIQ(0xc040) == 0xffffffff;
}

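/*
 * Usage sketch (illustrative, not part of this file): a VF driver would
 * typically probe for blocked MMIO before trusting direct register reads,
 * e.g.:
 *
 *	if (amdgpu_sriov_vf(adev) && amdgpu_virt_mmio_blocked(adev))
 *		dev_warn(adev->dev, "MMIO blocked by hypervisor\n");
 *
 * amdgpu_sriov_vf() is the real VF check; the dev_warn() handling is only
 * an example of how a caller might react.
 */
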
void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
        /* enable virtual display */
        adev->mode_info.num_crtc = 1;
        adev->enable_virtual_display = true;
        adev->cg_flags = 0;
        adev->pg_flags = 0;
}

uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
        signed long r, cnt = 0;
        unsigned long flags;
        uint32_t seq;
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
        struct amdgpu_ring *ring = &kiq->ring;

        BUG_ON(!ring->funcs->emit_rreg);

        spin_lock_irqsave(&kiq->ring_lock, flags);
        amdgpu_ring_alloc(ring, 32);
        amdgpu_ring_emit_rreg(ring, reg);
        amdgpu_fence_emit_polling(ring, &seq);
        amdgpu_ring_commit(ring);
        spin_unlock_irqrestore(&kiq->ring_lock, flags);

        r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

        /* Don't retry with sleeps during a GPU reset or from interrupt
         * context: msleep() is not allowed there, and waiting here could
         * block the gpu_recover() routine forever (e.g. when this read is
         * issued from gpu_recover() itself because of a request from
         * another VF). Bail out immediately instead.
         */
        if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
                goto failed_kiq_read;

        might_sleep();
        while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
                msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
                r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
        }

        if (cnt > MAX_KIQ_REG_TRY)
                goto failed_kiq_read;

        return adev->wb.wb[adev->virt.reg_val_offs];

failed_kiq_read:
        pr_err("failed to read reg:%x\n", reg);
        return ~0;
}

void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
        signed long r, cnt = 0;
        unsigned long flags;
        uint32_t seq;
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
        struct amdgpu_ring *ring = &kiq->ring;

        BUG_ON(!ring->funcs->emit_wreg);

        spin_lock_irqsave(&kiq->ring_lock, flags);
        amdgpu_ring_alloc(ring, 32);
        amdgpu_ring_emit_wreg(ring, reg, v);
        amdgpu_fence_emit_polling(ring, &seq);
        amdgpu_ring_commit(ring);
        spin_unlock_irqrestore(&kiq->ring_lock, flags);

        r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

        /* Don't retry with sleeps during a GPU reset or from interrupt
         * context; see amdgpu_virt_kiq_rreg() above for the rationale.
         */
        if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
                goto failed_kiq_write;

        might_sleep();
        while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
                msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
                r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
        }

        if (cnt > MAX_KIQ_REG_TRY)
                goto failed_kiq_write;

        return;

failed_kiq_write:
        pr_err("failed to write reg:%x\n", reg);
}

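/*
 * Dispatch sketch (illustrative, hedged): under SR-IOV runtime the generic
 * MMIO accessors route through the two KIQ helpers above instead of touching
 * registers directly, roughly:
 *
 *	if (amdgpu_sriov_runtime(adev))
 *		return amdgpu_virt_kiq_rreg(adev, reg);
 *	return readl(((void __iomem *)adev->rmmio) + (reg * 4));
 *
 * The actual dispatch lives in the amdgpu_mm_rreg()/amdgpu_mm_wreg() paths;
 * this snippet is a simplification, not the literal implementation.
 */
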
void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
                                        uint32_t reg0, uint32_t reg1,
                                        uint32_t ref, uint32_t mask)
{
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
        struct amdgpu_ring *ring = &kiq->ring;
        signed long r, cnt = 0;
        unsigned long flags;
        uint32_t seq;

        spin_lock_irqsave(&kiq->ring_lock, flags);
        amdgpu_ring_alloc(ring, 32);
        amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
                                            ref, mask);
        amdgpu_fence_emit_polling(ring, &seq);
        amdgpu_ring_commit(ring);
        spin_unlock_irqrestore(&kiq->ring_lock, flags);

        r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

        /* don't wait anymore for IRQ context */
        if (r < 1 && in_interrupt())
                goto failed_kiq;

        might_sleep();
        while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
                msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
                r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
        }

        if (cnt > MAX_KIQ_REG_TRY)
                goto failed_kiq;

        return;

failed_kiq:
        pr_err("failed to write reg %x wait reg %x\n", reg0, reg1);
}

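/*
 * Usage sketch (illustrative, hedged): SR-IOV GPU TLB flushes use this
 * helper to emit a "write invalidate-request, then poll invalidate-ack"
 * pair atomically on the KIQ ring, roughly:
 *
 *	amdgpu_virt_kiq_reg_write_reg_wait(adev, req_reg, ack_reg,
 *					   req_val, 1 << vmid);
 *
 * where req_reg/ack_reg/req_val stand in for the per-hub invalidation
 * engine registers in the gmc code; this is a simplification, not the
 * verbatim call.
 */
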
/**
 * amdgpu_virt_request_full_gpu() - request full gpu access
 * @adev:	amdgpu device.
 * @init:	is driver init time.
 * When starting to init/fini the driver, first request full gpu access.
 * Return: Zero if request succeeds, otherwise an error code.
 */
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
{
        struct amdgpu_virt *virt = &adev->virt;
        int r;

        if (virt->ops && virt->ops->req_full_gpu) {
                r = virt->ops->req_full_gpu(adev, init);
                if (r)
                        return r;

                adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
        }

        return 0;
}

/**
 * amdgpu_virt_release_full_gpu() - release full gpu access
 * @adev:	amdgpu device.
 * @init:	is driver init time.
 * When finishing driver init/fini, release full gpu access again.
 * Return: Zero if release succeeds, otherwise an error code.
 */
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
{
        struct amdgpu_virt *virt = &adev->virt;
        int r;

        if (virt->ops && virt->ops->rel_full_gpu) {
                r = virt->ops->rel_full_gpu(adev, init);
                if (r)
                        return r;

                adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
        }
        return 0;
}

/**
 * amdgpu_virt_reset_gpu() - reset gpu
 * @adev:	amdgpu device.
 * Send a reset command to the GPU hypervisor to reset the GPU this VM uses.
 * Return: Zero if reset succeeds, otherwise an error code.
 */
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
{
        struct amdgpu_virt *virt = &adev->virt;
        int r;

        if (virt->ops && virt->ops->reset_gpu) {
                r = virt->ops->reset_gpu(adev);
                if (r)
                        return r;

                adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
        }

        return 0;
}

/**
 * amdgpu_virt_wait_reset() - wait for gpu reset to complete
 * @adev:	amdgpu device.
 * Wait until the hypervisor signals that the GPU reset has completed.
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
{
        struct amdgpu_virt *virt = &adev->virt;

        if (!virt->ops || !virt->ops->wait_reset)
                return -EINVAL;

        return virt->ops->wait_reset(adev);
}

/**
 * amdgpu_virt_alloc_mm_table() - alloc memory for mm table
 * @adev:	amdgpu device.
 * The MM table is used by UVD and VCE for their initialization under SR-IOV.
 * Return: Zero if allocation succeeds, otherwise an error code.
 */
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
{
        int r;

        if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
                return 0;

        r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_VRAM,
                                    &adev->virt.mm_table.bo,
                                    &adev->virt.mm_table.gpu_addr,
                                    (void *)&adev->virt.mm_table.cpu_addr);
        if (r) {
                DRM_ERROR("failed to alloc mm table and error = %d.\n", r);
                return r;
        }

        memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
        DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
                 adev->virt.mm_table.gpu_addr,
                 adev->virt.mm_table.cpu_addr);
        return 0;
}

/**
 * amdgpu_virt_free_mm_table() - free mm table memory
 * @adev:	amdgpu device.
 * Free the MM table memory allocated by amdgpu_virt_alloc_mm_table().
 */
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
{
        if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
                return;

        amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
                              &adev->virt.mm_table.gpu_addr,
                              (void *)&adev->virt.mm_table.cpu_addr);
        adev->virt.mm_table.gpu_addr = 0;
}

int amdgpu_virt_fw_reserve_get_checksum(void *obj,
                                        unsigned long obj_size,
                                        unsigned int key,
                                        unsigned int chksum)
{
        unsigned int ret = key;
        unsigned long i = 0;
        unsigned char *pos;

        pos = (unsigned char *)obj;
        /* calculate checksum: key plus the byte-sum of the object */
        for (i = 0; i < obj_size; ++i)
                ret += *(pos + i);
        /* minus the byte-sum of the checksum value itself */
        pos = (unsigned char *)&chksum;
        for (i = 0; i < sizeof(chksum); ++i)
                ret -= *(pos + i);
        return ret;
}

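/*
 * Worked example (hedged): the checksum is "key plus the byte-sum of the
 * object, minus the byte-sum of the @chksum value". Because the received
 * checksum lives inside the summed object, passing it back as @chksum
 * cancels its own contribution, so a valid block satisfies:
 *
 *	amdgpu_virt_fw_reserve_get_checksum(buf, size, key, stored) == stored
 *
 * which is exactly the test amdgpu_virt_init_data_exchange() performs
 * below (and, with @chksum = 0, how the vf2pf checksum is generated).
 */
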
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
        uint32_t pf2vf_size = 0;
        uint32_t checksum = 0;
        uint32_t checkval;
        char *str;

        adev->virt.fw_reserve.p_pf2vf = NULL;
        adev->virt.fw_reserve.p_vf2pf = NULL;

        if (adev->fw_vram_usage.va != NULL) {
                adev->virt.fw_reserve.p_pf2vf =
                        (struct amd_sriov_msg_pf2vf_info_header *)(
                        adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
                AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
                AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
                AMDGPU_FW_VRAM_PF2VF_READ(adev, feature_flags, &adev->virt.gim_feature);

                /* pf2vf message must be in 4K */
                if (pf2vf_size > 0 && pf2vf_size < 4096) {
                        checkval = amdgpu_virt_fw_reserve_get_checksum(
                                adev->virt.fw_reserve.p_pf2vf, pf2vf_size,
                                adev->virt.fw_reserve.checksum_key, checksum);
                        if (checkval == checksum) {
                                adev->virt.fw_reserve.p_vf2pf =
                                        ((void *)adev->virt.fw_reserve.p_pf2vf +
                                        pf2vf_size);
                                memset((void *)adev->virt.fw_reserve.p_vf2pf, 0,
                                       sizeof(amdgim_vf2pf_info));
                                AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.version,
                                        AMDGPU_FW_VRAM_VF2PF_VER);
                                AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.size,
                                        sizeof(amdgim_vf2pf_info));
                                AMDGPU_FW_VRAM_VF2PF_READ(adev, driver_version,
                                        &str);
#ifdef MODULE
                                if (THIS_MODULE->version != NULL)
                                        strcpy(str, THIS_MODULE->version);
                                else
#endif
                                        strcpy(str, "N/A");
                                AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert,
                                        0);
                                AMDGPU_FW_VRAM_VF2PF_WRITE(adev, checksum,
                                        amdgpu_virt_fw_reserve_get_checksum(
                                        adev->virt.fw_reserve.p_vf2pf,
                                        pf2vf_size,
                                        adev->virt.fw_reserve.checksum_key, 0));
                        }
                }
        }
}
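
/*
 * Layout note (descriptive, for orientation): within the firmware-reserved
 * VRAM mapping, the PF-to-VF block starts at AMDGIM_DATAEXCHANGE_OFFSET and
 * the VF-to-PF block follows immediately after it, which is why p_vf2pf
 * above is computed as p_pf2vf + pf2vf_size.
 */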