#include "amdgpu.h"
#include "nbio/nbio_2_3_offset.h"
#include "nbio/nbio_2_3_sh_mask.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "soc15.h"
#include "navi10_ih.h"
#include "soc15_common.h"
#include "mxgpu_nv.h"
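
/* Acknowledge a received host message by setting the RCV_MSG_ACK bit. */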
static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev)
{
	WREG8(NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}
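
/* Assert or deassert TRN_MSG_VALID to tell the host a message is staged. */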
static void xgpu_nv_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	WREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}
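
/*
 * peek_msg may only be called from the IRQ routine: by the time the
 * mailbox interrupt fires, the RCV_MSG_VALID field of
 * BIF_BX_PF_MAILBOX_CONTROL is guaranteed to already be set to 1.
 */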
static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev)
{
	return RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
}
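
/*
 * Consume the pending mailbox message if it matches the expected event,
 * acking it back to the host; return -ENOENT otherwise.
 */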
static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	u32 reg;

	reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
	if (reg != event)
		return -ENOENT;

	xgpu_nv_mailbox_send_ack(adev);

	return 0;
}
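
/* Read TRN_MSG_ACK (bit 1 of the TRN control byte) without clearing it. */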
static uint8_t xgpu_nv_peek_ack(struct amdgpu_device *adev)
{
	return RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}
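
/* Busy-wait, in 5 ms steps, for the host to assert TRN_MSG_ACK. */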
static int xgpu_nv_poll_ack(struct amdgpu_device *adev)
{
	int timeout = NV_MAILBOX_POLL_ACK_TIMEDOUT;
	u8 reg;

	do {
		reg = RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
		if (reg & 2)
			return 0;

		mdelay(5);
		timeout -= 5;
	} while (timeout > 1);

	pr_err("Didn't get TRN_MSG_ACK from pf within %d ms\n", NV_MAILBOX_POLL_ACK_TIMEDOUT);

	return -ETIME;
}
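
/* Poll, in 10 ms steps, until the expected event arrives or the timeout expires. */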
static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r;
	uint64_t timeout, now;

	now = (uint64_t)ktime_to_ms(ktime_get());
	timeout = now + NV_MAILBOX_POLL_MSG_TIMEDOUT;

	do {
		r = xgpu_nv_mailbox_rcv_msg(adev, event);
		if (!r)
			return 0;

		msleep(10);
		now = (uint64_t)ktime_to_ms(ktime_get());
	} while (timeout > now);

	return -ETIME;
}
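
/* Stage a four-dword message for the host, mark it valid and wait for the ack. */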
static void xgpu_nv_mailbox_trans_msg(struct amdgpu_device *adev,
				      enum idh_request req, u32 data1, u32 data2, u32 data3)
{
	int r;
	uint8_t trn;

	/* IMPORTANT:
	 * Clear TRN_MSG_VALID first: the hardware responds by clearing the
	 * host's RCV_MSG_ACK, which in turn deasserts our TRN_MSG_ACK.
	 * Otherwise the xgpu_nv_poll_ack() below would return immediately
	 * on a stale ack.
	 */
	do {
		xgpu_nv_mailbox_set_valid(adev, false);
		trn = xgpu_nv_peek_ack(adev);
		if (trn) {
			pr_err("trn=%x ACK should not be asserted, waiting again!\n", trn);
			msleep(1);
		}
	} while (trn);

	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0, req);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW1, data1);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW2, data2);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW3, data3);
	xgpu_nv_mailbox_set_valid(adev, true);

	/* start polling for the host's ack */
	r = xgpu_nv_poll_ack(adev);
	if (r)
		pr_err("Didn't get ack from pf, continuing\n");

	xgpu_nv_mailbox_set_valid(adev, false);
}
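
/*
 * Send a request to the host and, for requests that expect a reply,
 * poll for the corresponding event (retrying the request once).
 */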
static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
					enum idh_request req)
{
	int r, retry = 1;
	enum idh_event event = -1;

send_request:
	xgpu_nv_mailbox_trans_msg(adev, req, 0, 0, 0);

	switch (req) {
	case IDH_REQ_GPU_INIT_ACCESS:
	case IDH_REQ_GPU_FINI_ACCESS:
	case IDH_REQ_GPU_RESET_ACCESS:
		event = IDH_READY_TO_ACCESS_GPU;
		break;
	case IDH_REQ_GPU_INIT_DATA:
		event = IDH_REQ_GPU_INIT_DATA_READY;
		break;
	default:
		break;
	}

	if (event != -1) {
		r = xgpu_nv_poll_msg(adev, event);
		if (r) {
			if (retry++ < 2)
				goto send_request;

			if (req != IDH_REQ_GPU_INIT_DATA) {
				pr_err("Didn't get msg:%d from pf, error=%d\n", event, r);
				return r;
			}

			/* the host doesn't support the REQ_GPU_INIT_DATA handshake */
			adev->virt.req_init_data_ver = 0;
		} else {
			if (req == IDH_REQ_GPU_INIT_DATA) {
				adev->virt.req_init_data_ver =
					RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1);

				/* assume V1 in case the host doesn't set a version number */
				if (adev->virt.req_init_data_ver < 1)
					adev->virt.req_init_data_ver = 1;
			}
		}

		/* the checksum key arrives in mailbox dword 2 on INIT/RESET access */
		if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
			adev->virt.fw_reserve.checksum_key =
				RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW2);
		}
	}

	return 0;
}
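
/* Ask the host for a GPU reset, retrying up to NV_MAILBOX_POLL_MSG_REP_MAX times. */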
static int xgpu_nv_request_reset(struct amdgpu_device *adev)
{
	int ret, i = 0;

	while (i < NV_MAILBOX_POLL_MSG_REP_MAX) {
		ret = xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
		if (!ret)
			break;
		i++;
	}

	return ret;
}
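
/* Request full GPU access from the host, either at init or at fini time. */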
static int xgpu_nv_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
	return xgpu_nv_send_access_requests(adev, req);
}
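
/* Hand full GPU access back to the host. */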
static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
	return xgpu_nv_send_access_requests(adev, req);
}
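
/* Request the GPU init data handshake; the reply carries its version. */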
static int xgpu_nv_request_init_data(struct amdgpu_device *adev)
{
	return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
}
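
/* Nothing to do on the ack interrupt; acks are polled synchronously in trans_msg. */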
static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("got ack intr, nothing to do\n");
	return 0;
}
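
/* Enable or disable the mailbox ack interrupt (bit 1 of MAILBOX_INT_CNTL). */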
static int xgpu_nv_set_mailbox_ack_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		tmp |= 2;
	else
		tmp &= ~2;

	WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

	return 0;
}
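
/*
 * FLR (function level reset) handler: notify the host that the guest is
 * ready to be reset, wait for the FLR-complete message, then trigger GPU
 * recovery if needed.
 */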
static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
	int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT;

	/* Block amdgpu_gpu_recover() until the FLR COMPLETE message is
	 * received; otherwise the mailbox message would be ruined/reset by
	 * the VF FLR.
	 */
	if (!down_write_trylock(&adev->reset_sem))
		return;

	amdgpu_virt_fini_data_exchange(adev);
	atomic_set(&adev->in_gpu_reset, 1);

	xgpu_nv_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);

	do {
		if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
			goto flr_done;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

flr_done:
	atomic_set(&adev->in_gpu_reset, 0);
	up_write(&adev->reset_sem);

	/* Trigger recovery due to world switch failure when no TDR is set */
	if (amdgpu_device_should_recover_gpu(adev)
		&& (!amdgpu_device_has_job_running(adev) ||
		adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->video_timeout == MAX_SCHEDULE_TIMEOUT))
		amdgpu_device_gpu_recover(adev, NULL);
}
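
/* Enable or disable the mailbox receive interrupt (bit 0 of MAILBOX_INT_CNTL). */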
static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		tmp |= 1;
	else
		tmp &= ~1;

	WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

	return 0;
}
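
/* Dispatch an incoming mailbox message; only FLR notifications need action here. */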
static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	enum idh_event event = xgpu_nv_mailbox_peek_msg(adev);

	switch (event) {
	case IDH_FLR_NOTIFICATION:
		if (amdgpu_sriov_runtime(adev))
			schedule_work(&adev->virt.flr_work);
		break;
	/* READY_TO_ACCESS_GPU is fetched by the kernel's polling thread, so
	 * the IRQ can ignore it here; other messages, such as FLR complete,
	 * are not handled in this routine either.
	 */
	case IDH_CLR_MSG_BUF:
	case IDH_FLR_NOTIFICATION_CMPL:
	case IDH_READY_TO_ACCESS_GPU:
	default:
		break;
	}

	return 0;
}
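
/* Mailbox IRQ callbacks, wired up by xgpu_nv_mailbox_set_irq_funcs(). */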
static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_ack_irq_funcs = {
	.set = xgpu_nv_set_mailbox_ack_irq,
	.process = xgpu_nv_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_rcv_irq_funcs = {
	.set = xgpu_nv_set_mailbox_rcv_irq,
	.process = xgpu_nv_mailbox_rcv_irq,
};
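
/* Attach the mailbox IRQ callbacks to the device's virt state. */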
void xgpu_nv_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_nv_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_nv_mailbox_rcv_irq_funcs;
}
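
/* Register the BIF mailbox interrupt sources: src_id 135 for received messages, 138 for acks. */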
int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}
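
/* Enable the mailbox interrupts and set up the FLR work item. */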
int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work);

	return 0;
}
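
/* Disable the mailbox interrupts. */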
void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}
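
/* Mailbox-based virtualization ops for NV (Navi) SR-IOV VFs. */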
const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
	.req_full_gpu = xgpu_nv_request_full_gpu_access,
	.rel_full_gpu = xgpu_nv_release_full_gpu_access,
	.req_init_data = xgpu_nv_request_init_data,
	.reset_gpu = xgpu_nv_request_reset,
	.wait_reset = NULL,
	.trans_msg = xgpu_nv_mailbox_trans_msg,
};