#include "amdgpu.h"
#include "nbio/nbio_2_3_offset.h"
#include "nbio/nbio_2_3_sh_mask.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "soc15.h"
#include "navi10_ih.h"
#include "soc15_common.h"
#include "mxgpu_nv.h"
#include "mxgpu_ai.h"

static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev)
{
	WREG8(NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}

static void xgpu_nv_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	WREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}
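/*
 * Non-destructive read of the receive mailbox: returns the event in
 * RCV_DW0 without sending an ack, so the host-side state is untouched.
 * Used by both the IRQ handler and the FLR poll loop below.
 */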
static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev)
{
	return RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW0));
}
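/*
 * Check whether the expected event is in the receive mailbox and, if so,
 * consume it by acking the host. Returns -ENOENT when a different (or no)
 * message is pending.
 */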
static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	u32 reg;

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW0));
	if (reg != event)
		return -ENOENT;

	xgpu_nv_mailbox_send_ack(adev);

	return 0;
}

static uint8_t xgpu_nv_peek_ack(struct amdgpu_device *adev)
{
	return RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}

static int xgpu_nv_poll_ack(struct amdgpu_device *adev)
{
	int timeout = NV_MAILBOX_POLL_ACK_TIMEDOUT;
	u8 reg;

	do {
		reg = RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
		if (reg & 2)
			return 0;

		mdelay(5);
		timeout -= 5;
	} while (timeout > 1);

	pr_err("Didn't get TRN_MSG_ACK from pf within %d ms\n", NV_MAILBOX_POLL_ACK_TIMEDOUT);

	return -ETIME;
}

static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r, timeout = NV_MAILBOX_POLL_MSG_TIMEDOUT;

	do {
		r = xgpu_nv_mailbox_rcv_msg(adev, event);
		if (!r)
			return 0;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

	pr_err("Didn't get msg %d from pf, error=%d\n", event, r);

	return -ETIME;
}

static void xgpu_nv_mailbox_trans_msg(struct amdgpu_device *adev,
				      enum idh_request req, u32 data1, u32 data2, u32 data3)
{
	u32 reg;
	int r;
	uint8_t trn;

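	/*
	 * Clear TRN_MSG_VALID first: this makes the host drop RCV_MSG_ACK,
	 * which in turn deasserts our TRN_MSG_ACK. If a stale ACK stayed
	 * set, xgpu_nv_poll_ack() below would return immediately and the
	 * handshake for this message would be broken.
	 */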
	do {
		xgpu_nv_mailbox_set_valid(adev, false);
		trn = xgpu_nv_peek_ack(adev);
		if (trn) {
			pr_err("trn=%x, ACK should not be asserted, waiting again\n", trn);
			msleep(1);
		}
	} while (trn);

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0));
	reg = REG_SET_FIELD(reg, BIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0,
			    MSGBUF_DATA, req);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0),
		      reg);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW1),
		      data1);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW2),
		      data2);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW3),
		      data3);

	xgpu_nv_mailbox_set_valid(adev, true);
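	/* start to poll ack */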
	r = xgpu_nv_poll_ack(adev);
	if (r)
		pr_err("Didn't get ack from pf, continuing anyway\n");

	xgpu_nv_mailbox_set_valid(adev, false);
}

static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
					enum idh_request req)
{
	int r;

	xgpu_nv_mailbox_trans_msg(adev, req, 0, 0, 0);
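	/* INIT/FINI/RESET access requests expect a READY_TO_ACCESS_GPU reply */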
	if (req == IDH_REQ_GPU_INIT_ACCESS ||
	    req == IDH_REQ_GPU_FINI_ACCESS ||
	    req == IDH_REQ_GPU_RESET_ACCESS) {
		r = xgpu_nv_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
		if (r) {
			pr_err("Didn't get READY_TO_ACCESS_GPU from pf, giving up\n");
			return r;
		}

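		/* the PF publishes the fw_reserve checksum key in RCV_DW2 */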
		if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
			adev->virt.fw_reserve.checksum_key =
				RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW2));
		}
	}

	return 0;
}

static int xgpu_nv_request_reset(struct amdgpu_device *adev)
{
	return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
}

static int xgpu_nv_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
	return xgpu_nv_send_access_requests(adev, req);
}

static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;
	int r = 0;

	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
	r = xgpu_nv_send_access_requests(adev, req);

	return r;
}

static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("got ack intr, nothing to do\n");
	return 0;
}

static int xgpu_nv_set_mailbox_ack_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF_MAILBOX_INT_CNTL, ACK_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL), tmp);

	return 0;
}

static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
	int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT;
	int locked;

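	/*
	 * Block amdgpu_device_gpu_recover() until IDH_FLR_NOTIFICATION_CMPL
	 * is received, otherwise the mailbox message would be clobbered by
	 * the VF FLR. If the trylock fails, a reset is already in flight
	 * and we only poll for completion without taking the lock.
	 */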
	locked = mutex_trylock(&adev->lock_reset);
	if (locked)
		adev->in_gpu_reset = true;

	do {
		if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
			goto flr_done;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

flr_done:
	if (locked) {
		adev->in_gpu_reset = false;
		mutex_unlock(&adev->lock_reset);
	}
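	/* Trigger recovery for the world-switch failure if no TDR is armed,
	 * i.e. all per-ring timeouts are set to MAX_SCHEDULE_TIMEOUT.
	 */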
	if (amdgpu_device_should_recover_gpu(adev)
	    && (adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->video_timeout == MAX_SCHEDULE_TIMEOUT))
		amdgpu_device_gpu_recover(adev, NULL);
}

static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF_MAILBOX_INT_CNTL, VALID_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL), tmp);

	return 0;
}

static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	enum idh_event event = xgpu_nv_mailbox_peek_msg(adev);

	switch (event) {
	case IDH_FLR_NOTIFICATION:
		if (amdgpu_sriov_runtime(adev))
			schedule_work(&adev->virt.flr_work);
		break;
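	/* READY_TO_ACCESS_GPU is consumed by the polling path
	 * (xgpu_nv_poll_msg) and FLR completion by flr_work, so the
	 * IRQ handler has nothing to do with these events.
	 */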
	case IDH_CLR_MSG_BUF:
	case IDH_FLR_NOTIFICATION_CMPL:
	case IDH_READY_TO_ACCESS_GPU:
	default:
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_ack_irq_funcs = {
	.set = xgpu_nv_set_mailbox_ack_irq,
	.process = xgpu_nv_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_rcv_irq_funcs = {
	.set = xgpu_nv_set_mailbox_rcv_irq,
	.process = xgpu_nv_mailbox_rcv_irq,
};

void xgpu_nv_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_nv_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_nv_mailbox_rcv_irq_funcs;
}

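/*
 * BIF src id 135 carries the "mailbox msg valid" interrupt and src id 138
 * the "mailbox ack" interrupt, as wired to the rcv/ack handlers above.
 */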
int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}

int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work);

	return 0;
}

void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
	.req_full_gpu = xgpu_nv_request_full_gpu_access,
	.rel_full_gpu = xgpu_nv_release_full_gpu_access,
	.reset_gpu = xgpu_nv_request_reset,
	.wait_reset = NULL,
	.trans_msg = xgpu_nv_mailbox_trans_msg,
};