#include "amdgpu.h"
#include "vega10/soc15ip.h"
#include "vega10/NBIO/nbio_6_1_offset.h"
#include "vega10/NBIO/nbio_6_1_sh_mask.h"
#include "vega10/GC/gc_9_0_offset.h"
#include "vega10/GC/gc_9_0_sh_mask.h"
#include "soc15.h"
#include "vega10_ih.h"
#include "soc15_common.h"
#include "mxgpu_ai.h"

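/*
 * PF <-> VF mailbox handshake used under SR-IOV: the VF writes a request
 * into the TRN message buffer and raises TRN_MSG_VALID; the PF answers by
 * raising TRN_MSG_ACK and, for access requests, by placing an event in the
 * RCV message buffer with RCV_MSG_VALID set, which the VF acknowledges via
 * RCV_MSG_ACK.
 */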
static void xgpu_ai_mailbox_send_ack(struct amdgpu_device *adev)
{
	u32 reg;
	int timeout = AI_MAILBOX_TIMEDOUT;
	u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_VALID);

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_CONTROL));
	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_ACK, 1);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				       mmBIF_BX_PF0_MAILBOX_CONTROL), reg);

	/* wait for RCV_MSG_VALID to be cleared */
	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_CONTROL));
	while (reg & mask) {
		if (timeout <= 0) {
			pr_err("RCV_MSG_VALID is not cleared\n");
			break;
		}
		mdelay(1);
		timeout -= 1;

		reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
						     mmBIF_BX_PF0_MAILBOX_CONTROL));
	}
}

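/* Raise or drop TRN_MSG_VALID to tell the PF whether a message is staged */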
static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	u32 reg;

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_CONTROL));
	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_CONTROL,
			    TRN_MSG_VALID, val ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_CONTROL),
		      reg);
}

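/* Put a request into TRN_DW0 and mark it valid so the PF picks it up */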
static void xgpu_ai_mailbox_trans_msg(struct amdgpu_device *adev,
				      enum idh_request req)
{
	u32 reg;

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0));
	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0,
			    MSGBUF_DATA, req);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0),
		      reg);

	xgpu_ai_mailbox_set_valid(adev, true);
}

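/*
 * Check whether @event is pending in the RCV message buffer; ack it and
 * return 0 on a match. IDH_FLR_NOTIFICATION_CMPL is special-cased and read
 * without testing RCV_MSG_VALID first.
 */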
static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	u32 reg;
	u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_VALID);

	if (event != IDH_FLR_NOTIFICATION_CMPL) {
		reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
						     mmBIF_BX_PF0_MAILBOX_CONTROL));
		if (!(reg & mask))
			return -ENOENT;
	}

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
	if (reg != event)
		return -ENOENT;

	xgpu_ai_mailbox_send_ack(adev);

	return 0;
}

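/* Busy-wait (up to AI_MAILBOX_TIMEDOUT ms) for the PF to raise TRN_MSG_ACK */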
static int xgpu_ai_poll_ack(struct amdgpu_device *adev)
{
	int r = 0, timeout = AI_MAILBOX_TIMEDOUT;
	u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, TRN_MSG_ACK);
	u32 reg;

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_CONTROL));
	while (!(reg & mask)) {
		if (timeout <= 0) {
			pr_err("didn't get ack from pf\n");
			r = -ETIME;
			break;
		}
		mdelay(5);
		timeout -= 5;

		reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
						     mmBIF_BX_PF0_MAILBOX_CONTROL));
	}

	return r;
}

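/* Poll (up to AI_MAILBOX_TIMEDOUT ms) until @event arrives in the RCV buffer */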
static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r = 0, timeout = AI_MAILBOX_TIMEDOUT;

	r = xgpu_ai_mailbox_rcv_msg(adev, event);
	while (r) {
		if (timeout <= 0) {
			pr_err("didn't get msg %d from pf\n", event);
			r = -ETIME;
			break;
		}
		mdelay(5);
		timeout -= 5;

		r = xgpu_ai_mailbox_rcv_msg(adev, event);
	}

	return r;
}

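/*
 * Full request handshake: send the request, wait for the PF's ack, then
 * drop TRN_MSG_VALID; init/fini/reset access requests additionally wait
 * for READY_TO_ACCESS_GPU before the GPU may be touched.
 */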
static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
					enum idh_request req)
{
	int r;

	xgpu_ai_mailbox_trans_msg(adev, req);

	/* start to poll ack */
	r = xgpu_ai_poll_ack(adev);
	if (r)
		pr_err("didn't get ack from pf, continuing anyway\n");

	xgpu_ai_mailbox_set_valid(adev, false);

	/* access requests must also wait for the PF to grant access */
	if (req == IDH_REQ_GPU_INIT_ACCESS ||
	    req == IDH_REQ_GPU_FINI_ACCESS ||
	    req == IDH_REQ_GPU_RESET_ACCESS) {
		r = xgpu_ai_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
		if (r) {
			pr_err("didn't get READY_TO_ACCESS_GPU from pf, giving up\n");
			return r;
		}
	}

	return 0;
}

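/* The wrappers below are exposed to the core driver via xgpu_ai_virt_ops */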
static int xgpu_ai_request_reset(struct amdgpu_device *adev)
{
	return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
}

static int xgpu_ai_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
	return xgpu_ai_send_access_requests(adev, req);
}

static int xgpu_ai_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
	return xgpu_ai_send_access_requests(adev, req);
}

static int xgpu_ai_mailbox_ack_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("got ack interrupt, nothing to do\n");
	return 0;
}

static int xgpu_ai_set_mailbox_ack_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, ACK_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}

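/*
 * Deferred FLR handling: wait for the PF to report that the function level
 * reset has completed, then kick off SR-IOV GPU recovery.
 */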
static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);

	/* wait until the PF reports that the FLR has completed */
	if (xgpu_ai_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL)) {
		pr_err("failed to receive FLR_CMPL\n");
		return;
	}

	/* now trigger SR-IOV GPU recovery */
	amdgpu_sriov_gpu_reset(adev, NULL);
}

static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, VALID_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}

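/*
 * RCV interrupt handler: schedules flr_work on an FLR notification from
 * the PF, but only while amdgpu_lockup_timeout is 0, i.e. the driver's own
 * TDR path is disabled.
 */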
static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	int r;

	/* let the hypervisor drive the reset only if driver TDR is disabled */
	if (amdgpu_lockup_timeout == 0) {
		/* see which event we got */
		r = xgpu_ai_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);

		/* only FLR notifications are handled for now */
		if (!r)
			schedule_work(&adev->virt.flr_work);
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_ack_irq_funcs = {
	.set = xgpu_ai_set_mailbox_ack_irq,
	.process = xgpu_ai_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_rcv_irq_funcs = {
	.set = xgpu_ai_set_mailbox_rcv_irq,
	.process = xgpu_ai_mailbox_rcv_irq,
};

void xgpu_ai_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_ai_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_ai_mailbox_rcv_irq_funcs;
}

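/* BIF interrupt sources: ID 135 feeds rcv_irq (msg valid), ID 138 feeds ack_irq */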
int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}

int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);

	return 0;
}

void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
	.req_full_gpu = xgpu_ai_request_full_gpu_access,
	.rel_full_gpu = xgpu_ai_release_full_gpu_access,
	.reset_gpu = xgpu_ai_request_reset,
};