1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23#include <drm/drmP.h>
24#include "amdgpu.h"
25#include "amdgpu_ih.h"
26#include "cikd.h"
27
28#include "bif/bif_4_1_d.h"
29#include "bif/bif_4_1_sh_mask.h"
30
31#include "oss/oss_2_0_d.h"
32#include "oss/oss_2_0_sh_mask.h"
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49static void cik_ih_set_interrupt_funcs(struct amdgpu_device *adev);
50
51
52
53
54
55
56
57
58static void cik_ih_enable_interrupts(struct amdgpu_device *adev)
59{
60 u32 ih_cntl = RREG32(mmIH_CNTL);
61 u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL);
62
63 ih_cntl |= IH_CNTL__ENABLE_INTR_MASK;
64 ih_rb_cntl |= IH_RB_CNTL__RB_ENABLE_MASK;
65 WREG32(mmIH_CNTL, ih_cntl);
66 WREG32(mmIH_RB_CNTL, ih_rb_cntl);
67 adev->irq.ih.enabled = true;
68}
69
70
71
72
73
74
75
76
77static void cik_ih_disable_interrupts(struct amdgpu_device *adev)
78{
79 u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL);
80 u32 ih_cntl = RREG32(mmIH_CNTL);
81
82 ih_rb_cntl &= ~IH_RB_CNTL__RB_ENABLE_MASK;
83 ih_cntl &= ~IH_CNTL__ENABLE_INTR_MASK;
84 WREG32(mmIH_RB_CNTL, ih_rb_cntl);
85 WREG32(mmIH_CNTL, ih_cntl);
86
87 WREG32(mmIH_RB_RPTR, 0);
88 WREG32(mmIH_RB_WPTR, 0);
89 adev->irq.ih.enabled = false;
90 adev->irq.ih.rptr = 0;
91}
92
93
94
95
96
97
98
99
100
101
102
103
/**
 * cik_ih_irq_init - init and enable the interrupt ring
 *
 * @adev: amdgpu_device pointer
 *
 * Programs the interrupt controller with the address/size of the
 * previously allocated IH ring, sets up write-back of the write pointer,
 * and enables interrupt delivery (CIK).  Called at device init and resume.
 * Returns 0 (this sequence has no failure paths).
 */
static int cik_ih_irq_init(struct amdgpu_device *adev)
{
	int rb_bufsz;
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
	u64 wptr_off;

	/* start from a known state: ring disabled, pointers zeroed */
	cik_ih_disable_interrupts(adev);

	/* setup interrupt control: point the dummy read at the dummy page */
	WREG32(mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
	interrupt_cntl = RREG32(mmINTERRUPT_CNTL);
	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl &= ~INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK;
	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl &= ~INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK;
	WREG32(mmINTERRUPT_CNTL, interrupt_cntl);

	WREG32(mmIH_RB_BASE, adev->irq.ih.gpu_addr >> 8);
	/* ring size in dwords, log2-encoded for the RB_SIZE field */
	rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);

	ih_rb_cntl = (IH_RB_CNTL__WPTR_OVERFLOW_ENABLE_MASK |
		      IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK |
		      (rb_bufsz << 1));

	ih_rb_cntl |= IH_RB_CNTL__WPTR_WRITEBACK_ENABLE_MASK;

	/* set the writeback address whether it's enabled or not */
	wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
	WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
	WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF);

	WREG32(mmIH_RB_CNTL, ih_rb_cntl);

	/* set rptr, wptr to 0 */
	WREG32(mmIH_RB_RPTR, 0);
	WREG32(mmIH_RB_WPTR, 0);

	/* Default settings for IH_CNTL: write-request credits and clean count */
	ih_cntl = (0x10 << IH_CNTL__MC_WRREQ_CREDIT__SHIFT) |
		  (0x10 << IH_CNTL__MC_WR_CLEAN_CNT__SHIFT) |
		  (0 << IH_CNTL__MC_VMID__SHIFT);
	/* RPTR_REARM only works if msi's are enabled */
	if (adev->irq.msi_enabled)
		ih_cntl |= IH_CNTL__RPTR_REARM_MASK;
	WREG32(mmIH_CNTL, ih_cntl);

	pci_set_master(adev->pdev);

	/* enable irqs */
	cik_ih_enable_interrupts(adev);

	return 0;
}
160
161
162
163
164
165
166
167
/**
 * cik_ih_irq_disable - disable interrupt delivery
 *
 * @adev: amdgpu_device pointer
 *
 * Disables the IH ring and waits briefly so any in-flight
 * interrupt processing can settle before teardown continues.
 */
static void cik_ih_irq_disable(struct amdgpu_device *adev)
{
	cik_ih_disable_interrupts(adev);
	/* Wait for any pending IRQ handlers to finish */
	mdelay(1);
}
174
175
176
177
178
179
180
181
182
183
184
185
/**
 * cik_ih_get_wptr - get the IH ring buffer wptr
 *
 * @adev: amdgpu_device pointer
 *
 * Reads the current write pointer from the write-back slot filled by
 * the hardware and handles ring overflow if it occurred.
 * Returns the write pointer masked to the ring size.
 */
static u32 cik_ih_get_wptr(struct amdgpu_device *adev)
{
	u32 wptr, tmp;

	wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]);

	if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) {
		wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK;
		/* When a ring buffer overflow happens, start parsing from
		 * the last not-overwritten vector (wptr + 16 bytes, i.e. one
		 * 4-dword IV entry past wptr).  Hopefully this allows us to
		 * catch up.
		 */
		dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
			 wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask);
		adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask;
		/* ack the overflow by setting WPTR_OVERFLOW_CLEAR */
		tmp = RREG32(mmIH_RB_CNTL);
		tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
		WREG32(mmIH_RB_CNTL, tmp);
	}
	return (wptr & adev->irq.ih.ptr_mask);
}
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239static void cik_ih_decode_iv(struct amdgpu_device *adev,
240 struct amdgpu_iv_entry *entry)
241{
242
243 u32 ring_index = adev->irq.ih.rptr >> 2;
244 uint32_t dw[4];
245
246 dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
247 dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]);
248 dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
249 dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
250
251 entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
252 entry->src_id = dw[0] & 0xff;
253 entry->src_data[0] = dw[1] & 0xfffffff;
254 entry->ring_id = dw[2] & 0xff;
255 entry->vmid = (dw[2] >> 8) & 0xff;
256 entry->pasid = (dw[2] >> 16) & 0xffff;
257
258
259 adev->irq.ih.rptr += 16;
260}
261
262
263
264
265
266
267
268
/**
 * cik_ih_set_rptr - write the software read pointer back to the hardware
 *
 * @adev: amdgpu_device pointer
 *
 * Tells the hardware how far the driver has consumed the IH ring.
 */
static void cik_ih_set_rptr(struct amdgpu_device *adev)
{
	WREG32(mmIH_RB_RPTR, adev->irq.ih.rptr);
}
273
/* Register the IRQ domain and install the CIK IH function table. */
static int cik_ih_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_irq_add_domain(adev);
	if (r)
		return r;

	cik_ih_set_interrupt_funcs(adev);
	return 0;
}
287
288static int cik_ih_sw_init(void *handle)
289{
290 int r;
291 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
292
293 r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false);
294 if (r)
295 return r;
296
297 r = amdgpu_irq_init(adev);
298
299 return r;
300}
301
/* Tear down in reverse of sw_init: IRQ state, IH ring, then the domain. */
static int cik_ih_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_fini(adev);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih);
	amdgpu_irq_remove_domain(adev);

	return 0;
}
312
/* Program and enable the IH ring; propagate any error to the caller. */
static int cik_ih_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return cik_ih_irq_init(adev);
}
324
/* Disable interrupt delivery; always succeeds. */
static int cik_ih_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cik_ih_irq_disable(adev);

	return 0;
}
333
/* Suspend is just hw_fini: disable the IH block. */
static int cik_ih_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return cik_ih_hw_fini(adev);
}
340
/* Resume is just hw_init: reprogram and re-enable the IH block. */
static int cik_ih_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return cik_ih_hw_init(adev);
}
347
348static bool cik_ih_is_idle(void *handle)
349{
350 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
351 u32 tmp = RREG32(mmSRBM_STATUS);
352
353 if (tmp & SRBM_STATUS__IH_BUSY_MASK)
354 return false;
355
356 return true;
357}
358
359static int cik_ih_wait_for_idle(void *handle)
360{
361 unsigned i;
362 u32 tmp;
363 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
364
365 for (i = 0; i < adev->usec_timeout; i++) {
366
367 tmp = RREG32(mmSRBM_STATUS) & SRBM_STATUS__IH_BUSY_MASK;
368 if (!tmp)
369 return 0;
370 udelay(1);
371 }
372 return -ETIMEDOUT;
373}
374
/*
 * Soft-reset the IH block if SRBM reports it busy: pulse the IH bit in
 * SRBM_SOFT_RESET (set, settle, clear, settle).  The post-write reads
 * flush the register writes before each delay.
 */
static int cik_ih_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK;

	if (srbm_soft_reset) {
		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		/* release the reset bit */
		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}
404
/* The CIK IH block has no clockgating control; nothing to do. */
static int cik_ih_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	return 0;
}
410
/* The CIK IH block has no powergating control; nothing to do. */
static int cik_ih_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}
416
/* amd_ip_funcs callbacks for the CIK IH IP block */
static const struct amd_ip_funcs cik_ih_ip_funcs = {
	.name = "cik_ih",
	.early_init = cik_ih_early_init,
	.late_init = NULL,
	.sw_init = cik_ih_sw_init,
	.sw_fini = cik_ih_sw_fini,
	.hw_init = cik_ih_hw_init,
	.hw_fini = cik_ih_hw_fini,
	.suspend = cik_ih_suspend,
	.resume = cik_ih_resume,
	.is_idle = cik_ih_is_idle,
	.wait_for_idle = cik_ih_wait_for_idle,
	.soft_reset = cik_ih_soft_reset,
	.set_clockgating_state = cik_ih_set_clockgating_state,
	.set_powergating_state = cik_ih_set_powergating_state,
};
433
/* Ring-level IH operations used by the amdgpu IRQ core */
static const struct amdgpu_ih_funcs cik_ih_funcs = {
	.get_wptr = cik_ih_get_wptr,
	.decode_iv = cik_ih_decode_iv,
	.set_rptr = cik_ih_set_rptr
};
439
/* Install the CIK IH function table on the device. */
static void cik_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
	adev->irq.ih_funcs = &cik_ih_funcs;
}
444
/* IP block descriptor: IH v2.0 for CIK-generation ASICs */
const struct amdgpu_ip_block_version cik_ih_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_IH,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &cik_ih_ip_funcs,
};
453