1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23#include <drm/drmP.h>
24#include "amdgpu.h"
25#include "amdgpu_ih.h"
26#include "cikd.h"
27
28#include "bif/bif_4_1_d.h"
29#include "bif/bif_4_1_sh_mask.h"
30
31#include "oss/oss_2_0_d.h"
32#include "oss/oss_2_0_sh_mask.h"
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49static void cik_ih_set_interrupt_funcs(struct amdgpu_device *adev);
50
51
52
53
54
55
56
57
58static void cik_ih_enable_interrupts(struct amdgpu_device *adev)
59{
60 u32 ih_cntl = RREG32(mmIH_CNTL);
61 u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL);
62
63 ih_cntl |= IH_CNTL__ENABLE_INTR_MASK;
64 ih_rb_cntl |= IH_RB_CNTL__RB_ENABLE_MASK;
65 WREG32(mmIH_CNTL, ih_cntl);
66 WREG32(mmIH_RB_CNTL, ih_rb_cntl);
67 adev->irq.ih.enabled = true;
68}
69
70
71
72
73
74
75
76
77static void cik_ih_disable_interrupts(struct amdgpu_device *adev)
78{
79 u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL);
80 u32 ih_cntl = RREG32(mmIH_CNTL);
81
82 ih_rb_cntl &= ~IH_RB_CNTL__RB_ENABLE_MASK;
83 ih_cntl &= ~IH_CNTL__ENABLE_INTR_MASK;
84 WREG32(mmIH_RB_CNTL, ih_rb_cntl);
85 WREG32(mmIH_CNTL, ih_cntl);
86
87 WREG32(mmIH_RB_RPTR, 0);
88 WREG32(mmIH_RB_WPTR, 0);
89 adev->irq.ih.enabled = false;
90 adev->irq.ih.rptr = 0;
91}
92
93
94
95
96
97
98
99
100
101
102
103
/**
 * cik_ih_irq_init - program and enable the IH ring (CIK)
 *
 * @adev: amdgpu_device pointer
 *
 * Disables interrupts, programs the interrupt controller (dummy-read
 * page, ring base and size, write-back address for the write pointer,
 * MC write credits), optionally enables MSI rptr re-arm, enables PCI
 * bus mastering and finally re-enables the IH ring.
 *
 * Returns 0 (this sequence has no failure paths).
 */
static int cik_ih_irq_init(struct amdgpu_device *adev)
{
	int rb_bufsz;
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
	u64 wptr_off;

	/* quiesce the ring while it is reprogrammed */
	cik_ih_disable_interrupts(adev);

	/* dummy read page address, shifted right 8 bits for the register */
	WREG32(mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
	interrupt_cntl = RREG32(mmINTERRUPT_CNTL);
	/* IH_DUMMY_RD_OVERRIDE = 0: hardware picks dummy-read behavior
	 * (presumably based on MSI state — TODO confirm vs. register spec) */
	interrupt_cntl &= ~INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK;
	/* IH_REQ_NONSNOOP_EN = 0: IH requests are snooped (coherent) */
	interrupt_cntl &= ~INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK;
	WREG32(mmINTERRUPT_CNTL, interrupt_cntl);

	WREG32(mmIH_RB_BASE, adev->irq.ih.gpu_addr >> 8);
	/* the RB_SIZE field encodes log2 of the ring size in dwords */
	rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);

	ih_rb_cntl = (IH_RB_CNTL__WPTR_OVERFLOW_ENABLE_MASK |
		      IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK |
		      (rb_bufsz << 1));	/* RB_SIZE field starts at bit 1 */

	ih_rb_cntl |= IH_RB_CNTL__WPTR_WRITEBACK_ENABLE_MASK;

	/* address where the hardware mirrors the write pointer
	 * (read back by cik_ih_get_wptr via adev->wb.wb[wptr_offs]) */
	wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
	WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
	WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF);

	WREG32(mmIH_RB_CNTL, ih_rb_cntl);

	/* start with an empty ring */
	WREG32(mmIH_RB_RPTR, 0);
	WREG32(mmIH_RB_WPTR, 0);

	/* MC write-request credits and clean count; VMID 0 for IH traffic */
	ih_cntl = (0x10 << IH_CNTL__MC_WRREQ_CREDIT__SHIFT) |
		  (0x10 << IH_CNTL__MC_WR_CLEAN_CNT__SHIFT) |
		  (0 << IH_CNTL__MC_VMID__SHIFT);
	/* with MSI, re-arm the interrupt when the rptr is written back */
	if (adev->irq.msi_enabled)
		ih_cntl |= IH_CNTL__RPTR_REARM_MASK;
	WREG32(mmIH_CNTL, ih_cntl);

	pci_set_master(adev->pdev);

	/* enable irqs */
	cik_ih_enable_interrupts(adev);

	return 0;
}
160
161
162
163
164
165
166
167
/*
 * cik_ih_irq_disable - disable the IH ring and let the hardware settle
 *
 * Used on hw_fini/suspend; the short delay gives in-flight interrupt
 * traffic time to drain before teardown continues.
 */
static void cik_ih_irq_disable(struct amdgpu_device *adev)
{
	cik_ih_disable_interrupts(adev);
	/* Wait ~1 ms for any outstanding activity to finish */
	mdelay(1);
}
174
175
176
177
178
179
180
181
182
183
184
185
/**
 * cik_ih_get_wptr - fetch the current write pointer of the IH ring
 *
 * @adev: amdgpu_device pointer
 *
 * Reads the write pointer the hardware mirrored into the write-back
 * buffer and returns it masked to the ring size.
 *
 * If the hardware flagged a ring overflow, the overflow bit is cleared
 * from the value, a warning is printed, and the cached read pointer is
 * advanced to (wptr + 16) — one 16-byte entry past the reported write
 * pointer, presumably the oldest vector not yet overwritten — and the
 * overflow flag is cleared in IH_RB_CNTL so it can trigger again.
 */
static u32 cik_ih_get_wptr(struct amdgpu_device *adev)
{
	u32 wptr, tmp;

	wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]);

	if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) {
		wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK;
		/* ring has wrapped past the read pointer; skip ahead and
		 * resume from just past the newest entry */
		dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
			 wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask);
		adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask;
		tmp = RREG32(mmIH_RB_CNTL);
		tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
		WREG32(mmIH_RB_CNTL, tmp);
	}
	return (wptr & adev->irq.ih.ptr_mask);
}
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238static bool cik_ih_prescreen_iv(struct amdgpu_device *adev)
239{
240 u32 ring_index = adev->irq.ih.rptr >> 2;
241 u16 pasid;
242
243 switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
244 case 146:
245 case 147:
246 pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
247 if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
248 return true;
249 break;
250 default:
251
252 return true;
253 }
254
255 adev->irq.ih.rptr += 16;
256 return false;
257}
258
259
260
261
262
263
264
265
266
267static void cik_ih_decode_iv(struct amdgpu_device *adev,
268 struct amdgpu_iv_entry *entry)
269{
270
271 u32 ring_index = adev->irq.ih.rptr >> 2;
272 uint32_t dw[4];
273
274 dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
275 dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]);
276 dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
277 dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
278
279 entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
280 entry->src_id = dw[0] & 0xff;
281 entry->src_data[0] = dw[1] & 0xfffffff;
282 entry->ring_id = dw[2] & 0xff;
283 entry->vmid = (dw[2] >> 8) & 0xff;
284 entry->pasid = (dw[2] >> 16) & 0xffff;
285
286
287 adev->irq.ih.rptr += 16;
288}
289
290
291
292
293
294
295
296
/**
 * cik_ih_set_rptr - write the driver's read pointer back to hardware
 *
 * @adev: amdgpu_device pointer
 *
 * Tells the hardware how far the driver has consumed the IH ring.
 */
static void cik_ih_set_rptr(struct amdgpu_device *adev)
{
	WREG32(mmIH_RB_RPTR, adev->irq.ih.rptr);
}
301
/* early_init hook: register the IRQ domain and install the CIK IH
 * callback table before any other IP block needs interrupts. */
static int cik_ih_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_irq_add_domain(adev);
	if (r)
		return r;

	cik_ih_set_interrupt_funcs(adev);
	return 0;
}
315
316static int cik_ih_sw_init(void *handle)
317{
318 int r;
319 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
320
321 r = amdgpu_ih_ring_init(adev, 64 * 1024, false);
322 if (r)
323 return r;
324
325 r = amdgpu_irq_init(adev);
326
327 return r;
328}
329
/* sw_fini hook: tear down the interrupt core, free the IH ring and
 * remove the IRQ domain added in early_init.  Order matters: the
 * irq core is shut down before its ring is freed. */
static int cik_ih_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_fini(adev);
	amdgpu_ih_ring_fini(adev);
	amdgpu_irq_remove_domain(adev);

	return 0;
}
340
/* hw_init hook: program and enable the IH ring; any error from the
 * init sequence is passed straight up. */
static int cik_ih_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return cik_ih_irq_init(adev);
}
352
/* hw_fini hook: disable the IH ring; always succeeds. */
static int cik_ih_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cik_ih_irq_disable(adev);

	return 0;
}
361
/* suspend hook: identical to hw_fini — just disable the ring. */
static int cik_ih_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return cik_ih_hw_fini(adev);
}
368
/* resume hook: identical to hw_init — reprogram and enable the ring. */
static int cik_ih_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return cik_ih_hw_init(adev);
}
375
376static bool cik_ih_is_idle(void *handle)
377{
378 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
379 u32 tmp = RREG32(mmSRBM_STATUS);
380
381 if (tmp & SRBM_STATUS__IH_BUSY_MASK)
382 return false;
383
384 return true;
385}
386
387static int cik_ih_wait_for_idle(void *handle)
388{
389 unsigned i;
390 u32 tmp;
391 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
392
393 for (i = 0; i < adev->usec_timeout; i++) {
394
395 tmp = RREG32(mmSRBM_STATUS) & SRBM_STATUS__IH_BUSY_MASK;
396 if (!tmp)
397 return 0;
398 udelay(1);
399 }
400 return -ETIMEDOUT;
401}
402
/* soft_reset hook: if SRBM reports the IH block busy, pulse its soft
 * reset bit in SRBM_SOFT_RESET (assert, wait, deassert, wait).
 * Always returns 0. */
static int cik_ih_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	/* only reset the IH if it is actually stuck busy */
	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK;

	if (srbm_soft_reset) {
		/* assert reset */
		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		/* read back — presumably to post the write before delaying */
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		/* deassert reset */
		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* give the block time to come back up */
		udelay(50);
	}

	return 0;
}
432
/* set_clockgating_state hook: clockgating is not implemented for the
 * CIK IH block; accept any requested state. */
static int cik_ih_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	return 0;
}
438
/* set_powergating_state hook: powergating is not implemented for the
 * CIK IH block; accept any requested state. */
static int cik_ih_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}
444
/* amd_ip_funcs table wiring the CIK IH block into the amdgpu IP-block
 * lifecycle (init/fini, suspend/resume, idle polling, soft reset).
 * Clock- and power-gating callbacks are no-op stubs. */
static const struct amd_ip_funcs cik_ih_ip_funcs = {
	.name = "cik_ih",
	.early_init = cik_ih_early_init,
	.late_init = NULL,
	.sw_init = cik_ih_sw_init,
	.sw_fini = cik_ih_sw_fini,
	.hw_init = cik_ih_hw_init,
	.hw_fini = cik_ih_hw_fini,
	.suspend = cik_ih_suspend,
	.resume = cik_ih_resume,
	.is_idle = cik_ih_is_idle,
	.wait_for_idle = cik_ih_wait_for_idle,
	.soft_reset = cik_ih_soft_reset,
	.set_clockgating_state = cik_ih_set_clockgating_state,
	.set_powergating_state = cik_ih_set_powergating_state,
};
461
/* Ring-level IH callbacks used by the amdgpu interrupt core to fetch
 * the write pointer, prescreen, decode entries and update the read
 * pointer on the CIK IH ring. */
static const struct amdgpu_ih_funcs cik_ih_funcs = {
	.get_wptr = cik_ih_get_wptr,
	.prescreen_iv = cik_ih_prescreen_iv,
	.decode_iv = cik_ih_decode_iv,
	.set_rptr = cik_ih_set_rptr
};
468
469static void cik_ih_set_interrupt_funcs(struct amdgpu_device *adev)
470{
471 if (adev->irq.ih_funcs == NULL)
472 adev->irq.ih_funcs = &cik_ih_funcs;
473}
474
/* IP-block registration record: IH version 2.0 as found on CIK parts. */
const struct amdgpu_ip_block_version cik_ih_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_IH,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &cik_ih_ip_funcs,
};
483