/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "cikd.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

/*
 * Interrupts
 * Starting with r6xx, interrupts are handled via a ring buffer.
 * Ring buffers are areas of GPU accessible memory that the GPU
 * writes interrupt vectors into and the host reads vectors out of.
 * There is a rptr (read pointer) that determines where the
 * host is currently reading, and a wptr (write pointer)
 * which determines where the GPU has written.  When the
 * pointers are equal, the ring is idle.  When the GPU
 * writes vectors, it increments the wptr.  The host then
 * fetches and processes vectors until rptr catches up with
 * wptr, at which point it writes the new rptr back.
 */
static void cik_ih_set_interrupt_funcs(struct amdgpu_device *adev);

/**
 * cik_ih_enable_interrupts - Enable the interrupt ring buffer
 *
 * @adev: amdgpu_device pointer
 *
 * Enable the interrupt ring buffer (CIK).
 */
static void cik_ih_enable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_cntl = RREG32(mmIH_CNTL);
	u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL);

	ih_cntl |= IH_CNTL__ENABLE_INTR_MASK;
	ih_rb_cntl |= IH_RB_CNTL__RB_ENABLE_MASK;
	WREG32(mmIH_CNTL, ih_cntl);
	WREG32(mmIH_RB_CNTL, ih_rb_cntl);
	adev->irq.ih.enabled = true;
}

/**
 * cik_ih_disable_interrupts - Disable the interrupt ring buffer
 *
 * @adev: amdgpu_device pointer
 *
 * Disable the interrupt ring buffer and reset the ring pointers (CIK).
 */
static void cik_ih_disable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL);
	u32 ih_cntl = RREG32(mmIH_CNTL);

	ih_rb_cntl &= ~IH_RB_CNTL__RB_ENABLE_MASK;
	ih_cntl &= ~IH_CNTL__ENABLE_INTR_MASK;
	WREG32(mmIH_RB_CNTL, ih_rb_cntl);
	WREG32(mmIH_CNTL, ih_cntl);
	/* set rptr, wptr to 0 */
	WREG32(mmIH_RB_RPTR, 0);
	WREG32(mmIH_RB_WPTR, 0);
	adev->irq.ih.enabled = false;
	adev->irq.ih.rptr = 0;
}

/**
 * cik_ih_irq_init - init and enable the interrupt ring
 *
 * @adev: amdgpu_device pointer
 *
 * Program the interrupt control registers, point the hardware at the
 * IH ring buffer allocated in sw_init, and enable the ring (CIK).
 * Called at device load and resume.
 * Returns 0 for success.
 */
static int cik_ih_irq_init(struct amdgpu_device *adev)
{
	struct amdgpu_ih_ring *ih = &adev->irq.ih;
	int rb_bufsz;
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;

	/* disable irqs */
	cik_ih_disable_interrupts(adev);

	/* setup interrupt control */
	WREG32(mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
	interrupt_cntl = RREG32(mmINTERRUPT_CNTL);
	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl &= ~INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK;
	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl &= ~INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK;
	WREG32(mmINTERRUPT_CNTL, interrupt_cntl);

	WREG32(mmIH_RB_BASE, adev->irq.ih.gpu_addr >> 8);
	rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);

	ih_rb_cntl = (IH_RB_CNTL__WPTR_OVERFLOW_ENABLE_MASK |
		      IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK |
		      (rb_bufsz << 1));

	ih_rb_cntl |= IH_RB_CNTL__WPTR_WRITEBACK_ENABLE_MASK;

	/* set the writeback address whether it's enabled or not */
	WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(ih->wptr_addr));
	WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(ih->wptr_addr) & 0xFF);

	WREG32(mmIH_RB_CNTL, ih_rb_cntl);

	/* set rptr, wptr to 0 */
	WREG32(mmIH_RB_RPTR, 0);
	WREG32(mmIH_RB_WPTR, 0);

	/* Default settings for IH_CNTL (disabled at first) */
	ih_cntl = (0x10 << IH_CNTL__MC_WRREQ_CREDIT__SHIFT) |
		  (0x10 << IH_CNTL__MC_WR_CLEAN_CNT__SHIFT) |
		  (0 << IH_CNTL__MC_VMID__SHIFT);
	/* RPTR_REARM only works if msi's are enabled */
	if (adev->irq.msi_enabled)
		ih_cntl |= IH_CNTL__RPTR_REARM_MASK;
	WREG32(mmIH_CNTL, ih_cntl);

	pci_set_master(adev->pdev);

	/* enable irqs */
	cik_ih_enable_interrupts(adev);

	return 0;
}

/**
 * cik_ih_irq_disable - disable interrupts
 *
 * @adev: amdgpu_device pointer
 *
 * Disable the IH interrupts and give in-flight interrupts time to drain (CIK).
 */
static void cik_ih_irq_disable(struct amdgpu_device *adev)
{
	cik_ih_disable_interrupts(adev);
	/* Wait and acknowledge irq */
	mdelay(1);
}

/**
 * cik_ih_get_wptr - get the IH ring buffer wptr
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring buffer to fetch wptr from
 *
 * Read the IH ring buffer wptr from the writeback memory buffer (CIK).
 * Also check for a ring buffer overflow and deal with it by skipping
 * ahead to the oldest vector that has not been overwritten.
 * Returns the masked value of the wptr.
 */
static u32 cik_ih_get_wptr(struct amdgpu_device *adev,
			   struct amdgpu_ih_ring *ih)
{
	u32 wptr, tmp;

	wptr = le32_to_cpu(*ih->wptr_cpu);

	if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) {
		wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK;
		/* When a ring buffer overflow happens, start parsing interrupts
		 * from the last not overwritten vector (wptr + 16). Hopefully
		 * this should allow us to catch up.
		 */
		dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
			 wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
		ih->rptr = (wptr + 16) & ih->ptr_mask;
		tmp = RREG32(mmIH_RB_CNTL);
		tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
		WREG32(mmIH_RB_CNTL, tmp);
	}
	return (wptr & ih->ptr_mask);
}

/*        CIK IV Ring
 * Each IV ring entry is 128 bits:
 * [7:0]    - interrupt source id
 * [31:8]   - reserved
 * [59:32]  - interrupt source data
 * [63:60]  - reserved
 * [71:64]  - RINGID
 * [79:72]  - VMID
 * [95:80]  - PASID
 * [127:96] - reserved
 */

/**
 * cik_ih_decode_iv - decode an interrupt vector
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring buffer to decode
 * @entry: IV entry to place the decoded data into
 *
 * Decodes the interrupt vector at the current rptr
 * position and advances the rptr to the next vector.
 */
static void cik_ih_decode_iv(struct amdgpu_device *adev,
			     struct amdgpu_ih_ring *ih,
			     struct amdgpu_iv_entry *entry)
{
	/* wptr/rptr are in bytes! */
	u32 ring_index = ih->rptr >> 2;
	uint32_t dw[4];

	dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
	dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
	dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
	dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);

	entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
	entry->src_id = dw[0] & 0xff;
	entry->src_data[0] = dw[1] & 0xfffffff;
	entry->ring_id = dw[2] & 0xff;
	entry->vmid = (dw[2] >> 8) & 0xff;
	entry->pasid = (dw[2] >> 16) & 0xffff;

	/* wptr/rptr are in bytes! */
	ih->rptr += 16;
}

/**
 * cik_ih_set_rptr - set the IH ring buffer rptr
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring buffer to set rptr for
 *
 * Write the software rptr back to the hardware so it knows how far
 * the host has processed (CIK).
 */
static void cik_ih_set_rptr(struct amdgpu_device *adev,
			    struct amdgpu_ih_ring *ih)
{
	WREG32(mmIH_RB_RPTR, ih->rptr);
}

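/* Add the amdgpu IRQ domain and install the CIK-specific IH callbacks. */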
static int cik_ih_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	ret = amdgpu_irq_add_domain(adev);
	if (ret)
		return ret;

	cik_ih_set_interrupt_funcs(adev);

	return 0;
}

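/* Allocate a 64 KiB IH ring buffer and initialize the amdgpu IRQ support. */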
static int cik_ih_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false);
	if (r)
		return r;

	r = amdgpu_irq_init(adev);

	return r;
}

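/* Tear down IRQ support, free the IH ring buffer and remove the IRQ domain. */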
static int cik_ih_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_fini(adev);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih);
	amdgpu_irq_remove_domain(adev);

	return 0;
}

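/* Program the IH ring registers and enable interrupt delivery. */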
static int cik_ih_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return cik_ih_irq_init(adev);
}

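/* Disable interrupt delivery and let pending interrupts drain. */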
static int cik_ih_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cik_ih_irq_disable(adev);

	return 0;
}

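/* Suspend and resume simply mirror hw_fini and hw_init. */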
static int cik_ih_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return cik_ih_hw_fini(adev);
}

static int cik_ih_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return cik_ih_hw_init(adev);
}

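/* The IH block is idle once SRBM_STATUS no longer reports IH_BUSY. */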
static bool cik_ih_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
		return false;

	return true;
}

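/* Poll SRBM_STATUS until the IH block goes idle or the device timeout expires. */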
static int cik_ih_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read the IH busy bit */
		tmp = RREG32(mmSRBM_STATUS) & SRBM_STATUS__IH_BUSY_MASK;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

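/* If the IH block is stuck busy, pulse its SRBM soft reset bit to recover it. */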
static int cik_ih_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK;

	if (srbm_soft_reset) {
		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int cik_ih_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	return 0;
}

static int cik_ih_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs cik_ih_ip_funcs = {
	.name = "cik_ih",
	.early_init = cik_ih_early_init,
	.late_init = NULL,
	.sw_init = cik_ih_sw_init,
	.sw_fini = cik_ih_sw_fini,
	.hw_init = cik_ih_hw_init,
	.hw_fini = cik_ih_hw_fini,
	.suspend = cik_ih_suspend,
	.resume = cik_ih_resume,
	.is_idle = cik_ih_is_idle,
	.wait_for_idle = cik_ih_wait_for_idle,
	.soft_reset = cik_ih_soft_reset,
	.set_clockgating_state = cik_ih_set_clockgating_state,
	.set_powergating_state = cik_ih_set_powergating_state,
};

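/* Ring callbacks used by the amdgpu interrupt core to drive the CIK IH ring. */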
static const struct amdgpu_ih_funcs cik_ih_funcs = {
	.get_wptr = cik_ih_get_wptr,
	.decode_iv = cik_ih_decode_iv,
	.set_rptr = cik_ih_set_rptr
};

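/* Point the device's IH function table at the CIK implementation above. */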
static void cik_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
	adev->irq.ih_funcs = &cik_ih_funcs;
}

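/* IP block description for the CIK interrupt handler (IH v2.0). */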
const struct amdgpu_ip_block_version cik_ih_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_IH,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &cik_ih_ip_funcs,
};