1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24#include <linux/pci.h>
25
26#include "amdgpu.h"
27#include "amdgpu_ih.h"
28#include "cikd.h"
29
30#include "bif/bif_4_1_d.h"
31#include "bif/bif_4_1_sh_mask.h"
32
33#include "oss/oss_2_0_d.h"
34#include "oss/oss_2_0_sh_mask.h"
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51static void cik_ih_set_interrupt_funcs(struct amdgpu_device *adev);
52
53
54
55
56
57
58
59
/**
 * cik_ih_enable_interrupts - enable the IH ring buffer (CIK)
 *
 * @adev: amdgpu_device pointer
 *
 * Sets the interrupt-enable bit in IH_CNTL and the ring-enable bit in
 * IH_RB_CNTL, then records the enabled state in adev->irq.ih.
 */
static void cik_ih_enable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_cntl = RREG32(mmIH_CNTL);
	u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL);

	ih_cntl |= IH_CNTL__ENABLE_INTR_MASK;
	ih_rb_cntl |= IH_RB_CNTL__RB_ENABLE_MASK;
	WREG32(mmIH_CNTL, ih_cntl);
	WREG32(mmIH_RB_CNTL, ih_rb_cntl);
	adev->irq.ih.enabled = true;
}
71
72
73
74
75
76
77
78
/**
 * cik_ih_disable_interrupts - disable the IH ring buffer (CIK)
 *
 * @adev: amdgpu_device pointer
 *
 * Clears the ring-enable bit in IH_RB_CNTL and the interrupt-enable bit
 * in IH_CNTL, resets the hardware read/write pointers to zero, and
 * clears the software enabled/rptr state in adev->irq.ih.
 */
static void cik_ih_disable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL);
	u32 ih_cntl = RREG32(mmIH_CNTL);

	ih_rb_cntl &= ~IH_RB_CNTL__RB_ENABLE_MASK;
	ih_cntl &= ~IH_CNTL__ENABLE_INTR_MASK;
	WREG32(mmIH_RB_CNTL, ih_rb_cntl);
	WREG32(mmIH_CNTL, ih_cntl);

	/* reset the hardware ring pointers */
	WREG32(mmIH_RB_RPTR, 0);
	WREG32(mmIH_RB_WPTR, 0);
	adev->irq.ih.enabled = false;
	adev->irq.ih.rptr = 0;
}
94
95
96
97
98
99
100
101
102
103
104
105
/**
 * cik_ih_irq_init - program the IH ring buffer and enable it (CIK)
 *
 * @adev: amdgpu_device pointer
 *
 * Disables interrupts, programs the interrupt controller (dummy read
 * address, snoop behavior), configures the IH ring buffer registers
 * (base, size, write-pointer writeback address), resets the ring
 * pointers, sets up IH_CNTL credits/rearm, and finally re-enables
 * interrupts.
 *
 * Returns 0 for success.
 */
static int cik_ih_irq_init(struct amdgpu_device *adev)
{
	struct amdgpu_ih_ring *ih = &adev->irq.ih;
	int rb_bufsz;
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;

	/* disable irqs while reprogramming the ring */
	cik_ih_disable_interrupts(adev);

	/* point the dummy read address at the dummy page (256-byte aligned) */
	WREG32(mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
	interrupt_cntl = RREG32(mmINTERRUPT_CNTL);

	/* IH_DUMMY_RD_OVERRIDE=0 - use the dummy read address programmed
	 * above rather than overriding it */
	interrupt_cntl &= ~INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK;
	/* IH_REQ_NONSNOOP_EN=0 - keep IH memory requests coherent (snooped) */
	interrupt_cntl &= ~INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK;
	WREG32(mmINTERRUPT_CNTL, interrupt_cntl);

	WREG32(mmIH_RB_BASE, adev->irq.ih.gpu_addr >> 8);
	/* ring size field is log2 of the size in dwords */
	rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);

	ih_rb_cntl = (IH_RB_CNTL__WPTR_OVERFLOW_ENABLE_MASK |
		      IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK |
		      (rb_bufsz << 1));

	ih_rb_cntl |= IH_RB_CNTL__WPTR_WRITEBACK_ENABLE_MASK;

	/* address where the hardware writes the current wptr back to memory;
	 * the HI register only holds 8 bits of the upper address */
	WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(ih->wptr_addr));
	WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(ih->wptr_addr) & 0xFF);

	WREG32(mmIH_RB_CNTL, ih_rb_cntl);

	/* start the ring empty */
	WREG32(mmIH_RB_RPTR, 0);
	WREG32(mmIH_RB_WPTR, 0);

	/* Default settings for IH_CNTL (disabled at first) */
	ih_cntl = (0x10 << IH_CNTL__MC_WRREQ_CREDIT__SHIFT) |
		  (0x10 << IH_CNTL__MC_WR_CLEAN_CNT__SHIFT) |
		  (0 << IH_CNTL__MC_VMID__SHIFT);
	/* RPTR_REARM only works with MSIs */
	if (adev->irq.msi_enabled)
		ih_cntl |= IH_CNTL__RPTR_REARM_MASK;
	WREG32(mmIH_CNTL, ih_cntl);

	pci_set_master(adev->pdev);

	/* enable irqs */
	cik_ih_enable_interrupts(adev);

	return 0;
}
161
162
163
164
165
166
167
168
/**
 * cik_ih_irq_disable - disable interrupts (CIK)
 *
 * @adev: amdgpu_device pointer
 *
 * Disables the IH ring and waits briefly for any in-flight
 * interrupt processing to drain.
 */
static void cik_ih_irq_disable(struct amdgpu_device *adev)
{
	cik_ih_disable_interrupts(adev);
	/* Wait and acknowledge irq */
	mdelay(1);
}
175
176
177
178
179
180
181
182
183
184
185
186
187
/**
 * cik_ih_get_wptr - get the IH ring buffer wptr (CIK)
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring buffer to fetch wptr for
 *
 * Reads the write pointer from the hardware's writeback slot
 * (ih->wptr_cpu).  If the overflow bit is set, warns, skips the rptr
 * forward past one 16-byte vector after the new wptr so processing can
 * resume, and clears the overflow by writing WPTR_OVERFLOW_CLEAR.
 *
 * Returns the value of the wptr, masked to the ring size.
 */
static u32 cik_ih_get_wptr(struct amdgpu_device *adev,
			   struct amdgpu_ih_ring *ih)
{
	u32 wptr, tmp;

	wptr = le32_to_cpu(*ih->wptr_cpu);

	if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) {
		wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK;
		/* When a ring buffer overflow happens, the most recent entry
		 * (the one at wptr) may not have been fully written.  Skip
		 * back to just past it (16 bytes = one vector) and resume
		 * there, dropping the oldest unprocessed entries.
		 */
		dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
			 wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
		ih->rptr = (wptr + 16) & ih->ptr_mask;
		tmp = RREG32(mmIH_RB_CNTL);
		tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
		WREG32(mmIH_RB_CNTL, tmp);
	}
	return (wptr & ih->ptr_mask);
}
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242static void cik_ih_decode_iv(struct amdgpu_device *adev,
243 struct amdgpu_ih_ring *ih,
244 struct amdgpu_iv_entry *entry)
245{
246
247 u32 ring_index = ih->rptr >> 2;
248 uint32_t dw[4];
249
250 dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
251 dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
252 dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
253 dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
254
255 entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
256 entry->src_id = dw[0] & 0xff;
257 entry->src_data[0] = dw[1] & 0xfffffff;
258 entry->ring_id = dw[2] & 0xff;
259 entry->vmid = (dw[2] >> 8) & 0xff;
260 entry->pasid = (dw[2] >> 16) & 0xffff;
261
262
263 ih->rptr += 16;
264}
265
266
267
268
269
270
271
272
273
/**
 * cik_ih_set_rptr - write the software rptr back to the hardware (CIK)
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring buffer whose rptr is committed
 *
 * Tells the hardware how far processing has advanced so the consumed
 * ring space can be reused.
 */
static void cik_ih_set_rptr(struct amdgpu_device *adev,
			    struct amdgpu_ih_ring *ih)
{
	WREG32(mmIH_RB_RPTR, ih->rptr);
}
279
/**
 * cik_ih_early_init - early IP-block init for the CIK IH
 *
 * @handle: amdgpu_device pointer (as an opaque IP-block handle)
 *
 * Registers the IRQ domain and installs the CIK IH function table.
 * Returns 0 on success or the error from amdgpu_irq_add_domain().
 */
static int cik_ih_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = amdgpu_irq_add_domain(adev);

	if (ret)
		return ret;

	cik_ih_set_interrupt_funcs(adev);
	return 0;
}
293
294static int cik_ih_sw_init(void *handle)
295{
296 int r;
297 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
298
299 r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false);
300 if (r)
301 return r;
302
303 r = amdgpu_irq_init(adev);
304
305 return r;
306}
307
/**
 * cik_ih_sw_fini - software teardown for the CIK IH block
 *
 * @handle: amdgpu_device pointer (as an opaque IP-block handle)
 *
 * Tears down the common IRQ software state and removes the IRQ domain.
 * Returns 0.
 */
static int cik_ih_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_fini_sw(adev);
	amdgpu_irq_remove_domain(adev);

	return 0;
}
317
/**
 * cik_ih_hw_init - hardware init hook: program and enable the IH ring
 *
 * @handle: amdgpu_device pointer (as an opaque IP-block handle)
 *
 * Returns the result of cik_ih_irq_init().
 */
static int cik_ih_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return cik_ih_irq_init(adev);
}
324
/**
 * cik_ih_hw_fini - hardware teardown hook: disable the IH ring
 *
 * @handle: amdgpu_device pointer (as an opaque IP-block handle)
 *
 * Returns 0.
 */
static int cik_ih_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cik_ih_irq_disable(adev);

	return 0;
}
333
/**
 * cik_ih_suspend - suspend hook, same as hardware teardown
 *
 * @handle: amdgpu_device pointer (as an opaque IP-block handle)
 */
static int cik_ih_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return cik_ih_hw_fini(adev);
}
340
/**
 * cik_ih_resume - resume hook, same as hardware init
 *
 * @handle: amdgpu_device pointer (as an opaque IP-block handle)
 */
static int cik_ih_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return cik_ih_hw_init(adev);
}
347
348static bool cik_ih_is_idle(void *handle)
349{
350 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
351 u32 tmp = RREG32(mmSRBM_STATUS);
352
353 if (tmp & SRBM_STATUS__IH_BUSY_MASK)
354 return false;
355
356 return true;
357}
358
359static int cik_ih_wait_for_idle(void *handle)
360{
361 unsigned i;
362 u32 tmp;
363 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
364
365 for (i = 0; i < adev->usec_timeout; i++) {
366
367 tmp = RREG32(mmSRBM_STATUS) & SRBM_STATUS__IH_BUSY_MASK;
368 if (!tmp)
369 return 0;
370 udelay(1);
371 }
372 return -ETIMEDOUT;
373}
374
/**
 * cik_ih_soft_reset - soft-reset the IH block if it is busy
 *
 * @handle: amdgpu_device pointer (as an opaque IP-block handle)
 *
 * If SRBM_STATUS reports the IH busy, pulses the IH bit in
 * SRBM_SOFT_RESET: set the bit, read back and wait, then clear the bit,
 * read back and wait again.  Returns 0.
 */
static int cik_ih_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK;

	if (srbm_soft_reset) {
		/* assert the reset bit(s) */
		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);	/* post the write */

		udelay(50);

		/* deassert and let the block come back up */
		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);	/* post the write */

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}
404
/* Clockgating is not implemented for the CIK IH block; always succeeds. */
static int cik_ih_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	return 0;
}
410
/* Powergating is not implemented for the CIK IH block; always succeeds. */
static int cik_ih_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}
416
/* IP-block callback table wiring the CIK IH hooks into the amdgpu core. */
static const struct amd_ip_funcs cik_ih_ip_funcs = {
	.name = "cik_ih",
	.early_init = cik_ih_early_init,
	.late_init = NULL,
	.sw_init = cik_ih_sw_init,
	.sw_fini = cik_ih_sw_fini,
	.hw_init = cik_ih_hw_init,
	.hw_fini = cik_ih_hw_fini,
	.suspend = cik_ih_suspend,
	.resume = cik_ih_resume,
	.is_idle = cik_ih_is_idle,
	.wait_for_idle = cik_ih_wait_for_idle,
	.soft_reset = cik_ih_soft_reset,
	.set_clockgating_state = cik_ih_set_clockgating_state,
	.set_powergating_state = cik_ih_set_powergating_state,
};
433
/* IH ring callbacks used by the common amdgpu interrupt path. */
static const struct amdgpu_ih_funcs cik_ih_funcs = {
	.get_wptr = cik_ih_get_wptr,
	.decode_iv = cik_ih_decode_iv,
	.set_rptr = cik_ih_set_rptr
};
439
/* Install the CIK IH function table on the device. */
static void cik_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
	adev->irq.ih_funcs = &cik_ih_funcs;
}
444
/* Exported IP-block descriptor for the CIK IH (v2.0). */
const struct amdgpu_ip_block_version cik_ih_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_IH,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &cik_ih_ip_funcs,
};
453