/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_ih.h"

#include "oss/osssys_5_0_0_offset.h"
#include "oss/osssys_5_0_0_sh_mask.h"

#include "soc15_common.h"
#include "navi10_ih.h"

#define MAX_REARM_RETRY 10

#define mmIH_CHICKEN_Sienna_Cichlid                 0x018d
#define mmIH_CHICKEN_Sienna_Cichlid_BASE_IDX        0

static void navi10_ih_set_interrupt_funcs(struct amdgpu_device *adev);

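/**
 * navi10_ih_init_register_offset - Initialize register offsets for the IH rings
 *
 * @adev: amdgpu_device pointer
 *
 * Cache the register offsets (and PSP register id) for every IH ring that has
 * been allocated, so the rest of the code can program the rings through the
 * per-ring ih_regs structure (NAVI10).
 */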
static void navi10_ih_init_register_offset(struct amdgpu_device *adev)
{
	struct amdgpu_ih_regs *ih_regs;

	if (adev->irq.ih.ring_size) {
		ih_regs = &adev->irq.ih.ih_regs;
		ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE);
		ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI);
		ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
		ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
		ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
		ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR);
		ih_regs->ih_rb_wptr_addr_lo = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO);
		ih_regs->ih_rb_wptr_addr_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI);
		ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL;
	}

	if (adev->irq.ih1.ring_size) {
		ih_regs = &adev->irq.ih1.ih_regs;
		ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING1);
		ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING1);
		ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1);
		ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1);
		ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
		ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1);
		ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING1;
	}

	if (adev->irq.ih2.ring_size) {
		ih_regs = &adev->irq.ih2.ih_regs;
		ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING2);
		ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING2);
		ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2);
		ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2);
		ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
		ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2);
		ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING2;
	}
}

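/**
 * force_update_wptr_for_self_int - Force a wptr update for self interrupts
 *
 * @adev: amdgpu_device pointer
 * @threshold: ring usage threshold that triggers wptr reporting
 * @timeout: timeout that triggers wptr reporting
 * @enabled: true to enable the forced wptr update, false to disable it
 *
 * On Sienna Cichlid and newer ASICs, program IH rings 1 and 2 so that the
 * write pointer for self interrupts is pushed out once the used-entry
 * threshold is reached or the timeout expires.
 */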
static void
force_update_wptr_for_self_int(struct amdgpu_device *adev,
			       u32 threshold, u32 timeout, bool enabled)
{
	u32 ih_cntl, ih_rb_cntl;

	if (adev->asic_type < CHIP_SIENNA_CICHLID)
		return;

	ih_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_CNTL2);
	ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);

	ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL2,
				SELF_IV_FORCE_WPTR_UPDATE_TIMEOUT, timeout);
	ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL2,
				SELF_IV_FORCE_WPTR_UPDATE_ENABLE, enabled);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
				   RB_USED_INT_THRESHOLD, threshold);

	if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
		if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1, ih_rb_cntl))
			return;
	} else {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
	}

	ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
				   RB_USED_INT_THRESHOLD, threshold);
	if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
		if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2, ih_rb_cntl))
			return;
	} else {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
	}

	WREG32_SOC15(OSSSYS, 0, mmIH_CNTL2, ih_cntl);
}

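/**
 * navi10_ih_toggle_ring_interrupts - toggle the interrupt ring buffer
 *
 * @adev: amdgpu_device pointer
 * @ih: amdgpu_ih_ring pointer
 * @enable: true - enable the interrupts, false - disable the interrupts
 *
 * Toggle the interrupt ring buffer (NAVI10).
 */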
static int navi10_ih_toggle_ring_interrupts(struct amdgpu_device *adev,
					    struct amdgpu_ih_ring *ih,
					    bool enable)
{
	struct amdgpu_ih_regs *ih_regs;
	uint32_t tmp;

	ih_regs = &ih->ih_regs;

	tmp = RREG32(ih_regs->ih_rb_cntl);
	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
	/* enable_intr field is only valid in ring0 */
	if (ih == &adev->irq.ih)
		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));

	if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
		if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
			return -ETIMEDOUT;
	} else {
		WREG32(ih_regs->ih_rb_cntl, tmp);
	}

	if (enable) {
		ih->enabled = true;
	} else {
		/* set rptr, wptr to 0 */
		WREG32(ih_regs->ih_rb_rptr, 0);
		WREG32(ih_regs->ih_rb_wptr, 0);
		ih->enabled = false;
		ih->rptr = 0;
	}

	return 0;
}

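/**
 * navi10_ih_toggle_interrupts - Toggle all the available interrupt ring buffers
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable interrupt ring buffers
 *
 * Toggle all the available interrupt ring buffers (NAVI10).
 */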
static int navi10_ih_toggle_interrupts(struct amdgpu_device *adev, bool enable)
{
	struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2};
	int i;
	int r;

	for (i = 0; i < ARRAY_SIZE(ih); i++) {
		if (ih[i]->ring_size) {
			r = navi10_ih_toggle_ring_interrupts(adev, ih[i], enable);
			if (r)
				return r;
		}
	}

	return 0;
}

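/*
 * navi10_ih_rb_cntl - build the IH_RB_CNTL value for an IH ring
 *
 * Fill in the ring buffer control word for @ih: MC space (bus vs. GPU
 * address), overflow handling, ring size, write pointer writeback and
 * snooping, based on the ring's configuration.
 */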
static uint32_t navi10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl)
{
	int rb_bufsz = order_base_2(ih->ring_size / 4);

	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   MC_SPACE, ih->use_bus_addr ? 1 : 4);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   WPTR_OVERFLOW_CLEAR, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   WPTR_OVERFLOW_ENABLE, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz);
	/* Ring Buffer write pointer writeback. If enabled, the IH_RB_WPTR
	 * register value is written to memory.
	 */
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   WPTR_WRITEBACK_ENABLE, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SNOOP, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_RO, 0);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0);

	return ih_rb_cntl;
}

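/*
 * navi10_ih_doorbell_rptr - build the IH_DOORBELL_RPTR value for an IH ring
 *
 * Enable the rptr doorbell with the ring's doorbell index when doorbells
 * are in use, otherwise leave the doorbell disabled.
 */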
static uint32_t navi10_ih_doorbell_rptr(struct amdgpu_ih_ring *ih)
{
	u32 ih_doorbell_rtpr = 0;

	if (ih->use_doorbell) {
		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
						 IH_DOORBELL_RPTR, OFFSET,
						 ih->doorbell_index);
		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
						 IH_DOORBELL_RPTR,
						 ENABLE, 1);
	} else {
		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
						 IH_DOORBELL_RPTR,
						 ENABLE, 0);
	}
	return ih_doorbell_rtpr;
}

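/**
 * navi10_ih_enable_ring - enable an ih ring buffer
 *
 * @adev: amdgpu_device pointer
 * @ih: amdgpu_ih_ring pointer
 *
 * Enable an ih ring buffer (NAVI10).
 */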
static int navi10_ih_enable_ring(struct amdgpu_device *adev,
				 struct amdgpu_ih_ring *ih)
{
	struct amdgpu_ih_regs *ih_regs;
	uint32_t tmp;

	ih_regs = &ih->ih_regs;

	/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer */
	WREG32(ih_regs->ih_rb_base, ih->gpu_addr >> 8);
	WREG32(ih_regs->ih_rb_base_hi, (ih->gpu_addr >> 40) & 0xff);

	tmp = RREG32(ih_regs->ih_rb_cntl);
	tmp = navi10_ih_rb_cntl(ih, tmp);
	if (ih == &adev->irq.ih)
		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
	if (ih == &adev->irq.ih1) {
		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
	}

	if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
		if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
			DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
			return -ETIMEDOUT;
		}
	} else {
		WREG32(ih_regs->ih_rb_cntl, tmp);
	}

	if (ih == &adev->irq.ih) {
		/* set the ih ring 0 writeback address whether it's enabled or not */
		WREG32(ih_regs->ih_rb_wptr_addr_lo, lower_32_bits(ih->wptr_addr));
		WREG32(ih_regs->ih_rb_wptr_addr_hi, upper_32_bits(ih->wptr_addr) & 0xFFFF);
	}

	/* set rptr, wptr to 0 */
	WREG32(ih_regs->ih_rb_wptr, 0);
	WREG32(ih_regs->ih_rb_rptr, 0);

	WREG32(ih_regs->ih_doorbell_rptr, navi10_ih_doorbell_rptr(ih));

	return 0;
}

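/**
 * navi10_ih_irq_init - init and enable the interrupt ring
 *
 * @adev: amdgpu_device pointer
 *
 * Disable interrupts, configure and enable each allocated IH ring buffer,
 * set up the doorbell range and interrupt storm/flood controls, and then
 * re-enable interrupts (NAVI10).
 * Called at device load and resume.
 * Returns 0 for success, errors for failure.
 */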
static int navi10_ih_irq_init(struct amdgpu_device *adev)
{
	struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2};
	u32 ih_chicken;
	u32 tmp;
	int ret;
	int i;

	/* disable irqs */
	ret = navi10_ih_toggle_interrupts(adev, false);
	if (ret)
		return ret;

	adev->nbio.funcs->ih_control(adev);

	if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)) {
		if (ih[0]->use_bus_addr) {
			switch (adev->asic_type) {
			case CHIP_SIENNA_CICHLID:
			case CHIP_NAVY_FLOUNDER:
			case CHIP_VANGOGH:
			case CHIP_DIMGREY_CAVEFISH:
			case CHIP_BEIGE_GOBY:
			case CHIP_YELLOW_CARP:
				ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN_Sienna_Cichlid);
				ih_chicken = REG_SET_FIELD(ih_chicken,
						IH_CHICKEN, MC_SPACE_GPA_ENABLE, 1);
				WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN_Sienna_Cichlid, ih_chicken);
				break;
			default:
				ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN);
				ih_chicken = REG_SET_FIELD(ih_chicken,
						IH_CHICKEN, MC_SPACE_GPA_ENABLE, 1);
				WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN, ih_chicken);
				break;
			}
		}
	}

	for (i = 0; i < ARRAY_SIZE(ih); i++) {
		if (ih[i]->ring_size) {
			ret = navi10_ih_enable_ring(adev, ih[i]);
			if (ret)
				return ret;
		}
	}

	/* update doorbell range for ih ring 0 */
	adev->nbio.funcs->ih_doorbell_range(adev, ih[0]->use_doorbell,
					    ih[0]->doorbell_index);

	tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
	tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
			    CLIENT18_IS_STORM_CLIENT, 1);
	WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp);

	tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL);
	tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
	WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp);

	pci_set_master(adev->pdev);

	/* enable interrupts */
	ret = navi10_ih_toggle_interrupts(adev, true);
	if (ret)
		return ret;
	/* enable wptr force update for self int */
	force_update_wptr_for_self_int(adev, 0, 8, true);

	if (adev->irq.ih_soft.ring_size)
		adev->irq.ih_soft.enabled = true;

	return 0;
}

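/**
 * navi10_ih_irq_disable - disable interrupts
 *
 * @adev: amdgpu_device pointer
 *
 * Disable interrupts on the hw (NAVI10).
 */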
static void navi10_ih_irq_disable(struct amdgpu_device *adev)
{
	force_update_wptr_for_self_int(adev, 0, 8, false);
	navi10_ih_toggle_interrupts(adev, false);

	/* Wait and acknowledge irq */
	mdelay(1);
}

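/**
 * navi10_ih_get_wptr - get the IH ring buffer wptr
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring buffer to fetch wptr
 *
 * Get the IH ring buffer wptr from either the register
 * or the writeback memory buffer (NAVI10).  Also check for
 * ring buffer overflow and deal with it.
 * Returns the value of the wptr.
 */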
static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
			      struct amdgpu_ih_ring *ih)
{
	u32 wptr, tmp;
	struct amdgpu_ih_regs *ih_regs;

	wptr = le32_to_cpu(*ih->wptr_cpu);
	ih_regs = &ih->ih_regs;

	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
		goto out;

	wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
		goto out;
	wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);

	/* When a ring buffer overflow happens, start parsing interrupts
	 * from the last not overwritten vector (wptr + 32). Hopefully
	 * this should allow us to catch up.
	 */
	tmp = (wptr + 32) & ih->ptr_mask;
	dev_warn(adev->dev, "IH ring buffer overflow "
		 "(0x%08X, 0x%08X, 0x%08X)\n",
		 wptr, ih->rptr, tmp);
	ih->rptr = tmp;

	tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
	WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
out:
	return (wptr & ih->ptr_mask);
}

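/**
 * navi10_ih_irq_rearm - rearm IRQ if lost
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring to match
 *
 * Re-write the doorbell in case a doorbell write was lost.
 */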
static void navi10_ih_irq_rearm(struct amdgpu_device *adev,
				struct amdgpu_ih_ring *ih)
{
	uint32_t v = 0;
	uint32_t i = 0;
	struct amdgpu_ih_regs *ih_regs;

	ih_regs = &ih->ih_regs;

	/* Rearm IRQ / re-write doorbell if doorbell write is lost */
	for (i = 0; i < MAX_REARM_RETRY; i++) {
		v = RREG32_NO_KIQ(ih_regs->ih_rb_rptr);
		if ((v < ih->ring_size) && (v != ih->rptr))
			WDOORBELL32(ih->doorbell_index, ih->rptr);
		else
			break;
	}
}

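/**
 * navi10_ih_set_rptr - set the IH ring buffer rptr
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring buffer to set rptr
 */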
static void navi10_ih_set_rptr(struct amdgpu_device *adev,
			       struct amdgpu_ih_ring *ih)
{
	struct amdgpu_ih_regs *ih_regs;

	if (ih->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		*ih->rptr_cpu = ih->rptr;
		WDOORBELL32(ih->doorbell_index, ih->rptr);

		if (amdgpu_sriov_vf(adev))
			navi10_ih_irq_rearm(adev, ih);
	} else {
		ih_regs = &ih->ih_regs;
		WREG32(ih_regs->ih_rb_rptr, ih->rptr);
	}
}

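/**
 * navi10_ih_self_irq - dispatch work for ring 1 and 2
 *
 * @adev: amdgpu_device pointer
 * @source: irq source
 * @entry: IV with WPTR update
 *
 * Update the WPTR from the IV and schedule work to handle the entries.
 */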
static int navi10_ih_self_irq(struct amdgpu_device *adev,
			      struct amdgpu_irq_src *source,
			      struct amdgpu_iv_entry *entry)
{
	uint32_t wptr = cpu_to_le32(entry->src_data[0]);

	switch (entry->ring_id) {
	case 1:
		*adev->irq.ih1.wptr_cpu = wptr;
		schedule_work(&adev->irq.ih1_work);
		break;
	case 2:
		*adev->irq.ih2.wptr_cpu = wptr;
		schedule_work(&adev->irq.ih2_work);
		break;
	default:
		break;
	}
	return 0;
}

static const struct amdgpu_irq_src_funcs navi10_ih_self_irq_funcs = {
	.process = navi10_ih_self_irq,
};

static void navi10_ih_set_self_irq_funcs(struct amdgpu_device *adev)
{
	adev->irq.self_irq.num_types = 0;
	adev->irq.self_irq.funcs = &navi10_ih_self_irq_funcs;
}

static int navi10_ih_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	navi10_ih_set_interrupt_funcs(adev);
	navi10_ih_set_self_irq_funcs(adev);
	return 0;
}

static int navi10_ih_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool use_bus_addr;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_IH, 0,
			      &adev->irq.self_irq);
	if (r)
		return r;

	/* use gpu virtual address for ih ring
	 * until ih_chicken is programmed to allow
	 * use of bus address for ih ring by psp bl
	 */
	if ((adev->flags & AMD_IS_APU) ||
	    (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
		use_bus_addr = false;
	else
		use_bus_addr = true;
	r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, use_bus_addr);
	if (r)
		return r;

	adev->irq.ih.use_doorbell = true;
	adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;

	adev->irq.ih1.ring_size = 0;
	adev->irq.ih2.ring_size = 0;

	/* initialize ih control register offsets */
	navi10_ih_init_register_offset(adev);

	r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true);
	if (r)
		return r;

	r = amdgpu_irq_init(adev);

	return r;
}

static int navi10_ih_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_fini_sw(adev);

	return 0;
}

static int navi10_ih_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = navi10_ih_irq_init(adev);
	if (r)
		return r;

	return 0;
}

static int navi10_ih_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	navi10_ih_irq_disable(adev);

	return 0;
}

static int navi10_ih_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return navi10_ih_hw_fini(adev);
}

static int navi10_ih_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return navi10_ih_hw_init(adev);
}

static bool navi10_ih_is_idle(void *handle)
{
	/* todo */
	return true;
}

static int navi10_ih_wait_for_idle(void *handle)
{
	/* todo */
	return -ETIMEDOUT;
}

static int navi10_ih_soft_reset(void *handle)
{
	/* todo */
	return 0;
}

static void navi10_ih_update_clockgating_state(struct amdgpu_device *adev,
					       bool enable)
{
	uint32_t data, def, field_val;

	if (adev->cg_flags & AMD_CG_SUPPORT_IH_CG) {
		def = data = RREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL);
		field_val = enable ? 0 : 1;
		data = REG_SET_FIELD(data, IH_CLK_CTRL,
				     DBUS_MUX_CLK_SOFT_OVERRIDE, field_val);
		data = REG_SET_FIELD(data, IH_CLK_CTRL,
				     OSSSYS_SHARE_CLK_SOFT_OVERRIDE, field_val);
		data = REG_SET_FIELD(data, IH_CLK_CTRL,
				     LIMIT_SMN_CLK_SOFT_OVERRIDE, field_val);
		data = REG_SET_FIELD(data, IH_CLK_CTRL,
				     DYN_CLK_SOFT_OVERRIDE, field_val);
		data = REG_SET_FIELD(data, IH_CLK_CTRL,
				     REG_CLK_SOFT_OVERRIDE, field_val);
		if (def != data)
			WREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL, data);
	}
}

static int navi10_ih_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	navi10_ih_update_clockgating_state(adev,
					   state == AMD_CG_STATE_GATE);
	return 0;
}

static int navi10_ih_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

static void navi10_ih_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!RREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL))
		*flags |= AMD_CG_SUPPORT_IH_CG;
}

static const struct amd_ip_funcs navi10_ih_ip_funcs = {
	.name = "navi10_ih",
	.early_init = navi10_ih_early_init,
	.late_init = NULL,
	.sw_init = navi10_ih_sw_init,
	.sw_fini = navi10_ih_sw_fini,
	.hw_init = navi10_ih_hw_init,
	.hw_fini = navi10_ih_hw_fini,
	.suspend = navi10_ih_suspend,
	.resume = navi10_ih_resume,
	.is_idle = navi10_ih_is_idle,
	.wait_for_idle = navi10_ih_wait_for_idle,
	.soft_reset = navi10_ih_soft_reset,
	.set_clockgating_state = navi10_ih_set_clockgating_state,
	.set_powergating_state = navi10_ih_set_powergating_state,
	.get_clockgating_state = navi10_ih_get_clockgating_state,
};

static const struct amdgpu_ih_funcs navi10_ih_funcs = {
	.get_wptr = navi10_ih_get_wptr,
	.decode_iv = amdgpu_ih_decode_iv_helper,
	.set_rptr = navi10_ih_set_rptr
};

static void navi10_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
	if (adev->irq.ih_funcs == NULL)
		adev->irq.ih_funcs = &navi10_ih_funcs;
}

const struct amdgpu_ip_block_version navi10_ih_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_IH,
	.major = 5,
	.minor = 0,
	.rev = 0,
	.funcs = &navi10_ih_ip_funcs,
};