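/*
 * kfd2kgd interface for Arcturus.
 *
 * Implements the SDMA queue management callbacks (load, dump, is_occupied,
 * destroy) for the eight Arcturus SDMA engines, and reuses the shared GFX v9
 * implementations (amdgpu_amdkfd_gfx_v9.h) for everything else.
 */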
#undef pr_fmt
#define pr_fmt(fmt) "kfd2kgd: " fmt

#include <linux/module.h>
#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/mmu_context.h>
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "sdma0/sdma0_4_2_2_offset.h"
#include "sdma0/sdma0_4_2_2_sh_mask.h"
#include "sdma1/sdma1_4_2_2_offset.h"
#include "sdma1/sdma1_4_2_2_sh_mask.h"
#include "sdma2/sdma2_4_2_2_offset.h"
#include "sdma2/sdma2_4_2_2_sh_mask.h"
#include "sdma3/sdma3_4_2_2_offset.h"
#include "sdma3/sdma3_4_2_2_sh_mask.h"
#include "sdma4/sdma4_4_2_2_offset.h"
#include "sdma4/sdma4_4_2_2_sh_mask.h"
#include "sdma5/sdma5_4_2_2_offset.h"
#include "sdma5/sdma5_4_2_2_sh_mask.h"
#include "sdma6/sdma6_4_2_2_offset.h"
#include "sdma6/sdma6_4_2_2_sh_mask.h"
#include "sdma7/sdma7_4_2_2_offset.h"
#include "sdma7/sdma7_4_2_2_sh_mask.h"
#include "v9_structs.h"
#include "soc15.h"
#include "soc15d.h"
#include "amdgpu_amdkfd_gfx_v9.h"

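/*
 * Default register count for HQD dumps; kgd_hqd_sdma_dump() below redefines
 * it for the SDMA register ranges it actually captures. DUMP_REG() records
 * one register into the caller-provided (*dump)[][2] array: column 0 is the
 * byte offset (dword offset << 2), column 1 is the current register value.
 */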
#define HQD_N_REGS 56
#define DUMP_REG(addr) do {				\
		if (WARN_ON_ONCE(i >= HQD_N_REGS))	\
			break;				\
		(*dump)[i][0] = (addr) << 2;		\
		(*dump)[i++][1] = RREG32(addr);		\
	} while (0)

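/* struct kgd_dev is an opaque handle that really points at the amdgpu device */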
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}

static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
{
	return (struct v9_sdma_mqd *)mqd;
}

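/*
 * Compute the register offset base for an SDMA RLC queue. Each base[] entry
 * is the engine's absolute RLC0_RB_CNTL offset minus the relative
 * mmSDMA0_RLC0_RB_CNTL constant, so adding any mmSDMA0_RLC0_* register name
 * to the returned value yields the absolute offset of that register for the
 * given engine. Queues within an engine are strided by the distance between
 * the RLC1 and RLC0 register blocks.
 */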
static uint32_t get_sdma_base_addr(struct amdgpu_device *adev,
				unsigned int engine_id,
				unsigned int queue_id)
{
	uint32_t base[8] = {
		SOC15_REG_OFFSET(SDMA0, 0,
				 mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
		SOC15_REG_OFFSET(SDMA1, 0,
				 mmSDMA1_RLC0_RB_CNTL) - mmSDMA1_RLC0_RB_CNTL,
		SOC15_REG_OFFSET(SDMA2, 0,
				 mmSDMA2_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL,
		SOC15_REG_OFFSET(SDMA3, 0,
				 mmSDMA3_RLC0_RB_CNTL) - mmSDMA3_RLC0_RB_CNTL,
		SOC15_REG_OFFSET(SDMA4, 0,
				 mmSDMA4_RLC0_RB_CNTL) - mmSDMA4_RLC0_RB_CNTL,
		SOC15_REG_OFFSET(SDMA5, 0,
				 mmSDMA5_RLC0_RB_CNTL) - mmSDMA5_RLC0_RB_CNTL,
		SOC15_REG_OFFSET(SDMA6, 0,
				 mmSDMA6_RLC0_RB_CNTL) - mmSDMA6_RLC0_RB_CNTL,
		SOC15_REG_OFFSET(SDMA7, 0,
				 mmSDMA7_RLC0_RB_CNTL) - mmSDMA7_RLC0_RB_CNTL
	};
	uint32_t retval;

	retval = base[engine_id] + queue_id * (mmSDMA0_RLC1_RB_CNTL -
					       mmSDMA0_RLC0_RB_CNTL);

	pr_debug("sdma base address: 0x%x\n", retval);

	return retval;
}

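/*
 * Translate an SDMA engine-relative register offset into an absolute one
 * using the per-IP reg_offset tables. The segment index differs between
 * SDMA0 ([0][0]) and SDMA1-7 ([0][1]), presumably reflecting how the
 * Arcturus SDMA register apertures are laid out in the IP base tables.
 */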
static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev,
				    u32 instance, u32 offset)
{
	switch (instance) {
	case 0:
		return (adev->reg_offset[SDMA0_HWIP][0][0] + offset);
	case 1:
		return (adev->reg_offset[SDMA1_HWIP][0][1] + offset);
	case 2:
		return (adev->reg_offset[SDMA2_HWIP][0][1] + offset);
	case 3:
		return (adev->reg_offset[SDMA3_HWIP][0][1] + offset);
	case 4:
		return (adev->reg_offset[SDMA4_HWIP][0][1] + offset);
	case 5:
		return (adev->reg_offset[SDMA5_HWIP][0][1] + offset);
	case 6:
		return (adev->reg_offset[SDMA6_HWIP][0][1] + offset);
	case 7:
		return (adev->reg_offset[SDMA7_HWIP][0][1] + offset);
	default:
		break;
	}
	return 0;
}

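/*
 * Load an SDMA queue from its MQD: disable the ring buffer, wait up to two
 * seconds for the RLC context to go idle, clear RESUME_CTX, program the
 * doorbell and read pointer, take the write pointer from user space when it
 * is readable (falling back to the saved read pointer otherwise), then
 * program the ring base and rptr-report addresses and re-enable the ring.
 */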
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
			     uint32_t __user *wptr, struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v9_sdma_mqd *m;
	uint32_t sdma_base_addr, sdmax_gfx_context_cntl;
	unsigned long end_jiffies;
	uint32_t data;
	uint64_t data64;
	uint64_t __user *wptr64 = (uint64_t __user *)wptr;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
					    m->sdma_queue_id);
	sdmax_gfx_context_cntl = sdma_v4_0_get_reg_offset(adev,
			m->sdma_engine_id, mmSDMA0_GFX_CONTEXT_CNTL);

	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
		m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));

	end_jiffies = msecs_to_jiffies(2000) + jiffies;
	while (true) {
		data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies))
			return -ETIME;
		usleep_range(500, 1000);
	}
	data = RREG32(sdmax_gfx_context_cntl);
	data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
			     RESUME_CTX, 0);
	WREG32(sdmax_gfx_context_cntl, data);

	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL_OFFSET,
	       m->sdmax_rlcx_doorbell_offset);

	data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
			     ENABLE, 1);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI,
				m->sdmax_rlcx_rb_rptr_hi);

	WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
	if (read_user_wptr(mm, wptr64, data64)) {
		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
		       lower_32_bits(data64));
		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
		       upper_32_bits(data64));
	} else {
		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
		       m->sdmax_rlcx_rb_rptr);
		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
		       m->sdmax_rlcx_rb_rptr_hi);
	}
	WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);

	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
			m->sdmax_rlcx_rb_base_hi);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
			m->sdmax_rlcx_rb_rptr_addr_lo);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
			m->sdmax_rlcx_rb_rptr_addr_hi);

	data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
			     RB_ENABLE, 1);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);

	return 0;
}

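/*
 * Snapshot the queue's RLC registers for debugging. Four contiguous register
 * ranges (HQD_N_REGS = 19+6+7+10 entries in total) are copied into a
 * kmalloc'ed array of (byte offset, value) pairs that the caller frees.
 */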
static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
			     uint32_t engine_id, uint32_t queue_id,
			     uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t sdma_base_addr = get_sdma_base_addr(adev, engine_id, queue_id);
	uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+10)

	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
		DUMP_REG(sdma_base_addr + reg);
	for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
		DUMP_REG(sdma_base_addr + reg);
	for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
	     reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
		DUMP_REG(sdma_base_addr + reg);
	for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
	     reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
		DUMP_REG(sdma_base_addr + reg);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}

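/* A queue counts as occupied as long as its ring buffer is still enabled. */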
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v9_sdma_mqd *m;
	uint32_t sdma_base_addr;
	uint32_t sdma_rlc_rb_cntl;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
					    m->sdma_queue_id);

	sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);

	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
		return true;

	return false;
}

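/*
 * Tear down an SDMA queue: disable the ring buffer, poll up to utimeout
 * milliseconds for the RLC context to go idle, clear the doorbell, and save
 * the final read pointer back into the MQD so the queue can be restored.
 */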
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int utimeout)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v9_sdma_mqd *m;
	uint32_t sdma_base_addr;
	uint32_t temp;
	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
					    m->sdma_queue_id);

	temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);

	while (true) {
		temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies))
			return -ETIME;
		usleep_range(500, 1000);
	}

	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
		RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);

	m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
	m->sdmax_rlcx_rb_rptr_hi =
		RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI);

	return 0;
}

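/*
 * Dispatch table handed to the KFD. The SDMA hooks are the Arcturus-specific
 * implementations above; everything else is shared with the common GFX v9
 * code declared in amdgpu_amdkfd_gfx_v9.h.
 */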
static const struct kfd2kgd_calls kfd2kgd = {
	.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
	.set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
	.init_interrupts = kgd_gfx_v9_init_interrupts,
	.hqd_load = kgd_gfx_v9_hqd_load,
	.hqd_sdma_load = kgd_hqd_sdma_load,
	.hqd_dump = kgd_gfx_v9_hqd_dump,
	.hqd_sdma_dump = kgd_hqd_sdma_dump,
	.hqd_is_occupied = kgd_gfx_v9_hqd_is_occupied,
	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
	.hqd_destroy = kgd_gfx_v9_hqd_destroy,
	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
	.address_watch_disable = kgd_gfx_v9_address_watch_disable,
	.address_watch_execute = kgd_gfx_v9_address_watch_execute,
	.wave_control_execute = kgd_gfx_v9_wave_control_execute,
	.address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
	.get_atc_vmid_pasid_mapping_pasid =
			kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid,
	.get_atc_vmid_pasid_mapping_valid =
			kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid,
	.set_scratch_backing_va = kgd_gfx_v9_set_scratch_backing_va,
	.get_tile_config = kgd_gfx_v9_get_tile_config,
	.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
	.invalidate_tlbs = kgd_gfx_v9_invalidate_tlbs,
	.invalidate_tlbs_vmid = kgd_gfx_v9_invalidate_tlbs_vmid,
	.get_hive_id = amdgpu_amdkfd_get_hive_id,
};

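/*
 * Entry point intended for the common amdgpu_amdkfd probe code to select
 * this table for Arcturus-family ASICs; the cast drops the const qualifier
 * to match the interface's return type.
 */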
struct kfd2kgd_calls *amdgpu_amdkfd_arcturus_get_functions(void)
{
	return (struct kfd2kgd_calls *)&kfd2kgd;
}