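/*
 * kfd2kgd interface functions used by amdkfd to drive GFX v10 (Navi1x)
 * hardware through amdgpu: compute and SDMA queue management, VMID/PASID
 * mapping, and TLB invalidation.
 */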
#undef pr_fmt
#define pr_fmt(fmt) "kfd2kgd: " fmt

#include <linux/module.h>
#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
#include <linux/mmu_context.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_ucode.h"
#include "soc15_hw_ip.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "navi10_enum.h"
#include "athub/athub_2_0_0_offset.h"
#include "athub/athub_2_0_0_sh_mask.h"
#include "oss/osssys_5_0_0_offset.h"
#include "oss/osssys_5_0_0_sh_mask.h"
#include "soc15_common.h"
#include "v10_structs.h"
#include "nv.h"
#include "nvd.h"

enum hqd_dequeue_request_type {
	NO_ACTION = 0,
	DRAIN_PIPE,
	RESET_WAVES,
	SAVE_WAVES
};

static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
		uint32_t sh_mem_config,
		uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit,
		uint32_t sh_mem_bases);
static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
		unsigned int vmid);
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
		uint32_t queue_id, uint32_t __user *wptr,
		uint32_t wptr_shift, uint32_t wptr_mask,
		struct mm_struct *mm);
static int kgd_hqd_dump(struct kgd_dev *kgd,
		uint32_t pipe_id, uint32_t queue_id,
		uint32_t (**dump)[2], uint32_t *n_regs);
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
		uint32_t __user *wptr, struct mm_struct *mm);
static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
		uint32_t engine_id, uint32_t queue_id,
		uint32_t (**dump)[2], uint32_t *n_regs);
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
		uint32_t pipe_id, uint32_t queue_id);
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
		enum kfd_preempt_type reset_type,
		unsigned int utimeout, uint32_t pipe_id,
		uint32_t queue_id);
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
		unsigned int utimeout);
#if 0
static uint32_t get_watch_base_addr(struct amdgpu_device *adev);
#endif
static int kgd_address_watch_disable(struct kgd_dev *kgd);
static int kgd_address_watch_execute(struct kgd_dev *kgd,
		unsigned int watch_point_id,
		uint32_t cntl_val,
		uint32_t addr_hi,
		uint32_t addr_lo);
static int kgd_wave_control_execute(struct kgd_dev *kgd,
		uint32_t gfx_index_val,
		uint32_t sq_cmd);
static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
		unsigned int watch_point_id,
		unsigned int reg_offset);

static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
		uint8_t vmid);
static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
		uint8_t vmid);
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
		uint64_t page_table_base);
static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
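
/* Report the ASIC's tiling configuration to amdkfd. */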
static int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
		struct tile_config *config)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	config->gb_addr_config = adev->gfx.config.gb_addr_config;
#if 0
	/* Disabled: the mc_arb_ramcfg-based bank/rank counts used on older
	 * ASICs are not wired up for this ASIC.
	 */
	config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
					MC_ARB_RAMCFG, NOOFBANK);
	config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
					MC_ARB_RAMCFG, NOOFRANKS);
#endif

	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
	config->num_tile_configs =
			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
	config->macro_tile_config_ptr =
			adev->gfx.config.macrotile_mode_array;
	config->num_macro_tile_configs =
			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);

	return 0;
}

static const struct kfd2kgd_calls kfd2kgd = {
	.program_sh_mem_settings = kgd_program_sh_mem_settings,
	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
	.init_interrupts = kgd_init_interrupts,
	.hqd_load = kgd_hqd_load,
	.hqd_sdma_load = kgd_hqd_sdma_load,
	.hqd_dump = kgd_hqd_dump,
	.hqd_sdma_dump = kgd_hqd_sdma_dump,
	.hqd_is_occupied = kgd_hqd_is_occupied,
	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
	.hqd_destroy = kgd_hqd_destroy,
	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
	.address_watch_disable = kgd_address_watch_disable,
	.address_watch_execute = kgd_address_watch_execute,
	.wave_control_execute = kgd_wave_control_execute,
	.address_watch_get_offset = kgd_address_watch_get_offset,
	.get_atc_vmid_pasid_mapping_pasid =
			get_atc_vmid_pasid_mapping_pasid,
	.get_atc_vmid_pasid_mapping_valid =
			get_atc_vmid_pasid_mapping_valid,
	.invalidate_tlbs = invalidate_tlbs,
	.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
	.set_vm_context_page_table_base = set_vm_context_page_table_base,
	.get_tile_config = amdgpu_amdkfd_get_tile_config,
};

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_10_0_get_functions(void)
{
	return (struct kfd2kgd_calls *)&kfd2kgd;
}

static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}

static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
			uint32_t queue, uint32_t vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	mutex_lock(&adev->srbm_mutex);
	nv_grbm_select(adev, mec, pipe, queue, vmid);
}

static void unlock_srbm(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	nv_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}

static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
			uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, queue_id, 0);
}
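
/* Bit position of this queue in the CP_PQ_WPTR_POLL_CNTL1 mask. */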
static uint32_t get_queue_mask(struct amdgpu_device *adev,
			       uint32_t pipe_id, uint32_t queue_id)
{
	unsigned int bit = (pipe_id * adev->gfx.mec.num_queue_per_pipe +
			    queue_id) & 31;

	return ((uint32_t)1) << bit;
}

static void release_queue(struct kgd_dev *kgd)
{
	unlock_srbm(kgd);
}

static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
					uint32_t sh_mem_config,
					uint32_t sh_mem_ape1_base,
					uint32_t sh_mem_ape1_limit,
					uint32_t sh_mem_bases)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	lock_srbm(kgd, 0, 0, 0, vmid);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), sh_mem_config);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES), sh_mem_bases);
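	/* The APE1 registers no longer exist on this generation, so the
	 * ape1 arguments are accepted but not programmed.
	 */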

	unlock_srbm(kgd);
}

static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
					unsigned int vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
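	/* A pasid of 0 clears the mapping; otherwise the PASID is
	 * programmed together with the VALID bit.
	 */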
	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
			ATC_VMID0_PASID_MAPPING__VALID_MASK;

	pr_debug("pasid 0x%x vmid %d, reg value %x\n", pasid, vmid,
		 pasid_mapping);
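
	/* Program the VMID -> PASID mapping in the ATC. */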
	pr_debug("ATHUB, reg %x\n",
		 SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid);
	WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid,
	       pasid_mapping);

#if 0
	/* Disabled: wait for the ATC to acknowledge the mapping update,
	 * then clear the update-status bit.
	 */
	while (!(RREG32(SOC15_REG_OFFSET(
			ATHUB, 0,
			mmATC_VMID_PASID_MAPPING_UPDATE_STATUS)) &
		 (1U << vmid)))
		cpu_relax();

	pr_debug("ATHUB mapping update finished\n");
	WREG32(SOC15_REG_OFFSET(ATHUB, 0,
				mmATC_VMID_PASID_MAPPING_UPDATE_STATUS),
	       1U << vmid);
#endif

	/* Also program the PASID into the IH block's VMID lookup table. */
	pr_debug("update mapping for IH block and mmhub\n");
	WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid,
	       pasid_mapping);

	return 0;
}
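
/* Enable time-stamp and opcode-error interrupts for the given compute pipe. */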
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t mec;
	uint32_t pipe;

	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, 0, 0);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmCPC_INT_CNTL),
	       CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
	       CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);

	unlock_srbm(kgd);

	return 0;
}

static uint32_t get_sdma_base_addr(struct amdgpu_device *adev,
				unsigned int engine_id,
				unsigned int queue_id)
{
	/* Both bases are expressed relative to the mmSDMA0_RLC0 register
	 * names, so the same per-queue stride works for either engine.
	 */
	uint32_t base[2] = {
		SOC15_REG_OFFSET(SDMA0, 0,
				 mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
		SOC15_REG_OFFSET(SDMA1, 0,
				 mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL
	};
	uint32_t retval;

	retval = base[engine_id] + queue_id * (mmSDMA0_RLC1_RB_CNTL -
					       mmSDMA0_RLC0_RB_CNTL);

	pr_debug("sdma base address: 0x%x\n", retval);

	return retval;
}

#if 0
static uint32_t get_watch_base_addr(struct amdgpu_device *adev)
{
	uint32_t retval = SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_ADDR_H) -
			mmTCP_WATCH0_ADDR_H;

	pr_debug("kfd: reg watch base address: 0x%x\n", retval);

	return retval;
}
#endif

static inline struct v10_compute_mqd *get_mqd(void *mqd)
{
	return (struct v10_compute_mqd *)mqd;
}

static inline struct v10_sdma_mqd *get_sdma_mqd(void *mqd)
{
	return (struct v10_sdma_mqd *)mqd;
}

static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v10_compute_mqd *m;
	uint32_t *mqd_hqd;
	uint32_t reg, hqd_base, data;

	m = get_mqd(mqd);

	pr_debug("Load hqd of pipe %d queue %d\n", pipe_id, queue_id);
	acquire_queue(kgd, pipe_id, queue_id);
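
	/* vmid 0 is the HIQ, set up at driver-init time: tell the RLC
	 * scheduler which MEC pipe and queue host it.
	 */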
	if (m->cp_hqd_vmid == 0) {
		uint32_t value, mec, pipe;

		mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
		pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

		pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
			 mec, pipe, queue_id);
		value = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS));
		value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
				((mec << 5) | (pipe << 3) | queue_id | 0x80));
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS), value);
	}
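
	/* The HQD registers mirror the MQD layout contiguously from
	 * CP_MQD_BASE_ADDR up to CP_HQD_PQ_WPTR_HI, so copy them in one
	 * loop.
	 */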
	mqd_hqd = &m->cp_mqd_base_addr_lo;
	hqd_base = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);

	for (reg = hqd_base;
	     reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
		WREG32(reg, mqd_hqd[reg - hqd_base]);
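
	/* Re-enable the doorbell on top of the doorbell-control state saved
	 * in the MQD.
	 */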
	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
			     CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL), data);

	if (wptr) {
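		/* Don't read the user wptr with get_user(): this function
		 * may run from a workqueue where the user context is not
		 * accessible. Instead, have the CP poll the wptr from
		 * memory. The MQD only holds the wptr saved at preemption
		 * time, so reconstruct a plausible 64-bit wptr: take the
		 * low bits from the saved rptr, add one queue size if the
		 * saved wptr's low bits are behind the rptr (wraparound),
		 * and keep the saved high-order bits.
		 */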
		uint32_t queue_size =
			2 << REG_GET_FIELD(m->cp_hqd_pq_control,
					   CP_HQD_PQ_CONTROL, QUEUE_SIZE);
		uint64_t guessed_wptr = m->cp_hqd_pq_rptr & (queue_size - 1);

		if ((m->cp_hqd_pq_wptr_lo & (queue_size - 1)) < guessed_wptr)
			guessed_wptr += queue_size;
		guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);
		guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32;

		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_LO),
		       lower_32_bits(guessed_wptr));
		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI),
		       upper_32_bits(guessed_wptr));
		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR),
		       lower_32_bits((uint64_t)wptr));
		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
		       upper_32_bits((uint64_t)wptr));
		pr_debug("%s setting CP_PQ_WPTR_POLL_CNTL1 to %x\n",
			 __func__, get_queue_mask(adev, pipe_id, queue_id));
		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1),
		       get_queue_mask(adev, pipe_id, queue_id));
	}
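
	/* Kick the EOP ring fetcher before marking the queue active. */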
	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_RPTR),
	       REG_SET_FIELD(m->cp_hqd_eop_rptr,
			     CP_HQD_EOP_RPTR, INIT_FETCHER, 1));

	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE), data);

	release_queue(kgd);

	return 0;
}

static int kgd_hqd_dump(struct kgd_dev *kgd,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t i = 0, reg;
#define HQD_N_REGS 56
#define DUMP_REG(addr) do {				\
		if (WARN_ON_ONCE(i >= HQD_N_REGS))	\
			break;				\
		(*dump)[i][0] = (addr) << 2;		\
		(*dump)[i++][1] = RREG32(addr);		\
	} while (0)

	*dump = kmalloc(HQD_N_REGS * 2 * sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	acquire_queue(kgd, pipe_id, queue_id);

	for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
	     reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
		DUMP_REG(reg);

	release_queue(kgd);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}
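
/* Program an SDMA RLC queue from its MQD and (re)enable its ring buffer. */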
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
			     uint32_t __user *wptr, struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v10_sdma_mqd *m;
	uint32_t sdma_base_addr, sdmax_gfx_context_cntl;
	unsigned long end_jiffies;
	uint32_t data;
	uint64_t data64;
	uint64_t __user *wptr64 = (uint64_t __user *)wptr;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
					    m->sdma_queue_id);
	pr_debug("sdma load base addr %x for engine %d, queue %d\n",
		 sdma_base_addr, m->sdma_engine_id, m->sdma_queue_id);
	sdmax_gfx_context_cntl = m->sdma_engine_id ?
		SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GFX_CONTEXT_CNTL) :
		SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_CONTEXT_CNTL);

	/* Disable the ring buffer and wait for the engine to go idle. */
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
	       m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));

	end_jiffies = msecs_to_jiffies(2000) + jiffies;
	while (true) {
		data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies))
			return -ETIME;
		usleep_range(500, 1000);
	}
	data = RREG32(sdmax_gfx_context_cntl);
	data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
			     RESUME_CTX, 0);
	WREG32(sdmax_gfx_context_cntl, data);

	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL_OFFSET,
	       m->sdmax_rlcx_doorbell_offset);

	data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
			     ENABLE, 1);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI,
	       m->sdmax_rlcx_rb_rptr_hi);

	WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
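	/* Use the user-space wptr if it is readable; otherwise fall back to
	 * the saved rptr, i.e. treat the queue as empty.
	 */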
	if (read_user_wptr(mm, wptr64, data64)) {
		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
		       lower_32_bits(data64));
		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
		       upper_32_bits(data64));
	} else {
		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
		       m->sdmax_rlcx_rb_rptr);
		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
		       m->sdmax_rlcx_rb_rptr_hi);
	}
	WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);

	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
	       m->sdmax_rlcx_rb_base_hi);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
	       m->sdmax_rlcx_rb_rptr_addr_lo);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
	       m->sdmax_rlcx_rb_rptr_addr_hi);

	data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
			     RB_ENABLE, 1);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);

	return 0;
}

static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
			     uint32_t engine_id, uint32_t queue_id,
			     uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t sdma_base_addr = get_sdma_base_addr(adev, engine_id,
						     queue_id);
	uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19 + 6 + 7 + 10)

	pr_debug("sdma dump engine id %d queue_id %d\n", engine_id, queue_id);
	pr_debug("sdma base addr %x\n", sdma_base_addr);

	*dump = kmalloc(HQD_N_REGS * 2 * sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
		DUMP_REG(sdma_base_addr + reg);
	for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
		DUMP_REG(sdma_base_addr + reg);
	for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
	     reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
		DUMP_REG(sdma_base_addr + reg);
	for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
	     reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
		DUMP_REG(sdma_base_addr + reg);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}

static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
				uint32_t pipe_id, uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t act;
	bool retval = false;
	uint32_t low, high;

	acquire_queue(kgd, pipe_id, queue_id);
	act = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
	if (act) {
		/* The HQD is occupied by this queue only if the PQ base
		 * matches; the registers hold the address shifted right
		 * by 8.
		 */
		low = lower_32_bits(queue_address >> 8);
		high = upper_32_bits(queue_address >> 8);

		if (low == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE)) &&
		    high == RREG32(SOC15_REG_OFFSET(GC, 0,
						    mmCP_HQD_PQ_BASE_HI)))
			retval = true;
	}
	release_queue(kgd);
	return retval;
}

static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v10_sdma_mqd *m;
	uint32_t sdma_base_addr;
	uint32_t sdma_rlc_rb_cntl;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
					    m->sdma_queue_id);

	sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);

	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
		return true;

	return false;
}

static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
			   enum kfd_preempt_type reset_type,
			   unsigned int utimeout, uint32_t pipe_id,
			   uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	enum hqd_dequeue_request_type type;
	unsigned long end_jiffies;
	uint32_t temp;
	struct v10_compute_mqd *m = get_mqd(mqd);

#if 0
	unsigned long flags;
	int retry;
#endif

	acquire_queue(kgd, pipe_id, queue_id);

	if (m->cp_hqd_vmid == 0)
		WREG32_FIELD15(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0);

	switch (reset_type) {
	case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
		type = DRAIN_PIPE;
		break;
	case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
		type = RESET_WAVES;
		break;
	default:
		type = DRAIN_PIPE;
		break;
	}

#if 0
	/* Disabled: wait for the HQD's internal queue (IQ) timer to quiesce
	 * before issuing the dequeue request, spinning with interrupts off.
	 */
	local_irq_save(flags);
	preempt_disable();
	retry = 5000;
	while (true) {
		temp = RREG32(mmCP_HQD_IQ_TIMER);
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
			pr_debug("HW is processing IQ\n");
			goto loop;
		}
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
					== 3)
				break;

			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
					>= 10)
				break;
			pr_debug("IQ timer is active\n");
		} else
			break;
loop:
		if (!retry) {
			pr_err("CP HQD IQ timer status time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	retry = 1000;
	while (true) {
		temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
		if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
			break;
		pr_debug("Dequeue request is pending\n");

		if (!retry) {
			pr_err("CP HQD dequeue request time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	local_irq_restore(flags);
	preempt_enable();
#endif
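
	/* Issue the dequeue request, then poll CP_HQD_ACTIVE until the
	 * queue deactivates or the caller's timeout expires.
	 */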
	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_DEQUEUE_REQUEST), type);

	end_jiffies = (utimeout * HZ / 1000) + jiffies;
	while (true) {
		temp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("cp queue preemption time out.\n");
			release_queue(kgd);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	release_queue(kgd);
	return 0;
}

static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int utimeout)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v10_sdma_mqd *m;
	uint32_t sdma_base_addr;
	uint32_t temp;
	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
					    m->sdma_queue_id);

	temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);

	while (true) {
		temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies))
			return -ETIME;
		usleep_range(500, 1000);
	}

	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
	       RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
	       SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);

	/* Save the current read pointers back into the MQD so the queue
	 * can be restored later.
	 */
	m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
	m->sdmax_rlcx_rb_rptr_hi =
		RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI);

	return 0;
}

static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
					     uint8_t vmid)
{
	uint32_t reg;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
		     + vmid);
	return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
}

static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
						 uint8_t vmid)
{
	uint32_t reg;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
		     + vmid);
	return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
}

static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
	uint32_t req = (1 << vmid) |
		(0 << GCVM_INVALIDATE_ENG0_REQ__FLUSH_TYPE__SHIFT) |
		GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PTES_MASK |
		GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE0_MASK |
		GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE1_MASK |
		GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE2_MASK |
		GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L1_PTES_MASK;

	mutex_lock(&adev->srbm_mutex);
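
	/* Program the full address range so the request invalidates every
	 * page for this VMID, then issue the request on invalidation
	 * engine 0 and poll for its ACK.
	 */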
	WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32),
	       0xffffffff);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_HI32),
	       0x0000001f);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_REQ), req);

	while (!(RREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ACK)) &
		 (1 << vmid)))
		cpu_relax();

	mutex_unlock(&adev->srbm_mutex);
}
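
/* Invalidate TLBs for a PASID by submitting an INVALIDATE_TLBS packet
 * through the KIQ ring.
 */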
static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid)
{
	signed long r;
	uint32_t seq;
	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;

	spin_lock(&adev->gfx.kiq.ring_lock);
	amdgpu_ring_alloc(ring, 12);
	amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
	amdgpu_ring_write(ring,
			  PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
			  PACKET3_INVALIDATE_TLBS_PASID(pasid));
	amdgpu_fence_emit_polling(ring, &seq);
	amdgpu_ring_commit(ring);
	spin_unlock(&adev->gfx.kiq.ring_lock);

	r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
	if (r < 1) {
		DRM_ERROR("wait for kiq fence error: %ld.\n", r);
		return -ETIME;
	}

	return 0;
}

static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
	int vmid;
	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;

	/* Prefer the KIQ path when it is usable; on emulation or before the
	 * KIQ scheduler is ready, fall back to a manual per-VMID request.
	 */
	if (amdgpu_emu_mode == 0 && ring->sched.ready)
		return invalidate_tlbs_with_kiq(adev, pasid);

	for (vmid = 0; vmid < 16; vmid++) {
		if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
			continue;
		if (get_atc_vmid_pasid_mapping_valid(kgd, vmid)) {
			if (get_atc_vmid_pasid_mapping_pasid(kgd, vmid)
					== pasid) {
				write_vmid_invalidate_request(kgd, vmid);
				break;
			}
		}
	}

	return 0;
}

static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
		pr_err("non kfd vmid %d\n", vmid);
		return 0;
	}

	write_vmid_invalidate_request(kgd, vmid);
	return 0;
}

/* Address-watch points are not implemented for this ASIC; these are
 * no-op stubs.
 */
static int kgd_address_watch_disable(struct kgd_dev *kgd)
{
	return 0;
}

static int kgd_address_watch_execute(struct kgd_dev *kgd,
				     unsigned int watch_point_id,
				     uint32_t cntl_val,
				     uint32_t addr_hi,
				     uint32_t addr_lo)
{
	return 0;
}

static int kgd_wave_control_execute(struct kgd_dev *kgd,
				    uint32_t gfx_index_val,
				    uint32_t sq_cmd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t data = 0;

	mutex_lock(&adev->grbm_idx_mutex);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), gfx_index_val);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CMD), sq_cmd);

	/* Restore GRBM_GFX_INDEX to broadcast mode before dropping the
	 * lock.
	 */
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
			     INSTANCE_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
			     SA_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
			     SE_BROADCAST_WRITES, 1);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), data);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
					     unsigned int watch_point_id,
					     unsigned int reg_offset)
{
	return 0;
}

static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
					   uint64_t page_table_base)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint64_t base = page_table_base | AMDGPU_PTE_VALID;

	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
		pr_err("trying to set page table base for wrong VMID %u\n",
		       vmid);
		return;
	}
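
	/* Program this VMID's GFXHUB page-table context: the mapped range
	 * spans from 0 to max_pfn, and the page-table base carries the
	 * valid bit. The CONTEXT registers come in LO32/HI32 pairs, hence
	 * the vmid * 2 stride.
	 */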
	WREG32(SOC15_REG_OFFSET(GC, 0,
		mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32) + (vmid * 2), 0);
	WREG32(SOC15_REG_OFFSET(GC, 0,
		mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32) + (vmid * 2), 0);

	WREG32(SOC15_REG_OFFSET(GC, 0,
		mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32) + (vmid * 2),
		lower_32_bits(adev->vm_manager.max_pfn - 1));
	WREG32(SOC15_REG_OFFSET(GC, 0,
		mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32) + (vmid * 2),
		upper_32_bits(adev->vm_manager.max_pfn - 1));

	WREG32(SOC15_REG_OFFSET(GC, 0,
		mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) + (vmid * 2),
		lower_32_bits(base));
	WREG32(SOC15_REG_OFFSET(GC, 0,
		mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) + (vmid * 2),
		upper_32_bits(base));
}