/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mmu_context.h>

#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "gfx_v8_0.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "oss/oss_3_0_sh_mask.h"
#include "oss/oss_3_0_d.h"
#include "gmc/gmc_8_1_sh_mask.h"
#include "gmc/gmc_8_1_d.h"
#include "vi_structs.h"
#include "vid.h"

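/* Dequeue request types written to CP_HQD_DEQUEUE_REQUEST in kgd_hqd_destroy() */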
enum hqd_dequeue_request_type {
	NO_ACTION = 0,
	DRAIN_PIPE,
	RESET_WAVES
};

static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}

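/*
 * Select which MEC/pipe/queue/VMID aperture subsequent indexed register
 * accesses refer to. The SRBM mutex is held until unlock_srbm(), so the
 * selection cannot change underneath the caller.
 */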
static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
			uint32_t queue, uint32_t vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);

	mutex_lock(&adev->srbm_mutex);
	WREG32(mmSRBM_GFX_CNTL, value);
}

static void unlock_srbm(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	WREG32(mmSRBM_GFX_CNTL, 0);
	mutex_unlock(&adev->srbm_mutex);
}

static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
				uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, queue_id, 0);
}

static void release_queue(struct kgd_dev *kgd)
{
	unlock_srbm(kgd);
}

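/* Program the SH memory aperture configuration registers for the given VMID. */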
static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
					uint32_t sh_mem_config,
					uint32_t sh_mem_ape1_base,
					uint32_t sh_mem_ape1_limit,
					uint32_t sh_mem_bases)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	lock_srbm(kgd, 0, 0, 0, vmid);

	WREG32(mmSH_MEM_CONFIG, sh_mem_config);
	WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
	WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
	WREG32(mmSH_MEM_BASES, sh_mem_bases);

	unlock_srbm(kgd);
}

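/*
 * Map a PASID to a VMID in the ATC, and mirror the mapping in the IH
 * block's VMID lookup table.
 */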
static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
					unsigned int vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	/*
	 * We have to assume that there is no outstanding mapping.
	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
	 * a mapping is in progress or because a mapping finished
	 * and the SW cleared it.
	 * So the protocol is to always wait & clear.
	 */
	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
			ATC_VMID0_PASID_MAPPING__VALID_MASK;

	WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);

	while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
		cpu_relax();
	WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);

	/* Mapping vmid to pasid also for IH block */
	WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);

	return 0;
}

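/* Enable timestamp and opcode-error interrupts for the given compute pipe. */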
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t mec;
	uint32_t pipe;

	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, 0, 0);

	WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
			CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);

	unlock_srbm(kgd);

	return 0;
}

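/*
 * Compute the register offset of the RLC queue selected by the SDMA
 * engine/queue IDs stored in the MQD.
 */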
static inline uint32_t get_sdma_rlc_reg_offset(struct vi_sdma_mqd *m)
{
	uint32_t retval;

	retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
		m->sdma_queue_id * KFD_VI_SDMA_QUEUE_OFFSET;

	pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n",
			m->sdma_engine_id, m->sdma_queue_id, retval);

	return retval;
}

static inline struct vi_mqd *get_mqd(void *mqd)
{
	return (struct vi_mqd *)mqd;
}

static inline struct vi_sdma_mqd *get_sdma_mqd(void *mqd)
{
	return (struct vi_sdma_mqd *)mqd;
}

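/*
 * Load an MQD into an HQD: program the HQD registers from the MQD image,
 * enable the doorbell, copy the user-space write pointer into the queue
 * and activate it.
 */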
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct vi_mqd *m;
	uint32_t *mqd_hqd;
	uint32_t reg, wptr_val, data;
	bool valid_wptr = false;

	m = get_mqd(mqd);

	acquire_queue(kgd, pipe_id, queue_id);

	/* HIQ is set during driver init period with vmid set to 0 */
	if (m->cp_hqd_vmid == 0) {
		uint32_t value, mec, pipe;

		mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
		pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

		pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
			mec, pipe, queue_id);
		value = RREG32(mmRLC_CP_SCHEDULERS);
		value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
			((mec << 5) | (pipe << 3) | queue_id | 0x80));
		WREG32(mmRLC_CP_SCHEDULERS, value);
	}

	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
	mqd_hqd = &m->cp_mqd_base_addr_lo;

	for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_CONTROL; reg++)
		WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);

	/* Tonga errata: EOP RPTR/WPTR should be left unmodified.
	 * This is safe assuming EOP writes and polls are always done
	 * on the same queue. The EOP register range is therefore
	 * skipped on Tonga and restored from the MQD on all other
	 * ASICs.
	 */
	if (get_amdgpu_device(kgd)->asic_type != CHIP_TONGA) {
		WREG32(mmCP_HQD_EOP_RPTR, m->cp_hqd_eop_rptr);
		WREG32(mmCP_HQD_EOP_WPTR, m->cp_hqd_eop_wptr);
		WREG32(mmCP_HQD_EOP_WPTR_MEM, m->cp_hqd_eop_wptr_mem);
	}

	for (reg = mmCP_HQD_EOP_EVENTS; reg <= mmCP_HQD_ERROR; reg++)
		WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);

	/* Activate doorbell logic so that subsequent user-space
	 * write-pointer updates are monitored by the hardware.
	 */
	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
			     CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);

	/* read_user_wptr may take the mm->mmap_sem.
	 * release srbm_mutex to avoid circular dependency between
	 * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
	 */
	release_queue(kgd);
	valid_wptr = read_user_wptr(mm, wptr, wptr_val);
	acquire_queue(kgd, pipe_id, queue_id);
	if (valid_wptr)
		WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);

	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
	WREG32(mmCP_HQD_ACTIVE, data);

	release_queue(kgd);

	return 0;
}

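/* Snapshot the HQD registers of a queue into a caller-owned dump buffer. */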
static int kgd_hqd_dump(struct kgd_dev *kgd,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t i = 0, reg;
#define HQD_N_REGS (54+4)
#define DUMP_REG(addr) do {				\
		if (WARN_ON_ONCE(i >= HQD_N_REGS))	\
			break;				\
		(*dump)[i][0] = (addr) << 2;		\
		(*dump)[i++][1] = RREG32(addr);		\
	} while (0)

	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	acquire_queue(kgd, pipe_id, queue_id);

	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE0);
	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE1);
	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE2);
	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE3);

	for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_DONES; reg++)
		DUMP_REG(reg);

	release_queue(kgd);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}

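/*
 * Load an SDMA MQD: disable the ring buffer, wait for the engine to go
 * idle, then program the doorbell, pointers and base addresses before
 * re-enabling the ring buffer.
 */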
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
				uint32_t __user *wptr, struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct vi_sdma_mqd *m;
	unsigned long end_jiffies;
	uint32_t sdma_rlc_reg_offset;
	uint32_t data;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
		m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));

	end_jiffies = msecs_to_jiffies(2000) + jiffies;
	while (true) {
		data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("SDMA RLC not idle in %s\n", __func__);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
			     ENABLE, 1);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
				m->sdmax_rlcx_rb_rptr);

	if (read_user_wptr(mm, wptr, data))
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, data);
	else
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
		       m->sdmax_rlcx_rb_rptr);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_VIRTUAL_ADDR,
				m->sdmax_rlcx_virtual_addr);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
			m->sdmax_rlcx_rb_base_hi);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
			m->sdmax_rlcx_rb_rptr_addr_lo);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
			m->sdmax_rlcx_rb_rptr_addr_hi);

	data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
			     RB_ENABLE, 1);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);

	return 0;
}

static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
			     uint32_t engine_id, uint32_t queue_id,
			     uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t sdma_offset = engine_id * SDMA1_REGISTER_OFFSET +
		queue_id * KFD_VI_SDMA_QUEUE_OFFSET;
	uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+4+2+3+7)

	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
		DUMP_REG(sdma_offset + reg);
	for (reg = mmSDMA0_RLC0_VIRTUAL_ADDR; reg <= mmSDMA0_RLC0_WATERMARK;
	     reg++)
		DUMP_REG(sdma_offset + reg);
	for (reg = mmSDMA0_RLC0_CSA_ADDR_LO; reg <= mmSDMA0_RLC0_CSA_ADDR_HI;
	     reg++)
		DUMP_REG(sdma_offset + reg);
	for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN; reg <= mmSDMA0_RLC0_DUMMY_REG;
	     reg++)
		DUMP_REG(sdma_offset + reg);
	for (reg = mmSDMA0_RLC0_MIDCMD_DATA0; reg <= mmSDMA0_RLC0_MIDCMD_CNTL;
	     reg++)
		DUMP_REG(sdma_offset + reg);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}

static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
				uint32_t pipe_id, uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t act;
	bool retval = false;
	uint32_t low, high;

	acquire_queue(kgd, pipe_id, queue_id);
	act = RREG32(mmCP_HQD_ACTIVE);
	if (act) {
		low = lower_32_bits(queue_address >> 8);
		high = upper_32_bits(queue_address >> 8);

		if (low == RREG32(mmCP_HQD_PQ_BASE) &&
				high == RREG32(mmCP_HQD_PQ_BASE_HI))
			retval = true;
	}
	release_queue(kgd);
	return retval;
}

static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct vi_sdma_mqd *m;
	uint32_t sdma_rlc_reg_offset;
	uint32_t sdma_rlc_rb_cntl;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);

	sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);

	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
		return true;

	return false;
}

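/*
 * Preempt or reset a compute queue: issue a dequeue request and poll
 * CP_HQD_ACTIVE until the HQD is released or the timeout expires.
 */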
static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
				enum kfd_preempt_type reset_type,
				unsigned int utimeout, uint32_t pipe_id,
				uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t temp;
	enum hqd_dequeue_request_type type;
	unsigned long flags, end_jiffies;
	int retry;
	struct vi_mqd *m = get_mqd(mqd);

	if (adev->in_gpu_reset)
		return -EIO;

	acquire_queue(kgd, pipe_id, queue_id);

	if (m->cp_hqd_vmid == 0)
		WREG32_FIELD(RLC_CP_SCHEDULERS, scheduler1, 0);

	switch (reset_type) {
	case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
		type = DRAIN_PIPE;
		break;
	case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
		type = RESET_WAVES;
		break;
	default:
		type = DRAIN_PIPE;
		break;
	}

	/* Workaround: If IQ timer is active and the wait time is close to or
	 * equal to 0, dequeueing is not safe. Wait until either the wait time
	 * is larger or the timer is cleared. Also, ensure that IQ_REQ_PEND is
	 * cleared before continuing. Interrupts and preemption are disabled
	 * so that the register polls below are not perturbed.
	 */
	local_irq_save(flags);
	preempt_disable();
	retry = 5000;
	while (true) {
		temp = RREG32(mmCP_HQD_IQ_TIMER);
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
			pr_debug("HW is processing IQ\n");
			goto loop;
		}
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
					== 3) /* SEM-rearm is safe */
				break;
			/* Wait time 3 is safe for CP, but our MMIO read/write
			 * time is close to 1 microsecond, so check for 10 to
			 * leave more buffer room
			 */
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
					>= 10)
				break;
			pr_debug("IQ timer is active\n");
		} else
			break;
loop:
		if (!retry) {
			pr_err("CP HQD IQ timer status time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	retry = 1000;
	while (true) {
		temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
		if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
			break;
		pr_debug("Dequeue request is pending\n");

		if (!retry) {
			pr_err("CP HQD dequeue request time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	local_irq_restore(flags);
	preempt_enable();

	WREG32(mmCP_HQD_DEQUEUE_REQUEST, type);

	end_jiffies = (utimeout * HZ / 1000) + jiffies;
	while (true) {
		temp = RREG32(mmCP_HQD_ACTIVE);
		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("cp queue preemption time out.\n");
			release_queue(kgd);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	release_queue(kgd);
	return 0;
}

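/*
 * Stop an SDMA queue: disable the ring buffer, wait for the engine to
 * go idle, then save the current read pointer back into the MQD.
 */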
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int utimeout)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct vi_sdma_mqd *m;
	uint32_t sdma_rlc_reg_offset;
	uint32_t temp;
	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);

	temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);

	while (true) {
		temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("SDMA RLC not idle in %s\n", __func__);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);

	m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);

	return 0;
}

static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
					uint8_t vmid, uint16_t *p_pasid)
{
	uint32_t value;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	value = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}

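/*
 * The address-watch entry points below (and the offset query further
 * down) are no-op stubs in this implementation.
 */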
static int kgd_address_watch_disable(struct kgd_dev *kgd)
{
	return 0;
}

static int kgd_address_watch_execute(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					uint32_t cntl_val,
					uint32_t addr_hi,
					uint32_t addr_lo)
{
	return 0;
}

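/*
 * Issue an SQ_CMD wave-control command to the waves selected by
 * gfx_index_val, then restore GRBM to broadcast mode.
 */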
static int kgd_wave_control_execute(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t data = 0;

	mutex_lock(&adev->grbm_idx_mutex);

	WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
	WREG32(mmSQ_CMD, sq_cmd);

	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		INSTANCE_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SH_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SE_BROADCAST_WRITES, 1);

	WREG32(mmGRBM_GFX_INDEX, data);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset)
{
	return 0;
}

static void set_scratch_backing_va(struct kgd_dev *kgd,
					uint64_t va, uint32_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	lock_srbm(kgd, 0, 0, 0, vmid);
	WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
	unlock_srbm(kgd);
}

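/*
 * Program the page-table base for a KFD-owned VMID. The register offset
 * arithmetic assumes KFD VMIDs start at 8 (VM_CONTEXT8..15 bank).
 */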
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
		uint64_t page_table_base)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
		pr_err("trying to set page table base for wrong VMID\n");
		return;
	}
	WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8,
			lower_32_bits(page_table_base));
}

const struct kfd2kgd_calls gfx_v8_kfd2kgd = {
	.program_sh_mem_settings = kgd_program_sh_mem_settings,
	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
	.init_interrupts = kgd_init_interrupts,
	.hqd_load = kgd_hqd_load,
	.hqd_sdma_load = kgd_hqd_sdma_load,
	.hqd_dump = kgd_hqd_dump,
	.hqd_sdma_dump = kgd_hqd_sdma_dump,
	.hqd_is_occupied = kgd_hqd_is_occupied,
	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
	.hqd_destroy = kgd_hqd_destroy,
	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
	.address_watch_disable = kgd_address_watch_disable,
	.address_watch_execute = kgd_address_watch_execute,
	.wave_control_execute = kgd_wave_control_execute,
	.address_watch_get_offset = kgd_address_watch_get_offset,
	.get_atc_vmid_pasid_mapping_info =
			get_atc_vmid_pasid_mapping_info,
	.set_scratch_backing_va = set_scratch_backing_va,
	.set_vm_context_page_table_base = set_vm_context_page_table_base,
};