linux/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
/*
 * Copyright 2016-2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include "kfd_priv.h"
#include "kfd_mqd_manager.h"
#include "v9_structs.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "sdma0/sdma0_4_0_sh_mask.h"
#include "amdgpu_amdkfd.h"

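/* Cast helpers: view the raw MQD buffer as a compute or SDMA MQD. */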
static inline struct v9_mqd *get_mqd(void *mqd)
{
        return (struct v9_mqd *)mqd;
}

static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
{
        return (struct v9_sdma_mqd *)mqd;
}

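/*
 * Pack the queue's CU mask into the per-shader-engine static thread
 * management fields of the MQD. If no CU mask has been set, the existing
 * (all CUs enabled) values are left untouched.
 */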
static void update_cu_mask(struct mqd_manager *mm, void *mqd,
                        struct queue_properties *q)
{
        struct v9_mqd *m;
        uint32_t se_mask[KFD_MAX_NUM_SE] = {0};

        if (q->cu_mask_count == 0)
                return;

        mqd_symmetrically_map_cu_mask(mm,
                q->cu_mask, q->cu_mask_count, se_mask);

        m = get_mqd(mqd);
        m->compute_static_thread_mgmt_se0 = se_mask[0];
        m->compute_static_thread_mgmt_se1 = se_mask[1];
        m->compute_static_thread_mgmt_se2 = se_mask[2];
        m->compute_static_thread_mgmt_se3 = se_mask[3];
        m->compute_static_thread_mgmt_se4 = se_mask[4];
        m->compute_static_thread_mgmt_se5 = se_mask[5];
        m->compute_static_thread_mgmt_se6 = se_mask[6];
        m->compute_static_thread_mgmt_se7 = se_mask[7];

        pr_debug("update cu mask to %#x %#x %#x %#x %#x %#x %#x %#x\n",
                m->compute_static_thread_mgmt_se0,
                m->compute_static_thread_mgmt_se1,
                m->compute_static_thread_mgmt_se2,
                m->compute_static_thread_mgmt_se3,
                m->compute_static_thread_mgmt_se4,
                m->compute_static_thread_mgmt_se5,
                m->compute_static_thread_mgmt_se6,
                m->compute_static_thread_mgmt_se7);
}

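/* Map the queue priority onto the HQD pipe and queue priority fields. */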
static void set_priority(struct v9_mqd *m, struct queue_properties *q)
{
        m->cp_hqd_pipe_priority = pipe_priority_map[q->priority];
        m->cp_hqd_queue_priority = q->priority;
}

static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
                struct queue_properties *q)
{
        int retval;
        struct kfd_mem_obj *mqd_mem_obj = NULL;

        /* For V9 only, due to a HW bug, the control stack of a user mode
         * compute queue needs to be allocated just behind the page boundary
         * of its regular MQD buffer. So we allocate an enlarged MQD buffer:
         * the first page of the buffer serves as the regular MQD buffer and
         * the remainder holds the control stack. Although the two parts are
         * in the same buffer object, they need different memory types: the
         * MQD part needs UC (uncached) as usual, while the control stack
         * needs NC (non-coherent), unlike the UC type used when the control
         * stack is allocated in user space.
         *
         * Because of all this, we use the GTT allocation function instead
         * of the sub-allocation function for this enlarged MQD buffer.
         * Moreover, in order to achieve two memory types in a single buffer
         * object, we pass a special bo flag AMDGPU_GEM_CREATE_CP_MQD_GFX9 to
         * instruct the amdgpu memory functions to do so.
         */
        if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {
                mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
                if (!mqd_mem_obj)
                        return NULL;
                retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->kgd,
                        ALIGN(q->ctl_stack_size, PAGE_SIZE) +
                                ALIGN(sizeof(struct v9_mqd), PAGE_SIZE),
                        &(mqd_mem_obj->gtt_mem),
                        &(mqd_mem_obj->gpu_addr),
                        (void *)&(mqd_mem_obj->cpu_ptr), true);
        } else {
                retval = kfd_gtt_sa_allocate(kfd, sizeof(struct v9_mqd),
                                &mqd_mem_obj);
        }

        if (retval) {
                kfree(mqd_mem_obj);
                return NULL;
        }

        return mqd_mem_obj;
}

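/*
 * One-time initialization of a compute MQD: zero the buffer, program the
 * static defaults (header, all-CUs-enabled masks, preload size, quantum),
 * record the MQD's GART address, enable the trap handler and CWSR state
 * when configured, then apply the queue properties via the manager's
 * update_mqd callback.
 */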
static void init_mqd(struct mqd_manager *mm, void **mqd,
                        struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
                        struct queue_properties *q)
{
        uint64_t addr;
        struct v9_mqd *m;

        m = (struct v9_mqd *) mqd_mem_obj->cpu_ptr;
        addr = mqd_mem_obj->gpu_addr;

        memset(m, 0, sizeof(struct v9_mqd));

        m->header = 0xC0310800;
        m->compute_pipelinestat_enable = 1;
        m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
        m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
        m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
        m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
        m->compute_static_thread_mgmt_se4 = 0xFFFFFFFF;
        m->compute_static_thread_mgmt_se5 = 0xFFFFFFFF;
        m->compute_static_thread_mgmt_se6 = 0xFFFFFFFF;
        m->compute_static_thread_mgmt_se7 = 0xFFFFFFFF;

        m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
                        0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;

        m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;

        m->cp_mqd_base_addr_lo        = lower_32_bits(addr);
        m->cp_mqd_base_addr_hi        = upper_32_bits(addr);

        m->cp_hqd_quantum = 1 << CP_HQD_QUANTUM__QUANTUM_EN__SHIFT |
                        1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT |
                        1 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT;

        if (q->format == KFD_QUEUE_FORMAT_AQL) {
                m->cp_hqd_aql_control =
                        1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT;
        }

        if (q->tba_addr) {
                m->compute_pgm_rsrc2 |=
                        (1 << COMPUTE_PGM_RSRC2__TRAP_PRESENT__SHIFT);
        }

        if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) {
                m->cp_hqd_persistent_state |=
                        (1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT);
                m->cp_hqd_ctx_save_base_addr_lo =
                        lower_32_bits(q->ctx_save_restore_area_address);
                m->cp_hqd_ctx_save_base_addr_hi =
                        upper_32_bits(q->ctx_save_restore_area_address);
                m->cp_hqd_ctx_save_size = q->ctx_save_restore_area_size;
                m->cp_hqd_cntl_stack_size = q->ctl_stack_size;
                m->cp_hqd_cntl_stack_offset = q->ctl_stack_size;
                m->cp_hqd_wg_state_offset = q->ctl_stack_size;
        }

        *mqd = m;
        if (gart_addr)
                *gart_addr = addr;
        mm->update_mqd(mm, m, q);
}

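/* Program the selected HQD from this MQD through the kfd2kgd interface. */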
static int load_mqd(struct mqd_manager *mm, void *mqd,
                        uint32_t pipe_id, uint32_t queue_id,
                        struct queue_properties *p, struct mm_struct *mms)
{
        /* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
        uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);

        return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id,
                                          (uint32_t __user *)p->write_ptr,
                                          wptr_shift, 0, mms);
}

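/* Load the HIQ MQD through the KIQ, addressed by its doorbell offset. */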
static int hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
                            uint32_t pipe_id, uint32_t queue_id,
                            struct queue_properties *p, struct mm_struct *mms)
{
        return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->kgd, mqd, pipe_id,
                                              queue_id, p->doorbell_off);
}

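/*
 * Refresh the MQD from the current queue properties: ring buffer base and
 * size, rptr report and wptr poll addresses, doorbell, EOP buffer, VMID,
 * AQL-specific control bits, CU mask and priority. Finally recompute
 * whether the queue is active.
 */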
static void update_mqd(struct mqd_manager *mm, void *mqd,
                      struct queue_properties *q)
{
        struct v9_mqd *m;

        m = get_mqd(mqd);

        m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
        m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1;
        pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);

        m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
        m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);

        m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
        m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
        m->cp_hqd_pq_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr);
        m->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr);

        m->cp_hqd_pq_doorbell_control =
                q->doorbell_off <<
                        CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
        pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
                        m->cp_hqd_pq_doorbell_control);

        m->cp_hqd_ib_control =
                3 << CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT |
                1 << CP_HQD_IB_CONTROL__IB_EXE_DISABLE__SHIFT;

        /*
         * HW does not clamp this field correctly. Maximum EOP queue size
         * is constrained by per-SE EOP done signal count, which is 8-bit.
         * Limit is 0xFF EOP entries (= 0x7F8 dwords). CP will not submit
         * more than (EOP entry count - 1) so a queue size of 0x800 dwords
         * is safe, giving a maximum field value of 0xA.
         */
        m->cp_hqd_eop_control = min(0xA,
                order_base_2(q->eop_ring_buffer_size / 4) - 1);
        m->cp_hqd_eop_base_addr_lo =
                        lower_32_bits(q->eop_ring_buffer_address >> 8);
        m->cp_hqd_eop_base_addr_hi =
                        upper_32_bits(q->eop_ring_buffer_address >> 8);

        m->cp_hqd_iq_timer = 0;

        m->cp_hqd_vmid = q->vmid;

        if (q->format == KFD_QUEUE_FORMAT_AQL) {
                m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK |
                                2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT |
                                1 << CP_HQD_PQ_CONTROL__QUEUE_FULL_EN__SHIFT |
                                1 << CP_HQD_PQ_CONTROL__WPP_CLAMP_EN__SHIFT;
                m->cp_hqd_pq_doorbell_control |= 1 <<
                        CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT;
        }
        if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address)
                m->cp_hqd_ctx_save_control = 0;

        update_cu_mask(mm, mqd, q);
        set_priority(m, q);

        q->is_active = QUEUE_IS_ACTIVE(*q);
}

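/* Report the doorbell ID currently programmed in the MQD (HIQ only). */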
static uint32_t read_doorbell_id(void *mqd)
{
        struct v9_mqd *m = (struct v9_mqd *)mqd;

        return m->queue_doorbell_id0;
}

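/* Deactivate the HQD backing this MQD using the requested preemption type. */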
static int destroy_mqd(struct mqd_manager *mm, void *mqd,
                        enum kfd_preempt_type type,
                        unsigned int timeout, uint32_t pipe_id,
                        uint32_t queue_id)
{
        return mm->dev->kfd2kgd->hqd_destroy
                (mm->dev->kgd, mqd, type, timeout,
                pipe_id, queue_id);
}

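/*
 * Release an MQD allocated by allocate_mqd(): enlarged CWSR buffers were
 * allocated from GTT and go back through amdgpu, everything else returns
 * to the KFD GTT sub-allocator.
 */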
static void free_mqd(struct mqd_manager *mm, void *mqd,
                        struct kfd_mem_obj *mqd_mem_obj)
{
        struct kfd_dev *kfd = mm->dev;

        if (mqd_mem_obj->gtt_mem) {
                amdgpu_amdkfd_free_gtt_mem(kfd->kgd, mqd_mem_obj->gtt_mem);
                kfree(mqd_mem_obj);
        } else {
                kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
        }
}

static bool is_occupied(struct mqd_manager *mm, void *mqd,
                        uint64_t queue_address, uint32_t pipe_id,
                        uint32_t queue_id)
{
        return mm->dev->kfd2kgd->hqd_is_occupied(
                mm->dev->kgd, queue_address,
                pipe_id, queue_id);
}

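/*
 * Copy the saved control stack to user space and report how much of the
 * control stack and of the context save area are currently in use.
 */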
static int get_wave_state(struct mqd_manager *mm, void *mqd,
                          void __user *ctl_stack,
                          u32 *ctl_stack_used_size,
                          u32 *save_area_used_size)
{
        struct v9_mqd *m;

        /* Control stack is located one page after MQD. */
        void *mqd_ctl_stack = (void *)((uintptr_t)mqd + PAGE_SIZE);

        m = get_mqd(mqd);

        *ctl_stack_used_size = m->cp_hqd_cntl_stack_size -
                m->cp_hqd_cntl_stack_offset;
        *save_area_used_size = m->cp_hqd_wg_state_offset -
                m->cp_hqd_cntl_stack_size;

        if (copy_to_user(ctl_stack, mqd_ctl_stack, m->cp_hqd_cntl_stack_size))
                return -EFAULT;

        return 0;
}

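/* HIQ and DIQ MQDs are compute MQDs marked as privileged kernel queues. */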
static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
                        struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
                        struct queue_properties *q)
{
        struct v9_mqd *m;

        init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);

        m = get_mqd(*mqd);

        m->cp_hqd_pq_control |= 1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT |
                        1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
}

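/* Clear the SDMA MQD and program it from the queue properties. */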
static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
                struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
                struct queue_properties *q)
{
        struct v9_sdma_mqd *m;

        m = (struct v9_sdma_mqd *) mqd_mem_obj->cpu_ptr;

        memset(m, 0, sizeof(struct v9_sdma_mqd));

        *mqd = m;
        if (gart_addr)
                *gart_addr = mqd_mem_obj->gpu_addr;

        mm->update_mqd(mm, m, q);
}

static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
                uint32_t pipe_id, uint32_t queue_id,
                struct queue_properties *p, struct mm_struct *mms)
{
        return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd,
                                               (uint32_t __user *)p->write_ptr,
                                               mms);
}

#define SDMA_RLC_DUMMY_DEFAULT 0xf

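/*
 * Program the SDMA RLC ring: buffer size, VMID, rptr writeback, base and
 * rptr addresses, doorbell offset, and the engine/queue this MQD targets.
 */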
static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
                struct queue_properties *q)
{
        struct v9_sdma_mqd *m;

        m = get_sdma_mqd(mqd);
        m->sdmax_rlcx_rb_cntl = order_base_2(q->queue_size / 4)
                << SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
                q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
                1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
                6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;

        m->sdmax_rlcx_rb_base = lower_32_bits(q->queue_address >> 8);
        m->sdmax_rlcx_rb_base_hi = upper_32_bits(q->queue_address >> 8);
        m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
        m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
        m->sdmax_rlcx_doorbell_offset =
                q->doorbell_off << SDMA0_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT;

        m->sdma_engine_id = q->sdma_engine_id;
        m->sdma_queue_id = q->sdma_queue_id;
        m->sdmax_rlcx_dummy_reg = SDMA_RLC_DUMMY_DEFAULT;

        q->is_active = QUEUE_IS_ACTIVE(*q);
}

/*
 * The preempt type is ignored here because there is only one way to
 * preempt an SDMA queue.
 */
static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
                enum kfd_preempt_type type,
                unsigned int timeout, uint32_t pipe_id,
                uint32_t queue_id)
{
        return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout);
}

static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd,
                uint64_t queue_address, uint32_t pipe_id,
                uint32_t queue_id)
{
        return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd);
}

#if defined(CONFIG_DEBUG_FS)

static int debugfs_show_mqd(struct seq_file *m, void *data)
{
        seq_hex_dump(m, "    ", DUMP_PREFIX_OFFSET, 32, 4,
                     data, sizeof(struct v9_mqd), false);
        return 0;
}

static int debugfs_show_mqd_sdma(struct seq_file *m, void *data)
{
        seq_hex_dump(m, "    ", DUMP_PREFIX_OFFSET, 32, 4,
                     data, sizeof(struct v9_sdma_mqd), false);
        return 0;
}

#endif

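/*
 * Allocate an mqd_manager for the requested queue type and fill in the
 * GFX v9 callbacks and MQD size. Returns NULL for unknown types or on
 * allocation failure.
 */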
struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
                struct kfd_dev *dev)
{
        struct mqd_manager *mqd;

        if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
                return NULL;

        mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
        if (!mqd)
                return NULL;

        mqd->dev = dev;

        switch (type) {
        case KFD_MQD_TYPE_CP:
                mqd->allocate_mqd = allocate_mqd;
                mqd->init_mqd = init_mqd;
                mqd->free_mqd = free_mqd;
                mqd->load_mqd = load_mqd;
                mqd->update_mqd = update_mqd;
                mqd->destroy_mqd = destroy_mqd;
                mqd->is_occupied = is_occupied;
                mqd->get_wave_state = get_wave_state;
                mqd->mqd_size = sizeof(struct v9_mqd);
#if defined(CONFIG_DEBUG_FS)
                mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
                break;
        case KFD_MQD_TYPE_HIQ:
                mqd->allocate_mqd = allocate_hiq_mqd;
                mqd->init_mqd = init_mqd_hiq;
                mqd->free_mqd = free_mqd_hiq_sdma;
                mqd->load_mqd = hiq_load_mqd_kiq;
                mqd->update_mqd = update_mqd;
                mqd->destroy_mqd = destroy_mqd;
                mqd->is_occupied = is_occupied;
                mqd->mqd_size = sizeof(struct v9_mqd);
#if defined(CONFIG_DEBUG_FS)
                mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
                mqd->read_doorbell_id = read_doorbell_id;
                break;
        case KFD_MQD_TYPE_DIQ:
                mqd->allocate_mqd = allocate_mqd;
                mqd->init_mqd = init_mqd_hiq;
                mqd->free_mqd = free_mqd;
                mqd->load_mqd = load_mqd;
                mqd->update_mqd = update_mqd;
                mqd->destroy_mqd = destroy_mqd;
                mqd->is_occupied = is_occupied;
                mqd->mqd_size = sizeof(struct v9_mqd);
#if defined(CONFIG_DEBUG_FS)
                mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
                break;
        case KFD_MQD_TYPE_SDMA:
                mqd->allocate_mqd = allocate_sdma_mqd;
                mqd->init_mqd = init_mqd_sdma;
                mqd->free_mqd = free_mqd_hiq_sdma;
                mqd->load_mqd = load_mqd_sdma;
                mqd->update_mqd = update_mqd_sdma;
                mqd->destroy_mqd = destroy_mqd_sdma;
                mqd->is_occupied = is_occupied_sdma;
                mqd->mqd_size = sizeof(struct v9_sdma_mqd);
#if defined(CONFIG_DEBUG_FS)
                mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
#endif
                break;
        default:
                kfree(mqd);
                return NULL;
        }

        return mqd;
}