linux/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_ucode.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "oss/oss_3_0_sh_mask.h"
#include "oss/oss_3_0_d.h"
#include "gmc/gmc_8_1_sh_mask.h"
#include "gmc/gmc_8_1_d.h"
#include "vi_structs.h"
#include "vid.h"

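/* Each compute micro-engine (MEC) exposes four pipes on VI (gfx v8) */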
#define VI_PIPE_PER_MEC (4)

struct cik_sdma_rlc_registers;

/*
 * Register access functions
 */

static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
		uint32_t sh_mem_config,
		uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit,
		uint32_t sh_mem_bases);
static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
		unsigned int vmid);
static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
		uint32_t hpd_size, uint64_t hpd_gpu_addr);
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
		uint32_t queue_id, uint32_t __user *wptr);
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
		uint32_t pipe_id, uint32_t queue_id);
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
				unsigned int timeout, uint32_t pipe_id,
				uint32_t queue_id);
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int timeout);
static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
static int kgd_address_watch_disable(struct kgd_dev *kgd);
static int kgd_address_watch_execute(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					uint32_t cntl_val,
					uint32_t addr_hi,
					uint32_t addr_lo);
static int kgd_wave_control_execute(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd);
static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset);

static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
		uint8_t vmid);
static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
		uint8_t vmid);
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);

static const struct kfd2kgd_calls kfd2kgd = {
        .init_gtt_mem_allocation = alloc_gtt_mem,
        .free_gtt_mem = free_gtt_mem,
        .get_vmem_size = get_vmem_size,
        .get_gpu_clock_counter = get_gpu_clock_counter,
        .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
        .program_sh_mem_settings = kgd_program_sh_mem_settings,
        .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
        .init_pipeline = kgd_init_pipeline,
        .init_interrupts = kgd_init_interrupts,
        .hqd_load = kgd_hqd_load,
        .hqd_sdma_load = kgd_hqd_sdma_load,
        .hqd_is_occupied = kgd_hqd_is_occupied,
        .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
        .hqd_destroy = kgd_hqd_destroy,
        .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
        .address_watch_disable = kgd_address_watch_disable,
        .address_watch_execute = kgd_address_watch_execute,
        .wave_control_execute = kgd_wave_control_execute,
        .address_watch_get_offset = kgd_address_watch_get_offset,
        .get_atc_vmid_pasid_mapping_pasid =
                        get_atc_vmid_pasid_mapping_pasid,
        .get_atc_vmid_pasid_mapping_valid =
                        get_atc_vmid_pasid_mapping_valid,
        .write_vmid_invalidate_request = write_vmid_invalidate_request,
        .get_fw_version = get_fw_version
};

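/*
 * Entry point used by amdgpu_amdkfd to hand the VI (gfx v8) callback
 * table to the KFD driver; the cast below only drops the const
 * qualifier from the table.
 */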
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
{
        return (struct kfd2kgd_calls *)&kfd2kgd;
}

static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
        return (struct amdgpu_device *)kgd;
}

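/*
 * SRBM_GFX_CNTL selects which ME/pipe/queue/VMID bank subsequent CP and
 * SH register accesses are routed to. lock_srbm()/unlock_srbm() bracket
 * every banked access with adev->srbm_mutex held, so another thread
 * cannot retarget the bank in the middle of a register sequence.
 */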
static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
                        uint32_t queue, uint32_t vmid)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);

        mutex_lock(&adev->srbm_mutex);
        WREG32(mmSRBM_GFX_CNTL, value);
}

static void unlock_srbm(struct kgd_dev *kgd)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);

        WREG32(mmSRBM_GFX_CNTL, 0);
        mutex_unlock(&adev->srbm_mutex);
}

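/*
 * Map a KFD pipe ID onto an SRBM (mec, pipe) pair. The pre-increment
 * shifts everything up by one pipe, apparently because HW pipe 0 of
 * MEC1 is reserved for amdgpu's own kernel queues; kgd_init_interrupts()
 * uses the same mapping.
 */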
static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
                                uint32_t queue_id)
{
        uint32_t mec = (++pipe_id / VI_PIPE_PER_MEC) + 1;
        uint32_t pipe = (pipe_id % VI_PIPE_PER_MEC);

        lock_srbm(kgd, mec, pipe, queue_id, 0);
}

static void release_queue(struct kgd_dev *kgd)
{
        unlock_srbm(kgd);
}

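/*
 * Program the per-VMID shader memory configuration (default and APE1
 * apertures). The SRBM bank is selected by VMID only, since these
 * registers are not per-queue.
 */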
static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
                                        uint32_t sh_mem_config,
                                        uint32_t sh_mem_ape1_base,
                                        uint32_t sh_mem_ape1_limit,
                                        uint32_t sh_mem_bases)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);

        lock_srbm(kgd, 0, 0, 0, vmid);

        WREG32(mmSH_MEM_CONFIG, sh_mem_config);
        WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
        WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
        WREG32(mmSH_MEM_BASES, sh_mem_bases);

        unlock_srbm(kgd);
}

static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
                                        unsigned int vmid)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);

        /*
         * We have to assume that there is no outstanding mapping.
         * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
         * a mapping is in progress or because a mapping finished
         * and the SW cleared it.
         * So the protocol is to always wait & clear.
         */
        uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
                        ATC_VMID0_PASID_MAPPING__VALID_MASK;

        WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);

        while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
                cpu_relax();
        WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);

        /* Mapping vmid to pasid also for IH block */
        WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);

        return 0;
}

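/*
 * Stub: per-queue EOP state is programmed in kgd_hqd_load(), so there
 * appears to be nothing left to set up per pipeline on VI.
 */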
static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
                                uint32_t hpd_size, uint64_t hpd_gpu_addr)
{
        return 0;
}

static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t mec;
        uint32_t pipe;

        mec = (++pipe_id / VI_PIPE_PER_MEC) + 1;
        pipe = (pipe_id % VI_PIPE_PER_MEC);

        lock_srbm(kgd, mec, pipe, 0, 0);

        WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK);

        unlock_srbm(kgd);

        return 0;
}

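/*
 * SDMA RLC queue support is stubbed out in this version: the MQD
 * struct is only forward-declared above and the base-address lookup
 * below always returns 0.
 */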
static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
{
        return 0;
}

static inline struct vi_mqd *get_mqd(void *mqd)
{
        return (struct vi_mqd *)mqd;
}

static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
{
        return (struct cik_sdma_rlc_registers *)mqd;
}

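/*
 * Load a compute queue: copy the memory queue descriptor (MQD) fields
 * into the hardware queue descriptor (HQD) registers of the selected
 * pipe/queue, optionally restore the saved user-space write pointer,
 * and finally write CP_HQD_ACTIVE to start the queue.
 */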
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
                        uint32_t queue_id, uint32_t __user *wptr)
{
        struct vi_mqd *m;
        uint32_t shadow_wptr, valid_wptr;
        struct amdgpu_device *adev = get_amdgpu_device(kgd);

        m = get_mqd(mqd);

        /*
         * copy_from_user() returns the number of bytes it could not copy,
         * so the saved wptr is only usable when it returns 0.
         */
        valid_wptr = copy_from_user(&shadow_wptr, wptr, sizeof(shadow_wptr));
        acquire_queue(kgd, pipe_id, queue_id);

        WREG32(mmCP_MQD_CONTROL, m->cp_mqd_control);
        WREG32(mmCP_MQD_BASE_ADDR, m->cp_mqd_base_addr_lo);
        WREG32(mmCP_MQD_BASE_ADDR_HI, m->cp_mqd_base_addr_hi);

        WREG32(mmCP_HQD_VMID, m->cp_hqd_vmid);
        WREG32(mmCP_HQD_PERSISTENT_STATE, m->cp_hqd_persistent_state);
        WREG32(mmCP_HQD_PIPE_PRIORITY, m->cp_hqd_pipe_priority);
        WREG32(mmCP_HQD_QUEUE_PRIORITY, m->cp_hqd_queue_priority);
        WREG32(mmCP_HQD_QUANTUM, m->cp_hqd_quantum);
        WREG32(mmCP_HQD_PQ_BASE, m->cp_hqd_pq_base_lo);
        WREG32(mmCP_HQD_PQ_BASE_HI, m->cp_hqd_pq_base_hi);
        WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR, m->cp_hqd_pq_rptr_report_addr_lo);
        WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
                        m->cp_hqd_pq_rptr_report_addr_hi);

        if (valid_wptr == 0)
                WREG32(mmCP_HQD_PQ_WPTR, shadow_wptr);

        WREG32(mmCP_HQD_PQ_CONTROL, m->cp_hqd_pq_control);
        WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, m->cp_hqd_pq_doorbell_control);

        WREG32(mmCP_HQD_EOP_BASE_ADDR, m->cp_hqd_eop_base_addr_lo);
        WREG32(mmCP_HQD_EOP_BASE_ADDR_HI, m->cp_hqd_eop_base_addr_hi);
        WREG32(mmCP_HQD_EOP_CONTROL, m->cp_hqd_eop_control);
        WREG32(mmCP_HQD_EOP_RPTR, m->cp_hqd_eop_rptr);
        WREG32(mmCP_HQD_EOP_WPTR, m->cp_hqd_eop_wptr);
        WREG32(mmCP_HQD_EOP_EVENTS, m->cp_hqd_eop_done_events);

        WREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_LO, m->cp_hqd_ctx_save_base_addr_lo);
        WREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_HI, m->cp_hqd_ctx_save_base_addr_hi);
        WREG32(mmCP_HQD_CTX_SAVE_CONTROL, m->cp_hqd_ctx_save_control);
        WREG32(mmCP_HQD_CNTL_STACK_OFFSET, m->cp_hqd_cntl_stack_offset);
        WREG32(mmCP_HQD_CNTL_STACK_SIZE, m->cp_hqd_cntl_stack_size);
        WREG32(mmCP_HQD_WG_STATE_OFFSET, m->cp_hqd_wg_state_offset);
        WREG32(mmCP_HQD_CTX_SAVE_SIZE, m->cp_hqd_ctx_save_size);

        WREG32(mmCP_HQD_IB_CONTROL, m->cp_hqd_ib_control);

        WREG32(mmCP_HQD_DEQUEUE_REQUEST, m->cp_hqd_dequeue_request);
        WREG32(mmCP_HQD_ERROR, m->cp_hqd_error);
        WREG32(mmCP_HQD_EOP_WPTR_MEM, m->cp_hqd_eop_wptr_mem);
        WREG32(mmCP_HQD_EOP_DONES, m->cp_hqd_eop_dones);

        WREG32(mmCP_HQD_ACTIVE, m->cp_hqd_active);

        release_queue(kgd);

        return 0;
}

static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
{
        return 0;
}

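/*
 * A queue slot counts as occupied by the given address when the HQD is
 * active and its ring base matches. CP_HQD_PQ_BASE holds the ring
 * buffer address in 256-byte units, hence the >> 8 before comparing.
 */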
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
                                uint32_t pipe_id, uint32_t queue_id)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t act;
        bool retval = false;
        uint32_t low, high;

        acquire_queue(kgd, pipe_id, queue_id);
        act = RREG32(mmCP_HQD_ACTIVE);
        if (act) {
                low = lower_32_bits(queue_address >> 8);
                high = upper_32_bits(queue_address >> 8);

                if (low == RREG32(mmCP_HQD_PQ_BASE) &&
                                high == RREG32(mmCP_HQD_PQ_BASE_HI))
                        retval = true;
        }
        release_queue(kgd);
        return retval;
}

static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct cik_sdma_rlc_registers *m;
        uint32_t sdma_base_addr;
        uint32_t sdma_rlc_rb_cntl;

        m = get_sdma_mqd(mqd);
        sdma_base_addr = get_sdma_base_addr(m);

        sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);

        if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
                return true;

        return false;
}

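/*
 * Tear down a compute queue: issue a dequeue request of the given type
 * and poll CP_HQD_ACTIVE in 20 ms steps until the queue goes inactive
 * or the caller-supplied timeout (in ms) expires.
 */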
static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
                                unsigned int timeout, uint32_t pipe_id,
                                uint32_t queue_id)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t temp;

        acquire_queue(kgd, pipe_id, queue_id);

        WREG32(mmCP_HQD_DEQUEUE_REQUEST, reset_type);

        while (true) {
                temp = RREG32(mmCP_HQD_ACTIVE);
                /* the queue is gone once the ACTIVE bit clears */
                if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
                        break;
                /* poll in 20 ms steps; avoid unsigned wrap-around */
                if (timeout < 20) {
                        pr_err("kfd: cp queue preemption time out\n");
                        release_queue(kgd);
                        return -ETIME;
                }
                msleep(20);
                timeout -= 20;
        }

        release_queue(kgd);
        return 0;
}

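/*
 * Tear down an SDMA RLC queue: disable its ring buffer, wait for the
 * context to report idle, then clear the doorbell and ring registers.
 */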
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
                                unsigned int timeout)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct cik_sdma_rlc_registers *m;
        uint32_t sdma_base_addr;
        uint32_t temp;

        m = get_sdma_mqd(mqd);
        sdma_base_addr = get_sdma_base_addr(m);

        temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
        temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
        WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);

        while (true) {
                temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
                /* wait for the RLC context to report idle */
                if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
                        break;
                /* poll in 20 ms steps; avoid unsigned wrap-around */
                if (timeout < 20)
                        return -ETIME;
                msleep(20);
                timeout -= 20;
        }

        WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
        WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
        WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
        WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0);

        return 0;
}

static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
                                                        uint8_t vmid)
{
        uint32_t reg;
        struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

        reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
        return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
}

static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
                                                                uint8_t vmid)
{
        uint32_t reg;
        struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

        reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
        /* return the PASID field, not the VALID bit */
        return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
}

static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
{
        struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

        WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

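/*
 * Address-watch support is not implemented for VI in this version;
 * this callback, kgd_address_watch_execute() and
 * kgd_address_watch_get_offset() below are stubs.
 */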
static int kgd_address_watch_disable(struct kgd_dev *kgd)
{
        return 0;
}

static int kgd_address_watch_execute(struct kgd_dev *kgd,
                                        unsigned int watch_point_id,
                                        uint32_t cntl_val,
                                        uint32_t addr_hi,
                                        uint32_t addr_lo)
{
        return 0;
}

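/*
 * Issue an SQ command to a set of wavefronts: point GRBM_GFX_INDEX at
 * the targeted SE/SH/instance, write SQ_CMD, then restore broadcast
 * mode so that later GRBM writes reach all shader engines again.
 */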
static int kgd_wave_control_execute(struct kgd_dev *kgd,
                                        uint32_t gfx_index_val,
                                        uint32_t sq_cmd)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t data = 0;

        mutex_lock(&adev->grbm_idx_mutex);

        WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
        WREG32(mmSQ_CMD, sq_cmd);

        data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
                INSTANCE_BROADCAST_WRITES, 1);
        data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
                SH_BROADCAST_WRITES, 1);
        data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
                SE_BROADCAST_WRITES, 1);

        WREG32(mmGRBM_GFX_INDEX, data);
        mutex_unlock(&adev->grbm_idx_mutex);

        return 0;
}

static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
                                        unsigned int watch_point_id,
                                        unsigned int reg_offset)
{
        return 0;
}

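/*
 * Report the requested engine's microcode version, read from the common
 * header of the firmware image that amdgpu has already loaded.
 */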
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
{
        struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
        const union amdgpu_firmware_header *hdr;

        BUG_ON(kgd == NULL);

        switch (type) {
        case KGD_ENGINE_PFP:
                hdr = (const union amdgpu_firmware_header *)
                                                        adev->gfx.pfp_fw->data;
                break;

        case KGD_ENGINE_ME:
                hdr = (const union amdgpu_firmware_header *)
                                                        adev->gfx.me_fw->data;
                break;

        case KGD_ENGINE_CE:
                hdr = (const union amdgpu_firmware_header *)
                                                        adev->gfx.ce_fw->data;
                break;

        case KGD_ENGINE_MEC1:
                hdr = (const union amdgpu_firmware_header *)
                                                        adev->gfx.mec_fw->data;
                break;

        case KGD_ENGINE_MEC2:
                hdr = (const union amdgpu_firmware_header *)
                                                        adev->gfx.mec2_fw->data;
                break;

        case KGD_ENGINE_RLC:
                hdr = (const union amdgpu_firmware_header *)
                                                        adev->gfx.rlc_fw->data;
                break;

        case KGD_ENGINE_SDMA1:
                hdr = (const union amdgpu_firmware_header *)
                                                        adev->sdma.instance[0].fw->data;
                break;

        case KGD_ENGINE_SDMA2:
                hdr = (const union amdgpu_firmware_header *)
                                                        adev->sdma.instance[1].fw->data;
                break;

        default:
                return 0;
        }

        if (hdr == NULL)
                return 0;

        /* Only 12 bits of the version are in use */
        return hdr->common.ucode_version;
}
