linux/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "atom.h"
#include "atombios.h"
#include "soc15_hw_ip.h"

union firmware_info {
        struct atom_firmware_info_v3_1 v31;
        struct atom_firmware_info_v3_2 v32;
        struct atom_firmware_info_v3_3 v33;
        struct atom_firmware_info_v3_4 v34;
};

/*
 * Helper function to query firmware capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return the firmware_capability field of the firmwareinfo table on success,
 * or 0 if the table is not available.
 */
uint32_t amdgpu_atomfirmware_query_firmware_capability(struct amdgpu_device *adev)
{
        struct amdgpu_mode_info *mode_info = &adev->mode_info;
        int index;
        u16 data_offset, size;
        union firmware_info *firmware_info;
        u8 frev, crev;
        u32 fw_cap = 0;

        index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
                        firmwareinfo);

        if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
                                index, &size, &frev, &crev, &data_offset)) {
                /* support firmware_info 3.1 + */
                if ((frev == 3 && crev >= 1) || (frev > 3)) {
                        firmware_info = (union firmware_info *)
                                (mode_info->atom_context->bios + data_offset);
                        fw_cap = le32_to_cpu(firmware_info->v31.firmware_capability);
                }
        }

        return fw_cap;
}

/*
 * Helper function to query gpu virtualization capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return true if gpu virtualization is supported or false if not
 */
bool amdgpu_atomfirmware_gpu_virtualization_supported(struct amdgpu_device *adev)
{
        u32 fw_cap;

        fw_cap = adev->mode_info.firmware_flags;

        return (fw_cap & ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION) ? true : false;
}

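/*
 * Helper function to initialize the bios scratch register base
 *
 * @adev: amdgpu_device pointer
 *
 * Parse the firmwareinfo table and cache the bios scratch register start
 * address in adev->bios_scratch_reg_offset.
 */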
void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev)
{
        int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
                                                firmwareinfo);
        uint16_t data_offset;

        if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
                                          NULL, NULL, &data_offset)) {
                struct atom_firmware_info_v3_1 *firmware_info =
                        (struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
                                                           data_offset);

                adev->bios_scratch_reg_offset =
                        le32_to_cpu(firmware_info->bios_scratch_reg_startaddr);
        }
}

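/*
 * Helper function to allocate the atom interpreter scratch buffer
 *
 * @adev: amdgpu_device pointer
 *
 * Size the scratch buffer from the vram_usagebyfirmware table (recording any
 * SR-IOV VRAM reservation the firmware requests), fall back to a 20 KB
 * default, and allocate it.
 *
 * Return 0 on success or -ENOMEM if the allocation fails.
 */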
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
{
        struct atom_context *ctx = adev->mode_info.atom_context;
        int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
                                                vram_usagebyfirmware);
        struct vram_usagebyfirmware_v2_1 *firmware_usage;
        uint32_t start_addr, size;
        uint16_t data_offset;
        int usage_bytes = 0;

        if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
                firmware_usage = (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
                DRM_DEBUG("atom firmware requested %08x %dkb fw %dkb drv\n",
                          le32_to_cpu(firmware_usage->start_address_in_kb),
                          le16_to_cpu(firmware_usage->used_by_firmware_in_kb),
                          le16_to_cpu(firmware_usage->used_by_driver_in_kb));

                start_addr = le32_to_cpu(firmware_usage->start_address_in_kb);
                size = le16_to_cpu(firmware_usage->used_by_firmware_in_kb);

                if ((uint32_t)(start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
                        (uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
                        ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
                        /* Firmware requests VRAM reservation for SR-IOV */
                        adev->mman.fw_vram_usage_start_offset = (start_addr &
                                (~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
                        adev->mman.fw_vram_usage_size = size << 10;
                        /* Use the default scratch size */
                        usage_bytes = 0;
                } else {
                        usage_bytes = le16_to_cpu(firmware_usage->used_by_driver_in_kb) << 10;
                }
        }
        ctx->scratch_size_bytes = 0;
        if (usage_bytes == 0)
                usage_bytes = 20 * 1024;
        /* allocate some scratch memory */
        ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
        if (!ctx->scratch)
                return -ENOMEM;
        ctx->scratch_size_bytes = usage_bytes;
        return 0;
}

union igp_info {
        struct atom_integrated_system_info_v1_11 v11;
        struct atom_integrated_system_info_v1_12 v12;
        struct atom_integrated_system_info_v2_1 v21;
};

union umc_info {
        struct atom_umc_info_v3_1 v31;
        struct atom_umc_info_v3_2 v32;
        struct atom_umc_info_v3_3 v33;
};

union vram_info {
        struct atom_vram_info_header_v2_3 v23;
        struct atom_vram_info_header_v2_4 v24;
        struct atom_vram_info_header_v2_5 v25;
        struct atom_vram_info_header_v2_6 v26;
};

union vram_module {
        struct atom_vram_module_v9 v9;
        struct atom_vram_module_v10 v10;
        struct atom_vram_module_v11 v11;
};

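/*
 * Helper function to convert an atom memory type into an AMDGPU_VRAM_TYPE_*
 * value, using the APU (DDR/LPDDR) or dGPU (GDDR/HBM) encoding depending on
 * the device flags.
 */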
static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
                                              int atom_mem_type)
{
        int vram_type;

        if (adev->flags & AMD_IS_APU) {
                switch (atom_mem_type) {
                case Ddr2MemType:
                case LpDdr2MemType:
                        vram_type = AMDGPU_VRAM_TYPE_DDR2;
                        break;
                case Ddr3MemType:
                case LpDdr3MemType:
                        vram_type = AMDGPU_VRAM_TYPE_DDR3;
                        break;
                case Ddr4MemType:
                case LpDdr4MemType:
                        vram_type = AMDGPU_VRAM_TYPE_DDR4;
                        break;
                case Ddr5MemType:
                case LpDdr5MemType:
                        vram_type = AMDGPU_VRAM_TYPE_DDR5;
                        break;
                default:
                        vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
                        break;
                }
        } else {
                switch (atom_mem_type) {
                case ATOM_DGPU_VRAM_TYPE_GDDR5:
                        vram_type = AMDGPU_VRAM_TYPE_GDDR5;
                        break;
                case ATOM_DGPU_VRAM_TYPE_HBM2:
                case ATOM_DGPU_VRAM_TYPE_HBM2E:
                        vram_type = AMDGPU_VRAM_TYPE_HBM;
                        break;
                case ATOM_DGPU_VRAM_TYPE_GDDR6:
                        vram_type = AMDGPU_VRAM_TYPE_GDDR6;
                        break;
                default:
                        vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
                        break;
                }
        }

        return vram_type;
}

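/*
 * Helper function to query vram width, type and vendor
 *
 * @adev: amdgpu_device pointer
 * @vram_width: returned total memory bus width, may be NULL
 * @vram_type: returned AMDGPU_VRAM_TYPE_* value, may be NULL
 * @vram_vendor: returned vram vendor id, may be NULL
 *
 * Parse the integratedsysteminfo table (APU) or the vram_info table (dGPU)
 * and fill in whichever of the output pointers are provided.
 *
 * Return 0 on success (the outputs are left untouched if the table cannot be
 * parsed) or -EINVAL on an unsupported table revision.
 */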
int
amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
                                  int *vram_width, int *vram_type,
                                  int *vram_vendor)
{
        struct amdgpu_mode_info *mode_info = &adev->mode_info;
        int index, i = 0;
        u16 data_offset, size;
        union igp_info *igp_info;
        union vram_info *vram_info;
        union vram_module *vram_module;
        u8 frev, crev;
        u8 mem_type;
        u8 mem_vendor;
        u32 mem_channel_number;
        u32 mem_channel_width;
        u32 module_id;

        if (adev->flags & AMD_IS_APU)
                index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
                                                    integratedsysteminfo);
        else
                index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
                                                    vram_info);

        if (amdgpu_atom_parse_data_header(mode_info->atom_context,
                                          index, &size,
                                          &frev, &crev, &data_offset)) {
                if (adev->flags & AMD_IS_APU) {
                        igp_info = (union igp_info *)
                                (mode_info->atom_context->bios + data_offset);
                        switch (frev) {
                        case 1:
                                switch (crev) {
                                case 11:
                                case 12:
                                        mem_channel_number = igp_info->v11.umachannelnumber;
                                        if (!mem_channel_number)
                                                mem_channel_number = 1;
                                        /* channel width is 64 */
                                        if (vram_width)
                                                *vram_width = mem_channel_number * 64;
                                        mem_type = igp_info->v11.memorytype;
                                        if (vram_type)
                                                *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
                                        break;
                                default:
                                        return -EINVAL;
                                }
                                break;
                        case 2:
                                switch (crev) {
                                case 1:
                                case 2:
                                        mem_channel_number = igp_info->v21.umachannelnumber;
                                        if (!mem_channel_number)
                                                mem_channel_number = 1;
                                        /* channel width is 64 */
                                        if (vram_width)
                                                *vram_width = mem_channel_number * 64;
                                        mem_type = igp_info->v21.memorytype;
                                        if (vram_type)
                                                *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
                                        break;
                                default:
                                        return -EINVAL;
                                }
                                break;
                        default:
                                return -EINVAL;
                        }
                } else {
                        vram_info = (union vram_info *)
                                (mode_info->atom_context->bios + data_offset);
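                        /* the populated vram module index is read back from
                         * the bios scratch registers (bits 23:16)
                         */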
                        module_id = (RREG32(adev->bios_scratch_reg_offset + 4) & 0x00ff0000) >> 16;
                        switch (crev) {
                        case 3:
                                if (module_id > vram_info->v23.vram_module_num)
                                        module_id = 0;
                                vram_module = (union vram_module *)vram_info->v23.vram_module;
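                                /* step through the variable-length vram_module
                                 * entries to reach the selected module
                                 */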
                                while (i < module_id) {
                                        vram_module = (union vram_module *)
                                                ((u8 *)vram_module + vram_module->v9.vram_module_size);
                                        i++;
                                }
                                mem_type = vram_module->v9.memory_type;
                                if (vram_type)
                                        *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
                                mem_channel_number = vram_module->v9.channel_num;
                                mem_channel_width = vram_module->v9.channel_width;
                                if (vram_width)
                                        *vram_width = mem_channel_number * (1 << mem_channel_width);
                                mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
                                if (vram_vendor)
                                        *vram_vendor = mem_vendor;
                                break;
                        case 4:
                                if (module_id > vram_info->v24.vram_module_num)
                                        module_id = 0;
                                vram_module = (union vram_module *)vram_info->v24.vram_module;
                                while (i < module_id) {
                                        vram_module = (union vram_module *)
                                                ((u8 *)vram_module + vram_module->v10.vram_module_size);
                                        i++;
                                }
                                mem_type = vram_module->v10.memory_type;
                                if (vram_type)
                                        *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
                                mem_channel_number = vram_module->v10.channel_num;
                                mem_channel_width = vram_module->v10.channel_width;
                                if (vram_width)
                                        *vram_width = mem_channel_number * (1 << mem_channel_width);
                                mem_vendor = (vram_module->v10.vender_rev_id) & 0xF;
                                if (vram_vendor)
                                        *vram_vendor = mem_vendor;
                                break;
                        case 5:
                                if (module_id > vram_info->v25.vram_module_num)
                                        module_id = 0;
                                vram_module = (union vram_module *)vram_info->v25.vram_module;
                                while (i < module_id) {
                                        vram_module = (union vram_module *)
                                                ((u8 *)vram_module + vram_module->v11.vram_module_size);
                                        i++;
                                }
                                mem_type = vram_module->v11.memory_type;
                                if (vram_type)
                                        *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
                                mem_channel_number = vram_module->v11.channel_num;
                                mem_channel_width = vram_module->v11.channel_width;
                                if (vram_width)
                                        *vram_width = mem_channel_number * (1 << mem_channel_width);
                                mem_vendor = (vram_module->v11.vender_rev_id) & 0xF;
                                if (vram_vendor)
                                        *vram_vendor = mem_vendor;
                                break;
                        case 6:
                                if (module_id > vram_info->v26.vram_module_num)
                                        module_id = 0;
                                vram_module = (union vram_module *)vram_info->v26.vram_module;
                                while (i < module_id) {
                                        vram_module = (union vram_module *)
                                                ((u8 *)vram_module + vram_module->v9.vram_module_size);
                                        i++;
                                }
                                mem_type = vram_module->v9.memory_type;
                                if (vram_type)
                                        *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
                                mem_channel_number = vram_module->v9.channel_num;
                                mem_channel_width = vram_module->v9.channel_width;
                                if (vram_width)
                                        *vram_width = mem_channel_number * (1 << mem_channel_width);
                                mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
                                if (vram_vendor)
                                        *vram_vendor = mem_vendor;
                                break;
                        default:
                                return -EINVAL;
                        }
                }
        }

        return 0;
}

/*
 * Return true if the vbios enabled ecc by default and the umc info table is
 * available, or false if ecc is not enabled by default or the umc info table
 * is not available.
 */
bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
{
        struct amdgpu_mode_info *mode_info = &adev->mode_info;
        int index;
        u16 data_offset, size;
        union umc_info *umc_info;
        u8 frev, crev;
        bool ecc_default_enabled = false;
        u8 umc_config;
        u32 umc_config1;

        index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
                        umc_info);

        if (amdgpu_atom_parse_data_header(mode_info->atom_context,
                                index, &size, &frev, &crev, &data_offset)) {
                if (frev == 3) {
                        umc_info = (union umc_info *)
                                (mode_info->atom_context->bios + data_offset);
                        switch (crev) {
                        case 1:
                                umc_config = le32_to_cpu(umc_info->v31.umc_config);
                                ecc_default_enabled =
                                        (umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
                                break;
                        case 2:
                                umc_config = le32_to_cpu(umc_info->v32.umc_config);
                                ecc_default_enabled =
                                        (umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
                                break;
                        case 3:
                                umc_config = le32_to_cpu(umc_info->v33.umc_config);
                                umc_config1 = le32_to_cpu(umc_info->v33.umc_config1);
                                ecc_default_enabled =
                                        ((umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ||
                                         (umc_config1 & UMC_CONFIG1__ENABLE_ECC_CAPABLE)) ? true : false;
                                break;
                        default:
                                /* unsupported crev */
                                return false;
                        }
                }
        }

        return ecc_default_enabled;
}

/*
 * Helper function to query sram ecc capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return true if vbios supports sram ecc or false if not
 */
bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev)
{
        u32 fw_cap;

        fw_cap = adev->mode_info.firmware_flags;

        return (fw_cap & ATOM_FIRMWARE_CAP_SRAM_ECC) ? true : false;
}

/*
 * Helper function to query dynamic boot config capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return true if vbios supports dynamic boot config or false if not
 */
bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *adev)
{
        u32 fw_cap;

        fw_cap = adev->mode_info.firmware_flags;

        return (fw_cap & ATOM_FIRMWARE_CAP_DYNAMIC_BOOT_CFG_ENABLE) ? true : false;
}

/**
 * amdgpu_atomfirmware_ras_rom_addr -- Get the RAS EEPROM addr from VBIOS
 * @adev: amdgpu_device pointer
 * @i2c_address: pointer to u8; if not NULL, will contain
 *    the RAS EEPROM address if the function returns true
 *
 * Return true if VBIOS supports RAS EEPROM address reporting,
 * else return false. If true and @i2c_address is not NULL,
 * will contain the RAS ROM address.
 */
bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev,
                                      u8 *i2c_address)
{
        struct amdgpu_mode_info *mode_info = &adev->mode_info;
        int index;
        u16 data_offset, size;
        union firmware_info *firmware_info;
        u8 frev, crev;

        index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
                                            firmwareinfo);

        if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
                                          index, &size, &frev, &crev,
                                          &data_offset)) {
                /* support firmware_info 3.4 + */
                if ((frev == 3 && crev >= 4) || (frev > 3)) {
                        firmware_info = (union firmware_info *)
                                (mode_info->atom_context->bios + data_offset);
                        /* The ras_rom_i2c_slave_addr should ideally
                         * be a 19-bit EEPROM address, which would be
                         * used as is by the driver; see top of
                         * amdgpu_eeprom.c.
                         *
                         * When this is the case, 0 is of course a
                         * valid RAS EEPROM address, in which case,
                         * we'll drop the first "if (firm...)" and only
                         * leave the check for the pointer.
                         *
                         * The reason this works right now is because
                         * ras_rom_i2c_slave_addr contains the EEPROM
                         * device type qualifier 1010b in the top 4
                         * bits.
                         */
                        if (firmware_info->v34.ras_rom_i2c_slave_addr) {
                                if (i2c_address)
                                        *i2c_address = firmware_info->v34.ras_rom_i2c_slave_addr;
                                return true;
                        }
                }
        }

        return false;
}

union smu_info {
        struct atom_smu_info_v3_1 v31;
};

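/*
 * Helper function to pull the bootup clocks and pll limits from the vbios
 *
 * @adev: amdgpu_device pointer
 *
 * Read the default sclk/mclk from the firmwareinfo table and the reference
 * clocks from the smu_info and umc_info tables; on Navi and newer the rlc
 * reference clock from the gfx_info table is used for the system clock.
 *
 * Return 0 if at least one table was parsed or -EINVAL if none were found.
 */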
int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev)
{
        struct amdgpu_mode_info *mode_info = &adev->mode_info;
        struct amdgpu_pll *spll = &adev->clock.spll;
        struct amdgpu_pll *mpll = &adev->clock.mpll;
        uint8_t frev, crev;
        uint16_t data_offset;
        int ret = -EINVAL, index;

        index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
                                            firmwareinfo);
        if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
                                   &frev, &crev, &data_offset)) {
                union firmware_info *firmware_info =
                        (union firmware_info *)(mode_info->atom_context->bios +
                                                data_offset);

                adev->clock.default_sclk =
                        le32_to_cpu(firmware_info->v31.bootup_sclk_in10khz);
                adev->clock.default_mclk =
                        le32_to_cpu(firmware_info->v31.bootup_mclk_in10khz);

                adev->pm.current_sclk = adev->clock.default_sclk;
                adev->pm.current_mclk = adev->clock.default_mclk;

                ret = 0;
        }

        index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
                                            smu_info);
        if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
                                   &frev, &crev, &data_offset)) {
                union smu_info *smu_info =
                        (union smu_info *)(mode_info->atom_context->bios +
                                           data_offset);

                /* system clock */
                spll->reference_freq = le32_to_cpu(smu_info->v31.core_refclk_10khz);

                spll->reference_div = 0;
                spll->min_post_div = 1;
                spll->max_post_div = 1;
                spll->min_ref_div = 2;
                spll->max_ref_div = 0xff;
                spll->min_feedback_div = 4;
                spll->max_feedback_div = 0xff;
                spll->best_vco = 0;

                ret = 0;
        }

        index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
                                            umc_info);
        if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
                                   &frev, &crev, &data_offset)) {
                union umc_info *umc_info =
                        (union umc_info *)(mode_info->atom_context->bios +
                                           data_offset);

                /* memory clock */
                mpll->reference_freq = le32_to_cpu(umc_info->v31.mem_refclk_10khz);

                mpll->reference_div = 0;
                mpll->min_post_div = 1;
                mpll->max_post_div = 1;
                mpll->min_ref_div = 2;
                mpll->max_ref_div = 0xff;
                mpll->min_feedback_div = 4;
                mpll->max_feedback_div = 0xff;
                mpll->best_vco = 0;

                ret = 0;
        }

        /* if the asic is Navi or newer, the rlc reference clock from the
         * vbios gfx_info table is used as the system clock
         */
        if (adev->asic_type >= CHIP_NAVI10) {
                index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
                                                   gfx_info);
                if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
                                          &frev, &crev, &data_offset)) {
                        struct atom_gfx_info_v2_2 *gfx_info = (struct atom_gfx_info_v2_2 *)
                                (mode_info->atom_context->bios + data_offset);
                        if ((frev == 2) && (crev >= 2))
                                spll->reference_freq = le32_to_cpu(gfx_info->rlc_gpu_timer_refclk);
                        ret = 0;
                }
        }

        return ret;
}

union gfx_info {
        struct atom_gfx_info_v2_4 v24;
        struct atom_gfx_info_v2_7 v27;
};

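/*
 * Helper function to populate the gfx configuration from the vbios gfx_info table
 *
 * @adev: amdgpu_device pointer
 *
 * Return 0 on success or -EINVAL if the table is missing or its revision is
 * not supported.
 */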
int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev)
{
        struct amdgpu_mode_info *mode_info = &adev->mode_info;
        int index;
        uint8_t frev, crev;
        uint16_t data_offset;

        index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
                                            gfx_info);
        if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
                                   &frev, &crev, &data_offset)) {
                union gfx_info *gfx_info = (union gfx_info *)
                        (mode_info->atom_context->bios + data_offset);
                switch (crev) {
                case 4:
                        adev->gfx.config.max_shader_engines = gfx_info->v24.max_shader_engines;
                        adev->gfx.config.max_cu_per_sh = gfx_info->v24.max_cu_per_sh;
                        adev->gfx.config.max_sh_per_se = gfx_info->v24.max_sh_per_se;
                        adev->gfx.config.max_backends_per_se = gfx_info->v24.max_backends_per_se;
                        adev->gfx.config.max_texture_channel_caches = gfx_info->v24.max_texture_channel_caches;
                        adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v24.gc_num_gprs);
                        adev->gfx.config.max_gs_threads = gfx_info->v24.gc_num_max_gs_thds;
                        adev->gfx.config.gs_vgt_table_depth = gfx_info->v24.gc_gs_table_depth;
                        adev->gfx.config.gs_prim_buffer_depth =
                                le16_to_cpu(gfx_info->v24.gc_gsprim_buff_depth);
                        adev->gfx.config.double_offchip_lds_buf =
                                gfx_info->v24.gc_double_offchip_lds_buffer;
                        adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v24.gc_wave_size);
                        adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v24.gc_max_waves_per_simd);
                        adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v24.gc_max_scratch_slots_per_cu;
                        adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v24.gc_lds_size);
                        return 0;
                case 7:
                        adev->gfx.config.max_shader_engines = gfx_info->v27.max_shader_engines;
                        adev->gfx.config.max_cu_per_sh = gfx_info->v27.max_cu_per_sh;
                        adev->gfx.config.max_sh_per_se = gfx_info->v27.max_sh_per_se;
                        adev->gfx.config.max_backends_per_se = gfx_info->v27.max_backends_per_se;
                        adev->gfx.config.max_texture_channel_caches = gfx_info->v27.max_texture_channel_caches;
                        adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v27.gc_num_gprs);
                        adev->gfx.config.max_gs_threads = gfx_info->v27.gc_num_max_gs_thds;
                        adev->gfx.config.gs_vgt_table_depth = gfx_info->v27.gc_gs_table_depth;
                        adev->gfx.config.gs_prim_buffer_depth = le16_to_cpu(gfx_info->v27.gc_gsprim_buff_depth);
                        adev->gfx.config.double_offchip_lds_buf = gfx_info->v27.gc_double_offchip_lds_buffer;
                        adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v27.gc_wave_size);
                        adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v27.gc_max_waves_per_simd);
                        adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v27.gc_max_scratch_slots_per_cu;
                        adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v27.gc_lds_size);
                        return 0;
                default:
                        return -EINVAL;
                }
        }
        return -EINVAL;
}

/*
 * Helper function to query two stage mem training capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return true if two stage mem training is supported or false if not
 */
bool amdgpu_atomfirmware_mem_training_supported(struct amdgpu_device *adev)
{
        u32 fw_cap;

        fw_cap = adev->mode_info.firmware_flags;

        return (fw_cap & ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING) ? true : false;
}

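/*
 * Helper function to query the firmware reserved framebuffer size
 *
 * @adev: amdgpu_device pointer
 *
 * Return the firmware reserved size in bytes from firmware_info v3.4, 0 if
 * the table cannot be parsed or the minor revision does not report it, or
 * -EINVAL on an unsupported major revision.
 */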
int amdgpu_atomfirmware_get_fw_reserved_fb_size(struct amdgpu_device *adev)
{
        struct atom_context *ctx = adev->mode_info.atom_context;
        union firmware_info *firmware_info;
        int index;
        u16 data_offset, size;
        u8 frev, crev;
        int fw_reserved_fb_size;

        index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
                        firmwareinfo);

        if (!amdgpu_atom_parse_data_header(ctx, index, &size,
                                &frev, &crev, &data_offset))
                /* failed to parse data_header */
                return 0;

        firmware_info = (union firmware_info *)(ctx->bios + data_offset);

        if (frev != 3)
                return -EINVAL;

        switch (crev) {
        case 4:
                fw_reserved_fb_size =
                        (firmware_info->v34.fw_reserved_size_in_kb << 10);
                break;
        default:
                fw_reserved_fb_size = 0;
                break;
        }

        return fw_reserved_fb_size;
}