/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "atom.h"
#include "atombios.h"
#include "soc15_hw_ip.h"

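/*
 * Check whether the vbios reports SR-IOV/virtualization support in the
 * firmwareinfo capability flags.
 */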
bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev)
{
	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						firmwareinfo);
	uint16_t data_offset;

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
					  NULL, NULL, &data_offset)) {
		struct atom_firmware_info_v3_1 *firmware_info =
			(struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
							   data_offset);

		if (le32_to_cpu(firmware_info->firmware_capability) &
		    ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION)
			return true;
	}
	return false;
}

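/*
 * Cache the MMIO offset of the first bios scratch register; later code
 * reads these registers to exchange state with the vbios.
 */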
void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev)
{
	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						firmwareinfo);
	uint16_t data_offset;

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
					  NULL, NULL, &data_offset)) {
		struct atom_firmware_info_v3_1 *firmware_info =
			(struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
							   data_offset);

		adev->bios_scratch_reg_offset =
			le32_to_cpu(firmware_info->bios_scratch_reg_startaddr);
	}
}

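/*
 * Size and allocate the atom interpreter's scratch buffer.  When the
 * vram_usagebyfirmware table flags an SR-IOV reservation, record the
 * firmware's VRAM reservation instead and keep the default scratch size.
 */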
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						vram_usagebyfirmware);
	struct vram_usagebyfirmware_v2_1 *firmware_usage;
	uint32_t start_addr, size;
	uint16_t data_offset;
	int usage_bytes = 0;

	if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
		firmware_usage = (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
		DRM_DEBUG("atom firmware requested %08x %dkb fw %dkb drv\n",
			  le32_to_cpu(firmware_usage->start_address_in_kb),
			  le16_to_cpu(firmware_usage->used_by_firmware_in_kb),
			  le16_to_cpu(firmware_usage->used_by_driver_in_kb));

		start_addr = le32_to_cpu(firmware_usage->start_address_in_kb);
		size = le16_to_cpu(firmware_usage->used_by_firmware_in_kb);

		if ((uint32_t)(start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
		    (uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
			       ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
			/* Firmware request VRAM reservation for SR-IOV */
			adev->fw_vram_usage.start_offset = (start_addr &
				(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
			adev->fw_vram_usage.size = size << 10;
			/* fall back to the default scratch size */
			usage_bytes = 0;
		} else {
			usage_bytes = le16_to_cpu(firmware_usage->used_by_driver_in_kb) << 10;
		}
	}
	ctx->scratch_size_bytes = 0;
	if (usage_bytes == 0)
		usage_bytes = 20 * 1024;
	/* allocate some scratch memory */
	ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
	if (!ctx->scratch)
		return -ENOMEM;
	ctx->scratch_size_bytes = usage_bytes;
	return 0;
}

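/*
 * Overlay unions for the versioned ATOM table layouts, so a single
 * pointer can be cast to whichever revision the vbios carries.
 */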
union igp_info {
	struct atom_integrated_system_info_v1_11 v11;
};

union umc_info {
	struct atom_umc_info_v3_1 v31;
};

union vram_info {
	struct atom_vram_info_header_v2_3 v23;
	struct atom_vram_info_header_v2_4 v24;
};

union vram_module {
	struct atom_vram_module_v9 v9;
	struct atom_vram_module_v10 v10;
};

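/*
 * Translate the memory type id used by the vbios tables into the
 * driver's AMDGPU_VRAM_TYPE_* enumeration (DDR types on APUs,
 * GDDR/HBM types on dGPUs).
 */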
static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
					      int atom_mem_type)
{
	int vram_type;

	if (adev->flags & AMD_IS_APU) {
		switch (atom_mem_type) {
		case Ddr2MemType:
		case LpDdr2MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR2;
			break;
		case Ddr3MemType:
		case LpDdr3MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR3;
			break;
		case Ddr4MemType:
		case LpDdr4MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR4;
			break;
		default:
			vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
			break;
		}
	} else {
		switch (atom_mem_type) {
		case ATOM_DGPU_VRAM_TYPE_GDDR5:
			vram_type = AMDGPU_VRAM_TYPE_GDDR5;
			break;
		case ATOM_DGPU_VRAM_TYPE_HBM2:
			vram_type = AMDGPU_VRAM_TYPE_HBM;
			break;
		case ATOM_DGPU_VRAM_TYPE_GDDR6:
			vram_type = AMDGPU_VRAM_TYPE_GDDR6;
			break;
		default:
			vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
			break;
		}
	}

	return vram_type;
}

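/*
 * Query vram width, type and vendor: from the integratedsysteminfo table
 * on APUs, or from the vram_info table on dGPUs, where the module in use
 * is identified by the id in bios scratch register 4.
 */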
int
amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
				  int *vram_width, int *vram_type,
				  int *vram_vendor)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index, i = 0;
	u16 data_offset, size;
	union igp_info *igp_info;
	union vram_info *vram_info;
	union vram_module *vram_module;
	u8 frev, crev;
	u8 mem_type;
	u8 mem_vendor;
	u32 mem_channel_number;
	u32 mem_channel_width;
	u32 module_id;

	if (adev->flags & AMD_IS_APU)
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    integratedsysteminfo);
	else
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    vram_info);

	if (amdgpu_atom_parse_data_header(mode_info->atom_context,
					  index, &size,
					  &frev, &crev, &data_offset)) {
		if (adev->flags & AMD_IS_APU) {
			igp_info = (union igp_info *)
				(mode_info->atom_context->bios + data_offset);
			switch (crev) {
			case 11:
			case 12:
				mem_channel_number = igp_info->v11.umachannelnumber;
				/* channel width is 64 */
				if (vram_width)
					*vram_width = mem_channel_number * 64;
				mem_type = igp_info->v11.memorytype;
				if (vram_type)
					*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
				break;
			default:
				return -EINVAL;
			}
		} else {
			vram_info = (union vram_info *)
				(mode_info->atom_context->bios + data_offset);
			module_id = (RREG32(adev->bios_scratch_reg_offset + 4) & 0x00ff0000) >> 16;
			switch (crev) {
			case 3:
				if (module_id > vram_info->v23.vram_module_num)
					module_id = 0;
				vram_module = (union vram_module *)vram_info->v23.vram_module;
				while (i < module_id) {
					vram_module = (union vram_module *)
						((u8 *)vram_module + vram_module->v9.vram_module_size);
					i++;
				}
				mem_type = vram_module->v9.memory_type;
				if (vram_type)
					*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
				mem_channel_number = vram_module->v9.channel_num;
				mem_channel_width = vram_module->v9.channel_width;
				if (vram_width)
					*vram_width = mem_channel_number * (1 << mem_channel_width);
				mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
				if (vram_vendor)
					*vram_vendor = mem_vendor;
				break;
			case 4:
				if (module_id > vram_info->v24.vram_module_num)
					module_id = 0;
				vram_module = (union vram_module *)vram_info->v24.vram_module;
				while (i < module_id) {
					vram_module = (union vram_module *)
						((u8 *)vram_module + vram_module->v10.vram_module_size);
					i++;
				}
				mem_type = vram_module->v10.memory_type;
				if (vram_type)
					*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
				mem_channel_number = vram_module->v10.channel_num;
				mem_channel_width = vram_module->v10.channel_width;
				if (vram_width)
					*vram_width = mem_channel_number * (1 << mem_channel_width);
				mem_vendor = (vram_module->v10.vender_rev_id) & 0xF;
				if (vram_vendor)
					*vram_vendor = mem_vendor;
				break;
			default:
				return -EINVAL;
			}
		}
	}

	return 0;
}

/*
 * Return true if vbios enabled ecc by default, if not return false
 */
bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	u16 data_offset, size;
	union umc_info *umc_info;
	u8 frev, crev;
	bool ecc_default_enabled = false;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    umc_info);

	if (amdgpu_atom_parse_data_header(mode_info->atom_context,
					  index, &size, &frev, &crev, &data_offset)) {
		/* support umc_info 3.1+ */
		if ((frev == 3 && crev >= 1) || (frev > 3)) {
			umc_info = (union umc_info *)
				(mode_info->atom_context->bios + data_offset);
			ecc_default_enabled =
				(le32_to_cpu(umc_info->v31.umc_config) &
				 UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
		}
	}

	return ecc_default_enabled;
}

union firmware_info {
	struct atom_firmware_info_v3_1 v31;
};

/*
 * Return true if vbios supports sram ecc or false if not
 */
bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	u16 data_offset, size;
	union firmware_info *firmware_info;
	u8 frev, crev;
	bool sram_ecc_supported = false;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
					  index, &size, &frev, &crev, &data_offset)) {
		/* support firmware_info 3.1+ */
		if ((frev == 3 && crev >= 1) || (frev > 3)) {
			firmware_info = (union firmware_info *)
				(mode_info->atom_context->bios + data_offset);
			sram_ecc_supported =
				(le32_to_cpu(firmware_info->v31.firmware_capability) &
				 ATOM_FIRMWARE_CAP_SRAM_ECC) ? true : false;
		}
	}

	return sram_ecc_supported;
}

union smu_info {
	struct atom_smu_info_v3_1 v31;
};

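/*
 * Read the bootup engine/memory clocks and reference clocks from the
 * firmwareinfo, smu_info and umc_info tables, and seed the spll/mpll
 * divider limits with default values.
 */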
int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	struct amdgpu_pll *spll = &adev->clock.spll;
	struct amdgpu_pll *mpll = &adev->clock.mpll;
	uint8_t frev, crev;
	uint16_t data_offset;
	int ret = -EINVAL, index;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union firmware_info *firmware_info =
			(union firmware_info *)(mode_info->atom_context->bios +
						data_offset);

		adev->clock.default_sclk =
			le32_to_cpu(firmware_info->v31.bootup_sclk_in10khz);
		adev->clock.default_mclk =
			le32_to_cpu(firmware_info->v31.bootup_mclk_in10khz);

		adev->pm.current_sclk = adev->clock.default_sclk;
		adev->pm.current_mclk = adev->clock.default_mclk;

		/* not technically a clock, but... */
		adev->mode_info.firmware_flags =
			le32_to_cpu(firmware_info->v31.firmware_capability);

		ret = 0;
	}

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    smu_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union smu_info *smu_info =
			(union smu_info *)(mode_info->atom_context->bios +
					   data_offset);

		/* system clock */
		spll->reference_freq = le32_to_cpu(smu_info->v31.core_refclk_10khz);

		spll->reference_div = 0;
		spll->min_post_div = 1;
		spll->max_post_div = 1;
		spll->min_ref_div = 2;
		spll->max_ref_div = 0xff;
		spll->min_feedback_div = 4;
		spll->max_feedback_div = 0xff;
		spll->best_vco = 0;

		ret = 0;
	}

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    umc_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union umc_info *umc_info =
			(union umc_info *)(mode_info->atom_context->bios +
					   data_offset);

		/* memory clock */
		mpll->reference_freq = le32_to_cpu(umc_info->v31.mem_refclk_10khz);

		mpll->reference_div = 0;
		mpll->min_post_div = 1;
		mpll->max_post_div = 1;
		mpll->min_ref_div = 2;
		mpll->max_ref_div = 0xff;
		mpll->min_feedback_div = 4;
		mpll->max_feedback_div = 0xff;
		mpll->best_vco = 0;

		ret = 0;
	}

	return ret;
}

union gfx_info {
	struct atom_gfx_info_v2_4 v24;
};

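/*
 * Populate adev->gfx.config and adev->gfx.cu_info from the gfx_info
 * table; only the v2.4 layout is handled here.
 */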
int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	uint8_t frev, crev;
	uint16_t data_offset;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    gfx_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union gfx_info *gfx_info = (union gfx_info *)
			(mode_info->atom_context->bios + data_offset);
		switch (crev) {
		case 4:
			adev->gfx.config.max_shader_engines = gfx_info->v24.max_shader_engines;
			adev->gfx.config.max_cu_per_sh = gfx_info->v24.max_cu_per_sh;
			adev->gfx.config.max_sh_per_se = gfx_info->v24.max_sh_per_se;
			adev->gfx.config.max_backends_per_se = gfx_info->v24.max_backends_per_se;
			adev->gfx.config.max_texture_channel_caches = gfx_info->v24.max_texture_channel_caches;
			adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v24.gc_num_gprs);
			adev->gfx.config.max_gs_threads = gfx_info->v24.gc_num_max_gs_thds;
			adev->gfx.config.gs_vgt_table_depth = gfx_info->v24.gc_gs_table_depth;
			adev->gfx.config.gs_prim_buffer_depth =
				le16_to_cpu(gfx_info->v24.gc_gsprim_buff_depth);
			adev->gfx.config.double_offchip_lds_buf =
				gfx_info->v24.gc_double_offchip_lds_buffer;
			adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v24.gc_wave_size);
			adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v24.gc_max_waves_per_simd);
			adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v24.gc_max_scratch_slots_per_cu;
			adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v24.gc_lds_size);
			return 0;
		default:
			return -EINVAL;
		}
	}
	return -EINVAL;
}

/*
 * Check if VBIOS supports GDDR6 training data save/restore
 */
static bool gddr6_mem_train_vbios_support(struct amdgpu_device *adev)
{
	uint16_t data_offset;
	int index;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);
	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
					  NULL, NULL, &data_offset)) {
		struct atom_firmware_info_v3_1 *firmware_info =
			(struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
							   data_offset);

		DRM_DEBUG("atom firmware capability:0x%08x.\n",
			  le32_to_cpu(firmware_info->firmware_capability));

		if (le32_to_cpu(firmware_info->firmware_capability) &
		    ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING)
			return true;
	}

	return false;
}

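/*
 * Returns 1 when both the vbios and the PSP support memory training,
 * 0 when the vbios does not request it, and -1 when the vbios requests
 * it on a PSP revision that cannot honour it.
 */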
static int gddr6_mem_train_support(struct amdgpu_device *adev)
{
	int ret;
	uint32_t major, minor, revision, hw_v;

	if (gddr6_mem_train_vbios_support(adev)) {
		amdgpu_discovery_get_ip_version(adev, MP0_HWID, &major, &minor, &revision);
		hw_v = HW_REV(major, minor, revision);
		/*
		 * Only MP0 (PSP) v11.0.0 and v11.0.5 are known to support
		 * the two-stage BIST training flow.
		 */
		switch (hw_v) {
		case HW_REV(11, 0, 0):
		case HW_REV(11, 0, 5):
			ret = 1;
			break;
		default:
			DRM_ERROR("memory training is supported by the vbios but not by psp hw(%08x)!\n",
				  hw_v);
			ret = -1;
			break;
		}
	} else {
		ret = 0;
		hw_v = -1;
	}

	DRM_DEBUG("mp0 hw_v %08x, ret:%d.\n", hw_v, ret);
	return ret;
}

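/*
 * Record whether GDDR6 memory training can be used on this part: bare
 * metal Navi10/Navi14 only, and only if the vram_usagebyfirmware table
 * is revision 2.1 or newer.
 */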
int amdgpu_atomfirmware_get_mem_train_info(struct amdgpu_device *adev)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	int index;
	uint8_t frev, crev;
	uint16_t data_offset, size;
	int ret;

	adev->fw_vram_usage.mem_train_support = false;

	if (adev->asic_type != CHIP_NAVI10 &&
	    adev->asic_type != CHIP_NAVI14)
		return 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

	ret = gddr6_mem_train_support(adev);
	if (ret == -1)
		return -EINVAL;
	else if (ret == 0)
		return 0;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    vram_usagebyfirmware);
	ret = amdgpu_atom_parse_data_header(ctx, index, &size, &frev, &crev,
					    &data_offset);
	if (ret == 0) {
		DRM_ERROR("parse data header failed.\n");
		return -EINVAL;
	}

	DRM_DEBUG("atom firmware common table header size:0x%04x, frev:0x%02x,"
		  " crev:0x%02x, data_offset:0x%04x.\n", size, frev, crev, data_offset);

	/* memory training requires vram_usagebyfirmware 2.1 or newer */
	if (((uint16_t)frev << 8 | crev) < 0x0201) {
		DRM_ERROR("frev:0x%02x, crev:0x%02x < 2.1!\n", frev, crev);
		return -EINVAL;
	}

	adev->fw_vram_usage.mem_train_support = true;
	return 0;
}