#include "hwmgr.h"
#include "pp_debug.h"
#include "ppatomctrl.h"
#include "ppsmc.h"
#include "atom.h"
#include "ivsrcid/thm/irqsrcs_thm_9_0.h"
#include "ivsrcid/smuio/irqsrcs_smuio_9_0.h"
#include "ivsrcid/ivsrcid_vislands30.h"

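/*
 * Convert between a VDDC value and the 8-bit VID code programmed into the
 * voltage regulator; the encoding used here is
 * VID = (6200 - vddc * VOLTAGE_SCALE) / 25.
 */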
uint8_t convert_to_vid(uint16_t vddc)
{
	return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
}

uint16_t convert_to_vddc(uint8_t vid)
{
	return (uint16_t) ((6200 - (vid * 25)) / VOLTAGE_SCALE);
}

int phm_copy_clock_limits_array(
	struct pp_hwmgr *hwmgr,
	uint32_t **pptable_info_array,
	const uint32_t *pptable_array,
	uint32_t power_saving_clock_count)
{
	uint32_t array_size, i;
	uint32_t *table;

	array_size = sizeof(uint32_t) * power_saving_clock_count;
	table = kzalloc(array_size, GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	for (i = 0; i < power_saving_clock_count; i++)
		table[i] = le32_to_cpu(pptable_array[i]);

	*pptable_info_array = table;

	return 0;
}

int phm_copy_overdrive_settings_limits_array(
	struct pp_hwmgr *hwmgr,
	uint32_t **pptable_info_array,
	const uint32_t *pptable_array,
	uint32_t od_setting_count)
{
	uint32_t array_size, i;
	uint32_t *table;

	array_size = sizeof(uint32_t) * od_setting_count;
	table = kzalloc(array_size, GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	for (i = 0; i < od_setting_count; i++)
		table[i] = le32_to_cpu(pptable_array[i]);

	*pptable_info_array = table;

	return 0;
}

uint32_t phm_set_field_to_u32(u32 offset, u32 original_data, u32 field, u32 size)
{
	u32 mask = 0;
	u32 shift = 0;

	shift = (offset % 4) << 3;
	if (size == sizeof(uint8_t))
		mask = 0xFF << shift;
	else if (size == sizeof(uint16_t))
		mask = 0xFFFF << shift;

	original_data &= ~mask;
	original_data |= (field << shift);
	return original_data;
}

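/*
 * Poll the register at @index until the bits selected by @mask match
 * @value, or until hwmgr->usec_timeout microseconds have elapsed.
 */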
int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
			 uint32_t value, uint32_t mask)
{
	uint32_t i;
	uint32_t cur_value;

	if (hwmgr == NULL || hwmgr->device == NULL) {
		pr_err("Invalid Hardware Manager!");
		return -EINVAL;
	}

	for (i = 0; i < hwmgr->usec_timeout; i++) {
		cur_value = cgs_read_register(hwmgr->device, index);
		if ((cur_value & mask) == (value & mask))
			break;
		udelay(1);
	}

	/* exhausting the timeout means the register never reached the value */
	if (i == hwmgr->usec_timeout)
		return -ETIME;
	return 0;
}

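/*
 * Same as phm_wait_on_register(), but for a register reached through an
 * index/data pair: @index is first written to @indirect_port and the data
 * register at @indirect_port + 1 is then polled.
 */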
int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
				uint32_t indirect_port,
				uint32_t index,
				uint32_t value,
				uint32_t mask)
{
	if (hwmgr == NULL || hwmgr->device == NULL) {
		pr_err("Invalid Hardware Manager!");
		return -EINVAL;
	}

	cgs_write_register(hwmgr->device, indirect_port, index);
	return phm_wait_on_register(hwmgr, indirect_port + 1, value, mask);
}

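/*
 * Counterpart of phm_wait_on_register(): wait until the masked register
 * value becomes different from @value, or the timeout expires.
 */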
int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
				uint32_t index,
				uint32_t value, uint32_t mask)
{
	uint32_t i;
	uint32_t cur_value;

	if (hwmgr == NULL || hwmgr->device == NULL)
		return -EINVAL;

	for (i = 0; i < hwmgr->usec_timeout; i++) {
		cur_value = cgs_read_register(hwmgr->device, index);
		if ((cur_value & mask) != (value & mask))
			break;
		udelay(1);
	}

	if (i == hwmgr->usec_timeout)
		return -ETIME;
	return 0;
}

int phm_wait_for_indirect_register_unequal(struct pp_hwmgr *hwmgr,
				uint32_t indirect_port,
				uint32_t index,
				uint32_t value,
				uint32_t mask)
{
	if (hwmgr == NULL || hwmgr->device == NULL)
		return -EINVAL;

	cgs_write_register(hwmgr->device, indirect_port, index);
	return phm_wait_for_register_unequal(hwmgr, indirect_port + 1,
					value, mask);
}

bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr)
{
	return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_UVDPowerGating);
}

bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr)
{
	return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_VCEPowerGating);
}

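/*
 * Remove duplicate voltage values from @vol_table in place, keeping the
 * first occurrence of each value together with its smio_low setting.
 */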
int phm_trim_voltage_table(struct pp_atomctrl_voltage_table *vol_table)
{
	uint32_t i, j;
	uint16_t vvalue;
	bool found = false;
	struct pp_atomctrl_voltage_table *table;

	PP_ASSERT_WITH_CODE((NULL != vol_table),
			"Voltage Table empty.", return -EINVAL);

	table = kzalloc(sizeof(struct pp_atomctrl_voltage_table),
			GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	table->mask_low = vol_table->mask_low;
	table->phase_delay = vol_table->phase_delay;

	for (i = 0; i < vol_table->count; i++) {
		vvalue = vol_table->entries[i].value;
		found = false;

		for (j = 0; j < table->count; j++) {
			if (vvalue == table->entries[j].value) {
				found = true;
				break;
			}
		}

		if (!found) {
			table->entries[table->count].value = vvalue;
			table->entries[table->count].smio_low =
					vol_table->entries[i].smio_low;
			table->count++;
		}
	}

	memcpy(vol_table, table, sizeof(struct pp_atomctrl_voltage_table));
	kfree(table);

	return 0;
}

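/*
 * Build an MVDD voltage table from a clock/voltage dependency table and
 * trim duplicate entries.
 */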
int phm_get_svi2_mvdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
		phm_ppt_v1_clock_voltage_dependency_table *dep_table)
{
	uint32_t i;
	int result;

	PP_ASSERT_WITH_CODE((0 != dep_table->count),
			"Voltage Dependency Table empty.", return -EINVAL);

	PP_ASSERT_WITH_CODE((NULL != vol_table),
			"vol_table empty.", return -EINVAL);

	vol_table->mask_low = 0;
	vol_table->phase_delay = 0;
	vol_table->count = dep_table->count;

	for (i = 0; i < dep_table->count; i++) {
		vol_table->entries[i].value = dep_table->entries[i].mvdd;
		vol_table->entries[i].smio_low = 0;
	}

	result = phm_trim_voltage_table(vol_table);
	PP_ASSERT_WITH_CODE((0 == result),
			"Failed to trim MVDD table.", return result);

	return 0;
}

int phm_get_svi2_vddci_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
		phm_ppt_v1_clock_voltage_dependency_table *dep_table)
{
	uint32_t i;
	int result;

	PP_ASSERT_WITH_CODE((0 != dep_table->count),
			"Voltage Dependency Table empty.", return -EINVAL);

	PP_ASSERT_WITH_CODE((NULL != vol_table),
			"vol_table empty.", return -EINVAL);

	vol_table->mask_low = 0;
	vol_table->phase_delay = 0;
	vol_table->count = dep_table->count;

	for (i = 0; i < dep_table->count; i++) {
		vol_table->entries[i].value = dep_table->entries[i].vddci;
		vol_table->entries[i].smio_low = 0;
	}

	result = phm_trim_voltage_table(vol_table);
	PP_ASSERT_WITH_CODE((0 == result),
			"Failed to trim VDDCI table.", return result);

	return 0;
}

int phm_get_svi2_vdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
		phm_ppt_v1_voltage_lookup_table *lookup_table)
{
	int i = 0;

	PP_ASSERT_WITH_CODE((0 != lookup_table->count),
			"Voltage Lookup Table empty.", return -EINVAL);

	PP_ASSERT_WITH_CODE((NULL != vol_table),
			"vol_table empty.", return -EINVAL);

	vol_table->mask_low = 0;
	vol_table->phase_delay = 0;
	vol_table->count = lookup_table->count;

	for (i = 0; i < vol_table->count; i++) {
		vol_table->entries[i].value = lookup_table->entries[i].us_vdd;
		vol_table->entries[i].smio_low = 0;
	}

	return 0;
}

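/*
 * If @vol_table has more entries than the state table can hold, drop the
 * leading entries so that only the last @max_vol_steps values remain.
 */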
void phm_trim_voltage_table_to_fit_state_table(uint32_t max_vol_steps,
		struct pp_atomctrl_voltage_table *vol_table)
{
	unsigned int i, diff;

	if (vol_table->count <= max_vol_steps)
		return;

	diff = vol_table->count - max_vol_steps;

	for (i = 0; i < max_vol_steps; i++)
		vol_table->entries[i] = vol_table->entries[i + diff];

	vol_table->count = max_vol_steps;
}

int phm_reset_single_dpm_table(void *table,
		uint32_t count, int max)
{
	int i;
	struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;

	dpm_table->count = count > max ? max : count;

	for (i = 0; i < dpm_table->count; i++)
		dpm_table->dpm_level[i].enabled = false;

	return 0;
}

void phm_setup_pcie_table_entry(
	void *table,
	uint32_t index, uint32_t pcie_gen,
	uint32_t pcie_lanes)
{
	struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;

	dpm_table->dpm_level[index].value = pcie_gen;
	dpm_table->dpm_level[index].param1 = pcie_lanes;
	dpm_table->dpm_level[index].enabled = true;
}

int32_t phm_get_dpm_level_enable_mask_value(void *table)
{
	int32_t i;
	int32_t mask = 0;
	struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;

	for (i = dpm_table->count; i > 0; i--) {
		mask = mask << 1;
		if (dpm_table->dpm_level[i - 1].enabled)
			mask |= 0x1;
		else
			mask &= 0xFFFFFFFE;
	}

	return mask;
}

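/*
 * Return the index of the first lookup-table entry whose voltage is equal
 * to or higher than @voltage, or the last index if no such entry exists.
 */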
uint8_t phm_get_voltage_index(
		struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage)
{
	uint8_t count;
	uint8_t i;

	PP_ASSERT_WITH_CODE((NULL != lookup_table),
			"Lookup Table empty.", return 0);

	count = (uint8_t) (lookup_table->count);
	PP_ASSERT_WITH_CODE((0 != count),
			"Lookup Table empty.", return 0);

	for (i = 0; i < lookup_table->count; i++) {
		if (lookup_table->entries[i].us_vdd >= voltage)
			return i;
	}

	return i - 1;
}

uint8_t phm_get_voltage_id(pp_atomctrl_voltage_table *voltage_table,
		uint32_t voltage)
{
	uint8_t count;
	uint8_t i = 0;

	PP_ASSERT_WITH_CODE((NULL != voltage_table),
			"Voltage Table empty.", return 0);

	count = (uint8_t) (voltage_table->count);
	PP_ASSERT_WITH_CODE((0 != count),
			"Voltage Table empty.", return 0);

	for (i = 0; i < count; i++) {
		if (voltage_table->entries[i].value >= voltage)
			return i;
	}

	return i - 1;
}

uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci)
{
	uint32_t i;

	for (i = 0; i < vddci_table->count; i++) {
		if (vddci_table->entries[i].value >= vddci)
			return vddci_table->entries[i].value;
	}

	pr_debug("vddci is larger than max value in vddci_table\n");
	return vddci_table->entries[i - 1].value;
}

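/*
 * Look up @value in the DPM table and report its level index through
 * @boot_level; returns -EINVAL if the value is not present.
 */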
int phm_find_boot_level(void *table,
		uint32_t value, uint32_t *boot_level)
{
	int result = -EINVAL;
	uint32_t i;
	struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;

	for (i = 0; i < dpm_table->count; i++) {
		if (value == dpm_table->dpm_level[i].value) {
			*boot_level = i;
			result = 0;
		}
	}

	return result;
}

int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
	phm_ppt_v1_voltage_lookup_table *lookup_table,
	uint16_t virtual_voltage_id, int32_t *sclk)
{
	uint8_t entry_id;
	uint8_t voltage_id;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);

	PP_ASSERT_WITH_CODE(lookup_table->count != 0,
			"Lookup table is empty", return -EINVAL);

	for (entry_id = 0; entry_id < table_info->vdd_dep_on_sclk->count; entry_id++) {
		voltage_id = table_info->vdd_dep_on_sclk->entries[entry_id].vddInd;
		if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id)
			break;
	}

	if (entry_id >= table_info->vdd_dep_on_sclk->count) {
		pr_debug("Can't find requested voltage id in vdd_dep_on_sclk table\n");
		return -EINVAL;
	}

	*sclk = table_info->vdd_dep_on_sclk->entries[entry_id].clk;

	return 0;
}

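/*
 * Populate the default VDDC vs. DAL power level dependency table used for
 * dynamic state adjustment and hook it up to the pptable info and hwmgr.
 */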
int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr)
{
	uint32_t table_size;
	struct phm_clock_voltage_dependency_table *table_clk_vlt;
	struct phm_ppt_v1_information *pptable_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);

	/* the table holds one entry per DAL power level */
	table_size = sizeof(uint32_t) +
			4 * sizeof(struct phm_clock_voltage_dependency_record);
	table_clk_vlt = kzalloc(table_size, GFP_KERNEL);
	if (!table_clk_vlt) {
		pr_err("Cannot allocate space for vddc_dep_on_dal_pwrl!\n");
		return -ENOMEM;
	}

	table_clk_vlt->count = 4;
	table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW;
	table_clk_vlt->entries[0].v = 0;
	table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW;
	table_clk_vlt->entries[1].v = 720;
	table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL;
	table_clk_vlt->entries[2].v = 810;
	table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE;
	table_clk_vlt->entries[3].v = 900;
	if (pptable_info != NULL)
		pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt;
	hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;

	return 0;
}

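/*
 * Return the position of the lowest set bit in @mask.  The caller must
 * pass a non-zero mask, otherwise the scan would never terminate.
 */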
uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask)
{
	uint32_t level = 0;

	while (0 == (mask & (1 << level)))
		level++;

	return level;
}

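/*
 * Translate the DAL power level requested by the display stack into a
 * VDDC request and forward it to the SMC via PPSMC_MSG_VddC_Request.
 */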
void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
{
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;
	struct phm_clock_voltage_dependency_table *table =
			table_info->vddc_dep_on_dal_pwrl;
	struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table;
	enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level;
	uint32_t req_vddc = 0, req_volt, i;

	if (!table || table->count <= 0
		|| dal_power_level < PP_DAL_POWERLEVEL_ULTRALOW
		|| dal_power_level > PP_DAL_POWERLEVEL_PERFORMANCE)
		return;

	for (i = 0; i < table->count; i++) {
		if (dal_power_level == table->entries[i].clk) {
			req_vddc = table->entries[i].v;
			break;
		}
	}

	vddc_table = table_info->vdd_dep_on_sclk;
	for (i = 0; i < vddc_table->count; i++) {
		if (req_vddc <= vddc_table->entries[i].vddc) {
			req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE);
			smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_VddC_Request, req_volt);
			return;
		}
	}
	pr_err("Can't find an available voltage in the VDDC DPM table for the DAL requested level\n");
}

int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
				uint32_t sclk, uint16_t id, uint16_t *voltage)
{
	uint32_t vol;
	int ret = 0;

	if (hwmgr->chip_id < CHIP_TONGA) {
		ret = atomctrl_get_voltage_evv(hwmgr, id, voltage);
	} else if (hwmgr->chip_id < CHIP_POLARIS10) {
		ret = atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage);
		if (*voltage >= 2000 || *voltage == 0)
			*voltage = 1150;
	} else {
		ret = atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, voltage_type, sclk, id, &vol);
		*voltage = (uint16_t)(vol / 100);
	}
	return ret;
}

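/*
 * Common thermal interrupt handler: map the incoming IV entry to an over
 * temperature, under temperature or critical temperature event and log it
 * against the GPU's PCI address.
 */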
int phm_irq_process(struct amdgpu_device *adev,
			struct amdgpu_irq_src *source,
			struct amdgpu_iv_entry *entry)
{
	uint32_t client_id = entry->client_id;
	uint32_t src_id = entry->src_id;

	if (client_id == AMDGPU_IRQ_CLIENTID_LEGACY) {
		if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH)
			pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
				PCI_BUS_NUM(adev->pdev->devfn),
				PCI_SLOT(adev->pdev->devfn),
				PCI_FUNC(adev->pdev->devfn));
		else if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW)
			pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
				PCI_BUS_NUM(adev->pdev->devfn),
				PCI_SLOT(adev->pdev->devfn),
				PCI_FUNC(adev->pdev->devfn));
		else if (src_id == VISLANDS30_IV_SRCID_GPIO_19)
			pr_warn("GPU Critical Temperature Fault detected on PCIe %d:%d.%d!\n",
				PCI_BUS_NUM(adev->pdev->devfn),
				PCI_SLOT(adev->pdev->devfn),
				PCI_FUNC(adev->pdev->devfn));
	} else if (client_id == SOC15_IH_CLIENTID_THM) {
		if (src_id == 0)
			pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
				PCI_BUS_NUM(adev->pdev->devfn),
				PCI_SLOT(adev->pdev->devfn),
				PCI_FUNC(adev->pdev->devfn));
		else
			pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
				PCI_BUS_NUM(adev->pdev->devfn),
				PCI_SLOT(adev->pdev->devfn),
				PCI_FUNC(adev->pdev->devfn));
	} else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO) {
		pr_warn("GPU Critical Temperature Fault detected on PCIe %d:%d.%d!\n",
			PCI_BUS_NUM(adev->pdev->devfn),
			PCI_SLOT(adev->pdev->devfn),
			PCI_FUNC(adev->pdev->devfn));
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs smu9_irq_funcs = {
	.process = phm_irq_process,
};

int smu9_register_irq_handlers(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_irq_src *source =
		kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);

	if (!source)
		return -ENOMEM;

	source->funcs = &smu9_irq_funcs;

	amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
			SOC15_IH_CLIENTID_THM,
			THM_9_0__SRCID__THM_DIG_THERM_L2H,
			source);
	amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
			SOC15_IH_CLIENTID_THM,
			THM_9_0__SRCID__THM_DIG_THERM_H2L,
			source);

	amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
			SOC15_IH_CLIENTID_ROM_SMUIO,
			SMUIO_9_0__SRCID__SMUIO_GPIO19,
			source);

	return 0;
}

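/*
 * Return a pointer to the requested data table inside the VBIOS image, or
 * NULL if the ATOM data header cannot be parsed.
 */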
void *smu_atom_get_data_table(void *dev, uint32_t table, uint16_t *size,
			      uint8_t *frev, uint8_t *crev)
{
	struct amdgpu_device *adev = dev;
	uint16_t data_start;

	if (amdgpu_atom_parse_data_header(
			adev->mode_info.atom_context, table, size,
			frev, crev, &data_start))
		return (uint8_t *)adev->mode_info.atom_context->bios +
				data_start;

	return NULL;
}

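/*
 * Copy every field of each entry from @allowed_dep_table into @dep_table,
 * entry by entry.
 */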
int smu_get_voltage_dependency_table_ppt_v1(
		const struct phm_ppt_v1_clock_voltage_dependency_table *allowed_dep_table,
		struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
{
	uint8_t i = 0;

	PP_ASSERT_WITH_CODE((0 != allowed_dep_table->count),
			"Voltage Lookup Table empty",
			return -EINVAL);

	dep_table->count = allowed_dep_table->count;
	for (i = 0; i < dep_table->count; i++) {
		dep_table->entries[i].clk = allowed_dep_table->entries[i].clk;
		dep_table->entries[i].vddInd = allowed_dep_table->entries[i].vddInd;
		dep_table->entries[i].vdd_offset = allowed_dep_table->entries[i].vdd_offset;
		dep_table->entries[i].vddc = allowed_dep_table->entries[i].vddc;
		dep_table->entries[i].vddgfx = allowed_dep_table->entries[i].vddgfx;
		dep_table->entries[i].vddci = allowed_dep_table->entries[i].vddci;
		dep_table->entries[i].mvdd = allowed_dep_table->entries[i].mvdd;
		dep_table->entries[i].phases = allowed_dep_table->entries[i].phases;
		dep_table->entries[i].cks_enable = allowed_dep_table->entries[i].cks_enable;
		dep_table->entries[i].cks_voffset = allowed_dep_table->entries[i].cks_voffset;
	}

	return 0;
}

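/*
 * Fill the SMU watermarks table from the DM-provided clock ranges: row 1
 * holds the DMIF (display) sets and row 0 the MCIF (memory) sets, with
 * clocks converted from kHz to MHz before being stored little-endian.
 */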
int smu_set_watermarks_for_clocks_ranges(void *wt_table,
		struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
{
	uint32_t i;
	struct watermarks *table = wt_table;

	if (!table || !wm_with_clock_ranges)
		return -EINVAL;

	if (wm_with_clock_ranges->num_wm_dmif_sets > 4 ||
	    wm_with_clock_ranges->num_wm_mcif_sets > 4)
		return -EINVAL;

	for (i = 0; i < wm_with_clock_ranges->num_wm_dmif_sets; i++) {
		table->WatermarkRow[1][i].MinClock =
			cpu_to_le16((uint16_t)
			(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
			1000));
		table->WatermarkRow[1][i].MaxClock =
			cpu_to_le16((uint16_t)
			(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
			1000));
		table->WatermarkRow[1][i].MinUclk =
			cpu_to_le16((uint16_t)
			(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
			1000));
		table->WatermarkRow[1][i].MaxUclk =
			cpu_to_le16((uint16_t)
			(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
			1000));
		table->WatermarkRow[1][i].WmSetting = (uint8_t)
			wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
	}

	for (i = 0; i < wm_with_clock_ranges->num_wm_mcif_sets; i++) {
		table->WatermarkRow[0][i].MinClock =
			cpu_to_le16((uint16_t)
			(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
			1000));
		table->WatermarkRow[0][i].MaxClock =
			cpu_to_le16((uint16_t)
			(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
			1000));
		table->WatermarkRow[0][i].MinUclk =
			cpu_to_le16((uint16_t)
			(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
			1000));
		table->WatermarkRow[0][i].MaxUclk =
			cpu_to_le16((uint16_t)
			(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
			1000));
		table->WatermarkRow[0][i].WmSetting = (uint8_t)
			wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
	}
	return 0;
}