#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include "pp_psm.h"

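/*
 * Build the driver's power state table from the PP table entries reported
 * by the hwmgr backend.  The boot state is cached and copied into both the
 * current and requested state.
 */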
int psm_init_power_state_table(struct pp_hwmgr *hwmgr)
{
	int result;
	unsigned int i;
	unsigned int table_entries;
	struct pp_power_state *state;
	int size;

	if (hwmgr->hwmgr_func->get_num_of_pp_table_entries == NULL)
		return 0;

	if (hwmgr->hwmgr_func->get_power_state_size == NULL)
		return 0;

	hwmgr->num_ps = table_entries = hwmgr->hwmgr_func->get_num_of_pp_table_entries(hwmgr);

	hwmgr->ps_size = size = hwmgr->hwmgr_func->get_power_state_size(hwmgr) +
					sizeof(struct pp_power_state);

	if (table_entries == 0 || size == 0) {
		pr_warn("Please check whether power state management is supported on this asic\n");
		return 0;
	}

	hwmgr->ps = kcalloc(table_entries, size, GFP_KERNEL);
	if (hwmgr->ps == NULL)
		return -ENOMEM;

	hwmgr->request_ps = kzalloc(size, GFP_KERNEL);
	if (hwmgr->request_ps == NULL) {
		kfree(hwmgr->ps);
		hwmgr->ps = NULL;
		return -ENOMEM;
	}

	hwmgr->current_ps = kzalloc(size, GFP_KERNEL);
	if (hwmgr->current_ps == NULL) {
		kfree(hwmgr->request_ps);
		kfree(hwmgr->ps);
		hwmgr->request_ps = NULL;
		hwmgr->ps = NULL;
		return -ENOMEM;
	}

	state = hwmgr->ps;

	for (i = 0; i < table_entries; i++) {
		result = hwmgr->hwmgr_func->get_pp_table_entry(hwmgr, i, state);
		if (result) {
			/* also free current_ps so the error path does not leak it */
			kfree(hwmgr->current_ps);
			kfree(hwmgr->request_ps);
			kfree(hwmgr->ps);
			hwmgr->current_ps = NULL;
			hwmgr->request_ps = NULL;
			hwmgr->ps = NULL;
			return -EINVAL;
		}

		if (state->classification.flags & PP_StateClassificationFlag_Boot) {
			hwmgr->boot_ps = state;
			memcpy(hwmgr->current_ps, state, size);
			memcpy(hwmgr->request_ps, state, size);
		}

		state->id = i + 1;

		if (state->classification.flags & PP_StateClassificationFlag_Uvd)
			hwmgr->uvd_ps = state;
		state = (struct pp_power_state *)((unsigned long)state + size);
	}

	return 0;
}

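/*
 * Release the power state table and the current/requested state copies
 * allocated by psm_init_power_state_table().
 */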
int psm_fini_power_state_table(struct pp_hwmgr *hwmgr)
{
	if (hwmgr == NULL)
		return -EINVAL;

	if (!hwmgr->ps)
		return 0;

	kfree(hwmgr->current_ps);
	kfree(hwmgr->request_ps);
	kfree(hwmgr->ps);
	hwmgr->request_ps = NULL;
	hwmgr->ps = NULL;
	hwmgr->current_ps = NULL;
	return 0;
}

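/*
 * Look up the id of the first power state whose UI label matches the
 * requested label.
 */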
static int psm_get_ui_state(struct pp_hwmgr *hwmgr,
			    enum PP_StateUILabel ui_label,
			    unsigned long *state_id)
{
	struct pp_power_state *state;
	int table_entries;
	int i;

	table_entries = hwmgr->num_ps;
	state = hwmgr->ps;

	for (i = 0; i < table_entries; i++) {
		if (state->classification.ui_label & ui_label) {
			*state_id = state->id;
			return 0;
		}
		state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
	}
	return -EINVAL;
}

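/*
 * Look up the id of the first power state carrying the requested
 * classification flag (e.g. boot or UVD).
 */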
static int psm_get_state_by_classification(struct pp_hwmgr *hwmgr,
					   enum PP_StateClassificationFlag flag,
					   unsigned long *state_id)
{
	struct pp_power_state *state;
	int table_entries;
	int i;

	table_entries = hwmgr->num_ps;
	state = hwmgr->ps;

	for (i = 0; i < table_entries; i++) {
		if (state->classification.flags & flag) {
			*state_id = state->id;
			return 0;
		}
		state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
	}
	return -EINVAL;
}

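/*
 * Copy the power state with the given id into hwmgr->request_ps so it
 * becomes the state applied at the next adjustment.
 */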
static int psm_set_states(struct pp_hwmgr *hwmgr, unsigned long state_id)
{
	struct pp_power_state *state;
	int table_entries;
	int i;

	table_entries = hwmgr->num_ps;

	state = hwmgr->ps;

	for (i = 0; i < table_entries; i++) {
		if (state->id == state_id) {
			memcpy(hwmgr->request_ps, state, hwmgr->ps_size);
			return 0;
		}
		state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
	}
	return -EINVAL;
}

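/* Request the state flagged as the boot state, if a state table exists. */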
int psm_set_boot_states(struct pp_hwmgr *hwmgr)
{
	unsigned long state_id;
	int ret = -EINVAL;

	if (!hwmgr->ps)
		return 0;

	if (!psm_get_state_by_classification(hwmgr, PP_StateClassificationFlag_Boot,
					     &state_id))
		ret = psm_set_states(hwmgr, state_id);

	return ret;
}

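/* Request the state carrying the performance UI label, if a state table exists. */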
int psm_set_performance_states(struct pp_hwmgr *hwmgr)
{
	unsigned long state_id;
	int ret = -EINVAL;

	if (!hwmgr->ps)
		return 0;

	if (!psm_get_ui_state(hwmgr, PP_StateUILabel_Performance,
			      &state_id))
		ret = psm_set_states(hwmgr, state_id);

	return ret;
}

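/*
 * Return the first power state matching the requested UI label.  Battery
 * and balanced requests fall back to the performance label when no
 * matching entry is found.
 */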
int psm_set_user_performance_state(struct pp_hwmgr *hwmgr,
				   enum PP_StateUILabel label_id,
				   struct pp_power_state **state)
{
	int table_entries;
	int i;

	if (!hwmgr->ps)
		return 0;

	table_entries = hwmgr->num_ps;
	*state = hwmgr->ps;

restart_search:
	for (i = 0; i < table_entries; i++) {
		if ((*state)->classification.ui_label & label_id)
			return 0;
		*state = (struct pp_power_state *)((uintptr_t)*state + hwmgr->ps_size);
	}

	switch (label_id) {
	case PP_StateUILabel_Battery:
	case PP_StateUILabel_Balanced:
		label_id = PP_StateUILabel_Performance;
		*state = hwmgr->ps;
		goto restart_search;
	default:
		break;
	}
	return -EINVAL;
}

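/*
 * Apply the state adjust rules to the requested state and program it into
 * hardware when it differs from the current state or when the display
 * configuration requires an SMC update, then record it as current.
 */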
static void power_state_management(struct pp_hwmgr *hwmgr,
				   struct pp_power_state *new_ps)
{
	struct pp_power_state *pcurrent;
	struct pp_power_state *requested;
	bool equal;

	if (new_ps != NULL)
		requested = new_ps;
	else
		requested = hwmgr->request_ps;

	pcurrent = hwmgr->current_ps;

	phm_apply_state_adjust_rules(hwmgr, requested, pcurrent);
	if (pcurrent == NULL || (0 != phm_check_states_equal(hwmgr,
			&pcurrent->hardware, &requested->hardware, &equal)))
		equal = false;

	if (!equal || phm_check_smc_update_required_for_display_configuration(hwmgr)) {
		phm_set_power_state(hwmgr, &pcurrent->hardware, &requested->hardware);
		memcpy(hwmgr->current_ps, hwmgr->request_ps, hwmgr->ps_size);
	}
}

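/*
 * Re-evaluate the power state after a display or workload change: notify
 * the backend of display configuration changes, reprogram the requested
 * state (or only the clock limits when no state table exists), apply the
 * requested forced DPM level and re-apply the active power profile mode.
 */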
int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip_display_settings,
				   struct pp_power_state *new_ps)
{
	uint32_t index;
	long workload;

	if (hwmgr->not_vf) {
		if (!skip_display_settings)
			phm_display_configuration_changed(hwmgr);

		if (hwmgr->ps)
			power_state_management(hwmgr, new_ps);
		else
			/*
			 * for vega12/vega20 which does not support power state manager
			 * DAL clock limits should also be honoured
			 */
			phm_apply_clock_adjust_rules(hwmgr);

		if (!skip_display_settings)
			phm_notify_smc_display_config_after_ps_adjustment(hwmgr);
	}

	if (!phm_force_dpm_levels(hwmgr, hwmgr->request_dpm_level))
		hwmgr->dpm_level = hwmgr->request_dpm_level;

	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		index = fls(hwmgr->workload_mask);
		index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
		workload = hwmgr->workload_setting[index];

		if (hwmgr->power_profile_mode != workload && hwmgr->hwmgr_func->set_power_profile_mode)
			hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
	}

	return 0;
}