1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26#include <linux/slab.h>
27
28#include "reg_helper.h"
29#include "core_types.h"
30#include "clk_mgr_internal.h"
31#include "rv1_clk_mgr.h"
32#include "dce100/dce_clk_mgr.h"
33#include "dce112/dce112_clk_mgr.h"
34#include "rv1_clk_mgr_vbios_smu.h"
35#include "rv1_clk_mgr_clk.h"
36
37void rv1_init_clocks(struct clk_mgr *clk_mgr)
38{
39 memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
40}
41
42static int rv1_determine_dppclk_threshold(struct clk_mgr_internal *clk_mgr, struct dc_clocks *new_clocks)
43{
44 bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
45 bool dispclk_increase = new_clocks->dispclk_khz > clk_mgr->base.clks.dispclk_khz;
46 int disp_clk_threshold = new_clocks->max_supported_dppclk_khz;
47 bool cur_dpp_div = clk_mgr->base.clks.dispclk_khz > clk_mgr->base.clks.dppclk_khz;
48
49
50 if (dispclk_increase) {
51
52 if (cur_dpp_div)
53 return new_clocks->dispclk_khz;
54
55
56
57
58 if (new_clocks->dispclk_khz <= disp_clk_threshold)
59 return new_clocks->dispclk_khz;
60
61
62 if (!request_dpp_div)
63 return new_clocks->dispclk_khz;
64
65 } else {
66
67
68
69
70
71 if (!cur_dpp_div)
72 return new_clocks->dispclk_khz;
73
74
75
76
77 if (clk_mgr->base.clks.dispclk_khz <= disp_clk_threshold)
78 return new_clocks->dispclk_khz;
79
80
81 if (request_dpp_div)
82 return new_clocks->dispclk_khz;
83 }
84
85 return disp_clk_threshold;
86}
87
/*
 * Transition DISPCLK to new_clocks->dispclk_khz in steps so the per-pipe
 * DPPCLK divider can be reprogrammed at a safe intermediate frequency
 * (the threshold chosen by rv1_determine_dppclk_threshold()).
 *
 * Sequence matters: DISPCLK is first moved to the threshold, then the
 * dividers are programmed, then the final DISPCLK is applied.
 */
static void ramp_up_dispclk_with_dpp(
		struct clk_mgr_internal *clk_mgr,
		struct dc *dc,
		struct dc_clocks *new_clocks,
		bool safe_to_lower)
{
	int i;
	int dispclk_to_dpp_threshold = rv1_determine_dppclk_threshold(clk_mgr, new_clocks);
	bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;

	/* The divider request is only honored on a pass where lowering
	 * clocks is allowed.
	 */
	if (!safe_to_lower)
		request_dpp_div = false;

	/* Step 1: move DISPCLK (and DPREFCLK) to the intermediate threshold. */
	clk_mgr->funcs->set_dispclk(clk_mgr, dispclk_to_dpp_threshold);
	clk_mgr->funcs->set_dprefclk(clk_mgr);

	/* Step 2: program the DPPCLK divider on every pipe that has a plane. */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		if (!pipe_ctx->plane_state)
			continue;

		pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control(
				pipe_ctx->plane_res.dpp,
				request_dpp_div,
				true);
	}

	/* Step 3: apply the final DISPCLK if the threshold was not already it. */
	if (dispclk_to_dpp_threshold != new_clocks->dispclk_khz) {
		clk_mgr->funcs->set_dispclk(clk_mgr, new_clocks->dispclk_khz);
		clk_mgr->funcs->set_dprefclk(clk_mgr);
	}

	/* Cache the values just programmed. */
	clk_mgr->base.clks.dispclk_khz = new_clocks->dispclk_khz;
	clk_mgr->base.clks.dppclk_khz = new_clocks->dppclk_khz;
	clk_mgr->base.clks.max_supported_dppclk_khz = new_clocks->max_supported_dppclk_khz;
}
189
/*
 * Program all Raven 1 clocks for the given state via the pp_smu interface.
 *
 * Hard-minimum raises are sent to the SMU *before* the display clocks are
 * reprogrammed; lowers are only sent *after*, and only when @safe_to_lower
 * is set, so hardware never runs below what the active state still needs.
 */
static void rv1_update_clocks(struct clk_mgr *clk_mgr_base,
			struct dc_state *context,
			bool safe_to_lower)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
	struct dc *dc = clk_mgr_base->ctx->dc;
	struct dc_debug_options *debug = &dc->debug;
	struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
	struct pp_smu_funcs_rv *pp_smu = NULL;
	bool send_request_to_increase = false;
	bool send_request_to_lower = false;
	int display_count;

	bool enter_display_off = false;

	ASSERT(clk_mgr->pp_smu);

	/* Workaround flag: some sequences must not touch clocks at all. */
	if (dc->work_arounds.skip_clock_update)
		return;

	pp_smu = &clk_mgr->pp_smu->rv_funcs;

	display_count = clk_mgr_helper_get_active_display_cnt(dc, context);

	if (display_count == 0)
		enter_display_off = true;

	if (enter_display_off == safe_to_lower) {
		/*
		 * Notify the SMU of the active display count: when the count
		 * drops to zero this fires on the safe_to_lower pass, and
		 * when displays are active it fires on the raise pass
		 * (both operands false).
		 */
		if (pp_smu->set_display_count)
			pp_smu->set_display_count(&pp_smu->pp_smu, display_count);
	}

	/* Any clock going up needs the SMU minimums raised first. */
	if (new_clocks->dispclk_khz > clk_mgr_base->clks.dispclk_khz
			|| new_clocks->phyclk_khz > clk_mgr_base->clks.phyclk_khz
			|| new_clocks->fclk_khz > clk_mgr_base->clks.fclk_khz
			|| new_clocks->dcfclk_khz > clk_mgr_base->clks.dcfclk_khz)
		send_request_to_increase = true;

	if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr_base->clks.phyclk_khz)) {
		clk_mgr_base->clks.phyclk_khz = new_clocks->phyclk_khz;
		send_request_to_lower = true;
	}

	/* Debug override for FCLK. */
	if (debug->force_fclk_khz != 0)
		new_clocks->fclk_khz = debug->force_fclk_khz;

	if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, clk_mgr_base->clks.fclk_khz)) {
		clk_mgr_base->clks.fclk_khz = new_clocks->fclk_khz;
		send_request_to_lower = true;
	}

	if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
		clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
		send_request_to_lower = true;
	}

	if (should_set_clock(safe_to_lower,
			new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) {
		clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
		send_request_to_lower = true;
	}

	/* Raise SMU hard minimums *before* programming higher clocks. */
	if (send_request_to_increase) {
		if (pp_smu->set_hard_min_fclk_by_freq &&
				pp_smu->set_hard_min_dcfclk_by_freq &&
				pp_smu->set_min_deep_sleep_dcfclk) {
			pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, khz_to_mhz_ceil(new_clocks->fclk_khz));
			pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, khz_to_mhz_ceil(new_clocks->dcfclk_khz));
			pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, khz_to_mhz_ceil(new_clocks->dcfclk_deep_sleep_khz));
		}
	}

	/*
	 * DISPCLK is re-rampped even when the value is unchanged so the DPP
	 * dividers programmed by ramp_up_dispclk_with_dpp() stay consistent
	 * with the requested DPPCLK.
	 */
	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)
			|| new_clocks->dispclk_khz == clk_mgr_base->clks.dispclk_khz) {
		ramp_up_dispclk_with_dpp(clk_mgr, dc, new_clocks, safe_to_lower);
		clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
		send_request_to_lower = true;
	}

	/* Lower SMU hard minimums only *after* the clocks came down. */
	if (!send_request_to_increase && send_request_to_lower) {
		if (pp_smu->set_hard_min_fclk_by_freq &&
				pp_smu->set_hard_min_dcfclk_by_freq &&
				pp_smu->set_min_deep_sleep_dcfclk) {
			pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, khz_to_mhz_ceil(new_clocks->fclk_khz));
			pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, khz_to_mhz_ceil(new_clocks->dcfclk_khz));
			pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, khz_to_mhz_ceil(new_clocks->dcfclk_deep_sleep_khz));
		}
	}
}
293
294static void rv1_enable_pme_wa(struct clk_mgr *clk_mgr_base)
295{
296 struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
297 struct pp_smu_funcs_rv *pp_smu = NULL;
298
299 if (clk_mgr->pp_smu) {
300 pp_smu = &clk_mgr->pp_smu->rv_funcs;
301
302 if (pp_smu->set_pme_wa_enable)
303 pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
304 }
305}
306
/* Public clk_mgr vtable for Raven 1; DP ref clock is handled by DCE12 code. */
static struct clk_mgr_funcs rv1_clk_funcs = {
	.init_clocks = rv1_init_clocks,
	.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
	.update_clocks = rv1_update_clocks,
	.enable_pme_wa = rv1_enable_pme_wa,
};
313
/* Low-level setters used by ramp_up_dispclk_with_dpp(): DISPCLK goes through
 * the VBIOS SMU path, DPREFCLK through the DCE112 helper.
 */
static struct clk_mgr_internal_funcs rv1_clk_internal_funcs = {
	.set_dispclk = rv1_vbios_smu_set_dispclk,
	.set_dprefclk = dce112_set_dprefclk
};
318
/*
 * One-time construction of the Raven 1 clock manager: wires up the
 * function tables and seeds clock parameters from the VBIOS.
 */
void rv1_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_mgr, struct pp_smu_funcs *pp_smu)
{
	struct dc_debug_options *debug = &ctx->dc->debug;
	struct dc_bios *bp = ctx->dc_bios;

	clk_mgr->base.ctx = ctx;
	clk_mgr->pp_smu = pp_smu;
	clk_mgr->base.funcs = &rv1_clk_funcs;
	clk_mgr->funcs = &rv1_clk_internal_funcs;

	clk_mgr->dfs_bypass_disp_clk = 0;

	/* Spread-spectrum defaults — presumably refined by
	 * dce_clock_read_ss_info() below; verify against that helper.
	 */
	clk_mgr->dprefclk_ss_percentage = 0;
	clk_mgr->dprefclk_ss_divider = 1000;
	clk_mgr->ss_on_dprefclk = false;
	clk_mgr->base.dprefclk_khz = 600000;	/* hardcoded 600 MHz DPREFCLK default */

	/*
	 * Dentist VCO frequency, in order of preference: integrated info
	 * table, then the firmware SMU GPU PLL output, then a hardcoded
	 * 3.6 GHz fallback.
	 */
	if (bp->integrated_info)
		clk_mgr->base.dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
	if (bp->fw_info_valid && clk_mgr->base.dentist_vco_freq_khz == 0) {
		clk_mgr->base.dentist_vco_freq_khz = bp->fw_info.smu_gpu_pll_output_freq;
		if (clk_mgr->base.dentist_vco_freq_khz == 0)
			clk_mgr->base.dentist_vco_freq_khz = 3600000;
	}

	/* Enable DFS bypass if the VBIOS reports the capability and it was
	 * not disabled via debug options.
	 */
	if (!debug->disable_dfs_bypass && bp->integrated_info)
		if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
			clk_mgr->dfs_bypass_enabled = true;

	dce_clock_read_ss_info(clk_mgr);
}
350
351
352