1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26#include <linux/slab.h>
27
28#include "reg_helper.h"
29#include "core_types.h"
30#include "clk_mgr_internal.h"
31#include "rv1_clk_mgr.h"
32#include "dce100/dce_clk_mgr.h"
33#include "dce112/dce112_clk_mgr.h"
34#include "rv1_clk_mgr_vbios_smu.h"
35#include "rv1_clk_mgr_clk.h"
36
37void rv1_init_clocks(struct clk_mgr *clk_mgr)
38{
39 memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
40}
41
42static int rv1_determine_dppclk_threshold(struct clk_mgr_internal *clk_mgr, struct dc_clocks *new_clocks)
43{
44 bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
45 bool dispclk_increase = new_clocks->dispclk_khz > clk_mgr->base.clks.dispclk_khz;
46 int disp_clk_threshold = new_clocks->max_supported_dppclk_khz;
47 bool cur_dpp_div = clk_mgr->base.clks.dispclk_khz > clk_mgr->base.clks.dppclk_khz;
48
49
50 if (dispclk_increase) {
51
52 if (cur_dpp_div)
53 return new_clocks->dispclk_khz;
54
55
56
57
58 if (new_clocks->dispclk_khz <= disp_clk_threshold)
59 return new_clocks->dispclk_khz;
60
61
62 if (!request_dpp_div)
63 return new_clocks->dispclk_khz;
64
65 } else {
66
67
68
69
70
71 if (!cur_dpp_div)
72 return new_clocks->dispclk_khz;
73
74
75
76
77 if (clk_mgr->base.clks.dispclk_khz <= disp_clk_threshold)
78 return new_clocks->dispclk_khz;
79
80
81 if (request_dpp_div)
82 return new_clocks->dispclk_khz;
83 }
84
85 return disp_clk_threshold;
86}
87
/*
 * Program dispclk/dprefclk and the per-pipe DPP clock dividers for a new
 * clock request.
 *
 * Dispclk is first moved to an intermediate threshold (see
 * rv1_determine_dppclk_threshold) so the divider can be flipped while the
 * resulting dppclk stays within its supported range, then moved to the
 * final requested value.  The statement order here is deliberate; do not
 * reorder the two set_dispclk calls around the divider update.
 */
static void ramp_up_dispclk_with_dpp(
		struct clk_mgr_internal *clk_mgr,
		struct dc *dc,
		struct dc_clocks *new_clocks,
		bool safe_to_lower)
{
	int i;
	int dispclk_to_dpp_threshold = rv1_determine_dppclk_threshold(clk_mgr, new_clocks);
	bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;

	/* When lowering clocks is not yet allowed, keep the DPP divider off
	 * (dppclk = dispclk) so dppclk is not reduced prematurely. */
	if (!safe_to_lower)
		request_dpp_div = false;

	/* Step 1: move dispclk (and dprefclk) to the threshold value. */
	clk_mgr->funcs->set_dispclk(clk_mgr, dispclk_to_dpp_threshold);
	clk_mgr->funcs->set_dprefclk(clk_mgr);

	/* Step 2: update the requested DPP clock divider on every pipe that
	 * has a plane attached. */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		if (!pipe_ctx->plane_state)
			continue;

		pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control(
				pipe_ctx->plane_res.dpp,
				request_dpp_div,
				true);
	}

	/* Step 3: if the threshold was not already the target, finish the
	 * ramp by programming the final dispclk value. */
	if (dispclk_to_dpp_threshold != new_clocks->dispclk_khz) {
		clk_mgr->funcs->set_dispclk(clk_mgr, new_clocks->dispclk_khz);
		clk_mgr->funcs->set_dprefclk(clk_mgr);
	}

	/* Cache the programmed values for future comparisons. */
	clk_mgr->base.clks.dispclk_khz = new_clocks->dispclk_khz;
	clk_mgr->base.clks.dppclk_khz = new_clocks->dppclk_khz;
	clk_mgr->base.clks.max_supported_dppclk_khz = new_clocks->max_supported_dppclk_khz;
}
189
190static bool is_mpo_enabled(struct dc_state *context)
191{
192 int i;
193
194 for (i = 0; i < context->stream_count; i++) {
195 if (context->stream_status[i].plane_count > 1)
196 return true;
197 }
198 return false;
199}
200
/*
 * Reconcile the clocks requested by the new display state with what is
 * currently programmed, negotiating minimum frequencies with the SMU via
 * the pp_smu interface.
 *
 * Ordering matters: SMU minimums are raised *before* the clocks ramp up
 * (send_request_to_increase), but only relaxed *after* everything has been
 * lowered (the final !send_request_to_increase && send_request_to_lower
 * path), so clocks are never below their required floor mid-transition.
 */
static void rv1_update_clocks(struct clk_mgr *clk_mgr_base,
			struct dc_state *context,
			bool safe_to_lower)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
	struct dc *dc = clk_mgr_base->ctx->dc;
	struct dc_debug_options *debug = &dc->debug;
	struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
	struct pp_smu_funcs_rv *pp_smu = NULL;
	bool send_request_to_increase = false;
	bool send_request_to_lower = false;
	int display_count;

	bool enter_display_off = false;

	ASSERT(clk_mgr->pp_smu);

	/* Workaround flag: some sequences must not touch clocks at all. */
	if (dc->work_arounds.skip_clock_update)
		return;

	pp_smu = &clk_mgr->pp_smu->rv_funcs;

	display_count = clk_mgr_helper_get_active_display_cnt(dc, context);

	if (display_count == 0)
		enter_display_off = true;

	/* Notify the SMU of the display count only on the matching phase:
	 * when all displays are off during the lowering pass, or when
	 * displays are active during the raising pass. */
	if (enter_display_off == safe_to_lower) {
		if (pp_smu->set_display_count)
			pp_smu->set_display_count(&pp_smu->pp_smu, display_count);
	}

	/* Any clock going up means SMU minimums must be raised first. */
	if (new_clocks->dispclk_khz > clk_mgr_base->clks.dispclk_khz
			|| new_clocks->phyclk_khz > clk_mgr_base->clks.phyclk_khz
			|| new_clocks->fclk_khz > clk_mgr_base->clks.fclk_khz
			|| new_clocks->dcfclk_khz > clk_mgr_base->clks.dcfclk_khz)
		send_request_to_increase = true;

	if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr_base->clks.phyclk_khz)) {
		clk_mgr_base->clks.phyclk_khz = new_clocks->phyclk_khz;
		send_request_to_lower = true;
	}

	/* Debug override: force a fixed fclk when requested. */
	if (debug->force_fclk_khz != 0)
		new_clocks->fclk_khz = debug->force_fclk_khz;

	if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, clk_mgr_base->clks.fclk_khz)) {
		clk_mgr_base->clks.fclk_khz = new_clocks->fclk_khz;
		send_request_to_lower = true;
	}

	if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
		clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
		send_request_to_lower = true;
	}

	if (should_set_clock(safe_to_lower,
			new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) {
		clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
		send_request_to_lower = true;
	}

	/* Raise SMU hard minimums before ramping clocks up.  SMU takes MHz;
	 * deep-sleep dcfclk is rounded up so the floor is never undershot. */
	if (send_request_to_increase) {
		if (pp_smu->set_hard_min_fclk_by_freq &&
				pp_smu->set_hard_min_dcfclk_by_freq &&
				pp_smu->set_min_deep_sleep_dcfclk) {
			pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, new_clocks->fclk_khz / 1000);
			pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, new_clocks->dcfclk_khz / 1000);
			pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, (new_clocks->dcfclk_deep_sleep_khz + 999) / 1000);
		}
	}

	/* Ramp dispclk/dppclk; also re-run on an equal dispclk so the DPP
	 * divider state still gets refreshed. */
	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)
			|| new_clocks->dispclk_khz == clk_mgr_base->clks.dispclk_khz) {
		ramp_up_dispclk_with_dpp(clk_mgr, dc, new_clocks, safe_to_lower);
		clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
		send_request_to_lower = true;
	}

	/* Only lower SMU minimums after everything else has come down. */
	if (!send_request_to_increase && send_request_to_lower) {
		if (pp_smu->set_hard_min_fclk_by_freq &&
				pp_smu->set_hard_min_dcfclk_by_freq &&
				pp_smu->set_min_deep_sleep_dcfclk) {
			/* With active displays and MPO, pad fclk/dcfclk
			 * minimums by 1% — presumably headroom for the extra
			 * plane traffic; confirm against hardware docs. */
			if (display_count && is_mpo_enabled(context)) {
				pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu,
						((new_clocks->fclk_khz / 1000) * 101) / 100);
				pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu,
						((new_clocks->dcfclk_khz / 1000) * 101) / 100);
				pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu,
						(new_clocks->dcfclk_deep_sleep_khz + 999) / 1000);
			} else {
				pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu,
						new_clocks->fclk_khz / 1000);
				pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu,
						new_clocks->dcfclk_khz / 1000);
				pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu,
						(new_clocks->dcfclk_deep_sleep_khz + 999) / 1000);
			}
		}
	}
}
317
318static void rv1_enable_pme_wa(struct clk_mgr *clk_mgr_base)
319{
320 struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
321 struct pp_smu_funcs_rv *pp_smu = NULL;
322
323 if (clk_mgr->pp_smu) {
324 pp_smu = &clk_mgr->pp_smu->rv_funcs;
325
326 if (pp_smu->set_pme_wa_enable)
327 pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
328 }
329}
330
/* Public clk_mgr vtable for RV1; installed by rv1_clk_mgr_construct(). */
static struct clk_mgr_funcs rv1_clk_funcs = {
	.init_clocks = rv1_init_clocks,
	.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
	.update_clocks = rv1_update_clocks,
	.enable_pme_wa = rv1_enable_pme_wa,
};
337
/* Internal hooks: dispclk is set via the VBIOS/SMU path, dprefclk via the
 * DCE112 routine. */
static struct clk_mgr_internal_funcs rv1_clk_internal_funcs = {
	.set_dispclk = rv1_vbios_smu_set_dispclk,
	.set_dprefclk = dce112_set_dprefclk
};
342
/*
 * Initialize an RV1 clock manager: install the function tables, set
 * spread-spectrum and DPREFCLK defaults, resolve the DENTIST VCO frequency
 * from VBIOS data, and detect DFS-bypass support.
 */
void rv1_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_mgr, struct pp_smu_funcs *pp_smu)
{
	struct dc_debug_options *debug = &ctx->dc->debug;
	struct dc_bios *bp = ctx->dc_bios;

	clk_mgr->base.ctx = ctx;
	clk_mgr->pp_smu = pp_smu;
	clk_mgr->base.funcs = &rv1_clk_funcs;
	clk_mgr->funcs = &rv1_clk_internal_funcs;

	clk_mgr->dfs_bypass_disp_clk = 0;

	/* Spread-spectrum defaults; dce_clock_read_ss_info() below may
	 * overwrite these with VBIOS-provided values. */
	clk_mgr->dprefclk_ss_percentage = 0;
	clk_mgr->dprefclk_ss_divider = 1000;
	clk_mgr->ss_on_dprefclk = false;
	clk_mgr->base.dprefclk_khz = 600000;	/* 600 MHz default */

	/* DENTIST VCO frequency: prefer the integrated-info table, then the
	 * firmware-reported SMU GPU PLL output, then a 3.6 GHz fallback. */
	if (bp->integrated_info)
		clk_mgr->base.dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
	if (bp->fw_info_valid && clk_mgr->base.dentist_vco_freq_khz == 0) {
		clk_mgr->base.dentist_vco_freq_khz = bp->fw_info.smu_gpu_pll_output_freq;
		if (clk_mgr->base.dentist_vco_freq_khz == 0)
			clk_mgr->base.dentist_vco_freq_khz = 3600000;
	}

	/* Enable DFS bypass only if not disabled via debug option and the
	 * VBIOS capability bit is set. */
	if (!debug->disable_dfs_bypass && bp->integrated_info)
		if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
			clk_mgr->dfs_bypass_enabled = true;

	dce_clock_read_ss_info(clk_mgr);
}
374
375
376