#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include <asm/div64.h>
#include <linux/gcd.h>

/**
 * amdgpu_pll_reduce_ratio - fractional number reduction
 *
 * @nom: numerator
 * @den: denominator
 * @nom_min: minimum value for the numerator
 * @den_min: minimum value for the denominator
 *
 * Find the greatest common divisor and divide it out of both numerator
 * and denominator, then scale the result back up so that both stay at
 * least as large as their minimum values.
 */
static void amdgpu_pll_reduce_ratio(unsigned *nom, unsigned *den,
				    unsigned nom_min, unsigned den_min)
{
	unsigned tmp;

	/* reduce the numbers to a simpler ratio */
	tmp = gcd(*nom, *den);
	*nom /= tmp;
	*den /= tmp;

	/* make sure the numerator is large enough */
	if (*nom < nom_min) {
		tmp = DIV_ROUND_UP(nom_min, *nom);
		*nom *= tmp;
		*den *= tmp;
	}

	/* and the denominator as well */
	if (*den < den_min) {
		tmp = DIV_ROUND_UP(den_min, *den);
		*nom *= tmp;
		*den *= tmp;
	}
}
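
/*
 * Worked example (illustrative values, not taken from real hardware):
 * reducing 300/200 with nom_min = 16 and den_min = 2:
 *
 *	gcd(300, 200) = 100                       -> nom = 3, den = 2
 *	3 < 16, scale by DIV_ROUND_UP(16, 3) = 6  -> nom = 18, den = 12
 *	12 >= 2, no further scaling needed
 *
 * The result 18/12 equals the original 3/2, but satisfies both minimums.
 */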

/**
 * amdgpu_pll_get_fb_ref_div - feedback and reference divider calculation
 *
 * @nom: numerator
 * @den: denominator
 * @post_div: post divider
 * @fb_div_max: feedback divider maximum
 * @ref_div_max: reference divider maximum
 * @fb_div: resulting feedback divider
 * @ref_div: resulting reference divider
 *
 * Calculate the feedback and reference divider for a given post divider,
 * making sure both stay within their limits.
 */
static void amdgpu_pll_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
				      unsigned fb_div_max, unsigned ref_div_max,
				      unsigned *fb_div, unsigned *ref_div)
{
	/* limit reference * post divider to a maximum */
	ref_div_max = min(128 / post_div, ref_div_max);

	/* get matching reference and feedback divider */
	*ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
	*fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);

	/* limit fb divider to its maximum */
	if (*fb_div > fb_div_max) {
		*ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div);
		*fb_div = fb_div_max;
	}
}
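
/*
 * Worked example (illustrative values): for nom = 3, den = 2, post_div = 4,
 * fb_div_max = 1023 and ref_div_max = 64, the reference divider limit
 * becomes min(128 / 4, 64) = 32, and then:
 *
 *	*ref_div = min(max(DIV_ROUND_CLOSEST(2, 4), 1), 32) = 1
 *	*fb_div  = DIV_ROUND_CLOSEST(3 * 1 * 4, 2)          = 6
 *
 * giving fb_div / (ref_div * post_div) = 6 / 4 = 3 / 2, the requested
 * ratio, with the feedback divider well below fb_div_max.
 */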

/**
 * amdgpu_pll_compute - compute PLL parameters
 *
 * @pll: information about the PLL
 * @freq: requested frequency
 * @dot_clock_p: resulting pixel clock
 * @fb_div_p: resulting feedback divider
 * @frac_fb_div_p: fractional part of the feedback divider
 * @ref_div_p: resulting reference divider
 * @post_div_p: resulting post divider
 *
 * Try to calculate the PLL parameters to generate the given frequency:
 * dot_clock = (ref_freq * feedback_div) / (ref_div * post_div)
 */
void amdgpu_pll_compute(struct amdgpu_pll *pll,
			u32 freq,
			u32 *dot_clock_p,
			u32 *fb_div_p,
			u32 *frac_fb_div_p,
			u32 *ref_div_p,
			u32 *post_div_p)
{
	unsigned target_clock = pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV ?
		freq : freq / 10;

	unsigned fb_div_min, fb_div_max, fb_div;
	unsigned post_div_min, post_div_max, post_div;
	unsigned ref_div_min, ref_div_max, ref_div;
	unsigned post_div_best, diff_best;
	unsigned nom, den;

	/* determine allowed feedback divider range */
	fb_div_min = pll->min_feedback_div;
	fb_div_max = pll->max_feedback_div;

	if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV) {
		fb_div_min *= 10;
		fb_div_max *= 10;
	}

	/* determine allowed ref divider range */
	if (pll->flags & AMDGPU_PLL_USE_REF_DIV)
		ref_div_min = pll->reference_div;
	else
		ref_div_min = pll->min_ref_div;

	if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV &&
	    pll->flags & AMDGPU_PLL_USE_REF_DIV)
		ref_div_max = pll->reference_div;
	else
		ref_div_max = pll->max_ref_div;

	/* determine allowed post divider range */
	if (pll->flags & AMDGPU_PLL_USE_POST_DIV) {
		post_div_min = pll->post_div;
		post_div_max = pll->post_div;
	} else {
		unsigned vco_min, vco_max;

		if (pll->flags & AMDGPU_PLL_IS_LCD) {
			vco_min = pll->lcd_pll_out_min;
			vco_max = pll->lcd_pll_out_max;
		} else {
			vco_min = pll->pll_out_min;
			vco_max = pll->pll_out_max;
		}

		if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV) {
			vco_min *= 10;
			vco_max *= 10;
		}

		post_div_min = vco_min / target_clock;
		if ((target_clock * post_div_min) < vco_min)
			++post_div_min;
		if (post_div_min < pll->min_post_div)
			post_div_min = pll->min_post_div;

		post_div_max = vco_max / target_clock;
		if ((target_clock * post_div_max) > vco_max)
			--post_div_max;
		if (post_div_max > pll->max_post_div)
			post_div_max = pll->max_post_div;
	}

	/* represent the searched ratio as a fractional number */
	nom = target_clock;
	den = pll->reference_freq;

	/* reduce the numbers to a simpler ratio */
	amdgpu_pll_reduce_ratio(&nom, &den, fb_div_min, post_div_min);

	/* now search for a post divider */
	if (pll->flags & AMDGPU_PLL_PREFER_MINM_OVER_MAXP)
		post_div_best = post_div_min;
	else
		post_div_best = post_div_max;
	diff_best = ~0;

	for (post_div = post_div_min; post_div <= post_div_max; ++post_div) {
		unsigned diff;

		amdgpu_pll_get_fb_ref_div(nom, den, post_div, fb_div_max,
					  ref_div_max, &fb_div, &ref_div);
		diff = abs(target_clock - (pll->reference_freq * fb_div) /
			(ref_div * post_div));

		if (diff < diff_best || (diff == diff_best &&
					 !(pll->flags & AMDGPU_PLL_PREFER_MINM_OVER_MAXP))) {

			post_div_best = post_div;
			diff_best = diff;
		}
	}
	post_div = post_div_best;

	/* get the feedback and reference divider for the optimal value */
	amdgpu_pll_get_fb_ref_div(nom, den, post_div, fb_div_max, ref_div_max,
				  &fb_div, &ref_div);

	/* reduce the numbers to a simpler ratio once more */
	/* this also makes sure that the reference divider is large enough */
	amdgpu_pll_reduce_ratio(&fb_div, &ref_div, fb_div_min, ref_div_min);

	/* avoid high jitter with small fractional dividers */
	if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) {
		fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 50);
		if (fb_div < fb_div_min) {
			unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div);
			fb_div *= tmp;
			ref_div *= tmp;
		}
	}

	/* and finally save the result */
	if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV) {
		*fb_div_p = fb_div / 10;
		*frac_fb_div_p = fb_div % 10;
	} else {
		*fb_div_p = fb_div;
		*frac_fb_div_p = 0;
	}

	*dot_clock_p = ((pll->reference_freq * *fb_div_p * 10) +
			(pll->reference_freq * *frac_fb_div_p)) /
		       (ref_div * post_div * 10);
	*ref_div_p = ref_div;
	*post_div_p = post_div;

	DRM_DEBUG_KMS("%d - %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
		      freq, *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p,
		      ref_div, post_div);
}
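
/*
 * A typical call from the display code looks roughly like the sketch below;
 * the pll pointer and adjusted_clock come from the CRTC being programmed
 * (names here are illustrative, not a verbatim caller):
 *
 *	u32 dot_clock, fb_div, frac_fb_div, ref_div, post_div;
 *
 *	amdgpu_pll_compute(pll, amdgpu_crtc->adjusted_clock, &dot_clock,
 *			   &fb_div, &frac_fb_div, &ref_div, &post_div);
 *
 * The resulting dividers are then handed to the atombios code to program
 * the hardware.
 */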

/**
 * amdgpu_pll_get_use_mask - look up a mask of which PPLLs are in use
 *
 * @crtc: drm crtc
 *
 * Returns the mask of which PPLLs (Pixel PLLs) are in use by other crtcs.
 */
u32 amdgpu_pll_get_use_mask(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_crtc *test_crtc;
	struct amdgpu_crtc *test_amdgpu_crtc;
	u32 pll_in_use = 0;

	list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
		if (crtc == test_crtc)
			continue;

		test_amdgpu_crtc = to_amdgpu_crtc(test_crtc);
		if (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID)
			pll_in_use |= (1 << test_amdgpu_crtc->pll_id);
	}
	return pll_in_use;
}
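
/*
 * Callers typically scan the returned mask for a free PLL, for example
 * (sketch, assuming the usual ATOM_PPLL* ids):
 *
 *	u32 pll_in_use = amdgpu_pll_get_use_mask(crtc);
 *
 *	if (!(pll_in_use & (1 << ATOM_PPLL1)))
 *		return ATOM_PPLL1;
 *	if (!(pll_in_use & (1 << ATOM_PPLL0)))
 *		return ATOM_PPLL0;
 */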

/**
 * amdgpu_pll_get_shared_dp_ppll - return the PPLL used by another crtc for DP
 *
 * @crtc: drm crtc
 *
 * Returns the PPLL (Pixel PLL) used by another crtc/encoder which is
 * also in DP mode. For DP, a single PPLL can be used for all DP
 * crtcs/encoders.
 */
int amdgpu_pll_get_shared_dp_ppll(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_crtc *test_crtc;
	struct amdgpu_crtc *test_amdgpu_crtc;

	list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
		if (crtc == test_crtc)
			continue;
		test_amdgpu_crtc = to_amdgpu_crtc(test_crtc);
		if (test_amdgpu_crtc->encoder &&
		    ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(test_amdgpu_crtc->encoder))) {
			/* for DP use the same PLL for all */
			if (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID)
				return test_amdgpu_crtc->pll_id;
		}
	}
	return ATOM_PPLL_INVALID;
}

/**
 * amdgpu_pll_get_shared_nondp_ppll - return the PPLL used by another non-DP crtc
 *
 * @crtc: drm crtc
 *
 * Returns the PPLL (Pixel PLL) used by another non-DP crtc/encoder which
 * can be shared, i.e. one driving the same connector or one with matching
 * clock and spread spectrum settings.
 */
int amdgpu_pll_get_shared_nondp_ppll(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_crtc *test_crtc;
	struct amdgpu_crtc *test_amdgpu_crtc;
	u32 adjusted_clock, test_adjusted_clock;

	adjusted_clock = amdgpu_crtc->adjusted_clock;

	if (adjusted_clock == 0)
		return ATOM_PPLL_INVALID;

	list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
		if (crtc == test_crtc)
			continue;
		test_amdgpu_crtc = to_amdgpu_crtc(test_crtc);
		if (test_amdgpu_crtc->encoder &&
		    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(test_amdgpu_crtc->encoder))) {
			/* check if we are already driving this connector with another crtc */
			if (test_amdgpu_crtc->connector == amdgpu_crtc->connector) {
				/* if we are, return that pll */
				if (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID)
					return test_amdgpu_crtc->pll_id;
			}
			/* for non-DP check the clock */
			test_adjusted_clock = test_amdgpu_crtc->adjusted_clock;
			if ((crtc->mode.clock == test_crtc->mode.clock) &&
			    (adjusted_clock == test_adjusted_clock) &&
			    (amdgpu_crtc->ss_enabled == test_amdgpu_crtc->ss_enabled) &&
			    (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID))
				return test_amdgpu_crtc->pll_id;
		}
	}
	return ATOM_PPLL_INVALID;
}
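
/*
 * Together with amdgpu_pll_get_use_mask(), the two sharing helpers above
 * form the usual PLL selection order (a sketch of the idea, not the exact
 * upstream logic):
 *
 *	if (ENCODER_MODE_IS_DP(encoder_mode))
 *		pll = amdgpu_pll_get_shared_dp_ppll(crtc);
 *	else
 *		pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
 *	if (pll != ATOM_PPLL_INVALID)
 *		return pll;
 *
 *	// otherwise fall back to an unused PLL from the use mask
 */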