/*
 * NOTE(review): the original license/copyright header was lost during
 * extraction (only bare line numbers remained here). Restore the original
 * boilerplate header from the upstream file before committing.
 */
24#define mcp77_clk(p) container_of((p), struct mcp77_clk, base)
25#include "gt215.h"
26#include "pll.h"
27
28#include <subdev/bios.h>
29#include <subdev/bios/pll.h>
30#include <subdev/timer.h>
31
/*
 * Per-device clock state for the MCP77 nvkm clk subdev.
 *
 * The csrc/ssrc/vsrc selections and the register words below are computed
 * by mcp77_clk_calc() and written to the hardware by mcp77_clk_prog().
 */
struct mcp77_clk {
	struct nvkm_clk base;
	enum nv_clk_src csrc, ssrc, vsrc;	/* chosen core/shader/vdec sources */
	u32 cctrl, sctrl;	/* core/shader control words (divider in bits 18:16) */
	u32 ccoef, scoef;	/* core/shader PLL coefficients: (N << 8) | M */
	u32 cpost, spost;	/* core/shader PLL post-divider words */
	u32 vdiv;		/* vdec divider word, written to 0x4600 */
};
40
41static u32
42read_div(struct mcp77_clk *clk)
43{
44 struct nvkm_device *device = clk->base.subdev.device;
45 return nvkm_rd32(device, 0x004600);
46}
47
48static u32
49read_pll(struct mcp77_clk *clk, u32 base)
50{
51 struct nvkm_device *device = clk->base.subdev.device;
52 u32 ctrl = nvkm_rd32(device, base + 0);
53 u32 coef = nvkm_rd32(device, base + 4);
54 u32 ref = nvkm_clk_read(&clk->base, nv_clk_src_href);
55 u32 post_div = 0;
56 u32 clock = 0;
57 int N1, M1;
58
59 switch (base){
60 case 0x4020:
61 post_div = 1 << ((nvkm_rd32(device, 0x4070) & 0x000f0000) >> 16);
62 break;
63 case 0x4028:
64 post_div = (nvkm_rd32(device, 0x4040) & 0x000f0000) >> 16;
65 break;
66 default:
67 break;
68 }
69
70 N1 = (coef & 0x0000ff00) >> 8;
71 M1 = (coef & 0x000000ff);
72 if ((ctrl & 0x80000000) && M1) {
73 clock = ref * N1 / M1;
74 clock = clock / post_div;
75 }
76
77 return clock;
78}
79
/*
 * Decode the current frequency (kHz) of clock source @src from the
 * hardware state.  0x00c054 ("mast") is the clock-source mux register;
 * the per-source switch below decodes its selection fields.
 * Returns 0 for unknown/unsupported selections (after a debug print).
 */
static int
mcp77_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
	struct mcp77_clk *clk = mcp77_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mast = nvkm_rd32(device, 0x00c054);	/* clock-source mux state */
	u32 P = 0;

	switch (src) {
	case nv_clk_src_crystal:
		return device->crystal;
	case nv_clk_src_href:
		return 100000; /* fixed 100 MHz reference, in kHz */
	case nv_clk_src_hclkm4:
		/* href * 4 */
		return nvkm_clk_read(&clk->base, nv_clk_src_href) * 4;
	case nv_clk_src_hclkm2d3:
		/* href * 2 / 3 */
		return nvkm_clk_read(&clk->base, nv_clk_src_href) * 2 / 3;
	case nv_clk_src_host:
		switch (mast & 0x000c0000) {
		case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm2d3);
		case 0x00040000: break; /* unknown selection: falls to debug print */
		case 0x00080000: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm4);
		case 0x000c0000: return nvkm_clk_read(&clk->base, nv_clk_src_cclk);
		}
		break;
	case nv_clk_src_core:
		/* post-divider (right shift) in bits 18:16 of 0x4028 */
		P = (nvkm_rd32(device, 0x004028) & 0x00070000) >> 16;

		switch (mast & 0x00000003) {
		case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
		case 0x00000001: return 0; /* presumably "off" — TODO confirm */
		case 0x00000002: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm4) >> P;
		case 0x00000003: return read_pll(clk, 0x004028) >> P;
		}
		break;
	case nv_clk_src_cclk:
		/* cclk follows core unless the alternate path is fully enabled */
		if ((mast & 0x03000000) != 0x03000000)
			return nvkm_clk_read(&clk->base, nv_clk_src_core);

		if ((mast & 0x00000200) == 0x00000000)
			return nvkm_clk_read(&clk->base, nv_clk_src_core);

		switch (mast & 0x00000c00) {
		case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_href);
		case 0x00000400: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm4);
		case 0x00000800: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm2d3);
		default: return 0;
		}
	case nv_clk_src_shader:
		/* post-divider (right shift) in bits 18:16 of 0x4020 */
		P = (nvkm_rd32(device, 0x004020) & 0x00070000) >> 16;
		switch (mast & 0x00000030) {
		case 0x00000000:
			if (mast & 0x00000040)
				return nvkm_clk_read(&clk->base, nv_clk_src_href) >> P;
			return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
		case 0x00000010: break; /* unknown selection: falls to debug print */
		case 0x00000020: return read_pll(clk, 0x004028) >> P; /* shares core PLL */
		case 0x00000030: return read_pll(clk, 0x004020) >> P;
		}
		break;
	case nv_clk_src_mem:
		return 0; /* memory reclocking not supported here */
	case nv_clk_src_vdec:
		/* divider (right shift) in bits 10:8 of 0x4600 */
		P = (read_div(clk) & 0x00000700) >> 8;

		switch (mast & 0x00400000) {
		case 0x00400000:
			return nvkm_clk_read(&clk->base, nv_clk_src_core) >> P;
		default:
			/* 500 MHz base, in kHz — matches mcp77_clk_calc() */
			return 500000 >> P;
		}
		break;
	default:
		break;
	}

	nvkm_debug(subdev, "unknown clock source %d %08x\n", src, mast);
	return 0;
}
160
161static u32
162calc_pll(struct mcp77_clk *clk, u32 reg,
163 u32 clock, int *N, int *M, int *P)
164{
165 struct nvkm_subdev *subdev = &clk->base.subdev;
166 struct nvbios_pll pll;
167 int ret;
168
169 ret = nvbios_pll_parse(subdev->device->bios, reg, &pll);
170 if (ret)
171 return 0;
172
173 pll.vco2.max_freq = 0;
174 pll.refclk = nvkm_clk_read(&clk->base, nv_clk_src_href);
175 if (!pll.refclk)
176 return 0;
177
178 return nv04_pll_calc(subdev, &pll, clock, N, M, NULL, NULL, P);
179}
180
/*
 * Pick the power-of-two divider (0..7, returned in *div) that brings
 * @src closest to @target, and return the resulting frequency.  When the
 * next-larger candidate is strictly closer, back *div off by one and
 * return that candidate instead.
 */
static inline u32
calc_P(u32 src, u32 target, int *div)
{
	u32 lo = src, hi = src;

	for (*div = 0; *div <= 7; (*div)++) {
		if (lo <= target) {
			/* hi is the previous (one step larger) candidate */
			hi = *div ? lo << 1 : lo;
			break;
		}
		lo >>= 1;
	}

	if (target - lo <= hi - target)
		return lo;

	(*div)--;
	return hi;
}
198
/*
 * Work out a programming strategy for the core, shader and vdec clocks
 * of @cstate and stash the chosen sources/register words in @clk for
 * mcp77_clk_prog() to apply.  Always returns 0.
 */
static int
mcp77_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
	struct mcp77_clk *clk = mcp77_clk(base);
	const int shader = cstate->domain[nv_clk_src_shader];
	const int core = cstate->domain[nv_clk_src_core];
	const int vdec = cstate->domain[nv_clk_src_vdec];
	struct nvkm_subdev *subdev = &clk->base.subdev;
	u32 out = 0, clock = 0;
	int N, M, P1, P2 = 0;
	int divs = 0;

	/* core: prefer a divided hclkm4 if it gets at least as close as
	 * the PLL would; only then the PLL can stay disabled */
	if (core < nvkm_clk_read(&clk->base, nv_clk_src_hclkm4))
		out = calc_P(nvkm_clk_read(&clk->base, nv_clk_src_hclkm4), core, &divs);

	/* compute the PLL at core * 2 so the shader clock can reuse it */
	clock = calc_pll(clk, 0x4028, (core << 1), &N, &M, &P1);

	if (abs(core - out) <= abs(core - (clock >> 1))) {
		clk->csrc = nv_clk_src_hclkm4;
		clk->cctrl = divs << 16;
	} else {
		/* Split the post-divider: cpost holds a power-of-two shift
		 * capped at 2, cctrl an integer divider (P2 + 1) for the
		 * remainder. */
		if(P1 > 2) {
			P2 = P1 - 2;
			P1 = 2;
		}

		clk->csrc = nv_clk_src_core;
		clk->ccoef = (N << 8) | M;

		clk->cctrl = (P2 + 1) << 16;
		clk->cpost = (1 << P1) << 16;
	}

	/* shader: href directly, a divided core PLL, or its own PLL.
	 * NOTE(review): the spll branch never sets clk->spost, yet
	 * mcp77_clk_prog() writes spost to 0x4070 — it stays 0 from
	 * kzalloc; confirm this is intended. */
	out = 0;
	if (shader == nvkm_clk_read(&clk->base, nv_clk_src_href)) {
		clk->ssrc = nv_clk_src_href;
	} else {
		clock = calc_pll(clk, 0x4020, shader, &N, &M, &P1);
		if (clk->csrc == nv_clk_src_core)
			out = calc_P((core << 1), shader, &divs);

		if (abs(shader - out) <=
		    abs(shader - clock) &&
		   (divs + P2) <= 7) {
			clk->ssrc = nv_clk_src_core;
			clk->sctrl = (divs + P2) << 16;
		} else {
			clk->ssrc = nv_clk_src_shader;
			clk->scoef = (N << 8) | M;
			clk->sctrl = P1 << 16;
		}
	}

	/* vdec: divided core clock vs divided fixed 500 MHz source */
	out = calc_P(core, vdec, &divs);
	clock = calc_P(500000, vdec, &P1);
	if(abs(vdec - out) <= abs(vdec - clock)) {
		clk->vsrc = nv_clk_src_cclk;
		clk->vdiv = divs << 16;
	} else {
		clk->vsrc = nv_clk_src_vdec;
		clk->vdiv = P1 << 16;
	}

	/* dump the chosen strategy.
	 * NOTE(review): vsrc is only ever cclk or vdec here, so the
	 * hclkm4 comparison below always prints "vdec: core" — verify
	 * the intended condition. */
	nvkm_debug(subdev, "nvpll: %08x %08x %08x\n",
		   clk->ccoef, clk->cpost, clk->cctrl);
	nvkm_debug(subdev, " spll: %08x %08x %08x\n",
		   clk->scoef, clk->spost, clk->sctrl);
	nvkm_debug(subdev, " vdiv: %08x\n", clk->vdiv);
	if (clk->csrc == nv_clk_src_hclkm4)
		nvkm_debug(subdev, "core: hrefm4\n");
	else
		nvkm_debug(subdev, "core: nvpll\n");

	if (clk->ssrc == nv_clk_src_hclkm4)
		nvkm_debug(subdev, "shader: hrefm4\n");
	else if (clk->ssrc == nv_clk_src_core)
		nvkm_debug(subdev, "shader: nvpll\n");
	else
		nvkm_debug(subdev, "shader: spll\n");

	if (clk->vsrc == nv_clk_src_hclkm4)
		nvkm_debug(subdev, "vdec: 500MHz\n");
	else
		nvkm_debug(subdev, "vdec: core\n");

	return 0;
}
294
/*
 * Apply the strategy computed by mcp77_clk_calc() to the hardware.
 * Engines are paused via gt215_clk_pre()/gt215_clk_post() around the
 * register writes; statement order here is significant.
 */
static int
mcp77_clk_prog(struct nvkm_clk *base)
{
	struct mcp77_clk *clk = mcp77_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 pllmask = 0, mast;
	unsigned long flags;
	unsigned long *f = &flags;
	int ret = 0;

	ret = gt215_clk_pre(&clk->base, f);
	if (ret)
		goto out;

	/* first switch everything to safe (non-PLL) sources, then build
	 * up the new mux word in 'mast' before writing it back at the end */
	mast = nvkm_mask(device, 0xc054, 0x03400e70, 0x03400640);
	mast &= ~0x00400e73;
	mast |= 0x03000000;

	switch (clk->csrc) {
	case nv_clk_src_hclkm4:
		nvkm_mask(device, 0x4028, 0x00070000, clk->cctrl);
		mast |= 0x00000002;
		break;
	case nv_clk_src_core:
		nvkm_wr32(device, 0x402c, clk->ccoef);
		nvkm_wr32(device, 0x4028, 0x80000000 | clk->cctrl);
		nvkm_wr32(device, 0x4040, clk->cpost);
		pllmask |= (0x3 << 8);	/* wait for core PLL lock below */
		mast |= 0x00000003;
		break;
	default:
		nvkm_warn(subdev, "Reclocking failed: unknown core clock\n");
		goto resume;
	}

	switch (clk->ssrc) {
	case nv_clk_src_href:
		nvkm_mask(device, 0x4020, 0x00070000, 0x00000000);
		/* mast bits for shader stay 0: href selected */
		break;
	case nv_clk_src_core:
		nvkm_mask(device, 0x4020, 0x00070000, clk->sctrl);
		mast |= 0x00000020;
		break;
	case nv_clk_src_shader:
		nvkm_wr32(device, 0x4024, clk->scoef);
		nvkm_wr32(device, 0x4020, 0x80000000 | clk->sctrl);
		nvkm_wr32(device, 0x4070, clk->spost);
		pllmask |= (0x3 << 12);	/* wait for shader PLL lock below */
		mast |= 0x00000030;
		break;
	default:
		nvkm_warn(subdev, "Reclocking failed: unknown sclk clock\n");
		goto resume;
	}

	/* wait up to 2ms for the enabled PLLs to report lock in 0x4080.
	 * NOTE(review): on timeout this jumps to resume without setting
	 * an error, so 0 is still returned — confirm that is intended. */
	if (nvkm_msec(device, 2000,
		u32 tmp = nvkm_rd32(device, 0x004080) & pllmask;
		if (tmp == pllmask)
			break;
	) < 0)
		goto resume;

	switch (clk->vsrc) {
	case nv_clk_src_cclk:
		mast |= 0x00400000;
		fallthrough;
	default:
		nvkm_wr32(device, 0x4600, clk->vdiv);
	}

	/* switch the muxes over to the new sources */
	nvkm_wr32(device, 0xc054, mast);

resume:
	/* disable whichever PLLs ended up unused */
	if (clk->csrc != nv_clk_src_core) {
		nvkm_wr32(device, 0x4040, 0x00000000);
		nvkm_mask(device, 0x4028, 0x80000000, 0x00000000);
	}

	if (clk->ssrc != nv_clk_src_shader) {
		nvkm_wr32(device, 0x4070, 0x00000000);
		nvkm_mask(device, 0x4020, 0x80000000, 0x00000000);
	}

out:
	if (ret == -EBUSY)
		f = NULL;	/* gt215_clk_pre() failed; nothing to unlock */

	gt215_clk_post(&clk->base, f);
	return ret;
}
389
/* Required nvkm_clk_func hook; no post-reclock cleanup needed on MCP77. */
static void
mcp77_clk_tidy(struct nvkm_clk *base)
{
}
394
/* Hook table and reclockable clock domains for the MCP77 clk subdev. */
static const struct nvkm_clk_func
mcp77_clk = {
	.read = mcp77_clk_read,
	.calc = mcp77_clk_calc,
	.prog = mcp77_clk_prog,
	.tidy = mcp77_clk_tidy,
	.domains = {
		{ nv_clk_src_crystal, 0xff },
		{ nv_clk_src_href , 0xff },
		{ nv_clk_src_core , 0xff, 0, "core", 1000 },
		{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
		{ nv_clk_src_vdec , 0xff, 0, "vdec", 1000 },
		{ nv_clk_src_max }
	}
};
410
411int
412mcp77_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
413 struct nvkm_clk **pclk)
414{
415 struct mcp77_clk *clk;
416
417 if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
418 return -ENOMEM;
419 *pclk = &clk->base;
420
421 return nvkm_clk_ctor(&mcp77_clk, device, type, inst, true, &clk->base);
422}
423