#include "priv.h"

#include <subdev/bios.h>
#include <subdev/bios/boost.h>
#include <subdev/bios/cstep.h>
#include <subdev/bios/perf.h>
#include <subdev/bios/vpstate.h>
#include <subdev/fb.h>
#include <subdev/therm.h>
#include <subdev/volt.h>

#include <core/option.h>
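
/******************************************************************************
 * misc
 *****************************************************************************/

/* Clamp (and, if requested, scale) a clock value against the VBIOS boost
 * table entry for the given pstate and clock domain, if one exists.
 */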
static u32
nvkm_clk_adjust(struct nvkm_clk *clk, bool adjust,
		u8 pstate, u8 domain, u32 input)
{
	struct nvkm_bios *bios = clk->subdev.device->bios;
	struct nvbios_boostE boostE;
	u8  ver, hdr, cnt, len;
	u32 data;

	data = nvbios_boostEm(bios, pstate, &ver, &hdr, &cnt, &len, &boostE);
	if (data) {
		struct nvbios_boostS boostS;
		u8  idx = 0, sver, shdr;
		u32 subd;

		input = max(boostE.min, input);
		input = min(boostE.max, input);
		do {
			sver = ver;
			shdr = hdr;
			subd = nvbios_boostSp(bios, idx++, data, &sver, &shdr,
					      cnt, len, &boostS);
			if (subd && boostS.domain == domain) {
				if (adjust)
					input = input * boostS.percent / 100;
				input = max(boostS.min, input);
				input = min(boostS.max, input);
				break;
			}
		} while (subd);
	}

	return input;
}
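
/******************************************************************************
 * C-States
 *****************************************************************************/

/* Check whether a cstate's clocks fit the current boost policy and whether
 * its voltage can be reached within the board limits at this temperature.
 */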
static bool
nvkm_cstate_valid(struct nvkm_clk *clk, struct nvkm_cstate *cstate,
		  u32 max_volt, int temp)
{
	const struct nvkm_domain *domain = clk->domains;
	struct nvkm_volt *volt = clk->subdev.device->volt;
	int voltage;

	while (domain && domain->name != nv_clk_src_max) {
		if (domain->flags & NVKM_CLK_DOM_FLAG_VPSTATE) {
			u32 freq = cstate->domain[domain->name];
			switch (clk->boost_mode) {
			case NVKM_CLK_BOOST_NONE:
				if (clk->base_khz && freq > clk->base_khz)
					return false;
				/* fall through: the boost limit applies too */
			case NVKM_CLK_BOOST_BIOS:
				if (clk->boost_khz && freq > clk->boost_khz)
					return false;
			}
		}
		domain++;
	}

	if (!volt)
		return true;

	voltage = nvkm_volt_map(volt, cstate->voltage, temp);
	if (voltage < 0)
		return false;
	return voltage <= min(max_volt, volt->max_uv);
}
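
/* Walk backwards from the requested cstate to the highest one that is still
 * valid for the current voltage limits and temperature.
 */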
static struct nvkm_cstate *
nvkm_cstate_find_best(struct nvkm_clk *clk, struct nvkm_pstate *pstate,
		      struct nvkm_cstate *cstate)
{
	struct nvkm_device *device = clk->subdev.device;
	struct nvkm_volt *volt = device->volt;
	int max_volt;

	if (!pstate || !cstate)
		return NULL;

	if (!volt)
		return cstate;

	max_volt = volt->max_uv;
	if (volt->max0_id != 0xff)
		max_volt = min(max_volt,
			       nvkm_volt_map(volt, volt->max0_id, clk->temp));
	if (volt->max1_id != 0xff)
		max_volt = min(max_volt,
			       nvkm_volt_map(volt, volt->max1_id, clk->temp));
	if (volt->max2_id != 0xff)
		max_volt = min(max_volt,
			       nvkm_volt_map(volt, volt->max2_id, clk->temp));

	list_for_each_entry_from_reverse(cstate, &pstate->list, head) {
		if (nvkm_cstate_valid(clk, cstate, max_volt, clk->temp))
			break;
	}

	return cstate;
}
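
/* Look up a cstate by id, or return the highest entry in the pstate's list. */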
static struct nvkm_cstate *
nvkm_cstate_get(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
{
	struct nvkm_cstate *cstate;

	if (cstatei == NVKM_CLK_CSTATE_HIGHEST)
		return list_last_entry(&pstate->list, typeof(*cstate), head);

	list_for_each_entry(cstate, &pstate->list, head) {
		if (cstate->id == cstatei)
			return cstate;
	}
	return NULL;
}
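
/* Program one cstate: raise fan speed and voltage, reclock, then relax them
 * back down afterwards.
 */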
static int
nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
{
	struct nvkm_subdev *subdev = &clk->subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_therm *therm = device->therm;
	struct nvkm_volt *volt = device->volt;
	struct nvkm_cstate *cstate;
	int ret;

	if (!list_empty(&pstate->list)) {
		cstate = nvkm_cstate_get(clk, pstate, cstatei);
		cstate = nvkm_cstate_find_best(clk, pstate, cstate);
	} else {
		cstate = &pstate->base;
	}

	if (therm) {
		ret = nvkm_therm_cstate(therm, pstate->fanspeed, +1);
		if (ret && ret != -ENODEV) {
			nvkm_error(subdev, "failed to raise fan speed: %d\n",
				   ret);
			return ret;
		}
	}

	if (volt) {
		ret = nvkm_volt_set_id(volt, cstate->voltage,
				       pstate->base.voltage, clk->temp, +1);
		if (ret && ret != -ENODEV) {
			nvkm_error(subdev, "failed to raise voltage: %d\n",
				   ret);
			return ret;
		}
	}

	ret = clk->func->calc(clk, cstate);
	if (ret == 0) {
		ret = clk->func->prog(clk);
		clk->func->tidy(clk);
	}

	if (volt) {
		ret = nvkm_volt_set_id(volt, cstate->voltage,
				       pstate->base.voltage, clk->temp, -1);
		if (ret && ret != -ENODEV)
			nvkm_error(subdev, "failed to lower voltage: %d\n",
				   ret);
	}

	if (therm) {
		ret = nvkm_therm_cstate(therm, pstate->fanspeed, -1);
		if (ret && ret != -ENODEV)
			nvkm_error(subdev, "failed to lower fan speed: %d\n",
				   ret);
	}

	return ret;
}

static void
nvkm_cstate_del(struct nvkm_cstate *cstate)
{
	list_del(&cstate->head);
	kfree(cstate);
}
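
/* Build a cstate from a VBIOS cstep entry and add it to the pstate's list.
 * Entries whose minimum voltage exceeds the board limit are rejected.
 */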
static int
nvkm_cstate_new(struct nvkm_clk *clk, int idx, struct nvkm_pstate *pstate)
{
	struct nvkm_bios *bios = clk->subdev.device->bios;
	struct nvkm_volt *volt = clk->subdev.device->volt;
	const struct nvkm_domain *domain = clk->domains;
	struct nvkm_cstate *cstate = NULL;
	struct nvbios_cstepX cstepX;
	u8  ver, hdr;
	u32 data;

	data = nvbios_cstepXp(bios, idx, &ver, &hdr, &cstepX);
	if (!data)
		return -ENOENT;

	if (volt && nvkm_volt_map_min(volt, cstepX.voltage) > volt->max_uv)
		return -EINVAL;

	cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
	if (!cstate)
		return -ENOMEM;

	*cstate = pstate->base;
	cstate->voltage = cstepX.voltage;
	cstate->id = idx;

	while (domain && domain->name != nv_clk_src_max) {
		if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) {
			u32 freq = nvkm_clk_adjust(clk, true, pstate->pstate,
						   domain->bios, cstepX.freq);
			cstate->domain[domain->name] = freq;
		}
		domain++;
	}

	list_add(&cstate->head, &pstate->list);
	return 0;
}
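
/******************************************************************************
 * P-States
 *****************************************************************************/

/* Program the given pstate: PCIe link, memory clock, then the highest valid
 * cstate for the current limits.
 */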
static int
nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
{
	struct nvkm_subdev *subdev = &clk->subdev;
	struct nvkm_fb *fb = subdev->device->fb;
	struct nvkm_pci *pci = subdev->device->pci;
	struct nvkm_pstate *pstate;
	int ret, idx = 0;

	list_for_each_entry(pstate, &clk->states, head) {
		if (idx++ == pstatei)
			break;
	}

	nvkm_debug(subdev, "setting performance state %d\n", pstatei);
	clk->pstate = pstatei;

	nvkm_pcie_set_link(pci, pstate->pcie_speed, pstate->pcie_width);

	if (fb && fb->ram && fb->ram->func->calc) {
		struct nvkm_ram *ram = fb->ram;
		int khz = pstate->base.domain[nv_clk_src_mem];
		do {
			ret = ram->func->calc(ram, khz);
			if (ret == 0)
				ret = ram->func->prog(ram);
		} while (ret > 0);
		ram->func->tidy(ram);
	}

	return nvkm_cstate_prog(clk, pstate, NVKM_CLK_CSTATE_HIGHEST);
}
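
/* Deferred reclock: combine the user, driver and thermal requests with the
 * current power source, pick a pstate, and program it.
 */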
static void
nvkm_pstate_work(struct work_struct *work)
{
	struct nvkm_clk *clk = container_of(work, typeof(*clk), work);
	struct nvkm_subdev *subdev = &clk->subdev;
	int pstate;

	if (!atomic_xchg(&clk->waiting, 0))
		return;
	clk->pwrsrc = power_supply_is_system_supplied();

	nvkm_trace(subdev, "P %d PWR %d U(AC) %d U(DC) %d A %d T %d°C D %d\n",
		   clk->pstate, clk->pwrsrc, clk->ustate_ac, clk->ustate_dc,
		   clk->astate, clk->temp, clk->dstate);

	pstate = clk->pwrsrc ? clk->ustate_ac : clk->ustate_dc;
	if (clk->state_nr && pstate != -1) {
		pstate = (pstate < 0) ? clk->astate : pstate;
		pstate = min(pstate, clk->state_nr - 1);
		pstate = max(pstate, clk->dstate);
	} else {
		pstate = clk->pstate = -1;
	}

	nvkm_trace(subdev, "-> %d\n", pstate);
	if (pstate != clk->pstate) {
		int ret = nvkm_pstate_prog(clk, pstate);
		if (ret) {
			nvkm_error(subdev, "error setting pstate %d: %d\n",
				   pstate, ret);
		}
	}

	wake_up_all(&clk->wait);
	nvkm_notify_get(&clk->pwrsrc_ntfy);
}
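
/* Kick the reclock worker, optionally waiting for it to finish. */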
static int
nvkm_pstate_calc(struct nvkm_clk *clk, bool wait)
{
	atomic_set(&clk->waiting, 1);
	schedule_work(&clk->work);
	if (wait)
		wait_event(clk->wait, !atomic_read(&clk->waiting));
	return 0;
}

static void
nvkm_pstate_info(struct nvkm_clk *clk, struct nvkm_pstate *pstate)
{
	const struct nvkm_domain *clock = clk->domains - 1;
	struct nvkm_cstate *cstate;
	struct nvkm_subdev *subdev = &clk->subdev;
	char info[3][32] = { "", "", "" };
	char name[4] = "--";
	int i = -1;

	if (pstate->pstate != 0xff)
		snprintf(name, sizeof(name), "%02x", pstate->pstate);

	while ((++clock)->name != nv_clk_src_max) {
		u32 lo = pstate->base.domain[clock->name];
		u32 hi = lo;
		if (hi == 0)
			continue;

		nvkm_debug(subdev, "%02x: %10d KHz\n", clock->name, lo);
		list_for_each_entry(cstate, &pstate->list, head) {
			u32 freq = cstate->domain[clock->name];
			lo = min(lo, freq);
			hi = max(hi, freq);
			nvkm_debug(subdev, "%10d KHz\n", freq);
		}

		if (clock->mname && ++i < ARRAY_SIZE(info)) {
			lo /= clock->mdiv;
			hi /= clock->mdiv;
			if (lo == hi) {
				snprintf(info[i], sizeof(info[i]), "%s %d MHz",
					 clock->mname, lo);
			} else {
				snprintf(info[i], sizeof(info[i]),
					 "%s %d-%d MHz", clock->mname, lo, hi);
			}
		}
	}

	nvkm_debug(subdev, "%s: %s %s %s\n", name, info[0], info[1], info[2]);
}

static void
nvkm_pstate_del(struct nvkm_pstate *pstate)
{
	struct nvkm_cstate *cstate, *temp;

	list_for_each_entry_safe(cstate, temp, &pstate->list, head) {
		nvkm_cstate_del(cstate);
	}

	list_del(&pstate->head);
	kfree(pstate);
}
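
/* Parse one VBIOS perf table entry into a new pstate, then attach any
 * cstep-based cstates that belong to it.
 */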
static int
nvkm_pstate_new(struct nvkm_clk *clk, int idx)
{
	struct nvkm_bios *bios = clk->subdev.device->bios;
	const struct nvkm_domain *domain = clk->domains - 1;
	struct nvkm_pstate *pstate;
	struct nvkm_cstate *cstate;
	struct nvbios_cstepE cstepE;
	struct nvbios_perfE perfE;
	u8  ver, hdr, cnt, len;
	u32 data;

	data = nvbios_perfEp(bios, idx, &ver, &hdr, &cnt, &len, &perfE);
	if (!data)
		return -EINVAL;
	if (perfE.pstate == 0xff)
		return 0;

	pstate = kzalloc(sizeof(*pstate), GFP_KERNEL);
	if (!pstate)
		return -ENOMEM;
	cstate = &pstate->base;

	INIT_LIST_HEAD(&pstate->list);

	pstate->pstate = perfE.pstate;
	pstate->fanspeed = perfE.fanspeed;
	pstate->pcie_speed = perfE.pcie_speed;
	pstate->pcie_width = perfE.pcie_width;
	cstate->voltage = perfE.voltage;
	cstate->domain[nv_clk_src_core] = perfE.core;
	cstate->domain[nv_clk_src_shader] = perfE.shader;
	cstate->domain[nv_clk_src_mem] = perfE.memory;
	cstate->domain[nv_clk_src_vdec] = perfE.vdec;
	cstate->domain[nv_clk_src_dom6] = perfE.disp;

	while (ver >= 0x40 && (++domain)->name != nv_clk_src_max) {
		struct nvbios_perfS perfS;
		u8 sver = ver, shdr = hdr;
		u32 perfSe = nvbios_perfSp(bios, data, domain->bios,
					   &sver, &shdr, cnt, len, &perfS);
		if (perfSe == 0 || sver != 0x40)
			continue;

		if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) {
			perfS.v40.freq = nvkm_clk_adjust(clk, false,
							 pstate->pstate,
							 domain->bios,
							 perfS.v40.freq);
		}

		cstate->domain[domain->name] = perfS.v40.freq;
	}

	data = nvbios_cstepEm(bios, pstate->pstate, &ver, &hdr, &cstepE);
	if (data) {
		int idx = cstepE.index;
		do {
			nvkm_cstate_new(clk, idx, pstate);
		} while (idx--);
	}

	nvkm_pstate_info(clk, pstate);
	list_add_tail(&pstate->head, &clk->states);
	clk->state_nr++;
	return 0;
}
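
/******************************************************************************
 * Adjustment triggers
 *****************************************************************************/

/* Validate a user pstate request (-2 = auto, -1 = disabled, otherwise a
 * pstate id) and return it biased by +2, with explicit ids converted to
 * their index in the pstate list.
 */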
static int
nvkm_clk_ustate_update(struct nvkm_clk *clk, int req)
{
	struct nvkm_pstate *pstate;
	int i = 0;

	if (!clk->allow_reclock)
		return -ENOSYS;

	if (req != -1 && req != -2) {
		list_for_each_entry(pstate, &clk->states, head) {
			if (pstate->pstate == req)
				break;
			i++;
		}

		if (pstate->pstate != req)
			return -EINVAL;
		req = i;
	}

	return req + 2;
}
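
/* Parse an NvClkMode* config string: "auto", "disabled" or a pstate id. */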
static int
nvkm_clk_nstate(struct nvkm_clk *clk, const char *mode, int arglen)
{
	int ret = 1;

	if (clk->allow_reclock && !strncasecmpz(mode, "auto", arglen))
		return -2;

	if (strncasecmpz(mode, "disabled", arglen)) {
		char save = mode[arglen];
		long v;

		((char *)mode)[arglen] = '\0';
		if (!kstrtol(mode, 0, &v)) {
			ret = nvkm_clk_ustate_update(clk, v);
			if (ret < 0)
				ret = 1;
		}
		((char *)mode)[arglen] = save;
	}

	return ret - 2;
}

int
nvkm_clk_ustate(struct nvkm_clk *clk, int req, int pwr)
{
	int ret = nvkm_clk_ustate_update(clk, req);
	if (ret >= 0) {
		ret -= 2;
		if (pwr)
			clk->ustate_ac = ret;
		else
			clk->ustate_dc = ret;
		return nvkm_pstate_calc(clk, true);
	}
	return ret;
}

int
nvkm_clk_astate(struct nvkm_clk *clk, int req, int rel, bool wait)
{
	if (!rel) clk->astate  = req;
	if ( rel) clk->astate += rel;
	clk->astate = min(clk->astate, clk->state_nr - 1);
	clk->astate = max(clk->astate, 0);
	return nvkm_pstate_calc(clk, wait);
}

int
nvkm_clk_tstate(struct nvkm_clk *clk, u8 temp)
{
	if (clk->temp == temp)
		return 0;
	clk->temp = temp;
	return nvkm_pstate_calc(clk, false);
}

int
nvkm_clk_dstate(struct nvkm_clk *clk, int req, int rel)
{
	if (!rel) clk->dstate  = req;
	if ( rel) clk->dstate += rel;
	clk->dstate = min(clk->dstate, clk->state_nr - 1);
	clk->dstate = max(clk->dstate, 0);
	return nvkm_pstate_calc(clk, true);
}

static int
nvkm_clk_pwrsrc(struct nvkm_notify *notify)
{
	struct nvkm_clk *clk =
		container_of(notify, typeof(*clk), pwrsrc_ntfy);
	nvkm_pstate_calc(clk, false);
	return NVKM_NOTIFY_DROP;
}
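
/******************************************************************************
 * subdev base class implementation
 *****************************************************************************/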
int
nvkm_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
{
	return clk->func->read(clk, src);
}

static int
nvkm_clk_fini(struct nvkm_subdev *subdev, bool suspend)
{
	struct nvkm_clk *clk = nvkm_clk(subdev);
	nvkm_notify_put(&clk->pwrsrc_ntfy);
	flush_work(&clk->work);
	if (clk->func->fini)
		clk->func->fini(clk);
	return 0;
}

static int
nvkm_clk_init(struct nvkm_subdev *subdev)
{
	struct nvkm_clk *clk = nvkm_clk(subdev);
	const struct nvkm_domain *clock = clk->domains;
	int ret;

	memset(&clk->bstate, 0x00, sizeof(clk->bstate));
	INIT_LIST_HEAD(&clk->bstate.list);
	clk->bstate.pstate = 0xff;

	while (clock->name != nv_clk_src_max) {
		ret = nvkm_clk_read(clk, clock->name);
		if (ret < 0) {
			nvkm_error(subdev, "%02x freq unknown\n", clock->name);
			return ret;
		}
		clk->bstate.base.domain[clock->name] = ret;
		clock++;
	}

	nvkm_pstate_info(clk, &clk->bstate);

	if (clk->func->init)
		return clk->func->init(clk);

	clk->astate = clk->state_nr - 1;
	clk->dstate = 0;
	clk->pstate = -1;
	clk->temp = 90;
	nvkm_pstate_calc(clk, true);
	return 0;
}

static void *
nvkm_clk_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_clk *clk = nvkm_clk(subdev);
	struct nvkm_pstate *pstate, *temp;

	nvkm_notify_fini(&clk->pwrsrc_ntfy);

	/* Early return if the pstates have been provided statically */
	if (clk->func->pstates)
		return clk;

	list_for_each_entry_safe(pstate, temp, &clk->states, head) {
		nvkm_pstate_del(pstate);
	}

	return clk;
}

static const struct nvkm_subdev_func
nvkm_clk = {
	.dtor = nvkm_clk_dtor,
	.init = nvkm_clk_init,
	.fini = nvkm_clk_fini,
};

int
nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device,
	      int index, bool allow_reclock, struct nvkm_clk *clk)
{
	struct nvkm_subdev *subdev = &clk->subdev;
	struct nvkm_bios *bios = device->bios;
	int ret, idx, arglen;
	const char *mode;
	struct nvbios_vpstate_header h;

	nvkm_subdev_ctor(&nvkm_clk, device, index, subdev);

	if (bios && !nvbios_vpstate_parse(bios, &h)) {
		struct nvbios_vpstate_entry base, boost;
		if (!nvbios_vpstate_entry(bios, &h, h.boost_id, &boost))
			clk->boost_khz = boost.clock_mhz * 1000;
		if (!nvbios_vpstate_entry(bios, &h, h.base_id, &base))
			clk->base_khz = base.clock_mhz * 1000;
	}

	clk->func = func;
	INIT_LIST_HEAD(&clk->states);
	clk->domains = func->domains;
	clk->ustate_ac = -1;
	clk->ustate_dc = -1;
	clk->allow_reclock = allow_reclock;

	INIT_WORK(&clk->work, nvkm_pstate_work);
	init_waitqueue_head(&clk->wait);
	atomic_set(&clk->waiting, 0);

	/* If no pstates are provided statically, fetch them from the BIOS */
	if (!func->pstates) {
		idx = 0;
		do {
			ret = nvkm_pstate_new(clk, idx++);
		} while (ret == 0);
	} else {
		for (idx = 0; idx < func->nr_pstates; idx++)
			list_add_tail(&func->pstates[idx].head, &clk->states);
		clk->state_nr = func->nr_pstates;
	}

	ret = nvkm_notify_init(NULL, &device->event, nvkm_clk_pwrsrc, true,
			       NULL, 0, 0, &clk->pwrsrc_ntfy);
	if (ret)
		return ret;

	mode = nvkm_stropt(device->cfgopt, "NvClkMode", &arglen);
	if (mode) {
		clk->ustate_ac = nvkm_clk_nstate(clk, mode, arglen);
		clk->ustate_dc = nvkm_clk_nstate(clk, mode, arglen);
	}

	mode = nvkm_stropt(device->cfgopt, "NvClkModeAC", &arglen);
	if (mode)
		clk->ustate_ac = nvkm_clk_nstate(clk, mode, arglen);

	mode = nvkm_stropt(device->cfgopt, "NvClkModeDC", &arglen);
	if (mode)
		clk->ustate_dc = nvkm_clk_nstate(clk, mode, arglen);

	clk->boost_mode = nvkm_longopt(device->cfgopt, "NvBoost",
				       NVKM_CLK_BOOST_NONE);
	return 0;
}

int
nvkm_clk_new_(const struct nvkm_clk_func *func, struct nvkm_device *device,
	      int index, bool allow_reclock, struct nvkm_clk **pclk)
{
	if (!(*pclk = kzalloc(sizeof(**pclk), GFP_KERNEL)))
		return -ENOMEM;
	return nvkm_clk_ctor(func, device, index, allow_reclock, *pclk);
}