/*
 * CPPC (Collaborative Processor Performance Control) methods used
 * by CPUfreq drivers.
 *
 * CPPC describes a few methods for controlling CPU performance using
 * information from a per CPU table called CPC. This table is described in
 * the ACPI v5.0+ specification. The table consists of a list of
 * registers which may be memory mapped or hardware registers and also may
 * include some static integer values.
 *
 * CPU performance is on an abstract continuous scale as against a discretized
 * P-state scale which is tied to CPU frequency only. In brief, the basic
 * operation involves:
 *
 * - OS makes a CPU performance request (can provide min and max bounds).
 *
 * - Platform (such as BMC) is free to optimize the request within the
 *   requested bounds depending on power/thermal budgets etc.
 *
 * - Platform conveys its decision back to OS.
 *
 * The communication between OS and platform occurs through another medium
 * called (PCC) Platform Communication Channel. This is a generic mailbox like
 * mechanism which includes doorbell semantics to indicate register updates.
 * See drivers/mailbox/pcc.c for details on PCC.
 *
 * Finer details about the PCC and CPPC spec are available in the ACPI v5.1
 * and above specifications.
 */
#define pr_fmt(fmt)	"ACPI CPPC: " fmt

#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/rwsem.h>
#include <linux/wait.h>

#include <acpi/cppc_acpi.h>
struct cppc_pcc_data {
	struct mbox_chan *pcc_channel;
	void __iomem *pcc_comm_addr;
	int pcc_subspace_idx;
	bool pcc_channel_acquired;
	ktime_t deadline;
	unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;

	bool pending_pcc_write_cmd;	/* Any pending/batched PCC write cmds? */
	bool platform_owns_pcc;		/* Ownership of PCC subspace */
	unsigned int pcc_write_cnt;	/* Running count of PCC write commands */

	/*
	 * Lock to provide controlled access to the PCC channel.
	 *
	 * For performance critical usecases (currently cppc_set_perf):
	 *	We need to take read_lock and check if the channel belongs to
	 *	OSPM before reading or writing to the PCC subspace.
	 *	We need to take write_lock before transferring the channel
	 *	ownership to the platform via a doorbell. This allows us to
	 *	batch a number of CPPC requests if they happen to originate
	 *	at about the same time.
	 *
	 * For non-performance critical usecases (init):
	 *	Take write_lock for all purposes, which gives exclusive access.
	 */
	struct rw_semaphore pcc_lock;

	/* Wait queue for CPUs whose requests were batched */
	wait_queue_head_t pcc_write_wait_q;
};

/* Structure to represent the single PCC channel */
static struct cppc_pcc_data pcc_data = {
	.pcc_subspace_idx = -1,
	.platform_owns_pcc = true,
};

/*
 * The cpc_desc structure contains the ACPI register details
 * as described in the per CPU _CPC tables. The details
 * include the type of register (e.g. PCC, System IO, FFH etc.)
 * and destination addresses which lets us READ/WRITE CPU performance
 * values over the platform's preferred communication channel.
 */
static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);

/* pcc mapped address + header size + offset within PCC subspace */
#define GET_PCC_VADDR(offs)	(pcc_data.pcc_comm_addr + 0x8 + (offs))

/* Check if a CPC register is in PCC */
#define CPC_IN_PCC(cpc)	((cpc)->type == ACPI_TYPE_BUFFER &&		\
				(cpc)->cpc_entry.reg.space_id ==	\
				ACPI_ADR_SPACE_PLATFORM_COMM)

/* Evaluates to True if reg is a NULL register descriptor */
#define IS_NULL_REG(reg) ((reg)->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && \
				(reg)->address == 0 &&			\
				(reg)->bit_width == 0 &&		\
				(reg)->bit_offset == 0 &&		\
				(reg)->access_width == 0)

/* Evaluates to True if an optional cpc field is supported */
#define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ?		\
				!!(cpc)->cpc_entry.int_value :		\
				!IS_NULL_REG(&(cpc)->cpc_entry.reg))

/*
 * Arbitrary Retries in case the remote processor is slow to respond
 * to PCC commands. Keeping it high enough to cover emulators where
 * the processors run painfully slow.
 */
#define NUM_RETRIES 500

struct cppc_attr {
	struct attribute attr;
	ssize_t (*show)(struct kobject *kobj,
			struct attribute *attr, char *buf);
	ssize_t (*store)(struct kobject *kobj,
			struct attribute *attr, const char *c, ssize_t count);
};

#define define_one_cppc_ro(_name)		\
static struct cppc_attr _name =			\
__ATTR(_name, 0444, show_##_name, NULL)

#define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)

static ssize_t show_feedback_ctrs(struct kobject *kobj,
		struct attribute *attr, char *buf)
{
	struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
	struct cppc_perf_fb_ctrs fb_ctrs = {0};

	cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);

	return scnprintf(buf, PAGE_SIZE, "ref:%llu del:%llu\n",
			fb_ctrs.reference, fb_ctrs.delivered);
}
define_one_cppc_ro(feedback_ctrs);

static ssize_t show_reference_perf(struct kobject *kobj,
		struct attribute *attr, char *buf)
{
	struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
	struct cppc_perf_fb_ctrs fb_ctrs = {0};

	cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);

	return scnprintf(buf, PAGE_SIZE, "%llu\n",
			fb_ctrs.reference_perf);
}
define_one_cppc_ro(reference_perf);

static ssize_t show_wraparound_time(struct kobject *kobj,
		struct attribute *attr, char *buf)
{
	struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
	struct cppc_perf_fb_ctrs fb_ctrs = {0};

	cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", fb_ctrs.ctr_wrap_time);
}
define_one_cppc_ro(wraparound_time);

static struct attribute *cppc_attrs[] = {
	&feedback_ctrs.attr,
	&reference_perf.attr,
	&wraparound_time.attr,
	NULL
};

static struct kobj_type cppc_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = cppc_attrs,
};

static int check_pcc_chan(bool chk_err_bit)
{
	int ret = -EIO, status = 0;
	struct acpi_pcct_shared_memory __iomem *generic_comm_base =
							pcc_data.pcc_comm_addr;
	ktime_t next_deadline = ktime_add(ktime_get(), pcc_data.deadline);

	if (!pcc_data.platform_owns_pcc)
		return 0;

	/* Retry in case the remote processor was too slow to catch up. */
	while (!ktime_after(ktime_get(), next_deadline)) {
		/*
		 * Per spec, prior to boot the PCC space will be initialized by
		 * platform and should have set the command completion bit when
		 * PCC can be used by OSPM.
		 */
		status = readw_relaxed(&generic_comm_base->status);
		if (status & PCC_CMD_COMPLETE_MASK) {
			ret = 0;
			if (chk_err_bit && (status & PCC_ERROR_MASK))
				ret = -EIO;
			break;
		}
		/*
		 * Reducing the bus traffic in case this loop takes longer than
		 * a few retries.
		 */
		udelay(3);
	}

	if (likely(!ret))
		pcc_data.platform_owns_pcc = false;
	else
		pr_err("PCC check channel failed. Status=%x\n", status);

	return ret;
}

/*
 * This function transfers the ownership of the PCC to the platform,
 * so it must be called while holding write_lock(pcc_lock).
 */
static int send_pcc_cmd(u16 cmd)
{
	int ret = -EIO, i;
	struct acpi_pcct_shared_memory *generic_comm_base =
		(struct acpi_pcct_shared_memory *)pcc_data.pcc_comm_addr;
	static ktime_t last_cmd_cmpl_time, last_mpar_reset;
	static int mpar_count;
	unsigned int time_delta;

	/*
	 * For CMD_WRITE we know for a fact the caller should have checked
	 * the channel before writing to PCC space.
	 */
	if (cmd == CMD_READ) {
		/*
		 * If there are pending cpc_writes, then we stole the channel
		 * before write completion, so first send a WRITE command to
		 * the platform.
		 */
		if (pcc_data.pending_pcc_write_cmd)
			send_pcc_cmd(CMD_WRITE);

		ret = check_pcc_chan(false);
		if (ret)
			goto end;
	} else /* CMD_WRITE */
		pcc_data.pending_pcc_write_cmd = false;

	/*
	 * Handle the Minimum Request Turnaround Time (MRTT):
	 * "The minimum amount of time that OSPM must wait after the completion
	 * of a command before issuing the next command, in microseconds."
	 */
	if (pcc_data.pcc_mrtt) {
		time_delta = ktime_us_delta(ktime_get(), last_cmd_cmpl_time);
		if (pcc_data.pcc_mrtt > time_delta)
			udelay(pcc_data.pcc_mrtt - time_delta);
	}

	/*
	 * Handle the non-zero Maximum Periodic Access Rate (MPAR):
	 * "The maximum number of periodic requests that the subspace channel
	 * can support, reported in commands per minute. 0 indicates no
	 * limitation."
	 *
	 * This parameter should ideally be zero or large enough so that it can
	 * handle the maximum number of requests that all the cores in the
	 * system can collectively generate. If it is not, we will follow the
	 * spec and just not send the request to the platform after hitting the
	 * MPAR limit in any 60s window.
	 */
	if (pcc_data.pcc_mpar) {
		if (mpar_count == 0) {
			time_delta = ktime_ms_delta(ktime_get(),
					last_mpar_reset);
			if (time_delta < 60 * MSEC_PER_SEC) {
				pr_debug("PCC cmd not sent due to MPAR limit");
				ret = -EIO;
				goto end;
			}
			last_mpar_reset = ktime_get();
			mpar_count = pcc_data.pcc_mpar;
		}
		mpar_count--;
	}

	/* Write to the shared comm region. */
	writew_relaxed(cmd, &generic_comm_base->command);

	/* Flip CMD COMPLETE bit */
	writew_relaxed(0, &generic_comm_base->status);

	pcc_data.platform_owns_pcc = true;

	/* Ring doorbell */
	ret = mbox_send_message(pcc_data.pcc_channel, &cmd);
	if (ret < 0) {
		pr_err("Err sending PCC mbox message. cmd:%d, ret:%d\n",
				cmd, ret);
		goto end;
	}

	/* Wait for completion and check for PCC error bit */
	ret = check_pcc_chan(true);

	if (pcc_data.pcc_mrtt)
		last_cmd_cmpl_time = ktime_get();

	if (pcc_data.pcc_channel->mbox->txdone_irq)
		mbox_chan_txdone(pcc_data.pcc_channel, ret);
	else
		mbox_client_txdone(pcc_data.pcc_channel, ret);

end:
	if (cmd == CMD_WRITE) {
		if (unlikely(ret)) {
			for_each_possible_cpu(i) {
				struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);

				if (!desc)
					continue;

				if (desc->write_cmd_id == pcc_data.pcc_write_cnt)
					desc->write_cmd_status = ret;
			}
		}
		pcc_data.pcc_write_cnt++;
		wake_up_all(&pcc_data.pcc_write_wait_q);
	}

	return ret;
}

static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
{
	if (ret < 0)
		pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
				*(u16 *)msg, ret);
	else
		pr_debug("TX completed. CMD sent:%x, ret:%d\n",
				*(u16 *)msg, ret);
}

struct mbox_client cppc_mbox_cl = {
	.tx_done = cppc_chan_tx_done,
	.knows_txdone = true,
};

static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
{
	int result = -EFAULT;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
	struct acpi_buffer state = {0, NULL};
	union acpi_object *psd = NULL;
	struct acpi_psd_package *pdomain;

	status = acpi_evaluate_object_typed(handle, "_PSD", NULL, &buffer,
			ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	psd = buffer.pointer;
	if (!psd || psd->package.count != 1) {
		pr_debug("Invalid _PSD data\n");
		goto end;
	}

	pdomain = &(cpc_ptr->domain_info);

	state.length = sizeof(struct acpi_psd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(psd->package.elements[0]),
		&format, &state);
	if (ACPI_FAILURE(status)) {
		pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
		pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
		pr_debug("Unknown _PSD:revision for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	result = 0;
end:
	kfree(buffer.pointer);
	return result;
}

/**
 * acpi_get_psd_map - Map the CPUs in a common freq domain.
 * @all_cpu_data: Ptrs to CPU specific CPPC data including ptr to freq domain info.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data)
{
	int count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct cppc_cpudata *pr, *match_pr;
	struct acpi_psd_package *pdomain;
	struct acpi_psd_package *match_pdomain;
	struct cpc_desc *cpc_ptr, *match_cpc_ptr;

	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * Now that we have _PSD data from all CPUs, lets setup P-state
	 * domain info.
	 */
	for_each_possible_cpu(i) {
		pr = all_cpu_data[i];
		if (!pr)
			continue;

		if (cpumask_test_cpu(i, covered_cpus))
			continue;

		cpc_ptr = per_cpu(cpc_desc_ptr, i);
		if (!cpc_ptr) {
			retval = -EFAULT;
			goto err_ret;
		}

		pdomain = &(cpc_ptr->domain_info);
		cpumask_set_cpu(i, pr->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_HW;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ANY;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
			if (!match_cpc_ptr) {
				retval = -EFAULT;
				goto err_ret;
			}

			match_pdomain = &(match_cpc_ptr->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain */
			if (match_pdomain->num_processors != count_target) {
				retval = -EFAULT;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EFAULT;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pr->shared_cpu_map);
		}

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = all_cpu_data[j];
			if (!match_pr)
				continue;

			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
			if (!match_cpc_ptr) {
				retval = -EFAULT;
				goto err_ret;
			}

			match_pdomain = &(match_cpc_ptr->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			match_pr->shared_type = pr->shared_type;
			cpumask_copy(match_pr->shared_cpu_map,
					pr->shared_cpu_map);
		}
	}

err_ret:
	for_each_possible_cpu(i) {
		pr = all_cpu_data[i];
		if (!pr)
			continue;

		/* Assume no coordination on any error parsing domain info */
		if (retval) {
			cpumask_clear(pr->shared_cpu_map);
			cpumask_set_cpu(i, pr->shared_cpu_map);
			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		}
	}

	free_cpumask_var(covered_cpus);
	return retval;
}
EXPORT_SYMBOL_GPL(acpi_get_psd_map);
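
/*
 * Usage sketch (illustrative only, not part of the original file): a CPPC
 * cpufreq driver would typically allocate a caller-owned per-CPU table and
 * call acpi_get_psd_map() once at init to discover frequency domains. The
 * names below (all_cpu_data, example_init) are hypothetical.
 *
 *	static struct cppc_cpudata **all_cpu_data;
 *
 *	static int __init example_init(void)
 *	{
 *		int ret;
 *
 *		all_cpu_data = kcalloc(num_possible_cpus(),
 *				       sizeof(void *), GFP_KERNEL);
 *		if (!all_cpu_data)
 *			return -ENOMEM;
 *
 *		// ... allocate a struct cppc_cpudata per possible CPU ...
 *
 *		ret = acpi_get_psd_map(all_cpu_data);
 *		if (ret)
 *			pr_debug("Error parsing PSD data, assuming no coordination\n");
 *		return ret;
 *	}
 */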

static int register_pcc_channel(int pcc_subspace_idx)
{
	struct acpi_pcct_hw_reduced *cppc_ss;
	u64 usecs_lat;

	if (pcc_subspace_idx >= 0) {
		pcc_data.pcc_channel = pcc_mbox_request_channel(&cppc_mbox_cl,
				pcc_subspace_idx);

		if (IS_ERR(pcc_data.pcc_channel)) {
			pr_err("Failed to find PCC communication channel\n");
			return -ENODEV;
		}

		/*
		 * The PCC mailbox controller driver should
		 * have parsed the PCCT (global table of all
		 * PCC channels) and stored pointers to the
		 * subspace communication region in con_priv.
		 */
		cppc_ss = (pcc_data.pcc_channel)->con_priv;

		if (!cppc_ss) {
			pr_err("No PCC subspace found for CPPC\n");
			return -ENODEV;
		}

		/*
		 * cppc_ss->latency is just a Nominal value. In reality
		 * the remote processor could be much slower to reply.
		 * So add an arbitrary amount of wait on top of Nominal.
		 */
		usecs_lat = NUM_RETRIES * cppc_ss->latency;
		pcc_data.deadline = ns_to_ktime(usecs_lat * NSEC_PER_USEC);
		pcc_data.pcc_mrtt = cppc_ss->min_turnaround_time;
		pcc_data.pcc_mpar = cppc_ss->max_access_rate;
		pcc_data.pcc_nominal = cppc_ss->latency;

		pcc_data.pcc_comm_addr = acpi_os_ioremap(cppc_ss->base_address,
							 cppc_ss->length);
		if (!pcc_data.pcc_comm_addr) {
			pr_err("Failed to ioremap PCC comm region mem\n");
			return -ENOMEM;
		}

		/* Set flag so that we dont come here for each CPU. */
		pcc_data.pcc_channel_acquired = true;
	}

	return 0;
}

/**
 * cpc_ffh_supported - check if FFH reading supported
 *
 * Check if the architecture has support for functional fixed hardware
 * read/write capability.
 *
 * Return: true for supported, false for not supported
 */
bool __weak cpc_ffh_supported(void)
{
	return false;
}

/*
 * An example CPC table looks like the following.
 *
 *	Name(_CPC, Package()
 *			{
 *			17,			// NumEntries
 *			1,			// Revision
 *			ResourceTemplate(){Register(PCC, 32, 0, 0x120, 2)},
 *						// Highest Performance
 *			ResourceTemplate(){Register(PCC, 32, 0, 0x124, 2)},
 *						// Nominal Performance
 *			ResourceTemplate(){Register(PCC, 32, 0, 0x128, 2)},
 *						// Lowest Nonlinear Performance
 *			ResourceTemplate(){Register(PCC, 32, 0, 0x12C, 2)},
 *						// Lowest Performance
 *			ResourceTemplate(){Register(PCC, 32, 0, 0x130, 2)},
 *						// Guaranteed Performance Register
 *			ResourceTemplate(){Register(PCC, 32, 0, 0x110, 2)},
 *						// Desired Performance Register
 *			ResourceTemplate(){Register(SystemMemory, 0, 0, 0, 0)},
 *			..
 *			..
 *			..
 *			}
 *
 * Each Register() encodes how to access that specific register.
 * e.g. a sample PCC entry has the following encoding:
 *
 *	Register (
 *		PCC,		// AddressSpaceKeyword
 *		8,		// RegisterBitWidth
 *		8,		// RegisterBitOffset
 *		0x30,		// RegisterAddress
 *		9		// AccessSize (subspace ID)
 *		)
 */

/**
 * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
 * @pr: Ptr to acpi_processor containing this CPUs logical Id.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_cppc_processor_probe(struct acpi_processor *pr)
{
	struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *out_obj, *cpc_obj;
	struct cpc_desc *cpc_ptr;
	struct cpc_reg *gas_t;
	struct device *cpu_dev;
	acpi_handle handle = pr->handle;
	unsigned int num_ent, i, cpc_rev;
	acpi_status status;
	int ret = -EFAULT;

	/* Parse the ACPI _CPC table for this CPU. */
	status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
			ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status)) {
		ret = -ENODEV;
		goto out_buf_free;
	}

	out_obj = (union acpi_object *) output.pointer;

	cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
	if (!cpc_ptr) {
		ret = -ENOMEM;
		goto out_buf_free;
	}

	/* First entry is NumEntries. */
	cpc_obj = &out_obj->package.elements[0];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		num_ent = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected entry type(%d) for NumEntries\n",
				cpc_obj->type);
		goto out_free;
	}

	/* Only support CPPCv2. Bail otherwise. */
	if (num_ent != CPPC_NUM_ENT) {
		pr_debug("Firmware exports %d entries. Expected: %d\n",
				num_ent, CPPC_NUM_ENT);
		goto out_free;
	}

	cpc_ptr->num_entries = num_ent;

	/* Second entry should be revision. */
	cpc_obj = &out_obj->package.elements[1];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		cpc_rev = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected entry type(%d) for Revision\n",
				cpc_obj->type);
		goto out_free;
	}

	if (cpc_rev != CPPC_REV) {
		pr_debug("Firmware exports revision:%d. Expected:%d\n",
				cpc_rev, CPPC_REV);
		goto out_free;
	}

	/* Iterate through remaining entries in _CPC */
	for (i = 2; i < num_ent; i++) {
		cpc_obj = &out_obj->package.elements[i];

		if (cpc_obj->type == ACPI_TYPE_INTEGER)	{
			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
			cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
		} else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
			gas_t = (struct cpc_reg *)
				cpc_obj->buffer.pointer;

			/*
			 * The PCC Subspace index is encoded inside
			 * the CPC table entries. The same PCC index
			 * will be used for all the PCC entries,
			 * so extract it only once.
			 */
			if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
				if (pcc_data.pcc_subspace_idx < 0)
					pcc_data.pcc_subspace_idx = gas_t->access_width;
				else if (pcc_data.pcc_subspace_idx != gas_t->access_width) {
					pr_debug("Mismatched PCC ids.\n");
					goto out_free;
				}
			} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
				if (gas_t->address) {
					void __iomem *addr;

					addr = ioremap(gas_t->address, gas_t->bit_width/8);
					if (!addr)
						goto out_free;
					cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
				}
			} else {
				if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
					/* Support only PCC, SystemMemory and FFH type regs */
					pr_debug("Unsupported register type: %d\n", gas_t->space_id);
					goto out_free;
				}
			}

			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
			memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
		} else {
			pr_debug("Err in entry:%d in CPC table of CPU:%d\n", i, pr->id);
			goto out_free;
		}
	}

	cpc_ptr->cpu_id = pr->id;

	/* Parse PSD data for this CPU */
	ret = acpi_get_psd(cpc_ptr, handle);
	if (ret)
		goto out_free;

	/* Register PCC channel once for all CPUs. */
	if (!pcc_data.pcc_channel_acquired) {
		ret = register_pcc_channel(pcc_data.pcc_subspace_idx);
		if (ret)
			goto out_free;

		init_rwsem(&pcc_data.pcc_lock);
		init_waitqueue_head(&pcc_data.pcc_write_wait_q);
	}

	/* Plug PSD data into this CPUs CPC descriptor. */
	per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;

	/* Everything looks okay */
	pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);

	/* Add per logical CPU nodes for reading its feedback counters. */
	cpu_dev = get_cpu_device(pr->id);
	if (!cpu_dev) {
		ret = -EINVAL;
		goto out_free;
	}

	ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
			"acpi_cppc");
	if (ret)
		goto out_free;

	kfree(output.pointer);
	return 0;

out_free:
	/* Free all the mapped sys mem areas for this CPU */
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;

		if (addr)
			iounmap(addr);
	}
	kfree(cpc_ptr);

out_buf_free:
	kfree(output.pointer);
	return ret;
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);

/**
 * acpi_cppc_processor_exit - Cleanup CPC structs.
 * @pr: Ptr to acpi_processor containing this CPUs logical Id.
 *
 * Return: Void
 */
void acpi_cppc_processor_exit(struct acpi_processor *pr)
{
	struct cpc_desc *cpc_ptr;
	unsigned int i;
	void __iomem *addr;

	cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
	if (!cpc_ptr)
		return;

	/* Free all the mapped sys mem areas for this CPU */
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
		if (addr)
			iounmap(addr);
	}

	kobject_put(&cpc_ptr->kobj);
	kfree(cpc_ptr);
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);

/**
 * cpc_read_ffh - Read FFH register
 * @cpunum:	CPU number to read
 * @reg:	cppc register information
 * @val:	place holder for return value
 *
 * Read bit_width bits from a specified address and bit_offset
 *
 * Return: an error code in case of failure, 0 for success
 */
int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
{
	return -ENOTSUPP;
}

/**
 * cpc_write_ffh - Write FFH register
 * @cpunum:	CPU number to write
 * @reg:	cppc register information
 * @val:	value to write
 *
 * Write value of bit_width bits to a specified address and bit_offset
 *
 * Return: an error code in case of failure, 0 for success
 */
int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
	return -ENOTSUPP;
}

/*
 * Read a CPC register value. Static integers are returned directly;
 * otherwise the access is routed to PCC, SystemMemory, FFH or the
 * generic ACPI OS accessors based on the register's space_id.
 */
static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
{
	int ret_val = 0;
	void __iomem *vaddr = NULL;
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;

	if (reg_res->type == ACPI_TYPE_INTEGER) {
		*val = reg_res->cpc_entry.int_value;
		return ret_val;
	}

	*val = 0;
	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM)
		vaddr = GET_PCC_VADDR(reg->address);
	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return cpc_read_ffh(cpu, reg, val);
	else
		return acpi_os_read_memory((acpi_physical_address)reg->address,
				val, reg->bit_width);

	switch (reg->bit_width) {
	case 8:
		*val = readb_relaxed(vaddr);
		break;
	case 16:
		*val = readw_relaxed(vaddr);
		break;
	case 32:
		*val = readl_relaxed(vaddr);
		break;
	case 64:
		*val = readq_relaxed(vaddr);
		break;
	default:
		pr_debug("Error: Cannot read %u bit width from PCC\n",
				reg->bit_width);
		ret_val = -EFAULT;
	}

	return ret_val;
}

static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
{
	int ret_val = 0;
	void __iomem *vaddr = NULL;
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;

	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM)
		vaddr = GET_PCC_VADDR(reg->address);
	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return cpc_write_ffh(cpu, reg, val);
	else
		return acpi_os_write_memory((acpi_physical_address)reg->address,
				val, reg->bit_width);

	switch (reg->bit_width) {
	case 8:
		writeb_relaxed(val, vaddr);
		break;
	case 16:
		writew_relaxed(val, vaddr);
		break;
	case 32:
		writel_relaxed(val, vaddr);
		break;
	case 64:
		writeq_relaxed(val, vaddr);
		break;
	default:
		pr_debug("Error: Cannot write %u bit width to PCC\n",
				reg->bit_width);
		ret_val = -EFAULT;
		break;
	}

	return ret_val;
}

/**
 * cppc_get_perf_caps - Get a CPUs performance capabilities.
 * @cpunum: CPU from which to get capabilities info.
 * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
 *
 * Return: 0 for success with perf_caps populated else -ERRNO.
 */
int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *highest_reg, *lowest_reg, *ref_perf,
			*nom_perf;
	u64 high, low, nom;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
	lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
	ref_perf = &cpc_desc->cpc_regs[REFERENCE_PERF];
	nom_perf = &cpc_desc->cpc_regs[NOMINAL_PERF];

	/* Are any of the regs PCC? */
	if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
		CPC_IN_PCC(ref_perf) || CPC_IN_PCC(nom_perf)) {
		regs_in_pcc = 1;
		down_write(&pcc_data.pcc_lock);
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(cpunum, highest_reg, &high);
	perf_caps->highest_perf = high;

	cpc_read(cpunum, lowest_reg, &low);
	perf_caps->lowest_perf = low;

	cpc_read(cpunum, nom_perf, &nom);
	perf_caps->nominal_perf = nom;

	if (!high || !low || !nom)
		ret = -EFAULT;

out_err:
	if (regs_in_pcc)
		up_write(&pcc_data.pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
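
/*
 * Usage sketch (illustrative only, not part of the original file): reading a
 * CPU's performance bounds, e.g. from a hypothetical cpufreq ->init()
 * callback (policy->cpu is assumed context):
 *
 *	struct cppc_perf_caps caps;
 *	int ret = cppc_get_perf_caps(policy->cpu, &caps);
 *
 *	if (!ret) {
 *		// caps.lowest_perf .. caps.highest_perf bound valid requests;
 *		// caps.nominal_perf is the maximum sustained performance.
 *	}
 */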

/**
 * cppc_get_perf_ctrs - Read a CPUs performance feedback counters.
 * @cpunum: CPU from which to read counters.
 * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
 *
 * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
 */
int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *delivered_reg, *reference_reg,
			*ref_perf_reg, *ctr_wrap_reg;
	u64 delivered, reference, ref_perf, ctr_wrap_time;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
	reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
	ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
	ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];

	/*
	 * If reference perf register is not supported then we should
	 * use the nominal perf value
	 */
	if (!CPC_SUPPORTED(ref_perf_reg))
		ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];

	/* Are any of the regs PCC? */
	if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
		CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
		down_write(&pcc_data.pcc_lock);
		regs_in_pcc = 1;
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(cpunum, delivered_reg, &delivered);
	cpc_read(cpunum, reference_reg, &reference);
	cpc_read(cpunum, ref_perf_reg, &ref_perf);

	/*
	 * Per spec, if the ctr_wrap_time optional register is unsupported,
	 * then the performance counters are assumed to never wrap during the
	 * lifetime of the platform.
	 */
	ctr_wrap_time = (u64)(~((u64)0));
	if (CPC_SUPPORTED(ctr_wrap_reg))
		cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time);

	if (!delivered || !reference || !ref_perf) {
		ret = -EFAULT;
		goto out_err;
	}

	perf_fb_ctrs->delivered = delivered;
	perf_fb_ctrs->reference = reference;
	perf_fb_ctrs->reference_perf = ref_perf;
	perf_fb_ctrs->ctr_wrap_time = ctr_wrap_time;
out_err:
	if (regs_in_pcc)
		up_write(&pcc_data.pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
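
/*
 * Usage sketch (illustrative only, not part of the original file): the
 * delivered performance over an interval can be derived from two snapshots
 * of the feedback counters, scaled by the reference performance:
 *
 *	struct cppc_perf_fb_ctrs fb_t0 = {0}, fb_t1 = {0};
 *	u64 delta_ref, delta_delivered, perf = 0;
 *
 *	cppc_get_perf_ctrs(cpu, &fb_t0);
 *	// ... let the workload run for a while ...
 *	cppc_get_perf_ctrs(cpu, &fb_t1);
 *
 *	delta_ref = fb_t1.reference - fb_t0.reference;
 *	delta_delivered = fb_t1.delivered - fb_t0.delivered;
 *	if (delta_ref)
 *		perf = fb_t0.reference_perf * delta_delivered / delta_ref;
 *
 * Counter wraparound within the interval (bounded by ctr_wrap_time) is
 * ignored here for brevity.
 */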

/**
 * cppc_set_perf - Set a CPUs performance controls.
 * @cpu: CPU for which to set performance controls.
 * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
 *
 * Return: 0 for success, -ERRNO otherwise.
 */
int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cpc_register_resource *desired_reg;
	int ret = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -ENODEV;
	}

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];

	/*
	 * This is Phase-I where we want to write to CPC registers
	 * -> We want all CPUs to be able to execute this phase in parallel
	 *
	 * Since read_lock can be acquired by multiple CPUs simultaneously we
	 * achieve that goal here
	 */
	if (CPC_IN_PCC(desired_reg)) {
		down_read(&pcc_data.pcc_lock);	/* BEGIN Phase-I */
		if (pcc_data.platform_owns_pcc) {
			ret = check_pcc_chan(false);
			if (ret) {
				up_read(&pcc_data.pcc_lock);
				return ret;
			}
		}
		/*
		 * Update the pending_write to make sure a PCC CMD_READ will not
		 * arrive and steal the channel during the switch to write lock
		 */
		pcc_data.pending_pcc_write_cmd = true;
		cpc_desc->write_cmd_id = pcc_data.pcc_write_cnt;
		cpc_desc->write_cmd_status = 0;
	}

	/*
	 * Skip writing MIN/MAX until Linux knows how to come up with
	 * useful values.
	 */
	cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);

	if (CPC_IN_PCC(desired_reg))
		up_read(&pcc_data.pcc_lock);	/* END Phase-I */

	/*
	 * This is Phase-II where we transfer the ownership of PCC to Platform
	 *
	 * Short Summary: Basically if we think of a group of cppc_set_perf
	 * requests that happened in short overlapping interval. The last CPU to
	 * come out of Phase-I will enter Phase-II and ring the doorbell.
	 *
	 * We have the following requirements for Phase-II:
	 *     1. We want to execute Phase-II only when there are no CPUs
	 * currently executing in Phase-I
	 *     2. Once we start Phase-II we want to avoid all other CPUs from
	 * entering Phase-I.
	 *     3. We want only one CPU among all those who went through Phase-I
	 * to run Phase-II
	 *
	 * If write_trylock fails to get the lock and doesn't transfer the
	 * PCC ownership to the platform, then one of the following will be TRUE
	 *     1. There is at least one CPU in Phase-I which will later execute
	 * write_trylock, so the CPUs in Phase-I will be responsible for
	 * executing the Phase-II.
	 *     2. Some other CPU has beaten this CPU to successfully execute the
	 * write_trylock and has already acquired the write_lock. We know for a
	 * fact it (the other CPU acquiring the write_lock) couldn't have
	 * happened before this CPU's Phase-I as we held the read_lock.
	 *     3. Some other CPU executing pcc CMD_READ has stolen the
	 * write_lock, in which case, send_pcc_cmd will check for pending
	 * doorbell request and ensure write command is processed.
	 */
	if (CPC_IN_PCC(desired_reg)) {
		if (down_write_trylock(&pcc_data.pcc_lock)) {	/* BEGIN Phase-II */
			/* Update only if there are pending write commands */
			if (pcc_data.pending_pcc_write_cmd)
				send_pcc_cmd(CMD_WRITE);
			up_write(&pcc_data.pcc_lock);		/* END Phase-II */
		} else
			/* Wait until pcc_write_cnt is updated by send_pcc_cmd */
			wait_event(pcc_data.pcc_write_wait_q,
				cpc_desc->write_cmd_id != pcc_data.pcc_write_cnt);

		/* send_pcc_cmd updates the status in case of failure */
		ret = cpc_desc->write_cmd_status;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_perf);
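
/*
 * Usage sketch (illustrative only, not part of the original file):
 * requesting a new performance level for a CPU. Only desired_perf is
 * consumed today (MIN/MAX writes are skipped above); target_perf is a
 * hypothetical value chosen within [lowest_perf, highest_perf]:
 *
 *	struct cppc_perf_ctrls ctrls = {0};
 *	int ret;
 *
 *	ctrls.desired_perf = target_perf;
 *	ret = cppc_set_perf(cpu, &ctrls);
 */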

/**
 * cppc_get_transition_latency - returns frequency transition latency in ns
 *
 * ACPI CPPC does not explicitly specify how a platform can specify the
 * transition latency for performance change requests. The closest we have
 * is the timing information from the PCCT tables which provides the info
 * on the number and frequency of PCC commands the platform can handle.
 */
unsigned int cppc_get_transition_latency(int cpu_num)
{
	/*
	 * Expected transition latency is based on the PCCT timing values.
	 * Below are definitions from the ACPI spec:
	 * pcc_nominal- Expected latency to process a command, in microseconds
	 * pcc_mpar   - The maximum number of periodic requests that the subspace
	 *              channel can support, reported in commands per minute. 0
	 *              indicates no limitation.
	 * pcc_mrtt   - The minimum amount of time that OSPM must wait after the
	 *              completion of a command before issuing the next command,
	 *              in microseconds.
	 */
	unsigned int latency_ns = 0;
	struct cpc_desc *cpc_desc;
	struct cpc_register_resource *desired_reg;

	cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
	if (!cpc_desc)
		return CPUFREQ_ETERNAL;

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
	if (!CPC_IN_PCC(desired_reg))
		return CPUFREQ_ETERNAL;

	if (pcc_data.pcc_mpar)
		latency_ns = 60 * (1000 * 1000 * 1000 / pcc_data.pcc_mpar);

	latency_ns = max(latency_ns, pcc_data.pcc_nominal * 1000);
	latency_ns = max(latency_ns, pcc_data.pcc_mrtt * 1000);

	return latency_ns;
}
EXPORT_SYMBOL_GPL(cppc_get_transition_latency);
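
/*
 * Usage sketch (illustrative only, not part of the original file): a cpufreq
 * driver could plug this into its policy initialization; the function itself
 * already falls back to CPUFREQ_ETERNAL when no PCCT timing data is usable:
 *
 *	policy->cpuinfo.transition_latency =
 *			cppc_get_transition_latency(policy->cpu);
 */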