// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPPC (Collaborative Processor Performance Control) methods used
 * by CPUfreq drivers.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 *
 * CPPC describes a few methods for controlling CPU performance using
 * information from a per CPU table called CPC. This table is described
 * in the ACPI 5.0+ specification. The table consists of a list of
 * registers which may be memory mapped or hardware registers and also
 * may include some static integer values.
 *
 * CPU performance is on an abstract continuous scale as against a
 * discretized P-state scale which is tied to CPU frequency only. In
 * brief, the basic operation involves:
 *
 * - OS makes a CPU performance request (can provide min and max bounds).
 *
 * - Platform (such as BMC) is free to optimize the request within the
 *   requested bounds depending on power/thermal budgets etc.
 *
 * - Platform conveys its decision back to the OS.
 *
 * The communication between OS and platform occurs through another
 * medium called (PCC) Platform Communication Channel. This is a generic
 * mailbox-like mechanism which includes doorbell semantics to indicate
 * register updates. See drivers/mailbox/pcc.c for details on PCC.
 */

#define pr_fmt(fmt)	"ACPI CPPC: " fmt

#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/ktime.h>
#include <linux/rwsem.h>
#include <linux/wait.h>

#include <acpi/cppc_acpi.h>

struct cppc_pcc_data {
	struct mbox_chan *pcc_channel;
	void __iomem *pcc_comm_addr;
	bool pcc_channel_acquired;
	unsigned int deadline_us;
	unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;

	bool pending_pcc_write_cmd;	/* Any pending/batched PCC write cmds? */
	bool platform_owns_pcc;		/* Ownership of PCC subspace */
	unsigned int pcc_write_cnt;	/* Running count of PCC write commands */

	/*
	 * Lock to provide controlled access to the PCC channel.
	 *
	 * For performance-critical usecases (currently cppc_set_perf):
	 *   Take the read lock and check whether the channel belongs to the
	 *   OSPM before reading from or writing to the PCC subspace. Take
	 *   the write lock before transferring channel ownership to the
	 *   platform via the doorbell. This allows a number of CPPC requests
	 *   that originate at about the same time to be batched.
	 *
	 * For non-performance-critical usecases (init):
	 *   Take the write lock for all purposes, which gives exclusive
	 *   access.
	 */
	struct rw_semaphore pcc_lock;

	/* Wait queue for CPUs whose requests were batched */
	wait_queue_head_t pcc_write_wait_q;
	ktime_t last_cmd_cmpl_time;
	ktime_t last_mpar_reset;
	int mpar_count;
	int refcount;
};

/* Array to represent the PCC channel per subspace ID */
static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES];

/* The cpu_pcc_subspace_idx contains per CPU subspace ID */
static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);

/*
 * The cpc_desc structure contains the ACPI register details
 * as described in the per CPU _CPC tables. The details
 * include the type of register (e.g. PCC, System IO, FFH etc.)
 * and destination addresses which lets us READ/WRITE CPU performance
 * information using the appropriate I/O methods.
 */
static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);

/* pcc mapped address + header size + offset within PCC subspace */
#define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \
					0x8 + (offs))

/* Check if a CPC register is in PCC */
#define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&		\
				(cpc)->cpc_entry.reg.space_id ==	\
				ACPI_ADR_SPACE_PLATFORM_COMM)

/* Evaluates to True if reg is a NULL register descriptor */
#define IS_NULL_REG(reg) ((reg)->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && \
				(reg)->address == 0 &&			\
				(reg)->bit_width == 0 &&		\
				(reg)->bit_offset == 0 &&		\
				(reg)->access_width == 0)

/* Evaluates to True if an optional cpc field is supported */
#define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ?		\
				!!(cpc)->cpc_entry.int_value :		\
				!IS_NULL_REG(&(cpc)->cpc_entry.reg))

/*
 * Arbitrary Retries in case the remote processor is slow to respond
 * to PCC commands. Keeping it high enough to cover emulators where
 * the processors run painfully slow.
 */
#define NUM_RETRIES 500ULL

struct cppc_attr {
	struct attribute attr;
	ssize_t (*show)(struct kobject *kobj,
			struct attribute *attr, char *buf);
	ssize_t (*store)(struct kobject *kobj,
			struct attribute *attr, const char *c, ssize_t count);
};

#define define_one_cppc_ro(_name)		\
static struct cppc_attr _name =			\
__ATTR(_name, 0444, show_##_name, NULL)

#define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)

#define show_cppc_data(access_fn, struct_name, member_name)		\
	static ssize_t show_##member_name(struct kobject *kobj,		\
					struct attribute *attr,	char *buf) \
	{								\
		struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);		\
		struct struct_name st_name = {0};			\
		int ret;						\
									\
		ret = access_fn(cpc_ptr->cpu_id, &st_name);		\
		if (ret)						\
			return ret;					\
									\
		return scnprintf(buf, PAGE_SIZE, "%llu\n",		\
				(u64)st_name.member_name);		\
	}								\
	define_one_cppc_ro(member_name)
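
/*
 * For illustration, each show_cppc_data() invocation below expands to a
 * sysfs "show" callback plus a read-only attribute. For example,
 * show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf)
 * generates, roughly:
 *
 *	static ssize_t show_highest_perf(struct kobject *kobj,
 *					struct attribute *attr, char *buf)
 *	{
 *		struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
 *		struct cppc_perf_caps st_name = {0};
 *		int ret;
 *
 *		ret = cppc_get_perf_caps(cpc_ptr->cpu_id, &st_name);
 *		if (ret)
 *			return ret;
 *
 *		return scnprintf(buf, PAGE_SIZE, "%llu\n",
 *				(u64)st_name.highest_perf);
 *	}
 *	static struct cppc_attr highest_perf =
 *	__ATTR(highest_perf, 0444, show_highest_perf, NULL);
 */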

show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_freq);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq);

show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);

static ssize_t show_feedback_ctrs(struct kobject *kobj,
		struct attribute *attr, char *buf)
{
	struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
	struct cppc_perf_fb_ctrs fb_ctrs = {0};
	int ret;

	ret = cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
	if (ret)
		return ret;

	return scnprintf(buf, PAGE_SIZE, "ref:%llu del:%llu\n",
			fb_ctrs.reference, fb_ctrs.delivered);
}
define_one_cppc_ro(feedback_ctrs);

static struct attribute *cppc_attrs[] = {
	&feedback_ctrs.attr,
	&reference_perf.attr,
	&wraparound_time.attr,
	&highest_perf.attr,
	&lowest_perf.attr,
	&lowest_nonlinear_perf.attr,
	&nominal_perf.attr,
	&nominal_freq.attr,
	&lowest_freq.attr,
	NULL
};

static struct kobj_type cppc_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = cppc_attrs,
};

static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
{
	int ret, status;
	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
	struct acpi_pcct_shared_memory __iomem *generic_comm_base =
		pcc_ss_data->pcc_comm_addr;

	if (!pcc_ss_data->platform_owns_pcc)
		return 0;

	/*
	 * Poll PCC status register every 3us(delay_us) for maximum of
	 * deadline_us(timeout_us) until PCC command complete bit is set(cond)
	 */
	ret = readw_relaxed_poll_timeout(&generic_comm_base->status, status,
					 status & PCC_CMD_COMPLETE_MASK, 3,
					 pcc_ss_data->deadline_us);

	if (likely(!ret)) {
		pcc_ss_data->platform_owns_pcc = false;
		if (chk_err_bit && (status & PCC_ERROR_MASK))
			ret = -EIO;
	}

	if (unlikely(ret))
		pr_err("PCC check channel failed for ss: %d. ret=%d\n",
		       pcc_ss_id, ret);

	return ret;
}

/*
 * This function transfers the ownership of the PCC to the platform
 * and then waits for the PCC command to complete.
 */
static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
{
	int ret = -EIO, i;
	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
	struct acpi_pcct_shared_memory *generic_comm_base =
		(struct acpi_pcct_shared_memory *)pcc_ss_data->pcc_comm_addr;
	unsigned int time_delta;

	/*
	 * For CMD_WRITE we know for a fact the caller should have checked
	 * the channel before writing to PCC space
	 */
	if (cmd == CMD_READ) {
		/*
		 * If there are pending cpc_writes, then we stole the channel
		 * before write completion, so first send a WRITE command to
		 * the platform
		 */
		if (pcc_ss_data->pending_pcc_write_cmd)
			send_pcc_cmd(pcc_ss_id, CMD_WRITE);

		ret = check_pcc_chan(pcc_ss_id, false);
		if (ret)
			goto end;
	} else /* CMD_WRITE */
		pcc_ss_data->pending_pcc_write_cmd = false;

	/*
	 * Handle the Minimum Request Turnaround Time (MRTT):
	 * "The minimum amount of time that OSPM must wait after the
	 * completion of a command before issuing the next command,
	 * in microseconds."
	 */
	if (pcc_ss_data->pcc_mrtt) {
		time_delta = ktime_us_delta(ktime_get(),
					    pcc_ss_data->last_cmd_cmpl_time);
		if (pcc_ss_data->pcc_mrtt > time_delta)
			udelay(pcc_ss_data->pcc_mrtt - time_delta);
	}

	/*
	 * Handle the non-zero Maximum Periodic Access Rate (MPAR):
	 * "The maximum number of periodic requests that the subspace channel
	 * can support, reported in commands per minute. 0 indicates no
	 * limitation."
	 *
	 * This parameter should ideally be zero or large enough so that it
	 * can handle the maximum number of requests that all the cores in
	 * the system can collectively generate. If it is not, we will follow
	 * the spec and just not send the request to the platform after
	 * hitting the MPAR limit in any 60s window.
	 */
	if (pcc_ss_data->pcc_mpar) {
		if (pcc_ss_data->mpar_count == 0) {
			time_delta = ktime_ms_delta(ktime_get(),
						    pcc_ss_data->last_mpar_reset);
			if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) {
				pr_debug("PCC cmd for subspace %d not sent due to MPAR limit",
					 pcc_ss_id);
				ret = -EIO;
				goto end;
			}
			pcc_ss_data->last_mpar_reset = ktime_get();
			pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar;
		}
		pcc_ss_data->mpar_count--;
	}

	/* Write to the shared comm region. */
	writew_relaxed(cmd, &generic_comm_base->command);

	/* Flip CMD COMPLETE bit */
	writew_relaxed(0, &generic_comm_base->status);

	pcc_ss_data->platform_owns_pcc = true;

	/* Ring doorbell */
	ret = mbox_send_message(pcc_ss_data->pcc_channel, &cmd);
	if (ret < 0) {
		pr_err("Err sending PCC mbox message. ss: %d cmd:%d, ret:%d\n",
		       pcc_ss_id, cmd, ret);
		goto end;
	}

	/* Wait for completion and check for PCC error bit */
	ret = check_pcc_chan(pcc_ss_id, true);

	if (pcc_ss_data->pcc_mrtt)
		pcc_ss_data->last_cmd_cmpl_time = ktime_get();

	if (pcc_ss_data->pcc_channel->mbox->txdone_irq)
		mbox_chan_txdone(pcc_ss_data->pcc_channel, ret);
	else
		mbox_client_txdone(pcc_ss_data->pcc_channel, ret);

end:
	if (cmd == CMD_WRITE) {
		if (unlikely(ret)) {
			for_each_possible_cpu(i) {
				struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);

				if (!desc)
					continue;

				if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt)
					desc->write_cmd_status = ret;
			}
		}
		pcc_ss_data->pcc_write_cnt++;
		wake_up_all(&pcc_ss_data->pcc_write_wait_q);
	}

	return ret;
}

static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
{
	if (ret < 0)
		pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
			 *(u16 *)msg, ret);
	else
		pr_debug("TX completed. CMD sent:%x, ret:%d\n",
			 *(u16 *)msg, ret);
}

struct mbox_client cppc_mbox_cl = {
	.tx_done = cppc_chan_tx_done,
	.knows_txdone = true,
};

static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
{
	int result = -EFAULT;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
	struct acpi_buffer state = {0, NULL};
	union acpi_object *psd = NULL;
	struct acpi_psd_package *pdomain;

	status = acpi_evaluate_object_typed(handle, "_PSD", NULL,
					    &buffer, ACPI_TYPE_PACKAGE);
	if (status == AE_NOT_FOUND)	/* _PSD is optional */
		return 0;
	if (ACPI_FAILURE(status))
		return -ENODEV;

	psd = buffer.pointer;
	if (!psd || psd->package.count != 1) {
		pr_debug("Invalid _PSD data\n");
		goto end;
	}

	pdomain = &(cpc_ptr->domain_info);

	state.length = sizeof(struct acpi_psd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(psd->package.elements[0]),
				      &format, &state);
	if (ACPI_FAILURE(status)) {
		pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
		pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
		pr_debug("Unknown _PSD:revision for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	result = 0;
end:
	kfree(buffer.pointer);
	return result;
}

/**
 * acpi_get_psd_map - Map the CPUs in a common freq domain.
 * @all_cpu_data: Ptrs to CPU specific CPPC data including ptr to freq domain info.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data)
{
	int count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct cppc_cpudata *pr, *match_pr;
	struct acpi_psd_package *pdomain;
	struct acpi_psd_package *match_pdomain;
	struct cpc_desc *cpc_ptr, *match_cpc_ptr;

	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * Now that we have _PSD data from all CPUs, let's setup P-state
	 * domain info.
	 */
	for_each_possible_cpu(i) {
		pr = all_cpu_data[i];
		if (!pr)
			continue;

		if (cpumask_test_cpu(i, covered_cpus))
			continue;

		cpc_ptr = per_cpu(cpc_desc_ptr, i);
		if (!cpc_ptr) {
			retval = -EFAULT;
			goto err_ret;
		}

		pdomain = &(cpc_ptr->domain_info);
		cpumask_set_cpu(i, pr->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_HW;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ANY;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
			if (!match_cpc_ptr) {
				retval = -EFAULT;
				goto err_ret;
			}

			match_pdomain = &(match_cpc_ptr->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain */
			if (match_pdomain->num_processors != count_target) {
				retval = -EFAULT;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EFAULT;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pr->shared_cpu_map);
		}

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = all_cpu_data[j];
			if (!match_pr)
				continue;

			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
			if (!match_cpc_ptr) {
				retval = -EFAULT;
				goto err_ret;
			}

			match_pdomain = &(match_cpc_ptr->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			match_pr->shared_type = pr->shared_type;
			cpumask_copy(match_pr->shared_cpu_map,
				     pr->shared_cpu_map);
		}
	}

err_ret:
	for_each_possible_cpu(i) {
		pr = all_cpu_data[i];
		if (!pr)
			continue;

		/* Assume no coordination on any error parsing domain info */
		if (retval) {
			cpumask_clear(pr->shared_cpu_map);
			cpumask_set_cpu(i, pr->shared_cpu_map);
			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		}
	}

	free_cpumask_var(covered_cpus);
	return retval;
}
EXPORT_SYMBOL_GPL(acpi_get_psd_map);

static int register_pcc_channel(int pcc_ss_idx)
{
	struct acpi_pcct_hw_reduced *cppc_ss;
	u64 usecs_lat;

	if (pcc_ss_idx >= 0) {
		pcc_data[pcc_ss_idx]->pcc_channel =
			pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx);

		if (IS_ERR(pcc_data[pcc_ss_idx]->pcc_channel)) {
			pr_err("Failed to find PCC channel for subspace %d\n",
			       pcc_ss_idx);
			return -ENODEV;
		}

		/*
		 * The PCC mailbox controller driver should
		 * have parsed the PCCT (global table of all
		 * PCC channels) and stored pointers to the
		 * subspace communication region in con_priv.
		 */
		cppc_ss = (pcc_data[pcc_ss_idx]->pcc_channel)->con_priv;

		if (!cppc_ss) {
			pr_err("No PCC subspace found for %d CPPC\n",
			       pcc_ss_idx);
			return -ENODEV;
		}

		/*
		 * cppc_ss->latency is just a Nominal value. In reality
		 * the remote processor could be much slower to reply.
		 * So add an arbitrary amount of wait on top of Nominal.
		 */
		usecs_lat = NUM_RETRIES * cppc_ss->latency;
		pcc_data[pcc_ss_idx]->deadline_us = usecs_lat;
		pcc_data[pcc_ss_idx]->pcc_mrtt = cppc_ss->min_turnaround_time;
		pcc_data[pcc_ss_idx]->pcc_mpar = cppc_ss->max_access_rate;
		pcc_data[pcc_ss_idx]->pcc_nominal = cppc_ss->latency;

		pcc_data[pcc_ss_idx]->pcc_comm_addr =
			acpi_os_ioremap(cppc_ss->base_address, cppc_ss->length);
		if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
			pr_err("Failed to ioremap PCC comm region mem for %d\n",
			       pcc_ss_idx);
			return -ENOMEM;
		}

		/* Set flag so that we don't come here for each CPU. */
		pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
	}

	return 0;
}

/**
 * cpc_ffh_supported() - check if FFH reading supported
 *
 * Check if the architecture has support for functional fixed hardware
 * read/write capability.
 *
 * Return: true for supported, false for not supported
 */
bool __weak cpc_ffh_supported(void)
{
	return false;
}

/**
 * pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace
 * @pcc_ss_id:	PCC subspace ID for which to allocate the data
 *
 * Check and allocate the cppc_pcc_data memory.
 * In some processor configurations it is possible that the same subspace
 * is shared between multiple CPUs. This is seen especially in CPUs with
 * hardware multi-threading support.
 *
 * Return: 0 for success, errno for failure
 */
int pcc_data_alloc(int pcc_ss_id)
{
	if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES)
		return -EINVAL;

	if (pcc_data[pcc_ss_id]) {
		pcc_data[pcc_ss_id]->refcount++;
	} else {
		pcc_data[pcc_ss_id] = kzalloc(sizeof(struct cppc_pcc_data),
					      GFP_KERNEL);
		if (!pcc_data[pcc_ss_id])
			return -ENOMEM;
		pcc_data[pcc_ss_id]->refcount++;
	}

	return 0;
}

/* Check if the CPPC revision + num_ent combination is supported */
static bool is_cppc_supported(int revision, int num_ent)
{
	int expected_num_ent;

	switch (revision) {
	case CPPC_V2_REV:
		expected_num_ent = CPPC_V2_NUM_ENT;
		break;
	case CPPC_V3_REV:
		expected_num_ent = CPPC_V3_NUM_ENT;
		break;
	default:
		pr_debug("Firmware exports unsupported CPPC revision: %d\n",
			 revision);
		return false;
	}

	if (expected_num_ent != num_ent) {
		pr_debug("Firmware exports %d entries. Expected: %d for CPPC rev:%d\n",
			 num_ent, expected_num_ent, revision);
		return false;
	}

	return true;
}

/*
 * An example CPC table looks like the following.
 *
 *	Name (_CPC, Package() {
 *		17,				// NumEntries
 *		1,				// Revision
 *		ResourceTemplate() {Register(PCC, 32, 0, 0x120, 2)},
 *						// Highest Performance
 *		ResourceTemplate() {Register(PCC, 32, 0, 0x124, 2)},
 *						// Nominal Performance
 *		ResourceTemplate() {Register(PCC, 32, 0, 0x128, 2)},
 *						// Lowest Nonlinear Performance
 *		ResourceTemplate() {Register(PCC, 32, 0, 0x12C, 2)},
 *						// Lowest Performance
 *		ResourceTemplate() {Register(PCC, 32, 0, 0x130, 2)},
 *						// Guaranteed Performance Register
 *		ResourceTemplate() {Register(PCC, 32, 0, 0x110, 2)},
 *						// Desired Performance Register
 *		ResourceTemplate() {Register(SystemMemory, 0, 0, 0, 0)},
 *		...
 *	})
 *
 * Each Register() encodes how to access that specific register.
 * e.g. a sample PCC entry has the following encoding:
 *
 *	Register (
 *		PCC,		// AddressSpaceKeyword
 *		8,		// RegisterBitWidth
 *		8,		// RegisterBitOffset
 *		0x30,		// RegisterAddress
 *		9		// AccessSize (subspace ID)
 *	)
 */

/**
 * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_cppc_processor_probe(struct acpi_processor *pr)
{
	struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *out_obj, *cpc_obj;
	struct cpc_desc *cpc_ptr;
	struct cpc_reg *gas_t;
	struct device *cpu_dev;
	acpi_handle handle = pr->handle;
	unsigned int num_ent, i, cpc_rev;
	int pcc_subspace_id = -1;
	acpi_status status;
	int ret = -EFAULT;

	/* Parse the ACPI _CPC table for this CPU. */
	status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
					    ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status)) {
		ret = -ENODEV;
		goto out_buf_free;
	}

	out_obj = (union acpi_object *) output.pointer;

	cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
	if (!cpc_ptr) {
		ret = -ENOMEM;
		goto out_buf_free;
	}

	/* First entry is NumEntries. */
	cpc_obj = &out_obj->package.elements[0];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		num_ent = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected entry type(%d) for NumEntries\n",
			 cpc_obj->type);
		goto out_free;
	}
	cpc_ptr->num_entries = num_ent;

	/* Second entry should be revision. */
	cpc_obj = &out_obj->package.elements[1];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		cpc_rev = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected entry type(%d) for Revision\n",
			 cpc_obj->type);
		goto out_free;
	}
	cpc_ptr->version = cpc_rev;

	if (!is_cppc_supported(cpc_rev, num_ent))
		goto out_free;

	/* Iterate through remaining entries in _CPC */
	for (i = 2; i < num_ent; i++) {
		cpc_obj = &out_obj->package.elements[i];

		if (cpc_obj->type == ACPI_TYPE_INTEGER) {
			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
			cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
		} else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
			gas_t = (struct cpc_reg *)
				cpc_obj->buffer.pointer;

			/*
			 * The PCC Subspace index is encoded inside
			 * the CPC table entries. The same PCC index
			 * will be used for all the PCC entries,
			 * so extract it only once.
			 */
			if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
				if (pcc_subspace_id < 0) {
					pcc_subspace_id = gas_t->access_width;
					if (pcc_data_alloc(pcc_subspace_id))
						goto out_free;
				} else if (pcc_subspace_id != gas_t->access_width) {
					pr_debug("Mismatched PCC ids.\n");
					goto out_free;
				}
			} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
				if (gas_t->address) {
					void __iomem *addr;

					addr = ioremap(gas_t->address, gas_t->bit_width/8);
					if (!addr)
						goto out_free;
					cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
				}
			} else {
				if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
					/* Support only PCC, system memory and FFH type regs */
					pr_debug("Unsupported register type: %d\n", gas_t->space_id);
					goto out_free;
				}
			}

			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
			memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
		} else {
			pr_debug("Err in entry:%d in CPC table of CPU:%d\n", i, pr->id);
			goto out_free;
		}
	}
	per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;

	/*
	 * Initialize the remaining cpc_regs as unsupported.
	 * Example: In case FW exposes CPPC v2, the below loop will initialize
	 * LOWEST_FREQ and NOMINAL_FREQ regs as unsupported
	 */
	for (i = num_ent - 2; i < MAX_CPC_REG_ENT; i++) {
		cpc_ptr->cpc_regs[i].type = ACPI_TYPE_INTEGER;
		cpc_ptr->cpc_regs[i].cpc_entry.int_value = 0;
	}

	/* Store CPU Logical ID */
	cpc_ptr->cpu_id = pr->id;

	/* Parse PSD data for this CPU */
	ret = acpi_get_psd(cpc_ptr, handle);
	if (ret)
		goto out_free;

	/* Register PCC channel once for all PCC subspace ID. */
	if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
		ret = register_pcc_channel(pcc_subspace_id);
		if (ret)
			goto out_free;

		init_rwsem(&pcc_data[pcc_subspace_id]->pcc_lock);
		init_waitqueue_head(&pcc_data[pcc_subspace_id]->pcc_write_wait_q);
	}

	/* Everything looks okay */
	pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);

	/* Add per logical CPU nodes for reading its feedback counters. */
	cpu_dev = get_cpu_device(pr->id);
	if (!cpu_dev) {
		ret = -EINVAL;
		goto out_free;
	}

	/* Plug PSD data into this CPU's CPC descriptor. */
	per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;

	ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
				   "acpi_cppc");
	if (ret) {
		per_cpu(cpc_desc_ptr, pr->id) = NULL;
		goto out_free;
	}

	kfree(output.pointer);
	return 0;

out_free:
	/* Free all the mapped sys mem areas for this CPU */
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;

		if (addr)
			iounmap(addr);
	}
	kfree(cpc_ptr);

out_buf_free:
	kfree(output.pointer);
	return ret;
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);

/**
 * acpi_cppc_processor_exit - Cleanup CPC structs.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: Void
 */
void acpi_cppc_processor_exit(struct acpi_processor *pr)
{
	struct cpc_desc *cpc_ptr;
	unsigned int i;
	void __iomem *addr;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id);

	if (pcc_ss_id >= 0 && pcc_data[pcc_ss_id]) {
		if (pcc_data[pcc_ss_id]->pcc_channel_acquired) {
			pcc_data[pcc_ss_id]->refcount--;
			if (!pcc_data[pcc_ss_id]->refcount) {
				pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
				kfree(pcc_data[pcc_ss_id]);
				pcc_data[pcc_ss_id] = NULL;
			}
		}
	}

	cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
	if (!cpc_ptr)
		return;

	/* Free all the mapped sys mem areas for this CPU */
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
		if (addr)
			iounmap(addr);
	}

	kobject_put(&cpc_ptr->kobj);
	kfree(cpc_ptr);
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);

/**
 * cpc_read_ffh() - Read FFH register
 * @cpunum:	CPU number to read
 * @reg:	cppc register information
 * @val:	place holder for return value
 *
 * Read bit_width bits from a specified address and bit_offset
 *
 * Return: 0 for success and error code
 */
int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
{
	return -ENOTSUPP;
}

/**
 * cpc_write_ffh() - Write FFH register
 * @cpunum:	CPU number to write
 * @reg:	cppc register information
 * @val:	value to write
 *
 * Write value of bit_width bits to a specified address and bit_offset
 *
 * Return: 0 for success and error code
 */
int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
	return -ENOTSUPP;
}

/*
 * Since cpc_read and cpc_write are called while holding pcc_lock, it should
 * be as fast as possible. We have already mapped the PCC subspace during
 * init, so we can directly write to it.
 */
static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
{
	int ret_val = 0;
	void __iomem *vaddr = NULL;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;

	if (reg_res->type == ACPI_TYPE_INTEGER) {
		*val = reg_res->cpc_entry.int_value;
		return ret_val;
	}

	*val = 0;
	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return cpc_read_ffh(cpu, reg, val);
	else
		return acpi_os_read_memory((acpi_physical_address)reg->address,
					   val, reg->bit_width);

	switch (reg->bit_width) {
	case 8:
		*val = readb_relaxed(vaddr);
		break;
	case 16:
		*val = readw_relaxed(vaddr);
		break;
	case 32:
		*val = readl_relaxed(vaddr);
		break;
	case 64:
		*val = readq_relaxed(vaddr);
		break;
	default:
		pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
			 reg->bit_width, pcc_ss_id);
		ret_val = -EFAULT;
	}

	return ret_val;
}

static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
{
	int ret_val = 0;
	void __iomem *vaddr = NULL;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;

	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return cpc_write_ffh(cpu, reg, val);
	else
		return acpi_os_write_memory((acpi_physical_address)reg->address,
					    val, reg->bit_width);

	switch (reg->bit_width) {
	case 8:
		writeb_relaxed(val, vaddr);
		break;
	case 16:
		writew_relaxed(val, vaddr);
		break;
	case 32:
		writel_relaxed(val, vaddr);
		break;
	case 64:
		writeq_relaxed(val, vaddr);
		break;
	default:
		pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
			 reg->bit_width, pcc_ss_id);
		ret_val = -EFAULT;
		break;
	}

	return ret_val;
}

/**
 * cppc_get_desired_perf - Get the value of the desired performance register.
 * @cpunum: CPU from which to get desired performance.
 * @desired_perf: address of a variable to store the returned desired performance
 *
 * Return: 0 for success, -EIO otherwise.
 */
int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
	struct cpc_register_resource *desired_reg;
	struct cppc_pcc_data *pcc_ss_data = NULL;

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];

	if (CPC_IN_PCC(desired_reg)) {
		int ret = 0;

		if (pcc_ss_id < 0)
			return -EIO;

		pcc_ss_data = pcc_data[pcc_ss_id];

		down_write(&pcc_ss_data->pcc_lock);

		if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
			cpc_read(cpunum, desired_reg, desired_perf);
		else
			ret = -EIO;

		up_write(&pcc_ss_data->pcc_lock);

		return ret;
	}

	cpc_read(cpunum, desired_reg, desired_perf);

	return 0;
}
EXPORT_SYMBOL_GPL(cppc_get_desired_perf);
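
/*
 * A minimal usage sketch (illustrative only; "cpu" and the message text
 * are placeholders, not part of this file): a caller such as a cpufreq
 * driver can read back the last requested performance level like so:
 *
 *	u64 desired;
 *
 *	if (!cppc_get_desired_perf(cpu, &desired))
 *		pr_info("CPU%d desired perf: %llu\n", cpu, desired);
 */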

/**
 * cppc_get_perf_caps - Get a CPU's performance capabilities.
 * @cpunum: CPU from which to get capabilities info.
 * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
 *
 * Return: 0 for success with perf_caps populated else -ERRNO.
 */
int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *highest_reg, *lowest_reg,
		*lowest_non_linear_reg, *nominal_reg, *guaranteed_reg,
		*low_freq_reg = NULL, *nom_freq_reg = NULL;
	u64 high, low, guaranteed, nom, min_nonlinear, low_f = 0, nom_f = 0;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
	lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
	lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
	nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
	low_freq_reg = &cpc_desc->cpc_regs[LOWEST_FREQ];
	nom_freq_reg = &cpc_desc->cpc_regs[NOMINAL_FREQ];
	guaranteed_reg = &cpc_desc->cpc_regs[GUARANTEED_PERF];

	/* Are any of the regs PCC ? */
	if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
	    CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg) ||
	    CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		regs_in_pcc = 1;
		down_write(&pcc_ss_data->pcc_lock);
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(cpunum, highest_reg, &high);
	perf_caps->highest_perf = high;

	cpc_read(cpunum, lowest_reg, &low);
	perf_caps->lowest_perf = low;

	cpc_read(cpunum, nominal_reg, &nom);
	perf_caps->nominal_perf = nom;

	if (guaranteed_reg->type != ACPI_TYPE_BUFFER ||
	    IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) {
		perf_caps->guaranteed_perf = 0;
	} else {
		cpc_read(cpunum, guaranteed_reg, &guaranteed);
		perf_caps->guaranteed_perf = guaranteed;
	}

	cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
	perf_caps->lowest_nonlinear_perf = min_nonlinear;

	if (!high || !low || !nom || !min_nonlinear)
		ret = -EFAULT;

	/* Read optional lowest and nominal frequencies if present */
	if (CPC_SUPPORTED(low_freq_reg))
		cpc_read(cpunum, low_freq_reg, &low_f);

	if (CPC_SUPPORTED(nom_freq_reg))
		cpc_read(cpunum, nom_freq_reg, &nom_f);

	perf_caps->lowest_freq = low_f;
	perf_caps->nominal_freq = nom_f;

out_err:
	if (regs_in_pcc)
		up_write(&pcc_ss_data->pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
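
/*
 * A minimal usage sketch (illustrative only): a cpufreq driver typically
 * queries the capabilities once at policy init to derive its frequency
 * bounds, e.g.:
 *
 *	struct cppc_perf_caps caps;
 *
 *	if (!cppc_get_perf_caps(cpu, &caps))
 *		pr_debug("perf range [%llu..%llu], nominal %llu\n",
 *			 caps.lowest_perf, caps.highest_perf,
 *			 caps.nominal_perf);
 */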

/**
 * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
 * @cpunum: CPU from which to read counters.
 * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
 *
 * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
 */
int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *delivered_reg, *reference_reg,
		*ref_perf_reg, *ctr_wrap_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	u64 delivered, reference, ref_perf, ctr_wrap_time;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
	reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
	ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
	ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];

	/*
	 * If the reference perf register is not supported then we should
	 * use the nominal perf value
	 */
	if (!CPC_SUPPORTED(ref_perf_reg))
		ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];

	/* Are any of the regs PCC ? */
	if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
	    CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		down_write(&pcc_ss_data->pcc_lock);
		regs_in_pcc = 1;
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(cpunum, delivered_reg, &delivered);
	cpc_read(cpunum, reference_reg, &reference);
	cpc_read(cpunum, ref_perf_reg, &ref_perf);

	/*
	 * Per spec, if the ctr_wrap_time optional register is unsupported,
	 * then the performance counters are assumed to never wrap during
	 * the lifetime of the platform.
	 */
	ctr_wrap_time = (u64)(~((u64)0));
	if (CPC_SUPPORTED(ctr_wrap_reg))
		cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time);

	if (!delivered || !reference || !ref_perf) {
		ret = -EFAULT;
		goto out_err;
	}

	perf_fb_ctrs->delivered = delivered;
	perf_fb_ctrs->reference = reference;
	perf_fb_ctrs->reference_perf = ref_perf;
	perf_fb_ctrs->wraparound_time = ctr_wrap_time;
out_err:
	if (regs_in_pcc)
		up_write(&pcc_ss_data->pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
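
/*
 * A sketch of how a caller might use the feedback counters (illustrative
 * only; the helper below is not part of this file). Taking two snapshots
 * and scaling reference performance by the counter deltas yields the
 * average delivered performance over the sampling window:
 *
 *	delivered_perf = reference_perf * delta(delivered) / delta(reference)
 *
 *	static u64 delivered_perf(int cpu)
 *	{
 *		struct cppc_perf_fb_ctrs fb0, fb1;
 *		u64 d_ref, d_del;
 *
 *		if (cppc_get_perf_ctrs(cpu, &fb0))
 *			return 0;
 *		udelay(100);	// sampling window
 *		if (cppc_get_perf_ctrs(cpu, &fb1))
 *			return 0;
 *
 *		d_ref = fb1.reference - fb0.reference;
 *		d_del = fb1.delivered - fb0.delivered;
 *
 *		return d_ref ? div64_u64(fb1.reference_perf * d_del, d_ref) : 0;
 *	}
 */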

/**
 * cppc_set_perf - Set a CPU's performance controls.
 * @cpu: CPU for which to set performance controls.
 * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
 *
 * Return: 0 for success, -ERRNO otherwise.
 */
int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cpc_register_resource *desired_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -ENODEV;
	}

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];

	/*
	 * This is Phase-I where we want to write to CPC registers
	 * -> We want all CPUs to be able to execute this phase in parallel
	 *
	 * Since read_lock can be acquired by multiple CPUs simultaneously we
	 * achieve that goal here
	 */
	if (CPC_IN_PCC(desired_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		down_read(&pcc_ss_data->pcc_lock);	/* BEGIN Phase-I */
		if (pcc_ss_data->platform_owns_pcc) {
			ret = check_pcc_chan(pcc_ss_id, false);
			if (ret) {
				up_read(&pcc_ss_data->pcc_lock);
				return ret;
			}
		}
		/*
		 * Update pending_pcc_write_cmd to make sure a PCC CMD_READ
		 * will not arrive and steal the channel during the switch
		 * to the write lock
		 */
		pcc_ss_data->pending_pcc_write_cmd = true;
		cpc_desc->write_cmd_id = pcc_ss_data->pcc_write_cnt;
		cpc_desc->write_cmd_status = 0;
	}

	/*
	 * Skip writing MIN/MAX until Linux knows how to come up with
	 * useful values.
	 */
	cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);

	if (CPC_IN_PCC(desired_reg))
		up_read(&pcc_ss_data->pcc_lock);	/* END Phase-I */

	/*
	 * This is Phase-II where we transfer the ownership of PCC to Platform
	 *
	 * Short Summary: If we think of a group of cppc_set_perf requests
	 * that happened in a short overlapping interval, the last CPU to
	 * come out of Phase-I will enter Phase-II and ring the doorbell.
	 *
	 * We have the following requirements for Phase-II:
	 *     1. We want to execute Phase-II only when there are no CPUs
	 * currently executing in Phase-I
	 *     2. Once we start Phase-II we want to avoid all other CPUs from
	 * entering Phase-I.
	 *     3. We want only one CPU among all those who went through
	 * Phase-I to run Phase-II
	 *
	 * If write_trylock fails to get the lock and doesn't transfer the
	 * PCC ownership to the platform, then one of the following will be
	 * TRUE:
	 *     1. There is at least one CPU in Phase-I which will later
	 * execute write_trylock, so the CPUs in Phase-I will be responsible
	 * for executing the Phase-II.
	 *     2. Some other CPU has beaten this CPU to successfully execute
	 * the write_trylock and has already acquired the write_lock. We know
	 * for a fact it (the other CPU acquiring the write_lock) couldn't
	 * have happened before this CPU's Phase-I as we held the read_lock.
	 *     3. Some other CPU executing a PCC CMD_READ has stolen the
	 * write_lock, in which case send_pcc_cmd will check for a pending
	 * doorbell request and ensure that the missed doorbell command is
	 * taken care of.
	 */
	if (CPC_IN_PCC(desired_reg)) {
		if (down_write_trylock(&pcc_ss_data->pcc_lock)) {	/* BEGIN Phase-II */
			/* Update only if there are pending write commands */
			if (pcc_ss_data->pending_pcc_write_cmd)
				send_pcc_cmd(pcc_ss_id, CMD_WRITE);
			up_write(&pcc_ss_data->pcc_lock);	/* END Phase-II */
		} else
			/* Wait until pcc_write_cnt is updated by send_pcc_cmd */
			wait_event(pcc_ss_data->pcc_write_wait_q,
				   cpc_desc->write_cmd_id != pcc_ss_data->pcc_write_cnt);

		/* send_pcc_cmd updates the status in case of failure */
		ret = cpc_desc->write_cmd_status;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_perf);
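
/*
 * A minimal usage sketch (illustrative only; "cpu" and "perf" are
 * placeholders): a cpufreq ->target() callback would convert its chosen
 * frequency to an abstract performance level and request it like so:
 *
 *	struct cppc_perf_ctrls ctrls = { .desired_perf = perf };
 *	int ret = cppc_set_perf(cpu, &ctrls);
 */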

/**
 * cppc_get_transition_latency - returns frequency transition latency in ns
 * @cpu_num: CPU number for which to report the latency.
 *
 * ACPI CPPC does not specify the transition latency for performance change
 * requests. The closest we have is the timing information from the PCCT
 * tables, which provides the number and frequency of PCC commands the
 * platform can handle.
 */
unsigned int cppc_get_transition_latency(int cpu_num)
{
	/*
	 * Expected transition latency is based on the PCCT timing values.
	 * Below are definitions from the ACPI spec:
	 * pcc_nominal- Expected latency to process a command, in microseconds
	 * pcc_mpar   - The maximum number of periodic requests that the subspace
	 *              channel can support, reported in commands per minute. 0
	 *              indicates no limitation.
	 * pcc_mrtt   - The minimum amount of time that OSPM must wait after the
	 *              completion of a command before issuing the next command,
	 *              in microseconds.
	 */
	unsigned int latency_ns = 0;
	struct cpc_desc *cpc_desc;
	struct cpc_register_resource *desired_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
	struct cppc_pcc_data *pcc_ss_data;

	cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
	if (!cpc_desc)
		return CPUFREQ_ETERNAL;

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
	if (!CPC_IN_PCC(desired_reg))
		return CPUFREQ_ETERNAL;

	if (pcc_ss_id < 0)
		return CPUFREQ_ETERNAL;

	pcc_ss_data = pcc_data[pcc_ss_id];
	if (pcc_ss_data->pcc_mpar)
		latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);

	latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000);
	latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000);

	return latency_ns;
}
EXPORT_SYMBOL_GPL(cppc_get_transition_latency);
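
/*
 * A minimal usage sketch (illustrative only): a cpufreq driver would
 * typically plug this straight into its policy at init time:
 *
 *	policy->cpuinfo.transition_latency = cppc_get_transition_latency(cpu);
 */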
1435