linux/drivers/acpi/cppc_acpi.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers.
   4 *
   5 * (C) Copyright 2014, 2015 Linaro Ltd.
   6 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
   7 *
   8 * CPPC describes a few methods for controlling CPU performance using
   9 * information from a per CPU table called CPC. This table is described in
  10 * the ACPI v5.0+ specification. The table consists of a list of
  11 * registers which may be memory mapped or hardware registers and also may
  12 * include some static integer values.
  13 *
   14 * CPU performance is on an abstract continuous scale, as opposed to a discretized
  15 * P-state scale which is tied to CPU frequency only. In brief, the basic
  16 * operation involves:
  17 *
  18 * - OS makes a CPU performance request. (Can provide min and max bounds)
  19 *
  20 * - Platform (such as BMC) is free to optimize request within requested bounds
  21 *   depending on power/thermal budgets etc.
  22 *
  23 * - Platform conveys its decision back to OS
  24 *
  25 * The communication between OS and platform occurs through another medium
  26 * called (PCC) Platform Communication Channel. This is a generic mailbox like
  27 * mechanism which includes doorbell semantics to indicate register updates.
  28 * See drivers/mailbox/pcc.c for details on PCC.
  29 *
  30 * Finer details about the PCC and CPPC spec are available in the ACPI v5.1 and
  31 * above specifications.
  32 */
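/*
 * Illustrative usage sketch (not part of this driver): a CPPC-aware cpufreq
 * backend would typically query the capabilities once and then issue
 * desired-performance requests, e.g.:
 *
 *	struct cppc_perf_caps caps;
 *	struct cppc_perf_ctrls ctrls = {0};
 *
 *	if (!cppc_get_perf_caps(cpu, &caps)) {
 *		ctrls.desired_perf = caps.nominal_perf;
 *		cppc_set_perf(cpu, &ctrls);
 *	}
 *
 * The function and field names are the ones exported below; the surrounding
 * driver logic is hypothetical.
 */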
  33
  34#define pr_fmt(fmt)     "ACPI CPPC: " fmt
  35
  36#include <linux/cpufreq.h>
  37#include <linux/delay.h>
  38#include <linux/iopoll.h>
  39#include <linux/ktime.h>
  40#include <linux/rwsem.h>
  41#include <linux/wait.h>
  42
  43#include <acpi/cppc_acpi.h>
  44
  45struct cppc_pcc_data {
  46        struct mbox_chan *pcc_channel;
  47        void __iomem *pcc_comm_addr;
  48        bool pcc_channel_acquired;
  49        unsigned int deadline_us;
  50        unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;
  51
  52        bool pending_pcc_write_cmd;     /* Any pending/batched PCC write cmds? */
  53        bool platform_owns_pcc;         /* Ownership of PCC subspace */
  54        unsigned int pcc_write_cnt;     /* Running count of PCC write commands */
  55
  56        /*
  57         * Lock to provide controlled access to the PCC channel.
  58         *
   59         * For performance-critical use cases (currently cppc_set_perf)
  60         *      We need to take read_lock and check if channel belongs to OSPM
  61         * before reading or writing to PCC subspace
  62         *      We need to take write_lock before transferring the channel
  63         * ownership to the platform via a Doorbell
  64         *      This allows us to batch a number of CPPC requests if they happen
   65         * to originate at about the same time
  66         *
   67         * For non-performance-critical use cases (init)
   68         *      Take write_lock for all purposes, which gives exclusive access (see the usage sketch below this structure)
  69         */
  70        struct rw_semaphore pcc_lock;
  71
  72        /* Wait queue for CPUs whose requests were batched */
  73        wait_queue_head_t pcc_write_wait_q;
  74        ktime_t last_cmd_cmpl_time;
  75        ktime_t last_mpar_reset;
  76        int mpar_count;
  77        int refcount;
  78};
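/*
 * Usage sketch for pcc_lock above: this is the pattern cppc_set_perf() below
 * follows, reproduced here only to illustrate the locking rules described in
 * the structure, not a separate code path.
 *
 *	down_read(&pcc_ss_data->pcc_lock);		// Phase-I
 *	if (pcc_ss_data->platform_owns_pcc)
 *		check_pcc_chan(pcc_ss_id, false);
 *	cpc_write(cpu, desired_reg, desired_perf);	// touch PCC subspace
 *	up_read(&pcc_ss_data->pcc_lock);
 *
 *	if (down_write_trylock(&pcc_ss_data->pcc_lock)) {	// Phase-II
 *		send_pcc_cmd(pcc_ss_id, CMD_WRITE);	// ring doorbell once
 *		up_write(&pcc_ss_data->pcc_lock);
 *	}
 */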
  79
  80/* Array to represent the PCC channel per subspace ID */
  81static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES];
  82/* The cpu_pcc_subspace_idx contains per CPU subspace ID */
  83static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);
  84
  85/*
  86 * The cpc_desc structure contains the ACPI register details
  87 * as described in the per CPU _CPC tables. The details
  88 * include the type of register (e.g. PCC, System IO, FFH etc.)
  89 * and destination addresses which lets us READ/WRITE CPU performance
  90 * information using the appropriate I/O methods.
  91 */
  92static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
  93
   94/* PCC mapped address + 8-byte shared memory region header (signature, command, status) + offset within PCC subspace */
  95#define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \
  96                                                0x8 + (offs))
  97
  98/* Check if a CPC register is in PCC */
  99#define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&             \
 100                                (cpc)->cpc_entry.reg.space_id ==        \
 101                                ACPI_ADR_SPACE_PLATFORM_COMM)
 102
  103/* Evaluates to True if reg is a NULL register descriptor */
 104#define IS_NULL_REG(reg) ((reg)->space_id ==  ACPI_ADR_SPACE_SYSTEM_MEMORY && \
 105                                (reg)->address == 0 &&                  \
 106                                (reg)->bit_width == 0 &&                \
 107                                (reg)->bit_offset == 0 &&               \
 108                                (reg)->access_width == 0)
 109
  110/* Evaluates to True if an optional cpc field is supported */
 111#define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ?          \
 112                                !!(cpc)->cpc_entry.int_value :          \
 113                                !IS_NULL_REG(&(cpc)->cpc_entry.reg))
 114/*
  115 * Arbitrary number of retries in case the remote processor is slow to respond
  116 * to PCC commands. Keeping it high enough to cover emulators where
  117 * the processors run painfully slowly.
 118 */
 119#define NUM_RETRIES 500ULL
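/*
 * Worked example (hypothetical PCCT value): if the PCCT reports a nominal
 * command latency of 40 us, register_pcc_channel() below will set
 * deadline_us = NUM_RETRIES * 40 = 20000 us, i.e. check_pcc_chan() polls the
 * status register for up to 20 ms before giving up.
 */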
 120
 121struct cppc_attr {
 122        struct attribute attr;
 123        ssize_t (*show)(struct kobject *kobj,
 124                        struct attribute *attr, char *buf);
 125        ssize_t (*store)(struct kobject *kobj,
 126                        struct attribute *attr, const char *c, ssize_t count);
 127};
 128
 129#define define_one_cppc_ro(_name)               \
 130static struct cppc_attr _name =                 \
 131__ATTR(_name, 0444, show_##_name, NULL)
 132
 133#define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)
 134
 135#define show_cppc_data(access_fn, struct_name, member_name)             \
 136        static ssize_t show_##member_name(struct kobject *kobj,         \
 137                                        struct attribute *attr, char *buf) \
 138        {                                                               \
 139                struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);           \
 140                struct struct_name st_name = {0};                       \
 141                int ret;                                                \
 142                                                                        \
 143                ret = access_fn(cpc_ptr->cpu_id, &st_name);             \
 144                if (ret)                                                \
 145                        return ret;                                     \
 146                                                                        \
 147                return scnprintf(buf, PAGE_SIZE, "%llu\n",              \
 148                                (u64)st_name.member_name);              \
 149        }                                                               \
 150        define_one_cppc_ro(member_name)
 151
 152show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf);
 153show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf);
 154show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf);
 155show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf);
 156show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_freq);
 157show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq);
 158
 159show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
 160show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
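/*
 * The show_cppc_data() instances above, together with feedback_ctrs defined
 * below, back the read-only sysfs attributes that acpi_cppc_processor_probe()
 * creates in an "acpi_cppc" kobject under each CPU device, typically:
 *
 *	/sys/devices/system/cpu/cpuN/acpi_cppc/highest_perf
 *	/sys/devices/system/cpu/cpuN/acpi_cppc/nominal_perf
 *	/sys/devices/system/cpu/cpuN/acpi_cppc/feedback_ctrs
 *	...
 *
 * The exact parent path depends on how CPU devices are registered on a given
 * system.
 */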
 161
 162static ssize_t show_feedback_ctrs(struct kobject *kobj,
 163                struct attribute *attr, char *buf)
 164{
 165        struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
 166        struct cppc_perf_fb_ctrs fb_ctrs = {0};
 167        int ret;
 168
 169        ret = cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
 170        if (ret)
 171                return ret;
 172
 173        return scnprintf(buf, PAGE_SIZE, "ref:%llu del:%llu\n",
 174                        fb_ctrs.reference, fb_ctrs.delivered);
 175}
 176define_one_cppc_ro(feedback_ctrs);
 177
 178static struct attribute *cppc_attrs[] = {
 179        &feedback_ctrs.attr,
 180        &reference_perf.attr,
 181        &wraparound_time.attr,
 182        &highest_perf.attr,
 183        &lowest_perf.attr,
 184        &lowest_nonlinear_perf.attr,
 185        &nominal_perf.attr,
 186        &nominal_freq.attr,
 187        &lowest_freq.attr,
 188        NULL
 189};
 190
 191static struct kobj_type cppc_ktype = {
 192        .sysfs_ops = &kobj_sysfs_ops,
 193        .default_attrs = cppc_attrs,
 194};
 195
 196static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
 197{
 198        int ret, status;
 199        struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
 200        struct acpi_pcct_shared_memory __iomem *generic_comm_base =
 201                pcc_ss_data->pcc_comm_addr;
 202
 203        if (!pcc_ss_data->platform_owns_pcc)
 204                return 0;
 205
 206        /*
  207         * Poll the PCC status register every 3 us (delay_us) for a maximum of
  208         * deadline_us (timeout_us) until the PCC command-complete bit is set (cond)
 209         */
 210        ret = readw_relaxed_poll_timeout(&generic_comm_base->status, status,
 211                                        status & PCC_CMD_COMPLETE_MASK, 3,
 212                                        pcc_ss_data->deadline_us);
 213
 214        if (likely(!ret)) {
 215                pcc_ss_data->platform_owns_pcc = false;
 216                if (chk_err_bit && (status & PCC_ERROR_MASK))
 217                        ret = -EIO;
 218        }
 219
 220        if (unlikely(ret))
 221                pr_err("PCC check channel failed for ss: %d. ret=%d\n",
 222                       pcc_ss_id, ret);
 223
 224        return ret;
 225}
 226
 227/*
  228 * This function transfers the ownership of the PCC channel to the platform,
  229 * so it must be called while holding write_lock(pcc_lock).
 230 */
 231static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
 232{
 233        int ret = -EIO, i;
 234        struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
 235        struct acpi_pcct_shared_memory *generic_comm_base =
 236                (struct acpi_pcct_shared_memory *)pcc_ss_data->pcc_comm_addr;
 237        unsigned int time_delta;
 238
 239        /*
 240         * For CMD_WRITE we know for a fact the caller should have checked
 241         * the channel before writing to PCC space
 242         */
 243        if (cmd == CMD_READ) {
 244                /*
 245                 * If there are pending cpc_writes, then we stole the channel
 246                 * before write completion, so first send a WRITE command to
 247                 * platform
 248                 */
 249                if (pcc_ss_data->pending_pcc_write_cmd)
 250                        send_pcc_cmd(pcc_ss_id, CMD_WRITE);
 251
 252                ret = check_pcc_chan(pcc_ss_id, false);
 253                if (ret)
 254                        goto end;
 255        } else /* CMD_WRITE */
 256                pcc_ss_data->pending_pcc_write_cmd = FALSE;
 257
 258        /*
  259         * Handle the Minimum Request Turnaround Time (MRTT)
 260         * "The minimum amount of time that OSPM must wait after the completion
 261         * of a command before issuing the next command, in microseconds"
 262         */
 263        if (pcc_ss_data->pcc_mrtt) {
 264                time_delta = ktime_us_delta(ktime_get(),
 265                                            pcc_ss_data->last_cmd_cmpl_time);
 266                if (pcc_ss_data->pcc_mrtt > time_delta)
 267                        udelay(pcc_ss_data->pcc_mrtt - time_delta);
 268        }
 269
 270        /*
  271         * Handle the non-zero Maximum Periodic Access Rate (MPAR)
 272         * "The maximum number of periodic requests that the subspace channel can
 273         * support, reported in commands per minute. 0 indicates no limitation."
 274         *
 275         * This parameter should be ideally zero or large enough so that it can
 276         * handle maximum number of requests that all the cores in the system can
 277         * collectively generate. If it is not, we will follow the spec and just
 278         * not send the request to the platform after hitting the MPAR limit in
 279         * any 60s window
 280         */
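        /*
         * Worked example (hypothetical value): with pcc_mpar == 600, at most
         * 600 doorbell commands are sent in any 60 s window (10 per second on
         * average). Once mpar_count hits zero, further requests in that window
         * fail with -EIO until last_mpar_reset is more than 60 s in the past.
         */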
 281        if (pcc_ss_data->pcc_mpar) {
 282                if (pcc_ss_data->mpar_count == 0) {
 283                        time_delta = ktime_ms_delta(ktime_get(),
 284                                                    pcc_ss_data->last_mpar_reset);
 285                        if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) {
 286                                pr_debug("PCC cmd for subspace %d not sent due to MPAR limit",
 287                                         pcc_ss_id);
 288                                ret = -EIO;
 289                                goto end;
 290                        }
 291                        pcc_ss_data->last_mpar_reset = ktime_get();
 292                        pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar;
 293                }
 294                pcc_ss_data->mpar_count--;
 295        }
 296
 297        /* Write to the shared comm region. */
 298        writew_relaxed(cmd, &generic_comm_base->command);
 299
  300        /* Clear the CMD COMPLETE bit; the platform sets it when the command is done */
 301        writew_relaxed(0, &generic_comm_base->status);
 302
 303        pcc_ss_data->platform_owns_pcc = true;
 304
 305        /* Ring doorbell */
 306        ret = mbox_send_message(pcc_ss_data->pcc_channel, &cmd);
 307        if (ret < 0) {
 308                pr_err("Err sending PCC mbox message. ss: %d cmd:%d, ret:%d\n",
 309                       pcc_ss_id, cmd, ret);
 310                goto end;
 311        }
 312
  313        /* wait for completion and check for PCC error bit */
 314        ret = check_pcc_chan(pcc_ss_id, true);
 315
 316        if (pcc_ss_data->pcc_mrtt)
 317                pcc_ss_data->last_cmd_cmpl_time = ktime_get();
 318
 319        if (pcc_ss_data->pcc_channel->mbox->txdone_irq)
 320                mbox_chan_txdone(pcc_ss_data->pcc_channel, ret);
 321        else
 322                mbox_client_txdone(pcc_ss_data->pcc_channel, ret);
 323
 324end:
 325        if (cmd == CMD_WRITE) {
 326                if (unlikely(ret)) {
 327                        for_each_possible_cpu(i) {
 328                                struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);
 329                                if (!desc)
 330                                        continue;
 331
 332                                if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt)
 333                                        desc->write_cmd_status = ret;
 334                        }
 335                }
 336                pcc_ss_data->pcc_write_cnt++;
 337                wake_up_all(&pcc_ss_data->pcc_write_wait_q);
 338        }
 339
 340        return ret;
 341}
 342
 343static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
 344{
 345        if (ret < 0)
 346                pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
 347                                *(u16 *)msg, ret);
 348        else
 349                pr_debug("TX completed. CMD sent:%x, ret:%d\n",
 350                                *(u16 *)msg, ret);
 351}
 352
 353struct mbox_client cppc_mbox_cl = {
 354        .tx_done = cppc_chan_tx_done,
 355        .knows_txdone = true,
 356};
 357
 358static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
 359{
 360        int result = -EFAULT;
 361        acpi_status status = AE_OK;
 362        struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
 363        struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
 364        struct acpi_buffer state = {0, NULL};
 365        union acpi_object  *psd = NULL;
 366        struct acpi_psd_package *pdomain;
 367
 368        status = acpi_evaluate_object_typed(handle, "_PSD", NULL,
 369                                            &buffer, ACPI_TYPE_PACKAGE);
 370        if (status == AE_NOT_FOUND)     /* _PSD is optional */
 371                return 0;
 372        if (ACPI_FAILURE(status))
 373                return -ENODEV;
 374
 375        psd = buffer.pointer;
 376        if (!psd || psd->package.count != 1) {
 377                pr_debug("Invalid _PSD data\n");
 378                goto end;
 379        }
 380
 381        pdomain = &(cpc_ptr->domain_info);
 382
 383        state.length = sizeof(struct acpi_psd_package);
 384        state.pointer = pdomain;
 385
 386        status = acpi_extract_package(&(psd->package.elements[0]),
 387                &format, &state);
 388        if (ACPI_FAILURE(status)) {
 389                pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
 390                goto end;
 391        }
 392
 393        if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
 394                pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
 395                goto end;
 396        }
 397
 398        if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
 399                pr_debug("Unknown _PSD:revision for CPU: %d\n", cpc_ptr->cpu_id);
 400                goto end;
 401        }
 402
 403        if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
 404            pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
 405            pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
 406                pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
 407                goto end;
 408        }
 409
 410        result = 0;
 411end:
 412        kfree(buffer.pointer);
 413        return result;
 414}
 415
 416/**
 417 * acpi_get_psd_map - Map the CPUs in a common freq domain.
 418 * @all_cpu_data: Ptrs to CPU specific CPPC data including PSD info.
 419 *
 420 *      Return: 0 for success or negative value for err.
 421 */
 422int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data)
 423{
 424        int count_target;
 425        int retval = 0;
 426        unsigned int i, j;
 427        cpumask_var_t covered_cpus;
 428        struct cppc_cpudata *pr, *match_pr;
 429        struct acpi_psd_package *pdomain;
 430        struct acpi_psd_package *match_pdomain;
 431        struct cpc_desc *cpc_ptr, *match_cpc_ptr;
 432
 433        if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
 434                return -ENOMEM;
 435
 436        /*
 437         * Now that we have _PSD data from all CPUs, let's setup P-state
 438         * domain info.
 439         */
 440        for_each_possible_cpu(i) {
 441                pr = all_cpu_data[i];
 442                if (!pr)
 443                        continue;
 444
 445                if (cpumask_test_cpu(i, covered_cpus))
 446                        continue;
 447
 448                cpc_ptr = per_cpu(cpc_desc_ptr, i);
 449                if (!cpc_ptr) {
 450                        retval = -EFAULT;
 451                        goto err_ret;
 452                }
 453
 454                pdomain = &(cpc_ptr->domain_info);
 455                cpumask_set_cpu(i, pr->shared_cpu_map);
 456                cpumask_set_cpu(i, covered_cpus);
 457                if (pdomain->num_processors <= 1)
 458                        continue;
 459
 460                /* Validate the Domain info */
 461                count_target = pdomain->num_processors;
 462                if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
 463                        pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
 464                else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
 465                        pr->shared_type = CPUFREQ_SHARED_TYPE_HW;
 466                else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
 467                        pr->shared_type = CPUFREQ_SHARED_TYPE_ANY;
 468
 469                for_each_possible_cpu(j) {
 470                        if (i == j)
 471                                continue;
 472
 473                        match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
 474                        if (!match_cpc_ptr) {
 475                                retval = -EFAULT;
 476                                goto err_ret;
 477                        }
 478
 479                        match_pdomain = &(match_cpc_ptr->domain_info);
 480                        if (match_pdomain->domain != pdomain->domain)
 481                                continue;
 482
 483                        /* Here i and j are in the same domain */
 484                        if (match_pdomain->num_processors != count_target) {
 485                                retval = -EFAULT;
 486                                goto err_ret;
 487                        }
 488
 489                        if (pdomain->coord_type != match_pdomain->coord_type) {
 490                                retval = -EFAULT;
 491                                goto err_ret;
 492                        }
 493
 494                        cpumask_set_cpu(j, covered_cpus);
 495                        cpumask_set_cpu(j, pr->shared_cpu_map);
 496                }
 497
 498                for_each_possible_cpu(j) {
 499                        if (i == j)
 500                                continue;
 501
 502                        match_pr = all_cpu_data[j];
 503                        if (!match_pr)
 504                                continue;
 505
 506                        match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
 507                        if (!match_cpc_ptr) {
 508                                retval = -EFAULT;
 509                                goto err_ret;
 510                        }
 511
 512                        match_pdomain = &(match_cpc_ptr->domain_info);
 513                        if (match_pdomain->domain != pdomain->domain)
 514                                continue;
 515
 516                        match_pr->shared_type = pr->shared_type;
 517                        cpumask_copy(match_pr->shared_cpu_map,
 518                                     pr->shared_cpu_map);
 519                }
 520        }
 521
 522err_ret:
 523        for_each_possible_cpu(i) {
 524                pr = all_cpu_data[i];
 525                if (!pr)
 526                        continue;
 527
 528                /* Assume no coordination on any error parsing domain info */
 529                if (retval) {
 530                        cpumask_clear(pr->shared_cpu_map);
 531                        cpumask_set_cpu(i, pr->shared_cpu_map);
 532                        pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
 533                }
 534        }
 535
 536        free_cpumask_var(covered_cpus);
 537        return retval;
 538}
 539EXPORT_SYMBOL_GPL(acpi_get_psd_map);
 540
 541static int register_pcc_channel(int pcc_ss_idx)
 542{
 543        struct acpi_pcct_hw_reduced *cppc_ss;
 544        u64 usecs_lat;
 545
 546        if (pcc_ss_idx >= 0) {
 547                pcc_data[pcc_ss_idx]->pcc_channel =
 548                        pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx);
 549
 550                if (IS_ERR(pcc_data[pcc_ss_idx]->pcc_channel)) {
 551                        pr_err("Failed to find PCC channel for subspace %d\n",
 552                               pcc_ss_idx);
 553                        return -ENODEV;
 554                }
 555
 556                /*
 557                 * The PCC mailbox controller driver should
 558                 * have parsed the PCCT (global table of all
 559                 * PCC channels) and stored pointers to the
 560                 * subspace communication region in con_priv.
 561                 */
 562                cppc_ss = (pcc_data[pcc_ss_idx]->pcc_channel)->con_priv;
 563
 564                if (!cppc_ss) {
 565                        pr_err("No PCC subspace found for %d CPPC\n",
 566                               pcc_ss_idx);
 567                        return -ENODEV;
 568                }
 569
 570                /*
 571                 * cppc_ss->latency is just a Nominal value. In reality
 572                 * the remote processor could be much slower to reply.
 573                 * So add an arbitrary amount of wait on top of Nominal.
 574                 */
 575                usecs_lat = NUM_RETRIES * cppc_ss->latency;
 576                pcc_data[pcc_ss_idx]->deadline_us = usecs_lat;
 577                pcc_data[pcc_ss_idx]->pcc_mrtt = cppc_ss->min_turnaround_time;
 578                pcc_data[pcc_ss_idx]->pcc_mpar = cppc_ss->max_access_rate;
 579                pcc_data[pcc_ss_idx]->pcc_nominal = cppc_ss->latency;
 580
 581                pcc_data[pcc_ss_idx]->pcc_comm_addr =
 582                        acpi_os_ioremap(cppc_ss->base_address, cppc_ss->length);
 583                if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
 584                        pr_err("Failed to ioremap PCC comm region mem for %d\n",
 585                               pcc_ss_idx);
 586                        return -ENOMEM;
 587                }
 588
 589                /* Set flag so that we don't come here for each CPU. */
 590                pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
 591        }
 592
 593        return 0;
 594}
 595
 596/**
 597 * cpc_ffh_supported() - check if FFH reading supported
 598 *
 599 * Check if the architecture has support for functional fixed hardware
 600 * read/write capability.
 601 *
 602 * Return: true for supported, false for not supported
 603 */
 604bool __weak cpc_ffh_supported(void)
 605{
 606        return false;
 607}
 608
 609/**
 610 * pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace
 611 *
 612 * Check and allocate the cppc_pcc_data memory.
  613 * In some processor configurations it is possible that the same subspace
 614 * is shared between multiple CPUs. This is seen especially in CPUs
 615 * with hardware multi-threading support.
 616 *
 617 * Return: 0 for success, errno for failure
 618 */
 619int pcc_data_alloc(int pcc_ss_id)
 620{
 621        if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES)
 622                return -EINVAL;
 623
 624        if (pcc_data[pcc_ss_id]) {
 625                pcc_data[pcc_ss_id]->refcount++;
 626        } else {
 627                pcc_data[pcc_ss_id] = kzalloc(sizeof(struct cppc_pcc_data),
 628                                              GFP_KERNEL);
 629                if (!pcc_data[pcc_ss_id])
 630                        return -ENOMEM;
 631                pcc_data[pcc_ss_id]->refcount++;
 632        }
 633
 634        return 0;
 635}
 636
 637/* Check if CPPC revision + num_ent combination is supported */
 638static bool is_cppc_supported(int revision, int num_ent)
 639{
 640        int expected_num_ent;
 641
 642        switch (revision) {
 643        case CPPC_V2_REV:
 644                expected_num_ent = CPPC_V2_NUM_ENT;
 645                break;
 646        case CPPC_V3_REV:
 647                expected_num_ent = CPPC_V3_NUM_ENT;
 648                break;
 649        default:
 650                pr_debug("Firmware exports unsupported CPPC revision: %d\n",
 651                        revision);
 652                return false;
 653        }
 654
 655        if (expected_num_ent != num_ent) {
 656                pr_debug("Firmware exports %d entries. Expected: %d for CPPC rev:%d\n",
 657                        num_ent, expected_num_ent, revision);
 658                return false;
 659        }
 660
 661        return true;
 662}
 663
 664/*
 665 * An example CPC table looks like the following.
 666 *
 667 *      Name(_CPC, Package()
 668 *                      {
 669 *                      17,
  670 *                      // NumEntries
 671 *                      1,
 672 *                      // Revision
 673 *                      ResourceTemplate(){Register(PCC, 32, 0, 0x120, 2)},
 674 *                      // Highest Performance
 675 *                      ResourceTemplate(){Register(PCC, 32, 0, 0x124, 2)},
 676 *                      // Nominal Performance
 677 *                      ResourceTemplate(){Register(PCC, 32, 0, 0x128, 2)},
 678 *                      // Lowest Nonlinear Performance
 679 *                      ResourceTemplate(){Register(PCC, 32, 0, 0x12C, 2)},
 680 *                      // Lowest Performance
 681 *                      ResourceTemplate(){Register(PCC, 32, 0, 0x130, 2)},
 682 *                      // Guaranteed Performance Register
 683 *                      ResourceTemplate(){Register(PCC, 32, 0, 0x110, 2)},
 684 *                      // Desired Performance Register
 685 *                      ResourceTemplate(){Register(SystemMemory, 0, 0, 0, 0)},
 686 *                      ..
 687 *                      ..
 688 *                      ..
 689 *
 690 *              }
 691 * Each Register() encodes how to access that specific register.
 692 * e.g. a sample PCC entry has the following encoding:
 693 *
 694 *      Register (
 695 *              PCC,
  696 *              // AddressSpaceKeyword
 697 *              8,
 698 *              //RegisterBitWidth
 699 *              8,
 700 *              //RegisterBitOffset
 701 *              0x30,
 702 *              //RegisterAddress
 703 *              9
 704 *              //AccessSize (subspace ID)
 705 *              0
 706 *              )
 707 *      }
 708 */
 709
 710/**
 711 * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
 712 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 713 *
 714 *      Return: 0 for success or negative value for err.
 715 */
 716int acpi_cppc_processor_probe(struct acpi_processor *pr)
 717{
 718        struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
 719        union acpi_object *out_obj, *cpc_obj;
 720        struct cpc_desc *cpc_ptr;
 721        struct cpc_reg *gas_t;
 722        struct device *cpu_dev;
 723        acpi_handle handle = pr->handle;
 724        unsigned int num_ent, i, cpc_rev;
 725        int pcc_subspace_id = -1;
 726        acpi_status status;
 727        int ret = -EFAULT;
 728
 729        /* Parse the ACPI _CPC table for this CPU. */
 730        status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
 731                        ACPI_TYPE_PACKAGE);
 732        if (ACPI_FAILURE(status)) {
 733                ret = -ENODEV;
 734                goto out_buf_free;
 735        }
 736
 737        out_obj = (union acpi_object *) output.pointer;
 738
 739        cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
 740        if (!cpc_ptr) {
 741                ret = -ENOMEM;
 742                goto out_buf_free;
 743        }
 744
 745        /* First entry is NumEntries. */
 746        cpc_obj = &out_obj->package.elements[0];
 747        if (cpc_obj->type == ACPI_TYPE_INTEGER) {
 748                num_ent = cpc_obj->integer.value;
 749        } else {
 750                pr_debug("Unexpected entry type(%d) for NumEntries\n",
 751                                cpc_obj->type);
 752                goto out_free;
 753        }
 754        cpc_ptr->num_entries = num_ent;
 755
 756        /* Second entry should be revision. */
 757        cpc_obj = &out_obj->package.elements[1];
 758        if (cpc_obj->type == ACPI_TYPE_INTEGER) {
 759                cpc_rev = cpc_obj->integer.value;
 760        } else {
 761                pr_debug("Unexpected entry type(%d) for Revision\n",
 762                                cpc_obj->type);
 763                goto out_free;
 764        }
 765        cpc_ptr->version = cpc_rev;
 766
 767        if (!is_cppc_supported(cpc_rev, num_ent))
 768                goto out_free;
 769
 770        /* Iterate through remaining entries in _CPC */
 771        for (i = 2; i < num_ent; i++) {
 772                cpc_obj = &out_obj->package.elements[i];
 773
 774                if (cpc_obj->type == ACPI_TYPE_INTEGER) {
 775                        cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
 776                        cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
 777                } else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
 778                        gas_t = (struct cpc_reg *)
 779                                cpc_obj->buffer.pointer;
 780
 781                        /*
 782                         * The PCC Subspace index is encoded inside
 783                         * the CPC table entries. The same PCC index
 784                         * will be used for all the PCC entries,
 785                         * so extract it only once.
 786                         */
 787                        if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
 788                                if (pcc_subspace_id < 0) {
 789                                        pcc_subspace_id = gas_t->access_width;
 790                                        if (pcc_data_alloc(pcc_subspace_id))
 791                                                goto out_free;
 792                                } else if (pcc_subspace_id != gas_t->access_width) {
 793                                        pr_debug("Mismatched PCC ids.\n");
 794                                        goto out_free;
 795                                }
 796                        } else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
 797                                if (gas_t->address) {
 798                                        void __iomem *addr;
 799
 800                                        addr = ioremap(gas_t->address, gas_t->bit_width/8);
 801                                        if (!addr)
 802                                                goto out_free;
 803                                        cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
 804                                }
 805                        } else {
 806                                if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
  807                                        /* Support only PCC, SYS MEM and FFH type regs */
 808                                        pr_debug("Unsupported register type: %d\n", gas_t->space_id);
 809                                        goto out_free;
 810                                }
 811                        }
 812
 813                        cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
 814                        memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
 815                } else {
  816                        pr_debug("Err in entry:%d in CPC table of CPU:%d\n", i, pr->id);
 817                        goto out_free;
 818                }
 819        }
 820        per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;
 821
 822        /*
 823         * Initialize the remaining cpc_regs as unsupported.
 824         * Example: In case FW exposes CPPC v2, the below loop will initialize
 825         * LOWEST_FREQ and NOMINAL_FREQ regs as unsupported
 826         */
 827        for (i = num_ent - 2; i < MAX_CPC_REG_ENT; i++) {
 828                cpc_ptr->cpc_regs[i].type = ACPI_TYPE_INTEGER;
 829                cpc_ptr->cpc_regs[i].cpc_entry.int_value = 0;
 830        }
 831
 832
 833        /* Store CPU Logical ID */
 834        cpc_ptr->cpu_id = pr->id;
 835
 836        /* Parse PSD data for this CPU */
 837        ret = acpi_get_psd(cpc_ptr, handle);
 838        if (ret)
 839                goto out_free;
 840
  841        /* Register the PCC channel only once per PCC subspace ID. */
 842        if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
 843                ret = register_pcc_channel(pcc_subspace_id);
 844                if (ret)
 845                        goto out_free;
 846
 847                init_rwsem(&pcc_data[pcc_subspace_id]->pcc_lock);
 848                init_waitqueue_head(&pcc_data[pcc_subspace_id]->pcc_write_wait_q);
 849        }
 850
 851        /* Everything looks okay */
 852        pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);
 853
 854        /* Add per logical CPU nodes for reading its feedback counters. */
 855        cpu_dev = get_cpu_device(pr->id);
 856        if (!cpu_dev) {
 857                ret = -EINVAL;
 858                goto out_free;
 859        }
 860
 861        /* Plug PSD data into this CPU's CPC descriptor. */
 862        per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;
 863
 864        ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
 865                        "acpi_cppc");
 866        if (ret) {
 867                per_cpu(cpc_desc_ptr, pr->id) = NULL;
 868                goto out_free;
 869        }
 870
 871        kfree(output.pointer);
 872        return 0;
 873
 874out_free:
 875        /* Free all the mapped sys mem areas for this CPU */
 876        for (i = 2; i < cpc_ptr->num_entries; i++) {
 877                void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
 878
 879                if (addr)
 880                        iounmap(addr);
 881        }
 882        kfree(cpc_ptr);
 883
 884out_buf_free:
 885        kfree(output.pointer);
 886        return ret;
 887}
 888EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);
 889
 890/**
 891 * acpi_cppc_processor_exit - Cleanup CPC structs.
 892 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 893 *
 894 * Return: Void
 895 */
 896void acpi_cppc_processor_exit(struct acpi_processor *pr)
 897{
 898        struct cpc_desc *cpc_ptr;
 899        unsigned int i;
 900        void __iomem *addr;
 901        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id);
 902
  903        if (pcc_ss_id >= 0 && pcc_data[pcc_ss_id]) {
 904                if (pcc_data[pcc_ss_id]->pcc_channel_acquired) {
 905                        pcc_data[pcc_ss_id]->refcount--;
 906                        if (!pcc_data[pcc_ss_id]->refcount) {
 907                                pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
 908                                kfree(pcc_data[pcc_ss_id]);
 909                                pcc_data[pcc_ss_id] = NULL;
 910                        }
 911                }
 912        }
 913
 914        cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
 915        if (!cpc_ptr)
 916                return;
 917
 918        /* Free all the mapped sys mem areas for this CPU */
 919        for (i = 2; i < cpc_ptr->num_entries; i++) {
 920                addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
 921                if (addr)
 922                        iounmap(addr);
 923        }
 924
 925        kobject_put(&cpc_ptr->kobj);
 926        kfree(cpc_ptr);
 927}
 928EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);
 929
 930/**
 931 * cpc_read_ffh() - Read FFH register
 932 * @cpunum:     CPU number to read
 933 * @reg:        cppc register information
 934 * @val:        place holder for return value
 935 *
 936 * Read bit_width bits from a specified address and bit_offset
 937 *
  938 * Return: 0 for success, or a negative error code on failure
 939 */
 940int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
 941{
 942        return -ENOTSUPP;
 943}
 944
 945/**
 946 * cpc_write_ffh() - Write FFH register
 947 * @cpunum:     CPU number to write
 948 * @reg:        cppc register information
 949 * @val:        value to write
 950 *
 951 * Write value of bit_width bits to a specified address and bit_offset
 952 *
  953 * Return: 0 for success, or a negative error code on failure
 954 */
 955int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
 956{
 957        return -ENOTSUPP;
 958}
 959
 960/*
  961 * Since cpc_read and cpc_write are called while holding pcc_lock, they should be
 962 * as fast as possible. We have already mapped the PCC subspace during init, so
 963 * we can directly write to it.
 964 */
 965
 966static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
 967{
 968        int ret_val = 0;
 969        void __iomem *vaddr = 0;
 970        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
 971        struct cpc_reg *reg = &reg_res->cpc_entry.reg;
 972
 973        if (reg_res->type == ACPI_TYPE_INTEGER) {
 974                *val = reg_res->cpc_entry.int_value;
 975                return ret_val;
 976        }
 977
 978        *val = 0;
 979        if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
 980                vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
 981        else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
 982                vaddr = reg_res->sys_mem_vaddr;
 983        else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
 984                return cpc_read_ffh(cpu, reg, val);
 985        else
 986                return acpi_os_read_memory((acpi_physical_address)reg->address,
 987                                val, reg->bit_width);
 988
 989        switch (reg->bit_width) {
 990                case 8:
 991                        *val = readb_relaxed(vaddr);
 992                        break;
 993                case 16:
 994                        *val = readw_relaxed(vaddr);
 995                        break;
 996                case 32:
 997                        *val = readl_relaxed(vaddr);
 998                        break;
 999                case 64:
1000                        *val = readq_relaxed(vaddr);
1001                        break;
1002                default:
1003                        pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
1004                                 reg->bit_width, pcc_ss_id);
1005                        ret_val = -EFAULT;
1006        }
1007
1008        return ret_val;
1009}
1010
1011static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
1012{
1013        int ret_val = 0;
1014        void __iomem *vaddr = 0;
1015        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
1016        struct cpc_reg *reg = &reg_res->cpc_entry.reg;
1017
1018        if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
1019                vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
1020        else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1021                vaddr = reg_res->sys_mem_vaddr;
1022        else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
1023                return cpc_write_ffh(cpu, reg, val);
1024        else
1025                return acpi_os_write_memory((acpi_physical_address)reg->address,
1026                                val, reg->bit_width);
1027
1028        switch (reg->bit_width) {
1029                case 8:
1030                        writeb_relaxed(val, vaddr);
1031                        break;
1032                case 16:
1033                        writew_relaxed(val, vaddr);
1034                        break;
1035                case 32:
1036                        writel_relaxed(val, vaddr);
1037                        break;
1038                case 64:
1039                        writeq_relaxed(val, vaddr);
1040                        break;
1041                default:
1042                        pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
1043                                 reg->bit_width, pcc_ss_id);
1044                        ret_val = -EFAULT;
1045                        break;
1046        }
1047
1048        return ret_val;
1049}
1050
1051/**
 1052 * cppc_get_desired_perf - Get the value of the desired performance register.
1053 * @cpunum: CPU from which to get desired performance.
1054 * @desired_perf: address of a variable to store the returned desired performance
1055 *
1056 * Return: 0 for success, -EIO otherwise.
1057 */
1058int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
1059{
1060        struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
1061        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
1062        struct cpc_register_resource *desired_reg;
1063        struct cppc_pcc_data *pcc_ss_data = NULL;
1064
1065        desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
1066
1067        if (CPC_IN_PCC(desired_reg)) {
1068                int ret = 0;
1069
1070                if (pcc_ss_id < 0)
1071                        return -EIO;
1072
1073                pcc_ss_data = pcc_data[pcc_ss_id];
1074
1075                down_write(&pcc_ss_data->pcc_lock);
1076
1077                if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
1078                        cpc_read(cpunum, desired_reg, desired_perf);
1079                else
1080                        ret = -EIO;
1081
1082                up_write(&pcc_ss_data->pcc_lock);
1083
1084                return ret;
1085        }
1086
1087        cpc_read(cpunum, desired_reg, desired_perf);
1088
1089        return 0;
1090}
1091EXPORT_SYMBOL_GPL(cppc_get_desired_perf);
1092
1093/**
1094 * cppc_get_perf_caps - Get a CPU's performance capabilities.
1095 * @cpunum: CPU from which to get capabilities info.
1096 * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
1097 *
1098 * Return: 0 for success with perf_caps populated else -ERRNO.
1099 */
1100int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
1101{
1102        struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
1103        struct cpc_register_resource *highest_reg, *lowest_reg,
1104                *lowest_non_linear_reg, *nominal_reg, *guaranteed_reg,
1105                *low_freq_reg = NULL, *nom_freq_reg = NULL;
1106        u64 high, low, guaranteed, nom, min_nonlinear, low_f = 0, nom_f = 0;
1107        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
1108        struct cppc_pcc_data *pcc_ss_data = NULL;
1109        int ret = 0, regs_in_pcc = 0;
1110
1111        if (!cpc_desc) {
1112                pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
1113                return -ENODEV;
1114        }
1115
1116        highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
1117        lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
1118        lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
1119        nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
1120        low_freq_reg = &cpc_desc->cpc_regs[LOWEST_FREQ];
1121        nom_freq_reg = &cpc_desc->cpc_regs[NOMINAL_FREQ];
1122        guaranteed_reg = &cpc_desc->cpc_regs[GUARANTEED_PERF];
1123
 1124        /* Are any of the regs PCC? */
1125        if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
1126                CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg) ||
1127                CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg)) {
1128                if (pcc_ss_id < 0) {
1129                        pr_debug("Invalid pcc_ss_id\n");
1130                        return -ENODEV;
1131                }
1132                pcc_ss_data = pcc_data[pcc_ss_id];
1133                regs_in_pcc = 1;
1134                down_write(&pcc_ss_data->pcc_lock);
1135                /* Ring doorbell once to update PCC subspace */
1136                if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
1137                        ret = -EIO;
1138                        goto out_err;
1139                }
1140        }
1141
1142        cpc_read(cpunum, highest_reg, &high);
1143        perf_caps->highest_perf = high;
1144
1145        cpc_read(cpunum, lowest_reg, &low);
1146        perf_caps->lowest_perf = low;
1147
1148        cpc_read(cpunum, nominal_reg, &nom);
1149        perf_caps->nominal_perf = nom;
1150
1151        if (guaranteed_reg->type != ACPI_TYPE_BUFFER  ||
1152            IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) {
1153                perf_caps->guaranteed_perf = 0;
1154        } else {
1155                cpc_read(cpunum, guaranteed_reg, &guaranteed);
1156                perf_caps->guaranteed_perf = guaranteed;
1157        }
1158
1159        cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
1160        perf_caps->lowest_nonlinear_perf = min_nonlinear;
1161
1162        if (!high || !low || !nom || !min_nonlinear)
1163                ret = -EFAULT;
1164
1165        /* Read optional lowest and nominal frequencies if present */
1166        if (CPC_SUPPORTED(low_freq_reg))
1167                cpc_read(cpunum, low_freq_reg, &low_f);
1168
1169        if (CPC_SUPPORTED(nom_freq_reg))
1170                cpc_read(cpunum, nom_freq_reg, &nom_f);
1171
1172        perf_caps->lowest_freq = low_f;
1173        perf_caps->nominal_freq = nom_f;
1174
1175
1176out_err:
1177        if (regs_in_pcc)
1178                up_write(&pcc_ss_data->pcc_lock);
1179        return ret;
1180}
1181EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
1182
1183/**
1184 * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
1185 * @cpunum: CPU from which to read counters.
1186 * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
1187 *
1188 * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
1189 */
1190int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
1191{
1192        struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
1193        struct cpc_register_resource *delivered_reg, *reference_reg,
1194                *ref_perf_reg, *ctr_wrap_reg;
1195        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
1196        struct cppc_pcc_data *pcc_ss_data = NULL;
1197        u64 delivered, reference, ref_perf, ctr_wrap_time;
1198        int ret = 0, regs_in_pcc = 0;
1199
1200        if (!cpc_desc) {
1201                pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
1202                return -ENODEV;
1203        }
1204
1205        delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
1206        reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
1207        ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
1208        ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];
1209
1210        /*
1211         * If reference perf register is not supported then we should
1212         * use the nominal perf value
1213         */
1214        if (!CPC_SUPPORTED(ref_perf_reg))
1215                ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
1216
 1217        /* Are any of the regs PCC? */
1218        if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
1219                CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
1220                if (pcc_ss_id < 0) {
1221                        pr_debug("Invalid pcc_ss_id\n");
1222                        return -ENODEV;
1223                }
1224                pcc_ss_data = pcc_data[pcc_ss_id];
1225                down_write(&pcc_ss_data->pcc_lock);
1226                regs_in_pcc = 1;
1227                /* Ring doorbell once to update PCC subspace */
1228                if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
1229                        ret = -EIO;
1230                        goto out_err;
1231                }
1232        }
1233
1234        cpc_read(cpunum, delivered_reg, &delivered);
1235        cpc_read(cpunum, reference_reg, &reference);
1236        cpc_read(cpunum, ref_perf_reg, &ref_perf);
1237
1238        /*
1239         * Per spec, if ctr_wrap_time optional register is unsupported, then the
1240         * performance counters are assumed to never wrap during the lifetime of
 1241         * the platform.
1242         */
1243        ctr_wrap_time = (u64)(~((u64)0));
1244        if (CPC_SUPPORTED(ctr_wrap_reg))
1245                cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time);
1246
1247        if (!delivered || !reference || !ref_perf) {
1248                ret = -EFAULT;
1249                goto out_err;
1250        }
1251
1252        perf_fb_ctrs->delivered = delivered;
1253        perf_fb_ctrs->reference = reference;
1254        perf_fb_ctrs->reference_perf = ref_perf;
1255        perf_fb_ctrs->wraparound_time = ctr_wrap_time;
1256out_err:
1257        if (regs_in_pcc)
1258                up_write(&pcc_ss_data->pcc_lock);
1259        return ret;
1260}
1261EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
1262
1263/**
1264 * cppc_set_perf - Set a CPU's performance controls.
1265 * @cpu: CPU for which to set performance controls.
1266 * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
1267 *
1268 * Return: 0 for success, -ERRNO otherwise.
1269 */
1270int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
1271{
1272        struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
1273        struct cpc_register_resource *desired_reg;
1274        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
1275        struct cppc_pcc_data *pcc_ss_data = NULL;
1276        int ret = 0;
1277
1278        if (!cpc_desc) {
1279                pr_debug("No CPC descriptor for CPU:%d\n", cpu);
1280                return -ENODEV;
1281        }
1282
1283        desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
1284
1285        /*
1286         * This is Phase-I where we want to write to CPC registers
1287         * -> We want all CPUs to be able to execute this phase in parallel
1288         *
1289         * Since read_lock can be acquired by multiple CPUs simultaneously we
1290         * achieve that goal here
1291         */
1292        if (CPC_IN_PCC(desired_reg)) {
1293                if (pcc_ss_id < 0) {
1294                        pr_debug("Invalid pcc_ss_id\n");
1295                        return -ENODEV;
1296                }
1297                pcc_ss_data = pcc_data[pcc_ss_id];
1298                down_read(&pcc_ss_data->pcc_lock); /* BEGIN Phase-I */
1299                if (pcc_ss_data->platform_owns_pcc) {
1300                        ret = check_pcc_chan(pcc_ss_id, false);
1301                        if (ret) {
1302                                up_read(&pcc_ss_data->pcc_lock);
1303                                return ret;
1304                        }
1305                }
1306                /*
1307                 * Update the pending_write to make sure a PCC CMD_READ will not
1308                 * arrive and steal the channel during the switch to write lock
1309                 */
1310                pcc_ss_data->pending_pcc_write_cmd = true;
1311                cpc_desc->write_cmd_id = pcc_ss_data->pcc_write_cnt;
1312                cpc_desc->write_cmd_status = 0;
1313        }
1314
1315        /*
1316         * Skip writing MIN/MAX until Linux knows how to come up with
1317         * useful values.
1318         */
1319        cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);
1320
1321        if (CPC_IN_PCC(desired_reg))
1322                up_read(&pcc_ss_data->pcc_lock);        /* END Phase-I */
1323        /*
1324         * This is Phase-II where we transfer the ownership of PCC to Platform
1325         *
 1326         * Short summary: if we think of a group of cppc_set_perf requests that
 1327         * happened in a short overlapping interval, the last CPU to come out of
 1328         * Phase-I will enter Phase-II and ring the doorbell.
1329         *
1330         * We have the following requirements for Phase-II:
1331         *     1. We want to execute Phase-II only when there are no CPUs
1332         * currently executing in Phase-I
1333         *     2. Once we start Phase-II we want to avoid all other CPUs from
1334         * entering Phase-I.
1335         *     3. We want only one CPU among all those who went through Phase-I
1336         * to run phase-II
1337         *
1338         * If write_trylock fails to get the lock and doesn't transfer the
1339         * PCC ownership to the platform, then one of the following will be TRUE
 1340         *     1. There is at least one CPU in Phase-I which will later execute
1341         * write_trylock, so the CPUs in Phase-I will be responsible for
1342         * executing the Phase-II.
1343         *     2. Some other CPU has beaten this CPU to successfully execute the
1344         * write_trylock and has already acquired the write_lock. We know for a
1345         * fact it (other CPU acquiring the write_lock) couldn't have happened
1346         * before this CPU's Phase-I as we held the read_lock.
1347         *     3. Some other CPU executing pcc CMD_READ has stolen the
1348         * down_write, in which case, send_pcc_cmd will check for pending
1349         * CMD_WRITE commands by checking the pending_pcc_write_cmd.
1350         * So this CPU can be certain that its request will be delivered
1351         *    So in all cases, this CPU knows that its request will be delivered
1352         * by another CPU and can return
1353         *
1354         * After getting the down_write we still need to check for
1355         * pending_pcc_write_cmd to take care of the following scenario
1356         *    The thread running this code could be scheduled out between
1357         * Phase-I and Phase-II. Before it is scheduled back on, another CPU
1358         * could have delivered the request to Platform by triggering the
1359         * doorbell and transferred the ownership of PCC to platform. So this
1360         * avoids triggering an unnecessary doorbell and more importantly before
1361         * triggering the doorbell it makes sure that the PCC channel ownership
1362         * is still with OSPM.
1363         *   pending_pcc_write_cmd can also be cleared by a different CPU, if
1364         * there was a pcc CMD_READ waiting on down_write and it steals the lock
 1365         * before the pcc CMD_WRITE is completed. send_pcc_cmd checks for this
1366         * case during a CMD_READ and if there are pending writes it delivers
1367         * the write command before servicing the read command
1368         */
1369        if (CPC_IN_PCC(desired_reg)) {
1370                if (down_write_trylock(&pcc_ss_data->pcc_lock)) {/* BEGIN Phase-II */
1371                        /* Update only if there are pending write commands */
1372                        if (pcc_ss_data->pending_pcc_write_cmd)
1373                                send_pcc_cmd(pcc_ss_id, CMD_WRITE);
1374                        up_write(&pcc_ss_data->pcc_lock);       /* END Phase-II */
1375                } else
1376                        /* Wait until pcc_write_cnt is updated by send_pcc_cmd */
1377                        wait_event(pcc_ss_data->pcc_write_wait_q,
1378                                   cpc_desc->write_cmd_id != pcc_ss_data->pcc_write_cnt);
1379
1380                /* send_pcc_cmd updates the status in case of failure */
1381                ret = cpc_desc->write_cmd_status;
1382        }
1383        return ret;
1384}
1385EXPORT_SYMBOL_GPL(cppc_set_perf);
1386
1387/**
1388 * cppc_get_transition_latency - returns frequency transition latency in ns
1389 *
 1390 * ACPI CPPC does not explicitly specify how a platform can specify the
 1391 * transition latency for performance change requests. The closest we have
1392 * is the timing information from the PCCT tables which provides the info
1393 * on the number and frequency of PCC commands the platform can handle.
1394 */
1395unsigned int cppc_get_transition_latency(int cpu_num)
1396{
1397        /*
1398         * Expected transition latency is based on the PCCT timing values
 1399         * Below are the definitions from the ACPI spec:
 1400         * pcc_nominal - Expected latency to process a command, in microseconds
1401         * pcc_mpar   - The maximum number of periodic requests that the subspace
1402         *              channel can support, reported in commands per minute. 0
1403         *              indicates no limitation.
1404         * pcc_mrtt   - The minimum amount of time that OSPM must wait after the
1405         *              completion of a command before issuing the next command,
1406         *              in microseconds.
1407         */
1408        unsigned int latency_ns = 0;
1409        struct cpc_desc *cpc_desc;
1410        struct cpc_register_resource *desired_reg;
1411        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
1412        struct cppc_pcc_data *pcc_ss_data;
1413
1414        cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
1415        if (!cpc_desc)
1416                return CPUFREQ_ETERNAL;
1417
1418        desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
1419        if (!CPC_IN_PCC(desired_reg))
1420                return CPUFREQ_ETERNAL;
1421
1422        if (pcc_ss_id < 0)
1423                return CPUFREQ_ETERNAL;
1424
1425        pcc_ss_data = pcc_data[pcc_ss_id];
1426        if (pcc_ss_data->pcc_mpar)
1427                latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);
1428
1429        latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000);
1430        latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000);
1431
1432        return latency_ns;
1433}
1434EXPORT_SYMBOL_GPL(cppc_get_transition_latency);
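/*
 * Worked example for cppc_get_transition_latency() (hypothetical PCCT values):
 * with pcc_mpar = 1000 commands/min the MPAR term is
 * 60 * (1000000000 / 1000) = 60000000 ns (60 ms); with pcc_nominal = 500 us
 * and pcc_mrtt = 60 us the other terms are 500000 ns and 60000 ns, so the
 * reported transition latency would be 60 ms.
 */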
1435