linux/drivers/firmware/arm_scmi/perf.c
// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Performance Protocol
 *
 * Copyright (C) 2018 ARM Ltd.
 */

#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/sort.h>

#include "common.h"

enum scmi_performance_protocol_cmd {
        PERF_DOMAIN_ATTRIBUTES = 0x3,
        PERF_DESCRIBE_LEVELS = 0x4,
        PERF_LIMITS_SET = 0x5,
        PERF_LIMITS_GET = 0x6,
        PERF_LEVEL_SET = 0x7,
        PERF_LEVEL_GET = 0x8,
        PERF_NOTIFY_LIMITS = 0x9,
        PERF_NOTIFY_LEVEL = 0xa,
};

struct scmi_opp {
        u32 perf;
        u32 power;
        u32 trans_latency_us;
};

struct scmi_msg_resp_perf_attributes {
        __le16 num_domains;
        __le16 flags;
#define POWER_SCALE_IN_MILLIWATT(x)     ((x) & BIT(0))
        __le32 stats_addr_low;
        __le32 stats_addr_high;
        __le32 stats_size;
};

struct scmi_msg_resp_perf_domain_attributes {
        __le32 flags;
#define SUPPORTS_SET_LIMITS(x)          ((x) & BIT(31))
#define SUPPORTS_SET_PERF_LVL(x)        ((x) & BIT(30))
#define SUPPORTS_PERF_LIMIT_NOTIFY(x)   ((x) & BIT(29))
#define SUPPORTS_PERF_LEVEL_NOTIFY(x)   ((x) & BIT(28))
        __le32 rate_limit_us;
        __le32 sustained_freq_khz;
        __le32 sustained_perf_level;
        u8 name[SCMI_MAX_STR_SIZE];
};

struct scmi_msg_perf_describe_levels {
        __le32 domain;
        __le32 level_index;
};

struct scmi_perf_set_limits {
        __le32 domain;
        __le32 max_level;
        __le32 min_level;
};

struct scmi_perf_get_limits {
        __le32 max_level;
        __le32 min_level;
};

struct scmi_perf_set_level {
        __le32 domain;
        __le32 level;
};

struct scmi_perf_notify_level_or_limits {
        __le32 domain;
        __le32 notify_enable;
};

struct scmi_msg_resp_perf_describe_levels {
        __le16 num_returned;
        __le16 num_remaining;
        struct {
                __le32 perf_val;
                __le32 power;
                __le16 transition_latency_us;
                __le16 reserved;
        } opp[0];
};

struct perf_dom_info {
        bool set_limits;
        bool set_perf;
        bool perf_limit_notify;
        bool perf_level_notify;
        u32 opp_count;
        u32 sustained_freq_khz;
        u32 sustained_perf_level;
        u32 mult_factor;
        char name[SCMI_MAX_STR_SIZE];
        struct scmi_opp opp[MAX_OPPS];
};

struct scmi_perf_info {
        int num_domains;
        bool power_scale_mw;
        u64 stats_addr;
        u32 stats_size;
        struct perf_dom_info *dom_info;
};

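/*
 * Query the protocol-wide PERFORMANCE attributes: number of performance
 * domains, power scale (milliwatts or abstract units) and the location/size
 * of the shared statistics memory region advertised by the platform.
 */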
static int scmi_perf_attributes_get(const struct scmi_handle *handle,
                                    struct scmi_perf_info *pi)
{
        int ret;
        struct scmi_xfer *t;
        struct scmi_msg_resp_perf_attributes *attr;

        ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
                                 SCMI_PROTOCOL_PERF, 0, sizeof(*attr), &t);
        if (ret)
                return ret;

        attr = t->rx.buf;

        ret = scmi_do_xfer(handle, t);
        if (!ret) {
                u16 flags = le16_to_cpu(attr->flags);

                pi->num_domains = le16_to_cpu(attr->num_domains);
                pi->power_scale_mw = POWER_SCALE_IN_MILLIWATT(flags);
                pi->stats_addr = le32_to_cpu(attr->stats_addr_low) |
                                (u64)le32_to_cpu(attr->stats_addr_high) << 32;
                pi->stats_size = le32_to_cpu(attr->stats_size);
        }

        scmi_xfer_put(handle, t);
        return ret;
}

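/*
 * Query the attributes of a single performance domain: which operations it
 * supports, whether it can raise notifications, its sustained frequency and
 * the matching performance level (used to derive the level-to-Hz multiplier),
 * and its name.
 */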
static int
scmi_perf_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
                                struct perf_dom_info *dom_info)
{
        int ret;
        struct scmi_xfer *t;
        struct scmi_msg_resp_perf_domain_attributes *attr;

        ret = scmi_xfer_get_init(handle, PERF_DOMAIN_ATTRIBUTES,
                                 SCMI_PROTOCOL_PERF, sizeof(domain),
                                 sizeof(*attr), &t);
        if (ret)
                return ret;

        *(__le32 *)t->tx.buf = cpu_to_le32(domain);
        attr = t->rx.buf;

        ret = scmi_do_xfer(handle, t);
        if (!ret) {
                u32 flags = le32_to_cpu(attr->flags);

                dom_info->set_limits = SUPPORTS_SET_LIMITS(flags);
                dom_info->set_perf = SUPPORTS_SET_PERF_LVL(flags);
                dom_info->perf_limit_notify = SUPPORTS_PERF_LIMIT_NOTIFY(flags);
                dom_info->perf_level_notify = SUPPORTS_PERF_LEVEL_NOTIFY(flags);
                dom_info->sustained_freq_khz =
                                        le32_to_cpu(attr->sustained_freq_khz);
                dom_info->sustained_perf_level =
                                        le32_to_cpu(attr->sustained_perf_level);
                dom_info->mult_factor = (dom_info->sustained_freq_khz * 1000) /
                                        dom_info->sustained_perf_level;
                memcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
        }

        scmi_xfer_put(handle, t);
        return ret;
}

static int opp_cmp_func(const void *opp1, const void *opp2)
{
        const struct scmi_opp *t1 = opp1, *t2 = opp2;

        return t1->perf - t2->perf;
}

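/*
 * Enumerate the performance levels (OPPs) of a domain. The platform may
 * return the list in several batches, so keep issuing DESCRIBE_LEVELS with an
 * updated level_index until nothing is reported as remaining, then sort the
 * collected entries by performance level.
 */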
static int
scmi_perf_describe_levels_get(const struct scmi_handle *handle, u32 domain,
                              struct perf_dom_info *perf_dom)
{
        int ret, cnt;
        u32 tot_opp_cnt = 0;
        u16 num_returned, num_remaining;
        struct scmi_xfer *t;
        struct scmi_opp *opp;
        struct scmi_msg_perf_describe_levels *dom_info;
        struct scmi_msg_resp_perf_describe_levels *level_info;

        ret = scmi_xfer_get_init(handle, PERF_DESCRIBE_LEVELS,
                                 SCMI_PROTOCOL_PERF, sizeof(*dom_info), 0, &t);
        if (ret)
                return ret;

        dom_info = t->tx.buf;
        level_info = t->rx.buf;

        do {
                dom_info->domain = cpu_to_le32(domain);
                /* Set the number of OPPs to be skipped/already read */
                dom_info->level_index = cpu_to_le32(tot_opp_cnt);

                ret = scmi_do_xfer(handle, t);
                if (ret)
                        break;

                num_returned = le16_to_cpu(level_info->num_returned);
                num_remaining = le16_to_cpu(level_info->num_remaining);
                if (tot_opp_cnt + num_returned > MAX_OPPS) {
                        dev_err(handle->dev, "No. of OPPs exceeded MAX_OPPS");
                        break;
                }

                opp = &perf_dom->opp[tot_opp_cnt];
                for (cnt = 0; cnt < num_returned; cnt++, opp++) {
                        opp->perf = le32_to_cpu(level_info->opp[cnt].perf_val);
                        opp->power = le32_to_cpu(level_info->opp[cnt].power);
                        opp->trans_latency_us = le16_to_cpu
                                (level_info->opp[cnt].transition_latency_us);

                        dev_dbg(handle->dev, "Level %d Power %d Latency %dus\n",
                                opp->perf, opp->power, opp->trans_latency_us);
                }

                tot_opp_cnt += num_returned;
                /*
                 * check for both returned and remaining to avoid infinite
                 * loop due to buggy firmware
                 */
        } while (num_returned && num_remaining);

        perf_dom->opp_count = tot_opp_cnt;
        scmi_xfer_put(handle, t);

        sort(perf_dom->opp, tot_opp_cnt, sizeof(*opp), opp_cmp_func, NULL);
        return ret;
}

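/* Request the platform to constrain a domain between min and max levels. */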
static int scmi_perf_limits_set(const struct scmi_handle *handle, u32 domain,
                                u32 max_perf, u32 min_perf)
{
        int ret;
        struct scmi_xfer *t;
        struct scmi_perf_set_limits *limits;

        ret = scmi_xfer_get_init(handle, PERF_LIMITS_SET, SCMI_PROTOCOL_PERF,
                                 sizeof(*limits), 0, &t);
        if (ret)
                return ret;

        limits = t->tx.buf;
        limits->domain = cpu_to_le32(domain);
        limits->max_level = cpu_to_le32(max_perf);
        limits->min_level = cpu_to_le32(min_perf);

        ret = scmi_do_xfer(handle, t);

        scmi_xfer_put(handle, t);
        return ret;
}

static int scmi_perf_limits_get(const struct scmi_handle *handle, u32 domain,
                                u32 *max_perf, u32 *min_perf)
{
        int ret;
        struct scmi_xfer *t;
        struct scmi_perf_get_limits *limits;

        ret = scmi_xfer_get_init(handle, PERF_LIMITS_GET, SCMI_PROTOCOL_PERF,
                                 sizeof(__le32), 0, &t);
        if (ret)
                return ret;

        *(__le32 *)t->tx.buf = cpu_to_le32(domain);

        ret = scmi_do_xfer(handle, t);
        if (!ret) {
                limits = t->rx.buf;

                *max_perf = le32_to_cpu(limits->max_level);
                *min_perf = le32_to_cpu(limits->min_level);
        }

        scmi_xfer_put(handle, t);
        return ret;
}

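/*
 * Request a new performance level for a domain. With @poll set, completion of
 * the transfer is polled for instead of waiting on the completion interrupt.
 */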
static int scmi_perf_level_set(const struct scmi_handle *handle, u32 domain,
                               u32 level, bool poll)
{
        int ret;
        struct scmi_xfer *t;
        struct scmi_perf_set_level *lvl;

        ret = scmi_xfer_get_init(handle, PERF_LEVEL_SET, SCMI_PROTOCOL_PERF,
                                 sizeof(*lvl), 0, &t);
        if (ret)
                return ret;

        t->hdr.poll_completion = poll;
        lvl = t->tx.buf;
        lvl->domain = cpu_to_le32(domain);
        lvl->level = cpu_to_le32(level);

        ret = scmi_do_xfer(handle, t);

        scmi_xfer_put(handle, t);
        return ret;
}

static int scmi_perf_level_get(const struct scmi_handle *handle, u32 domain,
                               u32 *level, bool poll)
{
        int ret;
        struct scmi_xfer *t;

        ret = scmi_xfer_get_init(handle, PERF_LEVEL_GET, SCMI_PROTOCOL_PERF,
                                 sizeof(u32), sizeof(u32), &t);
        if (ret)
                return ret;

        t->hdr.poll_completion = poll;
        *(__le32 *)t->tx.buf = cpu_to_le32(domain);

        ret = scmi_do_xfer(handle, t);
        if (!ret)
                *level = le32_to_cpu(*(__le32 *)t->rx.buf);

        scmi_xfer_put(handle, t);
        return ret;
}

/* Device specific ops */
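/*
 * The performance domain of a device is taken from the first cell of its
 * "clocks" phandle specifier.
 */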
static int scmi_dev_domain_id(struct device *dev)
{
        struct of_phandle_args clkspec;

        if (of_parse_phandle_with_args(dev->of_node, "clocks", "#clock-cells",
                                       0, &clkspec))
                return -EINVAL;

        return clkspec.args[0];
}

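/*
 * Register every discovered performance level of the device's domain with
 * the OPP library, converting levels to Hz via mult_factor. On failure, any
 * OPPs already added are removed again.
 */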
static int scmi_dvfs_device_opps_add(const struct scmi_handle *handle,
                                     struct device *dev)
{
        int idx, ret, domain;
        unsigned long freq;
        struct scmi_opp *opp;
        struct perf_dom_info *dom;
        struct scmi_perf_info *pi = handle->perf_priv;

        domain = scmi_dev_domain_id(dev);
        if (domain < 0)
                return domain;

        dom = pi->dom_info + domain;
        if (!dom)
                return -EIO;

        for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) {
                freq = opp->perf * dom->mult_factor;

                ret = dev_pm_opp_add(dev, freq, 0);
                if (ret) {
                        dev_warn(dev, "failed to add opp %luHz\n", freq);

                        while (idx-- > 0) {
                                freq = (--opp)->perf * dom->mult_factor;
                                dev_pm_opp_remove(dev, freq);
                        }
                        return ret;
                }
        }
        return 0;
}

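/*
 * Report the transition latency of the device's domain in nanoseconds, using
 * the latency of the highest (last, after sorting) performance level.
 */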
static int scmi_dvfs_transition_latency_get(const struct scmi_handle *handle,
                                            struct device *dev)
{
        struct perf_dom_info *dom;
        struct scmi_perf_info *pi = handle->perf_priv;
        int domain = scmi_dev_domain_id(dev);

        if (domain < 0)
                return domain;

        dom = pi->dom_info + domain;
        if (!dom)
                return -EIO;

        /* uS to nS */
        return dom->opp[dom->opp_count - 1].trans_latency_us * 1000;
}

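/*
 * Frequency-based wrappers: translate between Hz and abstract performance
 * levels using the per-domain mult_factor.
 */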
static int scmi_dvfs_freq_set(const struct scmi_handle *handle, u32 domain,
                              unsigned long freq, bool poll)
{
        struct scmi_perf_info *pi = handle->perf_priv;
        struct perf_dom_info *dom = pi->dom_info + domain;

        return scmi_perf_level_set(handle, domain, freq / dom->mult_factor,
                                   poll);
}

static int scmi_dvfs_freq_get(const struct scmi_handle *handle, u32 domain,
                              unsigned long *freq, bool poll)
{
        int ret;
        u32 level;
        struct scmi_perf_info *pi = handle->perf_priv;
        struct perf_dom_info *dom = pi->dom_info + domain;

        ret = scmi_perf_level_get(handle, domain, &level, poll);
        if (!ret)
                *freq = level * dom->mult_factor;

        return ret;
}

static struct scmi_perf_ops perf_ops = {
        .limits_set = scmi_perf_limits_set,
        .limits_get = scmi_perf_limits_get,
        .level_set = scmi_perf_level_set,
        .level_get = scmi_perf_level_get,
        .device_domain_id = scmi_dev_domain_id,
        .transition_latency_get = scmi_dvfs_transition_latency_get,
        .device_opps_add = scmi_dvfs_device_opps_add,
        .freq_set = scmi_dvfs_freq_set,
        .freq_get = scmi_dvfs_freq_get,
};

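/*
 * Protocol initialisation: read the protocol version and attributes, allocate
 * and populate per-domain information (attributes and performance levels),
 * then publish the operations and private data on the SCMI handle.
 */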
static int scmi_perf_protocol_init(struct scmi_handle *handle)
{
        int domain;
        u32 version;
        struct scmi_perf_info *pinfo;

        scmi_version_get(handle, SCMI_PROTOCOL_PERF, &version);

        dev_dbg(handle->dev, "Performance Version %d.%d\n",
                PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));

        pinfo = devm_kzalloc(handle->dev, sizeof(*pinfo), GFP_KERNEL);
        if (!pinfo)
                return -ENOMEM;

        scmi_perf_attributes_get(handle, pinfo);

        pinfo->dom_info = devm_kcalloc(handle->dev, pinfo->num_domains,
                                       sizeof(*pinfo->dom_info), GFP_KERNEL);
        if (!pinfo->dom_info)
                return -ENOMEM;

        for (domain = 0; domain < pinfo->num_domains; domain++) {
                struct perf_dom_info *dom = pinfo->dom_info + domain;

                scmi_perf_domain_attributes_get(handle, domain, dom);
                scmi_perf_describe_levels_get(handle, domain, dom);
        }

        handle->perf_ops = &perf_ops;
        handle->perf_priv = pinfo;

        return 0;
}

static int __init scmi_perf_init(void)
{
        return scmi_protocol_register(SCMI_PROTOCOL_PERF,
                                      &scmi_perf_protocol_init);
}
subsys_initcall(scmi_perf_init);