/* linux/drivers/cpufreq/qcom-cpufreq-hw.c */
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
   4 */
   5
   6#include <linux/bitfield.h>
   7#include <linux/cpufreq.h>
   8#include <linux/init.h>
   9#include <linux/interconnect.h>
  10#include <linux/interrupt.h>
  11#include <linux/kernel.h>
  12#include <linux/module.h>
  13#include <linux/of_address.h>
  14#include <linux/of_platform.h>
  15#include <linux/pm_opp.h>
  16#include <linux/slab.h>
  17#include <linux/spinlock.h>
  18
#define LUT_MAX_ENTRIES                 40U	/* max rows in the HW look-up table */
#define LUT_SRC                         GENMASK(31, 30)	/* clock source select in a freq-LUT row */
#define LUT_L_VAL                       GENMASK(7, 0)	/* PLL L-value (multiplier of xo_rate) */
#define LUT_CORE_COUNT                  GENMASK(18, 16)	/* core count field; LUT_TURBO_IND marks turbo */
#define LUT_VOLT                        GENMASK(11, 0)	/* voltage in mV in a volt-LUT row */
#define CLK_HW_DIV                      2	/* fixed divider on the "alternate" clock */
#define LUT_TURBO_IND                   1	/* core-count value indicating a turbo row */

#define HZ_PER_KHZ                      1000
  28
/*
 * Per-SoC register layout of the cpufreq HW block. All offsets are
 * relative to the per-domain base mapped in qcom_cpufreq_hw_cpu_init().
 */
struct qcom_cpufreq_soc_data {
        u32 reg_enable;		/* HW-enable status register */
        u32 reg_freq_lut;	/* first row of the frequency LUT */
        u32 reg_volt_lut;	/* first row of the voltage LUT */
        u32 reg_current_vote;	/* LMh throttle vote (unset/0 for EPSS) */
        u32 reg_perf_state;	/* perf-state (LUT index) request register */
        u8 lut_row_size;	/* byte stride between LUT rows */
};
  37
/* Per-frequency-domain driver state, stored in policy->driver_data. */
struct qcom_cpufreq_data {
        void __iomem *base;	/* mapped domain register block */
        struct resource *res;	/* backing mem resource, released on exit */
        const struct qcom_cpufreq_soc_data *soc_data;

        /*
         * Mutex to synchronize between de-init sequence and re-starting LMh
         * polling/interrupts
         */
        struct mutex throttle_lock;
        int throttle_irq;	/* LMh dcvsh IRQ, <= 0 when not available */
        bool cancel_throttle;	/* set during exit to stop re-arming */
        struct delayed_work throttle_work;	/* polling work while throttled */
        struct cpufreq_policy *policy;	/* policy served by this domain */
};
  53
/* Rates (Hz) of the "alternate" clock (post CLK_HW_DIV) and the XO clock */
static unsigned long cpu_hw_rate, xo_rate;
/* True when a DT OPP table supplies interconnect bandwidth votes */
static bool icc_scaling_enabled;
  56
  57static int qcom_cpufreq_set_bw(struct cpufreq_policy *policy,
  58                               unsigned long freq_khz)
  59{
  60        unsigned long freq_hz = freq_khz * 1000;
  61        struct dev_pm_opp *opp;
  62        struct device *dev;
  63        int ret;
  64
  65        dev = get_cpu_device(policy->cpu);
  66        if (!dev)
  67                return -ENODEV;
  68
  69        opp = dev_pm_opp_find_freq_exact(dev, freq_hz, true);
  70        if (IS_ERR(opp))
  71                return PTR_ERR(opp);
  72
  73        ret = dev_pm_opp_set_opp(dev, opp);
  74        dev_pm_opp_put(opp);
  75        return ret;
  76}
  77
  78static int qcom_cpufreq_update_opp(struct device *cpu_dev,
  79                                   unsigned long freq_khz,
  80                                   unsigned long volt)
  81{
  82        unsigned long freq_hz = freq_khz * 1000;
  83        int ret;
  84
  85        /* Skip voltage update if the opp table is not available */
  86        if (!icc_scaling_enabled)
  87                return dev_pm_opp_add(cpu_dev, freq_hz, volt);
  88
  89        ret = dev_pm_opp_adjust_voltage(cpu_dev, freq_hz, volt, volt, volt);
  90        if (ret) {
  91                dev_err(cpu_dev, "Voltage update failed freq=%ld\n", freq_khz);
  92                return ret;
  93        }
  94
  95        return dev_pm_opp_enable(cpu_dev, freq_hz);
  96}
  97
  98static int qcom_cpufreq_hw_target_index(struct cpufreq_policy *policy,
  99                                        unsigned int index)
 100{
 101        struct qcom_cpufreq_data *data = policy->driver_data;
 102        const struct qcom_cpufreq_soc_data *soc_data = data->soc_data;
 103        unsigned long freq = policy->freq_table[index].frequency;
 104
 105        writel_relaxed(index, data->base + soc_data->reg_perf_state);
 106
 107        if (icc_scaling_enabled)
 108                qcom_cpufreq_set_bw(policy, freq);
 109
 110        return 0;
 111}
 112
 113static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
 114{
 115        struct qcom_cpufreq_data *data;
 116        const struct qcom_cpufreq_soc_data *soc_data;
 117        struct cpufreq_policy *policy;
 118        unsigned int index;
 119
 120        policy = cpufreq_cpu_get_raw(cpu);
 121        if (!policy)
 122                return 0;
 123
 124        data = policy->driver_data;
 125        soc_data = data->soc_data;
 126
 127        index = readl_relaxed(data->base + soc_data->reg_perf_state);
 128        index = min(index, LUT_MAX_ENTRIES - 1);
 129
 130        return policy->freq_table[index].frequency;
 131}
 132
 133static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
 134                                                unsigned int target_freq)
 135{
 136        struct qcom_cpufreq_data *data = policy->driver_data;
 137        const struct qcom_cpufreq_soc_data *soc_data = data->soc_data;
 138        unsigned int index;
 139
 140        index = policy->cached_resolved_idx;
 141        writel_relaxed(index, data->base + soc_data->reg_perf_state);
 142
 143        return policy->freq_table[index].frequency;
 144}
 145
 146static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
 147                                    struct cpufreq_policy *policy)
 148{
 149        u32 data, src, lval, i, core_count, prev_freq = 0, freq;
 150        u32 volt;
 151        struct cpufreq_frequency_table  *table;
 152        struct dev_pm_opp *opp;
 153        unsigned long rate;
 154        int ret;
 155        struct qcom_cpufreq_data *drv_data = policy->driver_data;
 156        const struct qcom_cpufreq_soc_data *soc_data = drv_data->soc_data;
 157
 158        table = kcalloc(LUT_MAX_ENTRIES + 1, sizeof(*table), GFP_KERNEL);
 159        if (!table)
 160                return -ENOMEM;
 161
 162        ret = dev_pm_opp_of_add_table(cpu_dev);
 163        if (!ret) {
 164                /* Disable all opps and cross-validate against LUT later */
 165                icc_scaling_enabled = true;
 166                for (rate = 0; ; rate++) {
 167                        opp = dev_pm_opp_find_freq_ceil(cpu_dev, &rate);
 168                        if (IS_ERR(opp))
 169                                break;
 170
 171                        dev_pm_opp_put(opp);
 172                        dev_pm_opp_disable(cpu_dev, rate);
 173                }
 174        } else if (ret != -ENODEV) {
 175                dev_err(cpu_dev, "Invalid opp table in device tree\n");
 176                return ret;
 177        } else {
 178                policy->fast_switch_possible = true;
 179                icc_scaling_enabled = false;
 180        }
 181
 182        for (i = 0; i < LUT_MAX_ENTRIES; i++) {
 183                data = readl_relaxed(drv_data->base + soc_data->reg_freq_lut +
 184                                      i * soc_data->lut_row_size);
 185                src = FIELD_GET(LUT_SRC, data);
 186                lval = FIELD_GET(LUT_L_VAL, data);
 187                core_count = FIELD_GET(LUT_CORE_COUNT, data);
 188
 189                data = readl_relaxed(drv_data->base + soc_data->reg_volt_lut +
 190                                      i * soc_data->lut_row_size);
 191                volt = FIELD_GET(LUT_VOLT, data) * 1000;
 192
 193                if (src)
 194                        freq = xo_rate * lval / 1000;
 195                else
 196                        freq = cpu_hw_rate / 1000;
 197
 198                if (freq != prev_freq && core_count != LUT_TURBO_IND) {
 199                        if (!qcom_cpufreq_update_opp(cpu_dev, freq, volt)) {
 200                                table[i].frequency = freq;
 201                                dev_dbg(cpu_dev, "index=%d freq=%d, core_count %d\n", i,
 202                                freq, core_count);
 203                        } else {
 204                                dev_warn(cpu_dev, "failed to update OPP for freq=%d\n", freq);
 205                                table[i].frequency = CPUFREQ_ENTRY_INVALID;
 206                        }
 207
 208                } else if (core_count == LUT_TURBO_IND) {
 209                        table[i].frequency = CPUFREQ_ENTRY_INVALID;
 210                }
 211
 212                /*
 213                 * Two of the same frequencies with the same core counts means
 214                 * end of table
 215                 */
 216                if (i > 0 && prev_freq == freq) {
 217                        struct cpufreq_frequency_table *prev = &table[i - 1];
 218
 219                        /*
 220                         * Only treat the last frequency that might be a boost
 221                         * as the boost frequency
 222                         */
 223                        if (prev->frequency == CPUFREQ_ENTRY_INVALID) {
 224                                if (!qcom_cpufreq_update_opp(cpu_dev, prev_freq, volt)) {
 225                                        prev->frequency = prev_freq;
 226                                        prev->flags = CPUFREQ_BOOST_FREQ;
 227                                } else {
 228                                        dev_warn(cpu_dev, "failed to update OPP for freq=%d\n",
 229                                                 freq);
 230                                }
 231                        }
 232
 233                        break;
 234                }
 235
 236                prev_freq = freq;
 237        }
 238
 239        table[i].frequency = CPUFREQ_TABLE_END;
 240        policy->freq_table = table;
 241        dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
 242
 243        return 0;
 244}
 245
 246static void qcom_get_related_cpus(int index, struct cpumask *m)
 247{
 248        struct device_node *cpu_np;
 249        struct of_phandle_args args;
 250        int cpu, ret;
 251
 252        for_each_possible_cpu(cpu) {
 253                cpu_np = of_cpu_device_node_get(cpu);
 254                if (!cpu_np)
 255                        continue;
 256
 257                ret = of_parse_phandle_with_args(cpu_np, "qcom,freq-domain",
 258                                                 "#freq-domain-cells", 0,
 259                                                 &args);
 260                of_node_put(cpu_np);
 261                if (ret < 0)
 262                        continue;
 263
 264                if (index == args.args[0])
 265                        cpumask_set_cpu(cpu, m);
 266        }
 267}
 268
 269static unsigned int qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
 270{
 271        unsigned int val = readl_relaxed(data->base + data->soc_data->reg_current_vote);
 272
 273        return (val & 0x3FF) * 19200;
 274}
 275
 276static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
 277{
 278        unsigned long max_capacity, capacity, freq_hz, throttled_freq;
 279        struct cpufreq_policy *policy = data->policy;
 280        int cpu = cpumask_first(policy->cpus);
 281        struct device *dev = get_cpu_device(cpu);
 282        struct dev_pm_opp *opp;
 283        unsigned int freq;
 284
 285        /*
 286         * Get the h/w throttled frequency, normalize it using the
 287         * registered opp table and use it to calculate thermal pressure.
 288         */
 289        freq = qcom_lmh_get_throttle_freq(data);
 290        freq_hz = freq * HZ_PER_KHZ;
 291
 292        opp = dev_pm_opp_find_freq_floor(dev, &freq_hz);
 293        if (IS_ERR(opp) && PTR_ERR(opp) == -ERANGE)
 294                dev_pm_opp_find_freq_ceil(dev, &freq_hz);
 295
 296        throttled_freq = freq_hz / HZ_PER_KHZ;
 297
 298        /* Update thermal pressure */
 299
 300        max_capacity = arch_scale_cpu_capacity(cpu);
 301        capacity = mult_frac(max_capacity, throttled_freq, policy->cpuinfo.max_freq);
 302
 303        /* Don't pass boost capacity to scheduler */
 304        if (capacity > max_capacity)
 305                capacity = max_capacity;
 306
 307        arch_set_thermal_pressure(policy->cpus, max_capacity - capacity);
 308
 309        /*
 310         * In the unlikely case policy is unregistered do not enable
 311         * polling or h/w interrupt
 312         */
 313        mutex_lock(&data->throttle_lock);
 314        if (data->cancel_throttle)
 315                goto out;
 316
 317        /*
 318         * If h/w throttled frequency is higher than what cpufreq has requested
 319         * for, then stop polling and switch back to interrupt mechanism.
 320         */
 321        if (throttled_freq >= qcom_cpufreq_hw_get(cpu))
 322                enable_irq(data->throttle_irq);
 323        else
 324                mod_delayed_work(system_highpri_wq, &data->throttle_work,
 325                                 msecs_to_jiffies(10));
 326
 327out:
 328        mutex_unlock(&data->throttle_lock);
 329}
 330
 331static void qcom_lmh_dcvs_poll(struct work_struct *work)
 332{
 333        struct qcom_cpufreq_data *data;
 334
 335        data = container_of(work, struct qcom_cpufreq_data, throttle_work.work);
 336        qcom_lmh_dcvs_notify(data);
 337}
 338
 339static irqreturn_t qcom_lmh_dcvs_handle_irq(int irq, void *data)
 340{
 341        struct qcom_cpufreq_data *c_data = data;
 342
 343        /* Disable interrupt and enable polling */
 344        disable_irq_nosync(c_data->throttle_irq);
 345        qcom_lmh_dcvs_notify(c_data);
 346
 347        return 0;
 348}
 349
/* Register layout for the original OSM-based cpufreq hardware */
static const struct qcom_cpufreq_soc_data qcom_soc_data = {
        .reg_enable = 0x0,
        .reg_freq_lut = 0x110,
        .reg_volt_lut = 0x114,
        .reg_current_vote = 0x704,
        .reg_perf_state = 0x920,
        .lut_row_size = 32,
};
 358
/* Register layout for EPSS-based hardware; no reg_current_vote here */
static const struct qcom_cpufreq_soc_data epss_soc_data = {
        .reg_enable = 0x0,
        .reg_freq_lut = 0x100,
        .reg_volt_lut = 0x200,
        .reg_perf_state = 0x320,
        .lut_row_size = 4,
};
 366
/* DT match table selecting the per-SoC register layout */
static const struct of_device_id qcom_cpufreq_hw_match[] = {
        { .compatible = "qcom,cpufreq-hw", .data = &qcom_soc_data },
        { .compatible = "qcom,cpufreq-epss", .data = &epss_soc_data },
        {}
};
MODULE_DEVICE_TABLE(of, qcom_cpufreq_hw_match);
 373
/*
 * qcom_cpufreq_hw_lmh_init - set up the optional LMh dcvsh interrupt for
 * this frequency domain.
 *
 * LMh support is best-effort: a missing interrupt or a failed request
 * leaves cpufreq fully functional without throttle notifications, hence
 * the 0 returns below. Only -EPROBE_DEFER is propagated to the caller.
 */
static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
{
        struct qcom_cpufreq_data *data = policy->driver_data;
        struct platform_device *pdev = cpufreq_get_driver_data();
        char irq_name[15];
        int ret;

        /*
         * Look for LMh interrupt. If no interrupt line is specified /
         * if there is an error, allow cpufreq to be enabled as usual.
         */
        data->throttle_irq = platform_get_irq(pdev, index);
        if (data->throttle_irq <= 0)
                return data->throttle_irq == -EPROBE_DEFER ? -EPROBE_DEFER : 0;

        /* State consumed by qcom_lmh_dcvs_notify()/the poll work */
        data->cancel_throttle = false;
        data->policy = policy;

        mutex_init(&data->throttle_lock);
        INIT_DEFERRABLE_WORK(&data->throttle_work, qcom_lmh_dcvs_poll);

        /* NOTE(review): 15 bytes fits "dcvsh-irq-" plus a 4-digit CPU id */
        snprintf(irq_name, sizeof(irq_name), "dcvsh-irq-%u", policy->cpu);
        ret = request_threaded_irq(data->throttle_irq, NULL, qcom_lmh_dcvs_handle_irq,
                                   IRQF_ONESHOT, irq_name, data);
        if (ret) {
                dev_err(&pdev->dev, "Error registering %s: %d\n", irq_name, ret);
                return 0;
        }

        return 0;
}
 405
/*
 * qcom_cpufreq_hw_lmh_exit - tear down LMh throttling for a domain.
 *
 * Ordering matters: cancel_throttle is set under the lock so the poll
 * work cannot re-arm itself or re-enable the IRQ afterwards; only then
 * is the work flushed and the IRQ freed.
 */
static void qcom_cpufreq_hw_lmh_exit(struct qcom_cpufreq_data *data)
{
        /* LMh was never set up for this domain */
        if (data->throttle_irq <= 0)
                return;

        mutex_lock(&data->throttle_lock);
        data->cancel_throttle = true;
        mutex_unlock(&data->throttle_lock);

        cancel_delayed_work_sync(&data->throttle_work);
        free_irq(data->throttle_irq, data);
}
 418
 419static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
 420{
 421        struct platform_device *pdev = cpufreq_get_driver_data();
 422        struct device *dev = &pdev->dev;
 423        struct of_phandle_args args;
 424        struct device_node *cpu_np;
 425        struct device *cpu_dev;
 426        struct resource *res;
 427        void __iomem *base;
 428        struct qcom_cpufreq_data *data;
 429        int ret, index;
 430
 431        cpu_dev = get_cpu_device(policy->cpu);
 432        if (!cpu_dev) {
 433                pr_err("%s: failed to get cpu%d device\n", __func__,
 434                       policy->cpu);
 435                return -ENODEV;
 436        }
 437
 438        cpu_np = of_cpu_device_node_get(policy->cpu);
 439        if (!cpu_np)
 440                return -EINVAL;
 441
 442        ret = of_parse_phandle_with_args(cpu_np, "qcom,freq-domain",
 443                                         "#freq-domain-cells", 0, &args);
 444        of_node_put(cpu_np);
 445        if (ret)
 446                return ret;
 447
 448        index = args.args[0];
 449
 450        res = platform_get_resource(pdev, IORESOURCE_MEM, index);
 451        if (!res) {
 452                dev_err(dev, "failed to get mem resource %d\n", index);
 453                return -ENODEV;
 454        }
 455
 456        if (!request_mem_region(res->start, resource_size(res), res->name)) {
 457                dev_err(dev, "failed to request resource %pR\n", res);
 458                return -EBUSY;
 459        }
 460
 461        base = ioremap(res->start, resource_size(res));
 462        if (!base) {
 463                dev_err(dev, "failed to map resource %pR\n", res);
 464                ret = -ENOMEM;
 465                goto release_region;
 466        }
 467
 468        data = kzalloc(sizeof(*data), GFP_KERNEL);
 469        if (!data) {
 470                ret = -ENOMEM;
 471                goto unmap_base;
 472        }
 473
 474        data->soc_data = of_device_get_match_data(&pdev->dev);
 475        data->base = base;
 476        data->res = res;
 477
 478        /* HW should be in enabled state to proceed */
 479        if (!(readl_relaxed(base + data->soc_data->reg_enable) & 0x1)) {
 480                dev_err(dev, "Domain-%d cpufreq hardware not enabled\n", index);
 481                ret = -ENODEV;
 482                goto error;
 483        }
 484
 485        qcom_get_related_cpus(index, policy->cpus);
 486        if (!cpumask_weight(policy->cpus)) {
 487                dev_err(dev, "Domain-%d failed to get related CPUs\n", index);
 488                ret = -ENOENT;
 489                goto error;
 490        }
 491
 492        policy->driver_data = data;
 493        policy->dvfs_possible_from_any_cpu = true;
 494
 495        ret = qcom_cpufreq_hw_read_lut(cpu_dev, policy);
 496        if (ret) {
 497                dev_err(dev, "Domain-%d failed to read LUT\n", index);
 498                goto error;
 499        }
 500
 501        ret = dev_pm_opp_get_opp_count(cpu_dev);
 502        if (ret <= 0) {
 503                dev_err(cpu_dev, "Failed to add OPPs\n");
 504                ret = -ENODEV;
 505                goto error;
 506        }
 507
 508        if (policy_has_boost_freq(policy)) {
 509                ret = cpufreq_enable_boost_support();
 510                if (ret)
 511                        dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
 512        }
 513
 514        ret = qcom_cpufreq_hw_lmh_init(policy, index);
 515        if (ret)
 516                goto error;
 517
 518        return 0;
 519error:
 520        kfree(data);
 521unmap_base:
 522        iounmap(base);
 523release_region:
 524        release_mem_region(res->start, resource_size(res));
 525        return ret;
 526}
 527
/*
 * cpufreq ->exit hook: undo everything done in ->init for this policy.
 * LMh is stopped before freeing the table/data it may still reference.
 */
static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
{
        struct device *cpu_dev = get_cpu_device(policy->cpu);
        struct qcom_cpufreq_data *data = policy->driver_data;
        struct resource *res = data->res;
        void __iomem *base = data->base;

        /* Remove both LUT-created and DT-provided OPPs */
        dev_pm_opp_remove_all_dynamic(cpu_dev);
        dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
        qcom_cpufreq_hw_lmh_exit(data);
        kfree(policy->freq_table);
        kfree(data);
        iounmap(base);
        release_mem_region(res->start, resource_size(res));

        return 0;
}
 545
/* sysfs attributes exposed per policy (available and boost frequencies) */
static struct freq_attr *qcom_cpufreq_hw_attr[] = {
        &cpufreq_freq_attr_scaling_available_freqs,
        &cpufreq_freq_attr_scaling_boost_freqs,
        NULL
};
 551
/* cpufreq driver ops; driver_data is set to the platform device in probe */
static struct cpufreq_driver cpufreq_qcom_hw_driver = {
        .flags          = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
                          CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
                          CPUFREQ_IS_COOLING_DEV,
        .verify         = cpufreq_generic_frequency_table_verify,
        .target_index   = qcom_cpufreq_hw_target_index,
        .get            = qcom_cpufreq_hw_get,
        .init           = qcom_cpufreq_hw_cpu_init,
        .exit           = qcom_cpufreq_hw_cpu_exit,
        .register_em    = cpufreq_register_em_with_opp,
        .fast_switch    = qcom_cpufreq_hw_fast_switch,
        .name           = "qcom-cpufreq-hw",
        .attr           = qcom_cpufreq_hw_attr,
};
 566
 567static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
 568{
 569        struct device *cpu_dev;
 570        struct clk *clk;
 571        int ret;
 572
 573        clk = clk_get(&pdev->dev, "xo");
 574        if (IS_ERR(clk))
 575                return PTR_ERR(clk);
 576
 577        xo_rate = clk_get_rate(clk);
 578        clk_put(clk);
 579
 580        clk = clk_get(&pdev->dev, "alternate");
 581        if (IS_ERR(clk))
 582                return PTR_ERR(clk);
 583
 584        cpu_hw_rate = clk_get_rate(clk) / CLK_HW_DIV;
 585        clk_put(clk);
 586
 587        cpufreq_qcom_hw_driver.driver_data = pdev;
 588
 589        /* Check for optional interconnect paths on CPU0 */
 590        cpu_dev = get_cpu_device(0);
 591        if (!cpu_dev)
 592                return -EPROBE_DEFER;
 593
 594        ret = dev_pm_opp_of_find_icc_paths(cpu_dev, NULL);
 595        if (ret)
 596                return ret;
 597
 598        ret = cpufreq_register_driver(&cpufreq_qcom_hw_driver);
 599        if (ret)
 600                dev_err(&pdev->dev, "CPUFreq HW driver failed to register\n");
 601        else
 602                dev_dbg(&pdev->dev, "QCOM CPUFreq HW driver initialized\n");
 603
 604        return ret;
 605}
 606
/* Platform remove: unregister the cpufreq driver (per-policy ->exit runs) */
static int qcom_cpufreq_hw_driver_remove(struct platform_device *pdev)
{
        return cpufreq_unregister_driver(&cpufreq_qcom_hw_driver);
}
 611
/* Platform driver bound via the DT match table above */
static struct platform_driver qcom_cpufreq_hw_driver = {
        .probe = qcom_cpufreq_hw_driver_probe,
        .remove = qcom_cpufreq_hw_driver_remove,
        .driver = {
                .name = "qcom-cpufreq-hw",
                .of_match_table = qcom_cpufreq_hw_match,
        },
};
 620
/* Registered at postcore_initcall so cpufreq is available early in boot */
static int __init qcom_cpufreq_hw_init(void)
{
        return platform_driver_register(&qcom_cpufreq_hw_driver);
}
postcore_initcall(qcom_cpufreq_hw_init);
 626
/* Module unload: unregister the platform driver */
static void __exit qcom_cpufreq_hw_exit(void)
{
        platform_driver_unregister(&qcom_cpufreq_hw_driver);
}
module_exit(qcom_cpufreq_hw_exit);
 632
 633MODULE_DESCRIPTION("QCOM CPUFREQ HW Driver");
 634MODULE_LICENSE("GPL v2");
 635