linux/drivers/media/platform/qcom/venus/pm_helpers.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2019 Linaro Ltd.
   4 *
   5 * Author: Stanimir Varbanov <stanimir.varbanov@linaro.org>
   6 */
   7#include <linux/clk.h>
   8#include <linux/interconnect.h>
   9#include <linux/iopoll.h>
  10#include <linux/kernel.h>
  11#include <linux/pm_domain.h>
  12#include <linux/pm_opp.h>
  13#include <linux/pm_runtime.h>
  14#include <linux/reset.h>
  15#include <linux/types.h>
  16#include <media/v4l2-mem2mem.h>
  17
  18#include "core.h"
  19#include "hfi_parser.h"
  20#include "hfi_venus_io.h"
  21#include "pm_helpers.h"
  22#include "hfi_platform.h"
  23
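/*
 * Set when the platform resources describe no per-vcodec power domains
 * (see core_get_v4()). With the legacy binding each codec core is powered
 * only through its own clocks and power-control registers; with the
 * non-legacy binding the per-core GenPD power domains are used instead.
 */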
  24static bool legacy_binding;
  25
  26static int core_clks_get(struct venus_core *core)
  27{
  28        const struct venus_resources *res = core->res;
  29        struct device *dev = core->dev;
  30        unsigned int i;
  31
  32        for (i = 0; i < res->clks_num; i++) {
  33                core->clks[i] = devm_clk_get(dev, res->clks[i]);
  34                if (IS_ERR(core->clks[i]))
  35                        return PTR_ERR(core->clks[i]);
  36        }
  37
  38        return 0;
  39}
  40
  41static int core_clks_enable(struct venus_core *core)
  42{
  43        const struct venus_resources *res = core->res;
  44        const struct freq_tbl *freq_tbl = core->res->freq_tbl;
  45        unsigned int freq_tbl_size = core->res->freq_tbl_size;
  46        unsigned long freq;
  47        unsigned int i;
  48        int ret;
  49
  50        if (!freq_tbl)
  51                return -EINVAL;
  52
  53        freq = freq_tbl[freq_tbl_size - 1].freq;
  54
  55        for (i = 0; i < res->clks_num; i++) {
  56                if (IS_V6(core)) {
  57                        ret = clk_set_rate(core->clks[i], freq);
  58                        if (ret)
  59                                goto err;
  60                }
  61
  62                ret = clk_prepare_enable(core->clks[i]);
  63                if (ret)
  64                        goto err;
  65        }
  66
  67        return 0;
  68err:
  69        while (i--)
  70                clk_disable_unprepare(core->clks[i]);
  71
  72        return ret;
  73}
  74
  75static void core_clks_disable(struct venus_core *core)
  76{
  77        const struct venus_resources *res = core->res;
  78        unsigned int i = res->clks_num;
  79
  80        while (i--)
  81                clk_disable_unprepare(core->clks[i]);
  82}
  83
  84static int core_clks_set_rate(struct venus_core *core, unsigned long freq)
  85{
  86        int ret;
  87
  88        ret = dev_pm_opp_set_rate(core->dev, freq);
  89        if (ret)
  90                return ret;
  91
  92        ret = clk_set_rate(core->vcodec0_clks[0], freq);
  93        if (ret)
  94                return ret;
  95
  96        ret = clk_set_rate(core->vcodec1_clks[0], freq);
  97        if (ret)
  98                return ret;
  99
 100        return 0;
 101}
 102
 103static int vcodec_clks_get(struct venus_core *core, struct device *dev,
 104                           struct clk **clks, const char * const *id)
 105{
 106        const struct venus_resources *res = core->res;
 107        unsigned int i;
 108
 109        for (i = 0; i < res->vcodec_clks_num; i++) {
 110                if (!id[i])
 111                        continue;
 112                clks[i] = devm_clk_get(dev, id[i]);
 113                if (IS_ERR(clks[i]))
 114                        return PTR_ERR(clks[i]);
 115        }
 116
 117        return 0;
 118}
 119
 120static int vcodec_clks_enable(struct venus_core *core, struct clk **clks)
 121{
 122        const struct venus_resources *res = core->res;
 123        unsigned int i;
 124        int ret;
 125
 126        for (i = 0; i < res->vcodec_clks_num; i++) {
 127                ret = clk_prepare_enable(clks[i]);
 128                if (ret)
 129                        goto err;
 130        }
 131
 132        return 0;
 133err:
 134        while (i--)
 135                clk_disable_unprepare(clks[i]);
 136
 137        return ret;
 138}
 139
 140static void vcodec_clks_disable(struct venus_core *core, struct clk **clks)
 141{
 142        const struct venus_resources *res = core->res;
 143        unsigned int i = res->vcodec_clks_num;
 144
 145        while (i--)
 146                clk_disable_unprepare(clks[i]);
 147}
 148
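/*
 * Load of one instance in macroblocks per second: the 16x16-aligned frame
 * size in macroblocks multiplied by the frame rate. For example, 1920x1080
 * at 30 fps gives (1920/16) * (1088/16) * 30 = 244800 mbs/sec. Instances
 * that are not between INIT and STOP contribute no load.
 */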
 149static u32 load_per_instance(struct venus_inst *inst)
 150{
 151        u32 mbs;
 152
 153        if (!inst || !(inst->state >= INST_INIT && inst->state < INST_STOP))
 154                return 0;
 155
 156        mbs = (ALIGN(inst->width, 16) / 16) * (ALIGN(inst->height, 16) / 16);
 157
 158        return mbs * inst->fps;
 159}
 160
 161static u32 load_per_type(struct venus_core *core, u32 session_type)
 162{
 163        struct venus_inst *inst = NULL;
 164        u32 mbs_per_sec = 0;
 165
 166        mutex_lock(&core->lock);
 167        list_for_each_entry(inst, &core->instances, list) {
 168                if (inst->session_type != session_type)
 169                        continue;
 170
 171                mbs_per_sec += load_per_instance(inst);
 172        }
 173        mutex_unlock(&core->lock);
 174
 175        return mbs_per_sec;
 176}
 177
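/*
 * Translate a macroblocks-per-second load into average and peak interconnect
 * bandwidth by walking the encoder or decoder bandwidth table of the session
 * type; 10-bit formats use the dedicated 10-bit columns of the table.
 */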
 178static void mbs_to_bw(struct venus_inst *inst, u32 mbs, u32 *avg, u32 *peak)
 179{
 180        const struct venus_resources *res = inst->core->res;
 181        const struct bw_tbl *bw_tbl;
 182        unsigned int num_rows, i;
 183
 184        *avg = 0;
 185        *peak = 0;
 186
 187        if (mbs == 0)
 188                return;
 189
 190        if (inst->session_type == VIDC_SESSION_TYPE_ENC) {
 191                num_rows = res->bw_tbl_enc_size;
 192                bw_tbl = res->bw_tbl_enc;
 193        } else if (inst->session_type == VIDC_SESSION_TYPE_DEC) {
 194                num_rows = res->bw_tbl_dec_size;
 195                bw_tbl = res->bw_tbl_dec;
 196        } else {
 197                return;
 198        }
 199
 200        if (!bw_tbl || num_rows == 0)
 201                return;
 202
 203        for (i = 0; i < num_rows; i++) {
 204                if (i != 0 && mbs > bw_tbl[i].mbs_per_sec)
 205                        break;
 206
 207                if (inst->dpb_fmt & HFI_COLOR_FORMAT_10_BIT_BASE) {
 208                        *avg = bw_tbl[i].avg_10bit;
 209                        *peak = bw_tbl[i].peak_10bit;
 210                } else {
 211                        *avg = bw_tbl[i].avg;
 212                        *peak = bw_tbl[i].peak;
 213                }
 214        }
 215}
 216
 217static int load_scale_bw(struct venus_core *core)
 218{
 219        struct venus_inst *inst = NULL;
 220        u32 mbs_per_sec, avg, peak, total_avg = 0, total_peak = 0;
 221
 222        mutex_lock(&core->lock);
 223        list_for_each_entry(inst, &core->instances, list) {
 224                mbs_per_sec = load_per_instance(inst);
 225                mbs_to_bw(inst, mbs_per_sec, &avg, &peak);
 226                total_avg += avg;
 227                total_peak += peak;
 228        }
 229        mutex_unlock(&core->lock);
 230
  231        /*
  232         * Keep a minimum bandwidth vote for the "video-mem" path,
  233         * so that the clocks can be disabled during vdec_session_release().
  234         * The actual bandwidth drop will be done during device suspend,
  235         * so that the device can power down without any warnings.
  236         */
 237
 238        if (!total_avg && !total_peak)
 239                total_avg = kbps_to_icc(1000);
 240
 241        dev_dbg(core->dev, VDBGL "total: avg_bw: %u, peak_bw: %u\n",
 242                total_avg, total_peak);
 243
 244        return icc_set_bw(core->video_path, total_avg, total_peak);
 245}
 246
 247static int load_scale_v1(struct venus_inst *inst)
 248{
 249        struct venus_core *core = inst->core;
 250        const struct freq_tbl *table = core->res->freq_tbl;
 251        unsigned int num_rows = core->res->freq_tbl_size;
 252        unsigned long freq = table[0].freq;
 253        struct device *dev = core->dev;
 254        u32 mbs_per_sec;
 255        unsigned int i;
 256        int ret;
 257
 258        mbs_per_sec = load_per_type(core, VIDC_SESSION_TYPE_ENC) +
 259                      load_per_type(core, VIDC_SESSION_TYPE_DEC);
 260
 261        if (mbs_per_sec > core->res->max_load)
 262                dev_warn(dev, "HW is overloaded, needed: %d max: %d\n",
 263                         mbs_per_sec, core->res->max_load);
 264
 265        if (!mbs_per_sec && num_rows > 1) {
 266                freq = table[num_rows - 1].freq;
 267                goto set_freq;
 268        }
 269
 270        for (i = 0; i < num_rows; i++) {
 271                if (mbs_per_sec > table[i].load)
 272                        break;
 273                freq = table[i].freq;
 274        }
 275
 276set_freq:
 277
 278        ret = core_clks_set_rate(core, freq);
 279        if (ret) {
 280                dev_err(dev, "failed to set clock rate %lu (%d)\n",
 281                        freq, ret);
 282                return ret;
 283        }
 284
 285        ret = load_scale_bw(core);
 286        if (ret) {
 287                dev_err(dev, "failed to set bandwidth (%d)\n",
 288                        ret);
 289                return ret;
 290        }
 291
 292        return 0;
 293}
 294
 295static int core_get_v1(struct venus_core *core)
 296{
 297        int ret;
 298
 299        ret = core_clks_get(core);
 300        if (ret)
 301                return ret;
 302
 303        core->opp_table = dev_pm_opp_set_clkname(core->dev, "core");
 304        if (IS_ERR(core->opp_table))
 305                return PTR_ERR(core->opp_table);
 306
 307        return 0;
 308}
 309
 310static void core_put_v1(struct venus_core *core)
 311{
 312        dev_pm_opp_put_clkname(core->opp_table);
 313}
 314
 315static int core_power_v1(struct venus_core *core, int on)
 316{
 317        int ret = 0;
 318
 319        if (on == POWER_ON)
 320                ret = core_clks_enable(core);
 321        else
 322                core_clks_disable(core);
 323
 324        return ret;
 325}
 326
 327static const struct venus_pm_ops pm_ops_v1 = {
 328        .core_get = core_get_v1,
 329        .core_put = core_put_v1,
 330        .core_power = core_power_v1,
 331        .load_scale = load_scale_v1,
 332};
 333
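/*
 * Toggle the codec power-control register around clock operations. Writing 0
 * is assumed to keep the codec out of hardware power collapse while its
 * clocks are being changed, and writing 1 to hand power control back to the
 * hardware; the register-level semantics are not documented here.
 */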
 334static void
 335vcodec_control_v3(struct venus_core *core, u32 session_type, bool enable)
 336{
 337        void __iomem *ctrl;
 338
 339        if (session_type == VIDC_SESSION_TYPE_DEC)
 340                ctrl = core->wrapper_base + WRAPPER_VDEC_VCODEC_POWER_CONTROL;
 341        else
 342                ctrl = core->wrapper_base + WRAPPER_VENC_VCODEC_POWER_CONTROL;
 343
 344        if (enable)
 345                writel(0, ctrl);
 346        else
 347                writel(1, ctrl);
 348}
 349
 350static int vdec_get_v3(struct device *dev)
 351{
 352        struct venus_core *core = dev_get_drvdata(dev);
 353
 354        return vcodec_clks_get(core, dev, core->vcodec0_clks,
 355                               core->res->vcodec0_clks);
 356}
 357
 358static int vdec_power_v3(struct device *dev, int on)
 359{
 360        struct venus_core *core = dev_get_drvdata(dev);
 361        int ret = 0;
 362
 363        vcodec_control_v3(core, VIDC_SESSION_TYPE_DEC, true);
 364
 365        if (on == POWER_ON)
 366                ret = vcodec_clks_enable(core, core->vcodec0_clks);
 367        else
 368                vcodec_clks_disable(core, core->vcodec0_clks);
 369
 370        vcodec_control_v3(core, VIDC_SESSION_TYPE_DEC, false);
 371
 372        return ret;
 373}
 374
 375static int venc_get_v3(struct device *dev)
 376{
 377        struct venus_core *core = dev_get_drvdata(dev);
 378
 379        return vcodec_clks_get(core, dev, core->vcodec1_clks,
 380                               core->res->vcodec1_clks);
 381}
 382
 383static int venc_power_v3(struct device *dev, int on)
 384{
 385        struct venus_core *core = dev_get_drvdata(dev);
 386        int ret = 0;
 387
 388        vcodec_control_v3(core, VIDC_SESSION_TYPE_ENC, true);
 389
 390        if (on == POWER_ON)
 391                ret = vcodec_clks_enable(core, core->vcodec1_clks);
 392        else
 393                vcodec_clks_disable(core, core->vcodec1_clks);
 394
 395        vcodec_control_v3(core, VIDC_SESSION_TYPE_ENC, false);
 396
 397        return ret;
 398}
 399
 400static const struct venus_pm_ops pm_ops_v3 = {
 401        .core_get = core_get_v1,
 402        .core_put = core_put_v1,
 403        .core_power = core_power_v1,
 404        .vdec_get = vdec_get_v3,
 405        .vdec_power = vdec_power_v3,
 406        .venc_get = venc_get_v3,
 407        .venc_power = venc_power_v3,
 408        .load_scale = load_scale_v1,
 409};
 410
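/*
 * Same idea as vcodec_control_v3(), but using per-core (or, on V6, shared)
 * MMCC power-control registers, with the status register polled for up to
 * 100 us to confirm the transition (BIT(1) is assumed to reflect the
 * power-collapse state).
 */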
 411static int vcodec_control_v4(struct venus_core *core, u32 coreid, bool enable)
 412{
 413        void __iomem *ctrl, *stat;
 414        u32 val;
 415        int ret;
 416
 417        if (IS_V6(core)) {
 418                ctrl = core->wrapper_base + WRAPPER_CORE_POWER_CONTROL_V6;
 419                stat = core->wrapper_base + WRAPPER_CORE_POWER_STATUS_V6;
 420        } else if (coreid == VIDC_CORE_ID_1) {
 421                ctrl = core->wrapper_base + WRAPPER_VCODEC0_MMCC_POWER_CONTROL;
 422                stat = core->wrapper_base + WRAPPER_VCODEC0_MMCC_POWER_STATUS;
 423        } else {
 424                ctrl = core->wrapper_base + WRAPPER_VCODEC1_MMCC_POWER_CONTROL;
 425                stat = core->wrapper_base + WRAPPER_VCODEC1_MMCC_POWER_STATUS;
 426        }
 427
 428        if (enable) {
 429                writel(0, ctrl);
 430
 431                ret = readl_poll_timeout(stat, val, val & BIT(1), 1, 100);
 432                if (ret)
 433                        return ret;
 434        } else {
 435                writel(1, ctrl);
 436
 437                ret = readl_poll_timeout(stat, val, !(val & BIT(1)), 1, 100);
 438                if (ret)
 439                        return ret;
 440        }
 441
 442        return 0;
 443}
 444
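/*
 * Power a set of vcodec cores (a VIDC_CORE_ID_* mask) off or on: hold the
 * core out of power collapse, toggle its clocks, and drop or take a runtime
 * PM reference on the matching power domain (pmdomains[1]/[2]).
 */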
 445static int poweroff_coreid(struct venus_core *core, unsigned int coreid_mask)
 446{
 447        int ret;
 448
 449        if (coreid_mask & VIDC_CORE_ID_1) {
 450                ret = vcodec_control_v4(core, VIDC_CORE_ID_1, true);
 451                if (ret)
 452                        return ret;
 453
 454                vcodec_clks_disable(core, core->vcodec0_clks);
 455
 456                ret = vcodec_control_v4(core, VIDC_CORE_ID_1, false);
 457                if (ret)
 458                        return ret;
 459
 460                ret = pm_runtime_put_sync(core->pmdomains[1]);
 461                if (ret < 0)
 462                        return ret;
 463        }
 464
 465        if (coreid_mask & VIDC_CORE_ID_2) {
 466                ret = vcodec_control_v4(core, VIDC_CORE_ID_2, true);
 467                if (ret)
 468                        return ret;
 469
 470                vcodec_clks_disable(core, core->vcodec1_clks);
 471
 472                ret = vcodec_control_v4(core, VIDC_CORE_ID_2, false);
 473                if (ret)
 474                        return ret;
 475
 476                ret = pm_runtime_put_sync(core->pmdomains[2]);
 477                if (ret < 0)
 478                        return ret;
 479        }
 480
 481        return 0;
 482}
 483
 484static int poweron_coreid(struct venus_core *core, unsigned int coreid_mask)
 485{
 486        int ret;
 487
 488        if (coreid_mask & VIDC_CORE_ID_1) {
 489                ret = pm_runtime_get_sync(core->pmdomains[1]);
 490                if (ret < 0)
 491                        return ret;
 492
 493                ret = vcodec_control_v4(core, VIDC_CORE_ID_1, true);
 494                if (ret)
 495                        return ret;
 496
 497                ret = vcodec_clks_enable(core, core->vcodec0_clks);
 498                if (ret)
 499                        return ret;
 500
 501                ret = vcodec_control_v4(core, VIDC_CORE_ID_1, false);
 502                if (ret < 0)
 503                        return ret;
 504        }
 505
 506        if (coreid_mask & VIDC_CORE_ID_2) {
 507                ret = pm_runtime_get_sync(core->pmdomains[2]);
 508                if (ret < 0)
 509                        return ret;
 510
 511                ret = vcodec_control_v4(core, VIDC_CORE_ID_2, true);
 512                if (ret)
 513                        return ret;
 514
 515                ret = vcodec_clks_enable(core, core->vcodec1_clks);
 516                if (ret)
 517                        return ret;
 518
 519                ret = vcodec_control_v4(core, VIDC_CORE_ID_2, false);
 520                if (ret < 0)
 521                        return ret;
 522        }
 523
 524        return 0;
 525}
 526
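/*
 * Find the least loaded vcodec core. Every other running instance
 * contributes its mbs-per-second load scaled by clk_data.vpp_freq; instances
 * bound to both cores (VIDC_CORE_ID_3) are split evenly between them. Core 1
 * is returned unconditionally when the instance may only use one core or the
 * SoC has a single vcodec.
 */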
 527static void
 528min_loaded_core(struct venus_inst *inst, u32 *min_coreid, u32 *min_load)
 529{
 530        u32 mbs_per_sec, load, core1_load = 0, core2_load = 0;
 531        u32 cores_max = core_num_max(inst);
 532        struct venus_core *core = inst->core;
 533        struct venus_inst *inst_pos;
 534        unsigned long vpp_freq;
 535        u32 coreid;
 536
 537        mutex_lock(&core->lock);
 538
 539        list_for_each_entry(inst_pos, &core->instances, list) {
 540                if (inst_pos == inst)
 541                        continue;
 542
 543                if (inst_pos->state != INST_START)
 544                        continue;
 545
 546                vpp_freq = inst_pos->clk_data.vpp_freq;
 547                coreid = inst_pos->clk_data.core_id;
 548
 549                mbs_per_sec = load_per_instance(inst_pos);
 550                load = mbs_per_sec * vpp_freq;
 551
 552                if ((coreid & VIDC_CORE_ID_3) == VIDC_CORE_ID_3) {
 553                        core1_load += load / 2;
 554                        core2_load += load / 2;
 555                } else if (coreid & VIDC_CORE_ID_1) {
 556                        core1_load += load;
 557                } else if (coreid & VIDC_CORE_ID_2) {
 558                        core2_load += load;
 559                }
 560        }
 561
 562        *min_coreid = core1_load <= core2_load ?
 563                        VIDC_CORE_ID_1 : VIDC_CORE_ID_2;
 564        *min_load = min(core1_load, core2_load);
 565
 566        if (cores_max < VIDC_CORE_ID_2 || core->res->vcodec_num < 2) {
 567                *min_coreid = VIDC_CORE_ID_1;
 568                *min_load = core1_load;
 569        }
 570
 571        mutex_unlock(&core->lock);
 572}
 573
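/*
 * Choose the vcodec core a new instance should run on and report it to the
 * firmware via HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE. With the legacy binding
 * decoders are pinned to core 1 and encoders to core 2; otherwise the least
 * loaded core is used, unless adding the instance would exceed the highest
 * frequency in the table, in which case -EINVAL is returned.
 */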
 574static int decide_core(struct venus_inst *inst)
 575{
 576        const u32 ptype = HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE;
 577        struct venus_core *core = inst->core;
 578        u32 min_coreid, min_load, inst_load;
 579        struct hfi_videocores_usage_type cu;
 580        unsigned long max_freq;
 581
 582        if (legacy_binding) {
 583                if (inst->session_type == VIDC_SESSION_TYPE_DEC)
 584                        cu.video_core_enable_mask = VIDC_CORE_ID_1;
 585                else
 586                        cu.video_core_enable_mask = VIDC_CORE_ID_2;
 587
 588                goto done;
 589        }
 590
 591        if (inst->clk_data.core_id != VIDC_CORE_ID_DEFAULT)
 592                return 0;
 593
 594        inst_load = load_per_instance(inst);
 595        inst_load *= inst->clk_data.vpp_freq;
 596        max_freq = core->res->freq_tbl[0].freq;
 597
 598        min_loaded_core(inst, &min_coreid, &min_load);
 599
 600        if ((inst_load + min_load) > max_freq) {
 601                dev_warn(core->dev, "HW is overloaded, needed: %u max: %lu\n",
 602                         inst_load, max_freq);
 603                return -EINVAL;
 604        }
 605
 606        inst->clk_data.core_id = min_coreid;
 607        cu.video_core_enable_mask = min_coreid;
 608
 609done:
 610        return hfi_session_set_property(inst, ptype, &cu);
 611}
 612
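/*
 * acquire_core() and release_core() reference-count the vcodec cores used by
 * an instance: only the first user powers a core on and only the last user
 * powers it off. The caller (coreid_power_v4()) holds core->lock.
 */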
 613static int acquire_core(struct venus_inst *inst)
 614{
 615        struct venus_core *core = inst->core;
 616        unsigned int coreid_mask = 0;
 617
 618        if (inst->core_acquired)
 619                return 0;
 620
 621        inst->core_acquired = true;
 622
 623        if (inst->clk_data.core_id & VIDC_CORE_ID_1) {
 624                if (core->core0_usage_count++)
 625                        return 0;
 626
 627                coreid_mask = VIDC_CORE_ID_1;
 628        }
 629
 630        if (inst->clk_data.core_id & VIDC_CORE_ID_2) {
 631                if (core->core1_usage_count++)
 632                        return 0;
 633
 634                coreid_mask |= VIDC_CORE_ID_2;
 635        }
 636
 637        return poweron_coreid(core, coreid_mask);
 638}
 639
 640static int release_core(struct venus_inst *inst)
 641{
 642        struct venus_core *core = inst->core;
 643        unsigned int coreid_mask = 0;
 644        int ret;
 645
 646        if (!inst->core_acquired)
 647                return 0;
 648
 649        if (inst->clk_data.core_id & VIDC_CORE_ID_1) {
 650                if (--core->core0_usage_count)
 651                        goto done;
 652
 653                coreid_mask = VIDC_CORE_ID_1;
 654        }
 655
 656        if (inst->clk_data.core_id & VIDC_CORE_ID_2) {
 657                if (--core->core1_usage_count)
 658                        goto done;
 659
 660                coreid_mask |= VIDC_CORE_ID_2;
 661        }
 662
 663        ret = poweroff_coreid(core, coreid_mask);
 664        if (ret)
 665                return ret;
 666
 667done:
 668        inst->clk_data.core_id = VIDC_CORE_ID_DEFAULT;
 669        inst->core_acquired = false;
 670        return 0;
 671}
 672
 673static int coreid_power_v4(struct venus_inst *inst, int on)
 674{
 675        struct venus_core *core = inst->core;
 676        int ret;
 677
 678        if (legacy_binding)
 679                return 0;
 680
 681        if (on == POWER_ON) {
 682                ret = decide_core(inst);
 683                if (ret)
 684                        return ret;
 685
 686                mutex_lock(&core->lock);
 687                ret = acquire_core(inst);
 688                mutex_unlock(&core->lock);
 689        } else {
 690                mutex_lock(&core->lock);
 691                ret = release_core(inst);
 692                mutex_unlock(&core->lock);
 693        }
 694
 695        return ret;
 696}
 697
 698static int vdec_get_v4(struct device *dev)
 699{
 700        struct venus_core *core = dev_get_drvdata(dev);
 701
 702        if (!legacy_binding)
 703                return 0;
 704
 705        return vcodec_clks_get(core, dev, core->vcodec0_clks,
 706                               core->res->vcodec0_clks);
 707}
 708
 709static void vdec_put_v4(struct device *dev)
 710{
 711        struct venus_core *core = dev_get_drvdata(dev);
 712        unsigned int i;
 713
 714        if (!legacy_binding)
 715                return;
 716
 717        for (i = 0; i < core->res->vcodec_clks_num; i++)
 718                core->vcodec0_clks[i] = NULL;
 719}
 720
 721static int vdec_power_v4(struct device *dev, int on)
 722{
 723        struct venus_core *core = dev_get_drvdata(dev);
 724        int ret;
 725
 726        if (!legacy_binding)
 727                return 0;
 728
 729        ret = vcodec_control_v4(core, VIDC_CORE_ID_1, true);
 730        if (ret)
 731                return ret;
 732
 733        if (on == POWER_ON)
 734                ret = vcodec_clks_enable(core, core->vcodec0_clks);
 735        else
 736                vcodec_clks_disable(core, core->vcodec0_clks);
 737
 738        vcodec_control_v4(core, VIDC_CORE_ID_1, false);
 739
 740        return ret;
 741}
 742
 743static int venc_get_v4(struct device *dev)
 744{
 745        struct venus_core *core = dev_get_drvdata(dev);
 746
 747        if (!legacy_binding)
 748                return 0;
 749
 750        return vcodec_clks_get(core, dev, core->vcodec1_clks,
 751                               core->res->vcodec1_clks);
 752}
 753
 754static void venc_put_v4(struct device *dev)
 755{
 756        struct venus_core *core = dev_get_drvdata(dev);
 757        unsigned int i;
 758
 759        if (!legacy_binding)
 760                return;
 761
 762        for (i = 0; i < core->res->vcodec_clks_num; i++)
 763                core->vcodec1_clks[i] = NULL;
 764}
 765
 766static int venc_power_v4(struct device *dev, int on)
 767{
 768        struct venus_core *core = dev_get_drvdata(dev);
 769        int ret;
 770
 771        if (!legacy_binding)
 772                return 0;
 773
 774        ret = vcodec_control_v4(core, VIDC_CORE_ID_2, true);
 775        if (ret)
 776                return ret;
 777
 778        if (on == POWER_ON)
 779                ret = vcodec_clks_enable(core, core->vcodec1_clks);
 780        else
 781                vcodec_clks_disable(core, core->vcodec1_clks);
 782
 783        vcodec_control_v4(core, VIDC_CORE_ID_2, false);
 784
 785        return ret;
 786}
 787
 788static int vcodec_domains_get(struct venus_core *core)
 789{
 790        int ret;
 791        struct opp_table *opp_table;
 792        struct device **opp_virt_dev;
 793        struct device *dev = core->dev;
 794        const struct venus_resources *res = core->res;
 795        struct device *pd;
 796        unsigned int i;
 797
 798        if (!res->vcodec_pmdomains_num)
 799                goto skip_pmdomains;
 800
 801        for (i = 0; i < res->vcodec_pmdomains_num; i++) {
 802                pd = dev_pm_domain_attach_by_name(dev,
 803                                                  res->vcodec_pmdomains[i]);
 804                if (IS_ERR(pd))
 805                        return PTR_ERR(pd);
 806                core->pmdomains[i] = pd;
 807        }
 808
 809skip_pmdomains:
 810        if (!core->has_opp_table)
 811                return 0;
 812
 813        /* Attach the power domain for setting performance state */
 814        opp_table = dev_pm_opp_attach_genpd(dev, res->opp_pmdomain, &opp_virt_dev);
 815        if (IS_ERR(opp_table)) {
 816                ret = PTR_ERR(opp_table);
 817                goto opp_attach_err;
 818        }
 819
 820        core->opp_pmdomain = *opp_virt_dev;
 821        core->opp_dl_venus = device_link_add(dev, core->opp_pmdomain,
 822                                             DL_FLAG_RPM_ACTIVE |
 823                                             DL_FLAG_PM_RUNTIME |
 824                                             DL_FLAG_STATELESS);
 825        if (!core->opp_dl_venus) {
 826                ret = -ENODEV;
 827                goto opp_dl_add_err;
 828        }
 829
 830        return 0;
 831
 832opp_dl_add_err:
 833        dev_pm_opp_detach_genpd(core->opp_table);
 834opp_attach_err:
 835        for (i = 0; i < res->vcodec_pmdomains_num; i++) {
 836                if (IS_ERR_OR_NULL(core->pmdomains[i]))
 837                        continue;
 838                dev_pm_domain_detach(core->pmdomains[i], true);
 839        }
 840
 841        return ret;
 842}
 843
 844static void vcodec_domains_put(struct venus_core *core)
 845{
 846        const struct venus_resources *res = core->res;
 847        unsigned int i;
 848
 849        if (!res->vcodec_pmdomains_num)
 850                goto skip_pmdomains;
 851
 852        for (i = 0; i < res->vcodec_pmdomains_num; i++) {
 853                if (IS_ERR_OR_NULL(core->pmdomains[i]))
 854                        continue;
 855                dev_pm_domain_detach(core->pmdomains[i], true);
 856        }
 857
 858skip_pmdomains:
 859        if (!core->has_opp_table)
 860                return;
 861
 862        if (core->opp_dl_venus)
 863                device_link_del(core->opp_dl_venus);
 864
 865        dev_pm_opp_detach_genpd(core->opp_table);
 866}
 867
 868static int core_resets_reset(struct venus_core *core)
 869{
 870        const struct venus_resources *res = core->res;
 871        unsigned int i;
 872        int ret;
 873
 874        if (!res->resets_num)
 875                return 0;
 876
 877        for (i = 0; i < res->resets_num; i++) {
 878                ret = reset_control_assert(core->resets[i]);
 879                if (ret)
 880                        goto err;
 881
 882                usleep_range(150, 250);
 883                ret = reset_control_deassert(core->resets[i]);
 884                if (ret)
 885                        goto err;
 886        }
 887
 888err:
 889        return ret;
 890}
 891
 892static int core_resets_get(struct venus_core *core)
 893{
 894        struct device *dev = core->dev;
 895        const struct venus_resources *res = core->res;
 896        unsigned int i;
 897        int ret;
 898
 899        if (!res->resets_num)
 900                return 0;
 901
 902        for (i = 0; i < res->resets_num; i++) {
 903                core->resets[i] =
 904                        devm_reset_control_get_exclusive(dev, res->resets[i]);
 905                if (IS_ERR(core->resets[i])) {
 906                        ret = PTR_ERR(core->resets[i]);
 907                        return ret;
 908                }
 909        }
 910
 911        return 0;
 912}
 913
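/*
 * v4 resource acquisition: core and vcodec clocks, reset lines and, for the
 * non-legacy binding, the "core" OPP clock name, the optional OPP table from
 * the device tree and the vcodec/OPP power domains.
 */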
 914static int core_get_v4(struct venus_core *core)
 915{
 916        struct device *dev = core->dev;
 917        const struct venus_resources *res = core->res;
 918        int ret;
 919
 920        ret = core_clks_get(core);
 921        if (ret)
 922                return ret;
 923
 924        if (!res->vcodec_pmdomains_num)
 925                legacy_binding = true;
 926
 927        dev_info(dev, "%s legacy binding\n", legacy_binding ? "" : "non");
 928
 929        ret = vcodec_clks_get(core, dev, core->vcodec0_clks, res->vcodec0_clks);
 930        if (ret)
 931                return ret;
 932
 933        ret = vcodec_clks_get(core, dev, core->vcodec1_clks, res->vcodec1_clks);
 934        if (ret)
 935                return ret;
 936
 937        ret = core_resets_get(core);
 938        if (ret)
 939                return ret;
 940
 941        if (legacy_binding)
 942                return 0;
 943
 944        core->opp_table = dev_pm_opp_set_clkname(dev, "core");
 945        if (IS_ERR(core->opp_table))
 946                return PTR_ERR(core->opp_table);
 947
 948        if (core->res->opp_pmdomain) {
 949                ret = dev_pm_opp_of_add_table(dev);
 950                if (!ret) {
 951                        core->has_opp_table = true;
 952                } else if (ret != -ENODEV) {
 953                        dev_err(dev, "invalid OPP table in device tree\n");
 954                        dev_pm_opp_put_clkname(core->opp_table);
 955                        return ret;
 956                }
 957        }
 958
 959        ret = vcodec_domains_get(core);
 960        if (ret) {
 961                if (core->has_opp_table)
 962                        dev_pm_opp_of_remove_table(dev);
 963                dev_pm_opp_put_clkname(core->opp_table);
 964                return ret;
 965        }
 966
 967        return 0;
 968}
 969
 970static void core_put_v4(struct venus_core *core)
 971{
 972        struct device *dev = core->dev;
 973
 974        if (legacy_binding)
 975                return;
 976
 977        vcodec_domains_put(core);
 978
 979        if (core->has_opp_table)
 980                dev_pm_opp_of_remove_table(dev);
 981        dev_pm_opp_put_clkname(core->opp_table);
 982
 983}
 984
 985static int core_power_v4(struct venus_core *core, int on)
 986{
 987        struct device *dev = core->dev;
 988        struct device *pmctrl = core->pmdomains[0];
 989        int ret = 0;
 990
 991        if (on == POWER_ON) {
 992                if (pmctrl) {
 993                        ret = pm_runtime_get_sync(pmctrl);
 994                        if (ret < 0) {
 995                                pm_runtime_put_noidle(pmctrl);
 996                                return ret;
 997                        }
 998                }
 999
1000                ret = core_resets_reset(core);
1001                if (ret) {
1002                        if (pmctrl)
1003                                pm_runtime_put_sync(pmctrl);
1004                        return ret;
1005                }
1006
1007                ret = core_clks_enable(core);
1008                if (ret < 0 && pmctrl)
1009                        pm_runtime_put_sync(pmctrl);
1010        } else {
1011                /* Drop the performance state vote */
1012                if (core->opp_pmdomain)
1013                        dev_pm_opp_set_rate(dev, 0);
1014
1015                core_clks_disable(core);
1016
1017                ret = core_resets_reset(core);
1018
1019                if (pmctrl)
1020                        pm_runtime_put_sync(pmctrl);
1021        }
1022
1023        return ret;
1024}
1025
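/*
 * Estimate the clock frequency one instance needs: VPP and VSP demand are
 * derived from the macroblock rate and the per-instance vpp/vsp cycle
 * factors, the 21/20 and 10/7 overhead factors below are applied, and the
 * larger of the two results is returned.
 */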
1026static unsigned long calculate_inst_freq(struct venus_inst *inst,
1027                                         unsigned long filled_len)
1028{
1029        unsigned long vpp_freq = 0, vsp_freq = 0;
1030        u32 fps = (u32)inst->fps;
1031        u32 mbs_per_sec;
1032
1033        mbs_per_sec = load_per_instance(inst);
1034
1035        if (inst->state != INST_START)
1036                return 0;
1037
1038        vpp_freq = mbs_per_sec * inst->clk_data.vpp_freq;
1039        /* 21 / 20 is overhead factor */
1040        vpp_freq += vpp_freq / 20;
1041        vsp_freq = mbs_per_sec * inst->clk_data.vsp_freq;
1042
1043        /* 10 / 7 is overhead factor */
1044        if (inst->session_type == VIDC_SESSION_TYPE_ENC)
1045                vsp_freq += (inst->controls.enc.bitrate * 10) / 7;
1046        else
1047                vsp_freq += ((fps * filled_len * 8) * 10) / 7;
1048
1049        return max(vpp_freq, vsp_freq);
1050}
1051
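/*
 * Recompute this instance's frequency demand, sum the demand per vcodec core
 * over all instances, clamp the result to the highest entry of the frequency
 * table, and program the core clocks (through the OPP table) and the
 * interconnect bandwidth accordingly.
 */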
1052static int load_scale_v4(struct venus_inst *inst)
1053{
1054        struct venus_core *core = inst->core;
1055        const struct freq_tbl *table = core->res->freq_tbl;
1056        unsigned int num_rows = core->res->freq_tbl_size;
1057        struct device *dev = core->dev;
1058        unsigned long freq = 0, freq_core1 = 0, freq_core2 = 0;
1059        unsigned long filled_len = 0;
1060        int i, ret;
1061
1062        for (i = 0; i < inst->num_input_bufs; i++)
1063                filled_len = max(filled_len, inst->payloads[i]);
1064
1065        if (inst->session_type == VIDC_SESSION_TYPE_DEC && !filled_len)
1066                return 0;
1067
1068        freq = calculate_inst_freq(inst, filled_len);
1069        inst->clk_data.freq = freq;
1070
1071        mutex_lock(&core->lock);
1072        list_for_each_entry(inst, &core->instances, list) {
1073                if (inst->clk_data.core_id == VIDC_CORE_ID_1) {
1074                        freq_core1 += inst->clk_data.freq;
1075                } else if (inst->clk_data.core_id == VIDC_CORE_ID_2) {
1076                        freq_core2 += inst->clk_data.freq;
1077                } else if (inst->clk_data.core_id == VIDC_CORE_ID_3) {
1078                        freq_core1 += inst->clk_data.freq;
1079                        freq_core2 += inst->clk_data.freq;
1080                }
1081        }
1082        mutex_unlock(&core->lock);
1083
1084        freq = max(freq_core1, freq_core2);
1085
 1086        if (freq > table[0].freq) {
 1087                dev_warn(dev, "HW is overloaded, needed: %lu max: %lu\n",
 1088                         freq, table[0].freq);
 1089                freq = table[0].freq;
 1090                goto set_freq;
 1091        }
1092
1093        for (i = num_rows - 1 ; i >= 0; i--) {
1094                if (freq <= table[i].freq) {
1095                        freq = table[i].freq;
1096                        break;
1097                }
1098        }
1099
1100set_freq:
1101
1102        ret = core_clks_set_rate(core, freq);
1103        if (ret) {
1104                dev_err(dev, "failed to set clock rate %lu (%d)\n",
1105                        freq, ret);
1106                return ret;
1107        }
1108
1109        ret = load_scale_bw(core);
1110        if (ret) {
1111                dev_err(dev, "failed to set bandwidth (%d)\n",
1112                        ret);
1113                return ret;
1114        }
1115
1116        return 0;
1117}
1118
1119static const struct venus_pm_ops pm_ops_v4 = {
1120        .core_get = core_get_v4,
1121        .core_put = core_put_v4,
1122        .core_power = core_power_v4,
1123        .vdec_get = vdec_get_v4,
1124        .vdec_put = vdec_put_v4,
1125        .vdec_power = vdec_power_v4,
1126        .venc_get = venc_get_v4,
1127        .venc_put = venc_put_v4,
1128        .venc_power = venc_power_v4,
1129        .coreid_power = coreid_power_v4,
1130        .load_scale = load_scale_v4,
1131};
1132
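/*
 * Return the PM ops matching the HFI version of the firmware: 4XX and 6XX
 * hardware shares the v4 ops, and unknown versions fall back to v1.
 */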
1133const struct venus_pm_ops *venus_pm_get(enum hfi_version version)
1134{
1135        switch (version) {
1136        case HFI_VERSION_1XX:
1137        default:
1138                return &pm_ops_v1;
1139        case HFI_VERSION_3XX:
1140                return &pm_ops_v3;
1141        case HFI_VERSION_4XX:
1142        case HFI_VERSION_6XX:
1143                return &pm_ops_v4;
1144        }
1145
1146        return NULL;
1147}
1148