linux/drivers/gpu/drm/msm/msm_mdss.c
/*
 * SPDX-License-Identifier: GPL-2.0
 * Copyright (c) 2018, The Linux Foundation
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interconnect.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdesc.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include "msm_drv.h"
#include "msm_kms.h"

/* for DPU_HW_* defines */
#include "disp/dpu1/dpu_hw_catalog.h"

#define HW_REV                          0x0
#define HW_INTR_STATUS                  0x0010

#define UBWC_STATIC                     0x144
#define UBWC_CTRL_2                     0x150
#define UBWC_PREDICTION_MODE            0x154

#define MIN_IB_BW       400000000UL /* Minimum ib (peak bandwidth) vote: 400 MB/s */

struct msm_mdss {
        struct device *dev;

        void __iomem *mmio;
        struct clk_bulk_data *clocks;
        size_t num_clocks;
        bool is_mdp5;
        struct {
                unsigned long enabled_mask;
                struct irq_domain *domain;
        } irq_controller;
        struct icc_path *path[2];
        u32 num_paths;
};

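/*
 * Acquire the interconnect paths from the MDP ports to memory. Both paths
 * are treated as optional: if "mdp0-mem" is missing no path is registered
 * at all, and "mdp1-mem" only exists on SoCs with a second MDP port.
 */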
static int msm_mdss_parse_data_bus_icc_path(struct device *dev,
                                            struct msm_mdss *msm_mdss)
{
        struct icc_path *path0 = of_icc_get(dev, "mdp0-mem");
        struct icc_path *path1 = of_icc_get(dev, "mdp1-mem");

        if (IS_ERR_OR_NULL(path0))
                return PTR_ERR_OR_ZERO(path0);

        msm_mdss->path[0] = path0;
        msm_mdss->num_paths = 1;

        if (!IS_ERR_OR_NULL(path1)) {
                msm_mdss->path[1] = path1;
                msm_mdss->num_paths++;
        }

        return 0;
}

static void msm_mdss_put_icc_path(void *data)
{
        struct msm_mdss *msm_mdss = data;
        int i;

        for (i = 0; i < msm_mdss->num_paths; i++)
                icc_put(msm_mdss->path[i]);
}

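/*
 * Request the same bandwidth on every registered path. Only the peak (ib)
 * bandwidth is voted; the average (ab) vote is left at zero.
 */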
static void msm_mdss_icc_request_bw(struct msm_mdss *msm_mdss, unsigned long bw)
{
        int i;

        for (i = 0; i < msm_mdss->num_paths; i++)
                icc_set_bw(msm_mdss->path[i], 0, Bps_to_icc(bw));
}

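/*
 * Chained handler for the top-level MDSS interrupt: read HW_INTR_STATUS
 * and dispatch each pending bit, highest first, into the MDSS irq domain.
 */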
static void msm_mdss_irq(struct irq_desc *desc)
{
        struct msm_mdss *msm_mdss = irq_desc_get_handler_data(desc);
        struct irq_chip *chip = irq_desc_get_chip(desc);
        u32 interrupts;

        chained_irq_enter(chip, desc);

        interrupts = readl_relaxed(msm_mdss->mmio + HW_INTR_STATUS);

        while (interrupts) {
                irq_hw_number_t hwirq = fls(interrupts) - 1;
                int rc;

                rc = generic_handle_domain_irq(msm_mdss->irq_controller.domain,
                                               hwirq);
                if (rc < 0) {
                        dev_err(msm_mdss->dev, "handle irq fail: irq=%lu rc=%d\n",
                                hwirq, rc);
                        break;
                }

                interrupts &= ~(1 << hwirq);
        }

        chained_irq_exit(chip, desc);
}

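/*
 * Mask/unmask only track state in the software enabled_mask; no hardware
 * interrupt-enable register is written here.
 */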
static void msm_mdss_irq_mask(struct irq_data *irqd)
{
        struct msm_mdss *msm_mdss = irq_data_get_irq_chip_data(irqd);

        /* memory barrier: order prior accesses before the mask update */
        smp_mb__before_atomic();
        clear_bit(irqd->hwirq, &msm_mdss->irq_controller.enabled_mask);
        /* memory barrier: make the mask update visible before later accesses */
        smp_mb__after_atomic();
}

static void msm_mdss_irq_unmask(struct irq_data *irqd)
{
        struct msm_mdss *msm_mdss = irq_data_get_irq_chip_data(irqd);

        /* memory barrier: order prior accesses before the mask update */
        smp_mb__before_atomic();
        set_bit(irqd->hwirq, &msm_mdss->irq_controller.enabled_mask);
        /* memory barrier: make the mask update visible before later accesses */
        smp_mb__after_atomic();
}

static struct irq_chip msm_mdss_irq_chip = {
        .name = "msm_mdss",
        .irq_mask = msm_mdss_irq_mask,
        .irq_unmask = msm_mdss_irq_unmask,
};

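/*
 * Separate lockdep classes for irqs in this domain: they are taken while
 * the chained parent handler runs, which lockdep would otherwise report
 * as recursive locking within the default irq_desc class.
 */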
static struct lock_class_key msm_mdss_lock_key, msm_mdss_request_key;

static int msm_mdss_irqdomain_map(struct irq_domain *domain,
                unsigned int irq, irq_hw_number_t hwirq)
{
        struct msm_mdss *msm_mdss = domain->host_data;

        irq_set_lockdep_class(irq, &msm_mdss_lock_key, &msm_mdss_request_key);
        irq_set_chip_and_handler(irq, &msm_mdss_irq_chip, handle_level_irq);

        return irq_set_chip_data(irq, msm_mdss);
}

static const struct irq_domain_ops msm_mdss_irqdomain_ops = {
        .map = msm_mdss_irqdomain_map,
        .xlate = irq_domain_xlate_onecell,
};

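/*
 * Register a linear irq domain with 32 hwirqs, one per bit of the 32-bit
 * HW_INTR_STATUS register, through which the child blocks (DPU/MDP5, DSI,
 * ...) receive their interrupts.
 */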
static int _msm_mdss_irq_domain_add(struct msm_mdss *msm_mdss)
{
        struct device *dev;
        struct irq_domain *domain;

        dev = msm_mdss->dev;

        domain = irq_domain_add_linear(dev->of_node, 32,
                        &msm_mdss_irqdomain_ops, msm_mdss);
        if (!domain) {
                dev_err(dev, "failed to add irq_domain\n");
                return -EINVAL;
        }

        msm_mdss->irq_controller.enabled_mask = 0;
        msm_mdss->irq_controller.domain = domain;

        return 0;
}

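/*
 * Runtime-resume path: restore the minimum interconnect vote, enable the
 * clocks and, on DPU hardware, re-program the UBWC registers that live in
 * the mdss region.
 */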
static int msm_mdss_enable(struct msm_mdss *msm_mdss)
{
        int ret;

        /*
         * Several components have AXI clocks that can only be turned on if
         * the interconnect is enabled (non-zero bandwidth), so make sure the
         * interconnects carry at least a minimum vote before enabling clocks.
         */
        msm_mdss_icc_request_bw(msm_mdss, MIN_IB_BW);

        ret = clk_bulk_prepare_enable(msm_mdss->num_clocks, msm_mdss->clocks);
        if (ret) {
                dev_err(msm_mdss->dev, "clock enable failed, ret:%d\n", ret);
                return ret;
        }

        /*
         * HW_REV requires MDSS_MDP_CLK, which is not enabled by the mdss on
         * mdp5 hardware. Skip reading it for now.
         */
        if (msm_mdss->is_mdp5)
                return 0;

        /*
         * The UBWC config is part of the "mdss" region, which is not
         * accessible from the rest of the driver, so hardcode the known
         * per-revision configurations here.
         */
        switch (readl_relaxed(msm_mdss->mmio + HW_REV)) {
        case DPU_HW_VER_500:
        case DPU_HW_VER_501:
                writel_relaxed(0x420, msm_mdss->mmio + UBWC_STATIC);
                break;
        case DPU_HW_VER_600:
                /* TODO: 0x102e for LP_DDR4 */
                writel_relaxed(0x103e, msm_mdss->mmio + UBWC_STATIC);
                writel_relaxed(2, msm_mdss->mmio + UBWC_CTRL_2);
                writel_relaxed(1, msm_mdss->mmio + UBWC_PREDICTION_MODE);
                break;
        case DPU_HW_VER_620:
                writel_relaxed(0x1e, msm_mdss->mmio + UBWC_STATIC);
                break;
        case DPU_HW_VER_720:
                writel_relaxed(0x101e, msm_mdss->mmio + UBWC_STATIC);
                break;
        }

        return ret;
}

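/* Runtime-suspend path: gate the clocks, then drop the interconnect vote. */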
static int msm_mdss_disable(struct msm_mdss *msm_mdss)
{
        clk_bulk_disable_unprepare(msm_mdss->num_clocks, msm_mdss->clocks);
        msm_mdss_icc_request_bw(msm_mdss, 0);

        return 0;
}

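/*
 * Undo msm_mdss_init(): put the device back to sleep, remove the irq
 * domain and detach the chained handler from the top-level interrupt.
 */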
static void msm_mdss_destroy(struct msm_mdss *msm_mdss)
{
        struct platform_device *pdev = to_platform_device(msm_mdss->dev);
        int irq;

        pm_runtime_suspend(msm_mdss->dev);
        pm_runtime_disable(msm_mdss->dev);
        irq_domain_remove(msm_mdss->irq_controller.domain);
        msm_mdss->irq_controller.domain = NULL;
        irq = platform_get_irq(pdev, 0);
        irq_set_chained_handler_and_data(irq, NULL, NULL);
}

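/*
 * Pulse the optional MDSS reset line before the hardware is touched;
 * returns 0 when the device tree specifies no reset.
 */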
static int msm_mdss_reset(struct device *dev)
{
        struct reset_control *reset;

        reset = reset_control_get_optional_exclusive(dev, NULL);
        if (!reset) {
                /* Optional reset not specified */
                return 0;
        } else if (IS_ERR(reset)) {
                return dev_err_probe(dev, PTR_ERR(reset),
                                     "failed to acquire mdss reset\n");
        }

        reset_control_assert(reset);
        /*
         * Tests indicate that the reset has to be held for some period of
         * time; hold it for roughly one frame in a typical system.
         */
        msleep(20);
        reset_control_deassert(reset);

        reset_control_put(reset);

        return 0;
}

/*
 * MDP5 MDSS uses at most three specified clocks.
 */
#define MDP5_MDSS_NUM_CLOCKS 3
static int mdp5_mdss_parse_clock(struct platform_device *pdev, struct clk_bulk_data **clocks)
{
        struct clk_bulk_data *bulk;
        int num_clocks = 0;
        int ret;

        if (!pdev)
                return -EINVAL;

        bulk = devm_kcalloc(&pdev->dev, MDP5_MDSS_NUM_CLOCKS, sizeof(struct clk_bulk_data), GFP_KERNEL);
        if (!bulk)
                return -ENOMEM;

        bulk[num_clocks++].id = "iface";
        bulk[num_clocks++].id = "bus";
        bulk[num_clocks++].id = "vsync";

        ret = devm_clk_bulk_get_optional(&pdev->dev, num_clocks, bulk);
        if (ret)
                return ret;

        *clocks = bulk;

        return num_clocks;
}

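/*
 * Shared setup for MDP5 and DPU based MDSS: reset the block, map the "mdss"
 * register region, acquire interconnect paths and clocks, add the irq
 * domain and install the chained handler for the top-level interrupt.
 */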
static struct msm_mdss *msm_mdss_init(struct platform_device *pdev, bool is_mdp5)
{
        struct msm_mdss *msm_mdss;
        int ret;
        int irq;

        ret = msm_mdss_reset(&pdev->dev);
        if (ret)
                return ERR_PTR(ret);

        msm_mdss = devm_kzalloc(&pdev->dev, sizeof(*msm_mdss), GFP_KERNEL);
        if (!msm_mdss)
                return ERR_PTR(-ENOMEM);

        msm_mdss->mmio = devm_platform_ioremap_resource_byname(pdev, is_mdp5 ? "mdss_phys" : "mdss");
        if (IS_ERR(msm_mdss->mmio))
                return ERR_CAST(msm_mdss->mmio);

        dev_dbg(&pdev->dev, "mapped mdss address space @%pK\n", msm_mdss->mmio);

        ret = msm_mdss_parse_data_bus_icc_path(&pdev->dev, msm_mdss);
        if (ret)
                return ERR_PTR(ret);
        ret = devm_add_action_or_reset(&pdev->dev, msm_mdss_put_icc_path, msm_mdss);
        if (ret)
                return ERR_PTR(ret);

        if (is_mdp5)
                ret = mdp5_mdss_parse_clock(pdev, &msm_mdss->clocks);
        else
                ret = devm_clk_bulk_get_all(&pdev->dev, &msm_mdss->clocks);
        if (ret < 0) {
                dev_err(&pdev->dev, "failed to parse clocks, ret=%d\n", ret);
                return ERR_PTR(ret);
        }
        msm_mdss->num_clocks = ret;
        msm_mdss->is_mdp5 = is_mdp5;

        msm_mdss->dev = &pdev->dev;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return ERR_PTR(irq);

        ret = _msm_mdss_irq_domain_add(msm_mdss);
        if (ret)
                return ERR_PTR(ret);

        irq_set_chained_handler_and_data(irq, msm_mdss_irq,
                                         msm_mdss);

        pm_runtime_enable(&pdev->dev);

        return msm_mdss;
}

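/* Runtime PM callbacks: msm_mdss_enable()/msm_mdss_disable() do the work. */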
static int __maybe_unused mdss_runtime_suspend(struct device *dev)
{
        struct msm_mdss *mdss = dev_get_drvdata(dev);

        DBG("");

        return msm_mdss_disable(mdss);
}

static int __maybe_unused mdss_runtime_resume(struct device *dev)
{
        struct msm_mdss *mdss = dev_get_drvdata(dev);

        DBG("");

        return msm_mdss_enable(mdss);
}

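/*
 * System sleep: if the device is already runtime-suspended, clocks and
 * bandwidth votes have been dropped already and there is nothing to do.
 */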
static int __maybe_unused mdss_pm_suspend(struct device *dev)
{
        if (pm_runtime_suspended(dev))
                return 0;

        return mdss_runtime_suspend(dev);
}

static int __maybe_unused mdss_pm_resume(struct device *dev)
{
        if (pm_runtime_suspended(dev))
                return 0;

        return mdss_runtime_resume(dev);
}

static const struct dev_pm_ops mdss_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(mdss_pm_suspend, mdss_pm_resume)
        SET_RUNTIME_PM_OPS(mdss_runtime_suspend, mdss_runtime_resume, NULL)
};


static int mdss_probe(struct platform_device *pdev)
{
        struct msm_mdss *mdss;
        bool is_mdp5 = of_device_is_compatible(pdev->dev.of_node, "qcom,mdss");
        struct device *dev = &pdev->dev;
        int ret;

        mdss = msm_mdss_init(pdev, is_mdp5);
        if (IS_ERR(mdss))
                return PTR_ERR(mdss);

        platform_set_drvdata(pdev, mdss);

        /*
         * MDP5/DPU based devices don't have a flat hierarchy. There is a top
         * level parent: MDSS, and children: MDP5/DPU, DSI, HDMI, eDP etc.
         * Populate the children devices, find the MDP5/DPU node, and then add
         * the interfaces to our components list.
         */
        ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
        if (ret) {
                DRM_DEV_ERROR(dev, "failed to populate children devices\n");
                msm_mdss_destroy(mdss);
                return ret;
        }

        return 0;
}

static int mdss_remove(struct platform_device *pdev)
{
        struct msm_mdss *mdss = platform_get_drvdata(pdev);

        of_platform_depopulate(&pdev->dev);

        msm_mdss_destroy(mdss);

        return 0;
}

static const struct of_device_id mdss_dt_match[] = {
        { .compatible = "qcom,mdss" },
        { .compatible = "qcom,msm8998-mdss" },
        { .compatible = "qcom,qcm2290-mdss" },
        { .compatible = "qcom,sdm845-mdss" },
        { .compatible = "qcom,sc7180-mdss" },
        { .compatible = "qcom,sc7280-mdss" },
        { .compatible = "qcom,sc8180x-mdss" },
        { .compatible = "qcom,sm8150-mdss" },
        { .compatible = "qcom,sm8250-mdss" },
        {}
};
MODULE_DEVICE_TABLE(of, mdss_dt_match);

static struct platform_driver mdss_platform_driver = {
        .probe      = mdss_probe,
        .remove     = mdss_remove,
        .driver     = {
                .name   = "msm-mdss",
                .of_match_table = mdss_dt_match,
                .pm     = &mdss_pm_ops,
        },
};

void __init msm_mdss_register(void)
{
        platform_driver_register(&mdss_platform_driver);
}

void __exit msm_mdss_unregister(void)
{
        platform_driver_unregister(&mdss_platform_driver);
}