linux/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
<<
>>
Prefs
   1/*
   2 * SPDX-License-Identifier: GPL-2.0
   3 * Copyright (c) 2018, The Linux Foundation
   4 */
   5
   6#include "dpu_kms.h"
   7
/* Upcast from the embedded msm_mdss base back to the wrapping dpu_mdss. */
#define to_dpu_mdss(x) container_of(x, struct dpu_mdss, base)

/* Top-level MDSS interrupt status register offset. */
#define HW_INTR_STATUS			0x0010

/*
 * DPU-specific MDSS state, wrapping the generic msm_mdss base object.
 */
struct dpu_mdss {
	struct msm_mdss base;		/* generic mdss handle (funcs, dev) */
	void __iomem *mmio;		/* mapped "mdss" register region */
	unsigned long mmio_len;		/* size of the mapped region */
	u32 hwversion;			/* value read from register offset 0 */
	struct dss_module_power mp;	/* parsed clock list */
	struct dpu_irq_controller irq_controller; /* child irq domain + mask */
};
  20
  21static irqreturn_t dpu_mdss_irq(int irq, void *arg)
  22{
  23        struct dpu_mdss *dpu_mdss = arg;
  24        u32 interrupts;
  25
  26        interrupts = readl_relaxed(dpu_mdss->mmio + HW_INTR_STATUS);
  27
  28        while (interrupts) {
  29                irq_hw_number_t hwirq = fls(interrupts) - 1;
  30                unsigned int mapping;
  31                int rc;
  32
  33                mapping = irq_find_mapping(dpu_mdss->irq_controller.domain,
  34                                           hwirq);
  35                if (mapping == 0) {
  36                        DRM_ERROR("couldn't find irq mapping for %lu\n", hwirq);
  37                        return IRQ_NONE;
  38                }
  39
  40                rc = generic_handle_irq(mapping);
  41                if (rc < 0) {
  42                        DRM_ERROR("handle irq fail: irq=%lu mapping=%u rc=%d\n",
  43                                  hwirq, mapping, rc);
  44                        return IRQ_NONE;
  45                }
  46
  47                interrupts &= ~(1 << hwirq);
  48        }
  49
  50        return IRQ_HANDLED;
  51}
  52
  53static void dpu_mdss_irq_mask(struct irq_data *irqd)
  54{
  55        struct dpu_mdss *dpu_mdss = irq_data_get_irq_chip_data(irqd);
  56
  57        /* memory barrier */
  58        smp_mb__before_atomic();
  59        clear_bit(irqd->hwirq, &dpu_mdss->irq_controller.enabled_mask);
  60        /* memory barrier */
  61        smp_mb__after_atomic();
  62}
  63
  64static void dpu_mdss_irq_unmask(struct irq_data *irqd)
  65{
  66        struct dpu_mdss *dpu_mdss = irq_data_get_irq_chip_data(irqd);
  67
  68        /* memory barrier */
  69        smp_mb__before_atomic();
  70        set_bit(irqd->hwirq, &dpu_mdss->irq_controller.enabled_mask);
  71        /* memory barrier */
  72        smp_mb__after_atomic();
  73}
  74
/* irq_chip for the virtual MDSS interrupt lines; mask state is kept in
 * software (enabled_mask) rather than a hardware register. */
static struct irq_chip dpu_mdss_irq_chip = {
	.name = "dpu_mdss",
	.irq_mask = dpu_mdss_irq_mask,
	.irq_unmask = dpu_mdss_irq_unmask,
};
  80
  81static int dpu_mdss_irqdomain_map(struct irq_domain *domain,
  82                unsigned int irq, irq_hw_number_t hwirq)
  83{
  84        struct dpu_mdss *dpu_mdss = domain->host_data;
  85        int ret;
  86
  87        irq_set_chip_and_handler(irq, &dpu_mdss_irq_chip, handle_level_irq);
  88        ret = irq_set_chip_data(irq, dpu_mdss);
  89
  90        return ret;
  91}
  92
/* Linear irq domain ops; one-cell DT translation. */
static const struct irq_domain_ops dpu_mdss_irqdomain_ops = {
	.map = dpu_mdss_irqdomain_map,
	.xlate = irq_domain_xlate_onecell,
};
  97
  98static int _dpu_mdss_irq_domain_add(struct dpu_mdss *dpu_mdss)
  99{
 100        struct device *dev;
 101        struct irq_domain *domain;
 102
 103        dev = dpu_mdss->base.dev->dev;
 104
 105        domain = irq_domain_add_linear(dev->of_node, 32,
 106                        &dpu_mdss_irqdomain_ops, dpu_mdss);
 107        if (!domain) {
 108                DPU_ERROR("failed to add irq_domain\n");
 109                return -EINVAL;
 110        }
 111
 112        dpu_mdss->irq_controller.enabled_mask = 0;
 113        dpu_mdss->irq_controller.domain = domain;
 114
 115        return 0;
 116}
 117
 118static int _dpu_mdss_irq_domain_fini(struct dpu_mdss *dpu_mdss)
 119{
 120        if (dpu_mdss->irq_controller.domain) {
 121                irq_domain_remove(dpu_mdss->irq_controller.domain);
 122                dpu_mdss->irq_controller.domain = NULL;
 123        }
 124        return 0;
 125}
 126static int dpu_mdss_enable(struct msm_mdss *mdss)
 127{
 128        struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
 129        struct dss_module_power *mp = &dpu_mdss->mp;
 130        int ret;
 131
 132        ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
 133        if (ret)
 134                DPU_ERROR("clock enable failed, ret:%d\n", ret);
 135
 136        return ret;
 137}
 138
 139static int dpu_mdss_disable(struct msm_mdss *mdss)
 140{
 141        struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
 142        struct dss_module_power *mp = &dpu_mdss->mp;
 143        int ret;
 144
 145        ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
 146        if (ret)
 147                DPU_ERROR("clock disable failed, ret:%d\n", ret);
 148
 149        return ret;
 150}
 151
/*
 * msm_mdss .destroy: undo dpu_mdss_init() in reverse order — irq domain
 * first, then clocks, then the register mapping, then runtime PM.
 * The teardown ordering here mirrors init and should not be rearranged.
 */
static void dpu_mdss_destroy(struct drm_device *dev)
{
	struct platform_device *pdev = to_platform_device(dev->dev);
	struct msm_drm_private *priv = dev->dev_private;
	struct dpu_mdss *dpu_mdss = to_dpu_mdss(priv->mdss);
	struct dss_module_power *mp = &dpu_mdss->mp;

	_dpu_mdss_irq_domain_fini(dpu_mdss);

	msm_dss_put_clk(mp->clk_config, mp->num_clk);
	/* clk_config was devm-allocated by msm_dss_parse_clock() in init */
	devm_kfree(&pdev->dev, mp->clk_config);

	if (dpu_mdss->mmio)
		devm_iounmap(&pdev->dev, dpu_mdss->mmio);
	dpu_mdss->mmio = NULL;

	pm_runtime_disable(dev->dev);
	/* dpu_mdss itself is devm-allocated; just unhook it from priv */
	priv->mdss = NULL;
}
 171
/* Ops vector exposed to the generic msm_mdss layer. */
static const struct msm_mdss_funcs mdss_funcs = {
	.enable = dpu_mdss_enable,
	.disable = dpu_mdss_disable,
	.destroy = dpu_mdss_destroy,
};
 177
 178int dpu_mdss_init(struct drm_device *dev)
 179{
 180        struct platform_device *pdev = to_platform_device(dev->dev);
 181        struct msm_drm_private *priv = dev->dev_private;
 182        struct resource *res;
 183        struct dpu_mdss *dpu_mdss;
 184        struct dss_module_power *mp;
 185        int ret = 0;
 186
 187        dpu_mdss = devm_kzalloc(dev->dev, sizeof(*dpu_mdss), GFP_KERNEL);
 188        if (!dpu_mdss)
 189                return -ENOMEM;
 190
 191        dpu_mdss->mmio = msm_ioremap(pdev, "mdss", "mdss");
 192        if (IS_ERR(dpu_mdss->mmio))
 193                return PTR_ERR(dpu_mdss->mmio);
 194
 195        DRM_DEBUG("mapped mdss address space @%pK\n", dpu_mdss->mmio);
 196
 197        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mdss");
 198        if (!res) {
 199                DRM_ERROR("failed to get memory resource for mdss\n");
 200                return -ENOMEM;
 201        }
 202        dpu_mdss->mmio_len = resource_size(res);
 203
 204        mp = &dpu_mdss->mp;
 205        ret = msm_dss_parse_clock(pdev, mp);
 206        if (ret) {
 207                DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
 208                goto clk_parse_err;
 209        }
 210
 211        dpu_mdss->base.dev = dev;
 212        dpu_mdss->base.funcs = &mdss_funcs;
 213
 214        ret = _dpu_mdss_irq_domain_add(dpu_mdss);
 215        if (ret)
 216                goto irq_domain_error;
 217
 218        ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
 219                        dpu_mdss_irq, 0, "dpu_mdss_isr", dpu_mdss);
 220        if (ret) {
 221                DPU_ERROR("failed to init irq: %d\n", ret);
 222                goto irq_error;
 223        }
 224
 225        pm_runtime_enable(dev->dev);
 226
 227        pm_runtime_get_sync(dev->dev);
 228        dpu_mdss->hwversion = readl_relaxed(dpu_mdss->mmio);
 229        pm_runtime_put_sync(dev->dev);
 230
 231        priv->mdss = &dpu_mdss->base;
 232
 233        return ret;
 234
 235irq_error:
 236        _dpu_mdss_irq_domain_fini(dpu_mdss);
 237irq_domain_error:
 238        msm_dss_put_clk(mp->clk_config, mp->num_clk);
 239clk_parse_err:
 240        devm_kfree(&pdev->dev, mp->clk_config);
 241        if (dpu_mdss->mmio)
 242                devm_iounmap(&pdev->dev, dpu_mdss->mmio);
 243        dpu_mdss->mmio = NULL;
 244        return ret;
 245}
 246