linux/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
/*
 * Copyright (c) 2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/irqdomain.h>
#include <linux/irq.h>

#include "msm_drv.h"
#include "mdp5_kms.h"

#define to_mdp5_mdss(x) container_of(x, struct mdp5_mdss, base)

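/*
 * mdp5_mdss wraps the generic msm_mdss base with everything the MDP5
 * MDSS wrapper block needs: the MDSS and VBIF register regions, the
 * "vdd" GDSC regulator, the interface/bus/vsync clocks, and the state
 * of the hw interrupt controller exposed to sub-blocks.
 */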
struct mdp5_mdss {
	struct msm_mdss base;

	void __iomem *mmio, *vbif;

	struct regulator *vdd;

	struct clk *ahb_clk;
	struct clk *axi_clk;
	struct clk *vsync_clk;

	struct {
		volatile unsigned long enabled_mask;
		struct irq_domain *domain;
	} irqcontroller;
};

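/* Register accessors for the MDSS wrapper region ("mdss_phys"). */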
static inline void mdss_write(struct mdp5_mdss *mdp5_mdss, u32 reg, u32 data)
{
	msm_writel(data, mdp5_mdss->mmio + reg);
}

static inline u32 mdss_read(struct mdp5_mdss *mdp5_mdss, u32 reg)
{
	return msm_readl(mdp5_mdss->mmio + reg);
}

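/*
 * Top-level MDSS interrupt handler: read the pending bits once, then
 * dispatch each set bit (highest first) to the virq mapped for that
 * hw irq in our irq domain.
 */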
static irqreturn_t mdss_irq(int irq, void *arg)
{
	struct mdp5_mdss *mdp5_mdss = arg;
	u32 intr;

	intr = mdss_read(mdp5_mdss, REG_MDSS_HW_INTR_STATUS);

	VERB("intr=%08x", intr);

	while (intr) {
		irq_hw_number_t hwirq = fls(intr) - 1;

		generic_handle_irq(irq_find_mapping(
				mdp5_mdss->irqcontroller.domain, hwirq));
		intr &= ~(1 << hwirq);
	}

	return IRQ_HANDLED;
}

/*
 * Interrupt-controller implementation, so that sub-blocks (MDP/HDMI/eDP/
 * DSI/etc) can register to get their irqs delivered.
 */

#define VALID_IRQS  (MDSS_HW_INTR_STATUS_INTR_MDP | \
		MDSS_HW_INTR_STATUS_INTR_DSI0 | \
		MDSS_HW_INTR_STATUS_INTR_DSI1 | \
		MDSS_HW_INTR_STATUS_INTR_HDMI | \
		MDSS_HW_INTR_STATUS_INTR_EDP)

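/*
 * The mask/unmask callbacks only track the enabled state in
 * enabled_mask; no MDSS register is written here.  The barriers make
 * the otherwise unordered clear_bit()/set_bit() fully ordered.
 */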
static void mdss_hw_mask_irq(struct irq_data *irqd)
{
	struct mdp5_mdss *mdp5_mdss = irq_data_get_irq_chip_data(irqd);

	smp_mb__before_atomic();
	clear_bit(irqd->hwirq, &mdp5_mdss->irqcontroller.enabled_mask);
	smp_mb__after_atomic();
}

static void mdss_hw_unmask_irq(struct irq_data *irqd)
{
	struct mdp5_mdss *mdp5_mdss = irq_data_get_irq_chip_data(irqd);

	smp_mb__before_atomic();
	set_bit(irqd->hwirq, &mdp5_mdss->irqcontroller.enabled_mask);
	smp_mb__after_atomic();
}

static struct irq_chip mdss_hw_irq_chip = {
	.name		= "mdss",
	.irq_mask	= mdss_hw_mask_irq,
	.irq_unmask	= mdss_hw_unmask_irq,
};

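/*
 * .map callback for the linear irq domain: reject hw irq numbers that
 * do not correspond to a known sub-block, and hook up the mdss chip
 * with a level handler for the rest.
 */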
static int mdss_hw_irqdomain_map(struct irq_domain *d, unsigned int irq,
				 irq_hw_number_t hwirq)
{
	struct mdp5_mdss *mdp5_mdss = d->host_data;

	if (!(VALID_IRQS & (1 << hwirq)))
		return -EPERM;

	irq_set_chip_and_handler(irq, &mdss_hw_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, mdp5_mdss);

	return 0;
}

static const struct irq_domain_ops mdss_hw_irqdomain_ops = {
	.map = mdss_hw_irqdomain_map,
	.xlate = irq_domain_xlate_onecell,
};

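/*
 * Create a 32-way linear irq domain, one slot per bit of
 * REG_MDSS_HW_INTR_STATUS; mdss_irq() above demultiplexes into it.
 */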
static int mdss_irq_domain_init(struct mdp5_mdss *mdp5_mdss)
{
	struct device *dev = mdp5_mdss->base.dev->dev;
	struct irq_domain *d;

	d = irq_domain_add_linear(dev->of_node, 32, &mdss_hw_irqdomain_ops,
				  mdp5_mdss);
	if (!d) {
		dev_err(dev, "mdss irq domain add failed\n");
		return -ENXIO;
	}

	mdp5_mdss->irqcontroller.enabled_mask = 0;
	mdp5_mdss->irqcontroller.domain = d;

	return 0;
}

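/*
 * The AHB clock is mandatory and toggled unconditionally; the bus (AXI)
 * and vsync clocks are optional (msm_mdss_get_clocks() leaves them NULL
 * when absent), so they are only touched when present.  Disable runs in
 * the reverse order of enable.
 */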
static int mdp5_mdss_enable(struct msm_mdss *mdss)
{
	struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(mdss);
	DBG("");

	clk_prepare_enable(mdp5_mdss->ahb_clk);
	if (mdp5_mdss->axi_clk)
		clk_prepare_enable(mdp5_mdss->axi_clk);
	if (mdp5_mdss->vsync_clk)
		clk_prepare_enable(mdp5_mdss->vsync_clk);

	return 0;
}

static int mdp5_mdss_disable(struct msm_mdss *mdss)
{
	struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(mdss);
	DBG("");

	if (mdp5_mdss->vsync_clk)
		clk_disable_unprepare(mdp5_mdss->vsync_clk);
	if (mdp5_mdss->axi_clk)
		clk_disable_unprepare(mdp5_mdss->axi_clk);
	clk_disable_unprepare(mdp5_mdss->ahb_clk);

	return 0;
}

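/*
 * Look up the wrapper clocks by name.  Any lookup error (including
 * probe deferral) is treated as "clock not present" and the handle is
 * set to NULL, which the enable/disable paths above check for.
 */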
static int msm_mdss_get_clocks(struct mdp5_mdss *mdp5_mdss)
{
	struct platform_device *pdev =
			to_platform_device(mdp5_mdss->base.dev->dev);

	mdp5_mdss->ahb_clk = msm_clk_get(pdev, "iface");
	if (IS_ERR(mdp5_mdss->ahb_clk))
		mdp5_mdss->ahb_clk = NULL;

	mdp5_mdss->axi_clk = msm_clk_get(pdev, "bus");
	if (IS_ERR(mdp5_mdss->axi_clk))
		mdp5_mdss->axi_clk = NULL;

	mdp5_mdss->vsync_clk = msm_clk_get(pdev, "vsync");
	if (IS_ERR(mdp5_mdss->vsync_clk))
		mdp5_mdss->vsync_clk = NULL;

	return 0;
}

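/*
 * Tear down in the reverse order of mdp5_mdss_init(): remove the irq
 * domain, drop the vdd regulator and disable runtime PM.  Memory,
 * mappings and the irq are devm-managed and released automatically.
 */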
static void mdp5_mdss_destroy(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(priv->mdss);

	if (!mdp5_mdss)
		return;

	irq_domain_remove(mdp5_mdss->irqcontroller.domain);
	mdp5_mdss->irqcontroller.domain = NULL;

	regulator_disable(mdp5_mdss->vdd);

	pm_runtime_disable(dev->dev);
}

static const struct msm_mdss_funcs mdss_funcs = {
	.enable	= mdp5_mdss_enable,
	.disable = mdp5_mdss_disable,
	.destroy = mdp5_mdss_destroy,
};

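/*
 * Probe-time setup for the MDSS wrapper: map the register regions, get
 * clocks and the vdd GDSC regulator, install the top-level interrupt
 * handler and the sub-block irq domain, then hand the msm_mdss base
 * (with its enable/disable/destroy hooks) to the drm device.
 */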
int mdp5_mdss_init(struct drm_device *dev)
{
	struct platform_device *pdev = to_platform_device(dev->dev);
	struct msm_drm_private *priv = dev->dev_private;
	struct mdp5_mdss *mdp5_mdss;
	int ret;

	DBG("");

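	/*
	 * Nothing to do if the top-level node is not the "qcom,mdss"
	 * wrapper (e.g. DTs where the MDP5 node is probed directly);
	 * priv->mdss then simply stays NULL.
	 */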
	if (!of_device_is_compatible(dev->dev->of_node, "qcom,mdss"))
		return 0;

	mdp5_mdss = devm_kzalloc(dev->dev, sizeof(*mdp5_mdss), GFP_KERNEL);
	if (!mdp5_mdss) {
		ret = -ENOMEM;
		goto fail;
	}

	mdp5_mdss->base.dev = dev;

	mdp5_mdss->mmio = msm_ioremap(pdev, "mdss_phys", "MDSS");
	if (IS_ERR(mdp5_mdss->mmio)) {
		ret = PTR_ERR(mdp5_mdss->mmio);
		goto fail;
	}

	mdp5_mdss->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF");
	if (IS_ERR(mdp5_mdss->vbif)) {
		ret = PTR_ERR(mdp5_mdss->vbif);
		goto fail;
	}

	ret = msm_mdss_get_clocks(mdp5_mdss);
	if (ret) {
		dev_err(dev->dev, "failed to get clocks: %d\n", ret);
		goto fail;
	}

	/* Regulator to enable GDSCs in downstream kernels */
	mdp5_mdss->vdd = devm_regulator_get(dev->dev, "vdd");
	if (IS_ERR(mdp5_mdss->vdd)) {
		ret = PTR_ERR(mdp5_mdss->vdd);
		goto fail;
	}

	ret = regulator_enable(mdp5_mdss->vdd);
	if (ret) {
		dev_err(dev->dev, "failed to enable regulator vdd: %d\n",
			ret);
		goto fail;
	}

	ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
			       mdss_irq, 0, "mdss_isr", mdp5_mdss);
	if (ret) {
		dev_err(dev->dev, "failed to init irq: %d\n", ret);
		goto fail_irq;
	}

	ret = mdss_irq_domain_init(mdp5_mdss);
	if (ret) {
		dev_err(dev->dev, "failed to init sub-block irqs: %d\n", ret);
		goto fail_irq;
	}

	mdp5_mdss->base.funcs = &mdss_funcs;
	priv->mdss = &mdp5_mdss->base;

	pm_runtime_enable(dev->dev);

	return 0;
fail_irq:
	regulator_disable(mdp5_mdss->vdd);
fail:
	return ret;
}