linux/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
/*
 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include "mdp5_kms.h"
#include "mdp5_ctl.h"

/*
 * CTL - MDP Control Pool Manager
 *
 * Controls are shared between all CRTCs.
 *
 * They are intended to be used for data path configuration.
 * The top level register programming describes the complete data path for
 * a specific data path ID - REG_MDP5_CTL_*(<id>, ...)
 *
 * Hardware capabilities determine the number of concurrent data paths.
 *
 * In certain use cases (high-resolution dual pipe), one single CTL can be
 * shared across multiple CRTCs.
 *
 * Because the number of CTLs can be less than the number of CRTCs,
 * a CTL is dynamically allocated from the pool only when a client (CRTC)
 * requests one (in mdp5_crtc_mode_set()).
 */
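
/*
 * Typical life cycle of a CTL, as a rough usage sketch (illustration only,
 * not driver code; the local variables "intf", "lm", "blend_cfg" and
 * "flush_mask" are assumed to be owned by the caller, while the
 * mdp5_ctl*() calls are the ones defined in this file):
 *
 *	struct mdp5_ctl *ctl;
 *
 *	ctl = mdp5_ctlm_request(ctl_mgr, crtc);	// grab a free CTL from the pool
 *	if (!ctl)
 *		return -EBUSY;			// pool exhausted
 *
 *	mdp5_ctl_set_intf(ctl, intf);		// bind the data path to an interface
 *	mdp5_ctl_blend(ctl, lm, blend_cfg);	// program the layer mixer stages
 *	mdp5_ctl_commit(ctl, flush_mask);	// latch the double-buffered registers
 *	...
 *	mdp5_ctl_release(ctl);			// return the CTL to the pool
 */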

struct mdp5_ctl {
        struct mdp5_ctl_manager *ctlm;

        u32 id;

        /* whether this CTL has been allocated or not: */
        bool busy;

        /* memory output connection (@see mdp5_ctl_mode): */
        u32 mode;

        /* REG_MDP5_CTL_*(<id>) registers access info + lock: */
        spinlock_t hw_lock;
        u32 reg_offset;

        /* flush mask used to commit CTL registers */
        u32 flush_mask;

        bool cursor_on;

        struct drm_crtc *crtc;
};

struct mdp5_ctl_manager {
        struct drm_device *dev;

        /* number of CTL / Layer Mixers in this hw config: */
        u32 nlm;
        u32 nctl;

        /* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */
        spinlock_t pool_lock;
        struct mdp5_ctl ctls[MAX_CTL];
};

static inline
struct mdp5_kms *get_kms(struct mdp5_ctl_manager *ctl_mgr)
{
        struct msm_drm_private *priv = ctl_mgr->dev->dev_private;

        return to_mdp5_kms(to_mdp_kms(priv->kms));
}

static inline
void ctl_write(struct mdp5_ctl *ctl, u32 reg, u32 data)
{
        struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);

        (void)ctl->reg_offset; /* TODO use this instead of mdp5_write */
        mdp5_write(mdp5_kms, reg, data);
}

static inline
u32 ctl_read(struct mdp5_ctl *ctl, u32 reg)
{
        struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);

        (void)ctl->reg_offset; /* TODO use this instead of mdp5_read */
        return mdp5_read(mdp5_kms, reg);
}

int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, enum mdp5_intf intf)
{
        unsigned long flags;
        static const enum mdp5_intfnum intfnum[] = {
                        INTF0, INTF1, INTF2, INTF3,
        };

        spin_lock_irqsave(&ctl->hw_lock, flags);
        ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id),
                        MDP5_CTL_OP_MODE(ctl->mode) |
                        MDP5_CTL_OP_INTF_NUM(intfnum[intf]));
        spin_unlock_irqrestore(&ctl->hw_lock, flags);

        return 0;
}

int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable)
{
        struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
        unsigned long flags;
        u32 blend_cfg;
        int lm;

        lm = mdp5_crtc_get_lm(ctl->crtc);
        if (unlikely(WARN_ON(lm < 0))) {
                dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d\n",
                                ctl->id, lm);
                return -EINVAL;
        }

        spin_lock_irqsave(&ctl->hw_lock, flags);

        blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm));

        if (enable)
                blend_cfg |=  MDP5_CTL_LAYER_REG_CURSOR_OUT;
        else
                blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;

        ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);

        spin_unlock_irqrestore(&ctl->hw_lock, flags);

        ctl->cursor_on = enable;

        return 0;
}

int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg)
{
        unsigned long flags;

        if (ctl->cursor_on)
                blend_cfg |=  MDP5_CTL_LAYER_REG_CURSOR_OUT;
        else
                blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;

        spin_lock_irqsave(&ctl->hw_lock, flags);
        ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
        spin_unlock_irqrestore(&ctl->hw_lock, flags);

        return 0;
}

int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
{
        struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
        unsigned long flags;

        if (flush_mask & MDP5_CTL_FLUSH_CURSOR_DUMMY) {
                int lm = mdp5_crtc_get_lm(ctl->crtc);

                if (unlikely(WARN_ON(lm < 0))) {
                        dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d\n",
                                        ctl->id, lm);
                        return -EINVAL;
                }

                /* for current targets, cursor bit is the same as LM bit */
                flush_mask |= mdp_ctl_flush_mask_lm(lm);
        }

        spin_lock_irqsave(&ctl->hw_lock, flags);
        ctl_write(ctl, REG_MDP5_CTL_FLUSH(ctl->id), flush_mask);
        spin_unlock_irqrestore(&ctl->hw_lock, flags);

        return 0;
}
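
/*
 * Flush masks are cumulative: a caller ORs together the bits for everything
 * it touched in the current frame and hands the result to mdp5_ctl_commit().
 * A rough sketch (illustration only; "my_lm" is a hypothetical layer mixer
 * index owned by the caller, the helpers are the ones used above):
 *
 *	u32 flush_mask = mdp5_ctl_get_flush(ctl);	// this CTL's own flush bit
 *
 *	flush_mask |= mdp_ctl_flush_mask_lm(my_lm);	// layer mixer was reprogrammed
 *	flush_mask |= MDP5_CTL_FLUSH_CURSOR_DUMMY;	// cursor changed; translated to
 *							// the LM bit by mdp5_ctl_commit()
 *	mdp5_ctl_commit(ctl, flush_mask);
 */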

u32 mdp5_ctl_get_flush(struct mdp5_ctl *ctl)
{
        return ctl->flush_mask;
}

void mdp5_ctl_release(struct mdp5_ctl *ctl)
{
        struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
        unsigned long flags;

        if (unlikely(WARN_ON(ctl->id >= MAX_CTL) || !ctl->busy)) {
                dev_err(ctl_mgr->dev->dev, "CTL %d in bad state (%d)\n",
                                ctl->id, ctl->busy);
                return;
        }

        spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
        ctl->busy = false;
        spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);

        DBG("CTL %d released", ctl->id);
}

/*
 * mdp5_ctlm_request() - CTL dynamic allocation
 *
 * Note: the current implementation assumes a single CRTC per CTL
 *
 * @return the first free CTL, or NULL if the pool is exhausted
 */
struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
                struct drm_crtc *crtc)
{
        struct mdp5_ctl *ctl = NULL;
        unsigned long flags;
        int c;

        spin_lock_irqsave(&ctl_mgr->pool_lock, flags);

        for (c = 0; c < ctl_mgr->nctl; c++)
                if (!ctl_mgr->ctls[c].busy)
                        break;

        if (unlikely(c >= ctl_mgr->nctl)) {
                dev_err(ctl_mgr->dev->dev, "No more CTL available!\n");
                goto unlock;
        }

        ctl = &ctl_mgr->ctls[c];

        ctl->crtc = crtc;
        ctl->busy = true;
        DBG("CTL %d allocated", ctl->id);

unlock:
        spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
        return ctl;
}
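
/*
 * Callers must check for pool exhaustion before using the CTL. Hedged sketch
 * of the CRTC-side pattern (illustration only; "mdp5_crtc" and its "ctl"
 * member are assumptions about the caller, not defined in this file):
 *
 *	mdp5_crtc->ctl = mdp5_ctlm_request(ctl_mgr, crtc);
 *	if (!mdp5_crtc->ctl)
 *		return -EBUSY;		// all nctl CTLs are busy
 *
 * The CTL then stays bound to that CRTC until mdp5_ctl_release() marks it
 * !busy again under pool_lock.
 */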

void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctl_mgr)
{
        unsigned long flags;
        int c;

        for (c = 0; c < ctl_mgr->nctl; c++) {
                struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];

                spin_lock_irqsave(&ctl->hw_lock, flags);
                ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), 0);
                spin_unlock_irqrestore(&ctl->hw_lock, flags);
        }
}

void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr)
{
        kfree(ctl_mgr);
}

struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
                void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg)
{
        struct mdp5_ctl_manager *ctl_mgr;
        const struct mdp5_sub_block *ctl_cfg = &hw_cfg->ctl;
        unsigned long flags;
        int c, ret;

        ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL);
        if (!ctl_mgr) {
                dev_err(dev->dev, "failed to allocate CTL manager\n");
                ret = -ENOMEM;
                goto fail;
        }

        if (unlikely(WARN_ON(ctl_cfg->count > MAX_CTL))) {
                dev_err(dev->dev, "Increase static pool size to at least %d\n",
                                ctl_cfg->count);
                ret = -ENOSPC;
                goto fail;
        }

        /* initialize the CTL manager: */
        ctl_mgr->dev = dev;
        ctl_mgr->nlm = hw_cfg->lm.count;
        ctl_mgr->nctl = ctl_cfg->count;
        spin_lock_init(&ctl_mgr->pool_lock);

        /* initialize each CTL of the pool: */
        spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
        for (c = 0; c < ctl_mgr->nctl; c++) {
                struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];

                if (WARN_ON(!ctl_cfg->base[c])) {
                        dev_err(dev->dev, "CTL_%d: base is null!\n", c);
                        ret = -EINVAL;
                        /* don't leak pool_lock on the error path */
                        spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
                        goto fail;
                }
                ctl->ctlm = ctl_mgr;
                ctl->id = c;
                ctl->mode = MODE_NONE;
                ctl->reg_offset = ctl_cfg->base[c];
                ctl->flush_mask = MDP5_CTL_FLUSH_CTL;
                ctl->busy = false;
                spin_lock_init(&ctl->hw_lock);
        }
        spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
        DBG("Pool of %d CTLs created.", ctl_mgr->nctl);

        return ctl_mgr;

fail:
        if (ctl_mgr)
                mdp5_ctlm_destroy(ctl_mgr);

        return ERR_PTR(ret);
}
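
/*
 * On failure mdp5_ctlm_init() returns ERR_PTR(), never NULL, so callers are
 * expected to use the IS_ERR()/PTR_ERR() pattern. Hedged sketch of the
 * probe-side check (illustration only; "mdp5_kms->ctlm" and "config->hw"
 * are assumptions about the caller, not defined in this file):
 *
 *	mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, config->hw);
 *	if (IS_ERR(mdp5_kms->ctlm)) {
 *		ret = PTR_ERR(mdp5_kms->ctlm);
 *		mdp5_kms->ctlm = NULL;
 *		goto fail;
 *	}
 */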