linux/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
/*
 * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "mdp5_kms.h"

#include <linux/sort.h>
#include <drm/drm_mode.h>
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "drm_flip_work.h"

#define CURSOR_WIDTH	64
#define CURSOR_HEIGHT	64

#define SSPP_MAX	(SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */

struct mdp5_crtc {
	struct drm_crtc base;
	char name[8];
	int id;
	bool enabled;

	/* layer mixer used for this CRTC (+ its lock): */
#define GET_LM_ID(crtc_id)	(((crtc_id) == 3) ? 5 : (crtc_id))
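	/*
	 * Note (assumption, not from hw docs): the CRTC id -> LM id
	 * mapping is hardware specific; on the MDP5 variants this driver
	 * targets, the mixer backing the fourth CRTC is LM 5 rather than
	 * LM 3, presumably because the intermediate mixers are dedicated
	 * to other paths (e.g. writeback), hence the crtc_id == 3 special
	 * case above.
	 */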
	int lm;
	spinlock_t lm_lock;	/* protect REG_MDP5_LM_* registers */

	/* CTL used for this CRTC: */
	struct mdp5_ctl *ctl;

	/* if there is a pending flip, these will be non-null: */
	struct drm_pending_vblank_event *event;

	/* Bits that have been flushed at the last commit, used to decide
	 * whether a vsync has happened since then.
	 */
	u32 flushed_mask;

#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2
	atomic_t pending;

	/* for unref'ing cursor bo's after scanout completes: */
	struct drm_flip_work unref_cursor_work;

	struct mdp_irq vblank;
	struct mdp_irq err;
	struct mdp_irq pp_done;

	struct completion pp_completion;

	bool cmd_mode;

	struct {
		/* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo */
		spinlock_t lock;

		/* current cursor being scanned out: */
		struct drm_gem_object *scanout_bo;
		uint32_t width, height;
		uint32_t x, y;
	} cursor;
};
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)

static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;
	return to_mdp5_kms(to_mdp_kms(priv->kms));
}

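/*
 * request_pending() records deferred work and arms the vblank irq: the
 * bits set here are consumed in mdp5_crtc_vblank_irq(), which runs on
 * the next vblank, completes the flip and/or commits the cursor-bo
 * unref work, and then unregisters itself.
 */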
static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	atomic_or(pending, &mdp5_crtc->pending);
	mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
}

static void request_pp_done_pending(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	reinit_completion(&mdp5_crtc->pp_completion);
}

static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	DBG("%s: flush=%08x", mdp5_crtc->name, flush_mask);
	return mdp5_ctl_commit(mdp5_crtc->ctl, flush_mask);
}

/*
 * Flush updates to make sure the hw picks up the new scanout fb, so
 * that we can safely queue an unref of the current fb (i.e. by the
 * next vblank we know the hw is done with the previous scanout_fb).
 */
static u32 crtc_flush_all(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct drm_plane *plane;
	uint32_t flush_mask = 0;

	/* this should not happen: */
	if (WARN_ON(!mdp5_crtc->ctl))
		return 0;

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		flush_mask |= mdp5_plane_get_flush(plane);
	}

	flush_mask |= mdp_ctl_flush_mask_lm(mdp5_crtc->lm);

	return crtc_flush(crtc, flush_mask);
}

/* if file != NULL, this is the potential cancel-flip path from preclose */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_pending_vblank_event *event;
	struct drm_plane *plane;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	event = mdp5_crtc->event;
	if (event) {
		/* if regular vblank case (!file) or if cancel-flip from
		 * preclose on file that requested flip, then send the
		 * event:
		 */
		if (!file || (event->base.file_priv == file)) {
			mdp5_crtc->event = NULL;
			DBG("%s: send event: %p", mdp5_crtc->name, event);
			drm_send_vblank_event(dev, mdp5_crtc->id, event);
		}
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		mdp5_plane_complete_flip(plane);
	}

	if (mdp5_crtc->ctl && !crtc->state->enable) {
		/* set STAGE_UNUSED for all layers */
		mdp5_ctl_blend(mdp5_crtc->ctl, NULL, 0, 0);
		mdp5_crtc->ctl = NULL;
	}
}

static void unref_cursor_worker(struct drm_flip_work *work, void *val)
{
	struct mdp5_crtc *mdp5_crtc =
		container_of(work, struct mdp5_crtc, unref_cursor_work);
	struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);

	msm_gem_put_iova(val, mdp5_kms->id);
	drm_gem_object_unreference_unlocked(val);
}

static void mdp5_crtc_destroy(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	drm_crtc_cleanup(crtc);
	drm_flip_work_cleanup(&mdp5_crtc->unref_cursor_work);

	kfree(mdp5_crtc);
}

/*
 * blend_setup() - blend all the planes of a CRTC
 *
 * If no base layer is available, border will be enabled as the base layer.
 * Otherwise all layers will be blended based on their stage calculated
 * in mdp5_crtc_atomic_check.
 */
static void blend_setup(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_plane *plane;
	const struct mdp5_cfg_hw *hw_cfg;
	struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL};
	const struct mdp_format *format;
	uint32_t lm = mdp5_crtc->lm;
	uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
	unsigned long flags;
	uint8_t stage[STAGE_MAX + 1];
	int i, plane_cnt = 0;
#define blender(stage)	((stage) - STAGE0)
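/*
 * Each non-base stage drives its own LM blend unit: STAGE0 maps to
 * blender 0, STAGE1 to blender 1, and so on. STAGE_BASE has no blender
 * of its own since it is the background that the others blend against.
 */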

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);

	/* ctl could be released already when we are shutting down: */
	if (!mdp5_crtc->ctl)
		goto out;

	/* Collect all plane information */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		pstate = to_mdp5_plane_state(plane->state);
		pstates[pstate->stage] = pstate;
		stage[pstate->stage] = mdp5_plane_pipe(plane);
		plane_cnt++;
	}

	/*
	 * If there is no base layer, enable border color.
	 * Although it's not possible in the current blend logic,
	 * put it here as a reminder.
	 */
	if (!pstates[STAGE_BASE] && plane_cnt) {
		ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT;
		DBG("Border Color is enabled");
	}

	/* Program the blend configuration of each used stage: */
	for (i = STAGE0; i <= STAGE_MAX; i++) {
		if (!pstates[i])
			continue;

		format = to_mdp_format(
			msm_framebuffer_format(pstates[i]->base.fb));
		plane = pstates[i]->base.plane;
		blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
			MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST);
		fg_alpha = pstates[i]->alpha;
		bg_alpha = 0xFF - pstates[i]->alpha;
		DBG("Stage %d fg_alpha %x bg_alpha %x", i, fg_alpha, bg_alpha);

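		/*
		 * Rough intent of the three cases below (a sketch based on
		 * the register bit names, not taken from hw documentation):
		 *   constant alpha:      out = fg_const*fg + bg_const*bg
		 *   premultiplied:       out = fg + (1 - fg.alpha)*bg
		 *   per-pixel straight:  out = fg.alpha*fg + (1 - fg.alpha)*bg
		 * The *_MOD_ALPHA bits additionally scale by the constant
		 * plane alpha when it is not 0xff.
		 */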
		if (format->alpha_enable && pstates[i]->premultiplied) {
			blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
			if (fg_alpha != 0xff) {
				bg_alpha = fg_alpha;
				blend_op |=
					MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
			} else {
				blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
			}
		} else if (format->alpha_enable) {
			blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_PIXEL) |
				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
			if (fg_alpha != 0xff) {
				bg_alpha = fg_alpha;
				blend_op |=
					MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
			} else {
				blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
			}
		}

		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(lm,
				blender(i)), blend_op);
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
				blender(i)), fg_alpha);
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
				blender(i)), bg_alpha);
	}

	mdp5_ctl_blend(mdp5_crtc->ctl, stage, plane_cnt, ctl_blend_flags);

out:
	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}

static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	unsigned long flags;
	struct drm_display_mode *mode;

	if (WARN_ON(!crtc->state))
		return;

	mode = &crtc->state->adjusted_mode;

	DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
			mdp5_crtc->name, mode->base.id, mode->name,
			mode->vrefresh, mode->clock,
			mode->hdisplay, mode->hsync_start,
			mode->hsync_end, mode->htotal,
			mode->vdisplay, mode->vsync_start,
			mode->vsync_end, mode->vtotal,
			mode->type, mode->flags);

	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
	mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->lm),
			MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) |
			MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}

static void mdp5_crtc_disable(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);

	DBG("%s", mdp5_crtc->name);

	if (WARN_ON(!mdp5_crtc->enabled))
		return;

	if (mdp5_crtc->cmd_mode)
		mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done);

	mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
	mdp5_disable(mdp5_kms);

	mdp5_crtc->enabled = false;
}

static void mdp5_crtc_enable(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);

	DBG("%s", mdp5_crtc->name);

	if (WARN_ON(mdp5_crtc->enabled))
		return;

	mdp5_enable(mdp5_kms);
	mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);

	if (mdp5_crtc->cmd_mode)
		mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->pp_done);

	mdp5_crtc->enabled = true;
}

struct plane_state {
	struct drm_plane *plane;
	struct mdp5_plane_state *state;
};

static int pstate_cmp(const void *a, const void *b)
{
	struct plane_state *pa = (struct plane_state *)a;
	struct plane_state *pb = (struct plane_state *)b;
	return pa->state->zpos - pb->state->zpos;
}
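/*
 * Note: the kernel's sort() is a heapsort and therefore not stable;
 * planes that share the same zpos value may end up staged in either
 * order.
 */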

static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_plane *plane;
	struct drm_device *dev = crtc->dev;
	struct plane_state pstates[STAGE_MAX + 1];
	const struct mdp5_cfg_hw *hw_cfg;
	int cnt = 0, i;

	DBG("%s: check", mdp5_crtc->name);

	/* verify that there are not too many planes attached to crtc
	 * and that we don't have conflicting mixer stages:
	 */
	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
	drm_atomic_crtc_state_for_each_plane(plane, state) {
		struct drm_plane_state *pstate;
		if (cnt >= hw_cfg->lm.nb_stages) {
			dev_err(dev->dev, "too many planes!\n");
			return -EINVAL;
		}

		pstate = state->state->plane_states[drm_plane_index(plane)];

		/* plane might not have changed, in which case take
		 * current state:
		 */
		if (!pstate)
			pstate = plane->state;
		pstates[cnt].plane = plane;
		pstates[cnt].state = to_mdp5_plane_state(pstate);

		cnt++;
	}

	/* assign a stage based on sorted zpos property */
	sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
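	/*
	 * Illustrative example: three planes with zpos 4, 1 and 7 sort to
	 * {1, 4, 7} and are assigned STAGE_BASE, STAGE_BASE + 1 and
	 * STAGE_BASE + 2; the lowest zpos always becomes the base layer.
	 */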

	for (i = 0; i < cnt; i++) {
		pstates[i].state->stage = STAGE_BASE + i;
		DBG("%s: assign pipe %s on stage=%d", mdp5_crtc->name,
				pipe2name(mdp5_plane_pipe(pstates[i].plane)),
				pstates[i].state->stage);
	}

	return 0;
}

static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc,
				   struct drm_crtc_state *old_crtc_state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	DBG("%s: begin", mdp5_crtc->name);
}

static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
				   struct drm_crtc_state *old_crtc_state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	DBG("%s: event: %p", mdp5_crtc->name, crtc->state->event);

	WARN_ON(mdp5_crtc->event);

	spin_lock_irqsave(&dev->event_lock, flags);
	mdp5_crtc->event = crtc->state->event;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	/*
	 * If no CTL has been allocated in mdp5_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!mdp5_crtc->ctl))
		return;

	blend_setup(crtc);

	/* The PP_DONE irq is only used by command mode for now.
	 * It is better to request it as pending before the FLUSH and START
	 * triggers, to make sure no pp_done irq is missed.
	 * This is safe because in command mode no pp_done can happen before
	 * the SW trigger.
	 */
	if (mdp5_crtc->cmd_mode)
		request_pp_done_pending(crtc);

	mdp5_crtc->flushed_mask = crtc_flush_all(crtc);

	request_pending(crtc, PENDING_FLIP);
}

static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	uint32_t xres = crtc->mode.hdisplay;
	uint32_t yres = crtc->mode.vdisplay;

	/*
	 * The cursor Region Of Interest (ROI) is the part of the cursor
	 * buffer that is actually rendered. It is determined by the
	 * visible portion of the cursor image: by default the cursor
	 * point is the top-left corner of the image, unless a hotspot is
	 * specified.
	 *
	 * If the cursor point reaches the right (xres - x < cursor.width)
	 * or bottom (yres - y < cursor.height) boundary of the screen,
	 * the ROI width and height are reduced to crop the cursor image
	 * accordingly:
	 * (xres - x) becomes the new cursor width when x > (xres - cursor.width)
	 * (yres - y) becomes the new cursor height when y > (yres - cursor.height)
	 */
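	/*
	 * Worked example (64x64 cursor): with xres = 1920 and
	 * cursor.x = 1900, xres - x = 20 < 64, so roi_w becomes 20 and
	 * only the leftmost 20 columns of the cursor image are scanned
	 * out.
	 */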
	*roi_w = min(mdp5_crtc->cursor.width, xres -
			mdp5_crtc->cursor.x);
	*roi_h = min(mdp5_crtc->cursor.height, yres -
			mdp5_crtc->cursor.y);
}

static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
		struct drm_file *file, uint32_t handle,
		uint32_t width, uint32_t height)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_gem_object *cursor_bo, *old_bo = NULL;
	uint32_t blendcfg, cursor_addr, stride;
	int ret, bpp, lm;
	unsigned int depth;
	enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
	uint32_t roi_w, roi_h;
	bool cursor_enable = true;
	unsigned long flags;

	if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
		dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
		return -EINVAL;
	}

	if (!mdp5_crtc->ctl)
		return -EINVAL;

	if (!handle) {
		DBG("Cursor off");
		cursor_enable = false;
		goto set_cursor;
	}

	cursor_bo = drm_gem_object_lookup(dev, file, handle);
	if (!cursor_bo)
		return -ENOENT;

	ret = msm_gem_get_iova(cursor_bo, mdp5_kms->id, &cursor_addr);
	if (ret) {
		/* drop the reference taken by drm_gem_object_lookup(): */
		drm_gem_object_unreference_unlocked(cursor_bo);
		return -EINVAL;
	}

	lm = mdp5_crtc->lm;
	drm_fb_get_bpp_depth(DRM_FORMAT_ARGB8888, &depth, &bpp);
	stride = width * (bpp >> 3);

	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
	old_bo = mdp5_crtc->cursor.scanout_bo;

	mdp5_crtc->cursor.scanout_bo = cursor_bo;
	mdp5_crtc->cursor.width = width;
	mdp5_crtc->cursor.height = height;

	get_roi(crtc, &roi_w, &roi_h);

	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
			MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_IMG_SIZE(lm),
			MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
			MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
			MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
			MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm), cursor_addr);

	blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN;
	blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg);

	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

set_cursor:
	ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, 0, cursor_enable);
	if (ret) {
		dev_err(dev->dev, "failed to %sable cursor: %d\n",
				cursor_enable ? "en" : "dis", ret);
		goto end;
	}

	crtc_flush(crtc, flush_mask);

end:
	if (old_bo) {
		drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
		/* enable vblank to complete cursor work: */
		request_pending(crtc, PENDING_CURSOR);
	}
	return ret;
}

static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
	uint32_t roi_w;
	uint32_t roi_h;
	unsigned long flags;

	/* In case the CRTC is disabled, just drop the cursor update */
	if (unlikely(!crtc->state->enable))
		return 0;

	mdp5_crtc->cursor.x = x = max(x, 0);
	mdp5_crtc->cursor.y = y = max(y, 0);

	get_roi(crtc, &roi_w, &roi_h);

	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(mdp5_crtc->lm),
			MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
			MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(mdp5_crtc->lm),
			MDP5_LM_CURSOR_START_XY_Y_START(y) |
			MDP5_LM_CURSOR_START_XY_X_START(x));
	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

	crtc_flush(crtc, flush_mask);

	return 0;
}

static const struct drm_crtc_funcs mdp5_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = mdp5_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.set_property = drm_atomic_helper_crtc_set_property,
	.reset = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
	.cursor_set = mdp5_crtc_cursor_set,
	.cursor_move = mdp5_crtc_cursor_move,
};
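/*
 * Everything except destroy and the legacy cursor ioctls is delegated
 * to the drm atomic helpers; cursor_set/cursor_move are implemented
 * directly because the hw cursor lives in LM registers rather than on
 * a drm_plane.
 */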

static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
	.mode_set_nofb = mdp5_crtc_mode_set_nofb,
	.disable = mdp5_crtc_disable,
	.enable = mdp5_crtc_enable,
	.atomic_check = mdp5_crtc_atomic_check,
	.atomic_begin = mdp5_crtc_atomic_begin,
	.atomic_flush = mdp5_crtc_atomic_flush,
};

static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
	struct drm_crtc *crtc = &mdp5_crtc->base;
	struct msm_drm_private *priv = crtc->dev->dev_private;
	unsigned pending;

	mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);

	pending = atomic_xchg(&mdp5_crtc->pending, 0);

	if (pending & PENDING_FLIP) {
		complete_flip(crtc, NULL);
	}

	if (pending & PENDING_CURSOR)
		drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->wq);
}

static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);

	DBG("%s: error: %08x", mdp5_crtc->name, irqstatus);
}

static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc,
								pp_done);

	complete(&mdp5_crtc->pp_completion);
}

static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	int ret;

	ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
						msecs_to_jiffies(50));
	if (ret == 0)
		dev_warn(dev->dev, "pp done time out, lm=%d\n", mdp5_crtc->lm);
}

static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	int ret;

	/* Should not call this function if crtc is disabled. */
	if (!mdp5_crtc->ctl)
		return;

	ret = drm_crtc_vblank_get(crtc);
	if (ret)
		return;

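	/*
	 * mdp5_ctl_get_commit_status() reads back the CTL FLUSH register;
	 * the wait below treats the flushed bits draining to zero as the
	 * indication that the committed configuration has been latched by
	 * the hardware (at vblank for video mode).
	 */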
	ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
		((mdp5_ctl_get_commit_status(mdp5_crtc->ctl) &
		mdp5_crtc->flushed_mask) == 0),
		msecs_to_jiffies(50));
	if (ret <= 0)
		dev_warn(dev->dev, "vblank time out, crtc=%d\n", mdp5_crtc->id);

	mdp5_crtc->flushed_mask = 0;

	drm_crtc_vblank_put(crtc);
}

uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	return mdp5_crtc->vblank.irqmask;
}

void mdp5_crtc_set_pipeline(struct drm_crtc *crtc,
		struct mdp5_interface *intf, struct mdp5_ctl *ctl)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	int lm = mdp5_crtc_get_lm(crtc);

	/* now that we know which irqs we want: */
	mdp5_crtc->err.irqmask = intf2err(intf->num);
	mdp5_crtc->vblank.irqmask = intf2vblank(lm, intf);

	if ((intf->type == INTF_DSI) &&
		(intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) {
		mdp5_crtc->pp_done.irqmask = lm2ppdone(lm);
		mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq;
		mdp5_crtc->cmd_mode = true;
	} else {
		mdp5_crtc->pp_done.irqmask = 0;
		mdp5_crtc->pp_done.irq = NULL;
		mdp5_crtc->cmd_mode = false;
	}

	mdp_irq_update(&mdp5_kms->base);

	mdp5_crtc->ctl = ctl;
	mdp5_ctl_set_pipeline(ctl, intf, lm);
}

int mdp5_crtc_get_lm(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	return WARN_ON(!crtc) ? -EINVAL : mdp5_crtc->lm;
}

void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	if (mdp5_crtc->cmd_mode)
		mdp5_crtc_wait_for_pp_done(crtc);
	else
		mdp5_crtc_wait_for_flush_done(crtc);
}

/* initialize crtc */
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
		struct drm_plane *plane, int id)
{
	struct drm_crtc *crtc = NULL;
	struct mdp5_crtc *mdp5_crtc;

	mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
	if (!mdp5_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &mdp5_crtc->base;

	mdp5_crtc->id = id;
	mdp5_crtc->lm = GET_LM_ID(id);

	spin_lock_init(&mdp5_crtc->lm_lock);
	spin_lock_init(&mdp5_crtc->cursor.lock);
	init_completion(&mdp5_crtc->pp_completion);

	mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
	mdp5_crtc->err.irq = mdp5_crtc_err_irq;

	snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d",
			pipe2name(mdp5_plane_pipe(plane)), id);

	drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs,
				  NULL);

	drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
			"unref cursor", unref_cursor_worker);

	drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
	plane->crtc = crtc;

	return crtc;
}