linux/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
   4 * Copyright (C) 2013 Red Hat
   5 * Author: Rob Clark <robdclark@gmail.com>
   6 */
   7
   8#define pr_fmt(fmt)     "[drm:%s:%d] " fmt, __func__, __LINE__
   9#include <linux/kthread.h>
  10#include <linux/debugfs.h>
  11#include <linux/seq_file.h>
  12
  13#include "msm_drv.h"
  14#include "dpu_kms.h"
  15#include <drm/drm_crtc.h>
  16#include <drm/drm_probe_helper.h>
  17#include "dpu_hwio.h"
  18#include "dpu_hw_catalog.h"
  19#include "dpu_hw_intf.h"
  20#include "dpu_hw_ctl.h"
  21#include "dpu_formats.h"
  22#include "dpu_encoder_phys.h"
  23#include "dpu_crtc.h"
  24#include "dpu_trace.h"
  25#include "dpu_core_irq.h"
  26
  27#define DPU_DEBUG_ENC(e, fmt, ...) DPU_DEBUG("enc%d " fmt,\
  28                (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
  29
  30#define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\
  31                (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
  32
  33#define DPU_DEBUG_PHYS(p, fmt, ...) DPU_DEBUG("enc%d intf%d pp%d " fmt,\
  34                (p) ? (p)->parent->base.id : -1, \
  35                (p) ? (p)->intf_idx - INTF_0 : -1, \
  36                (p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \
  37                ##__VA_ARGS__)
  38
  39#define DPU_ERROR_PHYS(p, fmt, ...) DPU_ERROR("enc%d intf%d pp%d " fmt,\
  40                (p) ? (p)->parent->base.id : -1, \
  41                (p) ? (p)->intf_idx - INTF_0 : -1, \
  42                (p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \
  43                ##__VA_ARGS__)
  44
   45/*
   46 * Two to anticipate panels that can do cmd/vid dynamic switching;
   47 * the plan is to create all possible physical encoder types and
   48 * switch between them at runtime
   49 */
  50#define NUM_PHYS_ENCODER_TYPES 2
  51
  52#define MAX_PHYS_ENCODERS_PER_VIRTUAL \
  53        (MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES)
  54
  55#define MAX_CHANNELS_PER_ENC 2
  56
  57#define IDLE_SHORT_TIMEOUT      1
  58
  59#define MAX_VDISPLAY_SPLIT 1080
  60
  61/* timeout in frames waiting for frame done */
  62#define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5
  63
  64/**
  65 * enum dpu_enc_rc_events - events for resource control state machine
  66 * @DPU_ENC_RC_EVENT_KICKOFF:
  67 *      This event happens at NORMAL priority.
  68 *      Event that signals the start of the transfer. When this event is
  69 *      received, enable MDP/DSI core clocks. Regardless of the previous
  70 *      state, the resource should be in ON state at the end of this event.
  71 * @DPU_ENC_RC_EVENT_FRAME_DONE:
  72 *      This event happens at INTERRUPT level.
  73 *      Event signals the end of the data transfer after the PP FRAME_DONE
  74 *      event. At the end of this event, a delayed work is scheduled to go to
  75 *      IDLE_PC state after IDLE_TIMEOUT time.
  76 * @DPU_ENC_RC_EVENT_PRE_STOP:
  77 *      This event happens at NORMAL priority.
   78 *      This event, when received during the ON state, leaves the RC state
   79 *      in the PRE_OFF state. It should be followed by the STOP event as
   80 *      part of encoder disable.
  81 *      If received during IDLE or OFF states, it will do nothing.
  82 * @DPU_ENC_RC_EVENT_STOP:
  83 *      This event happens at NORMAL priority.
  84 *      When this event is received, disable all the MDP/DSI core clocks, and
  85 *      disable IRQs. It should be called from the PRE_OFF or IDLE states.
  86 *      IDLE is expected when IDLE_PC has run, and PRE_OFF did nothing.
  87 *      PRE_OFF is expected when PRE_STOP was executed during the ON state.
  88 *      Resource state should be in OFF at the end of the event.
  89 * @DPU_ENC_RC_EVENT_ENTER_IDLE:
  90 *      This event happens at NORMAL priority from a work item.
  91 *      Event signals that there were no frame updates for IDLE_TIMEOUT time.
  92 *      This would disable MDP/DSI core clocks and change the resource state
  93 *      to IDLE.
  94 */
  95enum dpu_enc_rc_events {
  96        DPU_ENC_RC_EVENT_KICKOFF = 1,
  97        DPU_ENC_RC_EVENT_FRAME_DONE,
  98        DPU_ENC_RC_EVENT_PRE_STOP,
  99        DPU_ENC_RC_EVENT_STOP,
 100        DPU_ENC_RC_EVENT_ENTER_IDLE
 101};
 102
  103/**
  104 * enum dpu_enc_rc_states - states that the resource control maintains
  105 * @DPU_ENC_RC_STATE_OFF: Resource is in OFF state
  106 * @DPU_ENC_RC_STATE_PRE_OFF: Resource is transitioning to OFF state
  107 * @DPU_ENC_RC_STATE_ON: Resource is in ON state
  109 * @DPU_ENC_RC_STATE_IDLE: Resource is in IDLE state
  110 */
 111enum dpu_enc_rc_states {
 112        DPU_ENC_RC_STATE_OFF,
 113        DPU_ENC_RC_STATE_PRE_OFF,
 114        DPU_ENC_RC_STATE_ON,
 115        DPU_ENC_RC_STATE_IDLE
 116};
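/*
 * Illustrative sketch (not part of the driver): a minimal model of the
 * resource-control transitions documented above, assuming the simplified
 * rules from the kerneldoc (KICKOFF always lands in ON; ENTER_IDLE only
 * applies from ON; PRE_STOP parks ON in PRE_OFF; STOP is legal from
 * PRE_OFF or IDLE). The helper name is hypothetical.
 */
static inline enum dpu_enc_rc_states example_rc_next(enum dpu_enc_rc_states cur,
						     enum dpu_enc_rc_events ev)
{
	switch (ev) {
	case DPU_ENC_RC_EVENT_KICKOFF:
		/* clocks/IRQs come up on this edge, whatever the old state */
		return DPU_ENC_RC_STATE_ON;
	case DPU_ENC_RC_EVENT_FRAME_DONE:
		/* stays ON; this event only schedules the delayed idle work */
		return cur;
	case DPU_ENC_RC_EVENT_PRE_STOP:
		return cur == DPU_ENC_RC_STATE_ON ?
			DPU_ENC_RC_STATE_PRE_OFF : cur;
	case DPU_ENC_RC_EVENT_STOP:
		return (cur == DPU_ENC_RC_STATE_PRE_OFF ||
			cur == DPU_ENC_RC_STATE_IDLE) ?
			DPU_ENC_RC_STATE_OFF : cur;
	case DPU_ENC_RC_EVENT_ENTER_IDLE:
		/* stale timer events outside the ON state are ignored */
		return cur == DPU_ENC_RC_STATE_ON ?
			DPU_ENC_RC_STATE_IDLE : cur;
	}
	return cur;
}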
 117
 118/**
 119 * struct dpu_encoder_virt - virtual encoder. Container of one or more physical
 120 *      encoders. Virtual encoder manages one "logical" display. Physical
 121 *      encoders manage one intf block, tied to a specific panel/sub-panel.
 122 *      Virtual encoder defers as much as possible to the physical encoders.
 123 *      Virtual encoder registers itself with the DRM Framework as the encoder.
 124 * @base:               drm_encoder base class for registration with DRM
 125 * @enc_spinlock:       Virtual-Encoder-Wide Spin Lock for IRQ purposes
 126 * @bus_scaling_client: Client handle to the bus scaling interface
 127 * @enabled:            True if the encoder is active, protected by enc_lock
 128 * @num_phys_encs:      Actual number of physical encoders contained.
 129 * @phys_encs:          Container of physical encoders managed.
  130 * @cur_master:         Pointer to the current master in this mode. Optimization:
  131 *                      only valid after enable; cleared on disable.
  132 * @hw_pp:              Handles to the pingpong blocks used for the display. The
  133 *                      number of pingpong blocks can differ from num_phys_encs.
  134 * @intfs_swapped:      Whether or not the phys_enc interfaces have been swapped
 135 *                      for partial update right-only cases, such as pingpong
 136 *                      split where virtual pingpong does not generate IRQs
 137 * @crtc:               Pointer to the currently assigned crtc. Normally you
 138 *                      would use crtc->state->encoder_mask to determine the
 139 *                      link between encoder/crtc. However in this case we need
 140 *                      to track crtc in the disable() hook which is called
 141 *                      _after_ encoder_mask is cleared.
 142 * @crtc_kickoff_cb:            Callback into CRTC that will flush & start
 143 *                              all CTL paths
 144 * @crtc_kickoff_cb_data:       Opaque user data given to crtc_kickoff_cb
 145 * @debugfs_root:               Debug file system root file node
 146 * @enc_lock:                   Lock around physical encoder
 147 *                              create/destroy/enable/disable
  148 * @frame_busy_mask:            Bitmask tracking which phys_encs are still
  149 *                              busy processing the current command.
 150 *                              Bit0 = phys_encs[0] etc.
 151 * @crtc_frame_event_cb:        callback handler for frame event
 152 * @crtc_frame_event_cb_data:   callback handler private data
 153 * @frame_done_timeout_ms:      frame done timeout in ms
 154 * @frame_done_timer:           watchdog timer for frame done event
 155 * @vsync_event_timer:          vsync timer
 156 * @disp_info:                  local copy of msm_display_info struct
  157 * @idle_pc_supported:          indicate if idle power collapse is supported
 158 * @rc_lock:                    resource control mutex lock to protect
 159 *                              virt encoder over various state changes
 160 * @rc_state:                   resource controller state
 161 * @delayed_off_work:           delayed worker to schedule disabling of
 162 *                              clks and resources after IDLE_TIMEOUT time.
 163 * @vsync_event_work:           worker to handle vsync event for autorefresh
 164 * @topology:                   topology of the display
 165 * @mode_set_complete:          flag to indicate modeset completion
 166 * @idle_timeout:               idle timeout duration in milliseconds
 167 */
 168struct dpu_encoder_virt {
 169        struct drm_encoder base;
 170        spinlock_t enc_spinlock;
 171        uint32_t bus_scaling_client;
 172
 173        bool enabled;
 174
 175        unsigned int num_phys_encs;
 176        struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
 177        struct dpu_encoder_phys *cur_master;
 178        struct dpu_encoder_phys *cur_slave;
 179        struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
 180
 181        bool intfs_swapped;
 182
 183        struct drm_crtc *crtc;
 184
 185        struct dentry *debugfs_root;
 186        struct mutex enc_lock;
 187        DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL);
 188        void (*crtc_frame_event_cb)(void *, u32 event);
 189        void *crtc_frame_event_cb_data;
 190
 191        atomic_t frame_done_timeout_ms;
 192        struct timer_list frame_done_timer;
 193        struct timer_list vsync_event_timer;
 194
 195        struct msm_display_info disp_info;
 196
 197        bool idle_pc_supported;
 198        struct mutex rc_lock;
 199        enum dpu_enc_rc_states rc_state;
 200        struct delayed_work delayed_off_work;
 201        struct kthread_work vsync_event_work;
 202        struct msm_display_topology topology;
 203        bool mode_set_complete;
 204
 205        u32 idle_timeout;
 206};
 207
 208#define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)
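/*
 * Illustrative sketch (not part of the driver): what the container_of()
 * cast above does. DRM callbacks hand back the embedded &base member, and
 * subtracting its offset within struct dpu_encoder_virt recovers the
 * containing virtual encoder. The function name is hypothetical.
 */
static inline struct dpu_encoder_virt *example_virt_from_base(
		struct drm_encoder *drm_enc)
{
	/* equivalent to to_dpu_encoder_virt(drm_enc) */
	return container_of(drm_enc, struct dpu_encoder_virt, base);
}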
 209
 210void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
 211                enum dpu_intr_idx intr_idx)
 212{
 213        DRM_ERROR("irq timeout id=%u, intf=%d, pp=%d, intr=%d\n",
 214                  DRMID(phys_enc->parent), phys_enc->intf_idx - INTF_0,
 215                  phys_enc->hw_pp->idx - PINGPONG_0, intr_idx);
 216
 217        if (phys_enc->parent_ops->handle_frame_done)
 218                phys_enc->parent_ops->handle_frame_done(
 219                                phys_enc->parent, phys_enc,
 220                                DPU_ENCODER_FRAME_EVENT_ERROR);
 221}
 222
 223static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id,
 224                int32_t hw_id, struct dpu_encoder_wait_info *info);
 225
 226int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
 227                enum dpu_intr_idx intr_idx,
 228                struct dpu_encoder_wait_info *wait_info)
 229{
 230        struct dpu_encoder_irq *irq;
 231        u32 irq_status;
 232        int ret;
 233
 234        if (!phys_enc || !wait_info || intr_idx >= INTR_IDX_MAX) {
 235                DPU_ERROR("invalid params\n");
 236                return -EINVAL;
 237        }
 238        irq = &phys_enc->irq[intr_idx];
 239
 240        /* note: do master / slave checking outside */
 241
 242        /* return EWOULDBLOCK since we know the wait isn't necessary */
 243        if (phys_enc->enable_state == DPU_ENC_DISABLED) {
 244                DRM_ERROR("encoder is disabled id=%u, intr=%d, hw=%d, irq=%d",
 245                          DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
 246                          irq->irq_idx);
 247                return -EWOULDBLOCK;
 248        }
 249
 250        if (irq->irq_idx < 0) {
 251                DRM_DEBUG_KMS("skip irq wait id=%u, intr=%d, hw=%d, irq=%s",
 252                              DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
 253                              irq->name);
 254                return 0;
 255        }
 256
 257        DRM_DEBUG_KMS("id=%u, intr=%d, hw=%d, irq=%d, pp=%d, pending_cnt=%d",
 258                      DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
 259                      irq->irq_idx, phys_enc->hw_pp->idx - PINGPONG_0,
 260                      atomic_read(wait_info->atomic_cnt));
 261
 262        ret = dpu_encoder_helper_wait_event_timeout(
 263                        DRMID(phys_enc->parent),
 264                        irq->hw_idx,
 265                        wait_info);
 266
 267        if (ret <= 0) {
 268                irq_status = dpu_core_irq_read(phys_enc->dpu_kms,
 269                                irq->irq_idx, true);
 270                if (irq_status) {
 271                        unsigned long flags;
 272
 273                        DRM_DEBUG_KMS("irq not triggered id=%u, intr=%d, "
 274                                      "hw=%d, irq=%d, pp=%d, atomic_cnt=%d",
 275                                      DRMID(phys_enc->parent), intr_idx,
 276                                      irq->hw_idx, irq->irq_idx,
 277                                      phys_enc->hw_pp->idx - PINGPONG_0,
 278                                      atomic_read(wait_info->atomic_cnt));
 279                        local_irq_save(flags);
 280                        irq->cb.func(phys_enc, irq->irq_idx);
 281                        local_irq_restore(flags);
 282                        ret = 0;
 283                } else {
 284                        ret = -ETIMEDOUT;
 285                        DRM_DEBUG_KMS("irq timeout id=%u, intr=%d, "
 286                                      "hw=%d, irq=%d, pp=%d, atomic_cnt=%d",
 287                                      DRMID(phys_enc->parent), intr_idx,
 288                                      irq->hw_idx, irq->irq_idx,
 289                                      phys_enc->hw_pp->idx - PINGPONG_0,
 290                                      atomic_read(wait_info->atomic_cnt));
 291                }
 292        } else {
 293                ret = 0;
 294                trace_dpu_enc_irq_wait_success(DRMID(phys_enc->parent),
 295                        intr_idx, irq->hw_idx, irq->irq_idx,
 296                        phys_enc->hw_pp->idx - PINGPONG_0,
 297                        atomic_read(wait_info->atomic_cnt));
 298        }
 299
 300        return ret;
 301}
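/*
 * Illustrative sketch (not part of the driver): the missed-wakeup recovery
 * used above. When the timed wait expires, the raw interrupt status is read
 * back; if the hardware did fire, the handler is run by hand and the wait
 * is treated as a success instead of a timeout. All names here are
 * hypothetical stand-ins.
 */
struct example_irq_waiter {
	int raw_status;			/* models dpu_core_irq_read() */
	void (*handler)(void *ctx);	/* models irq->cb.func */
	void *ctx;
};

static inline int example_wait_outcome(struct example_irq_waiter *w,
				       bool timed_out)
{
	if (!timed_out)
		return 0;		/* normal wakeup */
	if (w->raw_status) {
		w->handler(w->ctx);	/* irq fired but the wakeup was lost */
		return 0;
	}
	return -ETIMEDOUT;		/* genuine timeout */
}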
 302
 303int dpu_encoder_helper_register_irq(struct dpu_encoder_phys *phys_enc,
 304                enum dpu_intr_idx intr_idx)
 305{
 306        struct dpu_encoder_irq *irq;
 307        int ret = 0;
 308
 309        if (!phys_enc || intr_idx >= INTR_IDX_MAX) {
 310                DPU_ERROR("invalid params\n");
 311                return -EINVAL;
 312        }
 313        irq = &phys_enc->irq[intr_idx];
 314
 315        if (irq->irq_idx >= 0) {
 316                DPU_DEBUG_PHYS(phys_enc,
 317                                "skipping already registered irq %s type %d\n",
 318                                irq->name, irq->intr_type);
 319                return 0;
 320        }
 321
 322        irq->irq_idx = dpu_core_irq_idx_lookup(phys_enc->dpu_kms,
 323                        irq->intr_type, irq->hw_idx);
 324        if (irq->irq_idx < 0) {
 325                DPU_ERROR_PHYS(phys_enc,
 326                        "failed to lookup IRQ index for %s type:%d\n",
 327                        irq->name, irq->intr_type);
 328                return -EINVAL;
 329        }
 330
 331        ret = dpu_core_irq_register_callback(phys_enc->dpu_kms, irq->irq_idx,
 332                        &irq->cb);
 333        if (ret) {
 334                DPU_ERROR_PHYS(phys_enc,
 335                        "failed to register IRQ callback for %s\n",
 336                        irq->name);
 337                irq->irq_idx = -EINVAL;
 338                return ret;
 339        }
 340
 341        ret = dpu_core_irq_enable(phys_enc->dpu_kms, &irq->irq_idx, 1);
 342        if (ret) {
 343                DRM_ERROR("enable failed id=%u, intr=%d, hw=%d, irq=%d",
 344                          DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
 345                          irq->irq_idx);
 346                dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
 347                                irq->irq_idx, &irq->cb);
 348                irq->irq_idx = -EINVAL;
 349                return ret;
 350        }
 351
 352        trace_dpu_enc_irq_register_success(DRMID(phys_enc->parent), intr_idx,
 353                                irq->hw_idx, irq->irq_idx);
 354
 355        return ret;
 356}
 357
 358int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
 359                enum dpu_intr_idx intr_idx)
 360{
 361        struct dpu_encoder_irq *irq;
 362        int ret;
 363
 364        if (!phys_enc) {
 365                DPU_ERROR("invalid encoder\n");
 366                return -EINVAL;
 367        }
 368        irq = &phys_enc->irq[intr_idx];
 369
  370        /* skip irqs that weren't registered, but flag the duplicate unregister */
 371        if (irq->irq_idx < 0) {
 372                DRM_ERROR("duplicate unregister id=%u, intr=%d, hw=%d, irq=%d",
 373                          DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
 374                          irq->irq_idx);
 375                return 0;
 376        }
 377
 378        ret = dpu_core_irq_disable(phys_enc->dpu_kms, &irq->irq_idx, 1);
 379        if (ret) {
 380                DRM_ERROR("disable failed id=%u, intr=%d, hw=%d, irq=%d ret=%d",
 381                          DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
 382                          irq->irq_idx, ret);
 383        }
 384
 385        ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms, irq->irq_idx,
 386                        &irq->cb);
 387        if (ret) {
 388                DRM_ERROR("unreg cb fail id=%u, intr=%d, hw=%d, irq=%d ret=%d",
 389                          DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
 390                          irq->irq_idx, ret);
 391        }
 392
 393        trace_dpu_enc_irq_unregister_success(DRMID(phys_enc->parent), intr_idx,
 394                                             irq->hw_idx, irq->irq_idx);
 395
 396        irq->irq_idx = -EINVAL;
 397
 398        return 0;
 399}
 400
 401void dpu_encoder_get_hw_resources(struct drm_encoder *drm_enc,
 402                                  struct dpu_encoder_hw_resources *hw_res)
 403{
 404        struct dpu_encoder_virt *dpu_enc = NULL;
 405        int i = 0;
 406
 407        dpu_enc = to_dpu_encoder_virt(drm_enc);
 408        DPU_DEBUG_ENC(dpu_enc, "\n");
 409
 410        /* Query resources used by phys encs, expected to be without overlap */
 411        memset(hw_res, 0, sizeof(*hw_res));
 412
 413        for (i = 0; i < dpu_enc->num_phys_encs; i++) {
 414                struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
 415
 416                if (phys && phys->ops.get_hw_resources)
 417                        phys->ops.get_hw_resources(phys, hw_res);
 418        }
 419}
 420
 421static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
 422{
 423        struct dpu_encoder_virt *dpu_enc = NULL;
 424        int i = 0;
 425
 426        if (!drm_enc) {
 427                DPU_ERROR("invalid encoder\n");
 428                return;
 429        }
 430
 431        dpu_enc = to_dpu_encoder_virt(drm_enc);
 432        DPU_DEBUG_ENC(dpu_enc, "\n");
 433
 434        mutex_lock(&dpu_enc->enc_lock);
 435
 436        for (i = 0; i < dpu_enc->num_phys_encs; i++) {
 437                struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
 438
 439                if (phys && phys->ops.destroy) {
 440                        phys->ops.destroy(phys);
 441                        --dpu_enc->num_phys_encs;
 442                        dpu_enc->phys_encs[i] = NULL;
 443                }
 444        }
 445
 446        if (dpu_enc->num_phys_encs)
 447                DPU_ERROR_ENC(dpu_enc, "expected 0 num_phys_encs not %d\n",
 448                                dpu_enc->num_phys_encs);
 449        dpu_enc->num_phys_encs = 0;
 450        mutex_unlock(&dpu_enc->enc_lock);
 451
 452        drm_encoder_cleanup(drm_enc);
 453        mutex_destroy(&dpu_enc->enc_lock);
 454}
 455
 456void dpu_encoder_helper_split_config(
 457                struct dpu_encoder_phys *phys_enc,
 458                enum dpu_intf interface)
 459{
 460        struct dpu_encoder_virt *dpu_enc;
 461        struct split_pipe_cfg cfg = { 0 };
 462        struct dpu_hw_mdp *hw_mdptop;
 463        struct msm_display_info *disp_info;
 464
 465        if (!phys_enc || !phys_enc->hw_mdptop || !phys_enc->parent) {
  466                DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
 467                return;
 468        }
 469
 470        dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
 471        hw_mdptop = phys_enc->hw_mdptop;
 472        disp_info = &dpu_enc->disp_info;
 473
 474        if (disp_info->intf_type != DRM_MODE_ENCODER_DSI)
 475                return;
 476
  477        /*
  478         * disable split modes since the encoder will be operating as the
  479         * only encoder, either for the entire use case (for example,
  480         * single DSI), or for this frame in the case of left/right-only
  481         * partial update.
  482         */
 483        if (phys_enc->split_role == ENC_ROLE_SOLO) {
 484                if (hw_mdptop->ops.setup_split_pipe)
 485                        hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
 486                return;
 487        }
 488
 489        cfg.en = true;
 490        cfg.mode = phys_enc->intf_mode;
 491        cfg.intf = interface;
 492
 493        if (cfg.en && phys_enc->ops.needs_single_flush &&
 494                        phys_enc->ops.needs_single_flush(phys_enc))
 495                cfg.split_flush_en = true;
 496
 497        if (phys_enc->split_role == ENC_ROLE_MASTER) {
 498                DPU_DEBUG_ENC(dpu_enc, "enable %d\n", cfg.en);
 499
 500                if (hw_mdptop->ops.setup_split_pipe)
 501                        hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
 502        }
 503}
 504
 505static void _dpu_encoder_adjust_mode(struct drm_connector *connector,
 506                struct drm_display_mode *adj_mode)
 507{
 508        struct drm_display_mode *cur_mode;
 509
 510        if (!connector || !adj_mode)
 511                return;
 512
 513        list_for_each_entry(cur_mode, &connector->modes, head) {
 514                if (cur_mode->vdisplay == adj_mode->vdisplay &&
 515                    cur_mode->hdisplay == adj_mode->hdisplay &&
 516                    drm_mode_vrefresh(cur_mode) == drm_mode_vrefresh(adj_mode)) {
 517                        adj_mode->private = cur_mode->private;
 518                        adj_mode->private_flags |= cur_mode->private_flags;
 519                }
 520        }
 521}
 522
 523static struct msm_display_topology dpu_encoder_get_topology(
 524                        struct dpu_encoder_virt *dpu_enc,
 525                        struct dpu_kms *dpu_kms,
 526                        struct drm_display_mode *mode)
 527{
 528        struct msm_display_topology topology;
 529        int i, intf_count = 0;
 530
 531        for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
 532                if (dpu_enc->phys_encs[i])
 533                        intf_count++;
 534
  535        /* Use split topology for vdisplay > MAX_VDISPLAY_SPLIT (1080) */
 536        topology.num_lm = (mode->vdisplay > MAX_VDISPLAY_SPLIT) ? 2 : 1;
 537        topology.num_enc = 0;
 538        topology.num_intf = intf_count;
 539
 540        return topology;
  541}

  542static int dpu_encoder_virt_atomic_check(
 543                struct drm_encoder *drm_enc,
 544                struct drm_crtc_state *crtc_state,
 545                struct drm_connector_state *conn_state)
 546{
 547        struct dpu_encoder_virt *dpu_enc;
 548        struct msm_drm_private *priv;
 549        struct dpu_kms *dpu_kms;
 550        const struct drm_display_mode *mode;
 551        struct drm_display_mode *adj_mode;
 552        struct msm_display_topology topology;
 553        int i = 0;
 554        int ret = 0;
 555
 556        if (!drm_enc || !crtc_state || !conn_state) {
 557                DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
  558                                drm_enc != NULL, crtc_state != NULL, conn_state != NULL);
 559                return -EINVAL;
 560        }
 561
 562        dpu_enc = to_dpu_encoder_virt(drm_enc);
 563        DPU_DEBUG_ENC(dpu_enc, "\n");
 564
 565        priv = drm_enc->dev->dev_private;
 566        dpu_kms = to_dpu_kms(priv->kms);
 567        mode = &crtc_state->mode;
 568        adj_mode = &crtc_state->adjusted_mode;
 569        trace_dpu_enc_atomic_check(DRMID(drm_enc));
 570
 571        /*
 572         * display drivers may populate private fields of the drm display mode
 573         * structure while registering possible modes of a connector with DRM.
 574         * These private fields are not populated back while DRM invokes
 575         * the mode_set callbacks. This module retrieves and populates the
 576         * private fields of the given mode.
 577         */
 578        _dpu_encoder_adjust_mode(conn_state->connector, adj_mode);
 579
 580        /* perform atomic check on the first physical encoder (master) */
 581        for (i = 0; i < dpu_enc->num_phys_encs; i++) {
 582                struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
 583
 584                if (phys && phys->ops.atomic_check)
 585                        ret = phys->ops.atomic_check(phys, crtc_state,
 586                                        conn_state);
 587                else if (phys && phys->ops.mode_fixup)
 588                        if (!phys->ops.mode_fixup(phys, mode, adj_mode))
 589                                ret = -EINVAL;
 590
 591                if (ret) {
 592                        DPU_ERROR_ENC(dpu_enc,
 593                                        "mode unsupported, phys idx %d\n", i);
 594                        break;
 595                }
 596        }
 597
 598        topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
 599
 600        /* Reserve dynamic resources now. Indicating AtomicTest phase */
 601        if (!ret) {
 602                /*
 603                 * Avoid reserving resources when mode set is pending. Topology
 604                 * info may not be available to complete reservation.
 605                 */
 606                if (drm_atomic_crtc_needs_modeset(crtc_state)
 607                                && dpu_enc->mode_set_complete) {
 608                        ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, crtc_state,
 609                                             topology, true);
 610                        dpu_enc->mode_set_complete = false;
 611                }
 612        }
 613
 614        trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags,
 615                        adj_mode->private_flags);
 616
 617        return ret;
 618}
 619
 620static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
 621                        struct msm_display_info *disp_info)
 622{
 623        struct dpu_vsync_source_cfg vsync_cfg = { 0 };
 624        struct msm_drm_private *priv;
 625        struct dpu_kms *dpu_kms;
 626        struct dpu_hw_mdp *hw_mdptop;
 627        struct drm_encoder *drm_enc;
 628        int i;
 629
 630        if (!dpu_enc || !disp_info) {
 631                DPU_ERROR("invalid param dpu_enc:%d or disp_info:%d\n",
 632                                        dpu_enc != NULL, disp_info != NULL);
 633                return;
 634        } else if (dpu_enc->num_phys_encs > ARRAY_SIZE(dpu_enc->hw_pp)) {
 635                DPU_ERROR("invalid num phys enc %d/%d\n",
 636                                dpu_enc->num_phys_encs,
 637                                (int) ARRAY_SIZE(dpu_enc->hw_pp));
 638                return;
 639        }
 640
 641        drm_enc = &dpu_enc->base;
  642        /* these pointers are checked in virt_enable_helper */
 643        priv = drm_enc->dev->dev_private;
 644
 645        dpu_kms = to_dpu_kms(priv->kms);
 646        if (!dpu_kms) {
 647                DPU_ERROR("invalid dpu_kms\n");
 648                return;
 649        }
 650
 651        hw_mdptop = dpu_kms->hw_mdp;
 652        if (!hw_mdptop) {
 653                DPU_ERROR("invalid mdptop\n");
 654                return;
 655        }
 656
 657        if (hw_mdptop->ops.setup_vsync_source &&
 658                        disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) {
 659                for (i = 0; i < dpu_enc->num_phys_encs; i++)
 660                        vsync_cfg.ppnumber[i] = dpu_enc->hw_pp[i]->idx;
 661
 662                vsync_cfg.pp_count = dpu_enc->num_phys_encs;
 663                if (disp_info->is_te_using_watchdog_timer)
 664                        vsync_cfg.vsync_source = DPU_VSYNC_SOURCE_WD_TIMER_0;
 665                else
 666                        vsync_cfg.vsync_source = DPU_VSYNC0_SOURCE_GPIO;
 667
 668                hw_mdptop->ops.setup_vsync_source(hw_mdptop, &vsync_cfg);
 669        }
 670}
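/*
 * Illustrative sketch (not part of the driver): the TE-source rule applied
 * above for command-mode panels. A panel whose tearing-effect line is not
 * usable falls back to a watchdog timer as the vsync source. The helper
 * name is hypothetical.
 */
static inline u32 example_pick_vsync_source(bool te_uses_wd_timer)
{
	return te_uses_wd_timer ? DPU_VSYNC_SOURCE_WD_TIMER_0 :
				  DPU_VSYNC0_SOURCE_GPIO;
}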
 671
 672static void _dpu_encoder_irq_control(struct drm_encoder *drm_enc, bool enable)
 673{
 674        struct dpu_encoder_virt *dpu_enc;
 675        int i;
 676
 677        if (!drm_enc) {
 678                DPU_ERROR("invalid encoder\n");
 679                return;
 680        }
 681
 682        dpu_enc = to_dpu_encoder_virt(drm_enc);
 683
 684        DPU_DEBUG_ENC(dpu_enc, "enable:%d\n", enable);
 685        for (i = 0; i < dpu_enc->num_phys_encs; i++) {
 686                struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
 687
 688                if (phys && phys->ops.irq_control)
 689                        phys->ops.irq_control(phys, enable);
 690        }
 691
 692}
 693
 694static void _dpu_encoder_resource_control_helper(struct drm_encoder *drm_enc,
 695                bool enable)
 696{
 697        struct msm_drm_private *priv;
 698        struct dpu_kms *dpu_kms;
 699        struct dpu_encoder_virt *dpu_enc;
 700
 701        dpu_enc = to_dpu_encoder_virt(drm_enc);
 702        priv = drm_enc->dev->dev_private;
 703        dpu_kms = to_dpu_kms(priv->kms);
 704
 705        trace_dpu_enc_rc_helper(DRMID(drm_enc), enable);
 706
 707        if (!dpu_enc->cur_master) {
 708                DPU_ERROR("encoder master not set\n");
 709                return;
 710        }
 711
 712        if (enable) {
 713                /* enable DPU core clks */
 714                pm_runtime_get_sync(&dpu_kms->pdev->dev);
 715
 716                /* enable all the irq */
 717                _dpu_encoder_irq_control(drm_enc, true);
 718
 719        } else {
 720                /* disable all the irq */
 721                _dpu_encoder_irq_control(drm_enc, false);
 722
 723                /* disable DPU core clks */
 724                pm_runtime_put_sync(&dpu_kms->pdev->dev);
 725        }
 726
 727}
 728
 729static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
 730                u32 sw_event)
 731{
 732        struct dpu_encoder_virt *dpu_enc;
 733        struct msm_drm_private *priv;
 734        bool is_vid_mode = false;
 735
 736        if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private ||
 737                        !drm_enc->crtc) {
 738                DPU_ERROR("invalid parameters\n");
 739                return -EINVAL;
 740        }
 741        dpu_enc = to_dpu_encoder_virt(drm_enc);
 742        priv = drm_enc->dev->dev_private;
 743        is_vid_mode = dpu_enc->disp_info.capabilities &
 744                                                MSM_DISPLAY_CAP_VID_MODE;
 745
  746        /*
  747         * when idle_pc is not supported, process only KICKOFF, PRE_STOP and
  748         * STOP events and return early for other events (i.e. wb display).
  749         */
 750        if (!dpu_enc->idle_pc_supported &&
 751                        (sw_event != DPU_ENC_RC_EVENT_KICKOFF &&
 752                        sw_event != DPU_ENC_RC_EVENT_STOP &&
 753                        sw_event != DPU_ENC_RC_EVENT_PRE_STOP))
 754                return 0;
 755
 756        trace_dpu_enc_rc(DRMID(drm_enc), sw_event, dpu_enc->idle_pc_supported,
 757                         dpu_enc->rc_state, "begin");
 758
 759        switch (sw_event) {
 760        case DPU_ENC_RC_EVENT_KICKOFF:
 761                /* cancel delayed off work, if any */
 762                if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
 763                        DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
 764                                        sw_event);
 765
 766                mutex_lock(&dpu_enc->rc_lock);
 767
 768                /* return if the resource control is already in ON state */
 769                if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
  770                        DRM_DEBUG_KMS("id:%u, sw_event:%d, rc in ON state\n",
 771                                      DRMID(drm_enc), sw_event);
 772                        mutex_unlock(&dpu_enc->rc_lock);
 773                        return 0;
 774                } else if (dpu_enc->rc_state != DPU_ENC_RC_STATE_OFF &&
 775                                dpu_enc->rc_state != DPU_ENC_RC_STATE_IDLE) {
  776                        DRM_DEBUG_KMS("id:%u, sw_event:%d, rc in state %d\n",
 777                                      DRMID(drm_enc), sw_event,
 778                                      dpu_enc->rc_state);
 779                        mutex_unlock(&dpu_enc->rc_lock);
 780                        return -EINVAL;
 781                }
 782
 783                if (is_vid_mode && dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE)
 784                        _dpu_encoder_irq_control(drm_enc, true);
 785                else
 786                        _dpu_encoder_resource_control_helper(drm_enc, true);
 787
 788                dpu_enc->rc_state = DPU_ENC_RC_STATE_ON;
 789
 790                trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
 791                                 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
 792                                 "kickoff");
 793
 794                mutex_unlock(&dpu_enc->rc_lock);
 795                break;
 796
 797        case DPU_ENC_RC_EVENT_FRAME_DONE:
  798                /*
  799                 * mutex lock is not used as this event happens at interrupt
  800                 * context. Locking is not required as the other events,
  801                 * like KICKOFF and STOP, do a wait-for-idle before executing
  802                 * resource_control.
  803                 */
 804                if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
  805                        DRM_DEBUG_KMS("id:%u, sw_event:%d, rc:%d - unexpected\n",
 806                                      DRMID(drm_enc), sw_event,
 807                                      dpu_enc->rc_state);
 808                        return -EINVAL;
 809                }
 810
 811                /*
 812                 * schedule off work item only when there are no
 813                 * frames pending
 814                 */
 815                if (dpu_crtc_frame_pending(drm_enc->crtc) > 1) {
  816                        DRM_DEBUG_KMS("id:%u skip schedule work\n",
 817                                      DRMID(drm_enc));
 818                        return 0;
 819                }
 820
 821                queue_delayed_work(priv->wq, &dpu_enc->delayed_off_work,
 822                                   msecs_to_jiffies(dpu_enc->idle_timeout));
 823
 824                trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
 825                                 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
 826                                 "frame done");
 827                break;
 828
 829        case DPU_ENC_RC_EVENT_PRE_STOP:
 830                /* cancel delayed off work, if any */
 831                if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
 832                        DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
 833                                        sw_event);
 834
 835                mutex_lock(&dpu_enc->rc_lock);
 836
 837                if (is_vid_mode &&
 838                          dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
 839                        _dpu_encoder_irq_control(drm_enc, true);
 840                }
  841                /* skip if already OFF or IDLE; resources are off already */
 842                else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF ||
 843                                dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
 844                        DRM_DEBUG_KMS("id:%u, sw_event:%d, rc in %d state\n",
 845                                      DRMID(drm_enc), sw_event,
 846                                      dpu_enc->rc_state);
 847                        mutex_unlock(&dpu_enc->rc_lock);
 848                        return 0;
 849                }
 850
 851                dpu_enc->rc_state = DPU_ENC_RC_STATE_PRE_OFF;
 852
 853                trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
 854                                 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
 855                                 "pre stop");
 856
 857                mutex_unlock(&dpu_enc->rc_lock);
 858                break;
 859
 860        case DPU_ENC_RC_EVENT_STOP:
 861                mutex_lock(&dpu_enc->rc_lock);
 862
 863                /* return if the resource control is already in OFF state */
 864                if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF) {
 865                        DRM_DEBUG_KMS("id: %u, sw_event:%d, rc in OFF state\n",
 866                                      DRMID(drm_enc), sw_event);
 867                        mutex_unlock(&dpu_enc->rc_lock);
 868                        return 0;
 869                } else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
 870                        DRM_ERROR("id: %u, sw_event:%d, rc in state %d\n",
 871                                  DRMID(drm_enc), sw_event, dpu_enc->rc_state);
 872                        mutex_unlock(&dpu_enc->rc_lock);
 873                        return -EINVAL;
 874                }
 875
  876                /*
  877                 * expect to arrive here only from the PRE_OFF or IDLE states;
  878                 * in the IDLE state the resources are already disabled
  879                 */
 880                if (dpu_enc->rc_state == DPU_ENC_RC_STATE_PRE_OFF)
 881                        _dpu_encoder_resource_control_helper(drm_enc, false);
 882
 883                dpu_enc->rc_state = DPU_ENC_RC_STATE_OFF;
 884
 885                trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
 886                                 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
 887                                 "stop");
 888
 889                mutex_unlock(&dpu_enc->rc_lock);
 890                break;
 891
 892        case DPU_ENC_RC_EVENT_ENTER_IDLE:
 893                mutex_lock(&dpu_enc->rc_lock);
 894
 895                if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
 896                        DRM_ERROR("id: %u, sw_event:%d, rc:%d !ON state\n",
 897                                  DRMID(drm_enc), sw_event, dpu_enc->rc_state);
 898                        mutex_unlock(&dpu_enc->rc_lock);
 899                        return 0;
 900                }
 901
 902                /*
 903                 * if we are in ON but a frame was just kicked off,
 904                 * ignore the IDLE event, it's probably a stale timer event
 905                 */
 906                if (dpu_enc->frame_busy_mask[0]) {
 907                        DRM_ERROR("id:%u, sw_event:%d, rc:%d frame pending\n",
 908                                  DRMID(drm_enc), sw_event, dpu_enc->rc_state);
 909                        mutex_unlock(&dpu_enc->rc_lock);
 910                        return 0;
 911                }
 912
 913                if (is_vid_mode)
 914                        _dpu_encoder_irq_control(drm_enc, false);
 915                else
 916                        _dpu_encoder_resource_control_helper(drm_enc, false);
 917
 918                dpu_enc->rc_state = DPU_ENC_RC_STATE_IDLE;
 919
 920                trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
 921                                 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
 922                                 "idle");
 923
 924                mutex_unlock(&dpu_enc->rc_lock);
 925                break;
 926
 927        default:
 928                DRM_ERROR("id:%u, unexpected sw_event: %d\n", DRMID(drm_enc),
 929                          sw_event);
 930                trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
 931                                 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
 932                                 "error");
 933                break;
 934        }
 935
 936        trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
 937                         dpu_enc->idle_pc_supported, dpu_enc->rc_state,
 938                         "end");
 939        return 0;
 940}
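/*
 * Illustrative sketch (not part of the driver): the FRAME_DONE policy from
 * the switch above, reduced to a decision function. The delayed-off work is
 * queued only when at most one frame is still pending; KICKOFF and PRE_STOP
 * cancel it again before touching the state. The helper name is
 * hypothetical.
 */
static inline bool example_should_arm_idle_timer(u32 sw_event,
						 int frames_pending)
{
	if (sw_event != DPU_ENC_RC_EVENT_FRAME_DONE)
		return false;	/* only FRAME_DONE arms the idle timeout */
	return frames_pending <= 1;
}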
 941
 942static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
 943                                      struct drm_display_mode *mode,
 944                                      struct drm_display_mode *adj_mode)
 945{
 946        struct dpu_encoder_virt *dpu_enc;
 947        struct msm_drm_private *priv;
 948        struct dpu_kms *dpu_kms;
 949        struct list_head *connector_list;
 950        struct drm_connector *conn = NULL, *conn_iter;
 951        struct drm_crtc *drm_crtc;
 952        struct dpu_crtc_state *cstate;
 953        struct dpu_rm_hw_iter hw_iter;
 954        struct msm_display_topology topology;
 955        struct dpu_hw_ctl *hw_ctl[MAX_CHANNELS_PER_ENC] = { NULL };
 956        struct dpu_hw_mixer *hw_lm[MAX_CHANNELS_PER_ENC] = { NULL };
 957        int num_lm = 0, num_ctl = 0;
 958        int i, j, ret;
 959
 960        if (!drm_enc) {
 961                DPU_ERROR("invalid encoder\n");
 962                return;
 963        }
 964
 965        dpu_enc = to_dpu_encoder_virt(drm_enc);
 966        DPU_DEBUG_ENC(dpu_enc, "\n");
 967
 968        priv = drm_enc->dev->dev_private;
 969        dpu_kms = to_dpu_kms(priv->kms);
 970        connector_list = &dpu_kms->dev->mode_config.connector_list;
 971
 972        trace_dpu_enc_mode_set(DRMID(drm_enc));
 973
 974        list_for_each_entry(conn_iter, connector_list, head)
 975                if (conn_iter->encoder == drm_enc)
 976                        conn = conn_iter;
 977
 978        if (!conn) {
 979                DPU_ERROR_ENC(dpu_enc, "failed to find attached connector\n");
 980                return;
 981        } else if (!conn->state) {
 982                DPU_ERROR_ENC(dpu_enc, "invalid connector state\n");
 983                return;
 984        }
 985
 986        drm_for_each_crtc(drm_crtc, drm_enc->dev)
 987                if (drm_crtc->state->encoder_mask & drm_encoder_mask(drm_enc))
 988                        break;
 989
 990        topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
 991
 992        /* Reserve dynamic resources now. Indicating non-AtomicTest phase */
 993        ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, drm_crtc->state,
 994                             topology, false);
 995        if (ret) {
 996                DPU_ERROR_ENC(dpu_enc,
 997                                "failed to reserve hw resources, %d\n", ret);
 998                return;
 999        }
1000
1001        dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_PINGPONG);
1002        for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
1003                dpu_enc->hw_pp[i] = NULL;
1004                if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
1005                        break;
1006                dpu_enc->hw_pp[i] = (struct dpu_hw_pingpong *) hw_iter.hw;
1007        }
1008
1009        dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_CTL);
1010        for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
1011                if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
1012                        break;
1013                hw_ctl[i] = (struct dpu_hw_ctl *)hw_iter.hw;
1014                num_ctl++;
1015        }
1016
1017        dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_LM);
1018        for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
1019                if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
1020                        break;
1021                hw_lm[i] = (struct dpu_hw_mixer *)hw_iter.hw;
1022                num_lm++;
1023        }
1024
1025        cstate = to_dpu_crtc_state(drm_crtc->state);
1026
1027        for (i = 0; i < num_lm; i++) {
1028                int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);
1029
1030                cstate->mixers[i].hw_lm = hw_lm[i];
1031                cstate->mixers[i].lm_ctl = hw_ctl[ctl_idx];
1032        }
1033
1034        cstate->num_mixers = num_lm;
1035
1036        for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1037                struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1038
1039                if (phys) {
1040                        if (!dpu_enc->hw_pp[i]) {
1041                                DPU_ERROR_ENC(dpu_enc, "no pp block assigned"
 1042                                             " at idx: %d\n", i);
1043                                goto error;
1044                        }
1045
1046                        if (!hw_ctl[i]) {
1047                                DPU_ERROR_ENC(dpu_enc, "no ctl block assigned"
 1048                                             " at idx: %d\n", i);
1049                                goto error;
1050                        }
1051
1052                        phys->hw_pp = dpu_enc->hw_pp[i];
1053                        phys->hw_ctl = hw_ctl[i];
1054
1055                        dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id,
1056                                            DPU_HW_BLK_INTF);
1057                        for (j = 0; j < MAX_CHANNELS_PER_ENC; j++) {
1058                                struct dpu_hw_intf *hw_intf;
1059
1060                                if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
1061                                        break;
1062
1063                                hw_intf = (struct dpu_hw_intf *)hw_iter.hw;
1064                                if (hw_intf->idx == phys->intf_idx)
1065                                        phys->hw_intf = hw_intf;
1066                        }
1067
1068                        if (!phys->hw_intf) {
1069                                DPU_ERROR_ENC(dpu_enc,
1070                                              "no intf block assigned at idx: %d\n",
1071                                              i);
1072                                goto error;
1073                        }
1074
1075                        phys->connector = conn->state->connector;
1076                        if (phys->ops.mode_set)
1077                                phys->ops.mode_set(phys, mode, adj_mode);
1078                }
1079        }
1080
1081        dpu_enc->mode_set_complete = true;
1082
1083error:
1084        dpu_rm_release(&dpu_kms->rm, drm_enc);
1085}
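/*
 * Illustrative sketch (not part of the driver): the mixer-to-CTL pairing
 * rule used in the mode_set path above. With fewer reserved CTL paths than
 * layer mixers, the trailing mixers all share the last CTL. The helper name
 * is hypothetical.
 */
static inline int example_ctl_for_mixer(int lm_idx, int num_ctl)
{
	/* 1:1 while CTLs last; extra mixers ride the final CTL */
	return (lm_idx < num_ctl) ? lm_idx : (num_ctl - 1);
}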
1086
1087static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
1088{
1089        struct dpu_encoder_virt *dpu_enc = NULL;
1090        struct msm_drm_private *priv;
1091        struct dpu_kms *dpu_kms;
1092
1093        if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
1094                DPU_ERROR("invalid parameters\n");
1095                return;
1096        }
1097
1098        priv = drm_enc->dev->dev_private;
1099        dpu_kms = to_dpu_kms(priv->kms);
1100        if (!dpu_kms) {
1101                DPU_ERROR("invalid dpu_kms\n");
1102                return;
1103        }
1104
1105        dpu_enc = to_dpu_encoder_virt(drm_enc);
1106        if (!dpu_enc || !dpu_enc->cur_master) {
1107                DPU_ERROR("invalid dpu encoder/master\n");
1108                return;
1109        }
1110
1111        if (dpu_enc->cur_master->hw_mdptop &&
1112                        dpu_enc->cur_master->hw_mdptop->ops.reset_ubwc)
1113                dpu_enc->cur_master->hw_mdptop->ops.reset_ubwc(
1114                                dpu_enc->cur_master->hw_mdptop,
1115                                dpu_kms->catalog);
1116
1117        _dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);
1118}
1119
1120void dpu_encoder_virt_runtime_resume(struct drm_encoder *drm_enc)
1121{
1122        struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1123
1124        mutex_lock(&dpu_enc->enc_lock);
1125
1126        if (!dpu_enc->enabled)
1127                goto out;
1128
1129        if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.restore)
1130                dpu_enc->cur_slave->ops.restore(dpu_enc->cur_slave);
1131        if (dpu_enc->cur_master && dpu_enc->cur_master->ops.restore)
1132                dpu_enc->cur_master->ops.restore(dpu_enc->cur_master);
1133
1134        _dpu_encoder_virt_enable_helper(drm_enc);
1135
1136out:
1137        mutex_unlock(&dpu_enc->enc_lock);
1138}
1139
1140static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
1141{
1142        struct dpu_encoder_virt *dpu_enc = NULL;
1143        int ret = 0;
1144        struct drm_display_mode *cur_mode = NULL;
1145
1146        if (!drm_enc) {
1147                DPU_ERROR("invalid encoder\n");
1148                return;
1149        }
1150        dpu_enc = to_dpu_encoder_virt(drm_enc);
1151
1152        mutex_lock(&dpu_enc->enc_lock);
1153        cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;
1154
1155        trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
1156                             cur_mode->vdisplay);
1157
1158        /* always enable slave encoder before master */
1159        if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.enable)
1160                dpu_enc->cur_slave->ops.enable(dpu_enc->cur_slave);
1161
1162        if (dpu_enc->cur_master && dpu_enc->cur_master->ops.enable)
1163                dpu_enc->cur_master->ops.enable(dpu_enc->cur_master);
1164
1165        ret = dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
1166        if (ret) {
1167                DPU_ERROR_ENC(dpu_enc, "dpu resource control failed: %d\n",
1168                                ret);
1169                goto out;
1170        }
1171
1172        _dpu_encoder_virt_enable_helper(drm_enc);
1173
1174        dpu_enc->enabled = true;
1175
1176out:
1177        mutex_unlock(&dpu_enc->enc_lock);
1178}
1179
1180static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
1181{
1182        struct dpu_encoder_virt *dpu_enc = NULL;
1183        struct msm_drm_private *priv;
1184        struct dpu_kms *dpu_kms;
1185        struct drm_display_mode *mode;
1186        int i = 0;
1187
1188        if (!drm_enc) {
1189                DPU_ERROR("invalid encoder\n");
1190                return;
1191        } else if (!drm_enc->dev) {
1192                DPU_ERROR("invalid dev\n");
1193                return;
1194        } else if (!drm_enc->dev->dev_private) {
1195                DPU_ERROR("invalid dev_private\n");
1196                return;
1197        }
1198
1199        dpu_enc = to_dpu_encoder_virt(drm_enc);
1200        DPU_DEBUG_ENC(dpu_enc, "\n");
1201
1202        mutex_lock(&dpu_enc->enc_lock);
1203        dpu_enc->enabled = false;
1204
1205        mode = &drm_enc->crtc->state->adjusted_mode;
1206
1207        priv = drm_enc->dev->dev_private;
1208        dpu_kms = to_dpu_kms(priv->kms);
1209
1210        trace_dpu_enc_disable(DRMID(drm_enc));
1211
1212        /* wait for idle */
1213        dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
1214
1215        dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP);
1216
1217        for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1218                struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1219
1220                if (phys && phys->ops.disable)
1221                        phys->ops.disable(phys);
1222        }
1223
1224        /* after phys waits for frame-done, should be no more frames pending */
1225        if (atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
1226                DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id);
1227                del_timer_sync(&dpu_enc->frame_done_timer);
1228        }
1229
1230        dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP);
1231
1232        for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1233                if (dpu_enc->phys_encs[i])
1234                        dpu_enc->phys_encs[i]->connector = NULL;
1235        }
1236
1237        DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
1238
1239        dpu_rm_release(&dpu_kms->rm, drm_enc);
1240
1241        mutex_unlock(&dpu_enc->enc_lock);
1242}
1243
1244static enum dpu_intf dpu_encoder_get_intf(struct dpu_mdss_cfg *catalog,
1245                enum dpu_intf_type type, u32 controller_id)
1246{
1247        int i = 0;
1248
1249        for (i = 0; i < catalog->intf_count; i++) {
1250                if (catalog->intf[i].type == type
1251                    && catalog->intf[i].controller_id == controller_id) {
1252                        return catalog->intf[i].id;
1253                }
1254        }
1255
1256        return INTF_MAX;
1257}
1258
1259static void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
1260                struct dpu_encoder_phys *phy_enc)
1261{
1262        struct dpu_encoder_virt *dpu_enc = NULL;
1263        unsigned long lock_flags;
1264
1265        if (!drm_enc || !phy_enc)
1266                return;
1267
1268        DPU_ATRACE_BEGIN("encoder_vblank_callback");
1269        dpu_enc = to_dpu_encoder_virt(drm_enc);
1270
1271        spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1272        if (dpu_enc->crtc)
1273                dpu_crtc_vblank_callback(dpu_enc->crtc);
1274        spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1275
1276        atomic_inc(&phy_enc->vsync_cnt);
1277        DPU_ATRACE_END("encoder_vblank_callback");
1278}
1279
1280static void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
1281                struct dpu_encoder_phys *phy_enc)
1282{
1283        if (!phy_enc)
1284                return;
1285
1286        DPU_ATRACE_BEGIN("encoder_underrun_callback");
1287        atomic_inc(&phy_enc->underrun_cnt);
1288        trace_dpu_enc_underrun_cb(DRMID(drm_enc),
1289                                  atomic_read(&phy_enc->underrun_cnt));
1290        DPU_ATRACE_END("encoder_underrun_callback");
1291}
1292
1293void dpu_encoder_assign_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc)
1294{
1295        struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1296        unsigned long lock_flags;
1297
1298        spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1299        /* crtc should always be cleared before re-assigning */
1300        WARN_ON(crtc && dpu_enc->crtc);
1301        dpu_enc->crtc = crtc;
1302        spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1303}
1304
1305void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc,
1306                                        struct drm_crtc *crtc, bool enable)
1307{
1308        struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1309        unsigned long lock_flags;
1310        int i;
1311
1312        trace_dpu_enc_vblank_cb(DRMID(drm_enc), enable);
1313
1314        spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1315        if (dpu_enc->crtc != crtc) {
1316                spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1317                return;
1318        }
1319        spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1320
1321        for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1322                struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1323
1324                if (phys && phys->ops.control_vblank_irq)
1325                        phys->ops.control_vblank_irq(phys, enable);
1326        }
1327}
1328
1329void dpu_encoder_register_frame_event_callback(struct drm_encoder *drm_enc,
1330                void (*frame_event_cb)(void *, u32 event),
1331                void *frame_event_cb_data)
1332{
1333        struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1334        unsigned long lock_flags;
1335        bool enable;
1336
 1337        if (!drm_enc) {
 1338                DPU_ERROR("invalid encoder\n");
 1339                return;
 1340        }
 1341
 1342        enable = frame_event_cb ? true : false;
 1343        trace_dpu_enc_frame_event_cb(DRMID(drm_enc), enable);
1344
1345        spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1346        dpu_enc->crtc_frame_event_cb = frame_event_cb;
1347        dpu_enc->crtc_frame_event_cb_data = frame_event_cb_data;
1348        spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1349}
1350
1351static void dpu_encoder_frame_done_callback(
1352                struct drm_encoder *drm_enc,
1353                struct dpu_encoder_phys *ready_phys, u32 event)
1354{
1355        struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1356        unsigned int i;
1357
1358        if (event & (DPU_ENCODER_FRAME_EVENT_DONE
1359                        | DPU_ENCODER_FRAME_EVENT_ERROR
1360                        | DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
1361
1362                if (!dpu_enc->frame_busy_mask[0]) {
                        /*
                         * suppress frame_done without waiter,
                         * likely autorefresh
                         */
1367                        trace_dpu_enc_frame_done_cb_not_busy(DRMID(drm_enc),
1368                                        event, ready_phys->intf_idx);
1369                        return;
1370                }
1371
1372                /* One of the physical encoders has become idle */
1373                for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1374                        if (dpu_enc->phys_encs[i] == ready_phys) {
1375                                trace_dpu_enc_frame_done_cb(DRMID(drm_enc), i,
1376                                                dpu_enc->frame_busy_mask[0]);
1377                                clear_bit(i, dpu_enc->frame_busy_mask);
1378                        }
1379                }
1380
1381                if (!dpu_enc->frame_busy_mask[0]) {
1382                        atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
1383                        del_timer(&dpu_enc->frame_done_timer);
1384
1385                        dpu_encoder_resource_control(drm_enc,
1386                                        DPU_ENC_RC_EVENT_FRAME_DONE);
1387
1388                        if (dpu_enc->crtc_frame_event_cb)
1389                                dpu_enc->crtc_frame_event_cb(
1390                                        dpu_enc->crtc_frame_event_cb_data,
1391                                        event);
1392                }
1393        } else {
1394                if (dpu_enc->crtc_frame_event_cb)
1395                        dpu_enc->crtc_frame_event_cb(
1396                                dpu_enc->crtc_frame_event_cb_data, event);
1397        }
1398}
1399
1400static void dpu_encoder_off_work(struct work_struct *work)
1401{
1402        struct dpu_encoder_virt *dpu_enc = container_of(work,
1403                        struct dpu_encoder_virt, delayed_off_work.work);
1404
1410        dpu_encoder_resource_control(&dpu_enc->base,
1411                                                DPU_ENC_RC_EVENT_ENTER_IDLE);
1412
1413        dpu_encoder_frame_done_callback(&dpu_enc->base, NULL,
1414                                DPU_ENCODER_FRAME_EVENT_IDLE);
1415}
1416
/**
 * _dpu_encoder_trigger_flush - trigger flush for a physical encoder
 * @drm_enc: Pointer to drm encoder structure
 * @phys: Pointer to physical encoder structure
 * @extra_flush_bits: Additional bit mask to include in flush trigger
 * @async: true for asynchronous commits, which skip frame-done accounting
 */
1423static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
1424                struct dpu_encoder_phys *phys, uint32_t extra_flush_bits,
1425                bool async)
1426{
1427        struct dpu_hw_ctl *ctl;
1428        int pending_kickoff_cnt;
1429        u32 ret = UINT_MAX;
1430
1431        if (!drm_enc || !phys) {
                DPU_ERROR("invalid argument(s), drm_enc %d, phys_enc %d\n",
                                drm_enc != NULL, phys != NULL);
1434                return;
1435        }
1436
1437        if (!phys->hw_pp) {
1438                DPU_ERROR("invalid pingpong hw\n");
1439                return;
1440        }
1441
1442        ctl = phys->hw_ctl;
1443        if (!ctl || !ctl->ops.trigger_flush) {
1444                DPU_ERROR("missing trigger cb\n");
1445                return;
1446        }
1447
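        /*
         * Synchronous commits expect a matching frame-done, so count this
         * kickoff as pending; asynchronous commits only sample the current
         * count for the trace below.
         */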
1448        if (!async)
1449                pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);
1450        else
1451                pending_kickoff_cnt = atomic_read(&phys->pending_kickoff_cnt);
1452
1453        if (extra_flush_bits && ctl->ops.update_pending_flush)
1454                ctl->ops.update_pending_flush(ctl, extra_flush_bits);
1455
1456        ctl->ops.trigger_flush(ctl);
1457
1458        if (ctl->ops.get_pending_flush)
1459                ret = ctl->ops.get_pending_flush(ctl);
1460
1461        trace_dpu_enc_trigger_flush(DRMID(drm_enc), phys->intf_idx,
1462                                    pending_kickoff_cnt, ctl->idx,
1463                                    extra_flush_bits, ret);
1464}
1465
/**
 * _dpu_encoder_trigger_start - trigger start for a physical encoder
 * @phys: Pointer to physical encoder structure
 */
1470static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
1471{
1472        if (!phys) {
1473                DPU_ERROR("invalid argument(s)\n");
1474                return;
1475        }
1476
1477        if (!phys->hw_pp) {
1478                DPU_ERROR("invalid pingpong hw\n");
1479                return;
1480        }
1481
1482        if (phys->ops.trigger_start && phys->enable_state != DPU_ENC_DISABLED)
1483                phys->ops.trigger_start(phys);
1484}
1485
1486void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc)
1487{
1488        struct dpu_hw_ctl *ctl;
1489
1490        if (!phys_enc) {
1491                DPU_ERROR("invalid encoder\n");
1492                return;
1493        }
1494
1495        ctl = phys_enc->hw_ctl;
1496        if (ctl && ctl->ops.trigger_start) {
1497                ctl->ops.trigger_start(ctl);
1498                trace_dpu_enc_trigger_start(DRMID(phys_enc->parent), ctl->idx);
1499        }
1500}
1501
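/*
 * Wait for info->atomic_cnt to drop to zero, re-arming the wait whenever it
 * times out while the wall-clock budget (info->timeout_ms) has not yet been
 * exhausted. Returns the last wait_event_timeout() result: 0 on timeout,
 * otherwise the remaining jiffies.
 */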
1502static int dpu_encoder_helper_wait_event_timeout(
1503                int32_t drm_id,
1504                int32_t hw_id,
1505                struct dpu_encoder_wait_info *info)
1506{
1507        int rc = 0;
1508        s64 expected_time = ktime_to_ms(ktime_get()) + info->timeout_ms;
        s64 wait_jiffies = msecs_to_jiffies(info->timeout_ms);
1510        s64 time;
1511
1512        do {
                rc = wait_event_timeout(*(info->wq),
                                atomic_read(info->atomic_cnt) == 0, wait_jiffies);
1515                time = ktime_to_ms(ktime_get());
1516
1517                trace_dpu_enc_wait_event_timeout(drm_id, hw_id, rc, time,
1518                                                 expected_time,
1519                                                 atomic_read(info->atomic_cnt));
        /*
         * If we timed out (rc == 0) but the count is still nonzero and the
         * wall-clock deadline has not yet passed, wait again.
         */
1521        } while (atomic_read(info->atomic_cnt) && (rc == 0) &&
1522                        (time < expected_time));
1523
1524        return rc;
1525}
1526
1527static void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
1528{
1529        struct dpu_encoder_virt *dpu_enc;
1530        struct dpu_hw_ctl *ctl;
1531        int rc;
1532
1533        if (!phys_enc) {
1534                DPU_ERROR("invalid encoder\n");
1535                return;
1536        }
1537        dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
1538        ctl = phys_enc->hw_ctl;
1539
1540        if (!ctl || !ctl->ops.reset)
1541                return;
1542
1543        DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(phys_enc->parent),
1544                      ctl->idx);
1545
1546        rc = ctl->ops.reset(ctl);
1547        if (rc)
                DPU_ERROR_ENC(dpu_enc, "ctl %d reset failure\n", ctl->idx);
1549
1550        phys_enc->enable_state = DPU_ENC_ENABLED;
1551}
1552
/**
 * _dpu_encoder_kickoff_phys - handle physical encoder kickoff
 * @dpu_enc: Pointer to virtual encoder structure
 * @async: true for asynchronous commits, which skip frame-done accounting
 *
 * Iterate through the physical encoders and perform consolidated flush
 * and/or control start triggering as needed. This is done in the virtual
 * encoder rather than the individual physical ones in order to handle
 * use cases that require visibility into multiple physical encoders at
 * a time.
 */
1562static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc,
1563                                      bool async)
1564{
1565        struct dpu_hw_ctl *ctl;
1566        uint32_t i, pending_flush;
1567        unsigned long lock_flags;
1568
1569        if (!dpu_enc) {
1570                DPU_ERROR("invalid encoder\n");
1571                return;
1572        }
1573
1574        pending_flush = 0x0;
1575
1576        /* update pending counts and trigger kickoff ctl flush atomically */
1577        spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1578
1579        /* don't perform flush/start operations for slave encoders */
1580        for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1581                struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1582
1583                if (!phys || phys->enable_state == DPU_ENC_DISABLED)
1584                        continue;
1585
1586                ctl = phys->hw_ctl;
1587                if (!ctl)
1588                        continue;
1589
1590                /*
1591                 * This is cleared in frame_done worker, which isn't invoked
1592                 * for async commits. So don't set this for async, since it'll
1593                 * roll over to the next commit.
1594                 */
1595                if (!async && phys->split_role != ENC_ROLE_SLAVE)
1596                        set_bit(i, dpu_enc->frame_busy_mask);
1597
1598                if (!phys->ops.needs_single_flush ||
1599                                !phys->ops.needs_single_flush(phys))
1600                        _dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0,
1601                                                   async);
1602                else if (ctl->ops.get_pending_flush)
1603                        pending_flush |= ctl->ops.get_pending_flush(ctl);
1604        }
1605
1606        /* for split flush, combine pending flush masks and send to master */
1607        if (pending_flush && dpu_enc->cur_master) {
1608                _dpu_encoder_trigger_flush(
1609                                &dpu_enc->base,
1610                                dpu_enc->cur_master,
1611                                pending_flush, async);
1612        }
1613
1614        _dpu_encoder_trigger_start(dpu_enc->cur_master);
1615
1616        spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1617}
1618
1619void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
1620{
1621        struct dpu_encoder_virt *dpu_enc;
1622        struct dpu_encoder_phys *phys;
1623        unsigned int i;
1624        struct dpu_hw_ctl *ctl;
1625        struct msm_display_info *disp_info;
1626
1627        if (!drm_enc) {
1628                DPU_ERROR("invalid encoder\n");
1629                return;
1630        }
1631        dpu_enc = to_dpu_encoder_virt(drm_enc);
1632        disp_info = &dpu_enc->disp_info;
1633
1634        for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1635                phys = dpu_enc->phys_encs[i];
1636
1637                if (phys && phys->hw_ctl) {
1638                        ctl = phys->hw_ctl;
1639                        if (ctl->ops.clear_pending_flush)
1640                                ctl->ops.clear_pending_flush(ctl);
1641
                        /* update only for command mode primary ctl */
                        if ((phys == dpu_enc->cur_master) &&
                            (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) &&
                            ctl->ops.trigger_pending)
                                ctl->ops.trigger_pending(ctl);
1647                }
1648        }
1649}
1650
1651static u32 _dpu_encoder_calculate_linetime(struct dpu_encoder_virt *dpu_enc,
1652                struct drm_display_mode *mode)
1653{
1654        u64 pclk_rate;
1655        u32 pclk_period;
1656        u32 line_time;
1657
1658        /*
1659         * For linetime calculation, only operate on master encoder.
1660         */
1661        if (!dpu_enc->cur_master)
1662                return 0;
1663
1664        if (!dpu_enc->cur_master->ops.get_line_count) {
1665                DPU_ERROR("get_line_count function not defined\n");
1666                return 0;
1667        }
1668
1669        pclk_rate = mode->clock; /* pixel clock in kHz */
1670        if (pclk_rate == 0) {
1671                DPU_ERROR("pclk is 0, cannot calculate line time\n");
1672                return 0;
1673        }
1674
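        /* mode->clock is in kHz, so the rounded-up period below is in ps */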
1675        pclk_period = DIV_ROUND_UP_ULL(1000000000ull, pclk_rate);
1676        if (pclk_period == 0) {
1677                DPU_ERROR("pclk period is 0\n");
1678                return 0;
1679        }
1680
1681        /*
1682         * Line time calculation based on Pixel clock and HTOTAL.
1683         * Final unit is in ns.
1684         */
1685        line_time = (pclk_period * mode->htotal) / 1000;
1686        if (line_time == 0) {
1687                DPU_ERROR("line time calculation is 0\n");
1688                return 0;
1689        }
1690
        DPU_DEBUG_ENC(dpu_enc,
                        "clk_rate=%llukHz, clk_period=%d, linetime=%dns\n",
                        pclk_rate, pclk_period, line_time);
1694
1695        return line_time;
1696}
1697
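/*
 * Estimate the ktime of the next vsync from the current scanline position.
 * As a rough illustrative example, a 1080p60 mode (pixel clock 148500 kHz,
 * htotal 2200, vtotal 1125) gives a pclk period of 6735 ps and a line time
 * of ~14.8 us, so a read at scanline 0 yields a wakeup ~16.7 ms out.
 */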
1698static int _dpu_encoder_wakeup_time(struct drm_encoder *drm_enc,
1699                ktime_t *wakeup_time)
1700{
1701        struct drm_display_mode *mode;
1702        struct dpu_encoder_virt *dpu_enc;
1703        u32 cur_line;
1704        u32 line_time;
1705        u32 vtotal, time_to_vsync;
1706        ktime_t cur_time;
1707
1708        dpu_enc = to_dpu_encoder_virt(drm_enc);
1709
1710        if (!drm_enc->crtc || !drm_enc->crtc->state) {
1711                DPU_ERROR("crtc/crtc state object is NULL\n");
1712                return -EINVAL;
1713        }
1714        mode = &drm_enc->crtc->state->adjusted_mode;
1715
1716        line_time = _dpu_encoder_calculate_linetime(dpu_enc, mode);
1717        if (!line_time)
1718                return -EINVAL;
1719
1720        cur_line = dpu_enc->cur_master->ops.get_line_count(dpu_enc->cur_master);
1721
1722        vtotal = mode->vtotal;
1723        if (cur_line >= vtotal)
1724                time_to_vsync = line_time * vtotal;
1725        else
1726                time_to_vsync = line_time * (vtotal - cur_line);
1727
1728        if (time_to_vsync == 0) {
1729                DPU_ERROR("time to vsync should not be zero, vtotal=%d\n",
1730                                vtotal);
1731                return -EINVAL;
1732        }
1733
1734        cur_time = ktime_get();
1735        *wakeup_time = ktime_add_ns(cur_time, time_to_vsync);
1736
1737        DPU_DEBUG_ENC(dpu_enc,
1738                        "cur_line=%u vtotal=%u time_to_vsync=%u, cur_time=%lld, wakeup_time=%lld\n",
1739                        cur_line, vtotal, time_to_vsync,
1740                        ktime_to_ms(cur_time),
1741                        ktime_to_ms(*wakeup_time));
1742        return 0;
1743}
1744
1745static void dpu_encoder_vsync_event_handler(struct timer_list *t)
1746{
1747        struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
1748                        vsync_event_timer);
1749        struct drm_encoder *drm_enc = &dpu_enc->base;
1750        struct msm_drm_private *priv;
1751        struct msm_drm_thread *event_thread;
1752
1753        if (!drm_enc->dev || !drm_enc->dev->dev_private ||
1754                        !drm_enc->crtc) {
1755                DPU_ERROR("invalid parameters\n");
1756                return;
1757        }
1758
1759        priv = drm_enc->dev->dev_private;
1760
1761        if (drm_enc->crtc->index >= ARRAY_SIZE(priv->event_thread)) {
1762                DPU_ERROR("invalid crtc index\n");
1763                return;
1764        }
        event_thread = &priv->event_thread[drm_enc->crtc->index];

        /* defer to the event thread; the queued work re-arms this timer */
        kthread_queue_work(&event_thread->worker,
                        &dpu_enc->vsync_event_work);
1773}
1774
1775static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work)
1776{
1777        struct dpu_encoder_virt *dpu_enc = container_of(work,
1778                        struct dpu_encoder_virt, vsync_event_work);
1779        ktime_t wakeup_time;
1780
1786        if (_dpu_encoder_wakeup_time(&dpu_enc->base, &wakeup_time))
1787                return;
1788
1789        trace_dpu_enc_vsync_event_work(DRMID(&dpu_enc->base), wakeup_time);
1790        mod_timer(&dpu_enc->vsync_event_timer,
1791                        nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
1792}
1793
1794void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc, bool async)
1795{
1796        struct dpu_encoder_virt *dpu_enc;
1797        struct dpu_encoder_phys *phys;
1798        bool needs_hw_reset = false;
1799        unsigned int i;
1800
1801        if (!drm_enc) {
1802                DPU_ERROR("invalid args\n");
1803                return;
1804        }
1805        dpu_enc = to_dpu_encoder_virt(drm_enc);
1806
1807        trace_dpu_enc_prepare_kickoff(DRMID(drm_enc));
1808
1809        /* prepare for next kickoff, may include waiting on previous kickoff */
1810        DPU_ATRACE_BEGIN("enc_prepare_for_kickoff");
1811        for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1812                phys = dpu_enc->phys_encs[i];
1813                if (phys) {
1814                        if (phys->ops.prepare_for_kickoff)
1815                                phys->ops.prepare_for_kickoff(phys);
1816                        if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET)
1817                                needs_hw_reset = true;
1818                }
1819        }
1820        DPU_ATRACE_END("enc_prepare_for_kickoff");
1821
1822        dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
1823
1824        /* if any phys needs reset, reset all phys, in-order */
1825        if (needs_hw_reset) {
1826                trace_dpu_enc_prepare_kickoff_reset(DRMID(drm_enc));
1827                for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1828                        dpu_encoder_helper_hw_reset(dpu_enc->phys_encs[i]);
1829                }
1830        }
1831}
1832
1833void dpu_encoder_kickoff(struct drm_encoder *drm_enc, bool async)
1834{
1835        struct dpu_encoder_virt *dpu_enc;
1836        struct dpu_encoder_phys *phys;
1837        ktime_t wakeup_time;
1838        unsigned int i;
1839
1840        if (!drm_enc) {
1841                DPU_ERROR("invalid encoder\n");
1842                return;
1843        }
1844        DPU_ATRACE_BEGIN("encoder_kickoff");
1845        dpu_enc = to_dpu_encoder_virt(drm_enc);
1846
1847        trace_dpu_enc_kickoff(DRMID(drm_enc));
1848
1849        /*
1850         * Asynchronous frames don't handle FRAME_DONE events. As such, they
1851         * shouldn't enable the frame_done watchdog since it will always time
1852         * out.
1853         */
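        /*
         * The watchdog length scales with the mode: e.g. at 60 Hz the timer
         * arms for 5 * 1000 / 60 = ~83 ms.
         */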
        if (!async) {
                unsigned long timeout_ms;

                timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 /
                        drm_mode_vrefresh(&drm_enc->crtc->state->adjusted_mode);
1858
1859                atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms);
1860                mod_timer(&dpu_enc->frame_done_timer,
1861                          jiffies + msecs_to_jiffies(timeout_ms));
1862        }
1863
1864        /* All phys encs are ready to go, trigger the kickoff */
1865        _dpu_encoder_kickoff_phys(dpu_enc, async);
1866
1867        /* allow phys encs to handle any post-kickoff business */
1868        for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1869                phys = dpu_enc->phys_encs[i];
1870                if (phys && phys->ops.handle_post_kickoff)
1871                        phys->ops.handle_post_kickoff(phys);
1872        }
1873
1874        if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI &&
1875                        !_dpu_encoder_wakeup_time(drm_enc, &wakeup_time)) {
1876                trace_dpu_enc_early_kickoff(DRMID(drm_enc),
1877                                            ktime_to_ms(wakeup_time));
1878                mod_timer(&dpu_enc->vsync_event_timer,
1879                                nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
1880        }
1881
1882        DPU_ATRACE_END("encoder_kickoff");
1883}
1884
1885void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc)
1886{
1887        struct dpu_encoder_virt *dpu_enc;
1888        struct dpu_encoder_phys *phys;
1889        int i;
1890
1891        if (!drm_enc) {
1892                DPU_ERROR("invalid encoder\n");
1893                return;
1894        }
1895        dpu_enc = to_dpu_encoder_virt(drm_enc);
1896
1897        for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1898                phys = dpu_enc->phys_encs[i];
1899                if (phys && phys->ops.prepare_commit)
1900                        phys->ops.prepare_commit(phys);
1901        }
1902}
1903
1904#ifdef CONFIG_DEBUG_FS
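/*
 * Per-encoder "status" debugfs file; each line reports one physical encoder,
 * e.g. (illustrative output):
 *     intf:0    vsync:    1234     underrun:       0    mode: video
 */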
1905static int _dpu_encoder_status_show(struct seq_file *s, void *data)
1906{
1907        struct dpu_encoder_virt *dpu_enc = s->private;
1908        int i;
1909
1910        mutex_lock(&dpu_enc->enc_lock);
1911        for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1912                struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1913
1914                if (!phys)
1915                        continue;
1916
1917                seq_printf(s, "intf:%d    vsync:%8d     underrun:%8d    ",
1918                                phys->intf_idx - INTF_0,
1919                                atomic_read(&phys->vsync_cnt),
1920                                atomic_read(&phys->underrun_cnt));
1921
1922                switch (phys->intf_mode) {
1923                case INTF_MODE_VIDEO:
1924                        seq_puts(s, "mode: video\n");
1925                        break;
1926                case INTF_MODE_CMD:
1927                        seq_puts(s, "mode: command\n");
1928                        break;
1929                default:
1930                        seq_puts(s, "mode: ???\n");
1931                        break;
1932                }
1933        }
1934        mutex_unlock(&dpu_enc->enc_lock);
1935
1936        return 0;
1937}
1938
1939static int _dpu_encoder_debugfs_status_open(struct inode *inode,
1940                struct file *file)
1941{
1942        return single_open(file, _dpu_encoder_status_show, inode->i_private);
1943}
1944
1945static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
1946{
1947        struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1948        struct msm_drm_private *priv;
1949        struct dpu_kms *dpu_kms;
1950        int i;
1951
1952        static const struct file_operations debugfs_status_fops = {
1953                .open =         _dpu_encoder_debugfs_status_open,
1954                .read =         seq_read,
1955                .llseek =       seq_lseek,
1956                .release =      single_release,
1957        };
1958
1959        char name[DPU_NAME_SIZE];
1960
1961        if (!drm_enc->dev || !drm_enc->dev->dev_private) {
1962                DPU_ERROR("invalid encoder or kms\n");
1963                return -EINVAL;
1964        }
1965
1966        priv = drm_enc->dev->dev_private;
1967        dpu_kms = to_dpu_kms(priv->kms);
1968
1969        snprintf(name, DPU_NAME_SIZE, "encoder%u", drm_enc->base.id);
1970
1971        /* create overall sub-directory for the encoder */
1972        dpu_enc->debugfs_root = debugfs_create_dir(name,
1973                        drm_enc->dev->primary->debugfs_root);
1974
1975        /* don't error check these */
1976        debugfs_create_file("status", 0600,
1977                dpu_enc->debugfs_root, dpu_enc, &debugfs_status_fops);
1978
1979        for (i = 0; i < dpu_enc->num_phys_encs; i++)
1980                if (dpu_enc->phys_encs[i] &&
1981                                dpu_enc->phys_encs[i]->ops.late_register)
1982                        dpu_enc->phys_encs[i]->ops.late_register(
1983                                        dpu_enc->phys_encs[i],
1984                                        dpu_enc->debugfs_root);
1985
1986        return 0;
1987}
1988#else
1989static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
1990{
1991        return 0;
1992}
1993#endif
1994
1995static int dpu_encoder_late_register(struct drm_encoder *encoder)
1996{
1997        return _dpu_encoder_init_debugfs(encoder);
1998}
1999
2000static void dpu_encoder_early_unregister(struct drm_encoder *encoder)
2001{
2002        struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);
2003
2004        debugfs_remove_recursive(dpu_enc->debugfs_root);
2005}
2006
2007static int dpu_encoder_virt_add_phys_encs(
2008                u32 display_caps,
2009                struct dpu_encoder_virt *dpu_enc,
2010                struct dpu_enc_phys_init_params *params)
2011{
2012        struct dpu_encoder_phys *enc = NULL;
2013
2014        DPU_DEBUG_ENC(dpu_enc, "\n");
2015
2016        /*
2017         * We may create up to NUM_PHYS_ENCODER_TYPES physical encoder types
2018         * in this function, check up-front.
2019         */
2020        if (dpu_enc->num_phys_encs + NUM_PHYS_ENCODER_TYPES >=
2021                        ARRAY_SIZE(dpu_enc->phys_encs)) {
2022                DPU_ERROR_ENC(dpu_enc, "too many physical encoders %d\n",
2023                          dpu_enc->num_phys_encs);
2024                return -EINVAL;
2025        }
2026
2027        if (display_caps & MSM_DISPLAY_CAP_VID_MODE) {
2028                enc = dpu_encoder_phys_vid_init(params);
2029
2030                if (IS_ERR_OR_NULL(enc)) {
2031                        DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n",
2032                                PTR_ERR(enc));
                        return enc == NULL ? -EINVAL : PTR_ERR(enc);
2034                }
2035
2036                dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
2037                ++dpu_enc->num_phys_encs;
2038        }
2039
2040        if (display_caps & MSM_DISPLAY_CAP_CMD_MODE) {
2041                enc = dpu_encoder_phys_cmd_init(params);
2042
2043                if (IS_ERR_OR_NULL(enc)) {
2044                        DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n",
2045                                PTR_ERR(enc));
                        return enc == NULL ? -EINVAL : PTR_ERR(enc);
2047                }
2048
2049                dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
2050                ++dpu_enc->num_phys_encs;
2051        }
2052
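        /*
         * 'enc' still points at the physical encoder created last above, so
         * record it as this tile's master or slave for kickoff bookkeeping.
         */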
2053        if (params->split_role == ENC_ROLE_SLAVE)
2054                dpu_enc->cur_slave = enc;
2055        else
2056                dpu_enc->cur_master = enc;
2057
2058        return 0;
2059}
2060
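/*
 * Callbacks through which the physical encoders report back into the
 * virtual encoder: vblank and underrun arrive from interrupt context,
 * frame-done from the per-phys done/idle paths.
 */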
2061static const struct dpu_encoder_virt_ops dpu_encoder_parent_ops = {
2062        .handle_vblank_virt = dpu_encoder_vblank_callback,
2063        .handle_underrun_virt = dpu_encoder_underrun_callback,
2064        .handle_frame_done = dpu_encoder_frame_done_callback,
2065};
2066
2067static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
2068                                 struct dpu_kms *dpu_kms,
2069                                 struct msm_display_info *disp_info)
2070{
2071        int ret = 0;
2072        int i = 0;
2073        enum dpu_intf_type intf_type;
2074        struct dpu_enc_phys_init_params phys_params;
2075
2076        if (!dpu_enc || !dpu_kms) {
                DPU_ERROR("invalid arg(s), enc %d kms %d\n",
                                dpu_enc != NULL, dpu_kms != NULL);
2079                return -EINVAL;
2080        }
2081
2082        dpu_enc->cur_master = NULL;
2083
2084        memset(&phys_params, 0, sizeof(phys_params));
2085        phys_params.dpu_kms = dpu_kms;
2086        phys_params.parent = &dpu_enc->base;
2087        phys_params.parent_ops = &dpu_encoder_parent_ops;
2088        phys_params.enc_spinlock = &dpu_enc->enc_spinlock;
2089
2090        DPU_DEBUG("\n");
2091
2092        switch (disp_info->intf_type) {
2093        case DRM_MODE_ENCODER_DSI:
2094                intf_type = INTF_DSI;
2095                break;
2096        default:
2097                DPU_ERROR_ENC(dpu_enc, "unsupported display interface type\n");
2098                return -EINVAL;
2099        }
2100
2101        WARN_ON(disp_info->num_of_h_tiles < 1);
2102
        DPU_DEBUG("disp_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles);
2104
2105        if ((disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) ||
2106            (disp_info->capabilities & MSM_DISPLAY_CAP_VID_MODE))
2107                dpu_enc->idle_pc_supported =
2108                                dpu_kms->catalog->caps->has_idle_pc;
2109
2110        mutex_lock(&dpu_enc->enc_lock);
2111        for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) {
2112                /*
2113                 * Left-most tile is at index 0, content is controller id
2114                 * h_tile_instance_ids[2] = {0, 1}; DSI0 = left, DSI1 = right
2115                 * h_tile_instance_ids[2] = {1, 0}; DSI1 = left, DSI0 = right
2116                 */
2117                u32 controller_id = disp_info->h_tile_instance[i];
2118
2119                if (disp_info->num_of_h_tiles > 1) {
2120                        if (i == 0)
2121                                phys_params.split_role = ENC_ROLE_MASTER;
2122                        else
2123                                phys_params.split_role = ENC_ROLE_SLAVE;
2124                } else {
2125                        phys_params.split_role = ENC_ROLE_SOLO;
2126                }
2127
2128                DPU_DEBUG("h_tile_instance %d = %d, split_role %d\n",
2129                                i, controller_id, phys_params.split_role);
2130
                phys_params.intf_idx = dpu_encoder_get_intf(dpu_kms->catalog,
                                                            intf_type,
                                                            controller_id);
2134                if (phys_params.intf_idx == INTF_MAX) {
2135                        DPU_ERROR_ENC(dpu_enc, "could not get intf: type %d, id %d\n",
2136                                                  intf_type, controller_id);
2137                        ret = -EINVAL;
2138                }
2139
                if (!ret) {
                        ret = dpu_encoder_virt_add_phys_encs(disp_info->capabilities,
                                                             dpu_enc,
                                                             &phys_params);
                        if (ret)
                                DPU_ERROR_ENC(dpu_enc, "failed to add phys encs\n");
                }
2147        }
2148
2149        for (i = 0; i < dpu_enc->num_phys_encs; i++) {
2150                struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
2151
2152                if (phys) {
2153                        atomic_set(&phys->vsync_cnt, 0);
2154                        atomic_set(&phys->underrun_cnt, 0);
2155                }
2156        }
2157        mutex_unlock(&dpu_enc->enc_lock);
2158
2159        return ret;
2160}
2161
2162static void dpu_encoder_frame_done_timeout(struct timer_list *t)
2163{
2164        struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
2165                        frame_done_timer);
2166        struct drm_encoder *drm_enc = &dpu_enc->base;
2168        u32 event;
2169
2170        if (!drm_enc->dev || !drm_enc->dev->dev_private) {
2171                DPU_ERROR("invalid parameters\n");
2172                return;
2173        }
2175
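        /*
         * If no frame is busy or no callback is registered, the timer fired
         * after the frame already completed; atomic_xchg() additionally
         * closes the race where frame-done cleared the timeout while this
         * handler was running.
         */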
2176        if (!dpu_enc->frame_busy_mask[0] || !dpu_enc->crtc_frame_event_cb) {
2177                DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n",
2178                              DRMID(drm_enc), dpu_enc->frame_busy_mask[0]);
2179                return;
2180        } else if (!atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
2181                DRM_DEBUG_KMS("id:%u invalid timeout\n", DRMID(drm_enc));
2182                return;
2183        }
2184
2185        DPU_ERROR_ENC(dpu_enc, "frame done timeout\n");
2186
2187        event = DPU_ENCODER_FRAME_EVENT_ERROR;
2188        trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event);
2189        dpu_enc->crtc_frame_event_cb(dpu_enc->crtc_frame_event_cb_data, event);
2190}
2191
2192static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = {
2193        .mode_set = dpu_encoder_virt_mode_set,
2194        .disable = dpu_encoder_virt_disable,
2195        .enable = dpu_kms_encoder_enable,
2196        .atomic_check = dpu_encoder_virt_atomic_check,
2197
2198        /* This is called by dpu_kms_encoder_enable */
2199        .commit = dpu_encoder_virt_enable,
2200};
2201
static const struct drm_encoder_funcs dpu_encoder_funcs = {
        .destroy = dpu_encoder_destroy,
        .late_register = dpu_encoder_late_register,
        .early_unregister = dpu_encoder_early_unregister,
};
2207
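/*
 * Second-stage setup: 'enc' must be an encoder previously returned by
 * dpu_encoder_init(), i.e. one embedded in a struct dpu_encoder_virt.
 */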
2208int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
2209                struct msm_display_info *disp_info)
2210{
2211        struct msm_drm_private *priv = dev->dev_private;
2212        struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
2214        struct dpu_encoder_virt *dpu_enc = NULL;
2215        int ret = 0;
2216
2217        dpu_enc = to_dpu_encoder_virt(enc);
2218
2219        mutex_init(&dpu_enc->enc_lock);
2220        ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info);
2221        if (ret)
2222                goto fail;
2223
2224        atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
2225        timer_setup(&dpu_enc->frame_done_timer,
2226                        dpu_encoder_frame_done_timeout, 0);
2227
        if (disp_info->intf_type == DRM_MODE_ENCODER_DSI)
                timer_setup(&dpu_enc->vsync_event_timer,
                                dpu_encoder_vsync_event_handler, 0);

2234        mutex_init(&dpu_enc->rc_lock);
2235        INIT_DELAYED_WORK(&dpu_enc->delayed_off_work,
2236                        dpu_encoder_off_work);
2237        dpu_enc->idle_timeout = IDLE_TIMEOUT;
2238
2239        kthread_init_work(&dpu_enc->vsync_event_work,
2240                        dpu_encoder_vsync_event_work_handler);
2241
2242        memcpy(&dpu_enc->disp_info, disp_info, sizeof(*disp_info));
2243
2244        DPU_DEBUG_ENC(dpu_enc, "created\n");
2245
2246        return ret;
2247
fail:
        DPU_ERROR("failed to create encoder\n");

        return ret;
}
2257
2258struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
2259                int drm_enc_mode)
2260{
2261        struct dpu_encoder_virt *dpu_enc = NULL;
2262        int rc = 0;
2263
2264        dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL);
2265        if (!dpu_enc)
                return ERR_PTR(-ENOMEM);
2267
2268        rc = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs,
2269                        drm_enc_mode, NULL);
2270        if (rc) {
2271                devm_kfree(dev->dev, dpu_enc);
2272                return ERR_PTR(rc);
2273        }
2274
2275        drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);
2276
2277        spin_lock_init(&dpu_enc->enc_spinlock);
2278        dpu_enc->enabled = false;
2279
2280        return &dpu_enc->base;
2281}
2282
2283int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
2284        enum msm_event_wait event)
2285{
2286        int (*fn_wait)(struct dpu_encoder_phys *phys_enc) = NULL;
2287        struct dpu_encoder_virt *dpu_enc = NULL;
2288        int i, ret = 0;
2289
2290        if (!drm_enc) {
2291                DPU_ERROR("invalid encoder\n");
2292                return -EINVAL;
2293        }
2294        dpu_enc = to_dpu_encoder_virt(drm_enc);
2295        DPU_DEBUG_ENC(dpu_enc, "\n");
2296
2297        for (i = 0; i < dpu_enc->num_phys_encs; i++) {
2298                struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
2299                if (!phys)
2300                        continue;
2301
2302                switch (event) {
2303                case MSM_ENC_COMMIT_DONE:
2304                        fn_wait = phys->ops.wait_for_commit_done;
2305                        break;
2306                case MSM_ENC_TX_COMPLETE:
2307                        fn_wait = phys->ops.wait_for_tx_complete;
2308                        break;
2309                case MSM_ENC_VBLANK:
2310                        fn_wait = phys->ops.wait_for_vblank;
2311                        break;
2312                default:
2313                        DPU_ERROR_ENC(dpu_enc, "unknown wait event %d\n",
2314                                        event);
2315                        return -EINVAL;
                }
2317
2318                if (fn_wait) {
2319                        DPU_ATRACE_BEGIN("wait_for_completion_event");
2320                        ret = fn_wait(phys);
2321                        DPU_ATRACE_END("wait_for_completion_event");
2322                        if (ret)
2323                                return ret;
2324                }
2325        }
2326
2327        return ret;
2328}
2329
2330enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
2331{
2332        struct dpu_encoder_virt *dpu_enc = NULL;
2333        int i;
2334
2335        if (!encoder) {
2336                DPU_ERROR("invalid encoder\n");
2337                return INTF_MODE_NONE;
2338        }
2339        dpu_enc = to_dpu_encoder_virt(encoder);
2340
2341        if (dpu_enc->cur_master)
2342                return dpu_enc->cur_master->intf_mode;
2343
2344        for (i = 0; i < dpu_enc->num_phys_encs; i++) {
2345                struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
2346
2347                if (phys)
2348                        return phys->intf_mode;
2349        }
2350
2351        return INTF_MODE_NONE;
2352}
2353