linux/drivers/gpu/drm/msm/msm_atomic.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <drm/drm_atomic_uapi.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_vblank.h>

#include "msm_atomic_trace.h"
#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_kms.h"

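/*
 * Hook up the fb's implicit fence to the plane state (via
 * drm_gem_fb_prepare_fb()) and pin the fb's backing pages into the
 * kms address space, so the buffer is ready to scan out by the time
 * the update is flushed to hardware.
 */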
int msm_atomic_prepare_fb(struct drm_plane *plane,
                          struct drm_plane_state *new_state)
{
        struct msm_drm_private *priv = plane->dev->dev_private;
        struct msm_kms *kms = priv->kms;

        if (!new_state->fb)
                return 0;

        drm_gem_fb_prepare_fb(plane, new_state);

        return msm_framebuffer_prepare(new_state->fb, kms->aspace);
}

/*
 * Helpers to control vblanks while we flush... basically just to ensure
 * that vblank accounting is switched on, so we get a valid sequence
 * number and timestamp on pageflip events (if requested).
 */

static void vblank_get(struct msm_kms *kms, unsigned crtc_mask)
{
        struct drm_crtc *crtc;

        for_each_crtc_mask(kms->dev, crtc, crtc_mask) {
                if (!crtc->state->active)
                        continue;
                drm_crtc_vblank_get(crtc);
        }
}

static void vblank_put(struct msm_kms *kms, unsigned crtc_mask)
{
        struct drm_crtc *crtc;

        for_each_crtc_mask(kms->dev, crtc, crtc_mask) {
                if (!crtc->state->active)
                        continue;
                drm_crtc_vblank_put(crtc);
        }
}

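/*
 * Flush a previously deferred async commit for the given crtc.  Called
 * from the pending-timer worker shortly before the expected vsync; bails
 * out early if the pending update has already been folded into a later
 * synchronous commit (ie. the crtc's bit is no longer set in
 * pending_crtc_mask).
 */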
static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx)
{
        unsigned crtc_mask = BIT(crtc_idx);

        trace_msm_atomic_async_commit_start(crtc_mask);

        mutex_lock(&kms->commit_lock);

        if (!(kms->pending_crtc_mask & crtc_mask)) {
                mutex_unlock(&kms->commit_lock);
                goto out;
        }

        kms->pending_crtc_mask &= ~crtc_mask;

        kms->funcs->enable_commit(kms);

        vblank_get(kms, crtc_mask);

        /*
         * Flush hardware updates:
         */
        trace_msm_atomic_flush_commit(crtc_mask);
        kms->funcs->flush_commit(kms, crtc_mask);
        mutex_unlock(&kms->commit_lock);

        /*
         * Wait for flush to complete:
         */
        trace_msm_atomic_wait_flush_start(crtc_mask);
        kms->funcs->wait_flush(kms, crtc_mask);
        trace_msm_atomic_wait_flush_finish(crtc_mask);

        vblank_put(kms, crtc_mask);

        mutex_lock(&kms->commit_lock);
        kms->funcs->complete_commit(kms, crtc_mask);
        mutex_unlock(&kms->commit_lock);
        kms->funcs->disable_commit(kms);

out:
        trace_msm_atomic_async_commit_finish(crtc_mask);
}

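/*
 * The hrtimer fires in atomic context, where we cannot take the
 * commit_lock or block waiting for the flush, so just punt the actual
 * commit to the workqueue.
 */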
static enum hrtimer_restart msm_atomic_pending_timer(struct hrtimer *t)
{
        struct msm_pending_timer *timer = container_of(t,
                        struct msm_pending_timer, timer);
        struct msm_drm_private *priv = timer->kms->dev->dev_private;

        queue_work(priv->wq, &timer->work);

        return HRTIMER_NORESTART;
}

static void msm_atomic_pending_work(struct work_struct *work)
{
        struct msm_pending_timer *timer = container_of(work,
                        struct msm_pending_timer, work);

        msm_atomic_async_commit(timer->kms, timer->crtc_idx);
}

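/*
 * Set up the per-crtc timer/work pair used to defer flushing of async
 * commits until just before vsync.
 */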
void msm_atomic_init_pending_timer(struct msm_pending_timer *timer,
                struct msm_kms *kms, int crtc_idx)
{
        timer->kms = kms;
        timer->crtc_idx = crtc_idx;
        hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        timer->timer.function = msm_atomic_pending_timer;
        INIT_WORK(&timer->work, msm_atomic_pending_work);
}

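/*
 * An update can take the async path only if it is a cursor or async
 * plane update touching a single crtc, with no connector changes and
 * no modeset required.  On success, *async_crtc is set to the one
 * affected crtc.
 */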
static bool can_do_async(struct drm_atomic_state *state,
                struct drm_crtc **async_crtc)
{
        struct drm_connector_state *connector_state;
        struct drm_connector *connector;
        struct drm_crtc_state *crtc_state;
        struct drm_crtc *crtc;
        int i, num_crtcs = 0;

        if (!(state->legacy_cursor_update || state->async_update))
                return false;

        /* any connector change means slow path: */
        for_each_new_connector_in_state(state, connector, connector_state, i)
                return false;

        for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
                if (drm_atomic_crtc_needs_modeset(crtc_state))
                        return false;
                if (++num_crtcs > 1)
                        return false;
                *async_crtc = crtc;
        }

        return true;
}

/* Get bitmask of crtcs that will need to be flushed.  The bitmask
 * can be used with the for_each_crtc_mask() iterator to iterate
 * affected crtcs without needing to preserve the atomic state.
 */
static unsigned get_crtc_mask(struct drm_atomic_state *state)
{
        struct drm_crtc_state *crtc_state;
        struct drm_crtc *crtc;
        unsigned i, mask = 0;

        for_each_new_crtc_in_state(state, crtc, crtc_state, i)
                mask |= drm_crtc_mask(crtc);

        return mask;
}

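/*
 * Commit tail for msm: wait for any previous (possibly async) flush on
 * the affected crtcs to complete, push the new state down to hardware,
 * and then either defer the flush to the pending timer (async path) or
 * flush and wait synchronously.
 */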
void msm_atomic_commit_tail(struct drm_atomic_state *state)
{
        struct drm_device *dev = state->dev;
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;
        struct drm_crtc *async_crtc = NULL;
        unsigned crtc_mask = get_crtc_mask(state);
        bool async = kms->funcs->vsync_time &&
                        can_do_async(state, &async_crtc);

        trace_msm_atomic_commit_tail_start(async, crtc_mask);

        kms->funcs->enable_commit(kms);

        /*
         * Ensure any previous (potentially async) commit has
         * completed:
         */
        trace_msm_atomic_wait_flush_start(crtc_mask);
        kms->funcs->wait_flush(kms, crtc_mask);
        trace_msm_atomic_wait_flush_finish(crtc_mask);

        mutex_lock(&kms->commit_lock);

        /*
         * Now that there is no in-progress flush, prepare the
         * current update:
         */
        kms->funcs->prepare_commit(kms, state);

        /*
         * Push atomic updates down to hardware:
         */
        drm_atomic_helper_commit_modeset_disables(dev, state);
        drm_atomic_helper_commit_planes(dev, state, 0);
        drm_atomic_helper_commit_modeset_enables(dev, state);

        if (async) {
                struct msm_pending_timer *timer =
                        &kms->pending_timers[drm_crtc_index(async_crtc)];

                /* async updates are limited to single-crtc updates: */
                WARN_ON(crtc_mask != drm_crtc_mask(async_crtc));

                /*
                 * Start timer if we don't already have an update pending
                 * on this crtc:
                 */
                if (!(kms->pending_crtc_mask & crtc_mask)) {
                        ktime_t vsync_time, wakeup_time;

                        kms->pending_crtc_mask |= crtc_mask;

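                        /*
                         * Aim to wake up ~1ms ahead of the expected
                         * vsync, leaving time for the deferred flush to
                         * land before the frame boundary:
                         */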
                        vsync_time = kms->funcs->vsync_time(kms, async_crtc);
                        wakeup_time = ktime_sub(vsync_time, ms_to_ktime(1));

                        hrtimer_start(&timer->timer, wakeup_time,
                                        HRTIMER_MODE_ABS);
                }

                kms->funcs->disable_commit(kms);
                mutex_unlock(&kms->commit_lock);

                /*
                 * At this point, from drm core's perspective, we
                 * are done with the atomic update, so we can just
                 * go ahead and signal that it is done:
                 */
                drm_atomic_helper_commit_hw_done(state);
                drm_atomic_helper_cleanup_planes(dev, state);

                trace_msm_atomic_commit_tail_finish(async, crtc_mask);

                return;
        }

        /*
         * If there are any async flushes pending on the updated crtcs,
         * fold them into the current flush.
         */
        kms->pending_crtc_mask &= ~crtc_mask;

        vblank_get(kms, crtc_mask);

        /*
         * Flush hardware updates:
         */
        trace_msm_atomic_flush_commit(crtc_mask);
        kms->funcs->flush_commit(kms, crtc_mask);
        mutex_unlock(&kms->commit_lock);

        /*
         * Wait for flush to complete:
         */
        trace_msm_atomic_wait_flush_start(crtc_mask);
        kms->funcs->wait_flush(kms, crtc_mask);
        trace_msm_atomic_wait_flush_finish(crtc_mask);

        vblank_put(kms, crtc_mask);

        mutex_lock(&kms->commit_lock);
        kms->funcs->complete_commit(kms, crtc_mask);
        mutex_unlock(&kms->commit_lock);
        kms->funcs->disable_commit(kms);

        drm_atomic_helper_commit_hw_done(state);
        drm_atomic_helper_cleanup_planes(dev, state);

        trace_msm_atomic_commit_tail_finish(async, crtc_mask);
}