linux/drivers/gpu/drm/msm/msm_atomic.c
/*
 * Copyright (C) 2014 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_drv.h"
#include "msm_kms.h"
#include "msm_gem.h"

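/* Bookkeeping for an in-flight atomic commit: the fence to wait on
 * before the update can be applied, and the set of crtcs being updated.
 */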
struct msm_commit {
        struct drm_device *dev;
        struct drm_atomic_state *state;
        uint32_t fence;
        struct msm_fence_cb fence_cb;
        uint32_t crtc_mask;
};

static void fence_cb(struct msm_fence_cb *cb);

/* block until specified crtcs are no longer pending update, and
 * atomically mark them as pending update
 */
static int start_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
{
        int ret;

        spin_lock(&priv->pending_crtcs_event.lock);
        ret = wait_event_interruptible_locked(priv->pending_crtcs_event,
                        !(priv->pending_crtcs & crtc_mask));
        if (ret == 0) {
                DBG("start: %08x", crtc_mask);
                priv->pending_crtcs |= crtc_mask;
        }
        spin_unlock(&priv->pending_crtcs_event.lock);

        return ret;
}

/* clear specified crtcs (no longer pending update)
 */
static void end_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
{
        spin_lock(&priv->pending_crtcs_event.lock);
        DBG("end: %08x", crtc_mask);
        priv->pending_crtcs &= ~crtc_mask;
        wake_up_all_locked(&priv->pending_crtcs_event);
        spin_unlock(&priv->pending_crtcs_event.lock);
}

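/* Allocate the per-commit bookkeeping structure and hook up the fence
 * callback used to complete asynchronous commits.
 */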
static struct msm_commit *commit_init(struct drm_atomic_state *state)
{
        struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);

        if (!c)
                return NULL;

        c->dev = state->dev;
        c->state = state;

        /* TODO we might need a way to indicate to run the cb on a
         * different wq so wait_for_vblanks() doesn't block retiring
         * bo's..
         */
        INIT_FENCE_CB(&c->fence_cb, fence_cb);

        return c;
}

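/* Mark our crtcs as no longer pending update and free the commit. */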
static void commit_destroy(struct msm_commit *c)
{
        end_atomic(c->dev->dev_private, c->crtc_mask);
        kfree(c);
}

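/* Wait for each enabled crtc in the old state to finish its commit,
 * skipping legacy cursor updates, which are intentionally unsynced.
 */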
static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
                struct drm_atomic_state *old_state)
{
        struct drm_crtc *crtc;
        struct msm_drm_private *priv = old_state->dev->dev_private;
        struct msm_kms *kms = priv->kms;
        int ncrtcs = old_state->dev->mode_config.num_crtc;
        int i;

        for (i = 0; i < ncrtcs; i++) {
                crtc = old_state->crtcs[i];

                if (!crtc)
                        continue;

                if (!crtc->state->enable)
                        continue;

                /* Legacy cursor ioctls are completely unsynced, and userspace
                 * relies on that (by doing tons of cursor updates). */
                if (old_state->legacy_cursor_update)
                        continue;

                kms->funcs->wait_for_crtc_commit_done(kms, crtc);
        }
}

/* The (potentially) asynchronous part of the commit.  At this point
 * nothing can fail short of armageddon.
 */
static void complete_commit(struct msm_commit *c)
{
        struct drm_atomic_state *state = c->state;
        struct drm_device *dev = state->dev;
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;

        kms->funcs->prepare_commit(kms, state);

        drm_atomic_helper_commit_modeset_disables(dev, state);

        drm_atomic_helper_commit_planes(dev, state, false);

        drm_atomic_helper_commit_modeset_enables(dev, state);

        /* NOTE: _wait_for_vblanks() only waits for vblank on
         * enabled CRTCs.  So we end up faulting when disabling
         * due to (potentially) unref'ing the outgoing fb's
         * before the vblank when the disable has latched.
         *
         * But if it did wait on disabled (or newly disabled)
         * CRTCs, that would be racy (ie. we could have missed
         * the irq).  We need some way to poll for pipe shut
         * down.  Or just live with occasionally hitting the
         * timeout in the CRTC disable path (which really should
         * not be critical path).
         */

        msm_atomic_wait_for_commit_done(dev, state);

        drm_atomic_helper_cleanup_planes(dev, state);

        kms->funcs->complete_commit(kms, state);

        drm_atomic_state_free(state);

        commit_destroy(c);
}

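/* Called once c->fence has signalled, ie. the GPU is done rendering to
 * the incoming fbs, so the commit can now be applied to the hw.
 */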
static void fence_cb(struct msm_fence_cb *cb)
{
        struct msm_commit *c =
                        container_of(cb, struct msm_commit, fence_cb);
        complete_commit(c);
}

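/* Track the highest fence that must signal before @fb can be scanned out. */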
static void add_fb(struct msm_commit *c, struct drm_framebuffer *fb)
{
        struct drm_gem_object *obj = msm_framebuffer_bo(fb, 0);
        c->fence = max(c->fence, msm_gem_fence(to_msm_bo(obj), MSM_PREP_READ));
}

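/* Unlike drm_atomic_helper_check(), planes are checked before the
 * modeset state; see the comment in the body for why.
 */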
int msm_atomic_check(struct drm_device *dev,
                     struct drm_atomic_state *state)
{
        int ret;

        /*
         * msm ->atomic_check can update ->mode_changed for pixel format
         * changes, hence must be run before we check the modeset changes.
         */
        ret = drm_atomic_helper_check_planes(dev, state);
        if (ret)
                return ret;

        ret = drm_atomic_helper_check_modeset(dev, state);
        if (ret)
                return ret;

        return ret;
}

/**
 * msm_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the driver state object
 * @async: asynchronous commit
 *
 * This function commits a state object that has been pre-validated with
 * drm_atomic_helper_check().  This can still fail when e.g. the framebuffer
 * reservation fails.  For asynchronous commits the commit is completed from
 * the fence callback, once rendering to the incoming fbs has finished.
 *
 * RETURNS
 * Zero for success or -errno.
 */
int msm_atomic_commit(struct drm_device *dev,
                struct drm_atomic_state *state, bool async)
{
        int nplanes = dev->mode_config.num_total_plane;
        int ncrtcs = dev->mode_config.num_crtc;
        ktime_t timeout;
        struct msm_commit *c;
        int i, ret;

        ret = drm_atomic_helper_prepare_planes(dev, state);
        if (ret)
                return ret;

        c = commit_init(state);
        if (!c) {
                ret = -ENOMEM;
                goto error;
        }

        /*
         * Figure out what crtcs we have:
         */
        for (i = 0; i < ncrtcs; i++) {
                struct drm_crtc *crtc = state->crtcs[i];
                if (!crtc)
                        continue;
                c->crtc_mask |= (1 << drm_crtc_index(crtc));
        }

        /*
         * Figure out what fence to wait for:
         */
        for (i = 0; i < nplanes; i++) {
                struct drm_plane *plane = state->planes[i];
                struct drm_plane_state *new_state = state->plane_states[i];

                if (!plane)
                        continue;

                if ((plane->state->fb != new_state->fb) && new_state->fb)
                        add_fb(c, new_state->fb);
        }

        /*
         * Wait for pending updates on any of the same crtc's and then
         * mark our set of crtc's as busy:
         */
        ret = start_atomic(dev->dev_private, c->crtc_mask);
        if (ret) {
                kfree(c);
                goto error;
        }

        /*
         * This is the point of no return - everything below never fails except
         * when the hw goes bonghits. Which means we can commit the new state on
         * the software side now.
         */

        drm_atomic_helper_swap_state(dev, state);

        /*
         * Everything below can be run asynchronously without the need to grab
         * any modeset locks at all under one condition: It must be guaranteed
         * that the asynchronous work has either been cancelled (if the driver
         * supports it, which at least requires that the framebuffers get
         * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
         * before the new state gets committed on the software side with
         * drm_atomic_helper_swap_state().
         *
         * This scheme allows new atomic state updates to be prepared and
         * checked in parallel to the asynchronous completion of the previous
         * update, which is important since compositors need to figure out the
         * composition of the next frame right after having submitted the
         * current layout.
         */

        if (async) {
                msm_queue_fence_cb(dev, &c->fence_cb, c->fence);
                return 0;
        }

        timeout = ktime_add_ms(ktime_get(), 1000);

        /* uninterruptible wait */
        msm_wait_fence(dev, c->fence, &timeout, false);

        complete_commit(c);

        return 0;

error:
        drm_atomic_helper_cleanup_planes(dev, state);
        return ret;
}