linux/drivers/gpu/drm/i915/display/intel_global_state.c
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/string.h>

#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_display_types.h"
#include "intel_global_state.h"

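/*
 * Register a global object with its initial committed state and the
 * vfuncs used to duplicate/destroy that state. The object is added to
 * the per-device global_obj_list and torn down again by
 * intel_atomic_global_obj_cleanup().
 */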
void intel_atomic_global_obj_init(struct drm_i915_private *dev_priv,
                                  struct intel_global_obj *obj,
                                  struct intel_global_state *state,
                                  const struct intel_global_state_funcs *funcs)
{
        memset(obj, 0, sizeof(*obj));

        obj->state = state;
        obj->funcs = funcs;
        list_add_tail(&obj->head, &dev_priv->global_obj_list);
}

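/*
 * Unregister every global object and destroy its current committed
 * state. Counterpart to intel_atomic_global_obj_init().
 */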
void intel_atomic_global_obj_cleanup(struct drm_i915_private *dev_priv)
{
        struct intel_global_obj *obj, *next;

        list_for_each_entry_safe(obj, next, &dev_priv->global_obj_list, head) {
                list_del(&obj->head);
                obj->funcs->atomic_destroy_state(obj, obj->state);
        }
}

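/*
 * Write (swap) access to the global state requires the modeset locks
 * of all CRTCs to be held; see intel_atomic_lock_global_state().
 */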
static void assert_global_state_write_locked(struct drm_i915_private *dev_priv)
{
        struct intel_crtc *crtc;

        for_each_intel_crtc(&dev_priv->drm, crtc)
                drm_modeset_lock_assert_held(&crtc->base.mutex);
}

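/*
 * Check whether @lock has been acquired through @ctx by walking the
 * acquire context's list of locked locks.
 */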
static bool modeset_lock_is_held(struct drm_modeset_acquire_ctx *ctx,
                                 struct drm_modeset_lock *lock)
{
        struct drm_modeset_lock *l;

        list_for_each_entry(l, &ctx->locked, head) {
                if (lock == l)
                        return true;
        }

        return false;
}

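/*
 * Read (duplicate) access to the global state only requires the
 * modeset lock of at least one CRTC to be held in the commit's
 * acquire context.
 */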
static void assert_global_state_read_locked(struct intel_atomic_state *state)
{
        struct drm_modeset_acquire_ctx *ctx = state->base.acquire_ctx;
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        struct intel_crtc *crtc;

        for_each_intel_crtc(&dev_priv->drm, crtc) {
                if (modeset_lock_is_held(ctx, &crtc->base.mutex))
                        return;
        }

        WARN(1, "Global state not read locked\n");
}

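/*
 * Return the per-commit state for @obj, duplicating the current
 * committed state on first access and growing the atomic state's
 * global_objs[] array by one entry. Requires at least read access,
 * i.e. one CRTC modeset lock held.
 */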
struct intel_global_state *
intel_atomic_get_global_obj_state(struct intel_atomic_state *state,
                                  struct intel_global_obj *obj)
{
        int index, num_objs, i;
        size_t size;
        struct __intel_global_objs_state *arr;
        struct intel_global_state *obj_state;

        for (i = 0; i < state->num_global_objs; i++)
                if (obj == state->global_objs[i].ptr)
                        return state->global_objs[i].state;

        assert_global_state_read_locked(state);

        num_objs = state->num_global_objs + 1;
        size = sizeof(*state->global_objs) * num_objs;
        arr = krealloc(state->global_objs, size, GFP_KERNEL);
        if (!arr)
                return ERR_PTR(-ENOMEM);

        state->global_objs = arr;
        index = state->num_global_objs;
        memset(&state->global_objs[index], 0, sizeof(*state->global_objs));

        obj_state = obj->funcs->atomic_duplicate_state(obj);
        if (!obj_state)
                return ERR_PTR(-ENOMEM);

        obj_state->changed = false;

        state->global_objs[index].state = obj_state;
        state->global_objs[index].old_state = obj->state;
        state->global_objs[index].new_state = obj_state;
        state->global_objs[index].ptr = obj;
        obj_state->state = state;

        state->num_global_objs = num_objs;

        DRM_DEBUG_ATOMIC("Added new global object %p state %p to %p\n",
                         obj, obj_state, state);

        return obj_state;
}

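/*
 * Return the old (pre-commit) state for @obj, or NULL if @obj is not
 * part of this atomic state.
 */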
struct intel_global_state *
intel_atomic_get_old_global_obj_state(struct intel_atomic_state *state,
                                      struct intel_global_obj *obj)
{
        int i;

        for (i = 0; i < state->num_global_objs; i++)
                if (obj == state->global_objs[i].ptr)
                        return state->global_objs[i].old_state;

        return NULL;
}

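/*
 * Return the new state for @obj, i.e. the state this commit will make
 * current, or NULL if @obj is not part of this atomic state.
 */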
struct intel_global_state *
intel_atomic_get_new_global_obj_state(struct intel_atomic_state *state,
                                      struct intel_global_obj *obj)
{
        int i;

        for (i = 0; i < state->num_global_objs; i++)
                if (obj == state->global_objs[i].ptr)
                        return state->global_objs[i].new_state;

        return NULL;
}

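/*
 * Swap the new state of each modified global object into obj->state,
 * handing the previously committed state over to the atomic state so
 * it gets destroyed on cleanup. States that were never marked changed
 * (and hence never write locked) are left alone and discarded later.
 */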
void intel_atomic_swap_global_state(struct intel_atomic_state *state)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        struct intel_global_state *old_obj_state, *new_obj_state;
        struct intel_global_obj *obj;
        int i;

        for_each_oldnew_global_obj_in_state(state, obj, old_obj_state,
                                            new_obj_state, i) {
                WARN_ON(obj->state != old_obj_state);

                /*
                 * If the new state wasn't modified (and properly
                 * locked for write access) we throw it away.
                 */
                if (!new_obj_state->changed)
                        continue;

                assert_global_state_write_locked(dev_priv);

                old_obj_state->state = state;
                new_obj_state->state = NULL;

                state->global_objs[i].state = old_obj_state;
                obj->state = new_obj_state;
        }
}

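/*
 * Destroy whatever state is still owned by the atomic state (the
 * unswapped duplicates, or the old states after a swap) and drop all
 * references to the global objects.
 */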
void intel_atomic_clear_global_state(struct intel_atomic_state *state)
{
        int i;

        for (i = 0; i < state->num_global_objs; i++) {
                struct intel_global_obj *obj = state->global_objs[i].ptr;

                obj->funcs->atomic_destroy_state(obj,
                                                 state->global_objs[i].state);
                state->global_objs[i].ptr = NULL;
                state->global_objs[i].state = NULL;
                state->global_objs[i].old_state = NULL;
                state->global_objs[i].new_state = NULL;
        }
        state->num_global_objs = 0;
}

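/*
 * Grab the modeset locks of all CRTCs, giving this commit exclusive
 * (write) access to the global state, and mark the state as changed
 * so it gets swapped in at commit time.
 */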
int intel_atomic_lock_global_state(struct intel_global_state *obj_state)
{
        struct intel_atomic_state *state = obj_state->state;
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        struct intel_crtc *crtc;

        for_each_intel_crtc(&dev_priv->drm, crtc) {
                int ret;

                ret = drm_modeset_lock(&crtc->base.mutex,
                                       state->base.acquire_ctx);
                if (ret)
                        return ret;
        }

        obj_state->changed = true;

        return 0;
}

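/*
 * Add every CRTC's state to this commit and mark the global state as
 * changed, so the commit is ordered against all CRTCs rather than
 * only those it already touches.
 */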
int intel_atomic_serialize_global_state(struct intel_global_state *obj_state)
{
        struct intel_atomic_state *state = obj_state->state;
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        struct intel_crtc *crtc;

        for_each_intel_crtc(&dev_priv->drm, crtc) {
                struct intel_crtc_state *crtc_state;

                crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
                if (IS_ERR(crtc_state))
                        return PTR_ERR(crtc_state);
        }

        obj_state->changed = true;

        return 0;
}