/* linux/drivers/gpu/drm/vc4/vc4_kms.c */
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2015 Broadcom
   4 */
   5
   6/**
   7 * DOC: VC4 KMS
   8 *
   9 * This is the general code for implementing KMS mode setting that
  10 * doesn't clearly associate with any of the other objects (plane,
  11 * crtc, HDMI encoder).
  12 */
  13
  14#include <linux/clk.h>
  15
  16#include <drm/drm_atomic.h>
  17#include <drm/drm_atomic_helper.h>
  18#include <drm/drm_crtc.h>
  19#include <drm/drm_gem_framebuffer_helper.h>
  20#include <drm/drm_plane_helper.h>
  21#include <drm/drm_probe_helper.h>
  22#include <drm/drm_vblank.h>
  23
  24#include "vc4_drv.h"
  25#include "vc4_regs.h"
  26
  27#define HVS_NUM_CHANNELS 3
  28
/* Global (driver-wide) state for the single Color Transform Matrix. */
struct vc4_ctm_state {
        struct drm_private_state base;
        /* Userspace-supplied matrix; only meaningful while fifo != 0. */
        struct drm_color_ctm *ctm;
        /* 1-based HVS FIFO the CTM is routed to; 0 means CTM disabled. */
        int fifo;
};
  34
/* Upcast from the embedded DRM private state to our CTM state wrapper. */
static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv)
{
        return container_of(priv, struct vc4_ctm_state, base);
}
  39
/* Global state tracking which HVS FIFOs (channels) are claimed by CRTCs. */
struct vc4_hvs_state {
        struct drm_private_state base;

        struct {
                /* Set while a CRTC owns this FIFO. */
                unsigned in_use: 1;
                /* Commit currently using this FIFO; holds a reference
                 * taken in vc4_atomic_commit_setup(). */
                struct drm_crtc_commit *pending_commit;
        } fifo_state[HVS_NUM_CHANNELS];
};
  48
/* Upcast from the embedded DRM private state to the HVS channels state. */
static struct vc4_hvs_state *
to_vc4_hvs_state(struct drm_private_state *priv)
{
        return container_of(priv, struct vc4_hvs_state, base);
}
  54
/* Global accumulated load estimates used to reject over-committed states. */
struct vc4_load_tracker_state {
        struct drm_private_state base;
        /* Estimated HVS cycles consumed per frame across all planes. */
        u64 hvs_load;
        /* Estimated memory-bus bandwidth consumed across all planes. */
        u64 membus_load;
};
  60
/* Upcast from the embedded DRM private state to the load tracker state. */
static struct vc4_load_tracker_state *
to_vc4_load_tracker_state(struct drm_private_state *priv)
{
        return container_of(priv, struct vc4_load_tracker_state, base);
}
  66
  67static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state,
  68                                               struct drm_private_obj *manager)
  69{
  70        struct drm_device *dev = state->dev;
  71        struct vc4_dev *vc4 = to_vc4_dev(dev);
  72        struct drm_private_state *priv_state;
  73        int ret;
  74
  75        ret = drm_modeset_lock(&vc4->ctm_state_lock, state->acquire_ctx);
  76        if (ret)
  77                return ERR_PTR(ret);
  78
  79        priv_state = drm_atomic_get_private_obj_state(state, manager);
  80        if (IS_ERR(priv_state))
  81                return ERR_CAST(priv_state);
  82
  83        return to_vc4_ctm_state(priv_state);
  84}
  85
  86static struct drm_private_state *
  87vc4_ctm_duplicate_state(struct drm_private_obj *obj)
  88{
  89        struct vc4_ctm_state *state;
  90
  91        state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
  92        if (!state)
  93                return NULL;
  94
  95        __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
  96
  97        return &state->base;
  98}
  99
 100static void vc4_ctm_destroy_state(struct drm_private_obj *obj,
 101                                  struct drm_private_state *state)
 102{
 103        struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(state);
 104
 105        kfree(ctm_state);
 106}
 107
/* Private-object vtable for the CTM manager. */
static const struct drm_private_state_funcs vc4_ctm_state_funcs = {
        .atomic_duplicate_state = vc4_ctm_duplicate_state,
        .atomic_destroy_state = vc4_ctm_destroy_state,
};
 112
 113static void vc4_ctm_obj_fini(struct drm_device *dev, void *unused)
 114{
 115        struct vc4_dev *vc4 = to_vc4_dev(dev);
 116
 117        drm_atomic_private_obj_fini(&vc4->ctm_manager);
 118}
 119
 120static int vc4_ctm_obj_init(struct vc4_dev *vc4)
 121{
 122        struct vc4_ctm_state *ctm_state;
 123
 124        drm_modeset_lock_init(&vc4->ctm_state_lock);
 125
 126        ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL);
 127        if (!ctm_state)
 128                return -ENOMEM;
 129
 130        drm_atomic_private_obj_init(&vc4->base, &vc4->ctm_manager, &ctm_state->base,
 131                                    &vc4_ctm_state_funcs);
 132
 133        return drmm_add_action_or_reset(&vc4->base, vc4_ctm_obj_fini, NULL);
 134}
 135
 136/* Converts a DRM S31.32 value to the HW S0.9 format. */
 137static u16 vc4_ctm_s31_32_to_s0_9(u64 in)
 138{
 139        u16 r;
 140
 141        /* Sign bit. */
 142        r = in & BIT_ULL(63) ? BIT(9) : 0;
 143
 144        if ((in & GENMASK_ULL(62, 32)) > 0) {
 145                /* We have zero integer bits so we can only saturate here. */
 146                r |= GENMASK(8, 0);
 147        } else {
 148                /* Otherwise take the 9 most important fractional bits. */
 149                r |= (in >> 23) & GENMASK(8, 0);
 150        }
 151
 152        return r;
 153}
 154
/*
 * Program the HVS OLED (CTM) coefficient registers from the committed
 * CTM state, then route the CTM to the selected FIFO. Called from the
 * atomic commit tail.
 */
static void
vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
{
        struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(vc4->ctm_manager.state);
        struct drm_color_ctm *ctm = ctm_state->ctm;

        /* fifo == 0 means the CTM is disabled: skip the coefficients and
         * only update the FIFO routing below. */
        if (ctm_state->fifo) {
                /* Each register packs one input channel's three S0.9
                 * coefficients, converted from the S31.32 DRM matrix. */
                HVS_WRITE(SCALER_OLEDCOEF2,
                          VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[0]),
                                        SCALER_OLEDCOEF2_R_TO_R) |
                          VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[3]),
                                        SCALER_OLEDCOEF2_R_TO_G) |
                          VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[6]),
                                        SCALER_OLEDCOEF2_R_TO_B));
                HVS_WRITE(SCALER_OLEDCOEF1,
                          VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[1]),
                                        SCALER_OLEDCOEF1_G_TO_R) |
                          VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[4]),
                                        SCALER_OLEDCOEF1_G_TO_G) |
                          VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[7]),
                                        SCALER_OLEDCOEF1_G_TO_B));
                HVS_WRITE(SCALER_OLEDCOEF0,
                          VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[2]),
                                        SCALER_OLEDCOEF0_B_TO_R) |
                          VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[5]),
                                        SCALER_OLEDCOEF0_B_TO_G) |
                          VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[8]),
                                        SCALER_OLEDCOEF0_B_TO_B));
        }

        /* Route (or detach, when fifo == 0) the CTM to the display FIFO. */
        HVS_WRITE(SCALER_OLEDOFFS,
                  VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
}
 188
 189static struct vc4_hvs_state *
 190vc4_hvs_get_new_global_state(struct drm_atomic_state *state)
 191{
 192        struct vc4_dev *vc4 = to_vc4_dev(state->dev);
 193        struct drm_private_state *priv_state;
 194
 195        priv_state = drm_atomic_get_new_private_obj_state(state, &vc4->hvs_channels);
 196        if (IS_ERR(priv_state))
 197                return ERR_CAST(priv_state);
 198
 199        return to_vc4_hvs_state(priv_state);
 200}
 201
 202static struct vc4_hvs_state *
 203vc4_hvs_get_old_global_state(struct drm_atomic_state *state)
 204{
 205        struct vc4_dev *vc4 = to_vc4_dev(state->dev);
 206        struct drm_private_state *priv_state;
 207
 208        priv_state = drm_atomic_get_old_private_obj_state(state, &vc4->hvs_channels);
 209        if (IS_ERR(priv_state))
 210                return ERR_CAST(priv_state);
 211
 212        return to_vc4_hvs_state(priv_state);
 213}
 214
 215static struct vc4_hvs_state *
 216vc4_hvs_get_global_state(struct drm_atomic_state *state)
 217{
 218        struct vc4_dev *vc4 = to_vc4_dev(state->dev);
 219        struct drm_private_state *priv_state;
 220
 221        priv_state = drm_atomic_get_private_obj_state(state, &vc4->hvs_channels);
 222        if (IS_ERR(priv_state))
 223                return ERR_CAST(priv_state);
 224
 225        return to_vc4_hvs_state(priv_state);
 226}
 227
/*
 * VC4 (pre-BCM2711) FIFO/pixelvalve muxing: only the DSP3 output mux is
 * configurable, and only CRTCs assigned to channel 2 are affected.
 */
static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4,
                                     struct drm_atomic_state *state)
{
        struct drm_crtc_state *crtc_state;
        struct drm_crtc *crtc;
        unsigned int i;

        for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
                struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
                u32 dispctrl;
                u32 dsp3_mux;

                if (!crtc_state->active)
                        continue;

                /* Only channel 2 feeds the configurable DSP3 mux. */
                if (vc4_state->assigned_channel != 2)
                        continue;

                /*
                 * SCALER_DISPCTRL_DSP3 = X, where X < 2 means 'connect DSP3 to
                 * FIFO X'.
                 * SCALER_DISPCTRL_DSP3 = 3 means 'disable DSP 3'.
                 *
                 * DSP3 is connected to FIFO2 unless the transposer is
                 * enabled. In this case, FIFO 2 is directly accessed by the
                 * TXP IP, and we need to disable the FIFO2 -> pixelvalve1
                 * route.
                 */
                if (vc4_state->feed_txp)
                        dsp3_mux = VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX);
                else
                        dsp3_mux = VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX);

                /* Read-modify-write: only touch the DSP3 mux field. */
                dispctrl = HVS_READ(SCALER_DISPCTRL) &
                           ~SCALER_DISPCTRL_DSP3_MUX_MASK;
                HVS_WRITE(SCALER_DISPCTRL, dispctrl | dsp3_mux);
        }
}
 266
/*
 * BCM2711 (VC5/HVS5) FIFO/pixelvalve muxing: each HVS output (DSP2..DSP5)
 * has its own mux field, spread over several scaler registers. For every
 * CRTC whose muxing changed, point its output at the assigned FIFO, or
 * park the mux (value 3) when the CRTC has no channel.
 */
static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
                                     struct drm_atomic_state *state)
{
        struct drm_crtc_state *crtc_state;
        struct drm_crtc *crtc;
        unsigned char mux;
        unsigned int i;
        u32 reg;

        for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
                struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
                struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);

                /* Set by vc4_pv_muxing_atomic_check() when routing changed. */
                if (!vc4_state->update_muxing)
                        continue;

                /* Dispatch on which HVS output this CRTC drives. */
                switch (vc4_crtc->data->hvs_output) {
                case 2:
                        /* DSP2 mux is a single bit: 0 = FIFO 2, 1 = other. */
                        mux = (vc4_state->assigned_channel == 2) ? 0 : 1;
                        reg = HVS_READ(SCALER_DISPECTRL);
                        HVS_WRITE(SCALER_DISPECTRL,
                                  (reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) |
                                  VC4_SET_FIELD(mux, SCALER_DISPECTRL_DSP2_MUX));
                        break;

                case 3:
                        /* 3 parks (disables) the output. */
                        if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
                                mux = 3;
                        else
                                mux = vc4_state->assigned_channel;

                        reg = HVS_READ(SCALER_DISPCTRL);
                        HVS_WRITE(SCALER_DISPCTRL,
                                  (reg & ~SCALER_DISPCTRL_DSP3_MUX_MASK) |
                                  VC4_SET_FIELD(mux, SCALER_DISPCTRL_DSP3_MUX));
                        break;

                case 4:
                        if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
                                mux = 3;
                        else
                                mux = vc4_state->assigned_channel;

                        reg = HVS_READ(SCALER_DISPEOLN);
                        HVS_WRITE(SCALER_DISPEOLN,
                                  (reg & ~SCALER_DISPEOLN_DSP4_MUX_MASK) |
                                  VC4_SET_FIELD(mux, SCALER_DISPEOLN_DSP4_MUX));

                        break;

                case 5:
                        if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
                                mux = 3;
                        else
                                mux = vc4_state->assigned_channel;

                        reg = HVS_READ(SCALER_DISPDITHER);
                        HVS_WRITE(SCALER_DISPDITHER,
                                  (reg & ~SCALER_DISPDITHER_DSP5_MUX_MASK) |
                                  VC4_SET_FIELD(mux, SCALER_DISPDITHER_DSP5_MUX));
                        break;

                default:
                        /* Outputs without a configurable mux: nothing to do. */
                        break;
                }
        }
}
 334
/*
 * Driver commit tail. On top of the standard helper sequence this:
 *  - masks HVS underrun interrupts for every channel being reconfigured,
 *  - on BCM2711 (hvs5) raises the HVS core clock floor for the duration
 *    of the commit,
 *  - waits for any previous commit still occupying one of our FIFOs
 *    before touching the muxing.
 */
static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
{
        struct drm_device *dev = state->dev;
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct vc4_hvs *hvs = vc4->hvs;
        struct drm_crtc_state *old_crtc_state;
        struct drm_crtc_state *new_crtc_state;
        struct drm_crtc *crtc;
        struct vc4_hvs_state *old_hvs_state;
        int i;

        /* Mask underrun interrupts on channels we are about to touch. */
        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
                struct vc4_crtc_state *vc4_crtc_state;

                if (!new_crtc_state->commit)
                        continue;

                vc4_crtc_state = to_vc4_crtc_state(new_crtc_state);
                vc4_hvs_mask_underrun(dev, vc4_crtc_state->assigned_channel);
        }

        /* BCM2711: keep the core clock at >= 500 MHz while committing. */
        if (vc4->hvs->hvs5)
                clk_set_min_rate(hvs->core_clk, 500000000);

        old_hvs_state = vc4_hvs_get_old_global_state(state);
        if (!old_hvs_state)
                return;

        /* Wait for any previous commit still using one of our FIFOs. */
        for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
                struct vc4_crtc_state *vc4_crtc_state =
                        to_vc4_crtc_state(old_crtc_state);
                unsigned int channel = vc4_crtc_state->assigned_channel;
                int ret;

                if (channel == VC4_HVS_CHANNEL_DISABLED)
                        continue;

                if (!old_hvs_state->fifo_state[channel].in_use)
                        continue;

                ret = drm_crtc_commit_wait(old_hvs_state->fifo_state[channel].pending_commit);
                if (ret)
                        drm_err(dev, "Timed out waiting for commit\n");
        }

        drm_atomic_helper_commit_modeset_disables(dev, state);

        vc4_ctm_commit(vc4, state);

        /* Reprogram the FIFO -> pixelvalve routing for this generation. */
        if (vc4->hvs->hvs5)
                vc5_hvs_pv_muxing_commit(vc4, state);
        else
                vc4_hvs_pv_muxing_commit(vc4, state);

        drm_atomic_helper_commit_planes(dev, state, 0);

        drm_atomic_helper_commit_modeset_enables(dev, state);

        drm_atomic_helper_fake_vblank(state);

        drm_atomic_helper_commit_hw_done(state);

        drm_atomic_helper_wait_for_flip_done(dev, state);

        drm_atomic_helper_cleanup_planes(dev, state);

        /* Drop the temporary core clock floor once the commit settled. */
        if (vc4->hvs->hvs5)
                clk_set_min_rate(hvs->core_clk, 0);
}
 404
 405static int vc4_atomic_commit_setup(struct drm_atomic_state *state)
 406{
 407        struct drm_crtc_state *crtc_state;
 408        struct vc4_hvs_state *hvs_state;
 409        struct drm_crtc *crtc;
 410        unsigned int i;
 411
 412        hvs_state = vc4_hvs_get_new_global_state(state);
 413        if (!hvs_state)
 414                return -EINVAL;
 415
 416        for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
 417                struct vc4_crtc_state *vc4_crtc_state =
 418                        to_vc4_crtc_state(crtc_state);
 419                unsigned int channel =
 420                        vc4_crtc_state->assigned_channel;
 421
 422                if (channel == VC4_HVS_CHANNEL_DISABLED)
 423                        continue;
 424
 425                if (!hvs_state->fifo_state[channel].in_use)
 426                        continue;
 427
 428                hvs_state->fifo_state[channel].pending_commit =
 429                        drm_crtc_commit_get(crtc_state->commit);
 430        }
 431
 432        return 0;
 433}
 434
 435static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
 436                                             struct drm_file *file_priv,
 437                                             const struct drm_mode_fb_cmd2 *mode_cmd)
 438{
 439        struct drm_mode_fb_cmd2 mode_cmd_local;
 440
 441        /* If the user didn't specify a modifier, use the
 442         * vc4_set_tiling_ioctl() state for the BO.
 443         */
 444        if (!(mode_cmd->flags & DRM_MODE_FB_MODIFIERS)) {
 445                struct drm_gem_object *gem_obj;
 446                struct vc4_bo *bo;
 447
 448                gem_obj = drm_gem_object_lookup(file_priv,
 449                                                mode_cmd->handles[0]);
 450                if (!gem_obj) {
 451                        DRM_DEBUG("Failed to look up GEM BO %d\n",
 452                                  mode_cmd->handles[0]);
 453                        return ERR_PTR(-ENOENT);
 454                }
 455                bo = to_vc4_bo(gem_obj);
 456
 457                mode_cmd_local = *mode_cmd;
 458
 459                if (bo->t_format) {
 460                        mode_cmd_local.modifier[0] =
 461                                DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
 462                } else {
 463                        mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE;
 464                }
 465
 466                drm_gem_object_put(gem_obj);
 467
 468                mode_cmd = &mode_cmd_local;
 469        }
 470
 471        return drm_gem_fb_create(dev, file_priv, mode_cmd);
 472}
 473
 474/* Our CTM has some peculiar limitations: we can only enable it for one CRTC
 475 * at a time and the HW only supports S0.9 scalars. To account for the latter,
 476 * we don't allow userland to set a CTM that we have no hope of approximating.
 477 */
 478static int
 479vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
 480{
 481        struct vc4_dev *vc4 = to_vc4_dev(dev);
 482        struct vc4_ctm_state *ctm_state = NULL;
 483        struct drm_crtc *crtc;
 484        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
 485        struct drm_color_ctm *ctm;
 486        int i;
 487
 488        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
 489                /* CTM is being disabled. */
 490                if (!new_crtc_state->ctm && old_crtc_state->ctm) {
 491                        ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
 492                        if (IS_ERR(ctm_state))
 493                                return PTR_ERR(ctm_state);
 494                        ctm_state->fifo = 0;
 495                }
 496        }
 497
 498        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
 499                if (new_crtc_state->ctm == old_crtc_state->ctm)
 500                        continue;
 501
 502                if (!ctm_state) {
 503                        ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
 504                        if (IS_ERR(ctm_state))
 505                                return PTR_ERR(ctm_state);
 506                }
 507
 508                /* CTM is being enabled or the matrix changed. */
 509                if (new_crtc_state->ctm) {
 510                        struct vc4_crtc_state *vc4_crtc_state =
 511                                to_vc4_crtc_state(new_crtc_state);
 512
 513                        /* fifo is 1-based since 0 disables CTM. */
 514                        int fifo = vc4_crtc_state->assigned_channel + 1;
 515
 516                        /* Check userland isn't trying to turn on CTM for more
 517                         * than one CRTC at a time.
 518                         */
 519                        if (ctm_state->fifo && ctm_state->fifo != fifo) {
 520                                DRM_DEBUG_DRIVER("Too many CTM configured\n");
 521                                return -EINVAL;
 522                        }
 523
 524                        /* Check we can approximate the specified CTM.
 525                         * We disallow scalars |c| > 1.0 since the HW has
 526                         * no integer bits.
 527                         */
 528                        ctm = new_crtc_state->ctm->data;
 529                        for (i = 0; i < ARRAY_SIZE(ctm->matrix); i++) {
 530                                u64 val = ctm->matrix[i];
 531
 532                                val &= ~BIT_ULL(63);
 533                                if (val > BIT_ULL(32))
 534                                        return -EINVAL;
 535                        }
 536
 537                        ctm_state->fifo = fifo;
 538                        ctm_state->ctm = ctm;
 539                }
 540        }
 541
 542        return 0;
 543}
 544
/*
 * Update the accumulated HVS and memory-bus load for this state by
 * removing each plane's old contribution and adding its new one, then
 * reject the state if either budget is exceeded (only when the tracker
 * is enabled).
 */
static int vc4_load_tracker_atomic_check(struct drm_atomic_state *state)
{
        struct drm_plane_state *old_plane_state, *new_plane_state;
        struct vc4_dev *vc4 = to_vc4_dev(state->dev);
        struct vc4_load_tracker_state *load_state;
        struct drm_private_state *priv_state;
        struct drm_plane *plane;
        int i;

        /* No tracker on this generation: nothing to account. */
        if (!vc4->load_tracker_available)
                return 0;

        priv_state = drm_atomic_get_private_obj_state(state,
                                                      &vc4->load_tracker);
        if (IS_ERR(priv_state))
                return PTR_ERR(priv_state);

        load_state = to_vc4_load_tracker_state(priv_state);
        for_each_oldnew_plane_in_state(state, plane, old_plane_state,
                                       new_plane_state, i) {
                struct vc4_plane_state *vc4_plane_state;

                /* Plane was visible before: retire its old contribution. */
                if (old_plane_state->fb && old_plane_state->crtc) {
                        vc4_plane_state = to_vc4_plane_state(old_plane_state);
                        load_state->membus_load -= vc4_plane_state->membus_load;
                        load_state->hvs_load -= vc4_plane_state->hvs_load;
                }

                /* Plane is visible after: add its new contribution. */
                if (new_plane_state->fb && new_plane_state->crtc) {
                        vc4_plane_state = to_vc4_plane_state(new_plane_state);
                        load_state->membus_load += vc4_plane_state->membus_load;
                        load_state->hvs_load += vc4_plane_state->hvs_load;
                }
        }

        /* Don't check the load when the tracker is disabled. */
        if (!vc4->load_tracker_enabled)
                return 0;

        /* The absolute limit is 2Gbyte/sec, but let's take a margin to let
         * the system work when other blocks are accessing the memory.
         */
        if (load_state->membus_load > SZ_1G + SZ_512M)
                return -ENOSPC;

        /* HVS clock is supposed to run @ 250Mhz, let's take a margin and
         * consider the maximum number of cycles is 240M.
         */
        if (load_state->hvs_load > 240000000ULL)
                return -ENOSPC;

        return 0;
}
 598
 599static struct drm_private_state *
 600vc4_load_tracker_duplicate_state(struct drm_private_obj *obj)
 601{
 602        struct vc4_load_tracker_state *state;
 603
 604        state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
 605        if (!state)
 606                return NULL;
 607
 608        __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
 609
 610        return &state->base;
 611}
 612
 613static void vc4_load_tracker_destroy_state(struct drm_private_obj *obj,
 614                                           struct drm_private_state *state)
 615{
 616        struct vc4_load_tracker_state *load_state;
 617
 618        load_state = to_vc4_load_tracker_state(state);
 619        kfree(load_state);
 620}
 621
/* Private-object vtable for the load tracker. */
static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = {
        .atomic_duplicate_state = vc4_load_tracker_duplicate_state,
        .atomic_destroy_state = vc4_load_tracker_destroy_state,
};
 626
 627static void vc4_load_tracker_obj_fini(struct drm_device *dev, void *unused)
 628{
 629        struct vc4_dev *vc4 = to_vc4_dev(dev);
 630
 631        if (!vc4->load_tracker_available)
 632                return;
 633
 634        drm_atomic_private_obj_fini(&vc4->load_tracker);
 635}
 636
 637static int vc4_load_tracker_obj_init(struct vc4_dev *vc4)
 638{
 639        struct vc4_load_tracker_state *load_state;
 640
 641        if (!vc4->load_tracker_available)
 642                return 0;
 643
 644        load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
 645        if (!load_state)
 646                return -ENOMEM;
 647
 648        drm_atomic_private_obj_init(&vc4->base, &vc4->load_tracker,
 649                                    &load_state->base,
 650                                    &vc4_load_tracker_state_funcs);
 651
 652        return drmm_add_action_or_reset(&vc4->base, vc4_load_tracker_obj_fini, NULL);
 653}
 654
 655static struct drm_private_state *
 656vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj)
 657{
 658        struct vc4_hvs_state *old_state = to_vc4_hvs_state(obj->state);
 659        struct vc4_hvs_state *state;
 660        unsigned int i;
 661
 662        state = kzalloc(sizeof(*state), GFP_KERNEL);
 663        if (!state)
 664                return NULL;
 665
 666        __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
 667
 668
 669        for (i = 0; i < HVS_NUM_CHANNELS; i++) {
 670                state->fifo_state[i].in_use = old_state->fifo_state[i].in_use;
 671
 672                if (!old_state->fifo_state[i].pending_commit)
 673                        continue;
 674
 675                state->fifo_state[i].pending_commit =
 676                        drm_crtc_commit_get(old_state->fifo_state[i].pending_commit);
 677        }
 678
 679        return &state->base;
 680}
 681
 682static void vc4_hvs_channels_destroy_state(struct drm_private_obj *obj,
 683                                           struct drm_private_state *state)
 684{
 685        struct vc4_hvs_state *hvs_state = to_vc4_hvs_state(state);
 686        unsigned int i;
 687
 688        for (i = 0; i < HVS_NUM_CHANNELS; i++) {
 689                if (!hvs_state->fifo_state[i].pending_commit)
 690                        continue;
 691
 692                drm_crtc_commit_put(hvs_state->fifo_state[i].pending_commit);
 693        }
 694
 695        kfree(hvs_state);
 696}
 697
/* Private-object vtable for the HVS channels object. */
static const struct drm_private_state_funcs vc4_hvs_state_funcs = {
        .atomic_duplicate_state = vc4_hvs_channels_duplicate_state,
        .atomic_destroy_state = vc4_hvs_channels_destroy_state,
};
 702
 703static void vc4_hvs_channels_obj_fini(struct drm_device *dev, void *unused)
 704{
 705        struct vc4_dev *vc4 = to_vc4_dev(dev);
 706
 707        drm_atomic_private_obj_fini(&vc4->hvs_channels);
 708}
 709
 710static int vc4_hvs_channels_obj_init(struct vc4_dev *vc4)
 711{
 712        struct vc4_hvs_state *state;
 713
 714        state = kzalloc(sizeof(*state), GFP_KERNEL);
 715        if (!state)
 716                return -ENOMEM;
 717
 718        drm_atomic_private_obj_init(&vc4->base, &vc4->hvs_channels,
 719                                    &state->base,
 720                                    &vc4_hvs_state_funcs);
 721
 722        return drmm_add_action_or_reset(&vc4->base, vc4_hvs_channels_obj_fini, NULL);
 723}
 724
 725/*
 726 * The BCM2711 HVS has up to 7 outputs connected to the pixelvalves and
 727 * the TXP (and therefore all the CRTCs found on that platform).
 728 *
 729 * The naive (and our initial) implementation would just iterate over
 730 * all the active CRTCs, try to find a suitable FIFO, and then remove it
 731 * from the pool of available FIFOs. However, there are a few corner
 732 * cases that need to be considered:
 733 *
 734 * - When running in a dual-display setup (so with two CRTCs involved),
 735 *   we can update the state of a single CRTC (for example by changing
 736 *   its mode using xrandr under X11) without affecting the other. In
 737 *   this case, the other CRTC wouldn't be in the state at all, so we
 738 *   need to consider all the running CRTCs in the DRM device to assign
 739 *   a FIFO, not just the one in the state.
 740 *
 741 * - To fix the above, we can't use drm_atomic_get_crtc_state on all
 742 *   enabled CRTCs to pull their CRTC state into the global state, since
 743 *   a page flip would start considering their vblank to complete. Since
 744 *   we don't have a guarantee that they are actually active, that
 745 *   vblank might never happen, and shouldn't even be considered if we
 746 *   want to do a page flip on a single CRTC. That can be tested by
 747 *   doing a modetest -v first on HDMI1 and then on HDMI0.
 748 *
 749 * - Since we need the pixelvalve to be disabled and enabled back when
 750 *   the FIFO is changed, we should keep the FIFO assigned for as long
 751 *   as the CRTC is enabled, only considering it free again once that
 752 *   CRTC has been disabled. This can be tested by booting X11 on a
 753 *   single display, and changing the resolution down and then back up.
 754 */
 755static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
 756                                      struct drm_atomic_state *state)
 757{
 758        struct vc4_hvs_state *hvs_new_state;
 759        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
 760        struct drm_crtc *crtc;
 761        unsigned int unassigned_channels = 0;
 762        unsigned int i;
 763
 764        hvs_new_state = vc4_hvs_get_global_state(state);
 765        if (!hvs_new_state)
 766                return -EINVAL;
 767
 768        for (i = 0; i < ARRAY_SIZE(hvs_new_state->fifo_state); i++)
 769                if (!hvs_new_state->fifo_state[i].in_use)
 770                        unassigned_channels |= BIT(i);
 771
 772        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
 773                struct vc4_crtc_state *old_vc4_crtc_state =
 774                        to_vc4_crtc_state(old_crtc_state);
 775                struct vc4_crtc_state *new_vc4_crtc_state =
 776                        to_vc4_crtc_state(new_crtc_state);
 777                struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
 778                unsigned int matching_channels;
 779                unsigned int channel;
 780
 781                /* Nothing to do here, let's skip it */
 782                if (old_crtc_state->enable == new_crtc_state->enable)
 783                        continue;
 784
 785                /* Muxing will need to be modified, mark it as such */
 786                new_vc4_crtc_state->update_muxing = true;
 787
 788                /* If we're disabling our CRTC, we put back our channel */
 789                if (!new_crtc_state->enable) {
 790                        channel = old_vc4_crtc_state->assigned_channel;
 791                        hvs_new_state->fifo_state[channel].in_use = false;
 792                        new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED;
 793                        continue;
 794                }
 795
 796                /*
 797                 * The problem we have to solve here is that we have
 798                 * up to 7 encoders, connected to up to 6 CRTCs.
 799                 *
 800                 * Those CRTCs, depending on the instance, can be
 801                 * routed to 1, 2 or 3 HVS FIFOs, and we need to set
 802                 * the change the muxing between FIFOs and outputs in
 803                 * the HVS accordingly.
 804                 *
 805                 * It would be pretty hard to come up with an
 806                 * algorithm that would generically solve
 807                 * this. However, the current routing trees we support
 808                 * allow us to simplify a bit the problem.
 809                 *
 810                 * Indeed, with the current supported layouts, if we
 811                 * try to assign in the ascending crtc index order the
 812                 * FIFOs, we can't fall into the situation where an
 813                 * earlier CRTC that had multiple routes is assigned
 814                 * one that was the only option for a later CRTC.
 815                 *
 816                 * If the layout changes and doesn't give us that in
 817                 * the future, we will need to have something smarter,
 818                 * but it works so far.
 819                 */
 820                matching_channels = unassigned_channels & vc4_crtc->data->hvs_available_channels;
 821                if (!matching_channels)
 822                        return -EINVAL;
 823
 824                channel = ffs(matching_channels) - 1;
 825                new_vc4_crtc_state->assigned_channel = channel;
 826                unassigned_channels &= ~BIT(channel);
 827                hvs_new_state->fifo_state[channel].in_use = true;
 828        }
 829
 830        return 0;
 831}
 832
 833static int
 834vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
 835{
 836        int ret;
 837
 838        ret = vc4_pv_muxing_atomic_check(dev, state);
 839        if (ret)
 840                return ret;
 841
 842        ret = vc4_ctm_atomic_check(dev, state);
 843        if (ret < 0)
 844                return ret;
 845
 846        ret = drm_atomic_helper_check(dev, state);
 847        if (ret)
 848                return ret;
 849
 850        return vc4_load_tracker_atomic_check(state);
 851}
 852
 853static struct drm_mode_config_helper_funcs vc4_mode_config_helpers = {
 854        .atomic_commit_setup    = vc4_atomic_commit_setup,
 855        .atomic_commit_tail     = vc4_atomic_commit_tail,
 856};
 857
/*
 * Core mode-config vtable: driver-specific atomic check and framebuffer
 * creation, with the stock helper handling atomic commit.
 */
static const struct drm_mode_config_funcs vc4_mode_funcs = {
	.atomic_check = vc4_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
	.fb_create = vc4_fb_create,
};
 863
 864int vc4_kms_load(struct drm_device *dev)
 865{
 866        struct vc4_dev *vc4 = to_vc4_dev(dev);
 867        bool is_vc5 = of_device_is_compatible(dev->dev->of_node,
 868                                              "brcm,bcm2711-vc5");
 869        int ret;
 870
 871        if (!is_vc5) {
 872                vc4->load_tracker_available = true;
 873
 874                /* Start with the load tracker enabled. Can be
 875                 * disabled through the debugfs load_tracker file.
 876                 */
 877                vc4->load_tracker_enabled = true;
 878        }
 879
 880        /* Set support for vblank irq fast disable, before drm_vblank_init() */
 881        dev->vblank_disable_immediate = true;
 882
 883        ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
 884        if (ret < 0) {
 885                dev_err(dev->dev, "failed to initialize vblank\n");
 886                return ret;
 887        }
 888
 889        if (is_vc5) {
 890                dev->mode_config.max_width = 7680;
 891                dev->mode_config.max_height = 7680;
 892        } else {
 893                dev->mode_config.max_width = 2048;
 894                dev->mode_config.max_height = 2048;
 895        }
 896
 897        dev->mode_config.funcs = &vc4_mode_funcs;
 898        dev->mode_config.helper_private = &vc4_mode_config_helpers;
 899        dev->mode_config.preferred_depth = 24;
 900        dev->mode_config.async_page_flip = true;
 901
 902        ret = vc4_ctm_obj_init(vc4);
 903        if (ret)
 904                return ret;
 905
 906        ret = vc4_load_tracker_obj_init(vc4);
 907        if (ret)
 908                return ret;
 909
 910        ret = vc4_hvs_channels_obj_init(vc4);
 911        if (ret)
 912                return ret;
 913
 914        drm_mode_config_reset(dev);
 915
 916        drm_kms_helper_poll_init(dev);
 917
 918        return 0;
 919}
 920