linux/drivers/gpu/drm/i915/display/intel_tc.c
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"
#include "intel_display.h"
#include "intel_display_types.h"
#include "intel_dp_mst.h"
#include "intel_tc.h"

static const char *tc_port_mode_name(enum tc_port_mode mode)
{
        static const char * const names[] = {
                [TC_PORT_TBT_ALT] = "tbt-alt",
                [TC_PORT_DP_ALT] = "dp-alt",
                [TC_PORT_LEGACY] = "legacy",
        };

        if (WARN_ON(mode >= ARRAY_SIZE(names)))
                mode = TC_PORT_TBT_ALT;

        return names[mode];
}

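/*
 * Map the port to its FIA instance and to the port's index within that FIA.
 * On platforms with a modular FIA each instance handles two TC ports,
 * otherwise all ports are routed through FIA1.
 */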
static void
tc_port_load_fia_params(struct drm_i915_private *i915,
                        struct intel_digital_port *dig_port)
{
        enum port port = dig_port->base.port;
        enum tc_port tc_port = intel_port_to_tc(i915, port);
        u32 modular_fia;

        if (INTEL_INFO(i915)->display.has_modular_fia) {
                modular_fia = intel_uncore_read(&i915->uncore,
                                                PORT_TX_DFLEXDPSP(FIA1));
                drm_WARN_ON(&i915->drm, modular_fia == 0xffffffff);
                modular_fia &= MODULAR_FIA_MASK;
        } else {
                modular_fia = 0;
        }

        /*
         * Each Modular FIA instance houses 2 TC ports. In SoCs that have more
         * than two TC ports, there are multiple instances of Modular FIA.
         */
        if (modular_fia) {
                dig_port->tc_phy_fia = tc_port / 2;
                dig_port->tc_phy_fia_idx = tc_port % 2;
        } else {
                dig_port->tc_phy_fia = FIA1;
                dig_port->tc_phy_fia_idx = tc_port;
        }
}

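/*
 * TC-cold blocking: on gen11 only legacy ports need to block TC-cold, using
 * the port's legacy AUX power domain; on newer platforms every TC port blocks
 * it through the dedicated POWER_DOMAIN_TC_COLD_OFF domain.
 */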
static enum intel_display_power_domain
tc_cold_get_power_domain(struct intel_digital_port *dig_port)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

        if (INTEL_GEN(i915) == 11)
                return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
        else
                return POWER_DOMAIN_TC_COLD_OFF;
}

static intel_wakeref_t
tc_cold_block(struct intel_digital_port *dig_port)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        enum intel_display_power_domain domain;

        if (INTEL_GEN(i915) == 11 && !dig_port->tc_legacy_port)
                return 0;

        domain = tc_cold_get_power_domain(dig_port);
        return intel_display_power_get(i915, domain);
}

static void
tc_cold_unblock(struct intel_digital_port *dig_port, intel_wakeref_t wakeref)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        enum intel_display_power_domain domain;

        /*
         * wakeref == -1 means an error happened while saving save_depot_stack,
         * but power should still be put down; 0 is an invalid save_depot_stack
         * id, so it can be used to skip the put for non-TC-legacy ports.
         */
        if (wakeref == 0)
                return;

        domain = tc_cold_get_power_domain(dig_port);
        intel_display_power_put_async(i915, domain, wakeref);
}

static void
assert_tc_cold_blocked(struct intel_digital_port *dig_port)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        bool enabled;

        if (INTEL_GEN(i915) == 11 && !dig_port->tc_legacy_port)
                return;

        enabled = intel_display_power_is_enabled(i915,
                                                 tc_cold_get_power_domain(dig_port));
        drm_WARN_ON(&i915->drm, !enabled);
}

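/*
 * Return the PHY lanes assigned to the port, read from the DP lane
 * assignment field of the DFLEXDPSP register of the port's FIA.
 */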
u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        struct intel_uncore *uncore = &i915->uncore;
        u32 lane_mask;

        lane_mask = intel_uncore_read(uncore,
                                      PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));

        drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff);
        assert_tc_cold_blocked(dig_port);

        lane_mask &= DP_LANE_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx);
        return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
}

u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        struct intel_uncore *uncore = &i915->uncore;
        u32 pin_mask;

        pin_mask = intel_uncore_read(uncore,
                                     PORT_TX_DFLEXPA1(dig_port->tc_phy_fia));

        drm_WARN_ON(&i915->drm, pin_mask == 0xffffffff);
        assert_tc_cold_blocked(dig_port);

        return (pin_mask & DP_PIN_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx)) >>
               DP_PIN_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
}

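/*
 * Return the maximum lane count the port can use: 4 in TBT-alt and legacy
 * mode, otherwise the number of lanes the FIA reports as assigned to the
 * port in DP-alt mode.
 */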
int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        intel_wakeref_t wakeref;
        u32 lane_mask;

        if (dig_port->tc_mode != TC_PORT_DP_ALT)
                return 4;

        assert_tc_cold_blocked(dig_port);

        lane_mask = 0;
        with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
                lane_mask = intel_tc_port_get_lane_mask(dig_port);

        switch (lane_mask) {
        default:
                MISSING_CASE(lane_mask);
                fallthrough;
        case 0x1:
        case 0x2:
        case 0x4:
        case 0x8:
                return 1;
        case 0x3:
        case 0xc:
                return 2;
        case 0xf:
                return 4;
        }
}

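/*
 * Program the number of main-link lanes the FIA should enable for the port
 * in the DFLEXDPMLE1 register, picking the lane set from the reversed end
 * of the connector if lane reversal is used (legacy mode only).
 */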
void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
                                      int required_lanes)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
        struct intel_uncore *uncore = &i915->uncore;
        u32 val;

        drm_WARN_ON(&i915->drm,
                    lane_reversal && dig_port->tc_mode != TC_PORT_LEGACY);

        assert_tc_cold_blocked(dig_port);

        val = intel_uncore_read(uncore,
                                PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia));
        val &= ~DFLEXDPMLE1_DPMLETC_MASK(dig_port->tc_phy_fia_idx);

        switch (required_lanes) {
        case 1:
                val |= lane_reversal ?
                        DFLEXDPMLE1_DPMLETC_ML3(dig_port->tc_phy_fia_idx) :
                        DFLEXDPMLE1_DPMLETC_ML0(dig_port->tc_phy_fia_idx);
                break;
        case 2:
                val |= lane_reversal ?
                        DFLEXDPMLE1_DPMLETC_ML3_2(dig_port->tc_phy_fia_idx) :
                        DFLEXDPMLE1_DPMLETC_ML1_0(dig_port->tc_phy_fia_idx);
                break;
        case 4:
                val |= DFLEXDPMLE1_DPMLETC_ML3_0(dig_port->tc_phy_fia_idx);
                break;
        default:
                MISSING_CASE(required_lanes);
        }

        intel_uncore_write(uncore,
                           PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia), val);
}

static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port,
                                      u32 live_status_mask)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        u32 valid_hpd_mask;

        if (dig_port->tc_legacy_port)
                valid_hpd_mask = BIT(TC_PORT_LEGACY);
        else
                valid_hpd_mask = BIT(TC_PORT_DP_ALT) |
                                 BIT(TC_PORT_TBT_ALT);

        if (!(live_status_mask & ~valid_hpd_mask))
                return;

        /* If live status mismatches the VBT flag, trust the live status. */
        drm_err(&i915->drm,
                "Port %s: live status %08x mismatches the legacy port flag, fix flag\n",
                dig_port->tc_port_name, live_status_mask);

        dig_port->tc_legacy_port = !dig_port->tc_legacy_port;
}

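/*
 * Return a bitmask of the TC_PORT_* modes in which a sink is currently
 * detected: TBT-alt and DP-alt from the FIA live state bits, legacy from
 * the SDE hotplug ISR. An all-ones FIA readback means the PHY is in
 * TC-cold, so nothing is reported as connected.
 */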
static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        struct intel_uncore *uncore = &i915->uncore;
        u32 isr_bit = i915->hotplug.pch_hpd[dig_port->base.hpd_pin];
        u32 mask = 0;
        u32 val;

        val = intel_uncore_read(uncore,
                                PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));

        if (val == 0xffffffff) {
                drm_dbg_kms(&i915->drm,
                            "Port %s: PHY in TCCOLD, nothing connected\n",
                            dig_port->tc_port_name);
                return mask;
        }

        if (val & TC_LIVE_STATE_TBT(dig_port->tc_phy_fia_idx))
                mask |= BIT(TC_PORT_TBT_ALT);
        if (val & TC_LIVE_STATE_TC(dig_port->tc_phy_fia_idx))
                mask |= BIT(TC_PORT_DP_ALT);

        if (intel_uncore_read(uncore, SDEISR) & isr_bit)
                mask |= BIT(TC_PORT_LEGACY);

        /* The sink can be connected only in a single mode. */
        if (!drm_WARN_ON(&i915->drm, hweight32(mask) > 1))
                tc_port_fixup_legacy_flag(dig_port, mask);

        return mask;
}

static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        struct intel_uncore *uncore = &i915->uncore;
        u32 val;

        val = intel_uncore_read(uncore,
                                PORT_TX_DFLEXDPPMS(dig_port->tc_phy_fia));
        if (val == 0xffffffff) {
                drm_dbg_kms(&i915->drm,
                            "Port %s: PHY in TCCOLD, assuming not complete\n",
                            dig_port->tc_port_name);
                return false;
        }

        return val & DP_PHY_MODE_STATUS_COMPLETED(dig_port->tc_phy_fia_idx);
}

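/*
 * Put the PHY into or take it out of safe mode via DFLEXDPCSSS. Returns
 * false if the PHY is in TC-cold and the register reads back all-ones.
 * When entering safe mode, wait for the PHY status-complete flag to clear.
 */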
static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port,
                                     bool enable)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        struct intel_uncore *uncore = &i915->uncore;
        u32 val;

        val = intel_uncore_read(uncore,
                                PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
        if (val == 0xffffffff) {
                drm_dbg_kms(&i915->drm,
                            "Port %s: PHY in TCCOLD, can't set safe-mode to %s\n",
                            dig_port->tc_port_name, enableddisabled(enable));

                return false;
        }

        val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
        if (!enable)
                val |= DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);

        intel_uncore_write(uncore,
                           PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia), val);

        if (enable && wait_for(!icl_tc_phy_status_complete(dig_port), 10))
                drm_dbg_kms(&i915->drm,
                            "Port %s: PHY complete clear timed out\n",
                            dig_port->tc_port_name);

        return true;
}

static bool icl_tc_phy_is_in_safe_mode(struct intel_digital_port *dig_port)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        struct intel_uncore *uncore = &i915->uncore;
        u32 val;

        val = intel_uncore_read(uncore,
                                PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
        if (val == 0xffffffff) {
                drm_dbg_kms(&i915->drm,
                            "Port %s: PHY in TCCOLD, assume safe mode\n",
                            dig_port->tc_port_name);
                return true;
        }

        return !(val & DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx));
}

/*
 * This function implements the first part of the Connect Flow described by our
 * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
 * lanes, EDID, etc) is done as needed in the typical places.
 *
 * Unlike the other ports, type-C ports are not available to use as soon as we
 * get a hotplug. The type-C PHYs can be shared between multiple controllers:
 * display, USB, etc. As a result, handshaking through FIA is required around
 * connect and disconnect to cleanly transfer ownership with the controller and
 * set the type-C power state.
 */
static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
                               int required_lanes)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        int max_lanes;

        if (!icl_tc_phy_status_complete(dig_port)) {
                drm_dbg_kms(&i915->drm, "Port %s: PHY not ready\n",
                            dig_port->tc_port_name);
                goto out_set_tbt_alt_mode;
        }

        if (!icl_tc_phy_set_safe_mode(dig_port, false) &&
            !drm_WARN_ON(&i915->drm, dig_port->tc_legacy_port))
                goto out_set_tbt_alt_mode;

        max_lanes = intel_tc_port_fia_max_lane_count(dig_port);
        if (dig_port->tc_legacy_port) {
                drm_WARN_ON(&i915->drm, max_lanes != 4);
                dig_port->tc_mode = TC_PORT_LEGACY;

                return;
        }

        /*
         * Now we have to re-check the live state, in case the port recently
         * became disconnected. Not necessary for legacy mode.
         */
        if (!(tc_port_live_status_mask(dig_port) & BIT(TC_PORT_DP_ALT))) {
                drm_dbg_kms(&i915->drm, "Port %s: PHY sudden disconnect\n",
                            dig_port->tc_port_name);
                goto out_set_safe_mode;
        }

        if (max_lanes < required_lanes) {
                drm_dbg_kms(&i915->drm,
                            "Port %s: PHY max lanes %d < required lanes %d\n",
                            dig_port->tc_port_name,
                            max_lanes, required_lanes);
                goto out_set_safe_mode;
        }

        dig_port->tc_mode = TC_PORT_DP_ALT;

        return;

out_set_safe_mode:
        icl_tc_phy_set_safe_mode(dig_port, true);
out_set_tbt_alt_mode:
        dig_port->tc_mode = TC_PORT_TBT_ALT;
}

/*
 * See the comment at the connect function. This implements the Disconnect
 * Flow.
 */
static void icl_tc_phy_disconnect(struct intel_digital_port *dig_port)
{
        switch (dig_port->tc_mode) {
        case TC_PORT_LEGACY:
                /* Nothing to do, we never disconnect from legacy mode */
                break;
        case TC_PORT_DP_ALT:
                icl_tc_phy_set_safe_mode(dig_port, true);
                dig_port->tc_mode = TC_PORT_TBT_ALT;
                break;
        case TC_PORT_TBT_ALT:
                /* Nothing to do, we stay in TBT-alt mode */
                break;
        default:
                MISSING_CASE(dig_port->tc_mode);
        }
}

static bool icl_tc_phy_is_connected(struct intel_digital_port *dig_port)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

        if (!icl_tc_phy_status_complete(dig_port)) {
                drm_dbg_kms(&i915->drm, "Port %s: PHY status not complete\n",
                            dig_port->tc_port_name);
                return dig_port->tc_mode == TC_PORT_TBT_ALT;
        }

        if (icl_tc_phy_is_in_safe_mode(dig_port)) {
                drm_dbg_kms(&i915->drm, "Port %s: PHY still in safe mode\n",
                            dig_port->tc_port_name);

                return false;
        }

        return dig_port->tc_mode == TC_PORT_DP_ALT ||
               dig_port->tc_mode == TC_PORT_LEGACY;
}

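/*
 * Work out the mode the port is currently in, based on the PHY's safe-mode
 * and status-complete state, the live status mask and the legacy-port flag.
 * Falls back to TBT-alt mode when the PHY is in safe mode or its status is
 * not complete.
 */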
static enum tc_port_mode
intel_tc_port_get_current_mode(struct intel_digital_port *dig_port)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        u32 live_status_mask = tc_port_live_status_mask(dig_port);
        bool in_safe_mode = icl_tc_phy_is_in_safe_mode(dig_port);
        enum tc_port_mode mode;

        if (in_safe_mode ||
            drm_WARN_ON(&i915->drm, !icl_tc_phy_status_complete(dig_port)))
                return TC_PORT_TBT_ALT;

        mode = dig_port->tc_legacy_port ? TC_PORT_LEGACY : TC_PORT_DP_ALT;
        if (live_status_mask) {
                enum tc_port_mode live_mode = fls(live_status_mask) - 1;

                if (!drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT))
                        mode = live_mode;
        }

        return mode;
}

static enum tc_port_mode
intel_tc_port_get_target_mode(struct intel_digital_port *dig_port)
{
        u32 live_status_mask = tc_port_live_status_mask(dig_port);

        if (live_status_mask)
                return fls(live_status_mask) - 1;

        return icl_tc_phy_status_complete(dig_port) &&
               dig_port->tc_legacy_port ? TC_PORT_LEGACY :
                                          TC_PORT_TBT_ALT;
}

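/*
 * Re-run the disconnect/connect sequence so the port ends up in the mode
 * matching its current live status. Called with the port's TC lock held and
 * TC-cold blocked (see __intel_tc_port_lock()); AUX power is expected to be
 * off, except on gen11 legacy ports where it blocks TC-cold.
 */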
static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port,
                                     int required_lanes)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        enum tc_port_mode old_tc_mode = dig_port->tc_mode;

        intel_display_power_flush_work(i915);
        if (INTEL_GEN(i915) != 11 || !dig_port->tc_legacy_port) {
                enum intel_display_power_domain aux_domain;
                bool aux_powered;

                aux_domain = intel_aux_power_domain(dig_port);
                aux_powered = intel_display_power_is_enabled(i915, aux_domain);
                drm_WARN_ON(&i915->drm, aux_powered);
        }

        icl_tc_phy_disconnect(dig_port);
        icl_tc_phy_connect(dig_port, required_lanes);

        drm_dbg_kms(&i915->drm, "Port %s: TC port mode reset (%s -> %s)\n",
                    dig_port->tc_port_name,
                    tc_port_mode_name(old_tc_mode),
                    tc_port_mode_name(dig_port->tc_mode));
}

static void
intel_tc_port_link_init_refcount(struct intel_digital_port *dig_port,
                                 int refcount)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

        drm_WARN_ON(&i915->drm, dig_port->tc_link_refcount);
        dig_port->tc_link_refcount = refcount;
}

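/*
 * Sanitize the port's TC state against the current hardware state: read out
 * the current port mode, seed the link refcount from any already-active
 * links, and connect the PHY of legacy ports that have no active links.
 */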
void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        struct intel_encoder *encoder = &dig_port->base;
        intel_wakeref_t tc_cold_wref;
        int active_links = 0;

        mutex_lock(&dig_port->tc_lock);
        tc_cold_wref = tc_cold_block(dig_port);

        dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
        if (dig_port->dp.is_mst)
                active_links = intel_dp_mst_encoder_active_links(dig_port);
        else if (encoder->base.crtc)
                active_links = to_intel_crtc(encoder->base.crtc)->active;

        if (active_links) {
                if (!icl_tc_phy_is_connected(dig_port))
                        drm_dbg_kms(&i915->drm,
                                    "Port %s: PHY disconnected with %d active link(s)\n",
                                    dig_port->tc_port_name, active_links);
                intel_tc_port_link_init_refcount(dig_port, active_links);

                goto out;
        }

        if (dig_port->tc_legacy_port)
                icl_tc_phy_connect(dig_port, 1);

out:
        drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n",
                    dig_port->tc_port_name,
                    tc_port_mode_name(dig_port->tc_mode));

        tc_cold_unblock(dig_port, tc_cold_wref);
        mutex_unlock(&dig_port->tc_lock);
}

static bool intel_tc_port_needs_reset(struct intel_digital_port *dig_port)
{
        return intel_tc_port_get_target_mode(dig_port) != dig_port->tc_mode;
}

/*
 * The type-C ports are different because even when they are connected, they may
 * not be available/usable by the graphics driver: see the comment on
 * icl_tc_phy_connect(). So in our driver, instead of adding the additional
 * concept of "usable" and making everything check for "connected and usable",
 * we define a port as "connected" when it is not only connected, but also
 * usable by the rest of the driver. That maintains the old assumption that
 * connected ports are usable, and avoids exposing to the users objects they
 * can't really use.
 */
bool intel_tc_port_connected(struct intel_encoder *encoder)
{
        struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
        bool is_connected;
        intel_wakeref_t tc_cold_wref;

        intel_tc_port_lock(dig_port);
        tc_cold_wref = tc_cold_block(dig_port);

        is_connected = tc_port_live_status_mask(dig_port) &
                       BIT(dig_port->tc_mode);

        tc_cold_unblock(dig_port, tc_cold_wref);
        intel_tc_port_unlock(dig_port);

        return is_connected;
}

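/*
 * Lock the port's TC state, holding the DISPLAY_CORE power domain for the
 * duration of the lock. If no link reference is held, reset the port mode
 * to match the current live status before returning with the lock held.
 */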
static void __intel_tc_port_lock(struct intel_digital_port *dig_port,
                                 int required_lanes)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        intel_wakeref_t wakeref;

        wakeref = intel_display_power_get(i915, POWER_DOMAIN_DISPLAY_CORE);

        mutex_lock(&dig_port->tc_lock);

        if (!dig_port->tc_link_refcount) {
                intel_wakeref_t tc_cold_wref;

                tc_cold_wref = tc_cold_block(dig_port);

                if (intel_tc_port_needs_reset(dig_port))
                        intel_tc_port_reset_mode(dig_port, required_lanes);

                tc_cold_unblock(dig_port, tc_cold_wref);
        }

        drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref);
        dig_port->tc_lock_wakeref = wakeref;
}

void intel_tc_port_lock(struct intel_digital_port *dig_port)
{
        __intel_tc_port_lock(dig_port, 1);
}

void intel_tc_port_unlock(struct intel_digital_port *dig_port)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        intel_wakeref_t wakeref = fetch_and_zero(&dig_port->tc_lock_wakeref);

        mutex_unlock(&dig_port->tc_lock);

        intel_display_power_put_async(i915, POWER_DOMAIN_DISPLAY_CORE,
                                      wakeref);
}

bool intel_tc_port_ref_held(struct intel_digital_port *dig_port)
{
        return mutex_is_locked(&dig_port->tc_lock) ||
               dig_port->tc_link_refcount;
}

void intel_tc_port_get_link(struct intel_digital_port *dig_port,
                            int required_lanes)
{
        __intel_tc_port_lock(dig_port, required_lanes);
        dig_port->tc_link_refcount++;
        intel_tc_port_unlock(dig_port);
}

void intel_tc_port_put_link(struct intel_digital_port *dig_port)
{
        mutex_lock(&dig_port->tc_lock);
        dig_port->tc_link_refcount--;
        mutex_unlock(&dig_port->tc_lock);
}

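/*
 * One-time init of the port's TC state: set up the port name, lock and
 * legacy flag, and resolve which FIA instance/index the port is wired to.
 */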
void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        enum port port = dig_port->base.port;
        enum tc_port tc_port = intel_port_to_tc(i915, port);

        if (drm_WARN_ON(&i915->drm, tc_port == PORT_TC_NONE))
                return;

        snprintf(dig_port->tc_port_name, sizeof(dig_port->tc_port_name),
                 "%c/TC#%d", port_name(port), tc_port + 1);

        mutex_init(&dig_port->tc_lock);
        dig_port->tc_legacy_port = is_legacy;
        dig_port->tc_link_refcount = 0;
        tc_port_load_fia_params(i915, dig_port);
}