linux/include/drm/drm_dp_mst_helper.h
/*
 * Copyright © 2014 Red Hat.
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission.  The copyright holders make no representations
 * about the suitability of this software for any purpose.  It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */
#ifndef _DRM_DP_MST_HELPER_H_
#define _DRM_DP_MST_HELPER_H_

#include <linux/types.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_atomic.h>

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
#include <linux/stackdepot.h>
#include <linux/timekeeping.h>

enum drm_dp_mst_topology_ref_type {
        DRM_DP_MST_TOPOLOGY_REF_GET,
        DRM_DP_MST_TOPOLOGY_REF_PUT,
};

struct drm_dp_mst_topology_ref_history {
        struct drm_dp_mst_topology_ref_entry {
                enum drm_dp_mst_topology_ref_type type;
                int count;
                ktime_t ts_nsec;
                depot_stack_handle_t backtrace;
        } *entries;
        int len;
};
#endif /* IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) */

struct drm_dp_mst_branch;

/**
 * struct drm_dp_vcpi - Virtual Channel Payload Identifier
 * @vcpi: Virtual channel ID.
 * @pbn: Payload Bandwidth Number for this channel
 * @aligned_pbn: PBN aligned with slot size
 * @num_slots: number of slots for this PBN
 */
struct drm_dp_vcpi {
        int vcpi;
        int pbn;
        int aligned_pbn;
        int num_slots;
};
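
/*
 * Illustrative sketch (not part of the API): @num_slots and @aligned_pbn are
 * derived from @pbn and the manager's PBN-to-slot divisor, roughly as
 * follows (see drm_dp_find_vcpi_slots() and drm_dp_mst_allocate_vcpi()):
 *
 *      num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
 *      aligned_pbn = num_slots * mgr->pbn_div;
 */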

/**
 * struct drm_dp_mst_port - MST port
 * @port_num: port number
 * @input: if this port is an input port. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @mcs: message capability status - DP 1.2 spec. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @ddps: DisplayPort Device Plug Status - DP 1.2. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @pdt: Peer Device Type. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @ldps: Legacy Device Plug Status. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @dpcd_rev: DPCD revision of device on this port. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @num_sdp_streams: Number of simultaneous streams. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @num_sdp_stream_sinks: Number of stream sinks. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @full_pbn: Max possible bandwidth for this port. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @next: link to next port on this branch device
 * @aux: i2c aux transport to talk to device connected to this port, protected
 * by &drm_dp_mst_topology_mgr.base.lock.
 * @parent: branch device parent of this port
 * @vcpi: Virtual Channel Payload info for this port.
 * @connector: DRM connector this port is connected to. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @mgr: topology manager this port lives under.
 *
 * This structure represents an MST port endpoint on a device somewhere
 * in the MST topology.
 */
struct drm_dp_mst_port {
        /**
         * @topology_kref: refcount for this port's lifetime in the topology,
         * only the DP MST helpers should need to touch this
         */
        struct kref topology_kref;

        /**
         * @malloc_kref: refcount for the memory allocation containing this
         * structure. See drm_dp_mst_get_port_malloc() and
         * drm_dp_mst_put_port_malloc().
         */
        struct kref malloc_kref;

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
        /**
         * @topology_ref_history: A history of each topology
         * reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS.
         */
        struct drm_dp_mst_topology_ref_history topology_ref_history;
#endif

        u8 port_num;
        bool input;
        bool mcs;
        bool ddps;
        u8 pdt;
        bool ldps;
        u8 dpcd_rev;
        u8 num_sdp_streams;
        u8 num_sdp_stream_sinks;
        uint16_t full_pbn;
        struct list_head next;
        /**
         * @mstb: the branch device connected to this port, if there is one.
         * This should be considered protected for reading by
         * &drm_dp_mst_topology_mgr.lock. There are two exceptions to this:
         * &drm_dp_mst_topology_mgr.up_req_work and
         * &drm_dp_mst_topology_mgr.work, which do not grab
         * &drm_dp_mst_topology_mgr.lock during reads but are the only
         * updaters of this list and are protected from writing concurrently
         * by &drm_dp_mst_topology_mgr.probe_lock.
         */
        struct drm_dp_mst_branch *mstb;
        struct drm_dp_aux aux; /* i2c bus for this port? */
        struct drm_dp_mst_branch *parent;

        struct drm_dp_vcpi vcpi;
        struct drm_connector *connector;
        struct drm_dp_mst_topology_mgr *mgr;

        /**
         * @cached_edid: for DP logical ports - make tiling work by ensuring
         * that the EDID for all connectors is read immediately.
         */
        struct edid *cached_edid;
        /**
         * @has_audio: Tracks whether the sink connected to this port is
         * audio-capable.
         */
        bool has_audio;

        /**
         * @fec_capable: bool indicating if FEC can be supported up to that
         * point in the MST topology.
         */
        bool fec_capable;
};
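
/*
 * Example (illustrative sketch, not part of this header): driver code that
 * stores a long-lived pointer to a port should hold a malloc reference for
 * as long as the pointer lives. "example_connector" and its helpers are
 * hypothetical names:
 *
 *      struct example_connector {
 *              struct drm_connector base;
 *              struct drm_dp_mst_port *port;
 *      };
 *
 *      static void example_connector_init(struct example_connector *conn,
 *                                         struct drm_dp_mst_port *port)
 *      {
 *              conn->port = port;
 *              drm_dp_mst_get_port_malloc(port);
 *      }
 *
 *      static void example_connector_destroy(struct example_connector *conn)
 *      {
 *              drm_dp_mst_put_port_malloc(conn->port);
 *      }
 */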

/* sideband msg header - not bit struct */
struct drm_dp_sideband_msg_hdr {
        u8 lct;
        u8 lcr;
        u8 rad[8];
        bool broadcast;
        bool path_msg;
        u8 msg_len;
        bool somt;
        bool eomt;
        bool seqno;
};

struct drm_dp_sideband_msg_rx {
        u8 chunk[48];
        u8 msg[256];
        u8 curchunk_len;
        u8 curchunk_idx; /* chunk we are parsing now */
        u8 curchunk_hdrlen;
        u8 curlen; /* total length of the msg */
        bool have_somt;
        bool have_eomt;
        struct drm_dp_sideband_msg_hdr initial_hdr;
};

/**
 * struct drm_dp_mst_branch - MST branch device.
 * @rad: Relative Address to talk to this branch device.
 * @lct: Link count total to talk to this branch device.
 * @num_ports: number of ports on the branch.
 * @port_parent: pointer to the port parent, NULL if toplevel.
 * @mgr: topology manager for this branch device.
 * @link_address_sent: if a link address message has been sent to this device yet.
 * @guid: guid for DP 1.2 branch device. Ports under this branch can be
 * identified by port number.
 *
 * This structure represents an MST branch device. There is one primary
 * branch device at the root, along with any other branches connected to
 * downstream ports of parent branches.
 */
struct drm_dp_mst_branch {
        /**
         * @topology_kref: refcount for this branch device's lifetime in the
         * topology, only the DP MST helpers should need to touch this
         */
        struct kref topology_kref;

        /**
         * @malloc_kref: refcount for the memory allocation containing this
         * structure. See drm_dp_mst_get_mstb_malloc() and
         * drm_dp_mst_put_mstb_malloc().
         */
        struct kref malloc_kref;

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
        /**
         * @topology_ref_history: A history of each topology
         * reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS.
         */
        struct drm_dp_mst_topology_ref_history topology_ref_history;
#endif

        /**
         * @destroy_next: linked-list entry used by
         * drm_dp_delayed_destroy_work()
         */
        struct list_head destroy_next;

        u8 rad[8];
        u8 lct;
        int num_ports;

        /**
         * @ports: the list of ports on this branch device. This should be
         * considered protected for reading by &drm_dp_mst_topology_mgr.lock.
         * There are two exceptions to this:
         * &drm_dp_mst_topology_mgr.up_req_work and
         * &drm_dp_mst_topology_mgr.work, which do not grab
         * &drm_dp_mst_topology_mgr.lock during reads but are the only
         * updaters of this list and are protected from updating the list
         * concurrently by &drm_dp_mst_topology_mgr.probe_lock.
         */
        struct list_head ports;

        struct drm_dp_mst_port *port_parent;
        struct drm_dp_mst_topology_mgr *mgr;

        bool link_address_sent;

        /* global unique identifier to identify branch devices */
        u8 guid[16];
};
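
/*
 * Addressing sketch (derived from the DP 1.2 MST spec; illustrative): @lct
 * counts the links between the source and this branch, and @rad packs the
 * downstream port numbers along that path into nibbles. The root branch has
 * lct = 1 and an empty RAD; a branch behind port 2 of the root would have
 * lct = 2 and rad[0] = 0x20 (the port number in the upper nibble).
 */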

struct drm_dp_nak_reply {
        u8 guid[16];
        u8 reason;
        u8 nak_data;
};

struct drm_dp_link_address_ack_reply {
        u8 guid[16];
        u8 nports;
        struct drm_dp_link_addr_reply_port {
                bool input_port;
                u8 peer_device_type;
                u8 port_number;
                bool mcs;
                bool ddps;
                bool legacy_device_plug_status;
                u8 dpcd_revision;
                u8 peer_guid[16];
                u8 num_sdp_streams;
                u8 num_sdp_stream_sinks;
        } ports[16];
};

struct drm_dp_remote_dpcd_read_ack_reply {
        u8 port_number;
        u8 num_bytes;
        u8 bytes[255];
};

struct drm_dp_remote_dpcd_write_ack_reply {
        u8 port_number;
};

struct drm_dp_remote_dpcd_write_nak_reply {
        u8 port_number;
        u8 reason;
        u8 bytes_written_before_failure;
};

struct drm_dp_remote_i2c_read_ack_reply {
        u8 port_number;
        u8 num_bytes;
        u8 bytes[255];
};

struct drm_dp_remote_i2c_read_nak_reply {
        u8 port_number;
        u8 nak_reason;
        u8 i2c_nak_transaction;
};

struct drm_dp_remote_i2c_write_ack_reply {
        u8 port_number;
};

#define DRM_DP_MAX_SDP_STREAMS 16
struct drm_dp_allocate_payload {
        u8 port_number;
        u8 number_sdp_streams;
        u8 vcpi;
        u16 pbn;
        u8 sdp_stream_sink[DRM_DP_MAX_SDP_STREAMS];
};

struct drm_dp_allocate_payload_ack_reply {
        u8 port_number;
        u8 vcpi;
        u16 allocated_pbn;
};

struct drm_dp_connection_status_notify {
        u8 guid[16];
        u8 port_number;
        bool legacy_device_plug_status;
        bool displayport_device_plug_status;
        bool message_capability_status;
        bool input_port;
        u8 peer_device_type;
};

struct drm_dp_remote_dpcd_read {
        u8 port_number;
        u32 dpcd_address;
        u8 num_bytes;
};

struct drm_dp_remote_dpcd_write {
        u8 port_number;
        u32 dpcd_address;
        u8 num_bytes;
        u8 *bytes;
};

#define DP_REMOTE_I2C_READ_MAX_TRANSACTIONS 4
struct drm_dp_remote_i2c_read {
        u8 num_transactions;
        u8 port_number;
        struct drm_dp_remote_i2c_read_tx {
                u8 i2c_dev_id;
                u8 num_bytes;
                u8 *bytes;
                u8 no_stop_bit;
                u8 i2c_transaction_delay;
        } transactions[DP_REMOTE_I2C_READ_MAX_TRANSACTIONS];
        u8 read_i2c_device_id;
        u8 num_bytes_read;
};

struct drm_dp_remote_i2c_write {
        u8 port_number;
        u8 write_i2c_device_id;
        u8 num_bytes;
        u8 *bytes;
};

/* this covers ENUM_RESOURCES, POWER_DOWN_PHY, POWER_UP_PHY */
struct drm_dp_port_number_req {
        u8 port_number;
};

struct drm_dp_enum_path_resources_ack_reply {
        u8 port_number;
        bool fec_capable;
        u16 full_payload_bw_number;
        u16 avail_payload_bw_number;
};

/* covers POWER_DOWN_PHY, POWER_UP_PHY */
struct drm_dp_port_number_rep {
        u8 port_number;
};

struct drm_dp_query_payload {
        u8 port_number;
        u8 vcpi;
};

struct drm_dp_resource_status_notify {
        u8 port_number;
        u8 guid[16];
        u16 available_pbn;
};

struct drm_dp_query_payload_ack_reply {
        u8 port_number;
        u16 allocated_pbn;
};

struct drm_dp_sideband_msg_req_body {
        u8 req_type;
        union ack_req {
                struct drm_dp_connection_status_notify conn_stat;
                struct drm_dp_port_number_req port_num;
                struct drm_dp_resource_status_notify resource_stat;

                struct drm_dp_query_payload query_payload;
                struct drm_dp_allocate_payload allocate_payload;

                struct drm_dp_remote_dpcd_read dpcd_read;
                struct drm_dp_remote_dpcd_write dpcd_write;

                struct drm_dp_remote_i2c_read i2c_read;
                struct drm_dp_remote_i2c_write i2c_write;
        } u;
};

struct drm_dp_sideband_msg_reply_body {
        u8 reply_type;
        u8 req_type;
        union ack_replies {
                struct drm_dp_nak_reply nak;
                struct drm_dp_link_address_ack_reply link_addr;
                struct drm_dp_port_number_rep port_number;

                struct drm_dp_enum_path_resources_ack_reply path_resources;
                struct drm_dp_allocate_payload_ack_reply allocate_payload;
                struct drm_dp_query_payload_ack_reply query_payload;

                struct drm_dp_remote_dpcd_read_ack_reply remote_dpcd_read_ack;
                struct drm_dp_remote_dpcd_write_ack_reply remote_dpcd_write_ack;
                struct drm_dp_remote_dpcd_write_nak_reply remote_dpcd_write_nack;

                struct drm_dp_remote_i2c_read_ack_reply remote_i2c_read_ack;
                struct drm_dp_remote_i2c_read_nak_reply remote_i2c_read_nack;
                struct drm_dp_remote_i2c_write_ack_reply remote_i2c_write_ack;
        } u;
};

/* msg is queued to be put into a slot */
#define DRM_DP_SIDEBAND_TX_QUEUED 0
/* msg has started transmitting on a slot - still on msgq */
#define DRM_DP_SIDEBAND_TX_START_SEND 1
/* msg has finished transmitting on a slot - removed from msgq only in slot */
#define DRM_DP_SIDEBAND_TX_SENT 2
/* msg has received a response - removed from slot */
#define DRM_DP_SIDEBAND_TX_RX 3
#define DRM_DP_SIDEBAND_TX_TIMEOUT 4

struct drm_dp_sideband_msg_tx {
        u8 msg[256];
        u8 chunk[48];
        u8 cur_offset;
        u8 cur_len;
        struct drm_dp_mst_branch *dst;
        struct list_head next;
        int seqno;
        int state;
        bool path_msg;
        struct drm_dp_sideband_msg_reply_body reply;
};

/* sideband msg handler */
struct drm_dp_mst_topology_mgr;
struct drm_dp_mst_topology_cbs {
        /* create a connector for a port */
        struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path);
};
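
/*
 * Example implementation sketch for &drm_dp_mst_topology_cbs.add_connector
 * (hypothetical names, loosely modelled on in-tree drivers; error handling
 * trimmed). The callback allocates a connector for the new port and records
 * the topology path on it:
 *
 *      static struct drm_connector *
 *      example_add_connector(struct drm_dp_mst_topology_mgr *mgr,
 *                            struct drm_dp_mst_port *port, const char *path)
 *      {
 *              struct drm_connector *connector;
 *
 *              connector = kzalloc(sizeof(*connector), GFP_KERNEL);
 *              if (!connector)
 *                      return NULL;
 *
 *              drm_connector_init(mgr->dev, connector,
 *                                 &example_connector_funcs,
 *                                 DRM_MODE_CONNECTOR_DisplayPort);
 *              drm_connector_set_path_property(connector, path);
 *              return connector;
 *      }
 */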

#define DP_MAX_PAYLOAD (sizeof(unsigned long) * 8)

#define DP_PAYLOAD_LOCAL 1
#define DP_PAYLOAD_REMOTE 2
#define DP_PAYLOAD_DELETE_LOCAL 3

struct drm_dp_payload {
        int payload_state;
        int start_slot;
        int num_slots;
        int vcpi;
};

#define to_dp_mst_topology_state(x) container_of(x, struct drm_dp_mst_topology_state, base)

struct drm_dp_vcpi_allocation {
        struct drm_dp_mst_port *port;
        int vcpi;
        int pbn;
        bool dsc_enabled;
        struct list_head next;
};

struct drm_dp_mst_topology_state {
        struct drm_private_state base;
        struct list_head vcpis;
        struct drm_dp_mst_topology_mgr *mgr;
};

#define to_dp_mst_topology_mgr(x) container_of(x, struct drm_dp_mst_topology_mgr, base)

/**
 * struct drm_dp_mst_topology_mgr - DisplayPort MST manager
 *
 * This struct represents the top-level DisplayPort MST topology manager.
 * There should be one instance of this for every MST capable DP connector
 * on the GPU.
 */
struct drm_dp_mst_topology_mgr {
        /**
         * @base: Base private object for atomic
         */
        struct drm_private_obj base;

        /**
         * @dev: device pointer for adding i2c devices etc.
         */
        struct drm_device *dev;
        /**
         * @cbs: callbacks for connector addition and destruction.
         */
        const struct drm_dp_mst_topology_cbs *cbs;
        /**
         * @max_dpcd_transaction_bytes: maximum number of bytes to read/write
         * in one go.
         */
        int max_dpcd_transaction_bytes;
        /**
         * @aux: AUX channel for the DP MST connector this topology mgr is
         * controlling.
         */
        struct drm_dp_aux *aux;
        /**
         * @max_payloads: maximum number of payloads the GPU can generate.
         */
        int max_payloads;
        /**
         * @conn_base_id: DRM connector ID this mgr is connected to. Only used
         * to build the MST connector path value.
         */
        int conn_base_id;

        /**
         * @up_req_recv: Message receiver state for up requests.
         */
        struct drm_dp_sideband_msg_rx up_req_recv;

        /**
         * @down_rep_recv: Message receiver state for replies to down
         * requests.
         */
        struct drm_dp_sideband_msg_rx down_rep_recv;

        /**
         * @lock: protects @mst_state, @mst_primary, @dpcd, and
         * @payload_id_table_cleared.
         */
        struct mutex lock;

        /**
         * @probe_lock: Prevents @work and @up_req_work, the only writers of
         * &drm_dp_mst_port.mstb and &drm_dp_mst_branch.ports, from racing
         * while they update the topology.
         */
        struct mutex probe_lock;

        /**
         * @mst_state: If this manager is enabled for an MST capable port.
         * False if no MST sink/branch devices are connected.
         */
        bool mst_state : 1;

        /**
         * @payload_id_table_cleared: Whether or not we've cleared the payload
         * ID table for @mst_primary. Protected by @lock.
         */
        bool payload_id_table_cleared : 1;

        /**
         * @mst_primary: Pointer to the primary/first branch device.
         */
        struct drm_dp_mst_branch *mst_primary;

        /**
         * @dpcd: Cache of DPCD for primary port.
         */
        u8 dpcd[DP_RECEIVER_CAP_SIZE];
        /**
         * @sink_count: Sink count from DEVICE_SERVICE_IRQ_VECTOR_ESI0.
         */
        u8 sink_count;
        /**
         * @pbn_div: PBN to slots divisor.
         */
        int pbn_div;

        /**
         * @funcs: Atomic helper callbacks
         */
        const struct drm_private_state_funcs *funcs;

        /**
         * @qlock: protects @tx_msg_downq and &drm_dp_sideband_msg_tx.state
         */
        struct mutex qlock;

        /**
         * @tx_msg_downq: List of pending down requests
         */
        struct list_head tx_msg_downq;

        /**
         * @payload_lock: Protect payload information.
         */
        struct mutex payload_lock;
        /**
         * @proposed_vcpis: Array of pointers for the new VCPI allocation. The
         * VCPI structure itself is &drm_dp_mst_port.vcpi, and the size of
         * this array is determined by @max_payloads.
         */
        struct drm_dp_vcpi **proposed_vcpis;
        /**
         * @payloads: Array of payloads. The size of this array is determined
         * by @max_payloads.
         */
        struct drm_dp_payload *payloads;
        /**
         * @payload_mask: Elements of @payloads actually in use. Since
         * reallocation of active outputs isn't possible, gaps can be created
         * by disabling outputs out of order compared to how they've been
         * enabled.
         */
        unsigned long payload_mask;
        /**
         * @vcpi_mask: Similar to @payload_mask, but for @proposed_vcpis.
         */
        unsigned long vcpi_mask;

        /**
         * @tx_waitq: Wait to queue stall for the tx worker.
         */
        wait_queue_head_t tx_waitq;
        /**
         * @work: Probe work.
         */
        struct work_struct work;
        /**
         * @tx_work: Sideband transmit worker. This can nest within the main
         * @work worker for each transaction @work launches.
         */
        struct work_struct tx_work;

        /**
         * @destroy_port_list: List of to be destroyed connectors.
         */
        struct list_head destroy_port_list;
        /**
         * @destroy_branch_device_list: List of to be destroyed branch
         * devices.
         */
        struct list_head destroy_branch_device_list;
        /**
         * @delayed_destroy_lock: Protects @destroy_port_list and
         * @destroy_branch_device_list.
         */
        struct mutex delayed_destroy_lock;
        /**
         * @delayed_destroy_work: Work item to destroy MST port and branch
         * devices, needed to avoid locking inversion.
         */
        struct work_struct delayed_destroy_work;

        /**
         * @up_req_list: List of pending up requests from the topology that
         * need to be processed, in chronological order.
         */
        struct list_head up_req_list;
        /**
         * @up_req_lock: Protects @up_req_list
         */
        struct mutex up_req_lock;
        /**
         * @up_req_work: Work item to process up requests received from the
         * topology. Needed to avoid blocking hotplug handling and sideband
         * transmissions.
         */
        struct work_struct up_req_work;

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
        /**
         * @topology_ref_history_lock: protects
         * &drm_dp_mst_port.topology_ref_history and
         * &drm_dp_mst_branch.topology_ref_history.
         */
        struct mutex topology_ref_history_lock;
#endif
};

int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
                                 struct drm_device *dev, struct drm_dp_aux *aux,
                                 int max_dpcd_transaction_bytes,
                                 int max_payloads, int conn_base_id);
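
/*
 * Typical init call, as a hedged sketch: run once per MST capable connector
 * at driver init time. "dig_port" and the transaction/payload limits here
 * are hypothetical, illustrative values, not prescriptive ones:
 *
 *      ret = drm_dp_mst_topology_mgr_init(&dig_port->mst_mgr, dev,
 *                                         &dig_port->dp_aux, 16, 4,
 *                                         connector->base.id);
 *      if (ret)
 *              return ret;
 */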

void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr);

int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state);

int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled);
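
/*
 * Sketch of the usual HPD IRQ flow (loosely based on what in-tree drivers
 * do; the DPCD offsets are from drm_dp_helper.h, error handling omitted):
 *
 *      u8 esi[DP_DPRX_ESI_LEN];
 *      bool handled = false;
 *
 *      drm_dp_dpcd_read(mgr->aux, DP_SINK_COUNT_ESI, esi, DP_DPRX_ESI_LEN);
 *      drm_dp_mst_hpd_irq(mgr, esi, &handled);
 *      if (handled)
 *              drm_dp_dpcd_write(mgr->aux, DP_SINK_COUNT_ESI + 1,
 *                                &esi[1], 3);
 *
 * i.e. read the ESI vector, let the helper process the events, then ack
 * the serviced events back to the sink.
 */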

int
drm_dp_mst_detect_port(struct drm_connector *connector,
                       struct drm_modeset_acquire_ctx *ctx,
                       struct drm_dp_mst_topology_mgr *mgr,
                       struct drm_dp_mst_port *port);

struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);

int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc);
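
/*
 * Example (illustrative): computing the PBN for a stream at 24bpp without
 * DSC, from the adjusted mode's pixel clock in kHz:
 *
 *      int pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, 24, false);
 */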

bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
                              struct drm_dp_mst_port *port, int pbn, int slots);

int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);

void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);

void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
                                struct drm_dp_mst_port *port);

int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
                           int pbn);

int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr);

int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr);

int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr);
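
/*
 * Rough enable-time ordering, as a sketch (drivers differ in the details):
 * allocate the VCPI, write the local payload table, enable the stream, wait
 * for the allocation to take effect, then send the remote payload messages:
 *
 *      slots = drm_dp_find_vcpi_slots(mgr, pbn);
 *      drm_dp_mst_allocate_vcpi(mgr, port, pbn, slots);
 *      drm_dp_update_payload_part1(mgr);
 *      ... enable the stream on the hardware ...
 *      drm_dp_check_act_status(mgr);
 *      drm_dp_update_payload_part2(mgr);
 */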

void drm_dp_mst_dump_topology(struct seq_file *m,
                              struct drm_dp_mst_topology_mgr *mgr);

void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);
int __must_check
drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
                               bool sync);

ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
                             unsigned int offset, void *buffer, size_t size);
ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
                              unsigned int offset, void *buffer, size_t size);

int drm_dp_mst_connector_late_register(struct drm_connector *connector,
                                       struct drm_dp_mst_port *port);
void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
                                           struct drm_dp_mst_port *port);

struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
                                                                    struct drm_dp_mst_topology_mgr *mgr);
int __must_check
drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
                              struct drm_dp_mst_topology_mgr *mgr,
                              struct drm_dp_mst_port *port, int pbn,
                              int pbn_div);
int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
                                 struct drm_dp_mst_port *port,
                                 int pbn, int pbn_div,
                                 bool enable);
int __must_check
drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state,
                                  struct drm_dp_mst_topology_mgr *mgr);
int __must_check
drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
                                 struct drm_dp_mst_topology_mgr *mgr,
                                 struct drm_dp_mst_port *port);
int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
                                 struct drm_dp_mst_port *port, bool power_up);
int __must_check drm_dp_mst_atomic_check(struct drm_atomic_state *state);
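
/*
 * Atomic flow sketch (hypothetical encoder code, error handling trimmed):
 * in the encoder's atomic_check, reserve slots when enabling and release
 * them when disabling; passing pbn_div = 0 uses the topology manager's
 * current divisor:
 *
 *      if (crtc_state->active) {
 *              pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp, false);
 *              slots = drm_dp_atomic_find_vcpi_slots(state, mgr, port,
 *                                                    pbn, 0);
 *              if (slots < 0)
 *                      return slots;
 *      } else {
 *              ret = drm_dp_atomic_release_vcpi_slots(state, mgr, port);
 *              if (ret)
 *                      return ret;
 *      }
 *
 * The driver's top-level atomic_check should also call
 * drm_dp_mst_atomic_check() so the total slot usage across each topology
 * is validated.
 */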

void drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port);
void drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port);

struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port);

extern const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs;

/**
 * __drm_dp_mst_state_iter_get - private atomic state iterator function for
 * macro-internal use
 * @state: &struct drm_atomic_state pointer
 * @mgr: pointer to the &struct drm_dp_mst_topology_mgr iteration cursor
 * @old_state: optional pointer to the old &struct drm_dp_mst_topology_state
 * iteration cursor
 * @new_state: optional pointer to the new &struct drm_dp_mst_topology_state
 * iteration cursor
 * @i: int iteration cursor, for macro-internal use
 *
 * Used by for_each_oldnew_mst_mgr_in_state(),
 * for_each_old_mst_mgr_in_state(), and for_each_new_mst_mgr_in_state(). Don't
 * call this directly.
 *
 * Returns:
 * True if the current &struct drm_private_obj is a &struct
 * drm_dp_mst_topology_mgr, false otherwise.
 */
static inline bool
__drm_dp_mst_state_iter_get(struct drm_atomic_state *state,
                            struct drm_dp_mst_topology_mgr **mgr,
                            struct drm_dp_mst_topology_state **old_state,
                            struct drm_dp_mst_topology_state **new_state,
                            int i)
{
        struct __drm_private_objs_state *objs_state = &state->private_objs[i];

        if (objs_state->ptr->funcs != &drm_dp_mst_topology_state_funcs)
                return false;

        *mgr = to_dp_mst_topology_mgr(objs_state->ptr);
        if (old_state)
                *old_state = to_dp_mst_topology_state(objs_state->old_state);
        if (new_state)
                *new_state = to_dp_mst_topology_state(objs_state->new_state);

        return true;
}

/**
 * for_each_oldnew_mst_mgr_in_state - iterate over all DP MST topology
 * managers in an atomic update
 * @__state: &struct drm_atomic_state pointer
 * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
 * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old
 * state
 * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new
 * state
 * @__i: int iteration cursor, for macro-internal use
 *
 * This iterates over all DRM DP MST topology managers in an atomic update,
 * tracking both old and new state. This is useful in places where the state
 * delta needs to be considered, for example in atomic check functions.
 */
#define for_each_oldnew_mst_mgr_in_state(__state, mgr, old_state, new_state, __i) \
        for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
                for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), &(new_state), (__i)))
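
/*
 * Usage sketch (illustrative): walking every MST manager touched by an
 * atomic update inside a driver's atomic_check:
 *
 *      struct drm_dp_mst_topology_mgr *mgr;
 *      struct drm_dp_mst_topology_state *old_mst_state, *new_mst_state;
 *      int i;
 *
 *      for_each_oldnew_mst_mgr_in_state(state, mgr, old_mst_state,
 *                                       new_mst_state, i) {
 *              ... compare old_mst_state->vcpis with new_mst_state->vcpis ...
 *      }
 */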

/**
 * for_each_old_mst_mgr_in_state - iterate over all DP MST topology managers
 * in an atomic update
 * @__state: &struct drm_atomic_state pointer
 * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
 * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old
 * state
 * @__i: int iteration cursor, for macro-internal use
 *
 * This iterates over all DRM DP MST topology managers in an atomic update,
 * tracking only the old state. This is useful in disable functions, where we
 * need the old state the hardware is still in.
 */
#define for_each_old_mst_mgr_in_state(__state, mgr, old_state, __i) \
        for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
                for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), NULL, (__i)))

/**
 * for_each_new_mst_mgr_in_state - iterate over all DP MST topology managers
 * in an atomic update
 * @__state: &struct drm_atomic_state pointer
 * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
 * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new
 * state
 * @__i: int iteration cursor, for macro-internal use
 *
 * This iterates over all DRM DP MST topology managers in an atomic update,
 * tracking only the new state. This is useful in enable functions, where we
 * need the new state the hardware should be in when the atomic commit
 * operation has completed.
 */
#define for_each_new_mst_mgr_in_state(__state, mgr, new_state, __i) \
        for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
                for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), NULL, &(new_state), (__i)))

#endif