linux/drivers/gpu/drm/drm_dp_mst_topology.c
/*
 * Copyright © 2014 Red Hat
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission.  The copyright holders make no representations
 * about the suitability of this software for any purpose.  It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/seq_file.h>

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
#include <linux/stacktrace.h>
#include <linux/sort.h>
#include <linux/timekeeping.h>
#include <linux/math64.h>
#endif

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

#include "drm_crtc_helper_internal.h"
#include "drm_dp_mst_topology_internal.h"

/**
 * DOC: dp mst helper
 *
 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
 * protocol. The helpers contain a topology manager and bandwidth manager.
 * The helpers encapsulate the sending and receiving of sideband msgs.
 */
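/*
 * Up requests initiated by branch devices (the CONNECTION_STATUS_NOTIFY and
 * RESOURCE_STATUS_NOTIFY messages handled by drm_dp_sideband_parse_req()
 * below) are queued as one of these structs for deferred processing.
 */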
struct drm_dp_pending_up_req {
	struct drm_dp_sideband_msg_hdr hdr;
	struct drm_dp_sideband_msg_req_body msg;
	struct list_head next;
};

static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf);

static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id,
				     struct drm_dp_payload *payload);

static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port,
				 int offset, int size, u8 *bytes);
static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes);

static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				    struct drm_dp_mst_branch *mstb);
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid);

static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);

#define DBG_PREFIX "[dp_mst]"

#define DP_STR(x) [DP_ ## x] = #x

static const char *drm_dp_mst_req_type_str(u8 req_type)
{
	static const char * const req_type_str[] = {
		DP_STR(GET_MSG_TRANSACTION_VERSION),
		DP_STR(LINK_ADDRESS),
		DP_STR(CONNECTION_STATUS_NOTIFY),
		DP_STR(ENUM_PATH_RESOURCES),
		DP_STR(ALLOCATE_PAYLOAD),
		DP_STR(QUERY_PAYLOAD),
		DP_STR(RESOURCE_STATUS_NOTIFY),
		DP_STR(CLEAR_PAYLOAD_ID_TABLE),
		DP_STR(REMOTE_DPCD_READ),
		DP_STR(REMOTE_DPCD_WRITE),
		DP_STR(REMOTE_I2C_READ),
		DP_STR(REMOTE_I2C_WRITE),
		DP_STR(POWER_UP_PHY),
		DP_STR(POWER_DOWN_PHY),
		DP_STR(SINK_EVENT_NOTIFY),
		DP_STR(QUERY_STREAM_ENC_STATUS),
	};

	if (req_type >= ARRAY_SIZE(req_type_str) ||
	    !req_type_str[req_type])
		return "unknown";

	return req_type_str[req_type];
}

#undef DP_STR
#define DP_STR(x) [DP_NAK_ ## x] = #x

static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)
{
	static const char * const nak_reason_str[] = {
		DP_STR(WRITE_FAILURE),
		DP_STR(INVALID_READ),
		DP_STR(CRC_FAILURE),
		DP_STR(BAD_PARAM),
		DP_STR(DEFER),
		DP_STR(LINK_FAILURE),
		DP_STR(NO_RESOURCES),
		DP_STR(DPCD_FAIL),
		DP_STR(I2C_NAK),
		DP_STR(ALLOCATE_FAIL),
	};

	if (nak_reason >= ARRAY_SIZE(nak_reason_str) ||
	    !nak_reason_str[nak_reason])
		return "unknown";

	return nak_reason_str[nak_reason];
}

#undef DP_STR
#define DP_STR(x) [DRM_DP_SIDEBAND_TX_ ## x] = #x

static const char *drm_dp_mst_sideband_tx_state_str(int state)
{
	static const char * const sideband_reason_str[] = {
		DP_STR(QUEUED),
		DP_STR(START_SEND),
		DP_STR(SENT),
		DP_STR(RX),
		DP_STR(TIMEOUT),
	};

	if (state >= ARRAY_SIZE(sideband_reason_str) ||
	    !sideband_reason_str[state])
		return "unknown";

	return sideband_reason_str[state];
}

static int
drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len)
{
	int i;
	u8 unpacked_rad[16];

	for (i = 0; i < lct; i++) {
		if (i % 2)
			unpacked_rad[i] = rad[i / 2] >> 4;
		else
			unpacked_rad[i] = rad[i / 2] & 0xf;
	}

	/* TODO: Eventually add something to printk so we can format the rad
	 * like this: 1.2.3
	 */
	return snprintf(out, len, "%*phC", lct, unpacked_rad);
}
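
/*
 * A RAD (relative address) packs the port numbers along the path from the
 * source, one nibble per hop and two hops per byte. The helper above unpacks
 * @lct nibbles from @rad and prints them back-to-back as hex, hence the TODO
 * about the friendlier 1.2.3 format.
 */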

/* sideband msg handling */
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = num_nibbles * 4;
	u8 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x10) == 0x10)
			remainder ^= 0x13;
	}

	number_of_bits = 4;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x10) != 0)
			remainder ^= 0x13;
	}

	return remainder;
}
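
/*
 * The header CRC above is the sideband MSG CRC-4: nibbles are shifted in
 * MSB-first and reduced modulo the polynomial x^4 + x + 1 (0x13 with the
 * leading term included), and the final loop shifts in four zero bits to
 * flush the remainder out of the register.
 */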

static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = number_of_bytes * 8;
	u16 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x100) == 0x100)
			remainder ^= 0xd5;
	}

	number_of_bits = 8;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x100) != 0)
			remainder ^= 0xd5;
	}

	return remainder & 0xff;
}
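
/*
 * Same bit-serial scheme as the header CRC above, but over whole bytes and
 * with the CRC-8 polynomial x^8 + x^7 + x^6 + x^4 + x^2 + 1 (0xd5 without
 * the leading term), used to protect sideband message body chunks.
 */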

static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
{
	u8 size = 3;

	size += (hdr->lct / 2);
	return size;
}

static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int *len)
{
	int idx = 0;
	int i;
	u8 crc4;

	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
	for (i = 0; i < (hdr->lct / 2); i++)
		buf[idx++] = hdr->rad[i];
	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
		(hdr->msg_len & 0x3f);
	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);

	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
	buf[idx - 1] |= (crc4 & 0xf);

	*len = idx;
}

static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int buflen, u8 *hdrlen)
{
	u8 crc4;
	u8 len;
	int i;
	u8 idx;

	if (buf[0] == 0)
		return false;
	len = 3;
	len += ((buf[0] & 0xf0) >> 4) / 2;
	if (len > buflen)
		return false;
	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);

	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
		DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
		return false;
	}

	hdr->lct = (buf[0] & 0xf0) >> 4;
	hdr->lcr = (buf[0] & 0xf);
	idx = 1;
	for (i = 0; i < (hdr->lct / 2); i++)
		hdr->rad[i] = buf[idx++];
	hdr->broadcast = (buf[idx] >> 7) & 0x1;
	hdr->path_msg = (buf[idx] >> 6) & 0x1;
	hdr->msg_len = buf[idx] & 0x3f;
	idx++;
	hdr->somt = (buf[idx] >> 7) & 0x1;
	hdr->eomt = (buf[idx] >> 6) & 0x1;
	hdr->seqno = (buf[idx] >> 4) & 0x1;
	idx++;
	*hdrlen = idx;
	return true;
}
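
/*
 * For reference, the sideband message header produced and consumed by the two
 * helpers above is laid out as follows (fields in transmission order):
 *
 *	byte 0:      Link_Count_Total (4) | Link_Count_Remaining (4)
 *	bytes 1..n:  Relative_Address, two port nibbles per byte, lct / 2 bytes
 *	next byte:   Broadcast (1) | Path_Message (1) | Msg_Body_Length (6)
 *	last byte:   SOMT (1) | EOMT (1) | zero (1) | Seqno (1) | CRC-4 (4)
 */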

void
drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
			   struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	int i;
	u8 *buf = raw->msg;

	buf[idx++] = req->req_type & 0x7f;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
		idx++;
		break;
	case DP_ALLOCATE_PAYLOAD:
		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
			(req->u.allocate_payload.number_sdp_streams & 0xf);
		idx++;
		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn >> 8);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
		idx++;
		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
				(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
			idx++;
		}
		if (req->u.allocate_payload.number_sdp_streams & 1) {
			i = req->u.allocate_payload.number_sdp_streams - 1;
			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
			idx++;
		}
		break;
	case DP_QUERY_PAYLOAD:
		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
		idx++;
		break;
	case DP_REMOTE_DPCD_READ:
		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_read.num_bytes);
		idx++;
		break;

	case DP_REMOTE_DPCD_WRITE:
		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
		idx += req->u.dpcd_write.num_bytes;
		break;
	case DP_REMOTE_I2C_READ:
		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
		idx++;
		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
			idx++;
			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
			idx++;
			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
			idx += req->u.i2c_read.transactions[i].num_bytes;

			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4;
			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
			idx++;
		}
		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_read.num_bytes_read);
		idx++;
		break;

	case DP_REMOTE_I2C_WRITE:
		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
		idx += req->u.i2c_write.num_bytes;
		break;
	}
	raw->cur_len = idx;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_encode_sideband_req);

/* Decode a sideband request we've encoded, mainly used for debugging */
int
drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,
			   struct drm_dp_sideband_msg_req_body *req)
{
	const u8 *buf = raw->msg;
	int i, idx = 0;

	req->req_type = buf[idx++] & 0x7f;
	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		req->u.port_num.port_number = (buf[idx] >> 4) & 0xf;
		break;
	case DP_ALLOCATE_PAYLOAD:
		{
			struct drm_dp_allocate_payload *a =
				&req->u.allocate_payload;

			a->number_sdp_streams = buf[idx] & 0xf;
			a->port_number = (buf[idx] >> 4) & 0xf;

			WARN_ON(buf[++idx] & 0x80);
			a->vcpi = buf[idx] & 0x7f;

			a->pbn = buf[++idx] << 8;
			a->pbn |= buf[++idx];

			idx++;
			for (i = 0; i < a->number_sdp_streams; i++) {
				a->sdp_stream_sink[i] =
					(buf[idx + (i / 2)] >> ((i % 2) ? 0 : 4)) & 0xf;
			}
		}
		break;
	case DP_QUERY_PAYLOAD:
		req->u.query_payload.port_number = (buf[idx] >> 4) & 0xf;
		WARN_ON(buf[++idx] & 0x80);
		req->u.query_payload.vcpi = buf[idx] & 0x7f;
		break;
	case DP_REMOTE_DPCD_READ:
		{
			struct drm_dp_remote_dpcd_read *r = &req->u.dpcd_read;

			r->port_number = (buf[idx] >> 4) & 0xf;

			r->dpcd_address = (buf[idx] << 16) & 0xf0000;
			r->dpcd_address |= (buf[++idx] << 8) & 0xff00;
			r->dpcd_address |= buf[++idx] & 0xff;

			r->num_bytes = buf[++idx];
		}
		break;
	case DP_REMOTE_DPCD_WRITE:
		{
			struct drm_dp_remote_dpcd_write *w =
				&req->u.dpcd_write;

			w->port_number = (buf[idx] >> 4) & 0xf;

			w->dpcd_address = (buf[idx] << 16) & 0xf0000;
			w->dpcd_address |= (buf[++idx] << 8) & 0xff00;
			w->dpcd_address |= buf[++idx] & 0xff;

			w->num_bytes = buf[++idx];

			w->bytes = kmemdup(&buf[++idx], w->num_bytes,
					   GFP_KERNEL);
			if (!w->bytes)
				return -ENOMEM;
		}
		break;
	case DP_REMOTE_I2C_READ:
		{
			struct drm_dp_remote_i2c_read *r = &req->u.i2c_read;
			struct drm_dp_remote_i2c_read_tx *tx;
			bool failed = false;

			r->num_transactions = buf[idx] & 0x3;
			r->port_number = (buf[idx] >> 4) & 0xf;
			for (i = 0; i < r->num_transactions; i++) {
				tx = &r->transactions[i];

				tx->i2c_dev_id = buf[++idx] & 0x7f;
				tx->num_bytes = buf[++idx];
				tx->bytes = kmemdup(&buf[++idx],
						    tx->num_bytes,
						    GFP_KERNEL);
				if (!tx->bytes) {
					failed = true;
					break;
				}
				idx += tx->num_bytes;
				/* no-stop-bit is bit 4, matching the encoder above */
				tx->no_stop_bit = (buf[idx] >> 4) & 0x1;
				tx->i2c_transaction_delay = buf[idx] & 0xf;
			}

			if (failed) {
				/* unwind only the transactions duplicated so far */
				while (i--)
					kfree(r->transactions[i].bytes);
				return -ENOMEM;
			}

			r->read_i2c_device_id = buf[++idx] & 0x7f;
			r->num_bytes_read = buf[++idx];
		}
		break;
	case DP_REMOTE_I2C_WRITE:
		{
			struct drm_dp_remote_i2c_write *w = &req->u.i2c_write;

			w->port_number = (buf[idx] >> 4) & 0xf;
			w->write_i2c_device_id = buf[++idx] & 0x7f;
			w->num_bytes = buf[++idx];
			w->bytes = kmemdup(&buf[++idx], w->num_bytes,
					   GFP_KERNEL);
			if (!w->bytes)
				return -ENOMEM;
		}
		break;
	}

	return 0;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_decode_sideband_req);
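
/*
 * Note that for DP_REMOTE_DPCD_WRITE, DP_REMOTE_I2C_READ and
 * DP_REMOTE_I2C_WRITE, the decoder above kmemdup()s the transferred bytes:
 * the caller owns the duplicated buffers and must kfree() them, as
 * drm_dp_mst_dump_sideband_msg_tx() below does.
 */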

void
drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req,
				  int indent, struct drm_printer *printer)
{
	int i;

#define P(f, ...) drm_printf_indent(printer, indent, f, ##__VA_ARGS__)
	if (req->req_type == DP_LINK_ADDRESS) {
		/* No contents to print */
		P("type=%s\n", drm_dp_mst_req_type_str(req->req_type));
		return;
	}

	P("type=%s contents:\n", drm_dp_mst_req_type_str(req->req_type));
	indent++;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		P("port=%d\n", req->u.port_num.port_number);
		break;
	case DP_ALLOCATE_PAYLOAD:
		P("port=%d vcpi=%d pbn=%d sdp_streams=%d %*ph\n",
		  req->u.allocate_payload.port_number,
		  req->u.allocate_payload.vcpi, req->u.allocate_payload.pbn,
		  req->u.allocate_payload.number_sdp_streams,
		  req->u.allocate_payload.number_sdp_streams,
		  req->u.allocate_payload.sdp_stream_sink);
		break;
	case DP_QUERY_PAYLOAD:
		P("port=%d vcpi=%d\n",
		  req->u.query_payload.port_number,
		  req->u.query_payload.vcpi);
		break;
	case DP_REMOTE_DPCD_READ:
		P("port=%d dpcd_addr=%05x len=%d\n",
		  req->u.dpcd_read.port_number, req->u.dpcd_read.dpcd_address,
		  req->u.dpcd_read.num_bytes);
		break;
	case DP_REMOTE_DPCD_WRITE:
		P("port=%d addr=%05x len=%d: %*ph\n",
		  req->u.dpcd_write.port_number,
		  req->u.dpcd_write.dpcd_address,
		  req->u.dpcd_write.num_bytes, req->u.dpcd_write.num_bytes,
		  req->u.dpcd_write.bytes);
		break;
	case DP_REMOTE_I2C_READ:
		P("port=%d num_tx=%d id=%d size=%d:\n",
		  req->u.i2c_read.port_number,
		  req->u.i2c_read.num_transactions,
		  req->u.i2c_read.read_i2c_device_id,
		  req->u.i2c_read.num_bytes_read);

		indent++;
		for (i = 0; i < req->u.i2c_read.num_transactions; i++) {
			const struct drm_dp_remote_i2c_read_tx *rtx =
				&req->u.i2c_read.transactions[i];

			P("%d: id=%03d size=%03d no_stop_bit=%d tx_delay=%03d: %*ph\n",
			  i, rtx->i2c_dev_id, rtx->num_bytes,
			  rtx->no_stop_bit, rtx->i2c_transaction_delay,
			  rtx->num_bytes, rtx->bytes);
		}
		break;
	case DP_REMOTE_I2C_WRITE:
		P("port=%d id=%d size=%d: %*ph\n",
		  req->u.i2c_write.port_number,
		  req->u.i2c_write.write_i2c_device_id,
		  req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes,
		  req->u.i2c_write.bytes);
		break;
	default:
		P("???\n");
		break;
	}
#undef P
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_dump_sideband_msg_req_body);

static inline void
drm_dp_mst_dump_sideband_msg_tx(struct drm_printer *p,
				const struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_sideband_msg_req_body req;
	char buf[64];
	int ret;
	int i;

	drm_dp_mst_rad_to_str(txmsg->dst->rad, txmsg->dst->lct, buf,
			      sizeof(buf));
	drm_printf(p, "txmsg cur_offset=%x cur_len=%x seqno=%x state=%s path_msg=%d dst=%s\n",
		   txmsg->cur_offset, txmsg->cur_len, txmsg->seqno,
		   drm_dp_mst_sideband_tx_state_str(txmsg->state),
		   txmsg->path_msg, buf);

	ret = drm_dp_decode_sideband_req(txmsg, &req);
	if (ret) {
		drm_printf(p, "<failed to decode sideband req: %d>\n", ret);
		return;
	}
	drm_dp_dump_sideband_msg_req_body(&req, 1, p);

	switch (req.req_type) {
	case DP_REMOTE_DPCD_WRITE:
		kfree(req.u.dpcd_write.bytes);
		break;
	case DP_REMOTE_I2C_READ:
		for (i = 0; i < req.u.i2c_read.num_transactions; i++)
			kfree(req.u.i2c_read.transactions[i].bytes);
		break;
	case DP_REMOTE_I2C_WRITE:
		kfree(req.u.i2c_write.bytes);
		break;
	}
}

static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
{
	u8 crc4;

	crc4 = drm_dp_msg_data_crc4(msg, len);
	msg[len] = crc4;
}

static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
					 struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	u8 *buf = raw->msg;

	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);

	raw->cur_len = idx;
}

/* this adds a chunk of msg to the builder to get the final msg */
static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
				      u8 *replybuf, u8 replybuflen, bool hdr)
{
	int ret;
	u8 crc4;

	if (hdr) {
		u8 hdrlen;
		struct drm_dp_sideband_msg_hdr recv_hdr;

		ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
		if (!ret) {
			print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
			return false;
		}

		/*
		 * ignore out-of-order messages or messages that are part of a
		 * failed transaction
		 */
		if (!recv_hdr.somt && !msg->have_somt)
			return false;

		/* get length contained in this portion */
		msg->curchunk_len = recv_hdr.msg_len;
		msg->curchunk_hdrlen = hdrlen;

		/* we have already gotten an somt - don't bother parsing */
		if (recv_hdr.somt && msg->have_somt)
			return false;

		if (recv_hdr.somt) {
			memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
			msg->have_somt = true;
		}
		if (recv_hdr.eomt)
			msg->have_eomt = true;

		/* copy the bytes for the remainder of this header chunk */
		msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
		memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
	} else {
		memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
		msg->curchunk_idx += replybuflen;
	}

	if (msg->curchunk_idx >= msg->curchunk_len) {
		/* compute the data CRC; note it is not actually verified here */
		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
		/* copy the chunk into the bigger msg, dropping the trailing CRC byte */
		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
		msg->curlen += msg->curchunk_len - 1;
	}
	return true;
}

static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
					       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	int i;

	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
	idx += 16;
	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
		if (raw->msg[idx] & 0x80)
			repmsg->u.link_addr.ports[i].input_port = 1;

		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);

		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
		if (repmsg->u.link_addr.ports[i].input_port == 0)
			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		if (repmsg->u.link_addr.ports[i].input_port == 0) {
			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
			idx++;
			if (idx > raw->curlen)
				goto fail_len;
			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
			idx += 16;
			if (idx > raw->curlen)
				goto fail_len;
			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
			idx++;
		}
		if (idx > raw->curlen)
			goto fail_len;
	}

	return true;
fail_len:
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
						   struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote dpcd read reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
						      struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
	idx++;
	/* TODO check */
	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
							  struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen) {
		DRM_DEBUG_KMS("power up/down phy parse length fail %d %d\n",
			      idx, raw->curlen);
		return false;
	}
	return true;
}

static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
					struct drm_dp_sideband_msg_reply_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
	msg->req_type = (raw->msg[0] & 0x7f);

	if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) {
		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
		msg->u.nak.reason = raw->msg[17];
		msg->u.nak.nak_data = raw->msg[18];
		return false;
	}

	switch (msg->req_type) {
	case DP_LINK_ADDRESS:
		return drm_dp_sideband_parse_link_address(raw, msg);
	case DP_QUERY_PAYLOAD:
		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
	case DP_REMOTE_DPCD_READ:
		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
	case DP_REMOTE_DPCD_WRITE:
		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
	case DP_REMOTE_I2C_READ:
		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
	case DP_ENUM_PATH_RESOURCES:
		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
	case DP_ALLOCATE_PAYLOAD:
		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
	default:
		DRM_ERROR("Got unknown reply 0x%02x (%s)\n", msg->req_type,
			  drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}
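
/*
 * Note that a NAKed transaction still populates msg->u.nak before returning
 * false above, so callers can tell a NAK (msg->reply_type ==
 * DP_SIDEBAND_REPLY_NAK) apart from a reply that simply failed to parse.
 */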

static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
							   struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
							 struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
				      struct drm_dp_sideband_msg_req_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->req_type = (raw->msg[0] & 0x7f);

	switch (msg->req_type) {
	case DP_CONNECTION_STATUS_NOTIFY:
		return drm_dp_sideband_parse_connection_status_notify(raw, msg);
	case DP_RESOURCE_STATUS_NOTIFY:
		return drm_dp_sideband_parse_resource_status_notify(raw, msg);
	default:
		DRM_ERROR("Got unknown request 0x%02x (%s)\n", msg->req_type,
			  drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}

static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg,
			    u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_WRITE;
	req.u.dpcd_write.port_number = port_num;
	req.u.dpcd_write.dpcd_address = offset;
	req.u.dpcd_write.num_bytes = num_bytes;
	req.u.dpcd_write.bytes = bytes;
	drm_dp_encode_sideband_req(&req, msg);

	return 0;
}

static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_LINK_ADDRESS;
	drm_dp_encode_sideband_req(&req, msg);
	return 0;
}

static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg,
				     int port_num)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_ENUM_PATH_RESOURCES;
	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg,
				  int port_num,
				  u8 vcpi, uint16_t pbn,
				  u8 number_sdp_streams,
				  u8 *sdp_stream_sink)
{
	struct drm_dp_sideband_msg_req_body req;

	memset(&req, 0, sizeof(req));
	req.req_type = DP_ALLOCATE_PAYLOAD;
	req.u.allocate_payload.port_number = port_num;
	req.u.allocate_payload.vcpi = vcpi;
	req.u.allocate_payload.pbn = pbn;
	req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
	memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
	       number_sdp_streams);
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static int build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
				  int port_num, bool power_up)
{
	struct drm_dp_sideband_msg_req_body req;

	if (power_up)
		req.req_type = DP_POWER_UP_PHY;
	else
		req.req_type = DP_POWER_DOWN_PHY;

	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_vcpi *vcpi)
{
	int ret, vcpi_ret;

	mutex_lock(&mgr->payload_lock);
	ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
	if (ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of payload ids %d\n", ret);
		goto out_unlock;
	}

	vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
	if (vcpi_ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);
		goto out_unlock;
	}

	set_bit(ret, &mgr->payload_mask);
	set_bit(vcpi_ret, &mgr->vcpi_mask);
	vcpi->vcpi = vcpi_ret + 1;
	mgr->proposed_vcpis[ret - 1] = vcpi;
out_unlock:
	mutex_unlock(&mgr->payload_lock);
	return ret;
}
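
/*
 * Payload and VCPI IDs are 1-based on the wire, which is why the helper above
 * searches max_payloads + 1 bits and applies the ret - 1 / vcpi_ret + 1
 * adjustments; bit 0 is expected to be reserved at manager init, so a vcpi of
 * 0 can safely mean "unallocated" (see drm_dp_mst_put_payload_id() below).
 */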

static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
				      int vcpi)
{
	int i;

	if (vcpi == 0)
		return;

	mutex_lock(&mgr->payload_lock);
	DRM_DEBUG_KMS("putting payload %d\n", vcpi);
	clear_bit(vcpi - 1, &mgr->vcpi_mask);

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->proposed_vcpis[i] &&
		    mgr->proposed_vcpis[i]->vcpi == vcpi) {
			mgr->proposed_vcpis[i] = NULL;
			clear_bit(i + 1, &mgr->payload_mask);
		}
	}
	mutex_unlock(&mgr->payload_lock);
}

static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_sideband_msg_tx *txmsg)
{
	unsigned int state;

	/*
	 * All updates to txmsg->state are protected by mgr->qlock, and the two
	 * cases we check here are terminal states. For those the barriers
	 * provided by the wake_up/wait_event pair are enough.
	 */
	state = READ_ONCE(txmsg->state);
	return (state == DRM_DP_SIDEBAND_TX_RX ||
		state == DRM_DP_SIDEBAND_TX_TIMEOUT);
}

static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
				    struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
	int ret;

	ret = wait_event_timeout(mgr->tx_waitq,
				 check_txmsg_state(mgr, txmsg),
				 (4 * HZ));
	mutex_lock(&mstb->mgr->qlock);
	if (ret > 0) {
		if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
			ret = -EIO;
			goto out;
		}
	} else {
		DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);

		/* dump some state */
		ret = -EIO;

		/* remove from q */
		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
			list_del(&txmsg->next);
		}

		if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
			mstb->tx_slots[txmsg->seqno] = NULL;
		}
		mgr->is_waiting_for_dwn_reply = false;
	}
out:
	if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
		struct drm_printer p = drm_debug_printer(DBG_PREFIX);

		drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
	}
	mutex_unlock(&mgr->qlock);

	drm_dp_mst_kick_tx(mgr);
	return ret;
}

static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;

	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
	if (!mstb)
		return NULL;

	mstb->lct = lct;
	if (lct > 1)
		memcpy(mstb->rad, rad, lct / 2);
	INIT_LIST_HEAD(&mstb->ports);
	kref_init(&mstb->topology_kref);
	kref_init(&mstb->malloc_kref);
	return mstb;
}

static void drm_dp_free_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb =
		container_of(kref, struct drm_dp_mst_branch, malloc_kref);

	if (mstb->port_parent)
		drm_dp_mst_put_port_malloc(mstb->port_parent);

	kfree(mstb);
}

/**
 * DOC: Branch device and port refcounting
 *
 * Topology refcount overview
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The refcounting schemes for &struct drm_dp_mst_branch and &struct
 * drm_dp_mst_port are somewhat unusual. Both ports and branch devices have
 * two different kinds of refcounts: topology refcounts, and malloc refcounts.
 *
 * Topology refcounts are not exposed to drivers, and are handled internally
 * by the DP MST helpers. The helpers use them in order to prevent the
 * in-memory topology state from being changed in the middle of critical
 * operations like changing the internal state of payload allocations. This
 * means each branch and port will be considered to be connected to the rest
 * of the topology until its topology refcount reaches zero. Additionally,
 * for ports this means that their associated &struct drm_connector will stay
 * registered with userspace until the port's refcount reaches 0.
 *
 * Malloc refcount overview
 * ~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * Malloc references are used to keep a &struct drm_dp_mst_port or &struct
 * drm_dp_mst_branch allocated even after all of its topology references have
 * been dropped, so that the driver or MST helpers can safely access each
 * branch's last known state before it was disconnected from the topology.
 * When the malloc refcount of a port or branch reaches 0, the memory
 * allocation containing the &struct drm_dp_mst_branch or &struct
 * drm_dp_mst_port respectively will be freed.
 *
 * For &struct drm_dp_mst_branch, malloc refcounts are not currently exposed
 * to drivers. As of writing this documentation, there are no drivers that
 * have a usecase for accessing &struct drm_dp_mst_branch outside of the MST
 * helpers. Exposing this API to drivers in a race-free manner would take more
 * tweaking of the refcounting scheme, however patches are welcome provided
 * there is a legitimate driver usecase for this.
 *
 * Refcount relationships in a topology
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * Let's take a look at why the relationship between topology and malloc
 * refcounts is designed the way it is.
 *
 * .. kernel-figure:: dp-mst/topology-figure-1.dot
 *
 *    An example of topology and malloc refs in a DP MST topology with two
 *    active payloads. Topology refcount increments are indicated by solid
 *    lines, and malloc refcount increments are indicated by dashed lines.
 *    Each starts from the branch which incremented the refcount, and ends at
 *    the branch to which the refcount belongs, i.e. the arrow points the
 *    same way as the C pointers used to reference a structure.
 *
 * As you can see in the above figure, every branch increments the topology
 * refcount of its children, and increments the malloc refcount of its
 * parent. Additionally, every payload increments the malloc refcount of its
 * assigned port by 1.
 *
 * So, what would happen if MSTB #3 from the above figure was unplugged from
 * the system, but the driver hadn't yet removed payload #2 from port #3? The
 * topology would start to look like the figure below.
 *
 * .. kernel-figure:: dp-mst/topology-figure-2.dot
 *
 *    Ports and branch devices which have been released from memory are
 *    colored grey, and references which have been removed are colored red.
 *
 * Whenever a port or branch device's topology refcount reaches zero, it will
 * decrement the topology refcounts of all its children, the malloc refcount
 * of its parent, and finally its own malloc refcount. For MSTB #4 and port
 * #4, this means they both have been disconnected from the topology and freed
 * from memory. But, because payload #2 is still holding a reference to port
 * #3, port #3 is removed from the topology but its &struct drm_dp_mst_port
 * is still accessible from memory. This also means port #3 has not yet
 * decremented the malloc refcount of MSTB #3, so its &struct
 * drm_dp_mst_branch will also stay allocated in memory until port #3's
 * malloc refcount reaches 0.
 *
 * This relationship is necessary because in order to release payload #2, we
 * need to be able to figure out the last relative of port #3 that's still
 * connected to the topology. In this case, we would travel up the topology as
 * shown below.
 *
 * .. kernel-figure:: dp-mst/topology-figure-3.dot
 *
 * And finally, remove payload #2 by communicating with port #2 through
 * sideband transactions.
 */
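
/*
 * A rough sketch of how a driver typically pairs the malloc ref helpers
 * (hypothetical driver code; my_connector_state is made up for illustration,
 * only the two helpers and the add_connector callback are real API):
 *
 *	in the driver's &drm_dp_mst_topology_cbs.add_connector callback, cache
 *	the port and take a malloc reference before the port can disappear:
 *
 *		my_connector_state->port = port;
 *		drm_dp_mst_get_port_malloc(port);
 *
 *	in the driver's connector destroy path, drop the reference along with
 *	the cached pointer:
 *
 *		drm_dp_mst_put_port_malloc(my_connector_state->port);
 *		my_connector_state->port = NULL;
 */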

/**
 * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of
 *
 * Increments &drm_dp_mst_branch.malloc_kref. When
 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
 * will be released and @mstb may no longer be used.
 *
 * See also: drm_dp_mst_put_mstb_malloc()
 */
static void
drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	kref_get(&mstb->malloc_kref);
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
}

/**
 * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of
 *
 * Decrements &drm_dp_mst_branch.malloc_kref. When
 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
 * will be released and @mstb may no longer be used.
 *
 * See also: drm_dp_mst_get_mstb_malloc()
 */
static void
drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
	kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device);
}

static void drm_dp_free_mst_port(struct kref *kref)
{
	struct drm_dp_mst_port *port =
		container_of(kref, struct drm_dp_mst_port, malloc_kref);

	drm_dp_mst_put_mstb_malloc(port->parent);
	kfree(port);
}
1368
1369/**
1370 * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
1371 * @port: The &struct drm_dp_mst_port to increment the malloc refcount of
1372 *
1373 * Increments &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
1374 * reaches 0, the memory allocation for @port will be released and @port may
1375 * no longer be used.
1376 *
1377 * Because @port could potentially be freed at any time by the DP MST helpers
1378 * if &drm_dp_mst_port.malloc_kref reaches 0, including during a call to this
1379 * function, drivers that wish to make use of &struct drm_dp_mst_port should
1380 * ensure that they grab at least one main malloc reference to their MST ports
1381 * in &drm_dp_mst_topology_cbs.add_connector. This callback is called before
1382 * there is any chance for &drm_dp_mst_port.malloc_kref to reach 0.
1383 *
1384 * See also: drm_dp_mst_put_port_malloc()
1385 */
1386void
1387drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port)
1388{
1389        kref_get(&port->malloc_kref);
1390        DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref));
1391}
1392EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);
1393
1394/**
1395 * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
1396 * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of
1397 *
1398 * Decrements &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
1399 * reaches 0, the memory allocation for @port will be released and @port may
1400 * no longer be used.
1401 *
1402 * See also: drm_dp_mst_get_port_malloc()
1403 */
1404void
1405drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
1406{
1407        DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1);
1408        kref_put(&port->malloc_kref, drm_dp_free_mst_port);
1409}
1410EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);
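
/*
 * A minimal sketch of the &drm_dp_mst_topology_cbs.add_connector pattern
 * described above (all names other than the drm_dp_mst_* helpers are
 * hypothetical):
 *
 *      static struct drm_connector *
 *      my_add_connector(struct drm_dp_mst_topology_mgr *mgr,
 *                       struct drm_dp_mst_port *port, const char *path)
 *      {
 *              struct my_connector *conn = my_connector_create(mgr, path);
 *
 *              if (!conn)
 *                      return NULL;
 *
 *              conn->port = port;
 *              drm_dp_mst_get_port_malloc(port);       // held until destroy
 *              return &conn->base;
 *      }
 *
 * with the matching drm_dp_mst_put_port_malloc() call made from the
 * connector's destroy path once the driver is done with conn->port.
 */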
1411
1412#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
1413
1414#define STACK_DEPTH 8
1415
1416static noinline void
1417__topology_ref_save(struct drm_dp_mst_topology_mgr *mgr,
1418                    struct drm_dp_mst_topology_ref_history *history,
1419                    enum drm_dp_mst_topology_ref_type type)
1420{
1421        struct drm_dp_mst_topology_ref_entry *entry = NULL;
1422        depot_stack_handle_t backtrace;
1423        ulong stack_entries[STACK_DEPTH];
1424        uint n;
1425        int i;
1426
1427        n = stack_trace_save(stack_entries, ARRAY_SIZE(stack_entries), 1);
1428        backtrace = stack_depot_save(stack_entries, n, GFP_KERNEL);
1429        if (!backtrace)
1430                return;
1431
1432        /* Try to find an existing entry for this backtrace */
1433        for (i = 0; i < history->len; i++) {
1434                if (history->entries[i].backtrace == backtrace) {
1435                        entry = &history->entries[i];
1436                        break;
1437                }
1438        }
1439
1440        /* Otherwise add one */
1441        if (!entry) {
1442                struct drm_dp_mst_topology_ref_entry *new;
1443                int new_len = history->len + 1;
1444
1445                new = krealloc(history->entries, sizeof(*new) * new_len,
1446                               GFP_KERNEL);
1447                if (!new)
1448                        return;
1449
1450                entry = &new[history->len];
1451                history->len = new_len;
1452                history->entries = new;
1453
1454                entry->backtrace = backtrace;
1455                entry->type = type;
1456                entry->count = 0;
1457        }
1458        entry->count++;
1459        entry->ts_nsec = ktime_get_ns();
1460}
1461
1462static int
1463topology_ref_history_cmp(const void *a, const void *b)
1464{
1465        const struct drm_dp_mst_topology_ref_entry *entry_a = a, *entry_b = b;
1466
1467        if (entry_a->ts_nsec > entry_b->ts_nsec)
1468                return 1;
1469        else if (entry_a->ts_nsec < entry_b->ts_nsec)
1470                return -1;
1471        else
1472                return 0;
1473}
1474
1475static inline const char *
1476topology_ref_type_to_str(enum drm_dp_mst_topology_ref_type type)
1477{
1478        if (type == DRM_DP_MST_TOPOLOGY_REF_GET)
1479                return "get";
1480        else
1481                return "put";
1482}
1483
1484static void
1485__dump_topology_ref_history(struct drm_dp_mst_topology_ref_history *history,
1486                            void *ptr, const char *type_str)
1487{
1488        struct drm_printer p = drm_debug_printer(DBG_PREFIX);
1489        char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
1490        int i;
1491
1492        if (!buf)
1493                return;
1494
1495        if (!history->len)
1496                goto out;
1497
1498        /* First, sort the list so that it goes from oldest to newest
1499         * reference entry
1500         */
1501        sort(history->entries, history->len, sizeof(*history->entries),
1502             topology_ref_history_cmp, NULL);
1503
1504        drm_printf(&p, "%s (%p) topology count reached 0, dumping history:\n",
1505                   type_str, ptr);
1506
1507        for (i = 0; i < history->len; i++) {
1508                const struct drm_dp_mst_topology_ref_entry *entry =
1509                        &history->entries[i];
1510                ulong *entries;
1511                uint nr_entries;
1512                u64 ts_nsec = entry->ts_nsec;
1513                u32 rem_nsec = do_div(ts_nsec, 1000000000);
1514
1515                nr_entries = stack_depot_fetch(entry->backtrace, &entries);
1516                stack_trace_snprint(buf, PAGE_SIZE, entries, nr_entries, 4);
1517
1518                drm_printf(&p, "  %d %ss (last at %5llu.%06u):\n%s",
1519                           entry->count,
1520                           topology_ref_type_to_str(entry->type),
1521                           ts_nsec, rem_nsec / 1000, buf);
1522        }
1523
1524        /* Now free the history, since this is the only time we expose it */
1525        kfree(history->entries);
1526out:
1527        kfree(buf);
1528}
1529
1530static __always_inline void
1531drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb)
1532{
1533        __dump_topology_ref_history(&mstb->topology_ref_history, mstb,
1534                                    "MSTB");
1535}
1536
1537static __always_inline void
1538drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port)
1539{
1540        __dump_topology_ref_history(&port->topology_ref_history, port,
1541                                    "Port");
1542}
1543
1544static __always_inline void
1545save_mstb_topology_ref(struct drm_dp_mst_branch *mstb,
1546                       enum drm_dp_mst_topology_ref_type type)
1547{
1548        __topology_ref_save(mstb->mgr, &mstb->topology_ref_history, type);
1549}
1550
1551static __always_inline void
1552save_port_topology_ref(struct drm_dp_mst_port *port,
1553                       enum drm_dp_mst_topology_ref_type type)
1554{
1555        __topology_ref_save(port->mgr, &port->topology_ref_history, type);
1556}
1557
1558static inline void
1559topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr)
1560{
1561        mutex_lock(&mgr->topology_ref_history_lock);
1562}
1563
1564static inline void
1565topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr)
1566{
1567        mutex_unlock(&mgr->topology_ref_history_lock);
1568}
1569#else
1570static inline void
1571topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) {}
1572static inline void
1573topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) {}
1574static inline void
1575drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) {}
1576static inline void
1577drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) {}
1578#define save_mstb_topology_ref(mstb, type)
1579#define save_port_topology_ref(port, type)
1580#endif
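
/*
 * Note: the instrumentation above is only built with
 * CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS=y. With it enabled, every unique
 * get/put call site is recorded, and once a topology refcount reaches 0 the
 * whole history (count, timestamp and backtrace per call site) is dumped,
 * which makes refcount leaks much easier to chase down.
 */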
1581
1582static void drm_dp_destroy_mst_branch_device(struct kref *kref)
1583{
1584        struct drm_dp_mst_branch *mstb =
1585                container_of(kref, struct drm_dp_mst_branch, topology_kref);
1586        struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
1587
1588        drm_dp_mst_dump_mstb_topology_history(mstb);
1589
1590        INIT_LIST_HEAD(&mstb->destroy_next);
1591
1592        /*
1593         * This can get called under mgr->mutex, so we need to perform the
1594         * actual destruction of the mstb in another worker
1595         */
1596        mutex_lock(&mgr->delayed_destroy_lock);
1597        list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list);
1598        mutex_unlock(&mgr->delayed_destroy_lock);
1599        schedule_work(&mgr->delayed_destroy_work);
1600}
1601
1602/**
1603 * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
1604 * branch device unless it's zero
1605 * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
1606 *
1607 * Attempts to grab a topology reference to @mstb, if it hasn't yet been
1608 * removed from the topology (i.e. &drm_dp_mst_branch.topology_kref has
1609 * reached 0). Holding a topology reference implies that a malloc reference
1610 * will be held to @mstb as long as the user holds the topology reference.
1611 *
1612 * Care should be taken to ensure that the user has at least one malloc
1613 * reference to @mstb. If you already have a topology reference to @mstb, you
1614 * should use drm_dp_mst_topology_get_mstb() instead.
1615 *
1616 * See also:
1617 * drm_dp_mst_topology_get_mstb()
1618 * drm_dp_mst_topology_put_mstb()
1619 *
1620 * Returns:
1621 * * 1: A topology reference was grabbed successfully
1622 * * 0: @mstb is no longer in the topology, no reference was grabbed
1623 */
1624static int __must_check
1625drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
1626{
1627        int ret;
1628
1629        topology_ref_history_lock(mstb->mgr);
1630        ret = kref_get_unless_zero(&mstb->topology_kref);
1631        if (ret) {
1632                DRM_DEBUG("mstb %p (%d)\n",
1633                          mstb, kref_read(&mstb->topology_kref));
1634                save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
1635        }
1636
1637        topology_ref_history_unlock(mstb->mgr);
1638
1639        return ret;
1640}
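
/*
 * The expected calling pattern, sketched briefly (do_something_with() is a
 * hypothetical stand-in): a caller that already holds a malloc reference,
 * so @mstb cannot be freed underneath it, must check the return value
 * before touching the topology:
 *
 *      if (drm_dp_mst_topology_try_get_mstb(mstb)) {
 *              // mstb is guaranteed to stay in the topology until the put
 *              do_something_with(mstb);
 *              drm_dp_mst_topology_put_mstb(mstb);
 *      }
 */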
1641
1642/**
1643 * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
1644 * branch device
1645 * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
1646 *
1647 * Increments &drm_dp_mst_branch.topology_kref without checking whether or
1648 * not it's already reached 0. This is only valid to use in scenarios where
1649 * you are already guaranteed to have at least one active topology reference
1650 * to @mstb. Otherwise, drm_dp_mst_topology_try_get_mstb() must be used.
1651 *
1652 * See also:
1653 * drm_dp_mst_topology_try_get_mstb()
1654 * drm_dp_mst_topology_put_mstb()
1655 */
1656static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
1657{
1658        topology_ref_history_lock(mstb->mgr);
1659
1660        save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
1661        WARN_ON(kref_read(&mstb->topology_kref) == 0);
1662        kref_get(&mstb->topology_kref);
1663        DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
1664
1665        topology_ref_history_unlock(mstb->mgr);
1666}
1667
1668/**
1669 * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
1670 * device
1671 * @mstb: The &struct drm_dp_mst_branch to release the topology reference from
1672 *
1673 * Releases a topology reference from @mstb by decrementing
1674 * &drm_dp_mst_branch.topology_kref.
1675 *
1676 * See also:
1677 * drm_dp_mst_topology_try_get_mstb()
1678 * drm_dp_mst_topology_get_mstb()
1679 */
1680static void
1681drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
1682{
1683        topology_ref_history_lock(mstb->mgr);
1684
1685        DRM_DEBUG("mstb %p (%d)\n",
1686                  mstb, kref_read(&mstb->topology_kref) - 1);
1687        save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_PUT);
1688
1689        topology_ref_history_unlock(mstb->mgr);
1690        kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
1691}
1692
1693static void drm_dp_destroy_port(struct kref *kref)
1694{
1695        struct drm_dp_mst_port *port =
1696                container_of(kref, struct drm_dp_mst_port, topology_kref);
1697        struct drm_dp_mst_topology_mgr *mgr = port->mgr;
1698
1699        drm_dp_mst_dump_port_topology_history(port);
1700
1701        /* There's nothing that needs locking to destroy an input port yet */
1702        if (port->input) {
1703                drm_dp_mst_put_port_malloc(port);
1704                return;
1705        }
1706
1707        kfree(port->cached_edid);
1708
1709        /*
1710         * we can't destroy the connector here, as we might be holding the
1711         * mode_config.mutex from an EDID retrieval
1712         */
1713        mutex_lock(&mgr->delayed_destroy_lock);
1714        list_add(&port->next, &mgr->destroy_port_list);
1715        mutex_unlock(&mgr->delayed_destroy_lock);
1716        schedule_work(&mgr->delayed_destroy_work);
1717}
1718
1719/**
1720 * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
1721 * port unless it's zero
1722 * @port: &struct drm_dp_mst_port to increment the topology refcount of
1723 *
1724 * Attempts to grab a topology reference to @port, if it hasn't yet been
1725 * removed from the topology (i.e. &drm_dp_mst_port.topology_kref has reached
1726 * 0). Holding a topology reference implies that a malloc reference will be
1727 * held to @port as long as the user holds the topology reference.
1728 *
1729 * Care should be taken to ensure that the user has at least one malloc
1730 * reference to @port. If you already have a topology reference to @port, you
1731 * should use drm_dp_mst_topology_get_port() instead.
1732 *
1733 * See also:
1734 * drm_dp_mst_topology_get_port()
1735 * drm_dp_mst_topology_put_port()
1736 *
1737 * Returns:
1738 * * 1: A topology reference was grabbed successfully
1739 * * 0: @port is no longer in the topology, no reference was grabbed
1740 */
1741static int __must_check
1742drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
1743{
1744        int ret;
1745
1746        topology_ref_history_lock(port->mgr);
1747        ret = kref_get_unless_zero(&port->topology_kref);
1748        if (ret) {
1749                DRM_DEBUG("port %p (%d)\n",
1750                          port, kref_read(&port->topology_kref));
1751                save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
1752        }
1753
1754        topology_ref_history_unlock(port->mgr);
1755        return ret;
1756}
1757
1758/**
1759 * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
1760 * @port: The &struct drm_dp_mst_port to increment the topology refcount of
1761 *
1762 * Increments &drm_dp_mst_port.topology_kref without checking whether or
1763 * not it's already reached 0. This is only valid to use in scenarios where
1764 * you are already guaranteed to have at least one active topology reference
1765 * to @port. Otherwise, drm_dp_mst_topology_try_get_port() must be used.
1766 *
1767 * See also:
1768 * drm_dp_mst_topology_try_get_port()
1769 * drm_dp_mst_topology_put_port()
1770 */
1771static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
1772{
1773        topology_ref_history_lock(port->mgr);
1774
1775        WARN_ON(kref_read(&port->topology_kref) == 0);
1776        kref_get(&port->topology_kref);
1777        DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref));
1778        save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
1779
1780        topology_ref_history_unlock(port->mgr);
1781}
1782
1783/**
1784 * drm_dp_mst_topology_put_port() - release a topology reference to a port
1785 * @port: The &struct drm_dp_mst_port to release the topology reference from
1786 *
1787 * Releases a topology reference from @port by decrementing
1788 * &drm_dp_mst_port.topology_kref.
1789 *
1790 * See also:
1791 * drm_dp_mst_topology_try_get_port()
1792 * drm_dp_mst_topology_get_port()
1793 */
1794static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
1795{
1796        topology_ref_history_lock(port->mgr);
1797
1798        DRM_DEBUG("port %p (%d)\n",
1799                  port, kref_read(&port->topology_kref) - 1);
1800        save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_PUT);
1801
1802        topology_ref_history_unlock(port->mgr);
1803        kref_put(&port->topology_kref, drm_dp_destroy_port);
1804}
1805
1806static struct drm_dp_mst_branch *
1807drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb,
1808                                              struct drm_dp_mst_branch *to_find)
1809{
1810        struct drm_dp_mst_port *port;
1811        struct drm_dp_mst_branch *rmstb;
1812
1813        if (to_find == mstb)
1814                return mstb;
1815
1816        list_for_each_entry(port, &mstb->ports, next) {
1817                if (port->mstb) {
1818                        rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
1819                            port->mstb, to_find);
1820                        if (rmstb)
1821                                return rmstb;
1822                }
1823        }
1824        return NULL;
1825}
1826
1827static struct drm_dp_mst_branch *
1828drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr,
1829                                       struct drm_dp_mst_branch *mstb)
1830{
1831        struct drm_dp_mst_branch *rmstb = NULL;
1832
1833        mutex_lock(&mgr->lock);
1834        if (mgr->mst_primary) {
1835                rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
1836                    mgr->mst_primary, mstb);
1837
1838                if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb))
1839                        rmstb = NULL;
1840        }
1841        mutex_unlock(&mgr->lock);
1842        return rmstb;
1843}
1844
1845static struct drm_dp_mst_port *
1846drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb,
1847                                              struct drm_dp_mst_port *to_find)
1848{
1849        struct drm_dp_mst_port *port, *mport;
1850
1851        list_for_each_entry(port, &mstb->ports, next) {
1852                if (port == to_find)
1853                        return port;
1854
1855                if (port->mstb) {
1856                        mport = drm_dp_mst_topology_get_port_validated_locked(
1857                            port->mstb, to_find);
1858                        if (mport)
1859                                return mport;
1860                }
1861        }
1862        return NULL;
1863}
1864
1865static struct drm_dp_mst_port *
1866drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr,
1867                                       struct drm_dp_mst_port *port)
1868{
1869        struct drm_dp_mst_port *rport = NULL;
1870
1871        mutex_lock(&mgr->lock);
1872        if (mgr->mst_primary) {
1873                rport = drm_dp_mst_topology_get_port_validated_locked(
1874                    mgr->mst_primary, port);
1875
1876                if (rport && !drm_dp_mst_topology_try_get_port(rport))
1877                        rport = NULL;
1878        }
1879        mutex_unlock(&mgr->lock);
1880        return rport;
1881}
1882
1883static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
1884{
1885        struct drm_dp_mst_port *port;
1886        int ret;
1887
1888        list_for_each_entry(port, &mstb->ports, next) {
1889                if (port->port_num == port_num) {
1890                        ret = drm_dp_mst_topology_try_get_port(port);
1891                        return ret ? port : NULL;
1892                }
1893        }
1894
1895        return NULL;
1896}
1897
1898/*
1899 * Calculate a new RAD for this MST branch device.
1900 * If the parent has an LCT of 2, it has 1 nibble of RAD;
1901 * if the parent has an LCT of 3, it has 2 nibbles of RAD, and so on.
1902 */
1903static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
1904                                 u8 *rad)
1905{
1906        int parent_lct = port->parent->lct;
1907        int shift = 4;
1908        int idx = (parent_lct - 1) / 2;
1909        if (parent_lct > 1) {
1910                memcpy(rad, port->parent->rad, idx + 1);
1911                shift = (parent_lct % 2) ? 4 : 0;
1912        } else
1913                rad[0] = 0;
1914
1915        rad[idx] |= port->port_num << shift;
1916        return parent_lct + 1;
1917}
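
/*
 * A worked example of the packing above: starting at the primary branch
 * (LCT 1), going out port 2 yields LCT 2 with rad[0] = 0x20 (the hop in the
 * high nibble). Going out port 3 of that branch then yields LCT 3 with
 * rad[0] = 0x23 (the second hop in the low nibble), and so on, packing two
 * hops per RAD byte.
 */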
1918
1919static bool drm_dp_mst_is_dp_mst_end_device(u8 pdt, bool mcs)
1920{
1921        switch (pdt) {
1922        case DP_PEER_DEVICE_DP_LEGACY_CONV:
1923        case DP_PEER_DEVICE_SST_SINK:
1924                return true;
1925        case DP_PEER_DEVICE_MST_BRANCHING:
1926                /* For sst branch device */
1927                if (!mcs)
1928                        return true;
1929
1930                return false;
1931        }
1932        return true;
1933}
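
/*
 * In tabular form, the mapping implemented above:
 *
 *      PDT                             MCS     end device?
 *      DP_PEER_DEVICE_DP_LEGACY_CONV   any     yes
 *      DP_PEER_DEVICE_SST_SINK         any     yes
 *      DP_PEER_DEVICE_MST_BRANCHING    false   yes (SST branch device)
 *      DP_PEER_DEVICE_MST_BRANCHING    true    no
 *      any other PDT                   any     yes
 */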
1934
1935static int
1936drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
1937                    bool new_mcs)
1938{
1939        struct drm_dp_mst_topology_mgr *mgr = port->mgr;
1940        struct drm_dp_mst_branch *mstb;
1941        u8 rad[8], lct;
1942        int ret = 0;
1943
1944        if (port->pdt == new_pdt && port->mcs == new_mcs)
1945                return 0;
1946
1947        /* Teardown the old pdt, if there is one */
1948        if (port->pdt != DP_PEER_DEVICE_NONE) {
1949                if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
1950                        /*
1951                         * If the new PDT would also have an i2c bus,
1952                         * don't bother with reregistering it
1953                         */
1954                        if (new_pdt != DP_PEER_DEVICE_NONE &&
1955                            drm_dp_mst_is_dp_mst_end_device(new_pdt, new_mcs)) {
1956                                port->pdt = new_pdt;
1957                                port->mcs = new_mcs;
1958                                return 0;
1959                        }
1960
1961                        /* remove i2c over sideband */
1962                        drm_dp_mst_unregister_i2c_bus(&port->aux);
1963                } else {
1964                        mutex_lock(&mgr->lock);
1965                        drm_dp_mst_topology_put_mstb(port->mstb);
1966                        port->mstb = NULL;
1967                        mutex_unlock(&mgr->lock);
1968                }
1969        }
1970
1971        port->pdt = new_pdt;
1972        port->mcs = new_mcs;
1973
1974        if (port->pdt != DP_PEER_DEVICE_NONE) {
1975                if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
1976                        /* add i2c over sideband */
1977                        ret = drm_dp_mst_register_i2c_bus(&port->aux);
1978                } else {
1979                        lct = drm_dp_calculate_rad(port, rad);
1980                        mstb = drm_dp_add_mst_branch_device(lct, rad);
1981                        if (!mstb) {
1982                                ret = -ENOMEM;
1983                                DRM_ERROR("Failed to create MSTB for port %p",
1984                                          port);
1985                                goto out;
1986                        }
1987
1988                        mutex_lock(&mgr->lock);
1989                        port->mstb = mstb;
1990                        mstb->mgr = port->mgr;
1991                        mstb->port_parent = port;
1992
1993                        /*
1994                         * Make sure this port's memory allocation stays
1995                         * around until its child MSTB releases it
1996                         */
1997                        drm_dp_mst_get_port_malloc(port);
1998                        mutex_unlock(&mgr->lock);
1999
2000                        /* And make sure we send a link address for this */
2001                        ret = 1;
2002                }
2003        }
2004
2005out:
2006        if (ret < 0)
2007                port->pdt = DP_PEER_DEVICE_NONE;
2008        return ret;
2009}
2010
2011/**
2012 * drm_dp_mst_dpcd_read() - read a series of bytes from the DPCD via sideband
2013 * @aux: Fake sideband AUX CH
2014 * @offset: address of the (first) register to read
2015 * @buffer: buffer to store the register values
2016 * @size: number of bytes in @buffer
2017 *
2018 * Performs the same functionality for remote devices via
2019 * sideband messaging as drm_dp_dpcd_read() does for local
2020 * devices via actual AUX CH.
2021 *
2022 * Return: Number of bytes read, or negative error code on failure.
2023 */
2024ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
2025                             unsigned int offset, void *buffer, size_t size)
2026{
2027        struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
2028                                                    aux);
2029
2030        return drm_dp_send_dpcd_read(port->mgr, port,
2031                                     offset, size, buffer);
2032}
2033
2034/**
2035 * drm_dp_mst_dpcd_write() - write a series of bytes to the DPCD via sideband
2036 * @aux: Fake sideband AUX CH
2037 * @offset: address of the (first) register to write
2038 * @buffer: buffer containing the values to write
2039 * @size: number of bytes in @buffer
2040 *
2041 * Performs the same functionality for remote devices via
2042 * sideband messaging as drm_dp_dpcd_write() does for local
2043 * devices via actual AUX CH.
2044 *
2045 * Return: 0 on success, negative error code on failure.
2046 */
2047ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
2048                              unsigned int offset, void *buffer, size_t size)
2049{
2050        struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
2051                                                    aux);
2052
2053        return drm_dp_send_dpcd_write(port->mgr, port,
2054                                      offset, size, buffer);
2055}
2056
2057static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
2058{
2059        int ret;
2060
2061        memcpy(mstb->guid, guid, 16);
2062
2063        if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
2064                if (mstb->port_parent) {
2065                        ret = drm_dp_send_dpcd_write(
2066                                        mstb->mgr,
2067                                        mstb->port_parent,
2068                                        DP_GUID,
2069                                        16,
2070                                        mstb->guid);
2071                } else {
2073                        ret = drm_dp_dpcd_write(
2074                                        mstb->mgr->aux,
2075                                        DP_GUID,
2076                                        mstb->guid,
2077                                        16);
2078                }
2079        }
2080}
2081
2082static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
2083                                int pnum,
2084                                char *proppath,
2085                                size_t proppath_size)
2086{
2087        int i;
2088        char temp[8];
2089        snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
2090        for (i = 0; i < (mstb->lct - 1); i++) {
2091                int shift = (i % 2) ? 0 : 4;
2092                int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
2093                snprintf(temp, sizeof(temp), "-%d", port_num);
2094                strlcat(proppath, temp, proppath_size);
2095        }
2096        snprintf(temp, sizeof(temp), "-%d", pnum);
2097        strlcat(proppath, temp, proppath_size);
2098}
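
/*
 * As an example of the resulting format: with a connector base id of 30, a
 * port on a branch device hanging off port 1 of the primary branch might get
 * the property path "mst:30-1-8" - the manager's base connector id, one RAD
 * port number per hop, and finally the port number itself.
 */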
2099
2100/**
2101 * drm_dp_mst_connector_late_register() - Late MST connector registration
2102 * @connector: The MST connector
2103 * @port: The MST port for this connector
2104 *
2105 * Helper to register the remote aux device for this MST port. Drivers should
2106 * call this from their mst connector's late_register hook to enable MST aux
2107 * devices.
2108 *
2109 * Return: 0 on success, negative error code on failure.
2110 */
2111int drm_dp_mst_connector_late_register(struct drm_connector *connector,
2112                                       struct drm_dp_mst_port *port)
2113{
2114        DRM_DEBUG_KMS("registering %s remote bus for %s\n",
2115                      port->aux.name, connector->kdev->kobj.name);
2116
2117        port->aux.dev = connector->kdev;
2118        return drm_dp_aux_register_devnode(&port->aux);
2119}
2120EXPORT_SYMBOL(drm_dp_mst_connector_late_register);
2121
2122/**
2123 * drm_dp_mst_connector_early_unregister() - Early MST connector unregistration
2124 * @connector: The MST connector
2125 * @port: The MST port for this connector
2126 *
2127 * Helper to unregister the remote aux device for this MST port, registered by
2128 * drm_dp_mst_connector_late_register(). Drivers should call this from their mst
2129 * connector's early_unregister hook.
2130 */
2131void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
2132                                           struct drm_dp_mst_port *port)
2133{
2134        DRM_DEBUG_KMS("unregistering %s remote bus for %s\n",
2135                      port->aux.name, connector->kdev->kobj.name);
2136        drm_dp_aux_unregister_devnode(&port->aux);
2137}
2138EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister);
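
/*
 * A sketch of how a driver typically wires these helpers up (everything
 * except the two drm_dp_mst_connector_* calls is hypothetical):
 *
 *      static int my_connector_late_register(struct drm_connector *connector)
 *      {
 *              struct my_connector *conn = to_my_connector(connector);
 *
 *              return drm_dp_mst_connector_late_register(connector,
 *                                                        conn->port);
 *      }
 *
 *      static void my_connector_early_unregister(struct drm_connector *connector)
 *      {
 *              struct my_connector *conn = to_my_connector(connector);
 *
 *              drm_dp_mst_connector_early_unregister(connector, conn->port);
 *      }
 *
 * with both plugged into the connector's &struct drm_connector_funcs.
 */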
2139
2140static void
2141drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
2142                              struct drm_dp_mst_port *port)
2143{
2144        struct drm_dp_mst_topology_mgr *mgr = port->mgr;
2145        char proppath[255];
2146        int ret;
2147
2148        build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
2149        port->connector = mgr->cbs->add_connector(mgr, port, proppath);
2150        if (!port->connector) {
2151                ret = -ENOMEM;
2152                goto error;
2153        }
2154
2155        if (port->pdt != DP_PEER_DEVICE_NONE &&
2156            drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
2157                port->cached_edid = drm_get_edid(port->connector,
2158                                                 &port->aux.ddc);
2159                drm_connector_set_tile_property(port->connector);
2160        }
2161
2162        mgr->cbs->register_connector(port->connector);
2163        return;
2164
2165error:
2166        DRM_ERROR("Failed to create connector for port %p: %d\n", port, ret);
2167}
2168
2169/*
2170 * Drop a topology reference, and unlink the port from the in-memory topology
2171 * layout
2172 */
2173static void
2174drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr,
2175                                struct drm_dp_mst_port *port)
2176{
2177        mutex_lock(&mgr->lock);
2178        list_del(&port->next);
2179        mutex_unlock(&mgr->lock);
2180        drm_dp_mst_topology_put_port(port);
2181}
2182
2183static struct drm_dp_mst_port *
2184drm_dp_mst_add_port(struct drm_device *dev,
2185                    struct drm_dp_mst_topology_mgr *mgr,
2186                    struct drm_dp_mst_branch *mstb, u8 port_number)
2187{
2188        struct drm_dp_mst_port *port = kzalloc(sizeof(*port), GFP_KERNEL);
2189
2190        if (!port)
2191                return NULL;
2192
2193        kref_init(&port->topology_kref);
2194        kref_init(&port->malloc_kref);
2195        port->parent = mstb;
2196        port->port_num = port_number;
2197        port->mgr = mgr;
2198        port->aux.name = "DPMST";
2199        port->aux.dev = dev->dev;
2200        port->aux.is_remote = true;
2201
2202        /*
2203         * Make sure the memory allocation for our parent branch stays
2204         * around until our own memory allocation is released
2205         */
2206        drm_dp_mst_get_mstb_malloc(mstb);
2207
2208        return port;
2209}
2210
2211static int
2212drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
2213                                    struct drm_device *dev,
2214                                    struct drm_dp_link_addr_reply_port *port_msg)
2215{
2216        struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
2217        struct drm_dp_mst_port *port;
2218        int old_ddps = 0, ret;
2219        u8 new_pdt = DP_PEER_DEVICE_NONE;
2220        bool new_mcs = false;
2221        bool created = false, send_link_addr = false, changed = false;
2222
2223        port = drm_dp_get_port(mstb, port_msg->port_number);
2224        if (!port) {
2225                port = drm_dp_mst_add_port(dev, mgr, mstb,
2226                                           port_msg->port_number);
2227                if (!port)
2228                        return -ENOMEM;
2229                created = true;
2230                changed = true;
2231        } else if (!port->input && port_msg->input_port && port->connector) {
2232                /* Since port->connector can't be changed here, we create a
2233                 * new port if input_port changes from 0 to 1
2234                 */
2235                drm_dp_mst_topology_unlink_port(mgr, port);
2236                drm_dp_mst_topology_put_port(port);
2237                port = drm_dp_mst_add_port(dev, mgr, mstb,
2238                                           port_msg->port_number);
2239                if (!port)
2240                        return -ENOMEM;
2241                changed = true;
2242                created = true;
2243        } else if (port->input && !port_msg->input_port) {
2244                changed = true;
2245        } else if (port->connector) {
2246                /* We're updating a port that's exposed to userspace, so do it
2247                 * under lock
2248                 */
2249                drm_modeset_lock(&mgr->base.lock, NULL);
2250
2251                old_ddps = port->ddps;
2252                changed = port->ddps != port_msg->ddps ||
2253                        (port->ddps &&
2254                         (port->ldps != port_msg->legacy_device_plug_status ||
2255                          port->dpcd_rev != port_msg->dpcd_revision ||
2256                          port->mcs != port_msg->mcs ||
2257                          port->pdt != port_msg->peer_device_type ||
2258                          port->num_sdp_stream_sinks !=
2259                          port_msg->num_sdp_stream_sinks));
2260        }
2261
2262        port->input = port_msg->input_port;
2263        if (!port->input)
2264                new_pdt = port_msg->peer_device_type;
2265        new_mcs = port_msg->mcs;
2266        port->ddps = port_msg->ddps;
2267        port->ldps = port_msg->legacy_device_plug_status;
2268        port->dpcd_rev = port_msg->dpcd_revision;
2269        port->num_sdp_streams = port_msg->num_sdp_streams;
2270        port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
2271
2272        /* manage mstb port lists with mgr lock - take a reference
2273         * for this list */
2274        if (created) {
2275                mutex_lock(&mgr->lock);
2276                drm_dp_mst_topology_get_port(port);
2277                list_add(&port->next, &mstb->ports);
2278                mutex_unlock(&mgr->lock);
2279        }
2280
2281        if (old_ddps != port->ddps) {
2282                if (port->ddps) {
2283                        if (!port->input) {
2284                                drm_dp_send_enum_path_resources(mgr, mstb,
2285                                                                port);
2286                        }
2287                } else {
2288                        port->available_pbn = 0;
2289                }
2290        }
2291
2292        ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
2293        if (ret == 1) {
2294                send_link_addr = true;
2295        } else if (ret < 0) {
2296                DRM_ERROR("Failed to change PDT on port %p: %d\n",
2297                          port, ret);
2298                goto fail;
2299        }
2300
2301        /*
2302         * If this port wasn't just created, then we're reprobing because
2303         * we're coming out of suspend. In this case, always resend the link
2304         * address if there's an MSTB on this port
2305         */
2306        if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
2307            port->mcs)
2308                send_link_addr = true;
2309
2310        if (port->connector)
2311                drm_modeset_unlock(&mgr->base.lock);
2312        else if (!port->input)
2313                drm_dp_mst_port_add_connector(mstb, port);
2314
2315        if (send_link_addr && port->mstb) {
2316                ret = drm_dp_send_link_address(mgr, port->mstb);
2317                if (ret == 1) /* MSTB below us changed */
2318                        changed = true;
2319                else if (ret < 0)
2320                        goto fail_put;
2321        }
2322
2323        /* put reference to this port */
2324        drm_dp_mst_topology_put_port(port);
2325        return changed;
2326
2327fail:
2328        drm_dp_mst_topology_unlink_port(mgr, port);
2329        if (port->connector)
2330                drm_modeset_unlock(&mgr->base.lock);
2331fail_put:
2332        drm_dp_mst_topology_put_port(port);
2333        return ret;
2334}
2335
2336static void
2337drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
2338                            struct drm_dp_connection_status_notify *conn_stat)
2339{
2340        struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
2341        struct drm_dp_mst_port *port;
2342        int old_ddps, old_input, ret, i;
2343        u8 new_pdt;
2344        bool new_mcs;
2345        bool dowork = false, create_connector = false;
2346
2347        port = drm_dp_get_port(mstb, conn_stat->port_number);
2348        if (!port)
2349                return;
2350
2351        if (port->connector) {
2352                if (!port->input && conn_stat->input_port) {
2353                        /*
2354                         * We can't remove a connector from an already exposed
2355                         * port, so just throw the port out and make sure we
2356                         * reprobe the link address of its parent MSTB
2357                         */
2358                        drm_dp_mst_topology_unlink_port(mgr, port);
2359                        mstb->link_address_sent = false;
2360                        dowork = true;
2361                        goto out;
2362                }
2363
2364                /* Locking is only needed if the port's exposed to userspace */
2365                drm_modeset_lock(&mgr->base.lock, NULL);
2366        } else if (port->input && !conn_stat->input_port) {
2367                create_connector = true;
2368                /* Reprobe link address so we get num_sdp_streams */
2369                mstb->link_address_sent = false;
2370                dowork = true;
2371        }
2372
2373        old_ddps = port->ddps;
2374        old_input = port->input;
2375        port->input = conn_stat->input_port;
2376        port->ldps = conn_stat->legacy_device_plug_status;
2377        port->ddps = conn_stat->displayport_device_plug_status;
2378
2379        if (old_ddps != port->ddps) {
2380                if (port->ddps) {
2381                        dowork = true;
2382                } else {
2383                        port->available_pbn = 0;
2384                }
2385        }
2386
2387        new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
2388        new_mcs = conn_stat->message_capability_status;
2389        ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
2390        if (ret == 1) {
2391                dowork = true;
2392        } else if (ret < 0) {
2393                DRM_ERROR("Failed to change PDT for port %p: %d\n",
2394                          port, ret);
2395                dowork = false;
2396        }
2397
2398        if (!old_input && old_ddps != port->ddps && !port->ddps) {
2399                for (i = 0; i < mgr->max_payloads; i++) {
2400                        struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
2401                        struct drm_dp_mst_port *port_validated;
2402
2403                        if (!vcpi)
2404                                continue;
2405
2406                        port_validated =
2407                                container_of(vcpi, struct drm_dp_mst_port, vcpi);
2408                        port_validated =
2409                                drm_dp_mst_topology_get_port_validated(mgr, port_validated);
2410                        if (!port_validated) {
2411                                mutex_lock(&mgr->payload_lock);
2412                                vcpi->num_slots = 0;
2413                                mutex_unlock(&mgr->payload_lock);
2414                        } else {
2415                                drm_dp_mst_topology_put_port(port_validated);
2416                        }
2417                }
2418        }
2419
2420        if (port->connector)
2421                drm_modeset_unlock(&mgr->base.lock);
2422        else if (create_connector)
2423                drm_dp_mst_port_add_connector(mstb, port);
2424
2425out:
2426        drm_dp_mst_topology_put_port(port);
2427        if (dowork)
2428                queue_work(system_long_wq, &mstb->mgr->work);
2429}
2430
2431static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
2432                                                               u8 lct, u8 *rad)
2433{
2434        struct drm_dp_mst_branch *mstb;
2435        struct drm_dp_mst_port *port;
2436        int i, ret;
2437        /* find the branch device by iterating down */
2438
2439        mutex_lock(&mgr->lock);
2440        mstb = mgr->mst_primary;
2441
2442        if (!mstb)
2443                goto out;
2444
2445        for (i = 0; i < lct - 1; i++) {
2446                int shift = (i % 2) ? 0 : 4;
2447                int port_num = (rad[i / 2] >> shift) & 0xf;
2448
2449                list_for_each_entry(port, &mstb->ports, next) {
2450                        if (port->port_num == port_num) {
2451                                mstb = port->mstb;
2452                                if (!mstb) {
2453                                        DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
2454                                        goto out;
2455                                }
2456
2457                                break;
2458                        }
2459                }
2460        }
2461        ret = drm_dp_mst_topology_try_get_mstb(mstb);
2462        if (!ret)
2463                mstb = NULL;
2464out:
2465        mutex_unlock(&mgr->lock);
2466        return mstb;
2467}
2468
2469static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
2470        struct drm_dp_mst_branch *mstb,
2471        const uint8_t *guid)
2472{
2473        struct drm_dp_mst_branch *found_mstb;
2474        struct drm_dp_mst_port *port;
2475
2476        if (memcmp(mstb->guid, guid, 16) == 0)
2477                return mstb;
2478
2480        list_for_each_entry(port, &mstb->ports, next) {
2481                if (!port->mstb)
2482                        continue;
2483
2484                found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
2485
2486                if (found_mstb)
2487                        return found_mstb;
2488        }
2489
2490        return NULL;
2491}
2492
2493static struct drm_dp_mst_branch *
2494drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
2495                                     const uint8_t *guid)
2496{
2497        struct drm_dp_mst_branch *mstb;
2498        int ret;
2499
2500        /* find the branch device by iterating down and matching the GUID */
2501        mutex_lock(&mgr->lock);
2502
2503        mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
2504        if (mstb) {
2505                ret = drm_dp_mst_topology_try_get_mstb(mstb);
2506                if (!ret)
2507                        mstb = NULL;
2508        }
2509
2510        mutex_unlock(&mgr->lock);
2511        return mstb;
2512}
2513
2514static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
2515                                               struct drm_dp_mst_branch *mstb)
2516{
2517        struct drm_dp_mst_port *port;
2518        int ret;
2519        bool changed = false;
2520
2521        if (!mstb->link_address_sent) {
2522                ret = drm_dp_send_link_address(mgr, mstb);
2523                if (ret == 1)
2524                        changed = true;
2525                else if (ret < 0)
2526                        return ret;
2527        }
2528
2529        list_for_each_entry(port, &mstb->ports, next) {
2530                struct drm_dp_mst_branch *mstb_child = NULL;
2531
2532                if (port->input || !port->ddps)
2533                        continue;
2534
2535                if (!port->available_pbn) {
2536                        drm_modeset_lock(&mgr->base.lock, NULL);
2537                        drm_dp_send_enum_path_resources(mgr, mstb, port);
2538                        drm_modeset_unlock(&mgr->base.lock);
2539                        changed = true;
2540                }
2541
2542                if (port->mstb)
2543                        mstb_child = drm_dp_mst_topology_get_mstb_validated(
2544                            mgr, port->mstb);
2545
2546                if (mstb_child) {
2547                        ret = drm_dp_check_and_send_link_address(mgr,
2548                                                                 mstb_child);
2549                        drm_dp_mst_topology_put_mstb(mstb_child);
2550                        if (ret == 1)
2551                                changed = true;
2552                        else if (ret < 0)
2553                                return ret;
2554                }
2555        }
2556
2557        return changed;
2558}
2559
2560static void drm_dp_mst_link_probe_work(struct work_struct *work)
2561{
2562        struct drm_dp_mst_topology_mgr *mgr =
2563                container_of(work, struct drm_dp_mst_topology_mgr, work);
2564        struct drm_device *dev = mgr->dev;
2565        struct drm_dp_mst_branch *mstb;
2566        int ret;
2567
2568        mutex_lock(&mgr->probe_lock);
2569
2570        mutex_lock(&mgr->lock);
2571        mstb = mgr->mst_primary;
2572        if (mstb) {
2573                ret = drm_dp_mst_topology_try_get_mstb(mstb);
2574                if (!ret)
2575                        mstb = NULL;
2576        }
2577        mutex_unlock(&mgr->lock);
2578        if (!mstb) {
2579                mutex_unlock(&mgr->probe_lock);
2580                return;
2581        }
2582
2583        ret = drm_dp_check_and_send_link_address(mgr, mstb);
2584        drm_dp_mst_topology_put_mstb(mstb);
2585
2586        mutex_unlock(&mgr->probe_lock);
2587        if (ret)
2588                drm_kms_helper_hotplug_event(dev);
2589}
2590
2591static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
2592                                 u8 *guid)
2593{
2594        u64 salt;
2595
2596        if (memchr_inv(guid, 0, 16))
2597                return true;
2598
2599        salt = get_jiffies_64();
2600
2601        memcpy(&guid[0], &salt, sizeof(u64));
2602        memcpy(&guid[8], &salt, sizeof(u64));
2603
2604        return false;
2605}
2606
2607static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
2608{
2609        struct drm_dp_sideband_msg_req_body req;
2610
2611        req.req_type = DP_REMOTE_DPCD_READ;
2612        req.u.dpcd_read.port_number = port_num;
2613        req.u.dpcd_read.dpcd_address = offset;
2614        req.u.dpcd_read.num_bytes = num_bytes;
2615        drm_dp_encode_sideband_req(&req, msg);
2616
2617        return 0;
2618}
2619
2620static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
2621                                    bool up, u8 *msg, int len)
2622{
2623        int ret;
2624        int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
2625        int tosend, total, offset;
2626        int retries = 0;
2627
2628retry:
2629        total = len;
2630        offset = 0;
2631        do {
2632                tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
2633
2634                ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
2635                                        &msg[offset],
2636                                        tosend);
2637                if (ret != tosend) {
2638                        if (ret == -EIO && retries < 5) {
2639                                retries++;
2640                                goto retry;
2641                        }
2642                        DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
2643
2644                        return -EIO;
2645                }
2646                offset += tosend;
2647                total -= tosend;
2648        } while (total > 0);
2649        return 0;
2650}
2651
2652static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
2653                                  struct drm_dp_sideband_msg_tx *txmsg)
2654{
2655        struct drm_dp_mst_branch *mstb = txmsg->dst;
2656        u8 req_type;
2657
2658        /* assign a seqno and msg slot if this txmsg doesn't have one yet */
2659        if (txmsg->seqno == -1) {
2660                if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
2661                        DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
2662                        return -EAGAIN;
2663                }
2664                if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
2665                        txmsg->seqno = mstb->last_seqno;
2666                        mstb->last_seqno ^= 1;
2667                } else if (mstb->tx_slots[0] == NULL)
2668                        txmsg->seqno = 0;
2669                else
2670                        txmsg->seqno = 1;
2671                mstb->tx_slots[txmsg->seqno] = txmsg;
2672        }
2673
2674        req_type = txmsg->msg[0] & 0x7f;
2675        if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
2676                req_type == DP_RESOURCE_STATUS_NOTIFY)
2677                hdr->broadcast = 1;
2678        else
2679                hdr->broadcast = 0;
2680        hdr->path_msg = txmsg->path_msg;
2681        hdr->lct = mstb->lct;
2682        hdr->lcr = mstb->lct - 1;
2683        if (mstb->lct > 1)
2684                memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
2685        hdr->seqno = txmsg->seqno;
2686        return 0;
2687}
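
/*
 * To illustrate the slot selection above: each branch device has two
 * sideband message slots (seqno 0 and 1). If both are free, last_seqno is
 * used to alternate between them; if exactly one is free, that one is
 * taken; if neither is free, the caller gets -EAGAIN and the message stays
 * queued for a later attempt.
 */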
2688/*
2689 * process a single block of the next message in the sideband queue
2690 */
2691static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
2692                                   struct drm_dp_sideband_msg_tx *txmsg,
2693                                   bool up)
2694{
2695        u8 chunk[48];
2696        struct drm_dp_sideband_msg_hdr hdr;
2697        int len, space, idx, tosend;
2698        int ret;
2699
2700        memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
2701
2702        if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
2703                txmsg->seqno = -1;
2704                txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
2705        }
2706
2707        /* make hdr from dst mst - for replies use seqno,
2708         * otherwise assign one */
2709        ret = set_hdr_from_dst_qlock(&hdr, txmsg);
2710        if (ret < 0)
2711                return ret;
2712
2713        /* amount left to send in this message */
2714        len = txmsg->cur_len - txmsg->cur_offset;
2715
2716        /* 48 - sideband msg size - 1 byte for data CRC, x header bytes */
2717        space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);
2718
2719        tosend = min(len, space);
2720        if (len == txmsg->cur_len)
2721                hdr.somt = 1;
2722        if (space >= len)
2723                hdr.eomt = 1;
2724
2726        hdr.msg_len = tosend + 1;
2727        drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
2728        memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
2729        /* add crc at end */
2730        drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
2731        idx += tosend + 1;
2732
2733        ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
2734        if (unlikely(ret) && drm_debug_enabled(DRM_UT_DP)) {
2735                struct drm_printer p = drm_debug_printer(DBG_PREFIX);
2736
2737                drm_printf(&p, "sideband msg failed to send\n");
2738                drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
2739                return ret;
2740        }
2741
2742        txmsg->cur_offset += tosend;
2743        if (txmsg->cur_offset == txmsg->cur_len) {
2744                txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
2745                return 1;
2746        }
2747        return 0;
2748}
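
/*
 * A quick sanity check of the space math above: for a directly attached
 * branch (LCT 1) the header is 3 bytes, so a single 48-byte chunk carries
 * at most 48 - 1 (CRC) - 3 (header) = 44 bytes of message body. Longer
 * messages simply span multiple chunks, with somt set on the first chunk
 * and eomt on the last.
 */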
2749
2750static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
2751{
2752        struct drm_dp_sideband_msg_tx *txmsg;
2753        int ret;
2754
2755        WARN_ON(!mutex_is_locked(&mgr->qlock));
2756
2757        /* construct a chunk from the first msg in the tx_msg queue */
2758        if (list_empty(&mgr->tx_msg_downq))
2759                return;
2760
2761        txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
2762        ret = process_single_tx_qlock(mgr, txmsg, false);
2763        if (ret == 1) {
2764                /* txmsg is sent, it should be in the slots now */
2765                mgr->is_waiting_for_dwn_reply = true;
2766                list_del(&txmsg->next);
2767        } else if (ret) {
2768                DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
2769                mgr->is_waiting_for_dwn_reply = false;
2770                list_del(&txmsg->next);
2771                if (txmsg->seqno != -1)
2772                        txmsg->dst->tx_slots[txmsg->seqno] = NULL;
2773                txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
2774                wake_up_all(&mgr->tx_waitq);
2775        }
2776}
2777
2778/* called holding qlock */
2779static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
2780                                       struct drm_dp_sideband_msg_tx *txmsg)
2781{
2782        int ret;
2783
2784        /* construct a chunk from the msg we were handed */
2785        ret = process_single_tx_qlock(mgr, txmsg, true);
2786
2787        if (ret != 1)
2788                DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
2789
2790        if (txmsg->seqno != -1) {
2791                WARN_ON((unsigned int)txmsg->seqno >
2792                        ARRAY_SIZE(txmsg->dst->tx_slots));
2793                txmsg->dst->tx_slots[txmsg->seqno] = NULL;
2794        }
2795}
2796
2797static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
2798                                 struct drm_dp_sideband_msg_tx *txmsg)
2799{
2800        mutex_lock(&mgr->qlock);
2801        list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
2802
2803        if (drm_debug_enabled(DRM_UT_DP)) {
2804                struct drm_printer p = drm_debug_printer(DBG_PREFIX);
2805
2806                drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
2807        }
2808
2809        if (list_is_singular(&mgr->tx_msg_downq) &&
2810            !mgr->is_waiting_for_dwn_reply)
2811                process_single_down_tx_qlock(mgr);
2812        mutex_unlock(&mgr->qlock);
2813}
2814
2815static void
2816drm_dp_dump_link_address(struct drm_dp_link_address_ack_reply *reply)
2817{
2818        struct drm_dp_link_addr_reply_port *port_reply;
2819        int i;
2820
2821        for (i = 0; i < reply->nports; i++) {
2822                port_reply = &reply->ports[i];
2823                DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n",
2824                              i,
2825                              port_reply->input_port,
2826                              port_reply->peer_device_type,
2827                              port_reply->port_number,
2828                              port_reply->dpcd_revision,
2829                              port_reply->mcs,
2830                              port_reply->ddps,
2831                              port_reply->legacy_device_plug_status,
2832                              port_reply->num_sdp_streams,
2833                              port_reply->num_sdp_stream_sinks);
2834        }
2835}
2836
2837static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
2838                                     struct drm_dp_mst_branch *mstb)
2839{
2840        struct drm_dp_sideband_msg_tx *txmsg;
2841        struct drm_dp_link_address_ack_reply *reply;
2842        struct drm_dp_mst_port *port, *tmp;
2843        int i, len, ret, port_mask = 0;
2844        bool changed = false;
2845
2846        txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2847        if (!txmsg)
2848                return -ENOMEM;
2849
2850        txmsg->dst = mstb;
2851        len = build_link_address(txmsg);
2852
2853        mstb->link_address_sent = true;
2854        drm_dp_queue_down_tx(mgr, txmsg);
2855
2856        /* FIXME: Actually do some real error handling here */
2857        ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2858        if (ret <= 0) {
2859                DRM_ERROR("Sending link address failed with %d\n", ret);
2860                goto out;
2861        }
2862        if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
2863                DRM_ERROR("link address NAK received\n");
2864                ret = -EIO;
2865                goto out;
2866        }
2867
2868        reply = &txmsg->reply.u.link_addr;
2869        DRM_DEBUG_KMS("link address reply: %d\n", reply->nports);
2870        drm_dp_dump_link_address(reply);
2871
2872        drm_dp_check_mstb_guid(mstb, reply->guid);
2873
2874        for (i = 0; i < reply->nports; i++) {
2875                port_mask |= BIT(reply->ports[i].port_number);
2876                ret = drm_dp_mst_handle_link_address_port(mstb, mgr->dev,
2877                                                          &reply->ports[i]);
2878                if (ret == 1)
2879                        changed = true;
2880                else if (ret < 0)
2881                        goto out;
2882        }
2883
2884        /* Prune any ports that are currently a part of mstb in our in-memory
2885         * topology, but were not seen in this link address. Usually this
2886         * means that they were removed while the topology was out of sync,
2887         * e.g. during suspend/resume
2888         */
2889        mutex_lock(&mgr->lock);
2890        list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
2891                if (port_mask & BIT(port->port_num))
2892                        continue;
2893
2894                DRM_DEBUG_KMS("port %d was not in link address, removing\n",
2895                              port->port_num);
2896                list_del(&port->next);
2897                drm_dp_mst_topology_put_port(port);
2898                changed = true;
2899        }
2900        mutex_unlock(&mgr->lock);
2901
2902out:
2903        if (ret <= 0)
2904                mstb->link_address_sent = false;
2905        kfree(txmsg);
2906        return ret < 0 ? ret : changed;
2907}
2908
2909static int
2910drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
2911                                struct drm_dp_mst_branch *mstb,
2912                                struct drm_dp_mst_port *port)
2913{
2914        struct drm_dp_enum_path_resources_ack_reply *path_res;
2915        struct drm_dp_sideband_msg_tx *txmsg;
2916        int len;
2917        int ret;
2918
2919        txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2920        if (!txmsg)
2921                return -ENOMEM;
2922
2923        txmsg->dst = mstb;
2924        len = build_enum_path_resources(txmsg, port->port_num);
2925
2926        drm_dp_queue_down_tx(mgr, txmsg);
2927
2928        ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2929        if (ret > 0) {
2930                path_res = &txmsg->reply.u.path_resources;
2931
2932                if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
2933                        DRM_DEBUG_KMS("enum path resources nak received\n");
2934                } else {
2935                        if (port->port_num != path_res->port_number)
2936                                DRM_ERROR("got incorrect port in response\n");
2937
2938                        DRM_DEBUG_KMS("enum path resources %d: %d %d\n",
2939                                      path_res->port_number,
2940                                      path_res->full_payload_bw_number,
2941                                      path_res->avail_payload_bw_number);
2942                        port->available_pbn =
2943                                path_res->avail_payload_bw_number;
2944                }
2945        }
2946
2947        kfree(txmsg);
2948        return 0;
2949}
2950
2951static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
2952{
2953        if (!mstb->port_parent)
2954                return NULL;
2955
2956        if (mstb->port_parent->mstb != mstb)
2957                return mstb->port_parent;
2958
2959        return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
2960}
2961
2962/*
2963 * Searches upwards in the topology starting from mstb to try to find the
2964 * closest available parent of mstb that's still connected to the rest of the
2965 * topology. This can be used in order to perform operations like releasing
2966 * payloads, where the branch device which owned the payload may no longer be
2967 * around and thus would require that the payload on the last living relative
2968 * be freed instead.
2969 */
2970static struct drm_dp_mst_branch *
2971drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
2972                                        struct drm_dp_mst_branch *mstb,
2973                                        int *port_num)
2974{
2975        struct drm_dp_mst_branch *rmstb = NULL;
2976        struct drm_dp_mst_port *found_port;
2977
2978        mutex_lock(&mgr->lock);
2979        if (!mgr->mst_primary)
2980                goto out;
2981
2982        do {
2983                found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
2984                if (!found_port)
2985                        break;
2986
2987                if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) {
2988                        rmstb = found_port->parent;
2989                        *port_num = found_port->port_num;
2990                } else {
2991                        /* Search again, starting from this parent */
2992                        mstb = found_port->parent;
2993                }
2994        } while (!rmstb);
2995out:
2996        mutex_unlock(&mgr->lock);
2997        return rmstb;
2998}
2999
3000static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
3001                                   struct drm_dp_mst_port *port,
3002                                   int id,
3003                                   int pbn)
3004{
3005        struct drm_dp_sideband_msg_tx *txmsg;
3006        struct drm_dp_mst_branch *mstb;
3007        int len, ret, port_num;
3008        u8 sinks[DRM_DP_MAX_SDP_STREAMS];
3009        int i;
3010
3011        port_num = port->port_num;
3012        mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3013        if (!mstb) {
3014                mstb = drm_dp_get_last_connected_port_and_mstb(mgr,
3015                                                               port->parent,
3016                                                               &port_num);
3017
3018                if (!mstb)
3019                        return -EINVAL;
3020        }
3021
3022        txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3023        if (!txmsg) {
3024                ret = -ENOMEM;
3025                goto fail_put;
3026        }
3027
3028        for (i = 0; i < port->num_sdp_streams; i++)
3029                sinks[i] = i;
3030
3031        txmsg->dst = mstb;
3032        len = build_allocate_payload(txmsg, port_num,
3033                                     id,
3034                                     pbn, port->num_sdp_streams, sinks);
3035
3036        drm_dp_queue_down_tx(mgr, txmsg);
3037
3038        /*
3039         * FIXME: there is a small chance that between getting the last
3040         * connected mstb and sending the payload message, the last connected
3041         * mstb could also be removed from the topology. In the future, this
3042         * needs to be fixed by restarting the
3043         * drm_dp_get_last_connected_port_and_mstb() search in the event of a
3044         * timeout if the topology is still connected to the system.
3045         */
3046        ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3047        if (ret > 0) {
3048                if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3049                        ret = -EINVAL;
3050                else
3051                        ret = 0;
3052        }
3053        kfree(txmsg);
3054fail_put:
3055        drm_dp_mst_topology_put_mstb(mstb);
3056        return ret;
3057}
3058
3059int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
3060                                 struct drm_dp_mst_port *port, bool power_up)
3061{
3062        struct drm_dp_sideband_msg_tx *txmsg;
3063        int len, ret;
3064
3065        port = drm_dp_mst_topology_get_port_validated(mgr, port);
3066        if (!port)
3067                return -EINVAL;
3068
3069        txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3070        if (!txmsg) {
3071                drm_dp_mst_topology_put_port(port);
3072                return -ENOMEM;
3073        }
3074
3075        txmsg->dst = port->parent;
3076        len = build_power_updown_phy(txmsg, port->port_num, power_up);
3077        drm_dp_queue_down_tx(mgr, txmsg);
3078
3079        ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
3080        if (ret > 0) {
3081                if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3082                        ret = -EINVAL;
3083                else
3084                        ret = 0;
3085        }
3086        kfree(txmsg);
3087        drm_dp_mst_topology_put_port(port);
3088
3089        return ret;
3090}
3091EXPORT_SYMBOL(drm_dp_send_power_updown_phy);
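/*
 * Illustrative sketch (assumed caller code, not part of this file): a driver
 * would typically power the downstream phy up before enabling the stream and
 * power it down again after disabling it, ignoring failures on the way down:
 *
 *	drm_dp_send_power_updown_phy(mgr, port, true);
 *	... enable the stream and allocate its payload ...
 *	drm_dp_send_power_updown_phy(mgr, port, false);
 */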
3092
3093static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
3094                                       int id,
3095                                       struct drm_dp_payload *payload)
3096{
3097        int ret;
3098
3099        ret = drm_dp_dpcd_write_payload(mgr, id, payload);
3100        if (ret < 0) {
3101                payload->payload_state = 0;
3102                return ret;
3103        }
3104        payload->payload_state = DP_PAYLOAD_LOCAL;
3105        return 0;
3106}
3107
3108static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
3109                                       struct drm_dp_mst_port *port,
3110                                       int id,
3111                                       struct drm_dp_payload *payload)
3112{
3113        int ret;
3114        ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
3115        if (ret < 0)
3116                return ret;
3117        payload->payload_state = DP_PAYLOAD_REMOTE;
3118        return ret;
3119}
3120
3121static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
3122                                        struct drm_dp_mst_port *port,
3123                                        int id,
3124                                        struct drm_dp_payload *payload)
3125{
3126        DRM_DEBUG_KMS("destroying payload %d\n", id);
3127        /* it's okay for these to fail */
3128        if (port) {
3129                drm_dp_payload_send_msg(mgr, port, id, 0);
3130        }
3131
3132        drm_dp_dpcd_write_payload(mgr, id, payload);
3133        payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
3134        return 0;
3135}
3136
3137static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
3138                                        int id,
3139                                        struct drm_dp_payload *payload)
3140{
3141        payload->payload_state = 0;
3142        return 0;
3143}
3144
3145/**
3146 * drm_dp_update_payload_part1() - Execute payload update part 1
3147 * @mgr: manager to use.
3148 *
3149 * This iterates over all proposed virtual channels, and tries to
3150 * allocate space in the link for them. For 0->slots transitions,
3151 * this step just writes the VCPI to the MST device. For slots->0
3152 * transitions, this writes the updated VCPIs and removes the
3153 * remote VC payloads.
3154 *
3155 * After calling this, the driver should generate ACT and payload
3156 * packets.
3157 */
3158int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
3159{
3160        struct drm_dp_payload req_payload;
3161        struct drm_dp_mst_port *port;
3162        int i, j;
3163        int cur_slots = 1;
3164
3165        mutex_lock(&mgr->payload_lock);
3166        for (i = 0; i < mgr->max_payloads; i++) {
3167                struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
3168                struct drm_dp_payload *payload = &mgr->payloads[i];
3169                bool put_port = false;
3170
3171                /* solve the current payloads - compare to the hw ones
3172                   - update the hw view */
3173                req_payload.start_slot = cur_slots;
3174                if (vcpi) {
3175                        port = container_of(vcpi, struct drm_dp_mst_port,
3176                                            vcpi);
3177
3178                        /* Validated ports don't matter if we're releasing
3179                         * VCPI
3180                         */
3181                        if (vcpi->num_slots) {
3182                                port = drm_dp_mst_topology_get_port_validated(
3183                                    mgr, port);
3184                                if (!port) {
3185                                        mutex_unlock(&mgr->payload_lock);
3186                                        return -EINVAL;
3187                                }
3188                                put_port = true;
3189                        }
3190
3191                        req_payload.num_slots = vcpi->num_slots;
3192                        req_payload.vcpi = vcpi->vcpi;
3193                } else {
3194                        port = NULL;
3195                        req_payload.num_slots = 0;
3196                }
3197
3198                payload->start_slot = req_payload.start_slot;
3199                /* work out what is required to happen with this payload */
3200                if (payload->num_slots != req_payload.num_slots) {
3201
3202                        /* need to push an update for this payload */
3203                        if (req_payload.num_slots) {
3204                                drm_dp_create_payload_step1(mgr, vcpi->vcpi,
3205                                                            &req_payload);
3206                                payload->num_slots = req_payload.num_slots;
3207                                payload->vcpi = req_payload.vcpi;
3208
3209                        } else if (payload->num_slots) {
3210                                payload->num_slots = 0;
3211                                drm_dp_destroy_payload_step1(mgr, port,
3212                                                             payload->vcpi,
3213                                                             payload);
3214                                req_payload.payload_state =
3215                                        payload->payload_state;
3216                                payload->start_slot = 0;
3217                        }
3218                        payload->payload_state = req_payload.payload_state;
3219                }
3220                cur_slots += req_payload.num_slots;
3221
3222                if (put_port)
3223                        drm_dp_mst_topology_put_port(port);
3224        }
3225
3226        for (i = 0; i < mgr->max_payloads; /* do nothing */) {
3227                if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) {
3228                        i++;
3229                        continue;
3230                }
3231
3232                DRM_DEBUG_KMS("removing payload %d\n", i);
3233                for (j = i; j < mgr->max_payloads - 1; j++) {
3234                        mgr->payloads[j] = mgr->payloads[j + 1];
3235                        mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
3236
3237                        if (mgr->proposed_vcpis[j] &&
3238                            mgr->proposed_vcpis[j]->num_slots) {
3239                                set_bit(j + 1, &mgr->payload_mask);
3240                        } else {
3241                                clear_bit(j + 1, &mgr->payload_mask);
3242                        }
3243                }
3244
3245                memset(&mgr->payloads[mgr->max_payloads - 1], 0,
3246                       sizeof(struct drm_dp_payload));
3247                mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
3248                clear_bit(mgr->max_payloads, &mgr->payload_mask);
3249        }
3250        mutex_unlock(&mgr->payload_lock);
3251
3252        return 0;
3253}
3254EXPORT_SYMBOL(drm_dp_update_payload_part1);
3255
3256/**
3257 * drm_dp_update_payload_part2() - Execute payload update part 2
3258 * @mgr: manager to use.
3259 *
3260 * This iterates over all proposed virtual channels, and tries to
3261 * allocate space in the link for them. For 0->slots transitions,
3262 * this step writes the remote VC payload commands. For slots->0
3263 * transitions, this just resets some internal state.
3264 */
3265int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
3266{
3267        struct drm_dp_mst_port *port;
3268        int i;
3269        int ret = 0;
3270        mutex_lock(&mgr->payload_lock);
3271        for (i = 0; i < mgr->max_payloads; i++) {
3272
3273                if (!mgr->proposed_vcpis[i])
3274                        continue;
3275
3276                port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
3277
3278                DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
3279                if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
3280                        ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
3281                } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
3282                        ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
3283                }
3284                if (ret) {
3285                        mutex_unlock(&mgr->payload_lock);
3286                        return ret;
3287                }
3288        }
3289        mutex_unlock(&mgr->payload_lock);
3290        return 0;
3291}
3292EXPORT_SYMBOL(drm_dp_update_payload_part2);
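/*
 * Illustrative sketch of the complete payload-update sequence in a driver's
 * commit path (assumed ordering, distilled from the kernel-doc above):
 *
 *	drm_dp_update_payload_part1(mgr);
 *	... enable/disable the stream so the source generates an ACT ...
 *	drm_dp_check_act_status(mgr);
 *	drm_dp_update_payload_part2(mgr);
 */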
3293
3294static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
3295                                 struct drm_dp_mst_port *port,
3296                                 int offset, int size, u8 *bytes)
3297{
3298        int len;
3299        int ret = 0;
3300        struct drm_dp_sideband_msg_tx *txmsg;
3301        struct drm_dp_mst_branch *mstb;
3302
3303        mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3304        if (!mstb)
3305                return -EINVAL;
3306
3307        txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3308        if (!txmsg) {
3309                ret = -ENOMEM;
3310                goto fail_put;
3311        }
3312
3313        len = build_dpcd_read(txmsg, port->port_num, offset, size);
3314        txmsg->dst = port->parent;
3315
3316        drm_dp_queue_down_tx(mgr, txmsg);
3317
3318        ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3319        if (ret < 0)
3320                goto fail_free;
3321
3322        /* DPCD read should never be NACKed */
3323        if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
3324                DRM_ERROR("mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n",
3325                          mstb, port->port_num, offset, size);
3326                ret = -EIO;
3327                goto fail_free;
3328        }
3329
3330        if (txmsg->reply.u.remote_dpcd_read_ack.num_bytes != size) {
3331                ret = -EPROTO;
3332                goto fail_free;
3333        }
3334
3335        ret = min_t(size_t, txmsg->reply.u.remote_dpcd_read_ack.num_bytes,
3336                    size);
3337        memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, ret);
3338
3339fail_free:
3340        kfree(txmsg);
3341fail_put:
3342        drm_dp_mst_topology_put_mstb(mstb);
3343
3344        return ret;
3345}
3346
3347static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
3348                                  struct drm_dp_mst_port *port,
3349                                  int offset, int size, u8 *bytes)
3350{
3351        int len;
3352        int ret;
3353        struct drm_dp_sideband_msg_tx *txmsg;
3354        struct drm_dp_mst_branch *mstb;
3355
3356        mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3357        if (!mstb)
3358                return -EINVAL;
3359
3360        txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3361        if (!txmsg) {
3362                ret = -ENOMEM;
3363                goto fail_put;
3364        }
3365
3366        len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
3367        txmsg->dst = mstb;
3368
3369        drm_dp_queue_down_tx(mgr, txmsg);
3370
3371        ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3372        if (ret > 0) {
3373                if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3374                        ret = -EIO;
3375                else
3376                        ret = 0;
3377        }
3378        kfree(txmsg);
3379fail_put:
3380        drm_dp_mst_topology_put_mstb(mstb);
3381        return ret;
3382}
3383
3384static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
3385{
3386        struct drm_dp_sideband_msg_reply_body reply;
3387
3388        reply.reply_type = DP_SIDEBAND_REPLY_ACK;
3389        reply.req_type = req_type;
3390        drm_dp_encode_sideband_reply(&reply, msg);
3391        return 0;
3392}
3393
3394static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
3395                                    struct drm_dp_mst_branch *mstb,
3396                                    int req_type, int seqno, bool broadcast)
3397{
3398        struct drm_dp_sideband_msg_tx *txmsg;
3399
3400        txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3401        if (!txmsg)
3402                return -ENOMEM;
3403
3404        txmsg->dst = mstb;
3405        txmsg->seqno = seqno;
3406        drm_dp_encode_up_ack_reply(txmsg, req_type);
3407
3408        mutex_lock(&mgr->qlock);
3409
3410        process_single_up_tx_qlock(mgr, txmsg);
3411
3412        mutex_unlock(&mgr->qlock);
3413
3414        kfree(txmsg);
3415        return 0;
3416}
3417
3418static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8 dp_link_count)
3419{
3420        if (dp_link_bw == 0 || dp_link_count == 0)
3421                DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
3422                              dp_link_bw, dp_link_count);
3423
3424        return dp_link_bw * dp_link_count / 2;
3425}
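/*
 * Worked example: for a link trained at HBR2 (link bw code 0x14, i.e.
 * 5.4Gb/s per lane) with 4 lanes, this yields 0x14 * 4 / 2 == 40, meaning
 * 40 PBN of bandwidth fit into each MTP time slot.
 */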
3426
3427/**
3428 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
3429 * @mgr: manager to set state for
3430 * @mst_state: true to enable MST on this connector - false to disable.
3431 *
3432 * This is called by the driver when it detects an MST capable device plugged
3433 * into a DP MST capable port, or when a DP MST capable device is unplugged.
3434 */
3435int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
3436{
3437        int ret = 0;
3438        struct drm_dp_mst_branch *mstb = NULL;
3439
3440        mutex_lock(&mgr->lock);
3441        if (mst_state == mgr->mst_state)
3442                goto out_unlock;
3443
3444        mgr->mst_state = mst_state;
3445        /* set the device into MST mode */
3446        if (mst_state) {
3447                WARN_ON(mgr->mst_primary);
3448
3449                /* get dpcd info */
3450                ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
3451                if (ret != DP_RECEIVER_CAP_SIZE) {
3452                        DRM_DEBUG_KMS("failed to read DPCD\n");
3453                        goto out_unlock;
3454                }
3455
3456                mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1],
3457                                                        mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
3458                if (mgr->pbn_div == 0) {
3459                        ret = -EINVAL;
3460                        goto out_unlock;
3461                }
3462
3463                /* add initial branch device at LCT 1 */
3464                mstb = drm_dp_add_mst_branch_device(1, NULL);
3465                if (mstb == NULL) {
3466                        ret = -ENOMEM;
3467                        goto out_unlock;
3468                }
3469                mstb->mgr = mgr;
3470
3471                /* give this the main reference */
3472                mgr->mst_primary = mstb;
3473                drm_dp_mst_topology_get_mstb(mgr->mst_primary);
3474
3475                ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3476                                                         DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
3477                if (ret < 0) {
3478                        goto out_unlock;
3479                }
3480
3481                {
3482                        struct drm_dp_payload reset_pay;
3483                        reset_pay.start_slot = 0;
3484                        reset_pay.num_slots = 0x3f;
3485                        drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
3486                }
3487
3488                queue_work(system_long_wq, &mgr->work);
3489
3490                ret = 0;
3491        } else {
3492                /* disable MST on the device */
3493                mstb = mgr->mst_primary;
3494                mgr->mst_primary = NULL;
3495                /* this can fail if the device is gone */
3496                drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
3497                ret = 0;
3498                memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
3499                mgr->payload_mask = 0;
3500                set_bit(0, &mgr->payload_mask);
3501                mgr->vcpi_mask = 0;
3502        }
3503
3504out_unlock:
3505        mutex_unlock(&mgr->lock);
3506        if (mstb)
3507                drm_dp_mst_topology_put_mstb(mstb);
3508        return ret;
3509
3510}
3511EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
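/*
 * Illustrative sketch (assumed driver code): the detect path usually flips
 * MST mode based on whether the sink advertises DP_MSTM_CAP, e.g.:
 *
 *	if (sink_can_mst && !mgr->mst_state)
 *		drm_dp_mst_topology_mgr_set_mst(mgr, true);
 *	else if (!sink_can_mst && mgr->mst_state)
 *		drm_dp_mst_topology_mgr_set_mst(mgr, false);
 *
 * where sink_can_mst is a hypothetical flag derived from reading
 * DP_MSTM_CAP out of the sink's DPCD.
 */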
3512
3513static void
3514drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb)
3515{
3516        struct drm_dp_mst_port *port;
3517
3518        /* The link address will need to be re-sent on resume */
3519        mstb->link_address_sent = false;
3520
3521        list_for_each_entry(port, &mstb->ports, next) {
3522                /* The PBN for each port will also need to be re-probed */
3523                port->available_pbn = 0;
3524
3525                if (port->mstb)
3526                        drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb);
3527        }
3528}
3529
3530/**
3531 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
3532 * @mgr: manager to suspend
3533 *
3534 * This function tells the MST device that we can't handle UP messages
3535 * anymore. This should stop it from sending any since we are suspended.
3536 */
3537void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
3538{
3539        mutex_lock(&mgr->lock);
3540        drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3541                           DP_MST_EN | DP_UPSTREAM_IS_SRC);
3542        mutex_unlock(&mgr->lock);
3543        flush_work(&mgr->up_req_work);
3544        flush_work(&mgr->work);
3545        flush_work(&mgr->delayed_destroy_work);
3546
3547        mutex_lock(&mgr->lock);
3548        if (mgr->mst_state && mgr->mst_primary)
3549                drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary);
3550        mutex_unlock(&mgr->lock);
3551}
3552EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
3553
3554/**
3555 * drm_dp_mst_topology_mgr_resume() - resume the MST manager
3556 * @mgr: manager to resume
3557 * @sync: whether or not to perform topology reprobing synchronously
3558 *
3559 * This will fetch the DPCD to check whether the device is still there;
3560 * if it is, it will rewrite the MSTM control bits and return.
3561 *
3562 * If the device is gone, this returns -1 and the driver should do a
3563 * full MST reprobe, in case we were undocked.
3564 *
3565 * During system resume (where it is assumed that the driver will be calling
3566 * drm_atomic_helper_resume()) this function should be called beforehand with
3567 * @sync set to true. In contexts like runtime resume where the driver is not
3568 * expected to be calling drm_atomic_helper_resume(), this function should be
3569 * called with @sync set to false in order to avoid deadlocking.
3570 *
3571 * Returns: -1 if the MST topology was removed while we were suspended, 0
3572 * otherwise.
3573 */
3574int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
3575                                   bool sync)
3576{
3577        int ret;
3578        u8 guid[16];
3579
3580        mutex_lock(&mgr->lock);
3581        if (!mgr->mst_primary)
3582                goto out_fail;
3583
3584        ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd,
3585                               DP_RECEIVER_CAP_SIZE);
3586        if (ret != DP_RECEIVER_CAP_SIZE) {
3587                DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
3588                goto out_fail;
3589        }
3590
3591        ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3592                                 DP_MST_EN |
3593                                 DP_UP_REQ_EN |
3594                                 DP_UPSTREAM_IS_SRC);
3595        if (ret < 0) {
3596                DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
3597                goto out_fail;
3598        }
3599
3600        /* Some hubs forget their guids after they resume */
3601        ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
3602        if (ret != 16) {
3603                DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
3604                goto out_fail;
3605        }
3606        drm_dp_check_mstb_guid(mgr->mst_primary, guid);
3607
3608        /*
3609         * For the final step of resuming the topology, we need to bring the
3610         * state of our in-memory topology back into sync with reality. So,
3611         * restart the probing process as if we're probing a new hub
3612         */
3613        queue_work(system_long_wq, &mgr->work);
3614        mutex_unlock(&mgr->lock);
3615
3616        if (sync) {
3617                DRM_DEBUG_KMS("Waiting for link probe work to finish re-syncing topology...\n");
3618                flush_work(&mgr->work);
3619        }
3620
3621        return 0;
3622
3623out_fail:
3624        mutex_unlock(&mgr->lock);
3625        return -1;
3626}
3627EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
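/*
 * Illustrative sketch of a system-resume path (assumed driver code),
 * showing how the return value is meant to be consumed:
 *
 *	ret = drm_dp_mst_topology_mgr_resume(mgr, true);
 *	if (ret)
 *		drm_dp_mst_topology_mgr_set_mst(mgr, false);
 */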
3628
3629static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
3630{
3631        int len;
3632        u8 replyblock[32];
3633        int replylen, origlen, curreply;
3634        int ret;
3635        struct drm_dp_sideband_msg_rx *msg;
3636        int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
3637        msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;
3638
3639        len = min(mgr->max_dpcd_transaction_bytes, 16);
3640        ret = drm_dp_dpcd_read(mgr->aux, basereg,
3641                               replyblock, len);
3642        if (ret != len) {
3643                DRM_DEBUG_KMS("failed to read DPCD sideband msg (len %d, ret %d)\n", len, ret);
3644                return false;
3645        }
3646        ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
3647        if (!ret) {
3648                DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
3649                return false;
3650        }
3651        replylen = msg->curchunk_len + msg->curchunk_hdrlen;
3652
3653        origlen = replylen;
3654        replylen -= len;
3655        curreply = len;
3656        while (replylen > 0) {
3657                len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
3658                ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
3659                                    replyblock, len);
3660                if (ret != len) {
3661                        DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
3662                                      len, ret);
3663                        return false;
3664                }
3665
3666                ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
3667                if (!ret) {
3668                        DRM_DEBUG_KMS("failed to build sideband msg\n");
3669                        return false;
3670                }
3671
3672                curreply += len;
3673                replylen -= len;
3674        }
3675        return true;
3676}
3677
3678static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
3679{
3680        struct drm_dp_sideband_msg_tx *txmsg;
3681        struct drm_dp_mst_branch *mstb;
3682        struct drm_dp_sideband_msg_hdr *hdr = &mgr->down_rep_recv.initial_hdr;
3683        int slot = -1;
3684
3685        if (!drm_dp_get_one_sb_msg(mgr, false))
3686                goto clear_down_rep_recv;
3687
3688        if (!mgr->down_rep_recv.have_eomt)
3689                return 0;
3690
3691        mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
3692        if (!mstb) {
3693                DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
3694                              hdr->lct);
3695                goto clear_down_rep_recv;
3696        }
3697
3698        /* find the message */
3699        slot = hdr->seqno;
3700        mutex_lock(&mgr->qlock);
3701        txmsg = mstb->tx_slots[slot];
3702        /* the slot itself is cleared later, once the reply is parsed */
3703        mutex_unlock(&mgr->qlock);
3704
3705        if (!txmsg) {
3706                DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
3707                              mstb, hdr->seqno, hdr->lct, hdr->rad[0],
3708                              mgr->down_rep_recv.msg[0]);
3709                goto no_msg;
3710        }
3711
3712        drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
3713
3714        if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3715                DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
3716                              txmsg->reply.req_type,
3717                              drm_dp_mst_req_type_str(txmsg->reply.req_type),
3718                              txmsg->reply.u.nak.reason,
3719                              drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
3720                              txmsg->reply.u.nak.nak_data);
3721
3722        memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3723        drm_dp_mst_topology_put_mstb(mstb);
3724
3725        mutex_lock(&mgr->qlock);
3726        txmsg->state = DRM_DP_SIDEBAND_TX_RX;
3727        mstb->tx_slots[slot] = NULL;
3728        mgr->is_waiting_for_dwn_reply = false;
3729        mutex_unlock(&mgr->qlock);
3730
3731        wake_up_all(&mgr->tx_waitq);
3732
3733        return 0;
3734
3735no_msg:
3736        drm_dp_mst_topology_put_mstb(mstb);
3737clear_down_rep_recv:
3738        mutex_lock(&mgr->qlock);
3739        mgr->is_waiting_for_dwn_reply = false;
3740        mutex_unlock(&mgr->qlock);
3741        memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3742
3743        return 0;
3744}
3745
3746static inline bool
3747drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
3748                          struct drm_dp_pending_up_req *up_req)
3749{
3750        struct drm_dp_mst_branch *mstb = NULL;
3751        struct drm_dp_sideband_msg_req_body *msg = &up_req->msg;
3752        struct drm_dp_sideband_msg_hdr *hdr = &up_req->hdr;
3753        bool hotplug = false;
3754
3755        if (hdr->broadcast) {
3756                const u8 *guid = NULL;
3757
3758                if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY)
3759                        guid = msg->u.conn_stat.guid;
3760                else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY)
3761                        guid = msg->u.resource_stat.guid;
3762
3763                mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
3764        } else {
3765                mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
3766        }
3767
3768        if (!mstb) {
3769                DRM_DEBUG_KMS("Got MST up req from unknown device %d\n",
3770                              hdr->lct);
3771                return false;
3772        }
3773
3774        /* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */
3775        if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) {
3776                drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
3777                hotplug = true;
3778        }
3779
3780        drm_dp_mst_topology_put_mstb(mstb);
3781        return hotplug;
3782}
3783
3784static void drm_dp_mst_up_req_work(struct work_struct *work)
3785{
3786        struct drm_dp_mst_topology_mgr *mgr =
3787                container_of(work, struct drm_dp_mst_topology_mgr,
3788                             up_req_work);
3789        struct drm_dp_pending_up_req *up_req;
3790        bool send_hotplug = false;
3791
3792        mutex_lock(&mgr->probe_lock);
3793        while (true) {
3794                mutex_lock(&mgr->up_req_lock);
3795                up_req = list_first_entry_or_null(&mgr->up_req_list,
3796                                                  struct drm_dp_pending_up_req,
3797                                                  next);
3798                if (up_req)
3799                        list_del(&up_req->next);
3800                mutex_unlock(&mgr->up_req_lock);
3801
3802                if (!up_req)
3803                        break;
3804
3805                send_hotplug |= drm_dp_mst_process_up_req(mgr, up_req);
3806                kfree(up_req);
3807        }
3808        mutex_unlock(&mgr->probe_lock);
3809
3810        if (send_hotplug)
3811                drm_kms_helper_hotplug_event(mgr->dev);
3812}
3813
3814static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
3815{
3816        struct drm_dp_sideband_msg_hdr *hdr = &mgr->up_req_recv.initial_hdr;
3817        struct drm_dp_pending_up_req *up_req;
3818        bool seqno;
3819
3820        if (!drm_dp_get_one_sb_msg(mgr, true))
3821                goto out;
3822
3823        if (!mgr->up_req_recv.have_eomt)
3824                return 0;
3825
3826        up_req = kzalloc(sizeof(*up_req), GFP_KERNEL);
3827        if (!up_req) {
3828                DRM_ERROR("Not enough memory to process MST up req\n");
3829                return -ENOMEM;
3830        }
3831        INIT_LIST_HEAD(&up_req->next);
3832
3833        seqno = hdr->seqno;
3834        drm_dp_sideband_parse_req(&mgr->up_req_recv, &up_req->msg);
3835
3836        if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY &&
3837            up_req->msg.req_type != DP_RESOURCE_STATUS_NOTIFY) {
3838                DRM_DEBUG_KMS("Received unknown up req type, ignoring: %x\n",
3839                              up_req->msg.req_type);
3840                kfree(up_req);
3841                goto out;
3842        }
3843
3844        drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type,
3845                                 seqno, false);
3846
3847        if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
3848                const struct drm_dp_connection_status_notify *conn_stat =
3849                        &up_req->msg.u.conn_stat;
3850
3851                DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
3852                              conn_stat->port_number,
3853                              conn_stat->legacy_device_plug_status,
3854                              conn_stat->displayport_device_plug_status,
3855                              conn_stat->message_capability_status,
3856                              conn_stat->input_port,
3857                              conn_stat->peer_device_type);
3858        } else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
3859                const struct drm_dp_resource_status_notify *res_stat =
3860                        &up_req->msg.u.resource_stat;
3861
3862                DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n",
3863                              res_stat->port_number,
3864                              res_stat->available_pbn);
3865        }
3866
3867        up_req->hdr = *hdr;
3868        mutex_lock(&mgr->up_req_lock);
3869        list_add_tail(&up_req->next, &mgr->up_req_list);
3870        mutex_unlock(&mgr->up_req_lock);
3871        queue_work(system_long_wq, &mgr->up_req_work);
3872
3873out:
3874        memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3875        return 0;
3876}
3877
3878/**
3879 * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
3880 * @mgr: manager to notify irq for.
3881 * @esi: 4 bytes from SINK_COUNT_ESI
3882 * @handled: whether the hpd interrupt was consumed or not
3883 *
3884 * This should be called from the driver when it detects a short IRQ,
3885 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
3886 * topology manager will process the sideband messages received as a result
3887 * of this.
3888 */
3889int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
3890{
3891        int ret = 0;
3892        int sc;
3893        *handled = false;
3894        sc = esi[0] & 0x3f;
3895
3896        if (sc != mgr->sink_count) {
3897                mgr->sink_count = sc;
3898                *handled = true;
3899        }
3900
3901        if (esi[1] & DP_DOWN_REP_MSG_RDY) {
3902                ret = drm_dp_mst_handle_down_rep(mgr);
3903                *handled = true;
3904        }
3905
3906        if (esi[1] & DP_UP_REQ_MSG_RDY) {
3907                ret |= drm_dp_mst_handle_up_req(mgr);
3908                *handled = true;
3909        }
3910
3911        drm_dp_mst_kick_tx(mgr);
3912        return ret;
3913}
3914EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
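/*
 * Illustrative sketch of a short-HPD handler (assumed driver code, with
 * error handling elided): read the ESI block, hand it to the topology
 * manager, then ack the vector bits that were serviced:
 *
 *	u8 esi[DP_DPRX_ESI_LEN];
 *	bool handled;
 *
 *	drm_dp_dpcd_read(mgr->aux, DP_SINK_COUNT_ESI, esi, DP_DPRX_ESI_LEN);
 *	drm_dp_mst_hpd_irq(mgr, esi, &handled);
 *	if (handled)
 *		drm_dp_dpcd_write(mgr->aux, DP_SINK_COUNT_ESI + 1, &esi[1], 3);
 */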
3915
3916/**
3917 * drm_dp_mst_detect_port() - get connection status for an MST port
3918 * @connector: DRM connector for this port
3919 * @ctx: The acquisition context to use for grabbing locks
3920 * @mgr: manager for this port
3921 * @port: pointer to a port
3922 *
3923 * This returns the current connection state for a port.
3924 */
3925int
3926drm_dp_mst_detect_port(struct drm_connector *connector,
3927                       struct drm_modeset_acquire_ctx *ctx,
3928                       struct drm_dp_mst_topology_mgr *mgr,
3929                       struct drm_dp_mst_port *port)
3930{
3931        int ret;
3932
3933        /* we need to search for the port in the mgr in case it's gone */
3934        port = drm_dp_mst_topology_get_port_validated(mgr, port);
3935        if (!port)
3936                return connector_status_disconnected;
3937
3938        ret = drm_modeset_lock(&mgr->base.lock, ctx);
3939        if (ret)
3940                goto out;
3941
3942        ret = connector_status_disconnected;
3943
3944        if (!port->ddps)
3945                goto out;
3946
3947        switch (port->pdt) {
3948        case DP_PEER_DEVICE_NONE:
3949        case DP_PEER_DEVICE_MST_BRANCHING:
3950                if (!port->mcs)
3951                        ret = connector_status_connected;
3952                break;
3953
3954        case DP_PEER_DEVICE_SST_SINK:
3955                ret = connector_status_connected;
3956                /* for logical ports - cache the EDID */
3957                if (port->port_num >= 8 && !port->cached_edid)
3958                        port->cached_edid = drm_get_edid(connector,
3959                                                         &port->aux.ddc);
3960                break;
3961        case DP_PEER_DEVICE_DP_LEGACY_CONV:
3962                if (port->ldps)
3963                        ret = connector_status_connected;
3964                break;
3965        }
3966out:
3967        drm_dp_mst_topology_put_port(port);
3968        return ret;
3969}
3970EXPORT_SYMBOL(drm_dp_mst_detect_port);
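/*
 * Illustrative sketch (assumed): MST connectors typically route their
 * &drm_connector_helper_funcs.detect_ctx hook straight to this helper:
 *
 *	static int my_mst_detect(struct drm_connector *connector,
 *				 struct drm_modeset_acquire_ctx *ctx,
 *				 bool force)
 *	{
 *		struct my_connector *c = to_my_connector(connector);
 *
 *		return drm_dp_mst_detect_port(connector, ctx, c->mgr, c->port);
 *	}
 *
 * my_connector and to_my_connector() are hypothetical driver types.
 */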
3971
3972/**
3973 * drm_dp_mst_port_has_audio() - Check whether port has audio capability or not
3974 * @mgr: manager for this port
3975 * @port: unverified pointer to a port.
3976 *
3977 * This returns whether the port supports audio or not.
3978 */
3979bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
3980                                        struct drm_dp_mst_port *port)
3981{
3982        bool ret = false;
3983
3984        port = drm_dp_mst_topology_get_port_validated(mgr, port);
3985        if (!port)
3986                return ret;
3987        ret = port->has_audio;
3988        drm_dp_mst_topology_put_port(port);
3989        return ret;
3990}
3991EXPORT_SYMBOL(drm_dp_mst_port_has_audio);
3992
3993/**
3994 * drm_dp_mst_get_edid() - get EDID for an MST port
3995 * @connector: toplevel connector to get EDID for
3996 * @mgr: manager for this port
3997 * @port: unverified pointer to a port.
3998 *
3999 * This returns an EDID for the port connected to a connector.
4000 * It validates the pointer still exists so the caller doesn't require a
4001 * reference.
4002 */
4003struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
4004{
4005        struct edid *edid = NULL;
4006
4007        /* we need to search for the port in the mgr in case it's gone */
4008        port = drm_dp_mst_topology_get_port_validated(mgr, port);
4009        if (!port)
4010                return NULL;
4011
4012        if (port->cached_edid) {
4013                edid = drm_edid_duplicate(port->cached_edid);
4014        } else {
4015                edid = drm_get_edid(connector, &port->aux.ddc);
4016        }
4017        port->has_audio = drm_detect_monitor_audio(edid);
4018        drm_dp_mst_topology_put_port(port);
4019        return edid;
4020}
4021EXPORT_SYMBOL(drm_dp_mst_get_edid);
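/*
 * Illustrative sketch (assumed driver code): a connector's ->get_modes()
 * hook would consume this as
 *
 *	edid = drm_dp_mst_get_edid(connector, mgr, port);
 *	drm_connector_update_edid_property(connector, edid);
 *	ret = drm_add_edid_modes(connector, edid);
 *	kfree(edid);
 */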
4022
4023/**
4024 * drm_dp_find_vcpi_slots() - Find VCPI slots for this PBN value
4025 * @mgr: manager to use
4026 * @pbn: payload bandwidth to convert into slots.
4027 *
4028 * Calculate the number of VCPI slots that will be required for the given PBN
4029 * value. This function is deprecated, and should not be used in atomic
4030 * drivers.
4031 *
4032 * RETURNS:
4033 * The total slots required for this port, or a negative error code.
4034 */
4035int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
4036                           int pbn)
4037{
4038        int num_slots;
4039
4040        num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
4041
4042        /* max. time slots - one slot for MTP header */
4043        if (num_slots > 63)
4044                return -ENOSPC;
4045        return num_slots;
4046}
4047EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
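/*
 * Worked example: with pbn_div == 40 (an HBR2 x4 link, see
 * drm_dp_get_vc_payload_bw() above), a stream requiring 2520 PBN needs
 * DIV_ROUND_UP(2520, 40) == 63 slots, exactly the maximum; anything above
 * 2520 PBN would make this return -ENOSPC.
 */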
4048
4049static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
4050                            struct drm_dp_vcpi *vcpi, int pbn, int slots)
4051{
4052        int ret;
4053
4054        /* max. time slots - one slot for MTP header */
4055        if (slots > 63)
4056                return -ENOSPC;
4057
4058        vcpi->pbn = pbn;
4059        vcpi->aligned_pbn = slots * mgr->pbn_div;
4060        vcpi->num_slots = slots;
4061
4062        ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
4063        if (ret < 0)
4064                return ret;
4065        return 0;
4066}
4067
4068/**
4069 * drm_dp_atomic_find_vcpi_slots() - Find and add VCPI slots to the state
4070 * @state: global atomic state
4071 * @mgr: MST topology manager for the port
4072 * @port: port to find vcpi slots for
4073 * @pbn: bandwidth required for the mode in PBN
4074 *
4075 * Allocates VCPI slots to @port, replacing any previous VCPI allocations it
4076 * may have had. Any atomic drivers which support MST must call this function
4077 * in their &drm_encoder_helper_funcs.atomic_check() callback to change the
4078 * current VCPI allocation for the new state, but only when
4079 * &drm_crtc_state.mode_changed or &drm_crtc_state.connectors_changed is set
4080 * to ensure compatibility with userspace applications that still use the
4081 * legacy modesetting UAPI.
4082 *
4083 * Allocations set by this function are not checked against the bandwidth
4084 * restraints of @mgr until the driver calls drm_dp_mst_atomic_check().
4085 *
4086 * Additionally, it is OK to call this function multiple times on the same
4087 * @port as needed. It is not OK however, to call this function and
4088 * drm_dp_atomic_release_vcpi_slots() in the same atomic check phase.
4089 *
4090 * See also:
4091 * drm_dp_atomic_release_vcpi_slots()
4092 * drm_dp_mst_atomic_check()
4093 *
4094 * Returns:
4095 * Total slots in the atomic state assigned for this port, or a negative error
4096 * code if the port no longer exists
4097 */
4098int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
4099                                  struct drm_dp_mst_topology_mgr *mgr,
4100                                  struct drm_dp_mst_port *port, int pbn)
4101{
4102        struct drm_dp_mst_topology_state *topology_state;
4103        struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
4104        int prev_slots, req_slots;
4105
4106        topology_state = drm_atomic_get_mst_topology_state(state, mgr);
4107        if (IS_ERR(topology_state))
4108                return PTR_ERR(topology_state);
4109
4110        /* Find the current allocation for this port, if any */
4111        list_for_each_entry(pos, &topology_state->vcpis, next) {
4112                if (pos->port == port) {
4113                        vcpi = pos;
4114                        prev_slots = vcpi->vcpi;
4115
4116                        /*
4117                         * This should never happen, unless the driver tries
4118                         * releasing and allocating the same VCPI allocation,
4119                         * which is an error
4120                         */
4121                        if (WARN_ON(!prev_slots)) {
4122                                DRM_ERROR("cannot allocate and release VCPI on [MST PORT:%p] in the same state\n",
4123                                          port);
4124                                return -EINVAL;
4125                        }
4126
4127                        break;
4128                }
4129        }
4130        if (!vcpi)
4131                prev_slots = 0;
4132
4133        req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
4134
4135        DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n",
4136                         port->connector->base.id, port->connector->name,
4137                         port, prev_slots, req_slots);
4138
4139        /* Add the new allocation to the state */
4140        if (!vcpi) {
4141                vcpi = kzalloc(sizeof(*vcpi), GFP_KERNEL);
4142                if (!vcpi)
4143                        return -ENOMEM;
4144
4145                drm_dp_mst_get_port_malloc(port);
4146                vcpi->port = port;
4147                list_add(&vcpi->next, &topology_state->vcpis);
4148        }
4149        vcpi->vcpi = req_slots;
4150
4151        return req_slots;
4152}
4153EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
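/*
 * Illustrative sketch of an atomic encoder check (assumed driver code; pbn
 * would come from something like drm_dp_calc_pbn_mode()):
 *
 *	if (crtc_state->mode_changed || crtc_state->connectors_changed) {
 *		slots = drm_dp_atomic_find_vcpi_slots(state, mgr, port, pbn);
 *		if (slots < 0)
 *			return slots;
 *	}
 */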
4154
4155/**
4156 * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots
4157 * @state: global atomic state
4158 * @mgr: MST topology manager for the port
4159 * @port: The port to release the VCPI slots from
4160 *
4161 * Releases any VCPI slots that have been allocated to a port in the atomic
4162 * state. Any atomic drivers which support MST must call this function in
4163 * their &drm_connector_helper_funcs.atomic_check() callback when the
4164 * connector will no longer have VCPI allocated (e.g. because its CRTC was
4165 * removed) when it had VCPI allocated in the previous atomic state.
4166 *
4167 * It is OK to call this even if @port has been removed from the system.
4168 * Additionally, it is OK to call this function multiple times on the same
4169 * @port as needed. It is not OK however, to call this function and
4170 * drm_dp_atomic_find_vcpi_slots() on the same @port in a single atomic check
4171 * phase.
4172 *
4173 * See also:
4174 * drm_dp_atomic_find_vcpi_slots()
4175 * drm_dp_mst_atomic_check()
4176 *
4177 * Returns:
4178 * 0 if all slots for this port were added back to
4179 * &drm_dp_mst_topology_state.avail_slots or negative error code
4180 */
4181int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
4182                                     struct drm_dp_mst_topology_mgr *mgr,
4183                                     struct drm_dp_mst_port *port)
4184{
4185        struct drm_dp_mst_topology_state *topology_state;
4186        struct drm_dp_vcpi_allocation *pos;
4187        bool found = false;
4188
4189        topology_state = drm_atomic_get_mst_topology_state(state, mgr);
4190        if (IS_ERR(topology_state))
4191                return PTR_ERR(topology_state);
4192
4193        list_for_each_entry(pos, &topology_state->vcpis, next) {
4194                if (pos->port == port) {
4195                        found = true;
4196                        break;
4197                }
4198        }
4199        if (WARN_ON(!found)) {
4200                DRM_ERROR("no VCPI for [MST PORT:%p] found in mst state %p\n",
4201                          port, &topology_state->base);
4202                return -EINVAL;
4203        }
4204
4205        DRM_DEBUG_ATOMIC("[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi);
4206        if (pos->vcpi) {
4207                drm_dp_mst_put_port_malloc(port);
4208                pos->vcpi = 0;
4209        }
4210
4211        return 0;
4212}
4213EXPORT_SYMBOL(drm_dp_atomic_release_vcpi_slots);
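/*
 * Illustrative sketch of the matching release in a connector's
 * atomic_check (assumed driver code; the real condition for "the connector
 * is losing its CRTC" may be more involved):
 *
 *	if (old_conn_state->crtc && !new_conn_state->crtc)
 *		return drm_dp_atomic_release_vcpi_slots(state, mgr, port);
 */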
4214
4215/**
4216 * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
4217 * @mgr: manager for this port
4218 * @port: port to allocate a virtual channel for.
4219 * @pbn: payload bandwidth number to request
4220 * @slots: returned number of slots for this PBN.
4221 */
4222bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
4223                              struct drm_dp_mst_port *port, int pbn, int slots)
4224{
4225        int ret;
4226
4227        port = drm_dp_mst_topology_get_port_validated(mgr, port);
4228        if (!port)
4229                return false;
4230
4231        if (slots < 0)
4232                return false;
4233
4234        if (port->vcpi.vcpi > 0) {
4235                DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n",
4236                              port->vcpi.vcpi, port->vcpi.pbn, pbn);
4237                if (pbn == port->vcpi.pbn) {
4238                        drm_dp_mst_topology_put_port(port);
4239                        return true;
4240                }
4241        }
4242
4243        ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
4244        if (ret) {
4245                DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n",
4246                              DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
4247                goto out;
4248        }
4249        DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
4250                      pbn, port->vcpi.num_slots);
4251
4252        /* Keep port allocated until its payload has been removed */
4253        drm_dp_mst_get_port_malloc(port);
4254        drm_dp_mst_topology_put_port(port);
4255        return true;
4256out:
4257        return false;
4258}
4259EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
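/*
 * Illustrative sketch of the legacy (non-atomic) allocation flow (assumed):
 *
 *	slots = drm_dp_find_vcpi_slots(mgr, pbn);
 *	if (slots < 0 || !drm_dp_mst_allocate_vcpi(mgr, port, pbn, slots))
 *		return -EINVAL;
 *
 * after which drm_dp_update_payload_part1() programs the new allocation.
 */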
4260
4261int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
4262{
4263        int slots = 0;
4264        port = drm_dp_mst_topology_get_port_validated(mgr, port);
4265        if (!port)
4266                return slots;
4267
4268        slots = port->vcpi.num_slots;
4269        drm_dp_mst_topology_put_port(port);
4270        return slots;
4271}
4272EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
4273
4274/**
4275 * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
4276 * @mgr: manager for this port
4277 * @port: unverified pointer to a port.
4278 *
 * This just resets the number of slots for the port's VCPI for later programming.
4280 */
4281void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
4282{
        /*
         * A port with a VCPI will remain allocated until the VCPI is
         * released, so no validated reference is needed here.
         */
4287
4288        port->vcpi.num_slots = 0;
4289}
4290EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
4291
4292/**
4293 * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
4294 * @mgr: manager for this port
4295 * @port: port to deallocate vcpi for
4296 *
4297 * This can be called unconditionally, regardless of whether
4298 * drm_dp_mst_allocate_vcpi() succeeded or not.
4299 */
4300void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
4301                                struct drm_dp_mst_port *port)
4302{
4303        if (!port->vcpi.vcpi)
4304                return;
4305
4306        drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
4307        port->vcpi.num_slots = 0;
4308        port->vcpi.pbn = 0;
4309        port->vcpi.aligned_pbn = 0;
4310        port->vcpi.vcpi = 0;
4311        drm_dp_mst_put_port_malloc(port);
4312}
4313EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
4314
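/*
 * Usage sketch (hypothetical, legacy commit path): a typical disable
 * sequence resets the slot count, updates the payload table in two halves
 * around stopping the stream, and only then drops the VCPI itself.
 */
#if 0	/* illustrative only */
static void example_disable_stream(struct drm_dp_mst_topology_mgr *mgr,
                                   struct drm_dp_mst_port *port)
{
        drm_dp_mst_reset_vcpi_slots(mgr, port);
        drm_dp_update_payload_part1(mgr);

        /* ... stop the stream/transcoder and wait for the ACT ... */
        drm_dp_check_act_status(mgr);
        drm_dp_update_payload_part2(mgr);

        drm_dp_mst_deallocate_vcpi(mgr, port);
}
#endif
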
4315static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
4316                                     int id, struct drm_dp_payload *payload)
4317{
4318        u8 payload_alloc[3], status;
4319        int ret;
4320        int retries = 0;
4321
4322        drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
4323                           DP_PAYLOAD_TABLE_UPDATED);
4324
4325        payload_alloc[0] = id;
4326        payload_alloc[1] = payload->start_slot;
4327        payload_alloc[2] = payload->num_slots;
4328
4329        ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
4330        if (ret != 3) {
4331                DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
4332                goto fail;
4333        }
4334
4335retry:
4336        ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
4337        if (ret < 0) {
4338                DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
4339                goto fail;
4340        }
4341
4342        if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
4343                retries++;
4344                if (retries < 20) {
4345                        usleep_range(10000, 20000);
4346                        goto retry;
4347                }
                DRM_DEBUG_KMS("payload table not updated, status %d\n", status);
4349                ret = -EINVAL;
4350                goto fail;
4351        }
4352        ret = 0;
4353fail:
4354        return ret;
4355}
4356
/**
 * drm_dp_check_act_status() - Check ACT handled status.
 * @mgr: manager to use
 *
 * Poll the payload status bits in the DPCD until the sink signals that the
 * Allocation Change Trigger (ACT) has been handled.
 *
 * Returns: 0 if ACT was handled, or a negative error code otherwise.
 */
4364int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
4365{
4366        u8 status;
4367        int ret;
4368        int count = 0;
4369
4370        do {
4371                ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
4372
4373                if (ret < 0) {
4374                        DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
4375                        goto fail;
4376                }
4377
4378                if (status & DP_PAYLOAD_ACT_HANDLED)
4379                        break;
4380                count++;
4381                udelay(100);
4382
4383        } while (count < 30);
4384
4385        if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
                DRM_DEBUG_KMS("failed to get ACT, status %d after %d retries\n", status, count);
4387                ret = -EINVAL;
4388                goto fail;
4389        }
4390        return 0;
4391fail:
4392        return ret;
4393}
4394EXPORT_SYMBOL(drm_dp_check_act_status);
4395
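/*
 * Usage sketch (hypothetical, legacy commit path): on the enable side the
 * ACT poll sits between switching the stream on and the second half of the
 * payload-table update, mirroring the disable sequence sketched earlier.
 */
#if 0	/* illustrative only */
static void example_enable_payload(struct drm_dp_mst_topology_mgr *mgr)
{
        drm_dp_update_payload_part1(mgr);

        /* ... switch the stream/transcoder on ... */

        drm_dp_check_act_status(mgr);
        drm_dp_update_payload_part2(mgr);
}
#endif
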
/**
 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
 * @clock: dot clock for the mode, in kHz
 * @bpp: bits per pixel for the mode
 *
 * This uses the formula in the spec to calculate the PBN value for a mode.
 *
 * Returns: the PBN for the mode.
 */
4403int drm_dp_calc_pbn_mode(int clock, int bpp)
4404{
        /*
         * margin 5300ppm + 300ppm ~ 0.6% as per spec; the factor is 1.006.
         * The PBN unit of 54/64 Mbytes/sec is an arbitrary unit chosen as a
         * common multiplier that renders an integer PBN for all link rate
         * and lane count combinations. With peak_kbps = clock * bpp,
         * calculate:
         * peak_kbps *= 1006/1000    -- add the 0.6% margin
         * peak_kbps /= 8 * 1000     -- convert kbits/sec to Mbytes/sec
         * peak_kbps *= 64/54        -- convert to PBN units of 54/64 MB/s
         */
4415        return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006),
4416                                8 * 54 * 1000 * 1000);
4417}
4418EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
4419
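/*
 * Worked example (informative): a 1920x1080@60 mode has a 148500 kHz dot
 * clock, so at 24 bpp peak_kbps = 148500 * 24 = 3564000. Then
 * 3564000 * 64 * 1006 / (8 * 54 * 1000 * 1000) = 531.17..., which
 * DIV_ROUND_UP_ULL() rounds up to a PBN of 532.
 */
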
/* we want to kick the TX after we've acked the up/down IRQs. */
4421static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
4422{
4423        queue_work(system_long_wq, &mgr->tx_work);
4424}
4425
4426static void drm_dp_mst_dump_mstb(struct seq_file *m,
4427                                 struct drm_dp_mst_branch *mstb)
4428{
4429        struct drm_dp_mst_port *port;
4430        int tabs = mstb->lct;
4431        char prefix[10];
4432        int i;
4433
4434        for (i = 0; i < tabs; i++)
4435                prefix[i] = '\t';
4436        prefix[i] = '\0';
4437
4438        seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
4439        list_for_each_entry(port, &mstb->ports, next) {
                seq_printf(m, "%sport: %d: input: %d: pdt: %d, ddps: %d ldps: %d, sdp: %d/%d, %p, conn: %p\n",
                           prefix, port->port_num, port->input, port->pdt,
                           port->ddps, port->ldps, port->num_sdp_streams,
                           port->num_sdp_stream_sinks, port, port->connector);
4441                if (port->mstb)
4442                        drm_dp_mst_dump_mstb(m, port->mstb);
4443        }
4444}
4445
4446#define DP_PAYLOAD_TABLE_SIZE           64
4447
4448static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
4449                                  char *buf)
4450{
4451        int i;
4452
4453        for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
4454                if (drm_dp_dpcd_read(mgr->aux,
4455                                     DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
4456                                     &buf[i], 16) != 16)
4457                        return false;
4458        }
4459        return true;
4460}
4461
4462static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
4463                               struct drm_dp_mst_port *port, char *name,
4464                               int namelen)
4465{
4466        struct edid *mst_edid;
4467
4468        mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
4469        drm_edid_get_monitor_name(mst_edid, name, namelen);
4470}
4471
/**
 * drm_dp_mst_dump_topology() - dump topology to seq file.
 * @m: seq_file to dump output to
 * @mgr: manager to dump current topology for.
 *
 * Helper to dump the MST topology to a seq_file for debugfs.
 */
4479void drm_dp_mst_dump_topology(struct seq_file *m,
4480                              struct drm_dp_mst_topology_mgr *mgr)
4481{
4482        int i;
4483        struct drm_dp_mst_port *port;
4484
4485        mutex_lock(&mgr->lock);
4486        if (mgr->mst_primary)
4487                drm_dp_mst_dump_mstb(m, mgr->mst_primary);
4488
4489        /* dump VCPIs */
4490        mutex_unlock(&mgr->lock);
4491
4492        mutex_lock(&mgr->payload_lock);
4493        seq_printf(m, "vcpi: %lx %lx %d\n", mgr->payload_mask, mgr->vcpi_mask,
4494                mgr->max_payloads);
4495
4496        for (i = 0; i < mgr->max_payloads; i++) {
4497                if (mgr->proposed_vcpis[i]) {
4498                        char name[14];
4499
4500                        port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
4501                        fetch_monitor_name(mgr, port, name, sizeof(name));
4502                        seq_printf(m, "vcpi %d: %d %d %d sink name: %s\n", i,
4503                                   port->port_num, port->vcpi.vcpi,
4504                                   port->vcpi.num_slots,
4505                                   (*name != 0) ? name :  "Unknown");
                } else {
                        seq_printf(m, "vcpi %d: unused\n", i);
                }
4508        }
        for (i = 0; i < mgr->max_payloads; i++) {
                seq_printf(m, "payload %d: %d, %d, %d\n",
                           i,
                           mgr->payloads[i].payload_state,
                           mgr->payloads[i].start_slot,
                           mgr->payloads[i].num_slots);
        }
4518        mutex_unlock(&mgr->payload_lock);
4519
4520        mutex_lock(&mgr->lock);
        if (mgr->mst_primary) {
                u8 buf[DP_PAYLOAD_TABLE_SIZE];
                int ret;

                /* Bail out rather than dumping uninitialized buffer contents */
                ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
                if (ret != DP_RECEIVER_CAP_SIZE) {
                        seq_printf(m, "dpcd read failed\n");
                        goto out;
                }
                seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);

                ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
                if (ret != 2) {
                        seq_printf(m, "faux/mst read failed\n");
                        goto out;
                }
                seq_printf(m, "faux/mst: %*ph\n", 2, buf);

                ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
                if (ret != 1) {
                        seq_printf(m, "mst ctrl read failed\n");
                        goto out;
                }
                seq_printf(m, "mst ctrl: %*ph\n", 1, buf);

                /* dump the standard OUI branch header */
                ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
                if (ret != DP_BRANCH_OUI_HEADER_SIZE) {
                        seq_printf(m, "branch oui read failed\n");
                        goto out;
                }
                seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
                for (i = 0x3; i < 0x8 && buf[i]; i++)
                        seq_printf(m, "%c", buf[i]);
                seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
                           buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
                if (dump_dp_payload_table(mgr, buf))
                        seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf);
        }

out:
        mutex_unlock(&mgr->lock);
}
4546EXPORT_SYMBOL(drm_dp_mst_dump_topology);
4547
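/*
 * Usage sketch (hypothetical driver code): the dump is normally exposed
 * through a debugfs file, e.g. as a &drm_info_list show callback. The
 * "example_" names are illustrative only.
 */
#if 0	/* illustrative only */
static int example_dp_mst_info(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = m->private;
        struct example_driver *priv = node->minor->dev->dev_private;

        drm_dp_mst_dump_topology(m, &priv->mst_mgr);
        return 0;
}
#endif
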
4548static void drm_dp_tx_work(struct work_struct *work)
4549{
4550        struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
4551
4552        mutex_lock(&mgr->qlock);
4553        if (!list_empty(&mgr->tx_msg_downq) && !mgr->is_waiting_for_dwn_reply)
4554                process_single_down_tx_qlock(mgr);
4555        mutex_unlock(&mgr->qlock);
4556}
4557
4558static inline void
4559drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
4560{
4561        if (port->connector)
4562                port->mgr->cbs->destroy_connector(port->mgr, port->connector);
4563
4564        drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs);
4565        drm_dp_mst_put_port_malloc(port);
4566}
4567
4568static inline void
4569drm_dp_delayed_destroy_mstb(struct drm_dp_mst_branch *mstb)
4570{
4571        struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
4572        struct drm_dp_mst_port *port, *tmp;
4573        bool wake_tx = false;
4574
4575        mutex_lock(&mgr->lock);
4576        list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
4577                list_del(&port->next);
4578                drm_dp_mst_topology_put_port(port);
4579        }
4580        mutex_unlock(&mgr->lock);
4581
        /* drop any messages still occupying this mstb's tx slots */
4583        mutex_lock(&mstb->mgr->qlock);
4584        if (mstb->tx_slots[0]) {
4585                mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
4586                mstb->tx_slots[0] = NULL;
4587                wake_tx = true;
4588        }
4589        if (mstb->tx_slots[1]) {
4590                mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
4591                mstb->tx_slots[1] = NULL;
4592                wake_tx = true;
4593        }
4594        mutex_unlock(&mstb->mgr->qlock);
4595
4596        if (wake_tx)
4597                wake_up_all(&mstb->mgr->tx_waitq);
4598
4599        drm_dp_mst_put_mstb_malloc(mstb);
4600}
4601
4602static void drm_dp_delayed_destroy_work(struct work_struct *work)
4603{
4604        struct drm_dp_mst_topology_mgr *mgr =
4605                container_of(work, struct drm_dp_mst_topology_mgr,
4606                             delayed_destroy_work);
4607        bool send_hotplug = false, go_again;
4608
        /*
         * Not a regular list traversal, as we have to drop the destroy
         * connector lock before destroying the mstb/port, to avoid an
         * AB->BA lock ordering between this lock and the config mutex.
         */
4614        do {
4615                go_again = false;
4616
4617                for (;;) {
4618                        struct drm_dp_mst_branch *mstb;
4619
4620                        mutex_lock(&mgr->delayed_destroy_lock);
4621                        mstb = list_first_entry_or_null(&mgr->destroy_branch_device_list,
4622                                                        struct drm_dp_mst_branch,
4623                                                        destroy_next);
4624                        if (mstb)
4625                                list_del(&mstb->destroy_next);
4626                        mutex_unlock(&mgr->delayed_destroy_lock);
4627
4628                        if (!mstb)
4629                                break;
4630
4631                        drm_dp_delayed_destroy_mstb(mstb);
4632                        go_again = true;
4633                }
4634
4635                for (;;) {
4636                        struct drm_dp_mst_port *port;
4637
4638                        mutex_lock(&mgr->delayed_destroy_lock);
4639                        port = list_first_entry_or_null(&mgr->destroy_port_list,
4640                                                        struct drm_dp_mst_port,
4641                                                        next);
4642                        if (port)
4643                                list_del(&port->next);
4644                        mutex_unlock(&mgr->delayed_destroy_lock);
4645
4646                        if (!port)
4647                                break;
4648
4649                        drm_dp_delayed_destroy_port(port);
4650                        send_hotplug = true;
4651                        go_again = true;
4652                }
4653        } while (go_again);
4654
4655        if (send_hotplug)
4656                drm_kms_helper_hotplug_event(mgr->dev);
4657}
4658
4659static struct drm_private_state *
4660drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
4661{
4662        struct drm_dp_mst_topology_state *state, *old_state =
4663                to_dp_mst_topology_state(obj->state);
4664        struct drm_dp_vcpi_allocation *pos, *vcpi;
4665
4666        state = kmemdup(old_state, sizeof(*state), GFP_KERNEL);
4667        if (!state)
4668                return NULL;
4669
4670        __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
4671
4672        INIT_LIST_HEAD(&state->vcpis);
4673
4674        list_for_each_entry(pos, &old_state->vcpis, next) {
4675                /* Prune leftover freed VCPI allocations */
4676                if (!pos->vcpi)
4677                        continue;
4678
4679                vcpi = kmemdup(pos, sizeof(*vcpi), GFP_KERNEL);
4680                if (!vcpi)
4681                        goto fail;
4682
4683                drm_dp_mst_get_port_malloc(vcpi->port);
4684                list_add(&vcpi->next, &state->vcpis);
4685        }
4686
4687        return &state->base;
4688
4689fail:
4690        list_for_each_entry_safe(pos, vcpi, &state->vcpis, next) {
4691                drm_dp_mst_put_port_malloc(pos->port);
4692                kfree(pos);
4693        }
4694        kfree(state);
4695
4696        return NULL;
4697}
4698
4699static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
4700                                     struct drm_private_state *state)
4701{
4702        struct drm_dp_mst_topology_state *mst_state =
4703                to_dp_mst_topology_state(state);
4704        struct drm_dp_vcpi_allocation *pos, *tmp;
4705
4706        list_for_each_entry_safe(pos, tmp, &mst_state->vcpis, next) {
4707                /* We only keep references to ports with non-zero VCPIs */
4708                if (pos->vcpi)
4709                        drm_dp_mst_put_port_malloc(pos->port);
4710                kfree(pos);
4711        }
4712
4713        kfree(mst_state);
4714}
4715
4716static inline int
4717drm_dp_mst_atomic_check_topology_state(struct drm_dp_mst_topology_mgr *mgr,
4718                                       struct drm_dp_mst_topology_state *mst_state)
4719{
4720        struct drm_dp_vcpi_allocation *vcpi;
4721        int avail_slots = 63, payload_count = 0;
4722
4723        list_for_each_entry(vcpi, &mst_state->vcpis, next) {
                /* Releasing VCPI is always OK, even if the port is gone */
4725                if (!vcpi->vcpi) {
4726                        DRM_DEBUG_ATOMIC("[MST PORT:%p] releases all VCPI slots\n",
4727                                         vcpi->port);
4728                        continue;
4729                }
4730
4731                DRM_DEBUG_ATOMIC("[MST PORT:%p] requires %d vcpi slots\n",
4732                                 vcpi->port, vcpi->vcpi);
4733
4734                avail_slots -= vcpi->vcpi;
4735                if (avail_slots < 0) {
4736                        DRM_DEBUG_ATOMIC("[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)\n",
4737                                         vcpi->port, mst_state,
4738                                         avail_slots + vcpi->vcpi);
4739                        return -ENOSPC;
4740                }
4741
4742                if (++payload_count > mgr->max_payloads) {
4743                        DRM_DEBUG_ATOMIC("[MST MGR:%p] state %p has too many payloads (max=%d)\n",
4744                                         mgr, mst_state, mgr->max_payloads);
4745                        return -EINVAL;
4746                }
4747        }
4748        DRM_DEBUG_ATOMIC("[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n",
4749                         mgr, mst_state, avail_slots,
4750                         63 - avail_slots);
4751
4752        return 0;
4753}
4754
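/*
 * Worked example (informative): with the fixed budget of 63 time slots,
 * three streams needing 20 slots each leave avail_slots = 3; a fourth
 * 20-slot stream would drive the count negative and fail with -ENOSPC.
 */
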
4755/**
4756 * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
4757 * atomic update is valid
4758 * @state: Pointer to the new &struct drm_dp_mst_topology_state
4759 *
4760 * Checks the given topology state for an atomic update to ensure that it's
4761 * valid. This includes checking whether there's enough bandwidth to support
4762 * the new VCPI allocations in the atomic update.
4763 *
4764 * Any atomic drivers supporting DP MST must make sure to call this after
4765 * checking the rest of their state in their
4766 * &drm_mode_config_funcs.atomic_check() callback.
4767 *
4768 * See also:
4769 * drm_dp_atomic_find_vcpi_slots()
4770 * drm_dp_atomic_release_vcpi_slots()
4771 *
4772 * Returns:
4773 *
4774 * 0 if the new state is valid, negative error code otherwise.
4775 */
4776int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
4777{
4778        struct drm_dp_mst_topology_mgr *mgr;
4779        struct drm_dp_mst_topology_state *mst_state;
4780        int i, ret = 0;
4781
4782        for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
4783                ret = drm_dp_mst_atomic_check_topology_state(mgr, mst_state);
4784                if (ret)
4785                        break;
4786        }
4787
4788        return ret;
4789}
4790EXPORT_SYMBOL(drm_dp_mst_atomic_check);
4791
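/*
 * Usage sketch (hypothetical driver code): a driver's global atomic_check
 * typically runs this after the core helper checks have passed.
 */
#if 0	/* illustrative only */
static int example_atomic_check(struct drm_device *dev,
                                struct drm_atomic_state *state)
{
        int ret;

        ret = drm_atomic_helper_check(dev, state);
        if (ret)
                return ret;

        return drm_dp_mst_atomic_check(state);
}
#endif
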
4792const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = {
4793        .atomic_duplicate_state = drm_dp_mst_duplicate_state,
4794        .atomic_destroy_state = drm_dp_mst_destroy_state,
4795};
4796EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
4797
/**
 * drm_atomic_get_mst_topology_state - get MST topology state
 * @state: global atomic state
 * @mgr: MST topology manager, also the private object in this case
 *
 * This function wraps drm_atomic_get_private_obj_state(), passing in the MST
 * atomic state vtable so that the private object state returned is that of an
 * MST topology object. Note that drm_atomic_get_private_obj_state() expects
 * the caller to take care of the locking, so a warning is triggered if the
 * connection_mutex is not held.
 *
 * RETURNS:
 *
 * The MST topology state or error pointer.
 */
4813struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
4814                                                                    struct drm_dp_mst_topology_mgr *mgr)
4815{
4816        return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
4817}
4818EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
4819
4820/**
4821 * drm_dp_mst_topology_mgr_init - initialise a topology manager
4822 * @mgr: manager struct to initialise
4823 * @dev: device providing this structure - for i2c addition.
4824 * @aux: DP helper aux channel to talk to this device
4825 * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
4826 * @max_payloads: maximum number of payloads this GPU can source
4827 * @conn_base_id: the connector object ID the MST device is connected to.
4828 *
4829 * Return 0 for success, or negative error code on failure
4830 */
4831int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
4832                                 struct drm_device *dev, struct drm_dp_aux *aux,
4833                                 int max_dpcd_transaction_bytes,
4834                                 int max_payloads, int conn_base_id)
4835{
4836        struct drm_dp_mst_topology_state *mst_state;
4837
4838        mutex_init(&mgr->lock);
4839        mutex_init(&mgr->qlock);
4840        mutex_init(&mgr->payload_lock);
4841        mutex_init(&mgr->delayed_destroy_lock);
4842        mutex_init(&mgr->up_req_lock);
4843        mutex_init(&mgr->probe_lock);
4844#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
4845        mutex_init(&mgr->topology_ref_history_lock);
4846#endif
4847        INIT_LIST_HEAD(&mgr->tx_msg_downq);
4848        INIT_LIST_HEAD(&mgr->destroy_port_list);
4849        INIT_LIST_HEAD(&mgr->destroy_branch_device_list);
4850        INIT_LIST_HEAD(&mgr->up_req_list);
4851        INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
4852        INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
4853        INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work);
4854        INIT_WORK(&mgr->up_req_work, drm_dp_mst_up_req_work);
4855        init_waitqueue_head(&mgr->tx_waitq);
4856        mgr->dev = dev;
4857        mgr->aux = aux;
4858        mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
4859        mgr->max_payloads = max_payloads;
4860        mgr->conn_base_id = conn_base_id;
4861        if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
4862            max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
4863                return -EINVAL;
        mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
        if (!mgr->payloads)
                return -ENOMEM;
        mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
        if (!mgr->proposed_vcpis)
                goto out_free_payloads;
        set_bit(0, &mgr->payload_mask);

        mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
        if (!mst_state)
                goto out_free_vcpis;

        mst_state->mgr = mgr;
        INIT_LIST_HEAD(&mst_state->vcpis);

        drm_atomic_private_obj_init(dev, &mgr->base,
                                    &mst_state->base,
                                    &drm_dp_mst_topology_state_funcs);

        return 0;

out_free_vcpis:
        kfree(mgr->proposed_vcpis);
        mgr->proposed_vcpis = NULL;
out_free_payloads:
        kfree(mgr->payloads);
        mgr->payloads = NULL;
        return -ENOMEM;
}
4885EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
4886
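/*
 * Usage sketch (hypothetical driver code): the manager is initialised once
 * per MST-capable connector; the 16-byte AUX transfer size and the 4-stream
 * payload limit below are illustrative values, not mandated ones.
 */
#if 0	/* illustrative only */
static int example_mst_init(struct example_dp *dp, struct drm_device *dev,
                            struct drm_connector *connector)
{
        return drm_dp_mst_topology_mgr_init(&dp->mst_mgr, dev, &dp->aux,
                                            16, 4, connector->base.id);
}
#endif
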
4887/**
4888 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
4889 * @mgr: manager to destroy
4890 */
4891void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
4892{
4893        drm_dp_mst_topology_mgr_set_mst(mgr, false);
4894        flush_work(&mgr->work);
4895        cancel_work_sync(&mgr->delayed_destroy_work);
4896        mutex_lock(&mgr->payload_lock);
4897        kfree(mgr->payloads);
4898        mgr->payloads = NULL;
4899        kfree(mgr->proposed_vcpis);
4900        mgr->proposed_vcpis = NULL;
4901        mutex_unlock(&mgr->payload_lock);
4902        mgr->dev = NULL;
4903        mgr->aux = NULL;
4904        drm_atomic_private_obj_fini(&mgr->base);
4905        mgr->funcs = NULL;
4906
4907        mutex_destroy(&mgr->delayed_destroy_lock);
4908        mutex_destroy(&mgr->payload_lock);
4909        mutex_destroy(&mgr->qlock);
4910        mutex_destroy(&mgr->lock);
4911        mutex_destroy(&mgr->up_req_lock);
4912        mutex_destroy(&mgr->probe_lock);
4913#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
4914        mutex_destroy(&mgr->topology_ref_history_lock);
4915#endif
4916}
4917EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
4918
4919static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num)
4920{
4921        int i;
4922
4923        if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)
4924                return false;
4925
4926        for (i = 0; i < num - 1; i++) {
4927                if (msgs[i].flags & I2C_M_RD ||
4928                    msgs[i].len > 0xff)
4929                        return false;
4930        }
4931
4932        return msgs[num - 1].flags & I2C_M_RD &&
4933                msgs[num - 1].len <= 0xff;
4934}
4935
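/*
 * Illustration (hypothetical): the only transaction shape accepted above is
 * zero or more short writes followed by exactly one read, which is what an
 * EDID fetch over DDC looks like.
 */
#if 0	/* illustrative only */
        u8 offset = 0, edid_buf[128];
        struct i2c_msg msgs[] = {
                { .addr = 0x50, .flags = 0, .len = 1, .buf = &offset },
                { .addr = 0x50, .flags = I2C_M_RD,
                  .len = sizeof(edid_buf), .buf = edid_buf },
        };

        /* remote_i2c_read_ok(msgs, ARRAY_SIZE(msgs)) accepts this pattern */
#endif
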
4936/* I2C device */
4937static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
4938                               int num)
4939{
4940        struct drm_dp_aux *aux = adapter->algo_data;
4941        struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
4942        struct drm_dp_mst_branch *mstb;
4943        struct drm_dp_mst_topology_mgr *mgr = port->mgr;
4944        unsigned int i;
4945        struct drm_dp_sideband_msg_req_body msg;
4946        struct drm_dp_sideband_msg_tx *txmsg = NULL;
4947        int ret;
4948
4949        mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
4950        if (!mstb)
4951                return -EREMOTEIO;
4952
4953        if (!remote_i2c_read_ok(msgs, num)) {
4954                DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
4955                ret = -EIO;
4956                goto out;
4957        }
4958
4959        memset(&msg, 0, sizeof(msg));
4960        msg.req_type = DP_REMOTE_I2C_READ;
4961        msg.u.i2c_read.num_transactions = num - 1;
4962        msg.u.i2c_read.port_number = port->port_num;
4963        for (i = 0; i < num - 1; i++) {
4964                msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
4965                msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
4966                msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
4967                msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP);
4968        }
4969        msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
4970        msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
4971
4972        txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
4973        if (!txmsg) {
4974                ret = -ENOMEM;
4975                goto out;
4976        }
4977
4978        txmsg->dst = mstb;
4979        drm_dp_encode_sideband_req(&msg, txmsg);
4980
4981        drm_dp_queue_down_tx(mgr, txmsg);
4982
4983        ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
        if (ret > 0) {
                if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
4987                        ret = -EREMOTEIO;
4988                        goto out;
4989                }
4990                if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
4991                        ret = -EIO;
4992                        goto out;
4993                }
                memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes,
                       msgs[num - 1].len);
4995                ret = num;
4996        }
4997out:
4998        kfree(txmsg);
4999        drm_dp_mst_topology_put_mstb(mstb);
5000        return ret;
5001}
5002
5003static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
5004{
5005        return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
5006               I2C_FUNC_SMBUS_READ_BLOCK_DATA |
5007               I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
5008               I2C_FUNC_10BIT_ADDR;
5009}
5010
5011static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
5012        .functionality = drm_dp_mst_i2c_functionality,
5013        .master_xfer = drm_dp_mst_i2c_xfer,
5014};
5015
5016/**
5017 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
5018 * @aux: DisplayPort AUX channel
5019 *
5020 * Returns 0 on success or a negative error code on failure.
5021 */
5022static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
5023{
5024        aux->ddc.algo = &drm_dp_mst_i2c_algo;
5025        aux->ddc.algo_data = aux;
5026        aux->ddc.retries = 3;
5027
5028        aux->ddc.class = I2C_CLASS_DDC;
5029        aux->ddc.owner = THIS_MODULE;
5030        aux->ddc.dev.parent = aux->dev;
5031        aux->ddc.dev.of_node = aux->dev->of_node;
5032
5033        strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
5034                sizeof(aux->ddc.name));
5035
5036        return i2c_add_adapter(&aux->ddc);
5037}
5038
5039/**
5040 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
5041 * @aux: DisplayPort AUX channel
5042 */
5043static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
5044{
5045        i2c_del_adapter(&aux->ddc);
5046}
5047