#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/i2c.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drmP.h>

#include <drm/drm_fixed.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
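
/**
 * DOC: dp mst helper
 *
 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
 * protocol. The helpers contain a topology manager and bandwidth manager.
 * The helpers encapsulate the sending and handling of sideband msgs.
 */
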
static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf);
static int test_calc_pbn_mode(void);

static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id,
				     struct drm_dp_payload *payload);

static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes);

static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				     struct drm_dp_mst_branch *mstb);
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid);

static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
#define DP_STR(x) [DP_ ## x] = #x

static const char *drm_dp_mst_req_type_str(u8 req_type)
{
	static const char * const req_type_str[] = {
		DP_STR(GET_MSG_TRANSACTION_VERSION),
		DP_STR(LINK_ADDRESS),
		DP_STR(CONNECTION_STATUS_NOTIFY),
		DP_STR(ENUM_PATH_RESOURCES),
		DP_STR(ALLOCATE_PAYLOAD),
		DP_STR(QUERY_PAYLOAD),
		DP_STR(RESOURCE_STATUS_NOTIFY),
		DP_STR(CLEAR_PAYLOAD_ID_TABLE),
		DP_STR(REMOTE_DPCD_READ),
		DP_STR(REMOTE_DPCD_WRITE),
		DP_STR(REMOTE_I2C_READ),
		DP_STR(REMOTE_I2C_WRITE),
		DP_STR(POWER_UP_PHY),
		DP_STR(POWER_DOWN_PHY),
		DP_STR(SINK_EVENT_NOTIFY),
		DP_STR(QUERY_STREAM_ENC_STATUS),
	};

	if (req_type >= ARRAY_SIZE(req_type_str) ||
	    !req_type_str[req_type])
		return "unknown";

	return req_type_str[req_type];
}

#undef DP_STR
#define DP_STR(x) [DP_NAK_ ## x] = #x

static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)
{
	static const char * const nak_reason_str[] = {
		DP_STR(WRITE_FAILURE),
		DP_STR(INVALID_READ),
		DP_STR(CRC_FAILURE),
		DP_STR(BAD_PARAM),
		DP_STR(DEFER),
		DP_STR(LINK_FAILURE),
		DP_STR(NO_RESOURCES),
		DP_STR(DPCD_FAIL),
		DP_STR(I2C_NAK),
		DP_STR(ALLOCATE_FAIL),
	};

	if (nak_reason >= ARRAY_SIZE(nak_reason_str) ||
	    !nak_reason_str[nak_reason])
		return "unknown";

	return nak_reason_str[nak_reason];
}

#undef DP_STR
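
/* sideband msg handling */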
/* bit-serial CRC-4 over the header nibbles, generator polynomial 0x13 */
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = num_nibbles * 4;
	u8 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x10) == 0x10)
			remainder ^= 0x13;
	}

	number_of_bits = 4;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x10) != 0)
			remainder ^= 0x13;
	}

	return remainder;
}

/* bit-serial CRC-8 over the message body, generator polynomial 0xd5 */
static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = number_of_bytes * 8;
	u16 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x100) == 0x100)
			remainder ^= 0xd5;
	}

	number_of_bits = 8;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x100) != 0)
			remainder ^= 0xd5;
	}

	return remainder & 0xff;
}

static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
{
	u8 size = 3;
	size += (hdr->lct / 2);
	return size;
}

static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int *len)
{
	int idx = 0;
	int i;
	u8 crc4;
	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
	for (i = 0; i < (hdr->lct / 2); i++)
		buf[idx++] = hdr->rad[i];
	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
		     (hdr->msg_len & 0x3f);
	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);

	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
	buf[idx - 1] |= (crc4 & 0xf);

	*len = idx;
}

static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int buflen, u8 *hdrlen)
{
	u8 crc4;
	u8 len;
	int i;
	u8 idx;
	if (buf[0] == 0)
		return false;
	len = 3;
	len += ((buf[0] & 0xf0) >> 4) / 2;
	if (len > buflen)
		return false;
	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);

	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
		DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
		return false;
	}

	hdr->lct = (buf[0] & 0xf0) >> 4;
	hdr->lcr = (buf[0] & 0xf);
	idx = 1;
	for (i = 0; i < (hdr->lct / 2); i++)
		hdr->rad[i] = buf[idx++];
	hdr->broadcast = (buf[idx] >> 7) & 0x1;
	hdr->path_msg = (buf[idx] >> 6) & 0x1;
	hdr->msg_len = buf[idx] & 0x3f;
	idx++;
	hdr->somt = (buf[idx] >> 7) & 0x1;
	hdr->eomt = (buf[idx] >> 6) & 0x1;
	hdr->seqno = (buf[idx] >> 4) & 0x1;
	idx++;
	*hdrlen = idx;
	return true;
}

static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
				       struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	int i;
	u8 *buf = raw->msg;
	buf[idx++] = req->req_type & 0x7f;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
		idx++;
		break;
	case DP_ALLOCATE_PAYLOAD:
		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
			(req->u.allocate_payload.number_sdp_streams & 0xf);
		idx++;
		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn >> 8);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
		idx++;
		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
				   (req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
			idx++;
		}
		if (req->u.allocate_payload.number_sdp_streams & 1) {
			i = req->u.allocate_payload.number_sdp_streams - 1;
			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
			idx++;
		}
		break;
	case DP_QUERY_PAYLOAD:
		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
		idx++;
		break;
	case DP_REMOTE_DPCD_READ:
		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_read.num_bytes);
		idx++;
		break;

	case DP_REMOTE_DPCD_WRITE:
		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
		idx += req->u.dpcd_write.num_bytes;
		break;
	case DP_REMOTE_I2C_READ:
		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
		idx++;
		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
			idx++;
			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
			idx++;
			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
			idx += req->u.i2c_read.transactions[i].num_bytes;

			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
			idx++;
		}
		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_read.num_bytes_read);
		idx++;
		break;

	case DP_REMOTE_I2C_WRITE:
		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
		idx += req->u.i2c_write.num_bytes;
		break;

	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
		idx++;
		break;
	}
	raw->cur_len = idx;
}

static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
{
	u8 crc4;
	crc4 = drm_dp_msg_data_crc4(msg, len);
	msg[len] = crc4;
}

static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
					 struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	u8 *buf = raw->msg;

	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);

	raw->cur_len = idx;
}

static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
				      u8 *replybuf, u8 replybuflen, bool hdr)
{
	int ret;
	u8 crc4;

	if (hdr) {
		u8 hdrlen;
		struct drm_dp_sideband_msg_hdr recv_hdr;
		ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
		if (ret == false) {
			print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
			return false;
		}

		/*
		 * ignore out-of-order messages or messages that are part of a
		 * failed transaction
		 */
		if (!recv_hdr.somt && !msg->have_somt)
			return false;

		/* get length contained in this portion */
		msg->curchunk_len = recv_hdr.msg_len;
		msg->curchunk_hdrlen = hdrlen;

		/* we have already gotten an somt - don't bother parsing */
		if (recv_hdr.somt && msg->have_somt)
			return false;

		if (recv_hdr.somt) {
			memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
			msg->have_somt = true;
		}
		if (recv_hdr.eomt)
			msg->have_eomt = true;

		/* copy the bytes for the remainder of this header chunk */
		msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
		memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
	} else {
		memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
		msg->curchunk_idx += replybuflen;
	}

	if (msg->curchunk_idx >= msg->curchunk_len) {
		/* do CRC */
		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
		/* copy chunk into bigger msg */
		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
		msg->curlen += msg->curchunk_len - 1;
	}
	return true;
}

static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
					       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	int i;
	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
	idx += 16;
	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
		if (raw->msg[idx] & 0x80)
			repmsg->u.link_addr.ports[i].input_port = 1;

		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);

		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
		if (repmsg->u.link_addr.ports[i].input_port == 0)
			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		if (repmsg->u.link_addr.ports[i].input_port == 0) {
			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
			idx++;
			if (idx > raw->curlen)
				goto fail_len;
			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
			idx += 16;
			if (idx > raw->curlen)
				goto fail_len;
			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
			idx++;
		}
		if (idx > raw->curlen)
			goto fail_len;
	}

	return true;
fail_len:
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
						   struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote dpcd read reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
						      struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
	idx++;
	/* TODO check */
	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
							  struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen) {
		DRM_DEBUG_KMS("power up/down phy parse length fail %d %d\n",
			      idx, raw->curlen);
		return false;
	}
	return true;
}

static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
					struct drm_dp_sideband_msg_reply_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
	msg->req_type = (raw->msg[0] & 0x7f);

	if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) {
		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
		msg->u.nak.reason = raw->msg[17];
		msg->u.nak.nak_data = raw->msg[18];
		return false;
	}

	switch (msg->req_type) {
	case DP_LINK_ADDRESS:
		return drm_dp_sideband_parse_link_address(raw, msg);
	case DP_QUERY_PAYLOAD:
		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
	case DP_REMOTE_DPCD_READ:
		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
	case DP_REMOTE_DPCD_WRITE:
		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
	case DP_REMOTE_I2C_READ:
		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
	case DP_ENUM_PATH_RESOURCES:
		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
	case DP_ALLOCATE_PAYLOAD:
		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
	default:
		DRM_ERROR("Got unknown reply 0x%02x (%s)\n", msg->req_type,
			  drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}

static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
							   struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
							 struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
				      struct drm_dp_sideband_msg_req_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->req_type = (raw->msg[0] & 0x7f);

	switch (msg->req_type) {
	case DP_CONNECTION_STATUS_NOTIFY:
		return drm_dp_sideband_parse_connection_status_notify(raw, msg);
	case DP_RESOURCE_STATUS_NOTIFY:
		return drm_dp_sideband_parse_resource_status_notify(raw, msg);
	default:
		DRM_ERROR("Got unknown request 0x%02x (%s)\n", msg->req_type,
			  drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}

static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_WRITE;
	req.u.dpcd_write.port_number = port_num;
	req.u.dpcd_write.dpcd_address = offset;
	req.u.dpcd_write.num_bytes = num_bytes;
	req.u.dpcd_write.bytes = bytes;
	drm_dp_encode_sideband_req(&req, msg);

	return 0;
}

static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_LINK_ADDRESS;
	drm_dp_encode_sideband_req(&req, msg);
	return 0;
}

static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_ENUM_PATH_RESOURCES;
	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
				  u8 vcpi, uint16_t pbn,
				  u8 number_sdp_streams,
				  u8 *sdp_stream_sink)
{
	struct drm_dp_sideband_msg_req_body req;
	memset(&req, 0, sizeof(req));
	req.req_type = DP_ALLOCATE_PAYLOAD;
	req.u.allocate_payload.port_number = port_num;
	req.u.allocate_payload.vcpi = vcpi;
	req.u.allocate_payload.pbn = pbn;
	req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
	memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
	       number_sdp_streams);
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static int build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
				  int port_num, bool power_up)
{
	struct drm_dp_sideband_msg_req_body req;

	if (power_up)
		req.req_type = DP_POWER_UP_PHY;
	else
		req.req_type = DP_POWER_DOWN_PHY;

	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_vcpi *vcpi)
{
	int ret, vcpi_ret;

	mutex_lock(&mgr->payload_lock);
	ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
	if (ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of payload ids %d\n", ret);
		goto out_unlock;
	}

	vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
	if (vcpi_ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);
		goto out_unlock;
	}

	set_bit(ret, &mgr->payload_mask);
	set_bit(vcpi_ret, &mgr->vcpi_mask);
	vcpi->vcpi = vcpi_ret + 1;
	mgr->proposed_vcpis[ret - 1] = vcpi;
out_unlock:
	mutex_unlock(&mgr->payload_lock);
	return ret;
}

static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
				      int vcpi)
{
	int i;
	if (vcpi == 0)
		return;

	mutex_lock(&mgr->payload_lock);
	DRM_DEBUG_KMS("putting payload %d\n", vcpi);
	clear_bit(vcpi - 1, &mgr->vcpi_mask);

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->proposed_vcpis[i] &&
		    mgr->proposed_vcpis[i]->vcpi == vcpi) {
			mgr->proposed_vcpis[i] = NULL;
			clear_bit(i + 1, &mgr->payload_mask);
		}
	}
	mutex_unlock(&mgr->payload_lock);
}

static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_sideband_msg_tx *txmsg)
{
	unsigned int state;

	/*
	 * All updates to txmsg->state are protected by mgr->qlock, and the
	 * two cases we check here are terminal states. For those the barriers
	 * provided by the wake_up/wait_event pair are enough.
	 */
	state = READ_ONCE(txmsg->state);
	return (state == DRM_DP_SIDEBAND_TX_RX ||
		state == DRM_DP_SIDEBAND_TX_TIMEOUT);
}

static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
				    struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
	int ret;

	ret = wait_event_timeout(mgr->tx_waitq,
				 check_txmsg_state(mgr, txmsg),
				 (4 * HZ));
	mutex_lock(&mstb->mgr->qlock);
	if (ret > 0) {
		if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
			ret = -EIO;
			goto out;
		}
	} else {
		DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);

		/* dump some state */
		ret = -EIO;

		/* remove from q */
		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
			list_del(&txmsg->next);
		}

		if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
			mstb->tx_slots[txmsg->seqno] = NULL;
		}
	}
out:
	mutex_unlock(&mgr->qlock);

	return ret;
}

static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;

	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
	if (!mstb)
		return NULL;

	mstb->lct = lct;
	if (lct > 1)
		memcpy(mstb->rad, rad, lct / 2);
	INIT_LIST_HEAD(&mstb->ports);
	kref_init(&mstb->topology_kref);
	kref_init(&mstb->malloc_kref);
	return mstb;
}

static void drm_dp_free_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb =
		container_of(kref, struct drm_dp_mst_branch, malloc_kref);

	if (mstb->port_parent)
		drm_dp_mst_put_port_malloc(mstb->port_parent);

	kfree(mstb);
}
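
/**
 * DOC: Branch device and port refcounting
 *
 * Branch devices and ports each carry two refcounts: a topology refcount
 * (&drm_dp_mst_branch.topology_kref, &drm_dp_mst_port.topology_kref) that
 * controls how long the object is considered part of the in-memory topology,
 * and a malloc refcount (&drm_dp_mst_branch.malloc_kref,
 * &drm_dp_mst_port.malloc_kref) that controls how long the backing memory
 * stays allocated. An object may outlive its topology reference (for example
 * while a payload release is still pending), but never its malloc reference:
 * the memory is only freed once the malloc refcount hits zero.
 */

/**
 * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of
 */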
static void
drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	kref_get(&mstb->malloc_kref);
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
}
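
/**
 * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of
 *
 * The branch device's memory is freed once its malloc refcount reaches 0.
 */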
static void
drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
	kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device);
}

static void drm_dp_free_mst_port(struct kref *kref)
{
	struct drm_dp_mst_port *port =
		container_of(kref, struct drm_dp_mst_port, malloc_kref);

	drm_dp_mst_put_mstb_malloc(port->parent);
	kfree(port);
}
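
/**
 * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
 * @port: The &struct drm_dp_mst_port to increment the malloc refcount of
 */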
void
drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port)
{
	kref_get(&port->malloc_kref);
	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref));
}
EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);
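
/**
 * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
 * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of
 *
 * The port's memory (along with the malloc reference it holds on its parent
 * branch device) is released once its malloc refcount reaches 0.
 */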
void
drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
{
	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1);
	kref_put(&port->malloc_kref, drm_dp_free_mst_port);
}
EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);

static void drm_dp_destroy_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb =
		container_of(kref, struct drm_dp_mst_branch, topology_kref);
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
	struct drm_dp_mst_port *port, *tmp;
	bool wake_tx = false;

	mutex_lock(&mgr->lock);
	list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
		list_del(&port->next);
		drm_dp_mst_topology_put_port(port);
	}
	mutex_unlock(&mgr->lock);

	/* drop any tx slot msg */
	mutex_lock(&mstb->mgr->qlock);
	if (mstb->tx_slots[0]) {
		mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		mstb->tx_slots[0] = NULL;
		wake_tx = true;
	}
	if (mstb->tx_slots[1]) {
		mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		mstb->tx_slots[1] = NULL;
		wake_tx = true;
	}
	mutex_unlock(&mstb->mgr->qlock);

	if (wake_tx)
		wake_up_all(&mstb->mgr->tx_waitq);

	drm_dp_mst_put_mstb_malloc(mstb);
}
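
/**
 * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
 * branch device unless it's zero
 * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
 *
 * Returns: %1 if the refcount could be incremented (i.e. the branch device is
 * still considered part of the topology), %0 otherwise.
 */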
static int __must_check
drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
{
	int ret = kref_get_unless_zero(&mstb->topology_kref);

	if (ret)
		DRM_DEBUG("mstb %p (%d)\n", mstb,
			  kref_read(&mstb->topology_kref));

	return ret;
}
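
/**
 * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
 * branch device
 * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
 *
 * Only for use when the caller is guaranteed to already hold a topology
 * reference; otherwise use drm_dp_mst_topology_try_get_mstb().
 */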
static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
{
	WARN_ON(kref_read(&mstb->topology_kref) == 0);
	kref_get(&mstb->topology_kref);
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
}
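
/**
 * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to release the topology refcount of
 *
 * The branch device is torn down once its topology refcount reaches 0.
 */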
static void
drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
{
	DRM_DEBUG("mstb %p (%d)\n",
		  mstb, kref_read(&mstb->topology_kref) - 1);
	kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
}

static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
{
	struct drm_dp_mst_branch *mstb;

	switch (old_pdt) {
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		/* remove i2c over sideband */
		drm_dp_mst_unregister_i2c_bus(&port->aux);
		break;
	case DP_PEER_DEVICE_MST_BRANCHING:
		mstb = port->mstb;
		port->mstb = NULL;
		drm_dp_mst_topology_put_mstb(mstb);
		break;
	}
}

static void drm_dp_destroy_port(struct kref *kref)
{
	struct drm_dp_mst_port *port =
		container_of(kref, struct drm_dp_mst_port, topology_kref);
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;

	if (!port->input) {
		kfree(port->cached_edid);

		/*
		 * The only time we don't have a connector
		 * on an output port is if the connector init
		 * fails.
		 */
		if (port->connector) {
			/*
			 * we can't destroy the connector here, as we might be
			 * holding the mode_config.mutex from an EDID retrieval
			 */
			mutex_lock(&mgr->destroy_connector_lock);
			list_add(&port->next, &mgr->destroy_connector_list);
			mutex_unlock(&mgr->destroy_connector_lock);
			schedule_work(&mgr->destroy_connector_work);
			return;
		}
		/*
		 * no need to clean up vcpi
		 * as if we have no connector we never setup a vcpi
		 */
		drm_dp_port_teardown_pdt(port, port->pdt);
		port->pdt = DP_PEER_DEVICE_NONE;
	}
	drm_dp_mst_put_port_malloc(port);
}
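
/**
 * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
 * port unless it's zero
 * @port: &struct drm_dp_mst_port to increment the topology refcount of
 *
 * Returns: %1 if the refcount could be incremented (i.e. the port is still
 * considered part of the topology), %0 otherwise.
 */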
static int __must_check
drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
{
	int ret = kref_get_unless_zero(&port->topology_kref);

	if (ret)
		DRM_DEBUG("port %p (%d)\n", port,
			  kref_read(&port->topology_kref));

	return ret;
}
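
/**
 * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
 * @port: The &struct drm_dp_mst_port to increment the topology refcount of
 *
 * Only for use when the caller is guaranteed to already hold a topology
 * reference; otherwise use drm_dp_mst_topology_try_get_port().
 */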
static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
{
	WARN_ON(kref_read(&port->topology_kref) == 0);
	kref_get(&port->topology_kref);
	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref));
}
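
/**
 * drm_dp_mst_topology_put_port() - release a topology reference to a port
 * @port: The &struct drm_dp_mst_port to release the topology refcount of
 *
 * The port is destroyed once its topology refcount reaches 0.
 */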
static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
{
	DRM_DEBUG("port %p (%d)\n",
		  port, kref_read(&port->topology_kref) - 1);
	kref_put(&port->topology_kref, drm_dp_destroy_port);
}

static struct drm_dp_mst_branch *
drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb,
					      struct drm_dp_mst_branch *to_find)
{
	struct drm_dp_mst_port *port;
	struct drm_dp_mst_branch *rmstb;

	if (to_find == mstb)
		return mstb;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->mstb) {
			rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
			    port->mstb, to_find);
			if (rmstb)
				return rmstb;
		}
	}
	return NULL;
}

static struct drm_dp_mst_branch *
drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_branch *rmstb = NULL;

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
		    mgr->mst_primary, mstb);

		if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb))
			rmstb = NULL;
	}
	mutex_unlock(&mgr->lock);
	return rmstb;
}

static struct drm_dp_mst_port *
drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb,
					      struct drm_dp_mst_port *to_find)
{
	struct drm_dp_mst_port *port, *mport;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port == to_find)
			return port;

		if (port->mstb) {
			mport = drm_dp_mst_topology_get_port_validated_locked(
			    port->mstb, to_find);
			if (mport)
				return mport;
		}
	}
	return NULL;
}

static struct drm_dp_mst_port *
drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_port *rport = NULL;

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		rport = drm_dp_mst_topology_get_port_validated_locked(
		    mgr->mst_primary, port);

		if (rport && !drm_dp_mst_topology_try_get_port(rport))
			rport = NULL;
	}
	mutex_unlock(&mgr->lock);
	return rport;
}

static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
{
	struct drm_dp_mst_port *port;
	int ret;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->port_num == port_num) {
			ret = drm_dp_mst_topology_try_get_port(port);
			return ret ? port : NULL;
		}
	}

	return NULL;
}
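
/*
 * calculate a new RAD for this MST branch device
 * if parent has an LCT of 2 then it has 1 nibble of RAD,
 * if parent has an LCT of 3 then it has 2 nibbles of RADs,
 */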
static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
			       u8 *rad)
{
	int parent_lct = port->parent->lct;
	int shift = 4;
	int idx = (parent_lct - 1) / 2;
	if (parent_lct > 1) {
		memcpy(rad, port->parent->rad, idx + 1);
		shift = (parent_lct % 2) ? 4 : 0;
	} else
		rad[0] = 0;

	rad[idx] |= port->port_num << shift;
	return parent_lct + 1;
}
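
/*
 * returns true if the new branch device created for this port still needs a
 * link address message sent to probe it
 */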
static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
{
	int ret;
	u8 rad[6], lct;
	bool send_link = false;
	switch (port->pdt) {
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		/* add i2c over sideband */
		ret = drm_dp_mst_register_i2c_bus(&port->aux);
		break;
	case DP_PEER_DEVICE_MST_BRANCHING:
		lct = drm_dp_calculate_rad(port, rad);

		port->mstb = drm_dp_add_mst_branch_device(lct, rad);
		if (port->mstb) {
			port->mstb->mgr = port->mgr;
			port->mstb->port_parent = port;
			/*
			 * Make sure this port's memory allocation stays
			 * around until its child MSTB releases it
			 */
			drm_dp_mst_get_port_malloc(port);

			send_link = true;
		}
		break;
	}
	return send_link;
}

static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
{
	int ret;

	memcpy(mstb->guid, guid, 16);

	if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
		if (mstb->port_parent) {
			ret = drm_dp_send_dpcd_write(
					mstb->mgr,
					mstb->port_parent,
					DP_GUID,
					16,
					mstb->guid);
		} else {
			ret = drm_dp_dpcd_write(
					mstb->mgr->aux,
					DP_GUID,
					mstb->guid,
					16);
		}
	}
}

static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
				int pnum,
				char *proppath,
				size_t proppath_size)
{
	int i;
	char temp[8];
	snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
	for (i = 0; i < (mstb->lct - 1); i++) {
		int shift = (i % 2) ? 0 : 4;
		int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
		snprintf(temp, sizeof(temp), "-%d", port_num);
		strlcat(proppath, temp, proppath_size);
	}
	snprintf(temp, sizeof(temp), "-%d", pnum);
	strlcat(proppath, temp, proppath_size);
}

static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
			    struct drm_device *dev,
			    struct drm_dp_link_addr_reply_port *port_msg)
{
	struct drm_dp_mst_port *port;
	bool ret;
	bool created = false;
	int old_pdt = 0;
	int old_ddps = 0;

	port = drm_dp_get_port(mstb, port_msg->port_number);
	if (!port) {
		port = kzalloc(sizeof(*port), GFP_KERNEL);
		if (!port)
			return;
		kref_init(&port->topology_kref);
		kref_init(&port->malloc_kref);
		port->parent = mstb;
		port->port_num = port_msg->port_number;
		port->mgr = mstb->mgr;
		port->aux.name = "DPMST";
		port->aux.dev = dev->dev;

		/*
		 * Make sure the memory allocation for our parent branch stays
		 * around until our own memory allocation is released
		 */
		drm_dp_mst_get_mstb_malloc(mstb);

		created = true;
	} else {
		old_pdt = port->pdt;
		old_ddps = port->ddps;
	}

	port->pdt = port_msg->peer_device_type;
	port->input = port_msg->input_port;
	port->mcs = port_msg->mcs;
	port->ddps = port_msg->ddps;
	port->ldps = port_msg->legacy_device_plug_status;
	port->dpcd_rev = port_msg->dpcd_revision;
	port->num_sdp_streams = port_msg->num_sdp_streams;
	port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;

	/* manage mstb port lists with mgr lock - take a reference
	   for this list */
	if (created) {
		mutex_lock(&mstb->mgr->lock);
		drm_dp_mst_topology_get_port(port);
		list_add(&port->next, &mstb->ports);
		mutex_unlock(&mstb->mgr->lock);
	}

	if (old_ddps != port->ddps) {
		if (port->ddps) {
			if (!port->input) {
				drm_dp_send_enum_path_resources(mstb->mgr,
								mstb, port);
			}
		} else {
			port->available_pbn = 0;
		}
	}

	if (old_pdt != port->pdt && !port->input) {
		drm_dp_port_teardown_pdt(port, old_pdt);

		ret = drm_dp_port_setup_pdt(port);
		if (ret == true)
			drm_dp_send_link_address(mstb->mgr, port->mstb);
	}

	if (created && !port->input) {
		char proppath[255];

		build_mst_prop_path(mstb, port->port_num, proppath,
				    sizeof(proppath));
		port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr,
								   port,
								   proppath);
		if (!port->connector) {
			/* remove it from the port list */
			mutex_lock(&mstb->mgr->lock);
			list_del(&port->next);
			mutex_unlock(&mstb->mgr->lock);
			/* drop the port list reference */
			drm_dp_mst_topology_put_port(port);
			goto out;
		}
		if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
		     port->pdt == DP_PEER_DEVICE_SST_SINK) &&
		    port->port_num >= DP_MST_LOGICAL_PORT_0) {
			port->cached_edid = drm_get_edid(port->connector,
							 &port->aux.ddc);
			drm_connector_set_tile_property(port->connector);
		}
		(*mstb->mgr->cbs->register_connector)(port->connector);
	}

out:
	/* put reference to this port */
	drm_dp_mst_topology_put_port(port);
}

static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
			       struct drm_dp_connection_status_notify *conn_stat)
{
	struct drm_dp_mst_port *port;
	int old_pdt;
	int old_ddps;
	bool dowork = false;
	port = drm_dp_get_port(mstb, conn_stat->port_number);
	if (!port)
		return;

	old_ddps = port->ddps;
	old_pdt = port->pdt;
	port->pdt = conn_stat->peer_device_type;
	port->mcs = conn_stat->message_capability_status;
	port->ldps = conn_stat->legacy_device_plug_status;
	port->ddps = conn_stat->displayport_device_plug_status;

	if (old_ddps != port->ddps) {
		if (port->ddps) {
			dowork = true;
		} else {
			port->available_pbn = 0;
		}
	}
	if (old_pdt != port->pdt && !port->input) {
		drm_dp_port_teardown_pdt(port, old_pdt);

		if (drm_dp_port_setup_pdt(port))
			dowork = true;
	}

	drm_dp_mst_topology_put_port(port);
	if (dowork)
		queue_work(system_long_wq, &mstb->mgr->work);
}

static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
							      u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_mst_port *port;
	int i, ret;
	/* find the port by iterating down */

	mutex_lock(&mgr->lock);
	mstb = mgr->mst_primary;

	if (!mstb)
		goto out;

	for (i = 0; i < lct - 1; i++) {
		int shift = (i % 2) ? 0 : 4;
		int port_num = (rad[i / 2] >> shift) & 0xf;

		list_for_each_entry(port, &mstb->ports, next) {
			if (port->port_num == port_num) {
				mstb = port->mstb;
				if (!mstb) {
					DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
					goto out;
				}

				break;
			}
		}
	}
	ret = drm_dp_mst_topology_try_get_mstb(mstb);
	if (!ret)
		mstb = NULL;
out:
	mutex_unlock(&mgr->lock);
	return mstb;
}

static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
	struct drm_dp_mst_branch *mstb,
	uint8_t *guid)
{
	struct drm_dp_mst_branch *found_mstb;
	struct drm_dp_mst_port *port;

	if (memcmp(mstb->guid, guid, 16) == 0)
		return mstb;

	list_for_each_entry(port, &mstb->ports, next) {
		if (!port->mstb)
			continue;

		found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);

		if (found_mstb)
			return found_mstb;
	}

	return NULL;
}

static struct drm_dp_mst_branch *
drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
				     uint8_t *guid)
{
	struct drm_dp_mst_branch *mstb;
	int ret;

	/* find the port by iterating down */
	mutex_lock(&mgr->lock);

	mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
	if (mstb) {
		ret = drm_dp_mst_topology_try_get_mstb(mstb);
		if (!ret)
			mstb = NULL;
	}

	mutex_unlock(&mgr->lock);
	return mstb;
}

static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
					       struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_port *port;
	struct drm_dp_mst_branch *mstb_child;
	if (!mstb->link_address_sent)
		drm_dp_send_link_address(mgr, mstb);

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->input)
			continue;

		if (!port->ddps)
			continue;

		if (!port->available_pbn)
			drm_dp_send_enum_path_resources(mgr, mstb, port);

		if (port->mstb) {
			mstb_child = drm_dp_mst_topology_get_mstb_validated(
			    mgr, port->mstb);
			if (mstb_child) {
				drm_dp_check_and_send_link_address(mgr, mstb_child);
				drm_dp_mst_topology_put_mstb(mstb_child);
			}
		}
	}
}

static void drm_dp_mst_link_probe_work(struct work_struct *work)
{
	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
	struct drm_dp_mst_branch *mstb;
	int ret;

	mutex_lock(&mgr->lock);
	mstb = mgr->mst_primary;
	if (mstb) {
		ret = drm_dp_mst_topology_try_get_mstb(mstb);
		if (!ret)
			mstb = NULL;
	}
	mutex_unlock(&mgr->lock);
	if (mstb) {
		drm_dp_check_and_send_link_address(mgr, mstb);
		drm_dp_mst_topology_put_mstb(mstb);
	}
}

static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid)
{
	u64 salt;

	if (memchr_inv(guid, 0, 16))
		return true;

	salt = get_jiffies_64();

	memcpy(&guid[0], &salt, sizeof(u64));
	memcpy(&guid[8], &salt, sizeof(u64));

	return false;
}

#if 0
static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_READ;
	req.u.dpcd_read.port_number = port_num;
	req.u.dpcd_read.dpcd_address = offset;
	req.u.dpcd_read.num_bytes = num_bytes;
	drm_dp_encode_sideband_req(&req, msg);

	return 0;
}
#endif

static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
				    bool up, u8 *msg, int len)
{
	int ret;
	int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
	int tosend, total, offset;
	int retries = 0;

retry:
	total = len;
	offset = 0;
	do {
		tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);

		ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
					&msg[offset],
					tosend);
		if (ret != tosend) {
			if (ret == -EIO && retries < 5) {
				retries++;
				goto retry;
			}
			DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);

			return -EIO;
		}
		offset += tosend;
		total -= tosend;
	} while (total > 0);
	return 0;
}

static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
				  struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_branch *mstb = txmsg->dst;
	u8 req_type;

	/* both msg slots are full */
	if (txmsg->seqno == -1) {
		if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
			DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
			return -EAGAIN;
		}
		if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
			txmsg->seqno = mstb->last_seqno;
			mstb->last_seqno ^= 1;
		} else if (mstb->tx_slots[0] == NULL)
			txmsg->seqno = 0;
		else
			txmsg->seqno = 1;
		mstb->tx_slots[txmsg->seqno] = txmsg;
	}

	req_type = txmsg->msg[0] & 0x7f;
	if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
	    req_type == DP_RESOURCE_STATUS_NOTIFY)
		hdr->broadcast = 1;
	else
		hdr->broadcast = 0;
	hdr->path_msg = txmsg->path_msg;
	hdr->lct = mstb->lct;
	hdr->lcr = mstb->lct - 1;
	if (mstb->lct > 1)
		memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
	hdr->seqno = txmsg->seqno;
	return 0;
}
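
/*
 * process a single block of the next message in the sideband queue
 */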
static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_sideband_msg_tx *txmsg,
				   bool up)
{
	u8 chunk[48];
	struct drm_dp_sideband_msg_hdr hdr;
	int len, space, idx, tosend;
	int ret;

	memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));

	if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
		txmsg->seqno = -1;
		txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
	}

	/* make hdr from dst mst - for replies use seqno
	   otherwise assign one */
	ret = set_hdr_from_dst_qlock(&hdr, txmsg);
	if (ret < 0)
		return ret;

	/* amount left to send in this message */
	len = txmsg->cur_len - txmsg->cur_offset;

	/* 48 - sideband msg size - 1 byte for crc - x header bytes */
	space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);

	tosend = min(len, space);
	if (len == txmsg->cur_len)
		hdr.somt = 1;
	if (space >= len)
		hdr.eomt = 1;

	/* msg_len includes the trailing crc byte */
	hdr.msg_len = tosend + 1;
	drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
	memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
	/* add crc at end */
	drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
	idx += tosend + 1;

	ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
	if (ret) {
		DRM_DEBUG_KMS("sideband msg failed to send\n");
		return ret;
	}

	txmsg->cur_offset += tosend;
	if (txmsg->cur_offset == txmsg->cur_len) {
		txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
		return 1;
	}
	return 0;
}

static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	WARN_ON(!mutex_is_locked(&mgr->qlock));

	/* construct a chunk from the first msg in the tx_msg queue */
	if (list_empty(&mgr->tx_msg_downq))
		return;

	txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
	ret = process_single_tx_qlock(mgr, txmsg, false);
	if (ret == 1) {
		/* txmsg is sent it should be in the slots now */
		list_del(&txmsg->next);
	} else if (ret) {
		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
		list_del(&txmsg->next);
		if (txmsg->seqno != -1)
			txmsg->dst->tx_slots[txmsg->seqno] = NULL;
		txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		wake_up_all(&mgr->tx_waitq);
	}
}

/* called holding qlock */
static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_sideband_msg_tx *txmsg)
{
	int ret;

	/* construct a chunk from the first msg in the tx_msg queue */
	ret = process_single_tx_qlock(mgr, txmsg, true);

	if (ret != 1)
		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);

	txmsg->dst->tx_slots[txmsg->seqno] = NULL;
}

static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_sideband_msg_tx *txmsg)
{
	mutex_lock(&mgr->qlock);
	list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
	if (list_is_singular(&mgr->tx_msg_downq))
		process_single_down_tx_qlock(mgr);
	mutex_unlock(&mgr->qlock);
}

static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				     struct drm_dp_mst_branch *mstb)
{
	int len;
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return;

	txmsg->dst = mstb;
	len = build_link_address(txmsg);

	mstb->link_address_sent = true;
	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		int i;

		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
			DRM_DEBUG_KMS("link address nak received\n");
		} else {
			DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports);
			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
				DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i,
					      txmsg->reply.u.link_addr.ports[i].input_port,
					      txmsg->reply.u.link_addr.ports[i].peer_device_type,
					      txmsg->reply.u.link_addr.ports[i].port_number,
					      txmsg->reply.u.link_addr.ports[i].dpcd_revision,
					      txmsg->reply.u.link_addr.ports[i].mcs,
					      txmsg->reply.u.link_addr.ports[i].ddps,
					      txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status,
					      txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
					      txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
			}

			drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);

			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
				drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
			}
			drm_kms_helper_hotplug_event(mgr->dev);
		}
	} else {
		mstb->link_address_sent = false;
		DRM_DEBUG_KMS("link address failed %d\n", ret);
	}

	kfree(txmsg);
}

static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port)
{
	int len;
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return -ENOMEM;

	txmsg->dst = mstb;
	len = build_enum_path_resources(txmsg, port->port_num);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
			DRM_DEBUG_KMS("enum path resources nak received\n");
		} else {
			if (port->port_num != txmsg->reply.u.path_resources.port_number)
				DRM_ERROR("got incorrect port in response\n");
			DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number,
				      txmsg->reply.u.path_resources.avail_payload_bw_number);
			port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number;
		}
	}

	kfree(txmsg);
	return 0;
}

static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
{
	if (!mstb->port_parent)
		return NULL;

	if (mstb->port_parent->mstb != mstb)
		return mstb->port_parent;

	return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
}
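
/*
 * Searches upwards in the topology starting from mstb to try to find the
 * closest available parent of mstb that's still connected to the rest of the
 * topology. This can be used in order to perform operations like releasing
 * payloads, where the branch device which owned the payload may no longer be
 * around and thus would require that the payload on the last living relative
 * be freed instead.
 */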
static struct drm_dp_mst_branch *
drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_mst_branch *mstb,
					int *port_num)
{
	struct drm_dp_mst_branch *rmstb = NULL;
	struct drm_dp_mst_port *found_port;

	mutex_lock(&mgr->lock);
	if (!mgr->mst_primary)
		goto out;

	do {
		found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
		if (!found_port)
			break;

		if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) {
			rmstb = found_port->parent;
			*port_num = found_port->port_num;
		} else {
			/* Search again, starting from this parent */
			mstb = found_port->parent;
		}
	} while (!rmstb);
out:
	mutex_unlock(&mgr->lock);
	return rmstb;
}

static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_mst_port *port,
				   int id,
				   int pbn)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	struct drm_dp_mst_branch *mstb;
	int len, ret, port_num;
	u8 sinks[DRM_DP_MAX_SDP_STREAMS];
	int i;

	port_num = port->port_num;
	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
	if (!mstb) {
		mstb = drm_dp_get_last_connected_port_and_mstb(mgr,
							       port->parent,
							       &port_num);

		if (!mstb)
			return -EINVAL;
	}

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto fail_put;
	}

	for (i = 0; i < port->num_sdp_streams; i++)
		sinks[i] = i;

	txmsg->dst = mstb;
	len = build_allocate_payload(txmsg, port_num,
				     id,
				     pbn, port->num_sdp_streams, sinks);

	drm_dp_queue_down_tx(mgr, txmsg);

	/*
	 * FIXME: there is a small chance that between getting the last
	 * connected mstb and sending the payload message, the last connected
	 * mstb could also be removed from the topology. In the future, this
	 * needs to be fixed by restarting the
	 * drm_dp_get_last_connected_port_and_mstb() search in the event of a
	 * timeout if the topology is still connected to the tree.
	 */
	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
			ret = -EINVAL;
		else
			ret = 0;
	}
	kfree(txmsg);
fail_put:
	drm_dp_mst_topology_put_mstb(mstb);
	return ret;
}
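
/**
 * drm_dp_send_power_updown_phy() - send a power up/down phy request to an MST port
 * @mgr: MST topology manager to use
 * @port: MST port to send the request on
 * @power_up: true to power up the port, false to power it down
 *
 * Powers up/down the DisplayPort PHY of the given MST port.
 */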
int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port, bool power_up)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	int len, ret;

	port = drm_dp_mst_topology_get_port_validated(mgr, port);
	if (!port)
		return -EINVAL;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		drm_dp_mst_topology_put_port(port);
		return -ENOMEM;
	}

	txmsg->dst = port->parent;
	len = build_power_updown_phy(txmsg, port->port_num, power_up);
	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
			ret = -EINVAL;
		else
			ret = 0;
	}
	kfree(txmsg);
	drm_dp_mst_topology_put_port(port);

	return ret;
}
EXPORT_SYMBOL(drm_dp_send_power_updown_phy);

static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
				       int id,
				       struct drm_dp_payload *payload)
{
	int ret;

	ret = drm_dp_dpcd_write_payload(mgr, id, payload);
	if (ret < 0) {
		payload->payload_state = 0;
		return ret;
	}
	payload->payload_state = DP_PAYLOAD_LOCAL;
	return 0;
}

static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_port *port,
				       int id,
				       struct drm_dp_payload *payload)
{
	int ret;
	ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
	if (ret < 0)
		return ret;
	payload->payload_state = DP_PAYLOAD_REMOTE;
	return ret;
}

static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_mst_port *port,
					int id,
					struct drm_dp_payload *payload)
{
	DRM_DEBUG_KMS("\n");
	/* it's okay for these to fail */
	if (port) {
		drm_dp_payload_send_msg(mgr, port, id, 0);
	}

	drm_dp_dpcd_write_payload(mgr, id, payload);
	payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
	return 0;
}

static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
					int id,
					struct drm_dp_payload *payload)
{
	payload->payload_state = 0;
	return 0;
}

/**
 * drm_dp_update_payload_part1() - Execute payload update part 1
 * @mgr: manager to use.
 *
 * This iterates over all proposed virtual channels, and tries to
 * allocate space in the link for them. For 0->slots transitions,
 * this step just writes the VCPI to the MST device. For slots->0
 * transitions, this writes the updated VCPIs and removes the
 * remote VC payloads.
 *
 * After calling this the driver should generate ACT and payload
 * packets.
 */
int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_dp_payload req_payload;
	struct drm_dp_mst_port *port;
	int i, j;
	int cur_slots = 1;

	mutex_lock(&mgr->payload_lock);
	for (i = 0; i < mgr->max_payloads; i++) {
		struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
		struct drm_dp_payload *payload = &mgr->payloads[i];
		bool put_port = false;

		/* solve the current payloads - compare to the hw ones
		 * - update the hw view
		 */
		req_payload.start_slot = cur_slots;
		if (vcpi) {
			port = container_of(vcpi, struct drm_dp_mst_port,
					    vcpi);

			/* Validated ports don't matter if we're releasing
			 * VCPI
			 */
			if (vcpi->num_slots) {
				port = drm_dp_mst_topology_get_port_validated(
				    mgr, port);
				if (!port) {
					mutex_unlock(&mgr->payload_lock);
					return -EINVAL;
				}
				put_port = true;
			}

			req_payload.num_slots = vcpi->num_slots;
			req_payload.vcpi = vcpi->vcpi;
		} else {
			port = NULL;
			req_payload.num_slots = 0;
		}

		payload->start_slot = req_payload.start_slot;
		/* work out what is required to happen with this payload */
		if (payload->num_slots != req_payload.num_slots) {

			/* need to push an update for this payload */
			if (req_payload.num_slots) {
				drm_dp_create_payload_step1(mgr, vcpi->vcpi,
							    &req_payload);
				payload->num_slots = req_payload.num_slots;
				payload->vcpi = req_payload.vcpi;
			} else if (payload->num_slots) {
				payload->num_slots = 0;
				drm_dp_destroy_payload_step1(mgr, port,
							     payload->vcpi,
							     payload);
				req_payload.payload_state =
					payload->payload_state;
				payload->start_slot = 0;
			}
			payload->payload_state = req_payload.payload_state;
		}
		cur_slots += req_payload.num_slots;

		if (put_port)
			drm_dp_mst_topology_put_port(port);
	}

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL)
			continue;

		DRM_DEBUG_KMS("removing payload %d\n", i);
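		/*
		 * Compact the remaining payloads down over the deleted entry.
		 * Bit (j + 1) of payload_mask tracks table entry j, since
		 * bit 0 is reserved for VC payload ID 0.
		 */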
		for (j = i; j < mgr->max_payloads - 1; j++) {
			mgr->payloads[j] = mgr->payloads[j + 1];
			mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];

			if (mgr->proposed_vcpis[j] &&
			    mgr->proposed_vcpis[j]->num_slots) {
				set_bit(j + 1, &mgr->payload_mask);
			} else {
				clear_bit(j + 1, &mgr->payload_mask);
			}
		}

		memset(&mgr->payloads[mgr->max_payloads - 1], 0,
		       sizeof(struct drm_dp_payload));
		mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
		clear_bit(mgr->max_payloads, &mgr->payload_mask);
	}
	mutex_unlock(&mgr->payload_lock);

	return 0;
}
EXPORT_SYMBOL(drm_dp_update_payload_part1);

/**
 * drm_dp_update_payload_part2() - Execute payload update part 2
 * @mgr: manager to use.
 *
 * This iterates over all proposed virtual channels, and tries to
 * allocate space in the link for them. For 0->slots transitions,
 * this step writes the remote VC payload commands. For slots->0
 * this just resets some internal state.
 */
int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_dp_mst_port *port;
	int i;
	int ret = 0;

	mutex_lock(&mgr->payload_lock);
	for (i = 0; i < mgr->max_payloads; i++) {

		if (!mgr->proposed_vcpis[i])
			continue;

		port = container_of(mgr->proposed_vcpis[i],
				    struct drm_dp_mst_port, vcpi);

		DRM_DEBUG_KMS("payload %d %d\n", i,
			      mgr->payloads[i].payload_state);
		if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
			ret = drm_dp_create_payload_step2(mgr, port,
							  mgr->proposed_vcpis[i]->vcpi,
							  &mgr->payloads[i]);
		} else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
			ret = drm_dp_destroy_payload_step2(mgr,
							   mgr->proposed_vcpis[i]->vcpi,
							   &mgr->payloads[i]);
		}
		if (ret) {
			mutex_unlock(&mgr->payload_lock);
			return ret;
		}
	}
	mutex_unlock(&mgr->payload_lock);
	return 0;
}
EXPORT_SYMBOL(drm_dp_update_payload_part2);

#if 0 /* unused as yet */
static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port,
				 int offset, int size)
{
	int len;
	struct drm_dp_sideband_msg_tx *txmsg;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return -ENOMEM;

	len = build_dpcd_read(txmsg, port->port_num, 0, 8);
	txmsg->dst = port->parent;

	drm_dp_queue_down_tx(mgr, txmsg);

	return 0;
}
#endif

static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes)
{
	int ret;
	struct drm_dp_sideband_msg_tx *txmsg;
	struct drm_dp_mst_branch *mstb;

	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
	if (!mstb)
		return -EINVAL;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto fail_put;
	}

	build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
	txmsg->dst = mstb;

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
			ret = -EINVAL;
		else
			ret = 0;
	}
	kfree(txmsg);
fail_put:
	drm_dp_mst_topology_put_mstb(mstb);
	return ret;
}

static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg,
				      u8 req_type)
{
	struct drm_dp_sideband_msg_reply_body reply;

	reply.reply_type = DP_SIDEBAND_REPLY_ACK;
	reply.req_type = req_type;
	drm_dp_encode_sideband_reply(&reply, msg);
	return 0;
}

static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
				    struct drm_dp_mst_branch *mstb,
				    int req_type, int seqno, bool broadcast)
{
	struct drm_dp_sideband_msg_tx *txmsg;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return -ENOMEM;

	txmsg->dst = mstb;
	txmsg->seqno = seqno;
	drm_dp_encode_up_ack_reply(txmsg, req_type);

	mutex_lock(&mgr->qlock);

	/* construct a chunk from the first msg in the tx_msg queue */
	process_single_up_tx_qlock(mgr, txmsg);

	mutex_unlock(&mgr->qlock);

	kfree(txmsg);
	return 0;
}

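/*
 * The value returned through @out is the link's PBN-per-time-slot divisor:
 * one PBN unit is 54/64 MBytes/sec, so each of the 64 MTP time slots can
 * carry link_rate_MBps * lane_count / 54 PBN. E.g. 2.7 Gbps (270 MBps per
 * lane) on 2 lanes gives 270 * 2 / 54 = 10, matching the table below.
 */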
static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
				     int dp_link_count,
				     int *out)
{
	switch (dp_link_bw) {
	default:
		DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
			      dp_link_bw, dp_link_count);
		return false;

	case DP_LINK_BW_1_62:
		*out = 3 * dp_link_count;
		break;
	case DP_LINK_BW_2_7:
		*out = 5 * dp_link_count;
		break;
	case DP_LINK_BW_5_4:
		*out = 10 * dp_link_count;
		break;
	case DP_LINK_BW_8_1:
		*out = 15 * dp_link_count;
		break;
	}
	return true;
}

/**
 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
 * @mgr: manager to set state for
 * @mst_state: true to enable MST on this connector - false to disable.
 *
 * This is called by the driver when it detects an MST capable device plugged
 * into a DP MST capable port, or when a DP MST capable device is unplugged.
 */
int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
{
	int ret = 0;
	struct drm_dp_mst_branch *mstb = NULL;

	mutex_lock(&mgr->lock);
	if (mst_state == mgr->mst_state)
		goto out_unlock;

	mgr->mst_state = mst_state;
	/* set the device into MST mode */
	if (mst_state) {
		WARN_ON(mgr->mst_primary);

		/* get dpcd info */
		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd,
				       DP_RECEIVER_CAP_SIZE);
		if (ret != DP_RECEIVER_CAP_SIZE) {
			DRM_DEBUG_KMS("failed to read DPCD\n");
			ret = -EINVAL;
			goto out_unlock;
		}

		if (!drm_dp_get_vc_payload_bw(mgr->dpcd[1],
					      mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK,
					      &mgr->pbn_div)) {
			ret = -EINVAL;
			goto out_unlock;
		}

		/* add initial branch device at LCT 1 */
		mstb = drm_dp_add_mst_branch_device(1, NULL);
		if (mstb == NULL) {
			ret = -ENOMEM;
			goto out_unlock;
		}
		mstb->mgr = mgr;

		/* give this the main reference */
		mgr->mst_primary = mstb;
		drm_dp_mst_topology_get_mstb(mgr->mst_primary);

		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
					 DP_MST_EN | DP_UP_REQ_EN |
					 DP_UPSTREAM_IS_SRC);
		if (ret < 0)
			goto out_unlock;

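		/* Clear the payload table: VC payload ID 0 covering all 63 slots */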
		{
			struct drm_dp_payload reset_pay;

			reset_pay.start_slot = 0;
			reset_pay.num_slots = 0x3f;
			drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
		}

		queue_work(system_long_wq, &mgr->work);

		ret = 0;
	} else {
		/* disable MST on the device */
		mstb = mgr->mst_primary;
		mgr->mst_primary = NULL;
		/* this can fail if the device is gone */
		drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
		ret = 0;
		memset(mgr->payloads, 0,
		       mgr->max_payloads * sizeof(struct drm_dp_payload));
		mgr->payload_mask = 0;
		set_bit(0, &mgr->payload_mask);
		mgr->vcpi_mask = 0;
	}

out_unlock:
	mutex_unlock(&mgr->lock);
	if (mstb)
		drm_dp_mst_topology_put_mstb(mstb);
	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);

/**
 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
 * @mgr: manager to suspend
 *
 * This function tells the MST device that we can't handle UP messages
 * anymore. This should stop it from sending any since we are suspended.
 */
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
{
	mutex_lock(&mgr->lock);
	drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
			   DP_MST_EN | DP_UPSTREAM_IS_SRC);
	mutex_unlock(&mgr->lock);
	flush_work(&mgr->work);
	flush_work(&mgr->destroy_connector_work);
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);

/**
 * drm_dp_mst_topology_mgr_resume() - resume the MST manager
 * @mgr: manager to resume
 *
 * This will fetch DPCD and see if the device is still there,
 * if it is, it will rewrite the MSTM control bits, and return.
 *
 * If the device fails this returns -1, and the driver should do
 * a full MST reprobe, in case we were undocked.
 */
int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
{
	int ret = 0;

	mutex_lock(&mgr->lock);

	if (mgr->mst_primary) {
		int sret;
		u8 guid[16];

		sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd,
					DP_RECEIVER_CAP_SIZE);
		if (sret != DP_RECEIVER_CAP_SIZE) {
			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
			ret = -1;
			goto out_unlock;
		}

		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
					 DP_MST_EN | DP_UP_REQ_EN |
					 DP_UPSTREAM_IS_SRC);
		if (ret < 0) {
			DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
			ret = -1;
			goto out_unlock;
		}

		/* Some hubs forget their guids after they resume */
		sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
		if (sret != 16) {
			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
			ret = -1;
			goto out_unlock;
		}
		drm_dp_check_mstb_guid(mgr->mst_primary, guid);

		ret = 0;
	} else
		ret = -1;

out_unlock:
	mutex_unlock(&mgr->lock);
	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);

static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
{
	int len;
	u8 replyblock[32];
	int replylen, origlen, curreply;
	int ret;
	struct drm_dp_sideband_msg_rx *msg;
	int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE :
			   DP_SIDEBAND_MSG_DOWN_REP_BASE;

	msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;

	len = min(mgr->max_dpcd_transaction_bytes, 16);
	ret = drm_dp_dpcd_read(mgr->aux, basereg,
			       replyblock, len);
	if (ret != len) {
		DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
		return false;
	}
	ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
	if (!ret) {
		DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
		return false;
	}
	replylen = msg->curchunk_len + msg->curchunk_hdrlen;

	origlen = replylen;
	replylen -= len;
	curreply = len;
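
	/* Read any remaining message chunks, at most 16 bytes per DPCD read */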
	while (replylen > 0) {
		len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
		ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
				       replyblock, len);
		if (ret != len) {
			DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
				      len, ret);
			return false;
		}

		ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
		if (!ret) {
			DRM_DEBUG_KMS("failed to build sideband msg\n");
			return false;
		}

		curreply += len;
		replylen -= len;
	}
	return true;
}

static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
{
	int ret = 0;

	if (!drm_dp_get_one_sb_msg(mgr, false)) {
		memset(&mgr->down_rep_recv, 0,
		       sizeof(struct drm_dp_sideband_msg_rx));
		return 0;
	}

	if (mgr->down_rep_recv.have_eomt) {
		struct drm_dp_sideband_msg_tx *txmsg;
		struct drm_dp_mst_branch *mstb;
		int slot = -1;

		mstb = drm_dp_get_mst_branch_device(mgr,
						    mgr->down_rep_recv.initial_hdr.lct,
						    mgr->down_rep_recv.initial_hdr.rad);
		if (!mstb) {
			DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
				      mgr->down_rep_recv.initial_hdr.lct);
			memset(&mgr->down_rep_recv, 0,
			       sizeof(struct drm_dp_sideband_msg_rx));
			return 0;
		}

		/* find the message */
		slot = mgr->down_rep_recv.initial_hdr.seqno;
		mutex_lock(&mgr->qlock);
		txmsg = mstb->tx_slots[slot];
		mutex_unlock(&mgr->qlock);

		if (!txmsg) {
			DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
				      mstb,
				      mgr->down_rep_recv.initial_hdr.seqno,
				      mgr->down_rep_recv.initial_hdr.lct,
				      mgr->down_rep_recv.initial_hdr.rad[0],
				      mgr->down_rep_recv.msg[0]);
			drm_dp_mst_topology_put_mstb(mstb);
			memset(&mgr->down_rep_recv, 0,
			       sizeof(struct drm_dp_sideband_msg_rx));
			return 0;
		}

		drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);

		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
			DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
				      txmsg->reply.req_type,
				      drm_dp_mst_req_type_str(txmsg->reply.req_type),
				      txmsg->reply.u.nak.reason,
				      drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
				      txmsg->reply.u.nak.nak_data);

		memset(&mgr->down_rep_recv, 0,
		       sizeof(struct drm_dp_sideband_msg_rx));

		mutex_lock(&mgr->qlock);
		txmsg->state = DRM_DP_SIDEBAND_TX_RX;
		mstb->tx_slots[slot] = NULL;
		mutex_unlock(&mgr->qlock);

		/*
		 * Only drop our reference to the mstb once we are done
		 * touching mstb->tx_slots above.
		 */
		drm_dp_mst_topology_put_mstb(mstb);

		wake_up_all(&mgr->tx_waitq);
	}
	return ret;
}

static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
{
	int ret = 0;

	if (!drm_dp_get_one_sb_msg(mgr, true)) {
		memset(&mgr->up_req_recv, 0,
		       sizeof(struct drm_dp_sideband_msg_rx));
		return 0;
	}

	if (mgr->up_req_recv.have_eomt) {
		struct drm_dp_sideband_msg_req_body msg;
		struct drm_dp_mst_branch *mstb = NULL;
		bool seqno;

		if (!mgr->up_req_recv.initial_hdr.broadcast) {
			mstb = drm_dp_get_mst_branch_device(mgr,
							    mgr->up_req_recv.initial_hdr.lct,
							    mgr->up_req_recv.initial_hdr.rad);
			if (!mstb) {
				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
					      mgr->up_req_recv.initial_hdr.lct);
				memset(&mgr->up_req_recv, 0,
				       sizeof(struct drm_dp_sideband_msg_rx));
				return 0;
			}
		}

		seqno = mgr->up_req_recv.initial_hdr.seqno;
		drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);

		if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
			drm_dp_send_up_ack_reply(mgr, mgr->mst_primary,
						 msg.req_type, seqno, false);

			if (!mstb)
				mstb = drm_dp_get_mst_branch_device_by_guid(mgr,
									    msg.u.conn_stat.guid);
			if (!mstb) {
				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
					      mgr->up_req_recv.initial_hdr.lct);
				memset(&mgr->up_req_recv, 0,
				       sizeof(struct drm_dp_sideband_msg_rx));
				return 0;
			}

			drm_dp_update_port(mstb, &msg.u.conn_stat);

			DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
				      msg.u.conn_stat.port_number,
				      msg.u.conn_stat.legacy_device_plug_status,
				      msg.u.conn_stat.displayport_device_plug_status,
				      msg.u.conn_stat.message_capability_status,
				      msg.u.conn_stat.input_port,
				      msg.u.conn_stat.peer_device_type);
			drm_kms_helper_hotplug_event(mgr->dev);
		} else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
			drm_dp_send_up_ack_reply(mgr, mgr->mst_primary,
						 msg.req_type, seqno, false);
			if (!mstb)
				mstb = drm_dp_get_mst_branch_device_by_guid(mgr,
									    msg.u.resource_stat.guid);
			if (!mstb) {
				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
					      mgr->up_req_recv.initial_hdr.lct);
				memset(&mgr->up_req_recv, 0,
				       sizeof(struct drm_dp_sideband_msg_rx));
				return 0;
			}

			DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n",
				      msg.u.resource_stat.port_number,
				      msg.u.resource_stat.available_pbn);
		}

		if (mstb)
			drm_dp_mst_topology_put_mstb(mstb);

		memset(&mgr->up_req_recv, 0,
		       sizeof(struct drm_dp_sideband_msg_rx));
	}
	return ret;
}

/**
 * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
 * @mgr: manager to notify irq for.
 * @esi: 4 bytes from SINK_COUNT_ESI
 * @handled: whether the hpd interrupt was consumed or not
 *
 * This should be called from the driver when it detects a short IRQ,
 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0.
 * The topology manager will process the sideband messages received as a
 * result of this.
 */
int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
{
	int ret = 0;
	int sc;

	*handled = false;
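	/* DP_SINK_COUNT_ESI: bits 5:0 carry the current sink count */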
	sc = esi[0] & 0x3f;

	if (sc != mgr->sink_count) {
		mgr->sink_count = sc;
		*handled = true;
	}

	if (esi[1] & DP_DOWN_REP_MSG_RDY) {
		ret = drm_dp_mst_handle_down_rep(mgr);
		*handled = true;
	}

	if (esi[1] & DP_UP_REQ_MSG_RDY) {
		ret |= drm_dp_mst_handle_up_req(mgr);
		*handled = true;
	}

	drm_dp_mst_kick_tx(mgr);
	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_hpd_irq);

/**
 * drm_dp_mst_detect_port() - get connection status for an MST port
 * @connector: DRM connector for this port
 * @mgr: manager for this port
 * @port: unverified pointer to a port
 *
 * This returns the current connection state for a port. It validates the
 * port pointer still exists so the caller doesn't require a reference.
 */
enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector,
						 struct drm_dp_mst_topology_mgr *mgr,
						 struct drm_dp_mst_port *port)
{
	enum drm_connector_status status = connector_status_disconnected;

	/* we need to search for the port in our topology first and see if it's registered */
	port = drm_dp_mst_topology_get_port_validated(mgr, port);
	if (!port)
		return connector_status_disconnected;

	if (!port->ddps)
		goto out;

	switch (port->pdt) {
	case DP_PEER_DEVICE_NONE:
	case DP_PEER_DEVICE_MST_BRANCHING:
		break;

	case DP_PEER_DEVICE_SST_SINK:
		status = connector_status_connected;
		/* for logical ports - cache the EDID */
		if (port->port_num >= DP_MST_LOGICAL_PORT_0 &&
		    !port->cached_edid)
			port->cached_edid = drm_get_edid(connector,
							 &port->aux.ddc);
		break;
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
		if (port->ldps)
			status = connector_status_connected;
		break;
	}
out:
	drm_dp_mst_topology_put_port(port);
	return status;
}
EXPORT_SYMBOL(drm_dp_mst_detect_port);

/**
 * drm_dp_mst_port_has_audio() - Check whether port has audio capability or not
 * @mgr: manager for this port
 * @port: unverified pointer to a port.
 *
 * This returns whether the port supports audio or not.
 */
bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
			       struct drm_dp_mst_port *port)
{
	bool ret = false;

	port = drm_dp_mst_topology_get_port_validated(mgr, port);
	if (!port)
		return ret;
	ret = port->has_audio;
	drm_dp_mst_topology_put_port(port);
	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_port_has_audio);

/**
 * drm_dp_mst_get_edid() - get EDID for an MST port
 * @connector: toplevel connector to get EDID for
 * @mgr: manager for this port
 * @port: unverified pointer to a port.
 *
 * This returns an EDID for the port connected to a connector,
 * It validates the pointer still exists so the caller doesn't require a
 * reference.
 */
struct edid *drm_dp_mst_get_edid(struct drm_connector *connector,
				 struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port)
{
	struct edid *edid = NULL;

	/* we need to search for the port in our topology first and see if it's registered */
	port = drm_dp_mst_topology_get_port_validated(mgr, port);
	if (!port)
		return NULL;

	if (port->cached_edid)
		edid = drm_edid_duplicate(port->cached_edid);
	else {
		edid = drm_get_edid(connector, &port->aux.ddc);
		drm_connector_set_tile_property(connector);
	}
	port->has_audio = drm_detect_monitor_audio(edid);
	drm_dp_mst_topology_put_port(port);
	return edid;
}
EXPORT_SYMBOL(drm_dp_mst_get_edid);

/**
 * drm_dp_find_vcpi_slots() - Find VCPI slots for this PBN value
 * @mgr: manager to use
 * @pbn: payload bandwidth to convert into slots.
 *
 * Calculate the number of VCPI slots that will be required for the given PBN
 * value. This function is deprecated, and should not be used in atomic
 * drivers.
 *
 * RETURNS:
 * The total slots required for this port, or error.
 */
int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
			   int pbn)
{
	int num_slots;

	num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);

	/* max. time slots - one slot for MTP header */
	if (num_slots > 63)
		return -ENOSPC;
	return num_slots;
}
EXPORT_SYMBOL(drm_dp_find_vcpi_slots);

static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
			    struct drm_dp_vcpi *vcpi, int pbn, int slots)
{
	int ret;

	/* max. time slots - one slot for MTP header */
	if (slots > 63)
		return -ENOSPC;

	vcpi->pbn = pbn;
	vcpi->aligned_pbn = slots * mgr->pbn_div;
	vcpi->num_slots = slots;

	ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * drm_dp_atomic_find_vcpi_slots() - Find and add VCPI slots to the state
 * @state: global atomic state
 * @mgr: MST topology manager for the port
 * @port: port to find vcpi slots for
 * @pbn: bandwidth required for the mode in PBN
 *
 * Allocates VCPI slots to @port, replacing any previous VCPI allocations it
 * may have had. Any atomic drivers which support MST must call this function
 * in their &drm_encoder_helper_funcs.atomic_check() callback to change the
 * current VCPI allocation for the new state.
 *
 * Allocations set by this function are not checked against the bandwidth
 * restraints of @mgr until the driver calls drm_dp_mst_atomic_check().
 *
 * Additionally, it is OK to call this function multiple times on the same
 * @port as needed. It is not OK however, to call this function and
 * drm_dp_atomic_release_vcpi_slots() on the same @port in a single atomic
 * check phase.
 *
 * See also:
 * drm_dp_atomic_release_vcpi_slots()
 * drm_dp_mst_atomic_check()
 *
 * Returns:
 * Total slots in the atomic state assigned for this port, or a negative error
 * code if the port no longer exists.
 */
int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
				  struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port, int pbn)
{
	struct drm_dp_mst_topology_state *topology_state;
	struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
	int prev_slots, req_slots;

	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
	if (IS_ERR(topology_state))
		return PTR_ERR(topology_state);

	/* Find the current allocation for this port, if any */
	list_for_each_entry(pos, &topology_state->vcpis, next) {
		if (pos->port == port) {
			vcpi = pos;
			prev_slots = vcpi->vcpi;

			/* This should never happen, unless the driver tries
			 * releasing and allocating the same VCPI allocation,
			 * which is an error
			 */
			if (WARN_ON(!prev_slots)) {
				DRM_ERROR("cannot allocate and release VCPI on [MST PORT:%p] in the same state\n",
					  port);
				return -EINVAL;
			}

			break;
		}
	}
	if (!vcpi)
		prev_slots = 0;

	req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);

	DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n",
			 port->connector->base.id, port->connector->name,
			 port, prev_slots, req_slots);

	/* Add the new allocation to the state */
	if (!vcpi) {
		vcpi = kzalloc(sizeof(*vcpi), GFP_KERNEL);
		if (!vcpi)
			return -ENOMEM;

		drm_dp_mst_get_port_malloc(port);
		vcpi->port = port;
		list_add(&vcpi->next, &topology_state->vcpis);
	}
	vcpi->vcpi = req_slots;

	return req_slots;
}
EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);

/**
 * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots
 * @state: global atomic state
 * @mgr: MST topology manager for the port
 * @port: The port to release the VCPI slots from
 *
 * Releases any VCPI slots that have been allocated to a port in the atomic
 * state. Any atomic drivers which support MST must call this function in
 * their &drm_connector_helper_funcs.atomic_check() callback when the
 * connector will no longer have VCPI allocated (e.g. because its CRTC was
 * removed) when it had VCPI allocated in the previous atomic state.
 *
 * It is OK to call this even if @port has been removed from the system.
 * Additionally, it is OK to call this function multiple times on the same
 * @port as needed. It is not OK however, to call this function and
 * drm_dp_atomic_find_vcpi_slots() on the same @port in a single atomic check
 * phase.
 *
 * See also:
 * drm_dp_atomic_find_vcpi_slots()
 * drm_dp_mst_atomic_check()
 *
 * Returns:
 * 0 on success, or a negative error code if the port was not found in the
 * topology state.
 */
int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
				     struct drm_dp_mst_topology_mgr *mgr,
				     struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_topology_state *topology_state;
	struct drm_dp_vcpi_allocation *pos;
	bool found = false;

	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
	if (IS_ERR(topology_state))
		return PTR_ERR(topology_state);

	list_for_each_entry(pos, &topology_state->vcpis, next) {
		if (pos->port == port) {
			found = true;
			break;
		}
	}
	if (WARN_ON(!found)) {
		DRM_ERROR("no VCPI for [MST PORT:%p] found in mst state %p\n",
			  port, &topology_state->base);
		return -EINVAL;
	}

	DRM_DEBUG_ATOMIC("[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi);
	if (pos->vcpi) {
		drm_dp_mst_put_port_malloc(port);
		pos->vcpi = 0;
	}

	return 0;
}
EXPORT_SYMBOL(drm_dp_atomic_release_vcpi_slots);

/**
 * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
 * @mgr: manager for this port
 * @port: port to allocate a virtual channel for.
 * @pbn: payload bandwidth number to request
 * @slots: returned number of slots for this PBN.
 */
bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_mst_port *port, int pbn, int slots)
{
	int ret;

	port = drm_dp_mst_topology_get_port_validated(mgr, port);
	if (!port)
		return false;

	if (slots < 0)
		goto out;

	if (port->vcpi.vcpi > 0) {
		DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n",
			      port->vcpi.vcpi, port->vcpi.pbn, pbn);
		if (pbn == port->vcpi.pbn) {
			drm_dp_mst_topology_put_port(port);
			return true;
		}
	}

	ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
	if (ret) {
		DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n",
			      DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
		goto out;
	}
	DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
		      pbn, port->vcpi.num_slots);

	/* Keep port allocated until its payload has been removed */
	drm_dp_mst_get_port_malloc(port);
	drm_dp_mst_topology_put_port(port);
	return true;
out:
	/* Drop the reference we took when validating the port */
	drm_dp_mst_topology_put_port(port);
	return false;
}
EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);

int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_mst_port *port)
{
	int slots = 0;

	port = drm_dp_mst_topology_get_port_validated(mgr, port);
	if (!port)
		return slots;

	slots = port->vcpi.num_slots;
	drm_dp_mst_topology_put_port(port);
	return slots;
}
EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);

/**
 * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
 * @mgr: manager for this port
 * @port: unverified pointer to a port.
 *
 * This just resets the number of slots for the ports VCPI for later
 * programming.
 */
void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port)
{
	/*
	 * A port with VCPI will remain allocated until its VCPI is
	 * released, no verified ref needed
	 */
	port->vcpi.num_slots = 0;
}
EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);

/**
 * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
 * @mgr: manager for this port
 * @port: port to deallocate vcpi for
 *
 * This can be called unconditionally, regardless of whether
 * drm_dp_mst_allocate_vcpi() succeeded or not.
 */
void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
				struct drm_dp_mst_port *port)
{
	if (!port->vcpi.vcpi)
		return;

	drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
	port->vcpi.num_slots = 0;
	port->vcpi.pbn = 0;
	port->vcpi.aligned_pbn = 0;
	port->vcpi.vcpi = 0;
	drm_dp_mst_put_port_malloc(port);
}
EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id, struct drm_dp_payload *payload)
{
	u8 payload_alloc[3], status;
	int ret;
	int retries = 0;

	drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
			   DP_PAYLOAD_TABLE_UPDATED);

	payload_alloc[0] = id;
	payload_alloc[1] = payload->start_slot;
	payload_alloc[2] = payload->num_slots;

	ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET,
				payload_alloc, 3);
	if (ret != 3) {
		DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
		goto fail;
	}

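	/*
	 * Poll DP_PAYLOAD_TABLE_UPDATE_STATUS until the branch device acks
	 * the allocation; 20 tries at 10-20 ms gives it a few hundred
	 * milliseconds to apply the table update.
	 */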
retry:
	ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
				&status);
	if (ret < 0) {
		DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
		goto fail;
	}

	if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
		retries++;
		if (retries < 20) {
			usleep_range(10000, 20000);
			goto retry;
		}
		DRM_DEBUG_KMS("status not set after read payload table status %d\n",
			      status);
		ret = -EINVAL;
		goto fail;
	}
	ret = 0;
fail:
	return ret;
}

/**
 * drm_dp_check_act_status() - Check ACT handled status.
 * @mgr: manager to use
 *
 * Check the payload status bits in the DPCD for ACT handled completion.
 */
int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
{
	u8 status;
	int ret;
	int count = 0;

	do {
		ret = drm_dp_dpcd_readb(mgr->aux,
					DP_PAYLOAD_TABLE_UPDATE_STATUS,
					&status);
		if (ret < 0) {
			DRM_DEBUG_KMS("failed to read payload table status %d\n",
				      ret);
			goto fail;
		}

		if (status & DP_PAYLOAD_ACT_HANDLED)
			break;
		count++;
		udelay(100);
	} while (count < 30);

	if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
		DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n",
			      status, count);
		ret = -EINVAL;
		goto fail;
	}
	return 0;
fail:
	return ret;
}
EXPORT_SYMBOL(drm_dp_check_act_status);

/**
 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
 * @clock: dot clock for the mode
 * @bpp: bpp for the mode.
 *
 * This uses the formula in the spec to calculate the PBN value for a mode.
 */
int drm_dp_calc_pbn_mode(int clock, int bpp)
{
	u64 kbps;
	s64 peak_kbps;
	u32 numerator;
	u32 denominator;

	kbps = clock * bpp;

	/*
	 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
	 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
	 * common multiplier to render an integer PBN for all link rate/lane
	 * counts combinations
	 * calculate
	 * peak_kbps *= (1006/1000)
	 * peak_kbps *= (64/54)
	 * peak_kbps *= 8    convert to bytes
	 */
	numerator = 64 * 1006;
	denominator = 54 * 8 * 1000 * 1000;

	kbps *= numerator;
	peak_kbps = drm_fixp_from_fraction(kbps, denominator);

	return drm_fixp2int_ceil(peak_kbps);
}
EXPORT_SYMBOL(drm_dp_calc_pbn_mode);

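/*
 * Built-in self-test for the PBN formula above, run once at manager init.
 * The expected values are precomputed with the same formula, e.g.
 * 154000 kHz * 30 bpp -> 689 PBN.
 */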
static int test_calc_pbn_mode(void)
{
	int ret;

	ret = drm_dp_calc_pbn_mode(154000, 30);
	if (ret != 689) {
		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
			  154000, 30, 689, ret);
		return -EINVAL;
	}
	ret = drm_dp_calc_pbn_mode(234000, 30);
	if (ret != 1047) {
		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
			  234000, 30, 1047, ret);
		return -EINVAL;
	}
	ret = drm_dp_calc_pbn_mode(297000, 24);
	if (ret != 1063) {
		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
			  297000, 24, 1063, ret);
		return -EINVAL;
	}
	return 0;
}

static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
{
	queue_work(system_long_wq, &mgr->tx_work);
}

static void drm_dp_mst_dump_mstb(struct seq_file *m,
				 struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_port *port;
	char prefix[10];
	/* clamp to the prefix buffer; lct can exceed it for deep trees */
	int tabs = min_t(int, mstb->lct, (int)sizeof(prefix) - 1);
	int i;

	for (i = 0; i < tabs; i++)
		prefix[i] = '\t';
	prefix[i] = '\0';

	seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
	list_for_each_entry(port, &mstb->ports, next) {
		seq_printf(m, "%sport: %d: input: %d: pdt: %d, ddps: %d ldps: %d, sdp: %d/%d, %p, conn: %p\n",
			   prefix, port->port_num, port->input, port->pdt,
			   port->ddps, port->ldps, port->num_sdp_streams,
			   port->num_sdp_stream_sinks, port, port->connector);
		if (port->mstb)
			drm_dp_mst_dump_mstb(m, port->mstb);
	}
}

#define DP_PAYLOAD_TABLE_SIZE 64

static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf)
{
	int i;

	for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
		if (drm_dp_dpcd_read(mgr->aux,
				     DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
				     &buf[i], 16) != 16)
			return false;
	}
	return true;
}

static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
			       struct drm_dp_mst_port *port, char *name,
			       int namelen)
{
	struct edid *mst_edid;

	mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
	drm_edid_get_monitor_name(mst_edid, name, namelen);
	/* drm_dp_mst_get_edid() returns an allocated EDID; don't leak it */
	kfree(mst_edid);
}

/**
 * drm_dp_mst_dump_topology(): dump topology to seq file.
 * @m: seq_file to dump output to
 * @mgr: manager to dump current topology for.
 *
 * Helper to dump MST topology to a seq file for debugfs.
 */
void drm_dp_mst_dump_topology(struct seq_file *m,
			      struct drm_dp_mst_topology_mgr *mgr)
{
	int i;
	struct drm_dp_mst_port *port;

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary)
		drm_dp_mst_dump_mstb(m, mgr->mst_primary);

	/* dump VCPIs */
	mutex_unlock(&mgr->lock);

	mutex_lock(&mgr->payload_lock);
	seq_printf(m, "vcpi: %lx %lx %d\n", mgr->payload_mask, mgr->vcpi_mask,
		   mgr->max_payloads);

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->proposed_vcpis[i]) {
			char name[14];

			port = container_of(mgr->proposed_vcpis[i],
					    struct drm_dp_mst_port, vcpi);
			fetch_monitor_name(mgr, port, name, sizeof(name));
			seq_printf(m, "vcpi %d: %d %d %d sink name: %s\n", i,
				   port->port_num, port->vcpi.vcpi,
				   port->vcpi.num_slots,
				   (*name != 0) ? name : "Unknown");
		} else
			seq_printf(m, "vcpi %d:unused\n", i);
	}
	for (i = 0; i < mgr->max_payloads; i++) {
		seq_printf(m, "payload %d: %d, %d, %d\n",
			   i,
			   mgr->payloads[i].payload_state,
			   mgr->payloads[i].start_slot,
			   mgr->payloads[i].num_slots);
	}
	mutex_unlock(&mgr->payload_lock);

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		u8 buf[DP_PAYLOAD_TABLE_SIZE];
		int ret;

		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf,
				       DP_RECEIVER_CAP_SIZE);
		seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
		ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
		seq_printf(m, "faux/mst: %*ph\n", 2, buf);
		ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
		seq_printf(m, "mst ctrl: %*ph\n", 1, buf);

		/* dump the standard OUI branch header */
		ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf,
				       DP_BRANCH_OUI_HEADER_SIZE);
		seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
		for (i = 0x3; i < 0x8 && buf[i]; i++)
			seq_printf(m, "%c", buf[i]);
		seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
			   buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
		if (dump_dp_payload_table(mgr, buf))
			seq_printf(m, "payload table: %*ph\n",
				   DP_PAYLOAD_TABLE_SIZE, buf);
	}
	mutex_unlock(&mgr->lock);
}
EXPORT_SYMBOL(drm_dp_mst_dump_topology);

static void drm_dp_tx_work(struct work_struct *work)
{
	struct drm_dp_mst_topology_mgr *mgr =
		container_of(work, struct drm_dp_mst_topology_mgr, tx_work);

	mutex_lock(&mgr->qlock);
	if (!list_empty(&mgr->tx_msg_downq))
		process_single_down_tx_qlock(mgr);
	mutex_unlock(&mgr->qlock);
}

static void drm_dp_destroy_connector_work(struct work_struct *work)
{
	struct drm_dp_mst_topology_mgr *mgr =
		container_of(work, struct drm_dp_mst_topology_mgr,
			     destroy_connector_work);
	struct drm_dp_mst_port *port;
	bool send_hotplug = false;

	/*
	 * Not a regular list traverse as we have to drop the destroy
	 * connector lock before destroying the connector, to avoid AB->BA
	 * ordering between this lock and the config mutex.
	 */
	for (;;) {
		mutex_lock(&mgr->destroy_connector_lock);
		port = list_first_entry_or_null(&mgr->destroy_connector_list,
						struct drm_dp_mst_port, next);
		if (!port) {
			mutex_unlock(&mgr->destroy_connector_lock);
			break;
		}
		list_del(&port->next);
		mutex_unlock(&mgr->destroy_connector_lock);

		INIT_LIST_HEAD(&port->next);

		mgr->cbs->destroy_connector(mgr, port->connector);

		drm_dp_port_teardown_pdt(port, port->pdt);
		port->pdt = DP_PEER_DEVICE_NONE;

		drm_dp_mst_put_port_malloc(port);
		send_hotplug = true;
	}
	if (send_hotplug)
		drm_kms_helper_hotplug_event(mgr->dev);
}

static struct drm_private_state *
drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
{
	struct drm_dp_mst_topology_state *state, *old_state =
		to_dp_mst_topology_state(obj->state);
	struct drm_dp_vcpi_allocation *pos, *vcpi;

	state = kmemdup(old_state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	INIT_LIST_HEAD(&state->vcpis);

	list_for_each_entry(pos, &old_state->vcpis, next) {
		/* Prune leftover freed VCPI allocations */
		if (!pos->vcpi)
			continue;

		vcpi = kmemdup(pos, sizeof(*vcpi), GFP_KERNEL);
		if (!vcpi)
			goto fail;

		drm_dp_mst_get_port_malloc(vcpi->port);
		list_add(&vcpi->next, &state->vcpis);
	}

	return &state->base;

fail:
	list_for_each_entry_safe(pos, vcpi, &state->vcpis, next) {
		drm_dp_mst_put_port_malloc(pos->port);
		kfree(pos);
	}
	kfree(state);

	return NULL;
}

static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
				     struct drm_private_state *state)
{
	struct drm_dp_mst_topology_state *mst_state =
		to_dp_mst_topology_state(state);
	struct drm_dp_vcpi_allocation *pos, *tmp;

	list_for_each_entry_safe(pos, tmp, &mst_state->vcpis, next) {
		/* We only keep references to ports with non-zero VCPIs */
		if (pos->vcpi)
			drm_dp_mst_put_port_malloc(pos->port);
		kfree(pos);
	}

	kfree(mst_state);
}

static inline int
drm_dp_mst_atomic_check_topology_state(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_topology_state *mst_state)
{
	struct drm_dp_vcpi_allocation *vcpi;
	int avail_slots = 63, payload_count = 0;

	list_for_each_entry(vcpi, &mst_state->vcpis, next) {
		/* Releasing VCPI is always OK-even if the port is gone */
		if (!vcpi->vcpi) {
			DRM_DEBUG_ATOMIC("[MST PORT:%p] releases all VCPI slots\n",
					 vcpi->port);
			continue;
		}

		DRM_DEBUG_ATOMIC("[MST PORT:%p] requires %d vcpi slots\n",
				 vcpi->port, vcpi->vcpi);

		avail_slots -= vcpi->vcpi;
		if (avail_slots < 0) {
			DRM_DEBUG_ATOMIC("[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)\n",
					 vcpi->port, mst_state,
					 avail_slots + vcpi->vcpi);
			return -ENOSPC;
		}

		if (++payload_count > mgr->max_payloads) {
			DRM_DEBUG_ATOMIC("[MST MGR:%p] state %p has too many payloads (max=%d)\n",
					 mgr, mst_state, mgr->max_payloads);
			return -EINVAL;
		}
	}
	DRM_DEBUG_ATOMIC("[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n",
			 mgr, mst_state, avail_slots,
			 63 - avail_slots);

	return 0;
}

/**
 * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
 * atomic update is valid
 * @state: Pointer to the new &struct drm_dp_mst_topology_state
 *
 * Checks the given topology state for an atomic update to ensure that it's
 * valid. This includes checking whether there's enough bandwidth to support
 * the new VCPI allocations in the atomic update.
 *
 * Any atomic drivers supporting DP MST must make sure to call this after
 * checking the rest of their state in their
 * &drm_mode_config_funcs.atomic_check() callback.
 *
 * See also:
 * drm_dp_atomic_find_vcpi_slots()
 * drm_dp_atomic_release_vcpi_slots()
 *
 * Returns:
 * 0 if the new state is valid, negative error code otherwise.
 */
int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
{
	struct drm_dp_mst_topology_mgr *mgr;
	struct drm_dp_mst_topology_state *mst_state;
	int i, ret = 0;

	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
		ret = drm_dp_mst_atomic_check_topology_state(mgr, mst_state);
		if (ret)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_atomic_check);
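
/*
 * Minimal usage sketch (hypothetical driver code, not part of this file):
 * an atomic driver reserves slots from its encoder atomic_check and then
 * validates the topology as a whole from its
 * &drm_mode_config_funcs.atomic_check:
 *
 *	slots = drm_dp_atomic_find_vcpi_slots(state, mgr, port, pbn);
 *	if (slots < 0)
 *		return slots;
 *	...
 *	return drm_dp_mst_atomic_check(state);
 */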

const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = {
	.atomic_duplicate_state = drm_dp_mst_duplicate_state,
	.atomic_destroy_state = drm_dp_mst_destroy_state,
};
EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);

/**
 * drm_atomic_get_mst_topology_state: get MST topology state
 *
 * @state: global atomic state
 * @mgr: MST topology manager, also the private object in this case
 *
 * This function wraps drm_atomic_get_private_obj_state() passing in the MST
 * atomic state vtable so that the private object state returned is that of a
 * MST topology object. drm_atomic_get_private_obj_state() expects the caller
 * to take care of the locking, so warn if we don't hold the
 * connection_mutex.
 *
 * RETURNS:
 * The MST topology state or error pointer.
 */
struct drm_dp_mst_topology_state *
drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
				  struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_device *dev = mgr->dev;

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
	return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state,
									 &mgr->base));
}
EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);

/**
 * drm_dp_mst_topology_mgr_init - initialise a topology manager
 * @mgr: manager struct to initialise
 * @dev: device providing this structure - for i2c addition.
 * @aux: DP helper aux channel to talk to this device
 * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
 * @max_payloads: maximum number of payloads this GPU can source
 * @conn_base_id: the connector object ID the MST device is connected to.
 *
 * Return 0 for success, or negative error code on failure
 */
int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_device *dev, struct drm_dp_aux *aux,
				 int max_dpcd_transaction_bytes,
				 int max_payloads, int conn_base_id)
{
	struct drm_dp_mst_topology_state *mst_state;

	mutex_init(&mgr->lock);
	mutex_init(&mgr->qlock);
	mutex_init(&mgr->payload_lock);
	mutex_init(&mgr->destroy_connector_lock);
	INIT_LIST_HEAD(&mgr->tx_msg_downq);
	INIT_LIST_HEAD(&mgr->destroy_connector_list);
	INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
	INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
	INIT_WORK(&mgr->destroy_connector_work, drm_dp_destroy_connector_work);
	init_waitqueue_head(&mgr->tx_waitq);
	mgr->dev = dev;
	mgr->aux = aux;
	mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
	mgr->max_payloads = max_payloads;
	mgr->conn_base_id = conn_base_id;
	if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
	    max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
		return -EINVAL;
	mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload),
				GFP_KERNEL);
	if (!mgr->payloads)
		return -ENOMEM;
	mgr->proposed_vcpis = kcalloc(max_payloads,
				      sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
	if (!mgr->proposed_vcpis)
		return -ENOMEM;
	set_bit(0, &mgr->payload_mask);
	if (test_calc_pbn_mode() < 0)
		DRM_ERROR("MST PBN self-test failed\n");

	mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
	if (mst_state == NULL)
		return -ENOMEM;

	mst_state->mgr = mgr;
	INIT_LIST_HEAD(&mst_state->vcpis);

	drm_atomic_private_obj_init(dev, &mgr->base,
				    &mst_state->base,
				    &drm_dp_mst_topology_state_funcs);

	return 0;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);

/**
 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
 * @mgr: manager to destroy
 */
void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
{
	drm_dp_mst_topology_mgr_set_mst(mgr, false);
	flush_work(&mgr->work);
	flush_work(&mgr->destroy_connector_work);
	mutex_lock(&mgr->payload_lock);
	kfree(mgr->payloads);
	mgr->payloads = NULL;
	kfree(mgr->proposed_vcpis);
	mgr->proposed_vcpis = NULL;
	mutex_unlock(&mgr->payload_lock);
	mgr->dev = NULL;
	mgr->aux = NULL;
	drm_atomic_private_obj_fini(&mgr->base);
	mgr->funcs = NULL;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
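
/*
 * A REMOTE_I2C_READ sideband message can carry at most
 * DP_REMOTE_I2C_READ_MAX_TRANSACTIONS address-write transactions followed
 * by exactly one read, and each transfer is limited to 255 bytes.
 */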
static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num)
{
	int i;

	if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)
		return false;

	for (i = 0; i < num - 1; i++) {
		if (msgs[i].flags & I2C_M_RD ||
		    msgs[i].len > 0xff)
			return false;
	}

	return msgs[num - 1].flags & I2C_M_RD &&
		msgs[num - 1].len <= 0xff;
}

/* I2C device */
static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter,
			       struct i2c_msg *msgs, int num)
{
	struct drm_dp_aux *aux = adapter->algo_data;
	struct drm_dp_mst_port *port =
		container_of(aux, struct drm_dp_mst_port, aux);
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
	unsigned int i;
	struct drm_dp_sideband_msg_req_body msg;
	struct drm_dp_sideband_msg_tx *txmsg = NULL;
	int ret;

	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
	if (!mstb)
		return -EREMOTEIO;

	if (!remote_i2c_read_ok(msgs, num)) {
		DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
		ret = -EIO;
		goto out;
	}

	memset(&msg, 0, sizeof(msg));
	msg.req_type = DP_REMOTE_I2C_READ;
	msg.u.i2c_read.num_transactions = num - 1;
	msg.u.i2c_read.port_number = port->port_num;
	for (i = 0; i < num - 1; i++) {
		msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
		msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
		msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
		msg.u.i2c_read.transactions[i].no_stop_bit =
			!(msgs[i].flags & I2C_M_STOP);
	}
	msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
	msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto out;
	}

	txmsg->dst = mstb;
	drm_dp_encode_sideband_req(&msg, txmsg);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
			ret = -EREMOTEIO;
			goto out;
		}
		if (txmsg->reply.u.remote_i2c_read_ack.num_bytes !=
		    msgs[num - 1].len) {
			ret = -EIO;
			goto out;
		}
		memcpy(msgs[num - 1].buf,
		       txmsg->reply.u.remote_i2c_read_ack.bytes,
		       msgs[num - 1].len);
		ret = num;
	}
out:
	kfree(txmsg);
	drm_dp_mst_topology_put_mstb(mstb);
	return ret;
}

static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
	       I2C_FUNC_SMBUS_READ_BLOCK_DATA |
	       I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
	       I2C_FUNC_10BIT_ADDR;
}

static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
	.functionality = drm_dp_mst_i2c_functionality,
	.master_xfer = drm_dp_mst_i2c_xfer,
};

/**
 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
 * @aux: DisplayPort AUX channel
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
{
	aux->ddc.algo = &drm_dp_mst_i2c_algo;
	aux->ddc.algo_data = aux;
	aux->ddc.retries = 3;

	aux->ddc.class = I2C_CLASS_DDC;
	aux->ddc.owner = THIS_MODULE;
	aux->ddc.dev.parent = aux->dev;
	aux->ddc.dev.of_node = aux->dev->of_node;

	strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
		sizeof(aux->ddc.name));

	return i2c_add_adapter(&aux->ddc);
}

/**
 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
 * @aux: DisplayPort AUX channel
 */
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
{
	i2c_del_adapter(&aux->ddc);
}