#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/seq_file.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fixed.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

/**
 * DOC: dp mst helper
 *
 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
 * protocol. The helpers contain a topology manager and bandwidth manager.
 * The helpers encapsulate the sending and receiving of sideband msgs.
 */
static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf);
static int test_calc_pbn_mode(void);

static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id,
				     struct drm_dp_payload *payload);

static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes);

static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				     struct drm_dp_mst_branch *mstb);
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid);

static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);

#define DP_STR(x) [DP_ ## x] = #x

static const char *drm_dp_mst_req_type_str(u8 req_type)
{
	static const char * const req_type_str[] = {
		DP_STR(GET_MSG_TRANSACTION_VERSION),
		DP_STR(LINK_ADDRESS),
		DP_STR(CONNECTION_STATUS_NOTIFY),
		DP_STR(ENUM_PATH_RESOURCES),
		DP_STR(ALLOCATE_PAYLOAD),
		DP_STR(QUERY_PAYLOAD),
		DP_STR(RESOURCE_STATUS_NOTIFY),
		DP_STR(CLEAR_PAYLOAD_ID_TABLE),
		DP_STR(REMOTE_DPCD_READ),
		DP_STR(REMOTE_DPCD_WRITE),
		DP_STR(REMOTE_I2C_READ),
		DP_STR(REMOTE_I2C_WRITE),
		DP_STR(POWER_UP_PHY),
		DP_STR(POWER_DOWN_PHY),
		DP_STR(SINK_EVENT_NOTIFY),
		DP_STR(QUERY_STREAM_ENC_STATUS),
	};

	if (req_type >= ARRAY_SIZE(req_type_str) ||
	    !req_type_str[req_type])
		return "unknown";

	return req_type_str[req_type];
}

#undef DP_STR
#define DP_STR(x) [DP_NAK_ ## x] = #x

static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)
{
	static const char * const nak_reason_str[] = {
		DP_STR(WRITE_FAILURE),
		DP_STR(INVALID_READ),
		DP_STR(CRC_FAILURE),
		DP_STR(BAD_PARAM),
		DP_STR(DEFER),
		DP_STR(LINK_FAILURE),
		DP_STR(NO_RESOURCES),
		DP_STR(DPCD_FAIL),
		DP_STR(I2C_NAK),
		DP_STR(ALLOCATE_FAIL),
	};

	if (nak_reason >= ARRAY_SIZE(nak_reason_str) ||
	    !nak_reason_str[nak_reason])
		return "unknown";

	return nak_reason_str[nak_reason];
}

#undef DP_STR

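/*
 * Sideband message CRCs, as implemented below: headers carry a 4-bit CRC
 * computed over nibbles with polynomial 0x13 (x^4 + x + 1), and message
 * bodies carry an 8-bit CRC computed over bytes with polynomial 0xd5. Both
 * helpers are plain bit-serial long division: shift message bits into the
 * remainder, XOR in the polynomial whenever the remainder overflows, then
 * flush with one final zero-padded round of polynomial width.
 */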
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = num_nibbles * 4;
	u8 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x10) == 0x10)
			remainder ^= 0x13;
	}

	number_of_bits = 4;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x10) != 0)
			remainder ^= 0x13;
	}

	return remainder;
}

static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = number_of_bytes * 8;
	u16 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x100) == 0x100)
			remainder ^= 0xd5;
	}

	number_of_bits = 8;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x100) != 0)
			remainder ^= 0xd5;
	}

	return remainder & 0xff;
}
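
/*
 * A sideband message header is three fixed bytes (LCT/LCR, then
 * broadcast/path_msg/msg_len, then SOMT/EOMT/seqno plus the CRC nibble) and
 * one Relative Address byte per two hops, which is the lct / 2 term below.
 */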
static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
{
	u8 size = 3;
	size += (hdr->lct / 2);
	return size;
}

static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int *len)
{
	int idx = 0;
	int i;
	u8 crc4;
	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
	for (i = 0; i < (hdr->lct / 2); i++)
		buf[idx++] = hdr->rad[i];
	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
		(hdr->msg_len & 0x3f);
	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);

	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
	buf[idx - 1] |= (crc4 & 0xf);

	*len = idx;
}

static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int buflen, u8 *hdrlen)
{
	u8 crc4;
	u8 len;
	int i;
	u8 idx;
	if (buf[0] == 0)
		return false;
	len = 3;
	len += ((buf[0] & 0xf0) >> 4) / 2;
	if (len > buflen)
		return false;
	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);

	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
		DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
		return false;
	}

	hdr->lct = (buf[0] & 0xf0) >> 4;
	hdr->lcr = (buf[0] & 0xf);
	idx = 1;
	for (i = 0; i < (hdr->lct / 2); i++)
		hdr->rad[i] = buf[idx++];
	hdr->broadcast = (buf[idx] >> 7) & 0x1;
	hdr->path_msg = (buf[idx] >> 6) & 0x1;
	hdr->msg_len = buf[idx] & 0x3f;
	idx++;
	hdr->somt = (buf[idx] >> 7) & 0x1;
	hdr->eomt = (buf[idx] >> 6) & 0x1;
	hdr->seqno = (buf[idx] >> 4) & 0x1;
	idx++;
	*hdrlen = idx;
	return true;
}

static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
				       struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	int i;
	u8 *buf = raw->msg;
	buf[idx++] = req->req_type & 0x7f;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
		idx++;
		break;
	case DP_ALLOCATE_PAYLOAD:
		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
			(req->u.allocate_payload.number_sdp_streams & 0xf);
		idx++;
		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn >> 8);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
		idx++;
		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
				(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
			idx++;
		}
		if (req->u.allocate_payload.number_sdp_streams & 1) {
			i = req->u.allocate_payload.number_sdp_streams - 1;
			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
			idx++;
		}
		break;
	case DP_QUERY_PAYLOAD:
		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
		idx++;
		break;
	case DP_REMOTE_DPCD_READ:
		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_read.num_bytes);
		idx++;
		break;

	case DP_REMOTE_DPCD_WRITE:
		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
		idx += req->u.dpcd_write.num_bytes;
		break;
	case DP_REMOTE_I2C_READ:
		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
		idx++;
		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
			idx++;
			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
			idx++;
			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
			idx += req->u.i2c_read.transactions[i].num_bytes;

			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
			idx++;
		}
		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_read.num_bytes_read);
		idx++;
		break;

	case DP_REMOTE_I2C_WRITE:
		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
		idx += req->u.i2c_write.num_bytes;
		break;

	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
		idx++;
		break;
	}
	raw->cur_len = idx;
}

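/*
 * Appends the data CRC byte at msg[len], so callers must size their buffers
 * for len + 1 bytes (see the chunk handling in process_single_tx_qlock()).
 */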
static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
{
	u8 crc4;
	crc4 = drm_dp_msg_data_crc4(msg, len);
	msg[len] = crc4;
}

static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
					 struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	u8 *buf = raw->msg;

	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);

	raw->cur_len = idx;
}

static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
				      u8 *replybuf, u8 replybuflen, bool hdr)
{
	int ret;
	u8 crc4;

	if (hdr) {
		u8 hdrlen;
		struct drm_dp_sideband_msg_hdr recv_hdr;
		ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
		if (ret == false) {
			print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
			return false;
		}

		/*
		 * ignore out-of-order messages or messages that are part of
		 * a failed transaction
		 */
		if (!recv_hdr.somt && !msg->have_somt)
			return false;

		/* get length contained in this portion */
		msg->curchunk_len = recv_hdr.msg_len;
		msg->curchunk_hdrlen = hdrlen;

		/* we have already gotten an somt - don't bother parsing */
		if (recv_hdr.somt && msg->have_somt)
			return false;

		if (recv_hdr.somt) {
			memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
			msg->have_somt = true;
		}
		if (recv_hdr.eomt)
			msg->have_eomt = true;

		/* copy the bytes for the remainder of this header chunk */
		msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
		memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
	} else {
		memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
		msg->curchunk_idx += replybuflen;
	}

	if (msg->curchunk_idx >= msg->curchunk_len) {
		/* do CRC (note: the computed CRC is not checked here yet) */
		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
		/* copy chunk into bigger msg */
		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
		msg->curlen += msg->curchunk_len - 1;
	}
	return true;
}

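/*
 * The reply parsers below all start at idx = 1, since byte 0 of a reply is
 * the reply_type/request_type byte already decoded by
 * drm_dp_sideband_parse_reply(). Each read past the first byte is
 * bounds-checked against raw->curlen so a truncated reply fails cleanly
 * instead of indexing past the received data.
 */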
static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
					       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	int i;
	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
	idx += 16;
	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
		if (raw->msg[idx] & 0x80)
			repmsg->u.link_addr.ports[i].input_port = 1;

		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);

		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
		if (repmsg->u.link_addr.ports[i].input_port == 0)
			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		if (repmsg->u.link_addr.ports[i].input_port == 0) {
			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
			idx++;
			if (idx > raw->curlen)
				goto fail_len;
			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
			idx += 16;
			if (idx > raw->curlen)
				goto fail_len;
			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
			idx++;
		}
		if (idx > raw->curlen)
			goto fail_len;
	}

	return true;
fail_len:
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
						   struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote dpcd read reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
						      struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
	idx++;
	/* TODO: bounds-check num_bytes against the message length */
	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
							  struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen) {
		DRM_DEBUG_KMS("power up/down phy parse length fail %d %d\n",
			      idx, raw->curlen);
		return false;
	}
	return true;
}

static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
					struct drm_dp_sideband_msg_reply_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
	msg->req_type = (raw->msg[0] & 0x7f);

	if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) {
		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
		msg->u.nak.reason = raw->msg[17];
		msg->u.nak.nak_data = raw->msg[18];
		return false;
	}

	switch (msg->req_type) {
	case DP_LINK_ADDRESS:
		return drm_dp_sideband_parse_link_address(raw, msg);
	case DP_QUERY_PAYLOAD:
		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
	case DP_REMOTE_DPCD_READ:
		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
	case DP_REMOTE_DPCD_WRITE:
		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
	case DP_REMOTE_I2C_READ:
		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
	case DP_ENUM_PATH_RESOURCES:
		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
	case DP_ALLOCATE_PAYLOAD:
		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
	default:
		DRM_ERROR("Got unknown reply 0x%02x (%s)\n", msg->req_type,
			  drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}

static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
							   struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
							 struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
				      struct drm_dp_sideband_msg_req_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->req_type = (raw->msg[0] & 0x7f);

	switch (msg->req_type) {
	case DP_CONNECTION_STATUS_NOTIFY:
		return drm_dp_sideband_parse_connection_status_notify(raw, msg);
	case DP_RESOURCE_STATUS_NOTIFY:
		return drm_dp_sideband_parse_resource_status_notify(raw, msg);
	default:
		DRM_ERROR("Got unknown request 0x%02x (%s)\n", msg->req_type,
			  drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}

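/*
 * The build_* helpers below fill out a struct drm_dp_sideband_msg_req_body
 * and serialize it into the tx message with drm_dp_encode_sideband_req().
 * Requests that set msg->path_msg get the Path_Msg bit set in their sideband
 * header; per the MST portions of the DP spec, such messages are meant to be
 * handled by each branch device along the path rather than only the final
 * hop.
 */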
static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_WRITE;
	req.u.dpcd_write.port_number = port_num;
	req.u.dpcd_write.dpcd_address = offset;
	req.u.dpcd_write.num_bytes = num_bytes;
	req.u.dpcd_write.bytes = bytes;
	drm_dp_encode_sideband_req(&req, msg);

	return 0;
}

static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_LINK_ADDRESS;
	drm_dp_encode_sideband_req(&req, msg);
	return 0;
}

static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_ENUM_PATH_RESOURCES;
	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
				  u8 vcpi, uint16_t pbn,
				  u8 number_sdp_streams,
				  u8 *sdp_stream_sink)
{
	struct drm_dp_sideband_msg_req_body req;
	memset(&req, 0, sizeof(req));
	req.req_type = DP_ALLOCATE_PAYLOAD;
	req.u.allocate_payload.port_number = port_num;
	req.u.allocate_payload.vcpi = vcpi;
	req.u.allocate_payload.pbn = pbn;
	req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
	memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
	       number_sdp_streams);
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static int build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
				  int port_num, bool power_up)
{
	struct drm_dp_sideband_msg_req_body req;

	if (power_up)
		req.req_type = DP_POWER_UP_PHY;
	else
		req.req_type = DP_POWER_DOWN_PHY;

	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

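/*
 * Payload/VCPI ID allocation. Assuming bit 0 of payload_mask is kept set by
 * the manager (payload ID 0 is never handed out here), find_first_zero_bit()
 * returns payload IDs starting at 1, which is why proposed_vcpis[] is
 * indexed with ret - 1. VCPI numbers are stored 1-based (vcpi_ret + 1) so
 * that a vcpi of 0 can mean "unallocated".
 */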
static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_vcpi *vcpi)
{
	int ret, vcpi_ret;

	mutex_lock(&mgr->payload_lock);
	ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
	if (ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of payload ids %d\n", ret);
		goto out_unlock;
	}

	vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
	if (vcpi_ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);
		goto out_unlock;
	}

	set_bit(ret, &mgr->payload_mask);
	set_bit(vcpi_ret, &mgr->vcpi_mask);
	vcpi->vcpi = vcpi_ret + 1;
	mgr->proposed_vcpis[ret - 1] = vcpi;
out_unlock:
	mutex_unlock(&mgr->payload_lock);
	return ret;
}

static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
				      int vcpi)
{
	int i;
	if (vcpi == 0)
		return;

	mutex_lock(&mgr->payload_lock);
	DRM_DEBUG_KMS("putting payload %d\n", vcpi);
	clear_bit(vcpi - 1, &mgr->vcpi_mask);

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->proposed_vcpis[i])
			if (mgr->proposed_vcpis[i]->vcpi == vcpi) {
				mgr->proposed_vcpis[i] = NULL;
				clear_bit(i + 1, &mgr->payload_mask);
			}
	}
	mutex_unlock(&mgr->payload_lock);
}

static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_sideband_msg_tx *txmsg)
{
	unsigned int state;

	/*
	 * All updates to txmsg->state are protected by mgr->qlock, and the
	 * two cases we check here are terminal states. For those the
	 * barriers provided by the wake_up/wait_event pair are enough.
	 */
	state = READ_ONCE(txmsg->state);
	return (state == DRM_DP_SIDEBAND_TX_RX ||
		state == DRM_DP_SIDEBAND_TX_TIMEOUT);
}

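/*
 * Waits up to four seconds for @txmsg to reach a terminal state. On timeout
 * the message is unlinked from the down queue and/or its tx slot under
 * qlock, and -EIO is returned; a message whose state is already
 * DRM_DP_SIDEBAND_TX_TIMEOUT is reported as -EIO as well.
 */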
static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
				    struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
	int ret;

	ret = wait_event_timeout(mgr->tx_waitq,
				 check_txmsg_state(mgr, txmsg),
				 (4 * HZ));
	mutex_lock(&mstb->mgr->qlock);
	if (ret > 0) {
		if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
			ret = -EIO;
			goto out;
		}
	} else {
		DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);

		/* dump some state */
		ret = -EIO;

		/* remove from q */
		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
			list_del(&txmsg->next);
		}

		if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
			mstb->tx_slots[txmsg->seqno] = NULL;
		}
	}
out:
	mutex_unlock(&mgr->qlock);

	return ret;
}

static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;

	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
	if (!mstb)
		return NULL;

	mstb->lct = lct;
	if (lct > 1)
		memcpy(mstb->rad, rad, lct / 2);
	INIT_LIST_HEAD(&mstb->ports);
	kref_init(&mstb->topology_kref);
	kref_init(&mstb->malloc_kref);
	return mstb;
}

static void drm_dp_free_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb =
		container_of(kref, struct drm_dp_mst_branch, malloc_kref);

	if (mstb->port_parent)
		drm_dp_mst_put_port_malloc(mstb->port_parent);

	kfree(mstb);
}

/**
 * DOC: Branch device and port refcounting
 *
 * Branch devices (&struct drm_dp_mst_branch) and ports
 * (&struct drm_dp_mst_port) carry two separate reference counts:
 *
 * - A topology refcount (&drm_dp_mst_branch.topology_kref,
 *   &drm_dp_mst_port.topology_kref), which tracks whether the device is
 *   still reachable in the MST topology. When it drops to zero the device is
 *   unlinked from the topology and its resources are torn down; see
 *   drm_dp_destroy_mst_branch_device() and drm_dp_destroy_port(). Since a
 *   device can vanish from the topology at any time, topology references
 *   must normally be acquired with the try_get variants
 *   (drm_dp_mst_topology_try_get_mstb() and
 *   drm_dp_mst_topology_try_get_port()), which fail once the count has
 *   already hit zero. The plain get variants may only be used while another
 *   topology reference is known to be held.
 *
 * - A malloc refcount (&drm_dp_mst_branch.malloc_kref,
 *   &drm_dp_mst_port.malloc_kref), which keeps the memory allocation itself
 *   alive so that drivers can safely hold pointers to ports and branch
 *   devices that have already left the topology. Only when this count
 *   reaches zero is the structure actually freed; see
 *   drm_dp_free_mst_branch_device() and drm_dp_free_mst_port().
 */

/**
 * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of
 */
static void
drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	kref_get(&mstb->malloc_kref);
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
}

/**
 * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of
 *
 * When the malloc refcount reaches 0, the memory backing @mstb is freed.
 */
static void
drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
	kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device);
}

static void drm_dp_free_mst_port(struct kref *kref)
{
	struct drm_dp_mst_port *port =
		container_of(kref, struct drm_dp_mst_port, malloc_kref);

	drm_dp_mst_put_mstb_malloc(port->parent);
	kfree(port);
}

/**
 * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
 * @port: The &struct drm_dp_mst_port to increment the malloc refcount of
 *
 * Keeps the memory allocation backing @port valid even after it has been
 * removed from the topology, so drivers may safely keep their own pointers
 * to it. Must be balanced with a call to drm_dp_mst_put_port_malloc().
 */
void
drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port)
{
	kref_get(&port->malloc_kref);
	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref));
}
EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);

/**
 * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
 * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of
 *
 * When the malloc refcount reaches 0, the memory backing @port is freed.
 */
void
drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
{
	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1);
	kref_put(&port->malloc_kref, drm_dp_free_mst_port);
}
EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);

static void drm_dp_destroy_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb =
		container_of(kref, struct drm_dp_mst_branch, topology_kref);
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
	struct drm_dp_mst_port *port, *tmp;
	bool wake_tx = false;

	mutex_lock(&mgr->lock);
	list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
		list_del(&port->next);
		drm_dp_mst_topology_put_port(port);
	}
	mutex_unlock(&mgr->lock);

	/* drop any tx slot msg */
	mutex_lock(&mstb->mgr->qlock);
	if (mstb->tx_slots[0]) {
		mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		mstb->tx_slots[0] = NULL;
		wake_tx = true;
	}
	if (mstb->tx_slots[1]) {
		mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		mstb->tx_slots[1] = NULL;
		wake_tx = true;
	}
	mutex_unlock(&mstb->mgr->qlock);

	if (wake_tx)
		wake_up_all(&mstb->mgr->tx_waitq);

	drm_dp_mst_put_mstb_malloc(mstb);
}

/**
 * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
 * branch device unless it's zero
 * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
 *
 * Attempts to grab a topology reference to @mstb, failing if @mstb has
 * already been removed from the topology (i.e. its
 * &drm_dp_mst_branch.topology_kref has reached 0).
 *
 * Returns:
 * * 1: A topology reference was grabbed successfully
 * * 0: @mstb is no longer in the topology, no reference was grabbed
 */
static int __must_check
drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
{
	int ret = kref_get_unless_zero(&mstb->topology_kref);

	if (ret)
		DRM_DEBUG("mstb %p (%d)\n", mstb,
			  kref_read(&mstb->topology_kref));

	return ret;
}

/**
 * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
 * branch device
 * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
 *
 * May only be called while another topology reference to @mstb is already
 * held; otherwise use drm_dp_mst_topology_try_get_mstb().
 */
static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
{
	WARN_ON(kref_read(&mstb->topology_kref) == 0);
	kref_get(&mstb->topology_kref);
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
}

/**
 * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to release the topology reference of
 *
 * When the last topology reference is dropped, @mstb is removed from the
 * topology and torn down; see drm_dp_destroy_mst_branch_device().
 */
static void
drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
{
	DRM_DEBUG("mstb %p (%d)\n",
		  mstb, kref_read(&mstb->topology_kref) - 1);
	kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
}

static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
{
	struct drm_dp_mst_branch *mstb;

	switch (old_pdt) {
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		/* remove i2c over sideband */
		drm_dp_mst_unregister_i2c_bus(&port->aux);
		break;
	case DP_PEER_DEVICE_MST_BRANCHING:
		mstb = port->mstb;
		port->mstb = NULL;
		drm_dp_mst_topology_put_mstb(mstb);
		break;
	}
}

static void drm_dp_destroy_port(struct kref *kref)
{
	struct drm_dp_mst_port *port =
		container_of(kref, struct drm_dp_mst_port, topology_kref);
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;

	if (!port->input) {
		kfree(port->cached_edid);

		/*
		 * The only time we don't have a connector on an output port
		 * is if the connector init fails.
		 */
		if (port->connector) {
			/*
			 * We can't destroy the connector here, as we might
			 * be holding the mode_config.mutex from an EDID
			 * retrieval.
			 */
			mutex_lock(&mgr->destroy_connector_lock);
			list_add(&port->next, &mgr->destroy_connector_list);
			mutex_unlock(&mgr->destroy_connector_lock);
			schedule_work(&mgr->destroy_connector_work);
			return;
		}

		/* no need to clean up the vcpi here - with no connector we
		 * never set one up */
		drm_dp_port_teardown_pdt(port, port->pdt);
		port->pdt = DP_PEER_DEVICE_NONE;
	}
	drm_dp_mst_put_port_malloc(port);
}

/**
 * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
 * port unless it's zero
 * @port: &struct drm_dp_mst_port to increment the topology refcount of
 *
 * Attempts to grab a topology reference to @port, failing if @port has
 * already been removed from the topology (i.e. its
 * &drm_dp_mst_port.topology_kref has reached 0).
 *
 * Returns:
 * * 1: A topology reference was grabbed successfully
 * * 0: @port is no longer in the topology, no reference was grabbed
 */
static int __must_check
drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
{
	int ret = kref_get_unless_zero(&port->topology_kref);

	if (ret)
		DRM_DEBUG("port %p (%d)\n", port,
			  kref_read(&port->topology_kref));

	return ret;
}

/**
 * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
 * @port: The &struct drm_dp_mst_port to increment the topology refcount of
 *
 * May only be called while another topology reference to @port is already
 * held; otherwise use drm_dp_mst_topology_try_get_port().
 */
static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
{
	WARN_ON(kref_read(&port->topology_kref) == 0);
	kref_get(&port->topology_kref);
	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref));
}

/**
 * drm_dp_mst_topology_put_port() - release a topology reference to a port
 * @port: The &struct drm_dp_mst_port to release the topology reference of
 *
 * When the last topology reference is dropped, @port is removed from the
 * topology and torn down; see drm_dp_destroy_port().
 */
static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
{
	DRM_DEBUG("port %p (%d)\n",
		  port, kref_read(&port->topology_kref) - 1);
	kref_put(&port->topology_kref, drm_dp_destroy_port);
}

static struct drm_dp_mst_branch *
drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb,
					      struct drm_dp_mst_branch *to_find)
{
	struct drm_dp_mst_port *port;
	struct drm_dp_mst_branch *rmstb;

	if (to_find == mstb)
		return mstb;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->mstb) {
			rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
			    port->mstb, to_find);
			if (rmstb)
				return rmstb;
		}
	}
	return NULL;
}

static struct drm_dp_mst_branch *
drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_branch *rmstb = NULL;

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
		    mgr->mst_primary, mstb);

		if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb))
			rmstb = NULL;
	}
	mutex_unlock(&mgr->lock);
	return rmstb;
}

static struct drm_dp_mst_port *
drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb,
					      struct drm_dp_mst_port *to_find)
{
	struct drm_dp_mst_port *port, *mport;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port == to_find)
			return port;

		if (port->mstb) {
			mport = drm_dp_mst_topology_get_port_validated_locked(
			    port->mstb, to_find);
			if (mport)
				return mport;
		}
	}
	return NULL;
}

static struct drm_dp_mst_port *
drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_port *rport = NULL;

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		rport = drm_dp_mst_topology_get_port_validated_locked(
		    mgr->mst_primary, port);

		if (rport && !drm_dp_mst_topology_try_get_port(rport))
			rport = NULL;
	}
	mutex_unlock(&mgr->lock);
	return rport;
}

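/*
 * Looks up a port by number on @mstb. On success, the port is returned with
 * a topology reference held, which the caller must drop with
 * drm_dp_mst_topology_put_port().
 */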
static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
{
	struct drm_dp_mst_port *port;
	int ret;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->port_num == port_num) {
			ret = drm_dp_mst_topology_try_get_port(port);
			return ret ? port : NULL;
		}
	}

	return NULL;
}

/*
 * calculate a new RAD for this MST branch device
 * if parent has an LCT of 2 then it has 1 nibble of RAD,
 * if parent has an LCT of 3 then it has 2 nibbles of RAD, and so on.
 */
static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
			       u8 *rad)
{
	int parent_lct = port->parent->lct;
	int shift = 4;
	int idx = (parent_lct - 1) / 2;
	if (parent_lct > 1) {
		memcpy(rad, port->parent->rad, idx + 1);
		shift = (parent_lct % 2) ? 4 : 0;
	} else
		rad[0] = 0;

	rad[idx] |= port->port_num << shift;
	return parent_lct + 1;
}

/*
 * returns true if the caller should send a LINK_ADDRESS to the new branch
 * device created for this port
 */
static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
{
	int ret;
	u8 rad[6], lct;
	bool send_link = false;
	switch (port->pdt) {
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		/* add i2c over sideband */
		ret = drm_dp_mst_register_i2c_bus(&port->aux);
		break;
	case DP_PEER_DEVICE_MST_BRANCHING:
		lct = drm_dp_calculate_rad(port, rad);

		port->mstb = drm_dp_add_mst_branch_device(lct, rad);
		if (port->mstb) {
			port->mstb->mgr = port->mgr;
			port->mstb->port_parent = port;
			/*
			 * Make sure this port's memory allocation stays
			 * around until its child MSTB releases it
			 */
			drm_dp_mst_get_port_malloc(port);

			send_link = true;
		}
		break;
	}
	return send_link;
}

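/*
 * If the branch device reported an all-zero GUID, drm_dp_validate_guid()
 * fabricates one from jiffies and this helper writes it back to the device's
 * DP_GUID DPCD registers - via a remote sideband DPCD write for nested
 * branch devices, or directly over AUX for the primary branch device.
 */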
static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
{
	int ret;

	memcpy(mstb->guid, guid, 16);

	if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
		if (mstb->port_parent) {
			ret = drm_dp_send_dpcd_write(
					mstb->mgr,
					mstb->port_parent,
					DP_GUID,
					16,
					mstb->guid);
		} else {
			ret = drm_dp_dpcd_write(
					mstb->mgr->aux,
					DP_GUID,
					mstb->guid,
					16);
		}
	}
}

static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
				int pnum,
				char *proppath,
				size_t proppath_size)
{
	int i;
	char temp[8];
	snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
	for (i = 0; i < (mstb->lct - 1); i++) {
		int shift = (i % 2) ? 0 : 4;
		int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
		snprintf(temp, sizeof(temp), "-%d", port_num);
		strlcat(proppath, temp, proppath_size);
	}
	snprintf(temp, sizeof(temp), "-%d", pnum);
	strlcat(proppath, temp, proppath_size);
}

static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
			    struct drm_device *dev,
			    struct drm_dp_link_addr_reply_port *port_msg)
{
	struct drm_dp_mst_port *port;
	bool ret;
	bool created = false;
	int old_pdt = 0;
	int old_ddps = 0;

	port = drm_dp_get_port(mstb, port_msg->port_number);
	if (!port) {
		port = kzalloc(sizeof(*port), GFP_KERNEL);
		if (!port)
			return;
		kref_init(&port->topology_kref);
		kref_init(&port->malloc_kref);
		port->parent = mstb;
		port->port_num = port_msg->port_number;
		port->mgr = mstb->mgr;
		port->aux.name = "DPMST";
		port->aux.dev = dev->dev;

		/*
		 * Make sure the memory allocation for our parent branch
		 * stays around until our own memory allocation is released
		 */
		drm_dp_mst_get_mstb_malloc(mstb);

		created = true;
	} else {
		old_pdt = port->pdt;
		old_ddps = port->ddps;
	}

	port->pdt = port_msg->peer_device_type;
	port->input = port_msg->input_port;
	port->mcs = port_msg->mcs;
	port->ddps = port_msg->ddps;
	port->ldps = port_msg->legacy_device_plug_status;
	port->dpcd_rev = port_msg->dpcd_revision;
	port->num_sdp_streams = port_msg->num_sdp_streams;
	port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;

	/*
	 * we need to manage the mstb port list with the mgr lock - take a
	 * reference for this list
	 */
	if (created) {
		mutex_lock(&mstb->mgr->lock);
		drm_dp_mst_topology_get_port(port);
		list_add(&port->next, &mstb->ports);
		mutex_unlock(&mstb->mgr->lock);
	}

	if (old_ddps != port->ddps) {
		if (port->ddps) {
			if (!port->input) {
				drm_dp_send_enum_path_resources(mstb->mgr,
								mstb, port);
			}
		} else {
			port->available_pbn = 0;
		}
	}

	if (old_pdt != port->pdt && !port->input) {
		drm_dp_port_teardown_pdt(port, old_pdt);

		ret = drm_dp_port_setup_pdt(port);
		if (ret == true)
			drm_dp_send_link_address(mstb->mgr, port->mstb);
	}

	if (created && !port->input) {
		char proppath[255];

		build_mst_prop_path(mstb, port->port_num, proppath,
				    sizeof(proppath));
		port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr,
								   port,
								   proppath);
		if (!port->connector) {
			/* remove it from the port list */
			mutex_lock(&mstb->mgr->lock);
			list_del(&port->next);
			mutex_unlock(&mstb->mgr->lock);
			/* drop port list reference */
			drm_dp_mst_topology_put_port(port);
			goto out;
		}
		if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
		     port->pdt == DP_PEER_DEVICE_SST_SINK) &&
		    port->port_num >= DP_MST_LOGICAL_PORT_0) {
			port->cached_edid = drm_get_edid(port->connector,
							 &port->aux.ddc);
			drm_connector_set_tile_property(port->connector);
		}
		(*mstb->mgr->cbs->register_connector)(port->connector);
	}

out:
	/* put reference to this port */
	drm_dp_mst_topology_put_port(port);
}

static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
			       struct drm_dp_connection_status_notify *conn_stat)
{
	struct drm_dp_mst_port *port;
	int old_pdt;
	int old_ddps;
	bool dowork = false;
	port = drm_dp_get_port(mstb, conn_stat->port_number);
	if (!port)
		return;

	old_ddps = port->ddps;
	old_pdt = port->pdt;
	port->pdt = conn_stat->peer_device_type;
	port->mcs = conn_stat->message_capability_status;
	port->ldps = conn_stat->legacy_device_plug_status;
	port->ddps = conn_stat->displayport_device_plug_status;

	if (old_ddps != port->ddps) {
		if (port->ddps) {
			dowork = true;
		} else {
			port->available_pbn = 0;
		}
	}
	if (old_pdt != port->pdt && !port->input) {
		drm_dp_port_teardown_pdt(port, old_pdt);

		if (drm_dp_port_setup_pdt(port))
			dowork = true;
	}

	drm_dp_mst_topology_put_port(port);
	if (dowork)
		queue_work(system_long_wq, &mstb->mgr->work);
}

static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
							      u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_mst_port *port;
	int i, ret;

	/* find the branch device by walking down from the primary */
	mutex_lock(&mgr->lock);
	mstb = mgr->mst_primary;

	if (!mstb)
		goto out;

	for (i = 0; i < lct - 1; i++) {
		int shift = (i % 2) ? 0 : 4;
		int port_num = (rad[i / 2] >> shift) & 0xf;

		list_for_each_entry(port, &mstb->ports, next) {
			if (port->port_num == port_num) {
				mstb = port->mstb;
				if (!mstb) {
					DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
					goto out;
				}

				break;
			}
		}
	}
	ret = drm_dp_mst_topology_try_get_mstb(mstb);
	if (!ret)
		mstb = NULL;
out:
	mutex_unlock(&mgr->lock);
	return mstb;
}

static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
	struct drm_dp_mst_branch *mstb,
	uint8_t *guid)
{
	struct drm_dp_mst_branch *found_mstb;
	struct drm_dp_mst_port *port;

	if (memcmp(mstb->guid, guid, 16) == 0)
		return mstb;

	list_for_each_entry(port, &mstb->ports, next) {
		if (!port->mstb)
			continue;

		found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);

		if (found_mstb)
			return found_mstb;
	}

	return NULL;
}

static struct drm_dp_mst_branch *
drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
				     uint8_t *guid)
{
	struct drm_dp_mst_branch *mstb;
	int ret;

	/* find the branch device by walking down from the primary */
	mutex_lock(&mgr->lock);

	mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
	if (mstb) {
		ret = drm_dp_mst_topology_try_get_mstb(mstb);
		if (!ret)
			mstb = NULL;
	}

	mutex_unlock(&mgr->lock);
	return mstb;
}

static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
					       struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_port *port;
	struct drm_dp_mst_branch *mstb_child;
	if (!mstb->link_address_sent)
		drm_dp_send_link_address(mgr, mstb);

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->input)
			continue;

		if (!port->ddps)
			continue;

		if (!port->available_pbn)
			drm_dp_send_enum_path_resources(mgr, mstb, port);

		if (port->mstb) {
			mstb_child = drm_dp_mst_topology_get_mstb_validated(
			    mgr, port->mstb);
			if (mstb_child) {
				drm_dp_check_and_send_link_address(mgr, mstb_child);
				drm_dp_mst_topology_put_mstb(mstb_child);
			}
		}
	}
}

static void drm_dp_mst_link_probe_work(struct work_struct *work)
{
	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
	struct drm_dp_mst_branch *mstb;
	int ret;

	mutex_lock(&mgr->lock);
	mstb = mgr->mst_primary;
	if (mstb) {
		ret = drm_dp_mst_topology_try_get_mstb(mstb);
		if (!ret)
			mstb = NULL;
	}
	mutex_unlock(&mgr->lock);
	if (mstb) {
		drm_dp_check_and_send_link_address(mgr, mstb);
		drm_dp_mst_topology_put_mstb(mstb);
	}
}

static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid)
{
	u64 salt;

	if (memchr_inv(guid, 0, 16))
		return true;

	salt = get_jiffies_64();

	memcpy(&guid[0], &salt, sizeof(u64));
	memcpy(&guid[8], &salt, sizeof(u64));

	return false;
}

#if 0
static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_READ;
	req.u.dpcd_read.port_number = port_num;
	req.u.dpcd_read.dpcd_address = offset;
	req.u.dpcd_read.num_bytes = num_bytes;
	drm_dp_encode_sideband_req(&req, msg);

	return 0;
}
#endif

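/*
 * Writes one sideband message chunk into the DOWN_REQ (or, for replies to up
 * requests, the UP_REP) DPCD message box. Individual DPCD writes are capped
 * at min(max_dpcd_transaction_bytes, 16) bytes; an -EIO restarts the whole
 * chunk from the beginning, up to five times.
 */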
static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
				    bool up, u8 *msg, int len)
{
	int ret;
	int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
	int tosend, total, offset;
	int retries = 0;

retry:
	total = len;
	offset = 0;
	do {
		tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);

		ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
					&msg[offset],
					tosend);
		if (ret != tosend) {
			if (ret == -EIO && retries < 5) {
				retries++;
				goto retry;
			}
			DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);

			return -EIO;
		}
		offset += tosend;
		total -= tosend;
	} while (total > 0);
	return 0;
}

static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
				  struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_branch *mstb = txmsg->dst;
	u8 req_type;

	/* pick a free tx slot and seqno if the caller hasn't assigned one */
	if (txmsg->seqno == -1) {
		if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
			DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
			return -EAGAIN;
		}
		if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
			txmsg->seqno = mstb->last_seqno;
			mstb->last_seqno ^= 1;
		} else if (mstb->tx_slots[0] == NULL)
			txmsg->seqno = 0;
		else
			txmsg->seqno = 1;
		mstb->tx_slots[txmsg->seqno] = txmsg;
	}

	req_type = txmsg->msg[0] & 0x7f;
	if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
	    req_type == DP_RESOURCE_STATUS_NOTIFY)
		hdr->broadcast = 1;
	else
		hdr->broadcast = 0;
	hdr->path_msg = txmsg->path_msg;
	hdr->lct = mstb->lct;
	hdr->lcr = mstb->lct - 1;
	if (mstb->lct > 1)
		memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
	hdr->seqno = txmsg->seqno;
	return 0;
}

static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_sideband_msg_tx *txmsg,
				   bool up)
{
	u8 chunk[48];
	struct drm_dp_sideband_msg_hdr hdr;
	int len, space, idx, tosend;
	int ret;

	memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));

	if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
		txmsg->seqno = -1;
		txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
	}

	/* make hdr from dst mst - for replies use seqno
	   otherwise assign one */
	ret = set_hdr_from_dst_qlock(&hdr, txmsg);
	if (ret < 0)
		return ret;

	/* amount left to send in this message */
	len = txmsg->cur_len - txmsg->cur_offset;

	/* 48 - sideband msg size - 1 byte for data CRC, x header bytes */
	space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);

	tosend = min(len, space);
	if (len == txmsg->cur_len)
		hdr.somt = 1;
	if (space >= len)
		hdr.eomt = 1;

	hdr.msg_len = tosend + 1;
	drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
	memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
	/* add crc at end */
	drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
	idx += tosend + 1;

	ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
	if (ret) {
		DRM_DEBUG_KMS("sideband msg failed to send\n");
		return ret;
	}

	txmsg->cur_offset += tosend;
	if (txmsg->cur_offset == txmsg->cur_len) {
		txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
		return 1;
	}
	return 0;
}

static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	WARN_ON(!mutex_is_locked(&mgr->qlock));

	/* construct a chunk from the first msg in the tx_msg queue */
	if (list_empty(&mgr->tx_msg_downq))
		return;

	txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
	ret = process_single_tx_qlock(mgr, txmsg, false);
	if (ret == 1) {
		/* txmsg is sent - it should be in the slots now */
		list_del(&txmsg->next);
	} else if (ret) {
		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
		list_del(&txmsg->next);
		if (txmsg->seqno != -1)
			txmsg->dst->tx_slots[txmsg->seqno] = NULL;
		txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		wake_up_all(&mgr->tx_waitq);
	}
}

static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_sideband_msg_tx *txmsg)
{
	int ret;

	/* construct a chunk from the single up message */
	ret = process_single_tx_qlock(mgr, txmsg, true);

	if (ret != 1)
		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);

	if (txmsg->seqno != -1) {
		WARN_ON((unsigned int)txmsg->seqno >
			ARRAY_SIZE(txmsg->dst->tx_slots));
		txmsg->dst->tx_slots[txmsg->seqno] = NULL;
	}
}

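/*
 * Queues a down request; if the queue was previously empty, transmission of
 * the first chunk starts immediately under qlock, otherwise the message is
 * picked up once the earlier ones in the queue complete.
 */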
static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_sideband_msg_tx *txmsg)
{
	mutex_lock(&mgr->qlock);
	list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
	if (list_is_singular(&mgr->tx_msg_downq))
		process_single_down_tx_qlock(mgr);
	mutex_unlock(&mgr->qlock);
}

static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				     struct drm_dp_mst_branch *mstb)
{
	int len;
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return;

	txmsg->dst = mstb;
	len = build_link_address(txmsg);

	mstb->link_address_sent = true;
	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		int i;

		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
			DRM_DEBUG_KMS("link address nak received\n");
		} else {
			DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports);
			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
				DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i,
					      txmsg->reply.u.link_addr.ports[i].input_port,
					      txmsg->reply.u.link_addr.ports[i].peer_device_type,
					      txmsg->reply.u.link_addr.ports[i].port_number,
					      txmsg->reply.u.link_addr.ports[i].dpcd_revision,
					      txmsg->reply.u.link_addr.ports[i].mcs,
					      txmsg->reply.u.link_addr.ports[i].ddps,
					      txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status,
					      txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
					      txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
			}

			drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);

			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
				drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
			}
			drm_kms_helper_hotplug_event(mgr->dev);
		}
	} else {
		mstb->link_address_sent = false;
		DRM_DEBUG_KMS("link address failed %d\n", ret);
	}

	kfree(txmsg);
}

static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port)
{
	int len;
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return -ENOMEM;

	txmsg->dst = mstb;
	len = build_enum_path_resources(txmsg, port->port_num);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
			DRM_DEBUG_KMS("enum path resources nak received\n");
		} else {
			if (port->port_num != txmsg->reply.u.path_resources.port_number)
				DRM_ERROR("got incorrect port in response\n");
			DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number,
				      txmsg->reply.u.path_resources.avail_payload_bw_number);
			port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number;
		}
	}

	kfree(txmsg);
	return 0;
}

static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
{
	if (!mstb->port_parent)
		return NULL;

	if (mstb->port_parent->mstb != mstb)
		return mstb->port_parent;

	return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
}

/*
 * Searches upwards in the topology starting from mstb to try to find the
 * closest available parent of mstb that's still connected to the rest of the
 * topology. This can be used in order to perform operations like releasing
 * payloads, where the branch device which owned the payload may no longer be
 * around and thus would require that the payload on the last living relative
 * be freed instead.
 */
static struct drm_dp_mst_branch *
drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_mst_branch *mstb,
					int *port_num)
{
	struct drm_dp_mst_branch *rmstb = NULL;
	struct drm_dp_mst_port *found_port;

	mutex_lock(&mgr->lock);
	if (!mgr->mst_primary)
		goto out;

	do {
		found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
		if (!found_port)
			break;

		if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) {
			rmstb = found_port->parent;
			*port_num = found_port->port_num;
		} else {
			/* Search again, starting from this parent */
			mstb = found_port->parent;
		}
	} while (!rmstb);
out:
	mutex_unlock(&mgr->lock);
	return rmstb;
}

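/*
 * Sends an ALLOCATE_PAYLOAD for @id/@pbn on @port. If the port's own parent
 * branch is no longer reachable in the topology (e.g. an unplugged dock),
 * fall back to the last connected ancestor so that payload teardown
 * (pbn == 0, see drm_dp_destroy_payload_step1()) can still be requested.
 */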
static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_mst_port *port,
				   int id,
				   int pbn)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	struct drm_dp_mst_branch *mstb;
	int len, ret, port_num;
	u8 sinks[DRM_DP_MAX_SDP_STREAMS];
	int i;

	port_num = port->port_num;
	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
	if (!mstb) {
		mstb = drm_dp_get_last_connected_port_and_mstb(mgr,
							       port->parent,
							       &port_num);

		if (!mstb)
			return -EINVAL;
	}

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto fail_put;
	}

	for (i = 0; i < port->num_sdp_streams; i++)
		sinks[i] = i;

	txmsg->dst = mstb;
	len = build_allocate_payload(txmsg, port_num,
				     id,
				     pbn, port->num_sdp_streams, sinks);

	drm_dp_queue_down_tx(mgr, txmsg);

	/*
	 * FIXME: there is a small chance that between getting the last
	 * connected mstb and sending the payload message, the last connected
	 * mstb could also be removed from the topology. In the future, this
	 * needs to be fixed by restarting the
	 * drm_dp_get_last_connected_port_and_mstb() search in the event of a
	 * timeout if the topology is still connected to the system.
	 */
	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
			ret = -EINVAL;
		else
			ret = 0;
	}
	kfree(txmsg);
fail_put:
	drm_dp_mst_topology_put_mstb(mstb);
	return ret;
}

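/**
 * drm_dp_send_power_updown_phy() - send a power up/down phy request to an
 * MST port
 * @mgr: MST topology manager to use
 * @port: the port to send the request for
 * @power_up: true to power the phy up, false to power it down
 */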
int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port, bool power_up)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	int len, ret;

	port = drm_dp_mst_topology_get_port_validated(mgr, port);
	if (!port)
		return -EINVAL;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		drm_dp_mst_topology_put_port(port);
		return -ENOMEM;
	}

	txmsg->dst = port->parent;
	len = build_power_updown_phy(txmsg, port->port_num, power_up);
	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
			ret = -EINVAL;
		else
			ret = 0;
	}
	kfree(txmsg);
	drm_dp_mst_topology_put_port(port);

	return ret;
}
EXPORT_SYMBOL(drm_dp_send_power_updown_phy);

2245static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
2246 int id,
2247 struct drm_dp_payload *payload)
2248{
2249 int ret;
2250
2251 ret = drm_dp_dpcd_write_payload(mgr, id, payload);
2252 if (ret < 0) {
2253 payload->payload_state = 0;
2254 return ret;
2255 }
2256 payload->payload_state = DP_PAYLOAD_LOCAL;
2257 return 0;
2258}
2259
2260static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
2261 struct drm_dp_mst_port *port,
2262 int id,
2263 struct drm_dp_payload *payload)
2264{
2265 int ret;
2266 ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
2267 if (ret < 0)
2268 return ret;
2269 payload->payload_state = DP_PAYLOAD_REMOTE;
2270 return ret;
2271}
2272
2273static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
2274 struct drm_dp_mst_port *port,
2275 int id,
2276 struct drm_dp_payload *payload)
2277{
2278 DRM_DEBUG_KMS("\n");
2279
2280 if (port) {
2281 drm_dp_payload_send_msg(mgr, port, id, 0);
2282 }
2283
2284 drm_dp_dpcd_write_payload(mgr, id, payload);
2285 payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
2286 return 0;
2287}
2288
2289static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
2290 int id,
2291 struct drm_dp_payload *payload)
2292{
2293 payload->payload_state = 0;
2294 return 0;
2295}
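
/**
 * drm_dp_update_payload_part1() - Execute payload update part 1
 * @mgr: manager to use.
 *
 * This iterates over all proposed virtual channels, and tries to
 * allocate space in the link for them. For 0->slots transitions,
 * this step just writes the VCPI to the MST device. For slots->0
 * transitions, this writes the updated VCPIs and removes the
 * remote VC payloads.
 *
 * after calling this the driver should generate ACT and payload
 * packets.
 */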
int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_dp_payload req_payload;
	struct drm_dp_mst_port *port;
	int i, j;
	int cur_slots = 1;

	mutex_lock(&mgr->payload_lock);
	for (i = 0; i < mgr->max_payloads; i++) {
		struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
		struct drm_dp_payload *payload = &mgr->payloads[i];
		bool put_port = false;

		/* solve the current payloads - compare to the hw ones
		   - update the hw view */
		req_payload.start_slot = cur_slots;
		if (vcpi) {
			port = container_of(vcpi, struct drm_dp_mst_port,
					    vcpi);

			/* Validated ports don't matter if we're releasing
			 * VCPI
			 */
			if (vcpi->num_slots) {
				port = drm_dp_mst_topology_get_port_validated(
				    mgr, port);
				if (!port) {
					mutex_unlock(&mgr->payload_lock);
					return -EINVAL;
				}
				put_port = true;
			}

			req_payload.num_slots = vcpi->num_slots;
			req_payload.vcpi = vcpi->vcpi;
		} else {
			port = NULL;
			req_payload.num_slots = 0;
		}

		payload->start_slot = req_payload.start_slot;
		/* work out what is required to happen with this payload */
		if (payload->num_slots != req_payload.num_slots) {

			/* need to push an update for this payload */
			if (req_payload.num_slots) {
				drm_dp_create_payload_step1(mgr, vcpi->vcpi,
							    &req_payload);
				payload->num_slots = req_payload.num_slots;
				payload->vcpi = req_payload.vcpi;

			} else if (payload->num_slots) {
				payload->num_slots = 0;
				drm_dp_destroy_payload_step1(mgr, port,
							     payload->vcpi,
							     payload);
				req_payload.payload_state =
					payload->payload_state;
				payload->start_slot = 0;
			}
			payload->payload_state = req_payload.payload_state;
		}
		cur_slots += req_payload.num_slots;

		if (put_port)
			drm_dp_mst_topology_put_port(port);
	}

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL)
			continue;

		DRM_DEBUG_KMS("removing payload %d\n", i);
		for (j = i; j < mgr->max_payloads - 1; j++) {
			mgr->payloads[j] = mgr->payloads[j + 1];
			mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];

			if (mgr->proposed_vcpis[j] &&
			    mgr->proposed_vcpis[j]->num_slots) {
				set_bit(j + 1, &mgr->payload_mask);
			} else {
				clear_bit(j + 1, &mgr->payload_mask);
			}
		}

		memset(&mgr->payloads[mgr->max_payloads - 1], 0,
		       sizeof(struct drm_dp_payload));
		mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
		clear_bit(mgr->max_payloads, &mgr->payload_mask);
	}
	mutex_unlock(&mgr->payload_lock);

	return 0;
}
EXPORT_SYMBOL(drm_dp_update_payload_part1);
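
/**
 * drm_dp_update_payload_part2() - Execute payload update part 2
 * @mgr: manager to use.
 *
 * This iterates over all proposed virtual channels, and tries to
 * allocate space in the link for them. For 0->slots transitions,
 * this step writes the remote VC payload commands. For slots->0
 * this just resets some internal state.
 */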
int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_dp_mst_port *port;
	int i;
	int ret = 0;

	mutex_lock(&mgr->payload_lock);
	for (i = 0; i < mgr->max_payloads; i++) {

		if (!mgr->proposed_vcpis[i])
			continue;

		port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);

		DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
		if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
			ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
		} else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
			ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
		}
		if (ret) {
			mutex_unlock(&mgr->payload_lock);
			return ret;
		}
	}
	mutex_unlock(&mgr->payload_lock);
	return 0;
}
EXPORT_SYMBOL(drm_dp_update_payload_part2);

#if 0 /* unused as of yet */
static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port,
				 int offset, int size)
{
	int len;
	struct drm_dp_sideband_msg_tx *txmsg;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return -ENOMEM;

	len = build_dpcd_read(txmsg, port->port_num, 0, 8);
	txmsg->dst = port->parent;

	drm_dp_queue_down_tx(mgr, txmsg);

	return 0;
}
#endif

static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes)
{
	int len;
	int ret;
	struct drm_dp_sideband_msg_tx *txmsg;
	struct drm_dp_mst_branch *mstb;

	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
	if (!mstb)
		return -EINVAL;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto fail_put;
	}

	len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
	txmsg->dst = mstb;

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
			ret = -EINVAL;
		else
			ret = 0;
	}
	kfree(txmsg);
fail_put:
	drm_dp_mst_topology_put_mstb(mstb);
	return ret;
}

static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
{
	struct drm_dp_sideband_msg_reply_body reply;

	reply.reply_type = DP_SIDEBAND_REPLY_ACK;
	reply.req_type = req_type;
	drm_dp_encode_sideband_reply(&reply, msg);
	return 0;
}

static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
				    struct drm_dp_mst_branch *mstb,
				    int req_type, int seqno, bool broadcast)
{
	struct drm_dp_sideband_msg_tx *txmsg;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return -ENOMEM;

	txmsg->dst = mstb;
	txmsg->seqno = seqno;
	drm_dp_encode_up_ack_reply(txmsg, req_type);

	mutex_lock(&mgr->qlock);

	process_single_up_tx_qlock(mgr, txmsg);

	mutex_unlock(&mgr->qlock);

	kfree(txmsg);
	return 0;
}

/* Map the DPCD link bandwidth code to the PBN a single time slot can carry */
static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
				     int dp_link_count,
				     int *out)
{
	switch (dp_link_bw) {
	default:
		DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
			      dp_link_bw, dp_link_count);
		return false;

	case DP_LINK_BW_1_62:
		*out = 3 * dp_link_count;
		break;
	case DP_LINK_BW_2_7:
		*out = 5 * dp_link_count;
		break;
	case DP_LINK_BW_5_4:
		*out = 10 * dp_link_count;
		break;
	case DP_LINK_BW_8_1:
		*out = 15 * dp_link_count;
		break;
	}
	return true;
}
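
/**
 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
 * @mgr: manager to set state for
 * @mst_state: true to enable MST on this connector - false to disable.
 *
 * This is called by the driver when it detects an MST capable device plugged
 * into a DP MST capable port, or when a DP MST capable device is unplugged.
 */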
int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
{
	int ret = 0;
	struct drm_dp_mst_branch *mstb = NULL;

	mutex_lock(&mgr->lock);
	if (mst_state == mgr->mst_state)
		goto out_unlock;

	mgr->mst_state = mst_state;
	/* set the device into MST mode */
	if (mst_state) {
		WARN_ON(mgr->mst_primary);

		/* get dpcd to see if we're actually in MST mode or not */
		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
		if (ret != DP_RECEIVER_CAP_SIZE) {
			DRM_DEBUG_KMS("failed to read DPCD\n");
			goto out_unlock;
		}

		if (!drm_dp_get_vc_payload_bw(mgr->dpcd[1],
					      mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK,
					      &mgr->pbn_div)) {
			ret = -EINVAL;
			goto out_unlock;
		}

		/* add initial branch device at LCT 1 */
		mstb = drm_dp_add_mst_branch_device(1, NULL);
		if (mstb == NULL) {
			ret = -ENOMEM;
			goto out_unlock;
		}
		mstb->mgr = mgr;

		/* give this the main reference */
		mgr->mst_primary = mstb;
		drm_dp_mst_topology_get_mstb(mgr->mst_primary);

		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
					 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
		if (ret < 0)
			goto out_unlock;

		{
			struct drm_dp_payload reset_pay;

			reset_pay.start_slot = 0;
			reset_pay.num_slots = 0x3f;
			drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
		}

		queue_work(system_long_wq, &mgr->work);

		ret = 0;
	} else {
		/* disable MST on the device */
		mstb = mgr->mst_primary;
		mgr->mst_primary = NULL;
		/* this can fail if the device is gone */
		drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
		ret = 0;
		memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
		mgr->payload_mask = 0;
		set_bit(0, &mgr->payload_mask);
		mgr->vcpi_mask = 0;
	}

out_unlock:
	mutex_unlock(&mgr->lock);
	if (mstb)
		drm_dp_mst_topology_put_mstb(mstb);
	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
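
/**
 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
 * @mgr: manager to suspend
 *
 * This function tells the MST device that we can't handle UP messages
 * anymore. This should stop it from sending any since we are suspended.
 */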
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
{
	mutex_lock(&mgr->lock);
	drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
			   DP_MST_EN | DP_UPSTREAM_IS_SRC);
	mutex_unlock(&mgr->lock);
	flush_work(&mgr->work);
	flush_work(&mgr->destroy_connector_work);
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
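
/**
 * drm_dp_mst_topology_mgr_resume() - resume the MST manager
 * @mgr: manager to resume
 *
 * This will fetch DPCD and see if the device is still there,
 * if it is, it will rewrite the MSTM control bits, and return.
 *
 * if the device fails this returns -1, and the driver should do
 * a full MST reprobe, in case we were undocked.
 */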
int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
{
	int ret = 0;

	mutex_lock(&mgr->lock);

	if (mgr->mst_primary) {
		int sret;
		u8 guid[16];

		sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
		if (sret != DP_RECEIVER_CAP_SIZE) {
			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
			ret = -1;
			goto out_unlock;
		}

		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
					 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
		if (ret < 0) {
			DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
			ret = -1;
			goto out_unlock;
		}

		/* Some hubs forget their guids after they resume */
		sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
		if (sret != 16) {
			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
			ret = -1;
			goto out_unlock;
		}
		drm_dp_check_mstb_guid(mgr->mst_primary, guid);

		ret = 0;
	} else
		ret = -1;

out_unlock:
	mutex_unlock(&mgr->lock);
	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);

static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
{
	int len;
	u8 replyblock[32];
	int replylen, origlen, curreply;
	int ret;
	struct drm_dp_sideband_msg_rx *msg;
	int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;

	msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;

	len = min(mgr->max_dpcd_transaction_bytes, 16);
	ret = drm_dp_dpcd_read(mgr->aux, basereg,
			       replyblock, len);
	if (ret != len) {
		DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
		return false;
	}
	ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
	if (!ret) {
		DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
		return false;
	}
	replylen = msg->curchunk_len + msg->curchunk_hdrlen;

	origlen = replylen;
	replylen -= len;
	curreply = len;
	while (replylen > 0) {
		len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
		ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
				       replyblock, len);
		if (ret != len) {
			DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
				      len, ret);
			return false;
		}

		ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
		if (!ret) {
			DRM_DEBUG_KMS("failed to build sideband msg\n");
			return false;
		}

		curreply += len;
		replylen -= len;
	}
	return true;
}

static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
{
	int ret = 0;

	if (!drm_dp_get_one_sb_msg(mgr, false)) {
		memset(&mgr->down_rep_recv, 0,
		       sizeof(struct drm_dp_sideband_msg_rx));
		return 0;
	}

	if (mgr->down_rep_recv.have_eomt) {
		struct drm_dp_sideband_msg_tx *txmsg;
		struct drm_dp_mst_branch *mstb;
		int slot = -1;

		mstb = drm_dp_get_mst_branch_device(mgr,
						    mgr->down_rep_recv.initial_hdr.lct,
						    mgr->down_rep_recv.initial_hdr.rad);

		if (!mstb) {
			DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct);
			memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
			return 0;
		}

		/* find the message */
		slot = mgr->down_rep_recv.initial_hdr.seqno;
		mutex_lock(&mgr->qlock);
		txmsg = mstb->tx_slots[slot];
		/* remove from slots */
		mutex_unlock(&mgr->qlock);

		if (!txmsg) {
			DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
				      mstb,
				      mgr->down_rep_recv.initial_hdr.seqno,
				      mgr->down_rep_recv.initial_hdr.lct,
				      mgr->down_rep_recv.initial_hdr.rad[0],
				      mgr->down_rep_recv.msg[0]);
			drm_dp_mst_topology_put_mstb(mstb);
			memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
			return 0;
		}

		drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);

		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
			DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
				      txmsg->reply.req_type,
				      drm_dp_mst_req_type_str(txmsg->reply.req_type),
				      txmsg->reply.u.nak.reason,
				      drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
				      txmsg->reply.u.nak.nak_data);

		memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
		drm_dp_mst_topology_put_mstb(mstb);

		mutex_lock(&mgr->qlock);
		txmsg->state = DRM_DP_SIDEBAND_TX_RX;
		mstb->tx_slots[slot] = NULL;
		mutex_unlock(&mgr->qlock);

		wake_up_all(&mgr->tx_waitq);
	}
	return ret;
}

static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
{
	int ret = 0;

	if (!drm_dp_get_one_sb_msg(mgr, true)) {
		memset(&mgr->up_req_recv, 0,
		       sizeof(struct drm_dp_sideband_msg_rx));
		return 0;
	}

	if (mgr->up_req_recv.have_eomt) {
		struct drm_dp_sideband_msg_req_body msg;
		struct drm_dp_mst_branch *mstb = NULL;
		bool seqno;

		if (!mgr->up_req_recv.initial_hdr.broadcast) {
			mstb = drm_dp_get_mst_branch_device(mgr,
							    mgr->up_req_recv.initial_hdr.lct,
							    mgr->up_req_recv.initial_hdr.rad);
			if (!mstb) {
				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
				return 0;
			}
		}

		seqno = mgr->up_req_recv.initial_hdr.seqno;
		drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);

		if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
			drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);

			if (!mstb)
				mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid);

			if (!mstb) {
				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
				return 0;
			}

			drm_dp_update_port(mstb, &msg.u.conn_stat);

			DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
			drm_kms_helper_hotplug_event(mgr->dev);

		} else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
			drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
			if (!mstb)
				mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid);

			if (!mstb) {
				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
				return 0;
			}

			DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
		}

		if (mstb)
			drm_dp_mst_topology_put_mstb(mstb);

		memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
	}
	return ret;
}
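
/**
 * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
 * @mgr: manager to notify irq for.
 * @esi: 4 bytes from SINK_COUNT_ESI
 * @handled: whether the hpd interrupt was consumed or not
 *
 * This should be called from the driver when it detects a short IRQ,
 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
 * topology manager will process the sideband messages received as a result
 * of this.
 */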
int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
{
	int ret = 0;
	int sc;

	*handled = false;
	sc = esi[0] & 0x3f;

	if (sc != mgr->sink_count) {
		mgr->sink_count = sc;
		*handled = true;
	}

	if (esi[1] & DP_DOWN_REP_MSG_RDY) {
		ret = drm_dp_mst_handle_down_rep(mgr);
		*handled = true;
	}

	if (esi[1] & DP_UP_REQ_MSG_RDY) {
		ret |= drm_dp_mst_handle_up_req(mgr);
		*handled = true;
	}

	drm_dp_mst_kick_tx(mgr);
	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
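
/**
 * drm_dp_mst_detect_port() - get connection status for an MST port
 * @connector: DRM connector for this port
 * @mgr: manager for this port
 * @port: unverified pointer to a port
 *
 * This returns the current connection state for a port. It validates the
 * port pointer still exists so the caller doesn't require a reference.
 */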
enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector,
						 struct drm_dp_mst_topology_mgr *mgr,
						 struct drm_dp_mst_port *port)
{
	enum drm_connector_status status = connector_status_disconnected;

	/* we need to search for the port in the mgr in case it's gone */
	port = drm_dp_mst_topology_get_port_validated(mgr, port);
	if (!port)
		return connector_status_disconnected;

	if (!port->ddps)
		goto out;

	switch (port->pdt) {
	case DP_PEER_DEVICE_NONE:
	case DP_PEER_DEVICE_MST_BRANCHING:
		break;

	case DP_PEER_DEVICE_SST_SINK:
		status = connector_status_connected;
		/* for logical ports - cache the EDID */
		if (port->port_num >= 8 && !port->cached_edid) {
			port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
		}
		break;
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
		if (port->ldps)
			status = connector_status_connected;
		break;
	}
out:
	drm_dp_mst_topology_put_port(port);
	return status;
}
EXPORT_SYMBOL(drm_dp_mst_detect_port);
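
/**
 * drm_dp_mst_port_has_audio() - Check whether port has audio capability or not
 * @mgr: manager for this port
 * @port: unverified pointer to a port.
 *
 * This returns whether the port supports audio or not.
 */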
bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
			       struct drm_dp_mst_port *port)
{
	bool ret = false;

	port = drm_dp_mst_topology_get_port_validated(mgr, port);
	if (!port)
		return ret;
	ret = port->has_audio;
	drm_dp_mst_topology_put_port(port);
	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_port_has_audio);
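
/**
 * drm_dp_mst_get_edid() - get EDID for an MST port
 * @connector: toplevel connector to get EDID for
 * @mgr: manager for this port
 * @port: unverified pointer to a port.
 *
 * This returns an EDID for the port connected to a connector.
 * It validates the pointer still exists so the caller doesn't require a
 * reference.
 */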
struct edid *drm_dp_mst_get_edid(struct drm_connector *connector,
				 struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port)
{
	struct edid *edid = NULL;

	/* we need to search for the port in the mgr in case it's gone */
	port = drm_dp_mst_topology_get_port_validated(mgr, port);
	if (!port)
		return NULL;

	if (port->cached_edid)
		edid = drm_edid_duplicate(port->cached_edid);
	else {
		edid = drm_get_edid(connector, &port->aux.ddc);
	}
	port->has_audio = drm_detect_monitor_audio(edid);
	drm_dp_mst_topology_put_port(port);
	return edid;
}
EXPORT_SYMBOL(drm_dp_mst_get_edid);
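
/**
 * drm_dp_find_vcpi_slots() - Find VCPI slots for this PBN value
 * @mgr: manager to use
 * @pbn: payload bandwidth to convert into slots.
 *
 * Calculate the number of VCPI slots that will be required for the given PBN
 * value.
 *
 * RETURNS:
 * The total slots required for this port, or error.
 */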
int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
			   int pbn)
{
	int num_slots;

	num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);

	/* max. time slots - one slot for MTP header */
	if (num_slots > 63)
		return -ENOSPC;
	return num_slots;
}
EXPORT_SYMBOL(drm_dp_find_vcpi_slots);

static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
			    struct drm_dp_vcpi *vcpi, int pbn, int slots)
{
	int ret;

	/* max. time slots - one slot for MTP header */
	if (slots > 63)
		return -ENOSPC;

	vcpi->pbn = pbn;
	vcpi->aligned_pbn = slots * mgr->pbn_div;
	vcpi->num_slots = slots;

	ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
	if (ret < 0)
		return ret;
	return 0;
}
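
/**
 * drm_dp_atomic_find_vcpi_slots() - Find and add VCPI slots to the state
 * @state: global atomic state
 * @mgr: MST topology manager for the port
 * @port: port to find vcpi slots for
 * @pbn: bandwidth required for the mode in PBN
 *
 * Allocates VCPI slots to @port, replacing any previous VCPI allocations it
 * may have had. Any atomic drivers which support MST must call this function
 * in their &drm_encoder_helper_funcs.atomic_check() callback to change the
 * current VCPI allocation for the new state. Allocations set by this function
 * are not checked against the bandwidth restraints of @mgr until the driver
 * calls drm_dp_mst_atomic_check().
 *
 * It is OK to call this function multiple times on the same @port as needed.
 * It is not OK however, to call this function and
 * drm_dp_atomic_release_vcpi_slots() in the same atomic check phase.
 *
 * A sketch of the intended use from an encoder's atomic_check() (the bpp and
 * crtc_state names here are illustrative, not part of this API):
 *
 *	pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock, bpp);
 *	slots = drm_dp_atomic_find_vcpi_slots(state, mgr, port, pbn);
 *	if (slots < 0)
 *		return slots;
 *
 * Returns:
 * Total slots in the atomic state assigned for this port, or a negative error
 * code on failure.
 */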
int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
				  struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port, int pbn)
{
	struct drm_dp_mst_topology_state *topology_state;
	struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
	int prev_slots, req_slots, ret;

	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
	if (IS_ERR(topology_state))
		return PTR_ERR(topology_state);

	/* Find the current allocation for this port, if any */
	list_for_each_entry(pos, &topology_state->vcpis, next) {
		if (pos->port == port) {
			vcpi = pos;
			prev_slots = vcpi->vcpi;

			/*
			 * This should never happen, unless the driver tries
			 * releasing and allocating the same VCPI allocation,
			 * which is an error
			 */
			if (WARN_ON(!prev_slots)) {
				DRM_ERROR("cannot allocate and release VCPI on [MST PORT:%p] in the same state\n",
					  port);
				return -EINVAL;
			}

			break;
		}
	}
	if (!vcpi)
		prev_slots = 0;

	req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);

	DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n",
			 port->connector->base.id, port->connector->name,
			 port, prev_slots, req_slots);

	/* Add the new allocation to the state */
	if (!vcpi) {
		vcpi = kzalloc(sizeof(*vcpi), GFP_KERNEL);
		if (!vcpi)
			return -ENOMEM;

		drm_dp_mst_get_port_malloc(port);
		vcpi->port = port;
		list_add(&vcpi->next, &topology_state->vcpis);
	}
	vcpi->vcpi = req_slots;

	ret = req_slots;
	return ret;
}
EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
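
/**
 * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots
 * @state: global atomic state
 * @mgr: MST topology manager for the port
 * @port: The port to release the VCPI slots from
 *
 * Releases any VCPI slots that have been allocated to a port in the atomic
 * state. Any atomic drivers which support MST must call this function in
 * their &drm_connector_helper_funcs.atomic_check() callback when the
 * connector will no longer have VCPI allocated (e.g. because its CRTC was
 * removed) when it had VCPI allocated in the previous atomic state.
 *
 * It is OK to call this even if @port has been removed from the system.
 * It is not OK however, to call this function and
 * drm_dp_atomic_find_vcpi_slots() on the same @port in a single atomic check
 * phase.
 *
 * Returns:
 * 0 on success, or a negative error code on failure.
 */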
int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
				     struct drm_dp_mst_topology_mgr *mgr,
				     struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_topology_state *topology_state;
	struct drm_dp_vcpi_allocation *pos;
	bool found = false;

	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
	if (IS_ERR(topology_state))
		return PTR_ERR(topology_state);

	list_for_each_entry(pos, &topology_state->vcpis, next) {
		if (pos->port == port) {
			found = true;
			break;
		}
	}
	if (WARN_ON(!found)) {
		DRM_ERROR("no VCPI for [MST PORT:%p] found in mst state %p\n",
			  port, &topology_state->base);
		return -EINVAL;
	}

	DRM_DEBUG_ATOMIC("[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi);
	if (pos->vcpi) {
		drm_dp_mst_put_port_malloc(port);
		pos->vcpi = 0;
	}

	return 0;
}
EXPORT_SYMBOL(drm_dp_atomic_release_vcpi_slots);
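
/**
 * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
 * @mgr: manager for this port
 * @port: port to allocate a virtual channel for.
 * @pbn: payload bandwidth number to request
 * @slots: returned number of slots for this PBN.
 */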
bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_mst_port *port, int pbn, int slots)
{
	int ret;

	/* check this before taking a topology reference, so nothing leaks */
	if (slots < 0)
		return false;

	port = drm_dp_mst_topology_get_port_validated(mgr, port);
	if (!port)
		return false;

	if (port->vcpi.vcpi > 0) {
		DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n",
			      port->vcpi.vcpi, port->vcpi.pbn, pbn);
		if (pbn == port->vcpi.pbn) {
			drm_dp_mst_topology_put_port(port);
			return true;
		}
	}

	ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
	if (ret) {
		DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n",
			      DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
		goto out;
	}
	DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
		      pbn, port->vcpi.num_slots);

	/* Keep port allocated until its payload has been removed */
	drm_dp_mst_get_port_malloc(port);
	drm_dp_mst_topology_put_port(port);
	return true;
out:
	/* drop the topology reference taken above before bailing */
	drm_dp_mst_topology_put_port(port);
	return false;
}
EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);

int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	int slots = 0;

	port = drm_dp_mst_topology_get_port_validated(mgr, port);
	if (!port)
		return slots;

	slots = port->vcpi.num_slots;
	drm_dp_mst_topology_put_port(port);
	return slots;
}
EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
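
/**
 * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
 * @mgr: manager for this port
 * @port: unverified pointer to a port.
 *
 * This just resets the number of slots for the ports VCPI for later
 * programming.
 */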
void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	/*
	 * A port with VCPI will remain allocated until its VCPI is
	 * released, no verified ref needed
	 */

	port->vcpi.num_slots = 0;
}
EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
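
/**
 * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
 * @mgr: manager for this port
 * @port: port to deallocate vcpi for
 *
 * This can be called unconditionally, regardless of whether
 * drm_dp_mst_allocate_vcpi() succeeded or not.
 */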
void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
				struct drm_dp_mst_port *port)
{
	if (!port->vcpi.vcpi)
		return;

	drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
	port->vcpi.num_slots = 0;
	port->vcpi.pbn = 0;
	port->vcpi.aligned_pbn = 0;
	port->vcpi.vcpi = 0;
	drm_dp_mst_put_port_malloc(port);
}
EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id, struct drm_dp_payload *payload)
{
	u8 payload_alloc[3], status;
	int ret;
	int retries = 0;

	drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
			   DP_PAYLOAD_TABLE_UPDATED);

	payload_alloc[0] = id;
	payload_alloc[1] = payload->start_slot;
	payload_alloc[2] = payload->num_slots;

	ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
	if (ret != 3) {
		DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
		goto fail;
	}

retry:
	ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
	if (ret < 0) {
		DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
		goto fail;
	}

	if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
		retries++;
		if (retries < 20) {
			usleep_range(10000, 20000);
			goto retry;
		}
		DRM_DEBUG_KMS("status not set after read payload table status %d\n", status);
		ret = -EINVAL;
		goto fail;
	}
	ret = 0;
fail:
	return ret;
}
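
/**
 * drm_dp_check_act_status() - Check ACT handled status.
 * @mgr: manager to use
 *
 * Check the payload status bits in the DPCD for ACT handled completion.
 */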
int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
{
	u8 status;
	int ret;
	int count = 0;

	do {
		ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
		if (ret < 0) {
			DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
			goto fail;
		}

		if (status & DP_PAYLOAD_ACT_HANDLED)
			break;
		count++;
		udelay(100);
	} while (count < 30);

	if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
		DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count);
		ret = -EINVAL;
		goto fail;
	}
	return 0;
fail:
	return ret;
}
EXPORT_SYMBOL(drm_dp_check_act_status);
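
/**
 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
 * @clock: dot clock for the mode
 * @bpp: bpp for the mode.
 *
 * This uses the formula in the spec to calculate the PBN value for a mode.
 * For example, a 154000 kHz mode at 30 bpp works out to 689 PBN (see
 * test_calc_pbn_mode() below for this and other reference values).
 */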
int drm_dp_calc_pbn_mode(int clock, int bpp)
{
	u64 kbps;
	s64 peak_kbps;
	u32 numerator;
	u32 denominator;

	kbps = clock * bpp;
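
	/*
	 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
	 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
	 * common multiplier to render an integer PBN for all link rate/lane
	 * counts combinations
	 * calculate
	 * peak_kbps *= (1006/1000)
	 * peak_kbps *= (64/54)
	 * peak_kbps *= 8    convert to bytes
	 */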
	numerator = 64 * 1006;
	denominator = 54 * 8 * 1000 * 1000;

	kbps *= numerator;
	peak_kbps = drm_fixp_from_fraction(kbps, denominator);

	return drm_fixp2int_ceil(peak_kbps);
}
EXPORT_SYMBOL(drm_dp_calc_pbn_mode);

static int test_calc_pbn_mode(void)
{
	int ret;

	ret = drm_dp_calc_pbn_mode(154000, 30);
	if (ret != 689) {
		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
			  154000, 30, 689, ret);
		return -EINVAL;
	}
	ret = drm_dp_calc_pbn_mode(234000, 30);
	if (ret != 1047) {
		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
			  234000, 30, 1047, ret);
		return -EINVAL;
	}
	ret = drm_dp_calc_pbn_mode(297000, 24);
	if (ret != 1063) {
		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
			  297000, 24, 1063, ret);
		return -EINVAL;
	}
	return 0;
}

/* we want to kick the TX after we've ack the up/down IRQs. */
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
{
	queue_work(system_long_wq, &mgr->tx_work);
}

static void drm_dp_mst_dump_mstb(struct seq_file *m,
				 struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_port *port;
	int tabs = mstb->lct;
	char prefix[10];
	int i;

	for (i = 0; i < tabs; i++)
		prefix[i] = '\t';
	prefix[i] = '\0';

	seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
	list_for_each_entry(port, &mstb->ports, next) {
		seq_printf(m, "%sport: %d: input: %d: pdt: %d, ddps: %d ldps: %d, sdp: %d/%d, %p, conn: %p\n",
			   prefix, port->port_num, port->input, port->pdt,
			   port->ddps, port->ldps, port->num_sdp_streams,
			   port->num_sdp_stream_sinks, port, port->connector);
		if (port->mstb)
			drm_dp_mst_dump_mstb(m, port->mstb);
	}
}

#define DP_PAYLOAD_TABLE_SIZE 64

static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf)
{
	int i;

	for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
		if (drm_dp_dpcd_read(mgr->aux,
				     DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
				     &buf[i], 16) != 16)
			return false;
	}
	return true;
}

static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
			       struct drm_dp_mst_port *port, char *name,
			       int namelen)
{
	struct edid *mst_edid;

	mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
	drm_edid_get_monitor_name(mst_edid, name, namelen);
	/* drm_dp_mst_get_edid() returns a duplicated EDID, free it */
	kfree(mst_edid);
}
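
/**
 * drm_dp_mst_dump_topology(): dump topology to seq file.
 * @m: seq_file to dump output to
 * @mgr: manager to dump current topology for.
 *
 * helper to dump MST topology to a seq file for debugfs.
 */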
void drm_dp_mst_dump_topology(struct seq_file *m,
			      struct drm_dp_mst_topology_mgr *mgr)
{
	int i;
	struct drm_dp_mst_port *port;

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary)
		drm_dp_mst_dump_mstb(m, mgr->mst_primary);
	mutex_unlock(&mgr->lock);

	/* dump VCPIs */
	mutex_lock(&mgr->payload_lock);
	seq_printf(m, "vcpi: %lx %lx %d\n", mgr->payload_mask, mgr->vcpi_mask,
		   mgr->max_payloads);

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->proposed_vcpis[i]) {
			char name[14];

			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
			fetch_monitor_name(mgr, port, name, sizeof(name));
			seq_printf(m, "vcpi %d: %d %d %d sink name: %s\n", i,
				   port->port_num, port->vcpi.vcpi,
				   port->vcpi.num_slots,
				   (*name != 0) ? name : "Unknown");
		} else
			seq_printf(m, "vcpi %d:unused\n", i);
	}
	for (i = 0; i < mgr->max_payloads; i++) {
		seq_printf(m, "payload %d: %d, %d, %d\n",
			   i,
			   mgr->payloads[i].payload_state,
			   mgr->payloads[i].start_slot,
			   mgr->payloads[i].num_slots);
	}
	mutex_unlock(&mgr->payload_lock);

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		u8 buf[DP_PAYLOAD_TABLE_SIZE];
		int ret;

		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
		seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
		ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
		seq_printf(m, "faux/mst: %*ph\n", 2, buf);
		ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
		seq_printf(m, "mst ctrl: %*ph\n", 1, buf);

		/* dump the standard OUI branch header */
		ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
		seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
		for (i = 0x3; i < 0x8 && buf[i]; i++)
			seq_printf(m, "%c", buf[i]);
		seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
			   buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
		if (dump_dp_payload_table(mgr, buf))
			seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf);
	}
	mutex_unlock(&mgr->lock);
}
EXPORT_SYMBOL(drm_dp_mst_dump_topology);

static void drm_dp_tx_work(struct work_struct *work)
{
	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);

	mutex_lock(&mgr->qlock);
	if (!list_empty(&mgr->tx_msg_downq))
		process_single_down_tx_qlock(mgr);
	mutex_unlock(&mgr->qlock);
}

static void drm_dp_destroy_connector_work(struct work_struct *work)
{
	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
	struct drm_dp_mst_port *port;
	bool send_hotplug = false;

	/*
	 * Not a regular list traverse as we have to drop the destroy
	 * connector lock before destroying the connector, to avoid AB->BA
	 * ordering between this lock and the config mutex.
	 */
	for (;;) {
		mutex_lock(&mgr->destroy_connector_lock);
		port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
		if (!port) {
			mutex_unlock(&mgr->destroy_connector_lock);
			break;
		}
		list_del(&port->next);
		mutex_unlock(&mgr->destroy_connector_lock);

		INIT_LIST_HEAD(&port->next);

		mgr->cbs->destroy_connector(mgr, port->connector);

		drm_dp_port_teardown_pdt(port, port->pdt);
		port->pdt = DP_PEER_DEVICE_NONE;

		drm_dp_mst_put_port_malloc(port);
		send_hotplug = true;
	}
	if (send_hotplug)
		drm_kms_helper_hotplug_event(mgr->dev);
}

static struct drm_private_state *
drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
{
	struct drm_dp_mst_topology_state *state, *old_state =
		to_dp_mst_topology_state(obj->state);
	struct drm_dp_vcpi_allocation *pos, *vcpi;

	state = kmemdup(old_state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	INIT_LIST_HEAD(&state->vcpis);

	list_for_each_entry(pos, &old_state->vcpis, next) {
		/* Prune leftover freed VCPI allocations */
		if (!pos->vcpi)
			continue;

		vcpi = kmemdup(pos, sizeof(*vcpi), GFP_KERNEL);
		if (!vcpi)
			goto fail;

		drm_dp_mst_get_port_malloc(vcpi->port);
		list_add(&vcpi->next, &state->vcpis);
	}

	return &state->base;

fail:
	list_for_each_entry_safe(pos, vcpi, &state->vcpis, next) {
		drm_dp_mst_put_port_malloc(pos->port);
		kfree(pos);
	}
	kfree(state);

	return NULL;
}

static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
				     struct drm_private_state *state)
{
	struct drm_dp_mst_topology_state *mst_state =
		to_dp_mst_topology_state(state);
	struct drm_dp_vcpi_allocation *pos, *tmp;

	list_for_each_entry_safe(pos, tmp, &mst_state->vcpis, next) {
		/* We only keep references to ports with non-zero VCPIs */
		if (pos->vcpi)
			drm_dp_mst_put_port_malloc(pos->port);
		kfree(pos);
	}

	kfree(mst_state);
}

static inline int
drm_dp_mst_atomic_check_topology_state(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_topology_state *mst_state)
{
	struct drm_dp_vcpi_allocation *vcpi;
	int avail_slots = 63, payload_count = 0;

	list_for_each_entry(vcpi, &mst_state->vcpis, next) {
		/* Releasing VCPI is always OK-even if the port is gone */
		if (!vcpi->vcpi) {
			DRM_DEBUG_ATOMIC("[MST PORT:%p] releases all VCPI slots\n",
					 vcpi->port);
			continue;
		}

		DRM_DEBUG_ATOMIC("[MST PORT:%p] requires %d vcpi slots\n",
				 vcpi->port, vcpi->vcpi);

		avail_slots -= vcpi->vcpi;
		if (avail_slots < 0) {
			DRM_DEBUG_ATOMIC("[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)\n",
					 vcpi->port, mst_state,
					 avail_slots + vcpi->vcpi);
			return -ENOSPC;
		}

		if (++payload_count > mgr->max_payloads) {
			DRM_DEBUG_ATOMIC("[MST MGR:%p] state %p has too many payloads (max=%d)\n",
					 mgr, mst_state, mgr->max_payloads);
			return -EINVAL;
		}
	}
	DRM_DEBUG_ATOMIC("[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n",
			 mgr, mst_state, avail_slots,
			 63 - avail_slots);

	return 0;
}
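
/**
 * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
 * atomic update is valid
 * @state: Pointer to the new &struct drm_atomic_state
 *
 * Checks the given topology state for an atomic update to ensure that it's
 * valid. This includes checking whether there's enough bandwidth to support
 * the new VCPI allocations in the atomic update.
 *
 * Any atomic drivers supporting DP MST must make sure to call this after
 * checking the rest of their state in their
 * &drm_mode_config_funcs.atomic_check() callback.
 *
 * Returns:
 * 0 if the new state is valid, negative error code otherwise.
 */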
int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
{
	struct drm_dp_mst_topology_mgr *mgr;
	struct drm_dp_mst_topology_state *mst_state;
	int i, ret = 0;

	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
		ret = drm_dp_mst_atomic_check_topology_state(mgr, mst_state);
		if (ret)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_atomic_check);

const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = {
	.atomic_duplicate_state = drm_dp_mst_duplicate_state,
	.atomic_destroy_state = drm_dp_mst_destroy_state,
};
EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
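
/**
 * drm_atomic_get_mst_topology_state: get MST topology state
 * @state: global atomic state
 * @mgr: MST topology manager, also the private object in this case
 *
 * This function wraps drm_atomic_get_private_obj_state() passing in the MST
 * atomic state vtable so that the private object state returned is that of a
 * MST topology object. The caller is expected to hold
 * &drm_mode_config.connection_mutex; we warn if it doesn't.
 *
 * RETURNS:
 * The MST topology state or error pointer.
 */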
struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
								    struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_device *dev = mgr->dev;

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
	return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
}
EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
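
/**
 * drm_dp_mst_topology_mgr_init - initialise a topology manager
 * @mgr: manager struct to initialise
 * @dev: device providing this structure - for i2c addition.
 * @aux: DP helper aux channel to talk to this device
 * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
 * @max_payloads: maximum number of payloads this GPU can source
 * @conn_base_id: the connector object ID the MST device is connected to.
 *
 * A minimal call sketch (the &priv fields here are hypothetical driver
 * state, not part of this API):
 *
 *	ret = drm_dp_mst_topology_mgr_init(&priv->mst_mgr, dev, &priv->aux,
 *					   16, 4, connector->base.id);
 *
 * Return 0 for success, or negative error code on failure
 */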
int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_device *dev, struct drm_dp_aux *aux,
				 int max_dpcd_transaction_bytes,
				 int max_payloads, int conn_base_id)
{
	struct drm_dp_mst_topology_state *mst_state;

	mutex_init(&mgr->lock);
	mutex_init(&mgr->qlock);
	mutex_init(&mgr->payload_lock);
	mutex_init(&mgr->destroy_connector_lock);
	INIT_LIST_HEAD(&mgr->tx_msg_downq);
	INIT_LIST_HEAD(&mgr->destroy_connector_list);
	INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
	INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
	INIT_WORK(&mgr->destroy_connector_work, drm_dp_destroy_connector_work);
	init_waitqueue_head(&mgr->tx_waitq);
	mgr->dev = dev;
	mgr->aux = aux;
	mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
	mgr->max_payloads = max_payloads;
	mgr->conn_base_id = conn_base_id;
	if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
	    max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
		return -EINVAL;
	mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
	if (!mgr->payloads)
		return -ENOMEM;
	mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
	if (!mgr->proposed_vcpis)
		return -ENOMEM;
	set_bit(0, &mgr->payload_mask);
	if (test_calc_pbn_mode() < 0)
		DRM_ERROR("MST PBN self-test failed\n");

	mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
	if (mst_state == NULL)
		return -ENOMEM;

	mst_state->mgr = mgr;
	INIT_LIST_HEAD(&mst_state->vcpis);

	drm_atomic_private_obj_init(dev, &mgr->base,
				    &mst_state->base,
				    &drm_dp_mst_topology_state_funcs);

	return 0;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
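
/**
 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
 * @mgr: manager to destroy
 */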
void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
{
	drm_dp_mst_topology_mgr_set_mst(mgr, false);
	flush_work(&mgr->work);
	flush_work(&mgr->destroy_connector_work);
	mutex_lock(&mgr->payload_lock);
	kfree(mgr->payloads);
	mgr->payloads = NULL;
	kfree(mgr->proposed_vcpis);
	mgr->proposed_vcpis = NULL;
	mutex_unlock(&mgr->payload_lock);
	mgr->dev = NULL;
	mgr->aux = NULL;
	drm_atomic_private_obj_fini(&mgr->base);
	mgr->funcs = NULL;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);

static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num)
{
	int i;

	if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)
		return false;

	for (i = 0; i < num - 1; i++) {
		if (msgs[i].flags & I2C_M_RD ||
		    msgs[i].len > 0xff)
			return false;
	}

	return msgs[num - 1].flags & I2C_M_RD &&
		msgs[num - 1].len <= 0xff;
}

/* I2C device */
static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
			       int num)
{
	struct drm_dp_aux *aux = adapter->algo_data;
	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
	unsigned int i;
	struct drm_dp_sideband_msg_req_body msg;
	struct drm_dp_sideband_msg_tx *txmsg = NULL;
	int ret;

	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
	if (!mstb)
		return -EREMOTEIO;

	if (!remote_i2c_read_ok(msgs, num)) {
		DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
		ret = -EIO;
		goto out;
	}

	/* construct the remote I2C read request */
	memset(&msg, 0, sizeof(msg));
	msg.req_type = DP_REMOTE_I2C_READ;
	msg.u.i2c_read.num_transactions = num - 1;
	msg.u.i2c_read.port_number = port->port_num;
	for (i = 0; i < num - 1; i++) {
		msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
		msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
		msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
		msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP);
	}
	msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
	msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto out;
	}

	txmsg->dst = mstb;
	drm_dp_encode_sideband_req(&msg, txmsg);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {

		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
			ret = -EREMOTEIO;
			goto out;
		}
		if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
			ret = -EIO;
			goto out;
		}
		memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
		ret = num;
	}
out:
	kfree(txmsg);
	drm_dp_mst_topology_put_mstb(mstb);
	return ret;
}

static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
	       I2C_FUNC_SMBUS_READ_BLOCK_DATA |
	       I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
	       I2C_FUNC_10BIT_ADDR;
}

static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
	.functionality = drm_dp_mst_i2c_functionality,
	.master_xfer = drm_dp_mst_i2c_xfer,
};
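
/**
 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
 * @aux: DisplayPort AUX channel
 *
 * Returns 0 on success or a negative error code on failure.
 */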
static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
{
	aux->ddc.algo = &drm_dp_mst_i2c_algo;
	aux->ddc.algo_data = aux;
	aux->ddc.retries = 3;

	aux->ddc.class = I2C_CLASS_DDC;
	aux->ddc.owner = THIS_MODULE;
	aux->ddc.dev.parent = aux->dev;
	aux->ddc.dev.of_node = aux->dev->of_node;

	strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
		sizeof(aux->ddc.name));

	return i2c_add_adapter(&aux->ddc);
}
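
/**
 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
 * @aux: DisplayPort AUX channel
 */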
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
{
	i2c_del_adapter(&aux->ddc);
}