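/*
 * DisplayPort 1.2 MultiStream Transport (MST) helpers: sideband message
 * encoding/decoding and CRC handling, topology (branch/port) lifetime
 * management, and VC payload table bookkeeping for the DRM MST topology
 * manager.
 */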
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/i2c.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drmP.h>

#include <drm/drm_fixed.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>

static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf);
static int test_calc_pbn_mode(void);

static void drm_dp_put_port(struct drm_dp_mst_port *port);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id,
				     struct drm_dp_payload *payload);

static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes);

static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				     struct drm_dp_mst_branch *mstb);
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid);

static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);

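/*
 * Sideband message headers carry a 4-bit CRC computed nibble-wise with
 * polynomial 0x13 (x^4 + x + 1), per the DP 1.2 sideband message format.
 */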
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = num_nibbles * 4;
	u8 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x10) == 0x10)
			remainder ^= 0x13;
	}

	number_of_bits = 4;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x10) != 0)
			remainder ^= 0x13;
	}

	return remainder;
}

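/*
 * Message bodies carry an 8-bit CRC over whole bytes, polynomial 0xD5
 * plus the implicit x^8 term, again per the DP 1.2 sideband format.
 */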
static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = number_of_bytes * 8;
	u16 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x100) == 0x100)
			remainder ^= 0xd5;
	}

	number_of_bits = 8;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x100) != 0)
			remainder ^= 0xd5;
	}

	return remainder & 0xff;
}

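/*
 * Header size on the wire: byte 0 (LCT/LCR), lct/2 RAD bytes, then one
 * broadcast/path/length byte and one SOMT/EOMT/seqno/CRC byte.
 */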
static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
{
	u8 size = 3;
	size += (hdr->lct / 2);
	return size;
}

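/* pack a sideband message header into buf; *len returns the header length */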
static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int *len)
{
	int idx = 0;
	int i;
	u8 crc4;
	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
	for (i = 0; i < (hdr->lct / 2); i++)
		buf[idx++] = hdr->rad[i];
	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
		(hdr->msg_len & 0x3f);
	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);

	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
	buf[idx - 1] |= (crc4 & 0xf);

	*len = idx;
}

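/* unpack and CRC-check a sideband message header received from the hub */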
static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int buflen, u8 *hdrlen)
{
	u8 crc4;
	u8 len;
	int i;
	u8 idx;
	if (buf[0] == 0)
		return false;
	len = 3;
	len += ((buf[0] & 0xf0) >> 4) / 2;
	if (len > buflen)
		return false;
	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);

	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
		DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
		return false;
	}

	hdr->lct = (buf[0] & 0xf0) >> 4;
	hdr->lcr = (buf[0] & 0xf);
	idx = 1;
	for (i = 0; i < (hdr->lct / 2); i++)
		hdr->rad[i] = buf[idx++];
	hdr->broadcast = (buf[idx] >> 7) & 0x1;
	hdr->path_msg = (buf[idx] >> 6) & 0x1;
	hdr->msg_len = buf[idx] & 0x3f;
	idx++;
	hdr->somt = (buf[idx] >> 7) & 0x1;
	hdr->eomt = (buf[idx] >> 6) & 0x1;
	hdr->seqno = (buf[idx] >> 4) & 0x1;
	idx++;
	*hdrlen = idx;
	return true;
}

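/*
 * Serialize a down request into raw->msg using the per-request body
 * layouts from the DP 1.2 sideband message definitions.
 */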
static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
				       struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	int i;
	u8 *buf = raw->msg;
	buf[idx++] = req->req_type & 0x7f;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
		idx++;
		break;
	case DP_ALLOCATE_PAYLOAD:
		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
			(req->u.allocate_payload.number_sdp_streams & 0xf);
		idx++;
		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn >> 8);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
		idx++;
		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
				(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
			idx++;
		}
		if (req->u.allocate_payload.number_sdp_streams & 1) {
			i = req->u.allocate_payload.number_sdp_streams - 1;
			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
			idx++;
		}
		break;
	case DP_QUERY_PAYLOAD:
		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
		idx++;
		break;
	case DP_REMOTE_DPCD_READ:
		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_read.num_bytes);
		idx++;
		break;

	case DP_REMOTE_DPCD_WRITE:
		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
		idx += req->u.dpcd_write.num_bytes;
		break;
	case DP_REMOTE_I2C_READ:
		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
		idx++;
		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
			idx++;
			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
			idx++;
			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
			idx += req->u.i2c_read.transactions[i].num_bytes;

			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
			idx++;
		}
		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_read.num_bytes_read);
		idx++;
		break;

	case DP_REMOTE_I2C_WRITE:
		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
		idx += req->u.i2c_write.num_bytes;
		break;
	}
	raw->cur_len = idx;
}

static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
{
	u8 crc4;
	crc4 = drm_dp_msg_data_crc4(msg, len);
	msg[len] = crc4;
}

static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
					 struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	u8 *buf = raw->msg;

	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);

	raw->cur_len = idx;
}

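/*
 * Incrementally reassemble a sideband message from successive 16-byte
 * DPCD reads: the first chunk carries a header (hdr == true), later
 * chunks are body only. Note that the data CRC is computed here but not
 * compared against the received trailer byte.
 */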
static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
				      u8 *replybuf, u8 replybuflen, bool hdr)
{
	int ret;
	u8 crc4;

	if (hdr) {
		u8 hdrlen;
		struct drm_dp_sideband_msg_hdr recv_hdr;
		ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
		if (ret == false) {
			print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
			return false;
		}

		/*
		 * ignore out-of-order messages or messages that are part of
		 * a failed transaction
		 */
		if (!recv_hdr.somt && !msg->have_somt)
			return false;

		/* get length contained in this portion */
		msg->curchunk_len = recv_hdr.msg_len;
		msg->curchunk_hdrlen = hdrlen;

		/* we have already gotten an somt - don't bother parsing */
		if (recv_hdr.somt && msg->have_somt)
			return false;

		if (recv_hdr.somt) {
			memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
			msg->have_somt = true;
		}
		if (recv_hdr.eomt)
			msg->have_eomt = true;

		/* copy the bytes for the remainder of this header chunk */
		msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
		memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
	} else {
		memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
		msg->curchunk_idx += replybuflen;
	}

	if (msg->curchunk_idx >= msg->curchunk_len) {
		/* copy chunk into bigger msg, dropping the trailing CRC byte */
		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
		msg->curlen += msg->curchunk_len - 1;
	}
	return true;
}

static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
					       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	int i;
	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
	idx += 16;
	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
		if (raw->msg[idx] & 0x80)
			repmsg->u.link_addr.ports[i].input_port = 1;

		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);

		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
		if (repmsg->u.link_addr.ports[i].input_port == 0)
			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		if (repmsg->u.link_addr.ports[i].input_port == 0) {
			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
			idx++;
			if (idx > raw->curlen)
				goto fail_len;
			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
			idx += 16;
			if (idx > raw->curlen)
				goto fail_len;
			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
			idx++;
		}
		if (idx > raw->curlen)
			goto fail_len;
	}

	return true;
fail_len:
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
						   struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote dpcd read reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
						      struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
	idx++;

	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
							  struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
					struct drm_dp_sideband_msg_reply_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
	msg->req_type = (raw->msg[0] & 0x7f);

	if (msg->reply_type) {
		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
		msg->u.nak.reason = raw->msg[17];
		msg->u.nak.nak_data = raw->msg[18];
		return false;
	}

	switch (msg->req_type) {
	case DP_LINK_ADDRESS:
		return drm_dp_sideband_parse_link_address(raw, msg);
	case DP_QUERY_PAYLOAD:
		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
	case DP_REMOTE_DPCD_READ:
		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
	case DP_REMOTE_DPCD_WRITE:
		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
	case DP_REMOTE_I2C_READ:
		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
	case DP_ENUM_PATH_RESOURCES:
		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
	case DP_ALLOCATE_PAYLOAD:
		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
	default:
		DRM_ERROR("Got unknown reply 0x%02x\n", msg->req_type);
		return false;
	}
}

static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
							   struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
							 struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
				      struct drm_dp_sideband_msg_req_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->req_type = (raw->msg[0] & 0x7f);

	switch (msg->req_type) {
	case DP_CONNECTION_STATUS_NOTIFY:
		return drm_dp_sideband_parse_connection_status_notify(raw, msg);
	case DP_RESOURCE_STATUS_NOTIFY:
		return drm_dp_sideband_parse_resource_status_notify(raw, msg);
	default:
		DRM_ERROR("Got unknown request 0x%02x\n", msg->req_type);
		return false;
	}
}

static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_WRITE;
	req.u.dpcd_write.port_number = port_num;
	req.u.dpcd_write.dpcd_address = offset;
	req.u.dpcd_write.num_bytes = num_bytes;
	req.u.dpcd_write.bytes = bytes;
	drm_dp_encode_sideband_req(&req, msg);

	return 0;
}

static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_LINK_ADDRESS;
	drm_dp_encode_sideband_req(&req, msg);
	return 0;
}

static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_ENUM_PATH_RESOURCES;
	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
				  u8 vcpi, uint16_t pbn,
				  u8 number_sdp_streams,
				  u8 *sdp_stream_sink)
{
	struct drm_dp_sideband_msg_req_body req;
	memset(&req, 0, sizeof(req));
	req.req_type = DP_ALLOCATE_PAYLOAD;
	req.u.allocate_payload.port_number = port_num;
	req.u.allocate_payload.vcpi = vcpi;
	req.u.allocate_payload.pbn = pbn;
	req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
	memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
	       number_sdp_streams);
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

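/*
 * Allocate a payload table slot and a VCPI for a new stream. Bit 0 of
 * payload_mask is reserved for payload ID 0, so slot IDs handed out here
 * start at 1 and index proposed_vcpis[] as (id - 1); VCPI numbers are
 * tracked in a separate mask and are likewise 1-based.
 */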
static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_vcpi *vcpi)
{
	int ret, vcpi_ret;

	mutex_lock(&mgr->payload_lock);
	ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
	if (ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of payload ids %d\n", ret);
		goto out_unlock;
	}

	vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
	if (vcpi_ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);
		goto out_unlock;
	}

	set_bit(ret, &mgr->payload_mask);
	set_bit(vcpi_ret, &mgr->vcpi_mask);
	vcpi->vcpi = vcpi_ret + 1;
	mgr->proposed_vcpis[ret - 1] = vcpi;
out_unlock:
	mutex_unlock(&mgr->payload_lock);
	return ret;
}

static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
				      int vcpi)
{
	int i;
	if (vcpi == 0)
		return;

	mutex_lock(&mgr->payload_lock);
	DRM_DEBUG_KMS("putting payload %d\n", vcpi);
	clear_bit(vcpi - 1, &mgr->vcpi_mask);

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->proposed_vcpis[i])
			if (mgr->proposed_vcpis[i]->vcpi == vcpi) {
				mgr->proposed_vcpis[i] = NULL;
				clear_bit(i + 1, &mgr->payload_mask);
			}
	}
	mutex_unlock(&mgr->payload_lock);
}

static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_sideband_msg_tx *txmsg)
{
	unsigned int state;

	/*
	 * All updates to txmsg->state are protected by mgr->qlock, and the
	 * two cases we check here are terminal states. For those the
	 * barriers provided by the wake_up/wait_event pair are enough.
	 */
	state = READ_ONCE(txmsg->state);
	return (state == DRM_DP_SIDEBAND_TX_RX ||
		state == DRM_DP_SIDEBAND_TX_TIMEOUT);
}

static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
				    struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
	int ret;

	ret = wait_event_timeout(mgr->tx_waitq,
				 check_txmsg_state(mgr, txmsg),
				 (4 * HZ));
	mutex_lock(&mstb->mgr->qlock);
	if (ret > 0) {
		if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
			ret = -EIO;
			goto out;
		}
	} else {
		DRM_DEBUG_KMS("timed out msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);

		ret = -EIO;

		/* remove from q */
		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
			list_del(&txmsg->next);
		}

		/* remove from slots */
		if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
			mstb->tx_slots[txmsg->seqno] = NULL;
		}
	}
out:
	mutex_unlock(&mgr->qlock);

	return ret;
}

static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;

	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
	if (!mstb)
		return NULL;

	mstb->lct = lct;
	if (lct > 1)
		memcpy(mstb->rad, rad, lct / 2);
	INIT_LIST_HEAD(&mstb->ports);
	kref_init(&mstb->kref);
	return mstb;
}

static void drm_dp_free_mst_port(struct kref *kref);

static void drm_dp_free_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
	if (mstb->port_parent) {
		if (list_empty(&mstb->port_parent->next))
			kref_put(&mstb->port_parent->kref, drm_dp_free_mst_port);
	}
	kfree(mstb);
}

static void drm_dp_destroy_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
	struct drm_dp_mst_port *port, *tmp;
	bool wake_tx = false;

	/*
	 * init kref again to be used by ports to remove mst branch when it
	 * is not needed anymore
	 */
	kref_init(kref);

	if (mstb->port_parent && list_empty(&mstb->port_parent->next))
		kref_get(&mstb->port_parent->kref);

	/*
	 * destroy all ports - don't need lock
	 * as there are no more references to the mst branch
	 * device at this point.
	 */
	list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
		list_del(&port->next);
		drm_dp_put_port(port);
	}

	/* drop any tx slot msg */
	mutex_lock(&mstb->mgr->qlock);
	if (mstb->tx_slots[0]) {
		mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		mstb->tx_slots[0] = NULL;
		wake_tx = true;
	}
	if (mstb->tx_slots[1]) {
		mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		mstb->tx_slots[1] = NULL;
		wake_tx = true;
	}
	mutex_unlock(&mstb->mgr->qlock);

	if (wake_tx)
		wake_up_all(&mstb->mgr->tx_waitq);

	kref_put(kref, drm_dp_free_mst_branch_device);
}

static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
{
	kref_put(&mstb->kref, drm_dp_destroy_mst_branch_device);
}

static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
{
	struct drm_dp_mst_branch *mstb;

	switch (old_pdt) {
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		/* remove i2c over sideband */
		drm_dp_mst_unregister_i2c_bus(&port->aux);
		break;
	case DP_PEER_DEVICE_MST_BRANCHING:
		mstb = port->mstb;
		port->mstb = NULL;
		drm_dp_put_mst_branch_device(mstb);
		break;
	}
}

static void drm_dp_destroy_port(struct kref *kref)
{
	struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;

	if (!port->input) {
		port->vcpi.num_slots = 0;

		kfree(port->cached_edid);

		/*
		 * The only time we don't have a connector
		 * on an output port is if the connector init
		 * fails.
		 */
		if (port->connector) {
			/*
			 * we can't destroy the connector here, as we might
			 * be holding the mode_config.mutex from an EDID
			 * retrieval
			 */
			mutex_lock(&mgr->destroy_connector_lock);
			kref_get(&port->parent->kref);
			list_add(&port->next, &mgr->destroy_connector_list);
			mutex_unlock(&mgr->destroy_connector_lock);
			schedule_work(&mgr->destroy_connector_work);
			return;
		}
		/*
		 * no need to clean up vcpi
		 * as if we have no connector we never setup a vcpi
		 */
		drm_dp_port_teardown_pdt(port, port->pdt);
		port->pdt = DP_PEER_DEVICE_NONE;
	}
	kfree(port);
}

static void drm_dp_put_port(struct drm_dp_mst_port *port)
{
	kref_put(&port->kref, drm_dp_destroy_port);
}

static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_branch *to_find)
{
	struct drm_dp_mst_port *port;
	struct drm_dp_mst_branch *rmstb;
	if (to_find == mstb) {
		kref_get(&mstb->kref);
		return mstb;
	}
	list_for_each_entry(port, &mstb->ports, next) {
		if (port->mstb) {
			rmstb = drm_dp_mst_get_validated_mstb_ref_locked(port->mstb, to_find);
			if (rmstb)
				return rmstb;
		}
	}
	return NULL;
}

static struct drm_dp_mst_branch *drm_dp_get_validated_mstb_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_branch *rmstb = NULL;
	mutex_lock(&mgr->lock);
	if (mgr->mst_primary)
		rmstb = drm_dp_mst_get_validated_mstb_ref_locked(mgr->mst_primary, mstb);
	mutex_unlock(&mgr->lock);
	return rmstb;
}

static struct drm_dp_mst_port *drm_dp_mst_get_port_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *to_find)
{
	struct drm_dp_mst_port *port, *mport;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port == to_find) {
			kref_get(&port->kref);
			return port;
		}
		if (port->mstb) {
			mport = drm_dp_mst_get_port_ref_locked(port->mstb, to_find);
			if (mport)
				return mport;
		}
	}
	return NULL;
}

static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_port *rport = NULL;
	mutex_lock(&mgr->lock);
	if (mgr->mst_primary)
		rport = drm_dp_mst_get_port_ref_locked(mgr->mst_primary, port);
	mutex_unlock(&mgr->lock);
	return rport;
}

static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
{
	struct drm_dp_mst_port *port;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->port_num == port_num) {
			kref_get(&port->kref);
			return port;
		}
	}

	return NULL;
}

/*
 * calculate a new RAD for this MST branch device
 * if parent has an LCT of 2 we have 1 nibble of RAD,
 * if parent has an LCT of 3 we have 2 nibbles of RAD,
 */
static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
			       u8 *rad)
{
	int parent_lct = port->parent->lct;
	int shift = 4;
	int idx = (parent_lct - 1) / 2;
	if (parent_lct > 1) {
		memcpy(rad, port->parent->rad, idx + 1);
		shift = (parent_lct % 2) ? 4 : 0;
	} else
		rad[0] = 0;

	rad[idx] |= port->port_num << shift;
	return parent_lct + 1;
}

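/*
 * Example of the encoding above: a branch reached through port 1 of the
 * primary (parent LCT 1 -> returned LCT 2) and then port 4 of that
 * branch (returned LCT 3) ends up with rad[0] == 0x14 - first hop in
 * the high nibble, second hop in the low nibble.
 */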
/* return sends link address for new mstb */
static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
{
	int ret;
	u8 rad[6], lct;
	bool send_link = false;
	switch (port->pdt) {
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		/* add i2c over sideband */
		ret = drm_dp_mst_register_i2c_bus(&port->aux);
		break;
	case DP_PEER_DEVICE_MST_BRANCHING:
		lct = drm_dp_calculate_rad(port, rad);

		port->mstb = drm_dp_add_mst_branch_device(lct, rad);
		if (port->mstb) {
			port->mstb->mgr = port->mgr;
			port->mstb->port_parent = port;

			send_link = true;
		}
		break;
	}
	return send_link;
}

static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
{
	int ret;

	memcpy(mstb->guid, guid, 16);

	if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
		if (mstb->port_parent) {
			ret = drm_dp_send_dpcd_write(
					mstb->mgr,
					mstb->port_parent,
					DP_GUID,
					16,
					mstb->guid);
		} else {
			ret = drm_dp_dpcd_write(
					mstb->mgr->aux,
					DP_GUID,
					mstb->guid,
					16);
		}
	}
}

static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
				int pnum,
				char *proppath,
				size_t proppath_size)
{
	int i;
	char temp[8];
	snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
	for (i = 0; i < (mstb->lct - 1); i++) {
		int shift = (i % 2) ? 0 : 4;
		int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
		snprintf(temp, sizeof(temp), "-%d", port_num);
		strlcat(proppath, temp, proppath_size);
	}
	snprintf(temp, sizeof(temp), "-%d", pnum);
	strlcat(proppath, temp, proppath_size);
}

static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
			    struct drm_device *dev,
			    struct drm_dp_link_addr_reply_port *port_msg)
{
	struct drm_dp_mst_port *port;
	bool ret;
	bool created = false;
	int old_pdt = 0;
	int old_ddps = 0;
	port = drm_dp_get_port(mstb, port_msg->port_number);
	if (!port) {
		port = kzalloc(sizeof(*port), GFP_KERNEL);
		if (!port)
			return;
		kref_init(&port->kref);
		port->parent = mstb;
		port->port_num = port_msg->port_number;
		port->mgr = mstb->mgr;
		port->aux.name = "DPMST";
		port->aux.dev = dev->dev;
		created = true;
	} else {
		old_pdt = port->pdt;
		old_ddps = port->ddps;
	}

	port->pdt = port_msg->peer_device_type;
	port->input = port_msg->input_port;
	port->mcs = port_msg->mcs;
	port->ddps = port_msg->ddps;
	port->ldps = port_msg->legacy_device_plug_status;
	port->dpcd_rev = port_msg->dpcd_revision;
	port->num_sdp_streams = port_msg->num_sdp_streams;
	port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;

	/*
	 * manage mstb port lists with mgr lock - take a reference
	 * for this list
	 */
	if (created) {
		mutex_lock(&mstb->mgr->lock);
		kref_get(&port->kref);
		list_add(&port->next, &mstb->ports);
		mutex_unlock(&mstb->mgr->lock);
	}

	if (old_ddps != port->ddps) {
		if (port->ddps) {
			if (!port->input)
				drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
		} else {
			port->available_pbn = 0;
		}
	}

	if (old_pdt != port->pdt && !port->input) {
		drm_dp_port_teardown_pdt(port, old_pdt);

		ret = drm_dp_port_setup_pdt(port);
		if (ret == true)
			drm_dp_send_link_address(mstb->mgr, port->mstb);
	}

	if (created && !port->input) {
		char proppath[255];

		build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
		port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
		if (!port->connector) {
			/* remove it from the port list */
			mutex_lock(&mstb->mgr->lock);
			list_del(&port->next);
			mutex_unlock(&mstb->mgr->lock);
			/* drop port list reference */
			drm_dp_put_port(port);
			goto out;
		}
		if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
		     port->pdt == DP_PEER_DEVICE_SST_SINK) &&
		    port->port_num >= DP_MST_LOGICAL_PORT_0) {
			port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
			drm_mode_connector_set_tile_property(port->connector);
		}
		(*mstb->mgr->cbs->register_connector)(port->connector);
	}

out:
	/* put reference to this port */
	drm_dp_put_port(port);
}

static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
			       struct drm_dp_connection_status_notify *conn_stat)
{
	struct drm_dp_mst_port *port;
	int old_pdt;
	int old_ddps;
	bool dowork = false;
	port = drm_dp_get_port(mstb, conn_stat->port_number);
	if (!port)
		return;

	old_ddps = port->ddps;
	old_pdt = port->pdt;
	port->pdt = conn_stat->peer_device_type;
	port->mcs = conn_stat->message_capability_status;
	port->ldps = conn_stat->legacy_device_plug_status;
	port->ddps = conn_stat->displayport_device_plug_status;

	if (old_ddps != port->ddps) {
		if (port->ddps) {
			dowork = true;
		} else {
			port->available_pbn = 0;
		}
	}
	if (old_pdt != port->pdt && !port->input) {
		drm_dp_port_teardown_pdt(port, old_pdt);

		if (drm_dp_port_setup_pdt(port))
			dowork = true;
	}

	drm_dp_put_port(port);
	if (dowork)
		queue_work(system_long_wq, &mstb->mgr->work);
}

static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
							      u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_mst_port *port;
	int i;

	/* find the port by iterating down */
	mutex_lock(&mgr->lock);
	mstb = mgr->mst_primary;

	for (i = 0; i < lct - 1; i++) {
		int shift = (i % 2) ? 0 : 4;
		int port_num = (rad[i / 2] >> shift) & 0xf;

		list_for_each_entry(port, &mstb->ports, next) {
			if (port->port_num == port_num) {
				mstb = port->mstb;
				if (!mstb) {
					DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
					goto out;
				}

				break;
			}
		}
	}
	kref_get(&mstb->kref);
out:
	mutex_unlock(&mgr->lock);
	return mstb;
}

static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
	struct drm_dp_mst_branch *mstb,
	uint8_t *guid)
{
	struct drm_dp_mst_branch *found_mstb;
	struct drm_dp_mst_port *port;

	if (memcmp(mstb->guid, guid, 16) == 0)
		return mstb;

	list_for_each_entry(port, &mstb->ports, next) {
		if (!port->mstb)
			continue;

		found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);

		if (found_mstb)
			return found_mstb;
	}

	return NULL;
}

static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid(
	struct drm_dp_mst_topology_mgr *mgr,
	uint8_t *guid)
{
	struct drm_dp_mst_branch *mstb;

	mutex_lock(&mgr->lock);

	mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);

	if (mstb)
		kref_get(&mstb->kref);

	mutex_unlock(&mgr->lock);
	return mstb;
}

static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
					       struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_port *port;
	struct drm_dp_mst_branch *mstb_child;
	if (!mstb->link_address_sent)
		drm_dp_send_link_address(mgr, mstb);

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->input)
			continue;

		if (!port->ddps)
			continue;

		if (!port->available_pbn)
			drm_dp_send_enum_path_resources(mgr, mstb, port);

		if (port->mstb) {
			mstb_child = drm_dp_get_validated_mstb_ref(mgr, port->mstb);
			if (mstb_child) {
				drm_dp_check_and_send_link_address(mgr, mstb_child);
				drm_dp_put_mst_branch_device(mstb_child);
			}
		}
	}
}

static void drm_dp_mst_link_probe_work(struct work_struct *work)
{
	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
	struct drm_dp_mst_branch *mstb;

	mutex_lock(&mgr->lock);
	mstb = mgr->mst_primary;
	if (mstb) {
		kref_get(&mstb->kref);
	}
	mutex_unlock(&mgr->lock);
	if (mstb) {
		drm_dp_check_and_send_link_address(mgr, mstb);
		drm_dp_put_mst_branch_device(mstb);
	}
}

static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid)
{
	u64 salt;

	if (memchr_inv(guid, 0, 16))
		return true;

	salt = get_jiffies_64();

	memcpy(&guid[0], &salt, sizeof(u64));
	memcpy(&guid[8], &salt, sizeof(u64));

	return false;
}

#if 0
static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_READ;
	req.u.dpcd_read.port_number = port_num;
	req.u.dpcd_read.dpcd_address = offset;
	req.u.dpcd_read.num_bytes = num_bytes;
	drm_dp_encode_sideband_req(&req, msg);

	return 0;
}
#endif

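/*
 * Sideband TX: a message chunk is written to the DOWN_REQ (or UP_REP)
 * DPCD window in AUX-sized pieces, at most 16 bytes per transfer.
 */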
static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
				    bool up, u8 *msg, int len)
{
	int ret;
	int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
	int tosend, total, offset;
	int retries = 0;

retry:
	total = len;
	offset = 0;
	do {
		tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);

		ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
					&msg[offset],
					tosend);
		if (ret != tosend) {
			if (ret == -EIO && retries < 5) {
				retries++;
				goto retry;
			}
			DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);

			return -EIO;
		}
		offset += tosend;
		total -= tosend;
	} while (total > 0);
	return 0;
}

static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
				  struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_branch *mstb = txmsg->dst;
	u8 req_type;

	/* pick a tx slot/seqno if one wasn't assigned already */
	if (txmsg->seqno == -1) {
		if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
			DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
			return -EAGAIN;
		}
		if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
			txmsg->seqno = mstb->last_seqno;
			mstb->last_seqno ^= 1;
		} else if (mstb->tx_slots[0] == NULL)
			txmsg->seqno = 0;
		else
			txmsg->seqno = 1;
		mstb->tx_slots[txmsg->seqno] = txmsg;
	}

	req_type = txmsg->msg[0] & 0x7f;
	if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
	    req_type == DP_RESOURCE_STATUS_NOTIFY)
		hdr->broadcast = 1;
	else
		hdr->broadcast = 0;
	hdr->path_msg = txmsg->path_msg;
	hdr->lct = mstb->lct;
	hdr->lcr = mstb->lct - 1;
	if (mstb->lct > 1)
		memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
	hdr->seqno = txmsg->seqno;
	return 0;
}

/*
 * process a single block of the next message in the sideband queue
 */
static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_sideband_msg_tx *txmsg,
				   bool up)
{
	u8 chunk[48];
	struct drm_dp_sideband_msg_hdr hdr;
	int len, space, idx, tosend;
	int ret;

	memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));

	if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
		txmsg->seqno = -1;
		txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
	}

	/*
	 * make hdr from dst mst - for replies use seqno
	 * otherwise assign one
	 */
	ret = set_hdr_from_dst_qlock(&hdr, txmsg);
	if (ret < 0)
		return ret;

	/* amount left to send in this message */
	len = txmsg->cur_len - txmsg->cur_offset;

	/* 48 - sideband msg size - 1 byte for data CRC, x header bytes */
	space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);

	tosend = min(len, space);
	if (len == txmsg->cur_len)
		hdr.somt = 1;
	if (space >= len)
		hdr.eomt = 1;

	/* msg_len includes the trailing data CRC byte */
	hdr.msg_len = tosend + 1;
	drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
	memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
	/* add crc at end */
	drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
	idx += tosend + 1;

	ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
	if (ret) {
		DRM_DEBUG_KMS("sideband msg failed to send\n");
		return ret;
	}

	txmsg->cur_offset += tosend;
	if (txmsg->cur_offset == txmsg->cur_len) {
		txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
		return 1;
	}
	return 0;
}

static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	WARN_ON(!mutex_is_locked(&mgr->qlock));

	/* construct a chunk from the first msg in the tx_msg queue */
	if (list_empty(&mgr->tx_msg_downq))
		return;

	txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
	ret = process_single_tx_qlock(mgr, txmsg, false);
	if (ret == 1) {
		/* txmsg is sent it should be in the slots now */
		list_del(&txmsg->next);
	} else if (ret) {
		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
		list_del(&txmsg->next);
		if (txmsg->seqno != -1)
			txmsg->dst->tx_slots[txmsg->seqno] = NULL;
		txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		wake_up_all(&mgr->tx_waitq);
	}
}

/* called holding qlock */
static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_sideband_msg_tx *txmsg)
{
	int ret;

	/* construct a chunk from the first msg in the tx_msg queue */
	ret = process_single_tx_qlock(mgr, txmsg, true);

	if (ret != 1)
		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);

	txmsg->dst->tx_slots[txmsg->seqno] = NULL;
}

static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_sideband_msg_tx *txmsg)
{
	mutex_lock(&mgr->qlock);
	list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
	if (list_is_singular(&mgr->tx_msg_downq))
		process_single_down_tx_qlock(mgr);
	mutex_unlock(&mgr->qlock);
}

static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				     struct drm_dp_mst_branch *mstb)
{
	int len;
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return;

	txmsg->dst = mstb;
	len = build_link_address(txmsg);

	mstb->link_address_sent = true;
	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		int i;

		if (txmsg->reply.reply_type == 1)
			DRM_DEBUG_KMS("link address nak received\n");
		else {
			DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports);
			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
				DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i,
					      txmsg->reply.u.link_addr.ports[i].input_port,
					      txmsg->reply.u.link_addr.ports[i].peer_device_type,
					      txmsg->reply.u.link_addr.ports[i].port_number,
					      txmsg->reply.u.link_addr.ports[i].dpcd_revision,
					      txmsg->reply.u.link_addr.ports[i].mcs,
					      txmsg->reply.u.link_addr.ports[i].ddps,
					      txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status,
					      txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
					      txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
			}

			drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);

			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
				drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
			}
			(*mgr->cbs->hotplug)(mgr);
		}
	} else {
		mstb->link_address_sent = false;
		DRM_DEBUG_KMS("link address failed %d\n", ret);
	}

	kfree(txmsg);
}

static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port)
{
	int len;
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return -ENOMEM;

	txmsg->dst = mstb;
	len = build_enum_path_resources(txmsg, port->port_num);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == 1)
			DRM_DEBUG_KMS("enum path resources nak received\n");
		else {
			if (port->port_num != txmsg->reply.u.path_resources.port_number)
				DRM_ERROR("got incorrect port in response\n");
			DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number,
				      txmsg->reply.u.path_resources.avail_payload_bw_number);
			port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number;
		}
	}

	kfree(txmsg);
	return 0;
}

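/*
 * Walk back up the topology to find the closest ancestor port that is
 * still connected, so payload messages for a just-unplugged branch can
 * still be addressed to the last live device on its path.
 */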
static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
{
	if (!mstb->port_parent)
		return NULL;

	if (mstb->port_parent->mstb != mstb)
		return mstb->port_parent;

	return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
}

static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
									  struct drm_dp_mst_branch *mstb,
									  int *port_num)
{
	struct drm_dp_mst_branch *rmstb = NULL;
	struct drm_dp_mst_port *found_port;
	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		found_port = drm_dp_get_last_connected_port_to_mstb(mstb);

		if (found_port) {
			rmstb = found_port->parent;
			kref_get(&rmstb->kref);
			*port_num = found_port->port_num;
		}
	}
	mutex_unlock(&mgr->lock);
	return rmstb;
}

static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_mst_port *port,
				   int id,
				   int pbn)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	struct drm_dp_mst_branch *mstb;
	int len, ret, port_num;
	u8 sinks[DRM_DP_MAX_SDP_STREAMS];
	int i;

	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return -EINVAL;

	port_num = port->port_num;
	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
	if (!mstb) {
		mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);

		if (!mstb) {
			drm_dp_put_port(port);
			return -EINVAL;
		}
	}

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto fail_put;
	}

	for (i = 0; i < port->num_sdp_streams; i++)
		sinks[i] = i;

	txmsg->dst = mstb;
	len = build_allocate_payload(txmsg, port_num,
				     id,
				     pbn, port->num_sdp_streams, sinks);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == 1) {
			ret = -EINVAL;
		} else
			ret = 0;
	}
	kfree(txmsg);
fail_put:
	drm_dp_put_mst_branch_device(mstb);
	drm_dp_put_port(port);
	return ret;
}

static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
				       int id,
				       struct drm_dp_payload *payload)
{
	int ret;

	ret = drm_dp_dpcd_write_payload(mgr, id, payload);
	if (ret < 0) {
		payload->payload_state = 0;
		return ret;
	}
	payload->payload_state = DP_PAYLOAD_LOCAL;
	return 0;
}

static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_port *port,
				       int id,
				       struct drm_dp_payload *payload)
{
	int ret;
	ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
	if (ret < 0)
		return ret;
	payload->payload_state = DP_PAYLOAD_REMOTE;
	return ret;
}

static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_mst_port *port,
					int id,
					struct drm_dp_payload *payload)
{
	DRM_DEBUG_KMS("\n");
	/* it's okay for these to fail */
	if (port) {
		drm_dp_payload_send_msg(mgr, port, id, 0);
	}

	drm_dp_dpcd_write_payload(mgr, id, payload);
	payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
	return 0;
}

static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
					int id,
					struct drm_dp_payload *payload)
{
	payload->payload_state = 0;
	return 0;
}

/**
 * drm_dp_update_payload_part1() - Execute payload update part 1
 * @mgr: manager to use.
 *
 * This iterates over all proposed virtual channels, and tries to
 * allocate space in the link for them. For 0->slots transitions,
 * this step just writes the VCPI to the MST device. For slots->0
 * transitions, this writes the updated VCPIs and removes the
 * remote VC payloads.
 *
 * after calling this the driver should generate ACT and payload
 * packets.
 */
int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
{
	int i, j;
	int cur_slots = 1;
	struct drm_dp_payload req_payload;
	struct drm_dp_mst_port *port;

	mutex_lock(&mgr->payload_lock);
	for (i = 0; i < mgr->max_payloads; i++) {
		/*
		 * solve the current payloads - compare to the hw ones
		 * - update the hw view
		 */
		req_payload.start_slot = cur_slots;
		if (mgr->proposed_vcpis[i]) {
			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
			port = drm_dp_get_validated_port_ref(mgr, port);
			if (!port) {
				mutex_unlock(&mgr->payload_lock);
				return -EINVAL;
			}
			req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
			req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
		} else {
			port = NULL;
			req_payload.num_slots = 0;
		}

		if (mgr->payloads[i].start_slot != req_payload.start_slot) {
			mgr->payloads[i].start_slot = req_payload.start_slot;
		}
		/* work out what is required to happen with this payload */
		if (mgr->payloads[i].num_slots != req_payload.num_slots) {

			/* need to push an update for this payload */
			if (req_payload.num_slots) {
				drm_dp_create_payload_step1(mgr, mgr->proposed_vcpis[i]->vcpi, &req_payload);
				mgr->payloads[i].num_slots = req_payload.num_slots;
				mgr->payloads[i].vcpi = req_payload.vcpi;
			} else if (mgr->payloads[i].num_slots) {
				mgr->payloads[i].num_slots = 0;
				drm_dp_destroy_payload_step1(mgr, port, mgr->payloads[i].vcpi, &mgr->payloads[i]);
				req_payload.payload_state = mgr->payloads[i].payload_state;
				mgr->payloads[i].start_slot = 0;
			}
			mgr->payloads[i].payload_state = req_payload.payload_state;
		}
		cur_slots += req_payload.num_slots;

		if (port)
			drm_dp_put_port(port);
	}

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
			DRM_DEBUG_KMS("removing payload %d\n", i);
			for (j = i; j < mgr->max_payloads - 1; j++) {
				memcpy(&mgr->payloads[j], &mgr->payloads[j + 1], sizeof(struct drm_dp_payload));
				mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
				if (mgr->proposed_vcpis[j] && mgr->proposed_vcpis[j]->num_slots) {
					set_bit(j + 1, &mgr->payload_mask);
				} else {
					clear_bit(j + 1, &mgr->payload_mask);
				}
			}
			memset(&mgr->payloads[mgr->max_payloads - 1], 0, sizeof(struct drm_dp_payload));
			mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
			clear_bit(mgr->max_payloads, &mgr->payload_mask);
		}
	}
	mutex_unlock(&mgr->payload_lock);

	return 0;
}
EXPORT_SYMBOL(drm_dp_update_payload_part1);

/**
 * drm_dp_update_payload_part2() - Execute payload update part 2
 * @mgr: manager to use.
 *
 * This iterates over all proposed virtual channels, and tries to
 * allocate space in the link for them. For 0->slots transitions,
 * this step writes the remote VC payload commands. For slots->0
 * this just resets some internal state.
 */
int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_dp_mst_port *port;
	int i;
	int ret = 0;
	mutex_lock(&mgr->payload_lock);
	for (i = 0; i < mgr->max_payloads; i++) {

		if (!mgr->proposed_vcpis[i])
			continue;

		port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);

		DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
		if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
			ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
		} else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
			ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
		}
		if (ret) {
			mutex_unlock(&mgr->payload_lock);
			return ret;
		}
	}
	mutex_unlock(&mgr->payload_lock);
	return 0;
}
EXPORT_SYMBOL(drm_dp_update_payload_part2);

#if 0
static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port,
				 int offset, int size)
{
	int len;
	struct drm_dp_sideband_msg_tx *txmsg;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return -ENOMEM;

	len = build_dpcd_read(txmsg, port->port_num, 0, 8);
	txmsg->dst = port->parent;

	drm_dp_queue_down_tx(mgr, txmsg);

	return 0;
}
#endif

static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes)
{
	int len;
	int ret;
	struct drm_dp_sideband_msg_tx *txmsg;
	struct drm_dp_mst_branch *mstb;

	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
	if (!mstb)
		return -EINVAL;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto fail_put;
	}

	len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
	txmsg->dst = mstb;

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == 1) {
			ret = -EINVAL;
		} else
			ret = 0;
	}
	kfree(txmsg);
fail_put:
	drm_dp_put_mst_branch_device(mstb);
	return ret;
}

static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
{
	struct drm_dp_sideband_msg_reply_body reply;

	reply.reply_type = 0;
	reply.req_type = req_type;
	drm_dp_encode_sideband_reply(&reply, msg);
	return 0;
}

static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
				    struct drm_dp_mst_branch *mstb,
				    int req_type, int seqno, bool broadcast)
{
	struct drm_dp_sideband_msg_tx *txmsg;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return -ENOMEM;

	txmsg->dst = mstb;
	txmsg->seqno = seqno;
	drm_dp_encode_up_ack_reply(txmsg, req_type);

	mutex_lock(&mgr->qlock);

	process_single_up_tx_qlock(mgr, txmsg);

	mutex_unlock(&mgr->qlock);

	kfree(txmsg);
	return 0;
}

static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
				     int dp_link_count,
				     int *out)
{
	switch (dp_link_bw) {
	default:
		DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
			      dp_link_bw, dp_link_count);
		return false;

	case DP_LINK_BW_1_62:
		*out = 3 * dp_link_count;
		break;
	case DP_LINK_BW_2_7:
		*out = 5 * dp_link_count;
		break;
	case DP_LINK_BW_5_4:
		*out = 10 * dp_link_count;
		break;
	}
	return true;
}

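/*
 * The values above are the PBN capacity of one 64-slot link frame
 * timeslot: raw channel bandwidth in MBps (symbol rate x lane count)
 * divided by 54, e.g. 2.7 Gbps x 4 lanes -> 270 MBps/lane * 4 / 54 = 20.
 */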
/**
 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
 * @mgr: manager to set state for
 * @mst_state: true to enable MST on this connector - false to disable.
 *
 * This is called by the driver when it detects an MST capable device plugged
 * into a DP MST capable port, or when a DP MST capable device is unplugged.
 */
int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
{
	int ret = 0;
	struct drm_dp_mst_branch *mstb = NULL;

	mutex_lock(&mgr->lock);
	if (mst_state == mgr->mst_state)
		goto out_unlock;

	mgr->mst_state = mst_state;
	/* set the device into MST mode */
	if (mst_state) {
		WARN_ON(mgr->mst_primary);

		/* get dpcd info */
		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
		if (ret != DP_RECEIVER_CAP_SIZE) {
			DRM_DEBUG_KMS("failed to read DPCD\n");
			goto out_unlock;
		}

		if (!drm_dp_get_vc_payload_bw(mgr->dpcd[1],
					      mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK,
					      &mgr->pbn_div)) {
			ret = -EINVAL;
			goto out_unlock;
		}

		/* add initial branch device at LCT 1 */
		mstb = drm_dp_add_mst_branch_device(1, NULL);
		if (mstb == NULL) {
			ret = -ENOMEM;
			goto out_unlock;
		}
		mstb->mgr = mgr;

		/* give this the main reference */
		mgr->mst_primary = mstb;
		kref_get(&mgr->mst_primary->kref);

		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
					 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
		if (ret < 0) {
			goto out_unlock;
		}

		{
			struct drm_dp_payload reset_pay;
			reset_pay.start_slot = 0;
			reset_pay.num_slots = 0x3f;
			drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
		}

		queue_work(system_long_wq, &mgr->work);

		ret = 0;
	} else {
		/* disable MST on the device */
		mstb = mgr->mst_primary;
		mgr->mst_primary = NULL;
		/* this can fail if the device is gone */
		drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
		ret = 0;
		memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
		mgr->payload_mask = 0;
		set_bit(0, &mgr->payload_mask);
		mgr->vcpi_mask = 0;
	}

out_unlock:
	mutex_unlock(&mgr->lock);
	if (mstb)
		drm_dp_put_mst_branch_device(mstb);
	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);

2105
2106
2107
2108
2109
2110
2111
2112
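/**
 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
 * @mgr: manager to suspend
 *
 * This function tells the MST device that we can't handle UP messages
 * anymore. This should stop it from sending any since we are suspended.
 */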
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
{
	mutex_lock(&mgr->lock);
	drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
			   DP_MST_EN | DP_UPSTREAM_IS_SRC);
	mutex_unlock(&mgr->lock);
	flush_work(&mgr->work);
	flush_work(&mgr->destroy_connector_work);
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);

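/**
 * drm_dp_mst_topology_mgr_resume() - resume the MST manager
 * @mgr: manager to resume
 *
 * This will fetch DPCD and see if the device is still there,
 * if it is, it will rewrite the MSTM control bits, and return.
 *
 * If the device fails this returns -1, and the driver should do
 * a full MST reprobe, in case we were undocked.
 */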
int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
{
	int ret = 0;

	mutex_lock(&mgr->lock);

	if (mgr->mst_primary) {
		int sret;
		u8 guid[16];

		sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
		if (sret != DP_RECEIVER_CAP_SIZE) {
			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
			ret = -1;
			goto out_unlock;
		}

		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
					 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
		if (ret < 0) {
			DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
			ret = -1;
			goto out_unlock;
		}

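		/* Some hubs forget their guids after they resume */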
		sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
		if (sret != 16) {
			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
			ret = -1;
			goto out_unlock;
		}
		drm_dp_check_mstb_guid(mgr->mst_primary, guid);

		ret = 0;
	} else
		ret = -1;

out_unlock:
	mutex_unlock(&mgr->lock);
	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);

static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
{
	int len;
	u8 replyblock[32];
	int replylen, origlen, curreply;
	int ret;
	struct drm_dp_sideband_msg_rx *msg;
	int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
	msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;

	len = min(mgr->max_dpcd_transaction_bytes, 16);
	ret = drm_dp_dpcd_read(mgr->aux, basereg,
			       replyblock, len);
	if (ret != len) {
		DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
		return false;
	}
	ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
	if (!ret) {
		DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
		return false;
	}
	replylen = msg->curchunk_len + msg->curchunk_hdrlen;

	origlen = replylen;
	replylen -= len;
	curreply = len;
	while (replylen > 0) {
		len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
		ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
				       replyblock, len);
		if (ret != len) {
			DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
				      len, ret);
			return false;
		}

		ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
		if (!ret) {
			DRM_DEBUG_KMS("failed to build sideband msg\n");
			return false;
		}

		curreply += len;
		replylen -= len;
	}
	return true;
}

static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
{
	int ret = 0;

	if (!drm_dp_get_one_sb_msg(mgr, false)) {
		memset(&mgr->down_rep_recv, 0,
		       sizeof(struct drm_dp_sideband_msg_rx));
		return 0;
	}

	if (mgr->down_rep_recv.have_eomt) {
		struct drm_dp_sideband_msg_tx *txmsg;
		struct drm_dp_mst_branch *mstb;
		int slot = -1;
		mstb = drm_dp_get_mst_branch_device(mgr,
						    mgr->down_rep_recv.initial_hdr.lct,
						    mgr->down_rep_recv.initial_hdr.rad);

		if (!mstb) {
			DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
				      mgr->down_rep_recv.initial_hdr.lct);
			memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
			return 0;
		}

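		/* find the message */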
		slot = mgr->down_rep_recv.initial_hdr.seqno;
		mutex_lock(&mgr->qlock);
		txmsg = mstb->tx_slots[slot];
		mutex_unlock(&mgr->qlock);

		if (!txmsg) {
			DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
				      mstb,
				      mgr->down_rep_recv.initial_hdr.seqno,
				      mgr->down_rep_recv.initial_hdr.lct,
				      mgr->down_rep_recv.initial_hdr.rad[0],
				      mgr->down_rep_recv.msg[0]);
			drm_dp_put_mst_branch_device(mstb);
			memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
			return 0;
		}

		drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
		if (txmsg->reply.reply_type == 1) {
			DRM_DEBUG_KMS("Got NAK reply: req 0x%02x, reason 0x%02x, nak data 0x%02x\n",
				      txmsg->reply.req_type,
				      txmsg->reply.u.nak.reason,
				      txmsg->reply.u.nak.nak_data);
		}

		memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));

		mutex_lock(&mgr->qlock);
		txmsg->state = DRM_DP_SIDEBAND_TX_RX;
		mstb->tx_slots[slot] = NULL;
		mutex_unlock(&mgr->qlock);

		/* drop the branch device reference only after we are done
		 * touching mstb->tx_slots, as the put may free mstb
		 * underneath us.
		 */
		drm_dp_put_mst_branch_device(mstb);

		wake_up_all(&mgr->tx_waitq);
	}
	return ret;
}

static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
{
	int ret = 0;

	if (!drm_dp_get_one_sb_msg(mgr, true)) {
		memset(&mgr->up_req_recv, 0,
		       sizeof(struct drm_dp_sideband_msg_rx));
		return 0;
	}

	if (mgr->up_req_recv.have_eomt) {
		struct drm_dp_sideband_msg_req_body msg;
		struct drm_dp_mst_branch *mstb = NULL;
		bool seqno;

		if (!mgr->up_req_recv.initial_hdr.broadcast) {
			mstb = drm_dp_get_mst_branch_device(mgr,
							    mgr->up_req_recv.initial_hdr.lct,
							    mgr->up_req_recv.initial_hdr.rad);
			if (!mstb) {
				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
					      mgr->up_req_recv.initial_hdr.lct);
				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
				return 0;
			}
		}

		seqno = mgr->up_req_recv.initial_hdr.seqno;
		drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);

		if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
			drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);

			if (!mstb)
				mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid);

			if (!mstb) {
				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
					      mgr->up_req_recv.initial_hdr.lct);
				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
				return 0;
			}

			drm_dp_update_port(mstb, &msg.u.conn_stat);

			DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
				      msg.u.conn_stat.port_number,
				      msg.u.conn_stat.legacy_device_plug_status,
				      msg.u.conn_stat.displayport_device_plug_status,
				      msg.u.conn_stat.message_capability_status,
				      msg.u.conn_stat.input_port,
				      msg.u.conn_stat.peer_device_type);
			(*mgr->cbs->hotplug)(mgr);

		} else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
			drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
			if (!mstb)
				mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid);

			if (!mstb) {
				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
					      mgr->up_req_recv.initial_hdr.lct);
				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
				return 0;
			}

			DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n",
				      msg.u.resource_stat.port_number,
				      msg.u.resource_stat.available_pbn);
		}

		if (mstb)
			drm_dp_put_mst_branch_device(mstb);

		memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
	}
	return ret;
}

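/**
 * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
 * @mgr: manager to notify irq for.
 * @esi: 4 bytes from SINK_COUNT_ESI
 * @handled: whether the hpd interrupt was consumed or not
 *
 * This should be called from the driver when it detects a short IRQ,
 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0 to handle.
 * The topology manager will process the sideband messages received as a
 * result of this.
 */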
int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
{
	int ret = 0;
	int sc;
	*handled = false;
	sc = esi[0] & 0x3f;

	if (sc != mgr->sink_count) {
		mgr->sink_count = sc;
		*handled = true;
	}

	if (esi[1] & DP_DOWN_REP_MSG_RDY) {
		ret = drm_dp_mst_handle_down_rep(mgr);
		*handled = true;
	}

	if (esi[1] & DP_UP_REQ_MSG_RDY) {
		ret |= drm_dp_mst_handle_up_req(mgr);
		*handled = true;
	}

	drm_dp_mst_kick_tx(mgr);
	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_hpd_irq);

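/**
 * drm_dp_mst_detect_port() - get connection status for an MST port
 * @connector: DRM connector for this port
 * @mgr: manager for this port
 * @port: unverified pointer to a port
 *
 * This returns the current connection state for a port. It validates the
 * port pointer still exists so the caller doesn't require a reference.
 */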
enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector,
						 struct drm_dp_mst_topology_mgr *mgr,
						 struct drm_dp_mst_port *port)
{
	enum drm_connector_status status = connector_status_disconnected;

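	/* we need to search for the port in the mgr in case it's gone */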
	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return connector_status_disconnected;

	if (!port->ddps)
		goto out;

	switch (port->pdt) {
	case DP_PEER_DEVICE_NONE:
	case DP_PEER_DEVICE_MST_BRANCHING:
		break;

	case DP_PEER_DEVICE_SST_SINK:
		status = connector_status_connected;
		/* for logical ports - cache the EDID */
		if (port->port_num >= DP_MST_LOGICAL_PORT_0 && !port->cached_edid)
			port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
		break;
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
		if (port->ldps)
			status = connector_status_connected;
		break;
	}
out:
	drm_dp_put_port(port);
	return status;
}
EXPORT_SYMBOL(drm_dp_mst_detect_port);

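/**
 * drm_dp_mst_port_has_audio() - Check whether port has audio capability or not
 * @mgr: manager for this port
 * @port: unverified pointer to a port.
 *
 * This returns whether the port supports audio or not.
 */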
bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
			       struct drm_dp_mst_port *port)
{
	bool ret = false;

	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return ret;
	ret = port->has_audio;
	drm_dp_put_port(port);
	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_port_has_audio);

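/**
 * drm_dp_mst_get_edid() - get EDID for an MST port
 * @connector: toplevel connector to get EDID for
 * @mgr: manager for this port
 * @port: unverified pointer to a port.
 *
 * This returns an EDID for the port connected to a connector. It validates
 * the port pointer still exists so the caller doesn't require a reference.
 */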
struct edid *drm_dp_mst_get_edid(struct drm_connector *connector,
				 struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port)
{
	struct edid *edid = NULL;

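	/* we need to search for the port in the mgr in case it's gone */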
	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return NULL;

	if (port->cached_edid) {
		edid = drm_edid_duplicate(port->cached_edid);
	} else {
		edid = drm_get_edid(connector, &port->aux.ddc);
		drm_mode_connector_set_tile_property(connector);
	}
	port->has_audio = drm_detect_monitor_audio(edid);
	drm_dp_put_port(port);
	return edid;
}
EXPORT_SYMBOL(drm_dp_mst_get_edid);

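/**
 * drm_dp_find_vcpi_slots() - find slots for this PBN value
 * @mgr: manager to use
 * @pbn: payload bandwidth to convert into slots.
 */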
int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
			   int pbn)
{
	int num_slots;

	num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);

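	/* max. time slots - one slot for MTP header */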
	if (num_slots > 63)
		return -ENOSPC;
	return num_slots;
}
EXPORT_SYMBOL(drm_dp_find_vcpi_slots);

static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
			    struct drm_dp_vcpi *vcpi, int pbn, int slots)
{
	int ret;

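	/* max. time slots - one slot for MTP header */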
	if (slots > 63)
		return -ENOSPC;

	vcpi->pbn = pbn;
	vcpi->aligned_pbn = slots * mgr->pbn_div;
	vcpi->num_slots = slots;

	ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
	if (ret < 0)
		return ret;
	return 0;
}

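/**
 * drm_dp_atomic_find_vcpi_slots() - Find and add vcpi slots to the state
 * @state: global atomic state
 * @mgr: MST topology manager for the port
 * @port: port to find vcpi slots for
 * @pbn: bandwidth required for the mode in PBN
 *
 * RETURNS:
 * Total slots in the atomic state assigned for this port, or a negative error
 * code if the port no longer exists or not enough slots are available.
 */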
int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
				  struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port, int pbn)
{
	struct drm_dp_mst_topology_state *topology_state;
	int req_slots;

	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
	if (IS_ERR(topology_state))
		return PTR_ERR(topology_state);

	port = drm_dp_get_validated_port_ref(mgr, port);
	if (port == NULL)
		return -EINVAL;
	req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
	DRM_DEBUG_KMS("vcpi slots req=%d, avail=%d\n",
		      req_slots, topology_state->avail_slots);

	if (req_slots > topology_state->avail_slots) {
		drm_dp_put_port(port);
		return -ENOSPC;
	}

	topology_state->avail_slots -= req_slots;
	DRM_DEBUG_KMS("vcpi slots avail=%d\n", topology_state->avail_slots);

	drm_dp_put_port(port);
	return req_slots;
}
EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);

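/**
 * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots
 * @state: global atomic state
 * @mgr: MST topology manager for the port
 * @slots: number of vcpi slots to release
 *
 * RETURNS:
 * 0 if @slots were added back to &drm_dp_mst_topology_state->avail_slots or
 * negative error code
 */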
int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
				     struct drm_dp_mst_topology_mgr *mgr,
				     int slots)
{
	struct drm_dp_mst_topology_state *topology_state;

	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
	if (IS_ERR(topology_state))
		return PTR_ERR(topology_state);

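	/* We cannot rely on port->vcpi.num_slots to update
	 * topology_state->avail_slots as the port may not exist if the parent
	 * branch device was unplugged. This should be fixed by tracking
	 * per-port slot allocation in drm_dp_mst_topology_state instead of
	 * depending on the caller to tell us how many slots to release.
	 */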
	topology_state->avail_slots += slots;
	DRM_DEBUG_KMS("vcpi slots released=%d, avail=%d\n",
		      slots, topology_state->avail_slots);

	return 0;
}
EXPORT_SYMBOL(drm_dp_atomic_release_vcpi_slots);

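/**
 * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
 * @mgr: manager for this port
 * @port: port to allocate a virtual channel for.
 * @pbn: payload bandwidth number to request
 * @slots: number of time slots to allocate for this PBN value
 */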
bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_mst_port *port, int pbn, int slots)
{
	int ret;

	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return false;

	if (slots < 0) {
		drm_dp_put_port(port);
		return false;
	}

	if (port->vcpi.vcpi > 0) {
		DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n",
			      port->vcpi.vcpi, port->vcpi.pbn, pbn);
		if (pbn == port->vcpi.pbn) {
			drm_dp_put_port(port);
			return true;
		}
	}

	ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
	if (ret) {
		DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n",
			      DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
		goto out;
	}
	DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
		      pbn, port->vcpi.num_slots);

	drm_dp_put_port(port);
	return true;
out:
	/* don't leak the validated port reference on the error path */
	drm_dp_put_port(port);
	return false;
}
EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);

int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	int slots = 0;
	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return slots;

	slots = port->vcpi.num_slots;
	drm_dp_put_port(port);
	return slots;
}
EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);

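/**
 * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
 * @mgr: manager for this port
 * @port: unverified pointer to a port.
 *
 * This just resets the number of slots for the ports VCPI for later
 * programming.
 */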
void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return;
	port->vcpi.num_slots = 0;
	drm_dp_put_port(port);
}
EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);

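/**
 * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
 * @mgr: manager for this port
 * @port: unverified port to deallocate vcpi for
 */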
void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return;

	drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
	port->vcpi.num_slots = 0;
	port->vcpi.pbn = 0;
	port->vcpi.aligned_pbn = 0;
	port->vcpi.vcpi = 0;
	drm_dp_put_port(port);
}
EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id, struct drm_dp_payload *payload)
{
	u8 payload_alloc[3], status;
	int ret;
	int retries = 0;

	drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
			   DP_PAYLOAD_TABLE_UPDATED);

	payload_alloc[0] = id;
	payload_alloc[1] = payload->start_slot;
	payload_alloc[2] = payload->num_slots;

	ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
	if (ret != 3) {
		DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
		goto fail;
	}

retry:
	ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
	if (ret < 0) {
		DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
		goto fail;
	}

	if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
		retries++;
		if (retries < 20) {
			usleep_range(10000, 20000);
			goto retry;
		}
		DRM_DEBUG_KMS("status not set after read payload table status %d\n", status);
		ret = -EINVAL;
		goto fail;
	}
	ret = 0;
fail:
	return ret;
}

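/**
 * drm_dp_check_act_status() - Check ACT handled status.
 * @mgr: manager to use
 *
 * Check the payload status bits in the DPCD for ACT handled status.
 */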
int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
{
	u8 status;
	int ret;
	int count = 0;

	do {
		ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
		if (ret < 0) {
			DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
			goto fail;
		}

		if (status & DP_PAYLOAD_ACT_HANDLED)
			break;
		count++;
		udelay(100);
	} while (count < 30);

	if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
		DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count);
		ret = -EINVAL;
		goto fail;
	}
	return 0;
fail:
	return ret;
}
EXPORT_SYMBOL(drm_dp_check_act_status);

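/**
 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
 * @clock: dot clock for the mode
 * @bpp: bpp for the mode.
 *
 * This uses the formula in the spec to calculate the PBN value for a mode.
 */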
int drm_dp_calc_pbn_mode(int clock, int bpp)
{
	u64 kbps;
	s64 peak_kbps;
	u32 numerator;
	u32 denominator;

	kbps = clock * bpp;

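	/*
	 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
	 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
	 * common multiplier to render an integer PBN for all link rate/lane
	 * counts combinations
	 * calculate
	 * peak_kbps *= (1006/1000)
	 * peak_kbps *= (64/54)
	 * peak_kbps *= 8    convert to bytes
	 */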
	numerator = 64 * 1006;
	denominator = 54 * 8 * 1000 * 1000;

	kbps *= numerator;
	peak_kbps = drm_fixp_from_fraction(kbps, denominator);

	return drm_fixp2int_ceil(peak_kbps);
}
EXPORT_SYMBOL(drm_dp_calc_pbn_mode);

static int test_calc_pbn_mode(void)
{
	int ret;
	ret = drm_dp_calc_pbn_mode(154000, 30);
	if (ret != 689) {
		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
			  154000, 30, 689, ret);
		return -EINVAL;
	}
	ret = drm_dp_calc_pbn_mode(234000, 30);
	if (ret != 1047) {
		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
			  234000, 30, 1047, ret);
		return -EINVAL;
	}
	ret = drm_dp_calc_pbn_mode(297000, 24);
	if (ret != 1063) {
		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
			  297000, 24, 1063, ret);
		return -EINVAL;
	}
	return 0;
}

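/* we want to kick the TX after we've ack the up/down IRQs. */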
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
{
	queue_work(system_long_wq, &mgr->tx_work);
}

static void drm_dp_mst_dump_mstb(struct seq_file *m,
				 struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_port *port;
	int tabs = mstb->lct;
	char prefix[10];
	int i;

	for (i = 0; i < tabs; i++)
		prefix[i] = '\t';
	prefix[i] = '\0';

	seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
	list_for_each_entry(port, &mstb->ports, next) {
		seq_printf(m, "%sport: %d: input: %d: pdt: %d, ddps: %d ldps: %d, sdp: %d/%d, %p, conn: %p\n",
			   prefix, port->port_num, port->input, port->pdt,
			   port->ddps, port->ldps, port->num_sdp_streams,
			   port->num_sdp_stream_sinks, port, port->connector);
		if (port->mstb)
			drm_dp_mst_dump_mstb(m, port->mstb);
	}
}

static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf)
{
	int i;

	for (i = 0; i < 64; i += 16) {
		if (drm_dp_dpcd_read(mgr->aux,
				     DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
				     &buf[i], 16) != 16)
			return false;
	}
	return true;
}

static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
			       struct drm_dp_mst_port *port, char *name,
			       int namelen)
{
	struct edid *mst_edid;

	mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
	drm_edid_get_monitor_name(mst_edid, name, namelen);
}

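/**
 * drm_dp_mst_dump_topology(): dump topology to seq file.
 * @m: seq_file to dump output to
 * @mgr: manager to dump current topology for.
 *
 * helper to dump MST topology to a seq file for debugfs.
 */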
void drm_dp_mst_dump_topology(struct seq_file *m,
			      struct drm_dp_mst_topology_mgr *mgr)
{
	int i;
	struct drm_dp_mst_port *port;

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary)
		drm_dp_mst_dump_mstb(m, mgr->mst_primary);

	mutex_unlock(&mgr->lock);

	mutex_lock(&mgr->payload_lock);
	seq_printf(m, "vcpi: %lx %lx %d\n", mgr->payload_mask, mgr->vcpi_mask,
		   mgr->max_payloads);

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->proposed_vcpis[i]) {
			char name[14];

			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
			fetch_monitor_name(mgr, port, name, sizeof(name));
			seq_printf(m, "vcpi %d: %d %d %d sink name: %s\n", i,
				   port->port_num, port->vcpi.vcpi,
				   port->vcpi.num_slots,
				   (*name != 0) ? name : "Unknown");
		} else {
			seq_printf(m, "vcpi %d:unused\n", i);
		}
	}
	for (i = 0; i < mgr->max_payloads; i++) {
		seq_printf(m, "payload %d: %d, %d, %d\n",
			   i,
			   mgr->payloads[i].payload_state,
			   mgr->payloads[i].start_slot,
			   mgr->payloads[i].num_slots);
	}
	mutex_unlock(&mgr->payload_lock);

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		u8 buf[64];
		int ret;

		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
		seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
		ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
		seq_printf(m, "faux/mst: %*ph\n", 2, buf);
		ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
		seq_printf(m, "mst ctrl: %*ph\n", 1, buf);

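		/* dump the standard OUI branch header */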
		ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
		seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
		for (i = 0x3; i < 0x8 && buf[i]; i++)
			seq_printf(m, "%c", buf[i]);
		seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
			   buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
		if (dump_dp_payload_table(mgr, buf))
			seq_printf(m, "payload table: %*ph\n", 64, buf);
	}

	mutex_unlock(&mgr->lock);
}
EXPORT_SYMBOL(drm_dp_mst_dump_topology);

static void drm_dp_tx_work(struct work_struct *work)
{
	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);

	mutex_lock(&mgr->qlock);
	if (!list_empty(&mgr->tx_msg_downq))
		process_single_down_tx_qlock(mgr);
	mutex_unlock(&mgr->qlock);
}

static void drm_dp_free_mst_port(struct kref *kref)
{
	struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
	kref_put(&port->parent->kref, drm_dp_free_mst_branch_device);
	kfree(port);
}

static void drm_dp_destroy_connector_work(struct work_struct *work)
{
	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
	struct drm_dp_mst_port *port;
	bool send_hotplug = false;

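	/*
	 * Not a regular list traverse as we have to drop the destroy
	 * connector lock before destroying the connector, to avoid AB->BA
	 * ordering between this lock and the config mutex.
	 */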
	for (;;) {
		mutex_lock(&mgr->destroy_connector_lock);
		port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
		if (!port) {
			mutex_unlock(&mgr->destroy_connector_lock);
			break;
		}
		list_del(&port->next);
		mutex_unlock(&mgr->destroy_connector_lock);

		kref_init(&port->kref);
		INIT_LIST_HEAD(&port->next);

		mgr->cbs->destroy_connector(mgr, port->connector);

		drm_dp_port_teardown_pdt(port, port->pdt);
		port->pdt = DP_PEER_DEVICE_NONE;

		if (!port->input && port->vcpi.vcpi > 0) {
			drm_dp_mst_reset_vcpi_slots(mgr, port);
			drm_dp_update_payload_part1(mgr);
			drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
		}

		kref_put(&port->kref, drm_dp_free_mst_port);
		send_hotplug = true;
	}
	if (send_hotplug)
		(*mgr->cbs->hotplug)(mgr);
}

static struct drm_private_state *
drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
{
	struct drm_dp_mst_topology_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
				     struct drm_private_state *state)
{
	struct drm_dp_mst_topology_state *mst_state =
		to_dp_mst_topology_state(state);

	kfree(mst_state);
}

static const struct drm_private_state_funcs mst_state_funcs = {
	.atomic_duplicate_state = drm_dp_mst_duplicate_state,
	.atomic_destroy_state = drm_dp_mst_destroy_state,
};

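/**
 * drm_atomic_get_mst_topology_state: get MST topology state
 * @state: global atomic state
 * @mgr: MST topology manager, also the private object in this case
 *
 * This function wraps drm_atomic_get_private_obj_state() passing in the MST
 * atomic state vtable so that the private object state returned is that of a
 * MST topology object. drm_atomic_get_private_obj_state() expects the caller
 * to take care of locking, so we warn if the connection_mutex is not held.
 *
 * RETURNS:
 * The MST topology state or error pointer.
 */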
struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
								    struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_device *dev = mgr->dev;

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
	return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
}
EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);

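/**
 * drm_dp_mst_topology_mgr_init - initialise a topology manager
 * @mgr: manager struct to initialise
 * @dev: device providing this structure - for i2c addition.
 * @aux: DP helper aux channel to talk to this device
 * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
 * @max_payloads: maximum number of payloads this GPU can source
 * @conn_base_id: the connector object ID the MST device is connected to.
 *
 * Return 0 for success, or negative error code on failure
 */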
int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_device *dev, struct drm_dp_aux *aux,
				 int max_dpcd_transaction_bytes,
				 int max_payloads, int conn_base_id)
{
	struct drm_dp_mst_topology_state *mst_state;

	mutex_init(&mgr->lock);
	mutex_init(&mgr->qlock);
	mutex_init(&mgr->payload_lock);
	mutex_init(&mgr->destroy_connector_lock);
	INIT_LIST_HEAD(&mgr->tx_msg_downq);
	INIT_LIST_HEAD(&mgr->destroy_connector_list);
	INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
	INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
	INIT_WORK(&mgr->destroy_connector_work, drm_dp_destroy_connector_work);
	init_waitqueue_head(&mgr->tx_waitq);
	mgr->dev = dev;
	mgr->aux = aux;
	mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
	mgr->max_payloads = max_payloads;
	mgr->conn_base_id = conn_base_id;
	if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
	    max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
		return -EINVAL;
	mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
	if (!mgr->payloads)
		return -ENOMEM;
	mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
	if (!mgr->proposed_vcpis)
		return -ENOMEM;
	set_bit(0, &mgr->payload_mask);
	if (test_calc_pbn_mode() < 0)
		DRM_ERROR("MST PBN self-test failed\n");

	mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
	if (mst_state == NULL)
		return -ENOMEM;

	mst_state->mgr = mgr;

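	/* max. time slots - one slot for MTP header */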
	mst_state->avail_slots = 63;

	drm_atomic_private_obj_init(&mgr->base,
				    &mst_state->base,
				    &mst_state_funcs);

	return 0;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);

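/**
 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
 * @mgr: manager to destroy
 */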
void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
{
	flush_work(&mgr->work);
	flush_work(&mgr->destroy_connector_work);
	mutex_lock(&mgr->payload_lock);
	kfree(mgr->payloads);
	mgr->payloads = NULL;
	kfree(mgr->proposed_vcpis);
	mgr->proposed_vcpis = NULL;
	mutex_unlock(&mgr->payload_lock);
	mgr->dev = NULL;
	mgr->aux = NULL;
	drm_atomic_private_obj_fini(&mgr->base);
	mgr->funcs = NULL;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);

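/* I2C device */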
static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
			       int num)
{
	struct drm_dp_aux *aux = adapter->algo_data;
	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
	unsigned int i;
	bool reading = false;
	struct drm_dp_sideband_msg_req_body msg;
	struct drm_dp_sideband_msg_tx *txmsg = NULL;
	int ret;

	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
	if (!mstb)
		return -EREMOTEIO;

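	/* construct i2c msg */
	/* see if last msg is a read */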
	if (msgs[num - 1].flags & I2C_M_RD)
		reading = true;

	if (!reading || (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)) {
		DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
		ret = -EIO;
		goto out;
	}

	memset(&msg, 0, sizeof(msg));
	msg.req_type = DP_REMOTE_I2C_READ;
	msg.u.i2c_read.num_transactions = num - 1;
	msg.u.i2c_read.port_number = port->port_num;
	for (i = 0; i < num - 1; i++) {
		msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
		msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
		msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
	}
	msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
	msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto out;
	}

	txmsg->dst = mstb;
	drm_dp_encode_sideband_req(&msg, txmsg);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == 1) {
			ret = -EREMOTEIO;
			goto out;
		}
		if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
			ret = -EIO;
			goto out;
		}
		memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
		ret = num;
	}
out:
	kfree(txmsg);
	drm_dp_put_mst_branch_device(mstb);
	return ret;
}

static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
	       I2C_FUNC_SMBUS_READ_BLOCK_DATA |
	       I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
	       I2C_FUNC_10BIT_ADDR;
}

static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
	.functionality = drm_dp_mst_i2c_functionality,
	.master_xfer = drm_dp_mst_i2c_xfer,
};

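/**
 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
 * @aux: DisplayPort AUX channel
 *
 * Returns 0 on success or a negative error code on failure.
 */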
static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
{
	aux->ddc.algo = &drm_dp_mst_i2c_algo;
	aux->ddc.algo_data = aux;
	aux->ddc.retries = 3;

	aux->ddc.class = I2C_CLASS_DDC;
	aux->ddc.owner = THIS_MODULE;
	aux->ddc.dev.parent = aux->dev;
	aux->ddc.dev.of_node = aux->dev->of_node;

	strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
		sizeof(aux->ddc.name));

	return i2c_add_adapter(&aux->ddc);
}

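/**
 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
 * @aux: DisplayPort AUX channel
 */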
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
{
	i2c_del_adapter(&aux->ddc);
}