/*
 * Copyright © 2014 Red Hat
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission.  The copyright holders make no representations
 * about the suitability of this software for any purpose.  It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/seq_file.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fixed.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

#include "drm_crtc_helper_internal.h"

/**
 * DOC: dp mst helper
 *
 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
 * protocol. The helpers contain a topology manager and bandwidth manager.
 * The helpers encapsulate the sending and receiving of sideband msgs.
 */

static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf);
static int test_calc_pbn_mode(void);

static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id,
				     struct drm_dp_payload *payload);

static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port,
				 int offset, int size, u8 *bytes);
static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes);

static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				     struct drm_dp_mst_branch *mstb);
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid);

static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);

#define DP_STR(x) [DP_ ## x] = #x

static const char *drm_dp_mst_req_type_str(u8 req_type)
{
	static const char * const req_type_str[] = {
		DP_STR(GET_MSG_TRANSACTION_VERSION),
		DP_STR(LINK_ADDRESS),
		DP_STR(CONNECTION_STATUS_NOTIFY),
		DP_STR(ENUM_PATH_RESOURCES),
		DP_STR(ALLOCATE_PAYLOAD),
		DP_STR(QUERY_PAYLOAD),
		DP_STR(RESOURCE_STATUS_NOTIFY),
		DP_STR(CLEAR_PAYLOAD_ID_TABLE),
		DP_STR(REMOTE_DPCD_READ),
		DP_STR(REMOTE_DPCD_WRITE),
		DP_STR(REMOTE_I2C_READ),
		DP_STR(REMOTE_I2C_WRITE),
		DP_STR(POWER_UP_PHY),
		DP_STR(POWER_DOWN_PHY),
		DP_STR(SINK_EVENT_NOTIFY),
		DP_STR(QUERY_STREAM_ENC_STATUS),
	};

	if (req_type >= ARRAY_SIZE(req_type_str) ||
	    !req_type_str[req_type])
		return "unknown";

	return req_type_str[req_type];
}

#undef DP_STR
#define DP_STR(x) [DP_NAK_ ## x] = #x

static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)
{
	static const char * const nak_reason_str[] = {
		DP_STR(WRITE_FAILURE),
		DP_STR(INVALID_READ),
		DP_STR(CRC_FAILURE),
		DP_STR(BAD_PARAM),
		DP_STR(DEFER),
		DP_STR(LINK_FAILURE),
		DP_STR(NO_RESOURCES),
		DP_STR(DPCD_FAIL),
		DP_STR(I2C_NAK),
		DP_STR(ALLOCATE_FAIL),
	};

	if (nak_reason >= ARRAY_SIZE(nak_reason_str) ||
	    !nak_reason_str[nak_reason])
		return "unknown";

	return nak_reason_str[nak_reason];
}

#undef DP_STR

/* sideband msg handling */
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = num_nibbles * 4;
	u8 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x10) == 0x10)
			remainder ^= 0x13;
	}

	number_of_bits = 4;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x10) != 0)
			remainder ^= 0x13;
	}

	return remainder;
}

static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = number_of_bytes * 8;
	u16 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x100) == 0x100)
			remainder ^= 0xd5;
	}

	number_of_bits = 8;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x100) != 0)
			remainder ^= 0xd5;
	}

	return remainder & 0xff;
}
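
/*
 * Illustrative sketch (not driver code): both CRC helpers above walk the
 * message bit by bit against the polynomials the DP 1.2 spec defines for
 * sideband messages (0x13 over nibbles for headers, 0xd5 over bytes for
 * data). Protecting a two-byte data chunk would look roughly like this:
 *
 *	u8 chunk[3] = { 0x01, 0x02, 0 };
 *
 *	chunk[2] = drm_dp_msg_data_crc4(chunk, 2); // CRC over the 2 data bytes
 *	// a receiver recomputes drm_dp_msg_data_crc4(chunk, 2) and compares
 */
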
static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
{
	u8 size = 3;
	size += (hdr->lct / 2);
	return size;
}

static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int *len)
{
	int idx = 0;
	int i;
	u8 crc4;
	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
	for (i = 0; i < (hdr->lct / 2); i++)
		buf[idx++] = hdr->rad[i];
	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
		(hdr->msg_len & 0x3f);
	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);

	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
	buf[idx - 1] |= (crc4 & 0xf);

	*len = idx;
}

static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int buflen, u8 *hdrlen)
{
	u8 crc4;
	u8 len;
	int i;
	u8 idx;
	if (buf[0] == 0)
		return false;
	len = 3;
	len += ((buf[0] & 0xf0) >> 4) / 2;
	if (len > buflen)
		return false;
	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);

	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
		DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
		return false;
	}

	hdr->lct = (buf[0] & 0xf0) >> 4;
	hdr->lcr = (buf[0] & 0xf);
	idx = 1;
	for (i = 0; i < (hdr->lct / 2); i++)
		hdr->rad[i] = buf[idx++];
	hdr->broadcast = (buf[idx] >> 7) & 0x1;
	hdr->path_msg = (buf[idx] >> 6) & 0x1;
	hdr->msg_len = buf[idx] & 0x3f;
	idx++;
	hdr->somt = (buf[idx] >> 7) & 0x1;
	hdr->eomt = (buf[idx] >> 6) & 0x1;
	hdr->seqno = (buf[idx] >> 4) & 0x1;
	idx++;
	*hdrlen = idx;
	return true;
}
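
/*
 * Illustrative sketch: the encode/decode pair above round-trips the sideband
 * message header. For lct = 1 (no RAD bytes) the wire layout worked with here
 * is three bytes:
 *
 *	byte 0: [lct:4 | lcr:4]
 *	byte 1: [broadcast:1 | path_msg:1 | msg_len:6]
 *	byte 2: [somt:1 | eomt:1 | zero:1 | seqno:1 | crc4:4]
 *
 *	struct drm_dp_sideband_msg_hdr hdr = { .lct = 1, .msg_len = 5 };
 *	u8 buf[16], hdrlen;
 *	int len;
 *
 *	drm_dp_encode_sideband_msg_hdr(&hdr, buf, &len);	// len == 3 here
 *	drm_dp_decode_sideband_msg_hdr(&hdr, buf, len, &hdrlen);
 */
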

static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
				       struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	int i;
	u8 *buf = raw->msg;
	buf[idx++] = req->req_type & 0x7f;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
		idx++;
		break;
	case DP_ALLOCATE_PAYLOAD:
		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
			(req->u.allocate_payload.number_sdp_streams & 0xf);
		idx++;
		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn >> 8);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
		idx++;
		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
				(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
			idx++;
		}
		if (req->u.allocate_payload.number_sdp_streams & 1) {
			i = req->u.allocate_payload.number_sdp_streams - 1;
			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
			idx++;
		}
		break;
	case DP_QUERY_PAYLOAD:
		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
		idx++;
		break;
	case DP_REMOTE_DPCD_READ:
		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_read.num_bytes);
		idx++;
		break;

	case DP_REMOTE_DPCD_WRITE:
		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
		idx += req->u.dpcd_write.num_bytes;
		break;
	case DP_REMOTE_I2C_READ:
		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
		idx++;
		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
			idx++;
			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
			idx++;
			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
			idx += req->u.i2c_read.transactions[i].num_bytes;

			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
			idx++;
		}
		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_read.num_bytes_read);
		idx++;
		break;

	case DP_REMOTE_I2C_WRITE:
		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
		idx += req->u.i2c_write.num_bytes;
		break;

	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
		idx++;
		break;
	}
	raw->cur_len = idx;
}

static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
{
	u8 crc4;
	crc4 = drm_dp_msg_data_crc4(msg, len);
	msg[len] = crc4;
}

static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
					 struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	u8 *buf = raw->msg;

	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);

	raw->cur_len = idx;
}

static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
				      u8 *replybuf, u8 replybuflen, bool hdr)
{
	int ret;
	u8 crc4;

	if (hdr) {
		u8 hdrlen;
		struct drm_dp_sideband_msg_hdr recv_hdr;
		ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
		if (ret == false) {
			print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
			return false;
		}

		/*
		 * ignore out-of-order messages or messages that are part of a
		 * failed transaction
		 */
		if (!recv_hdr.somt && !msg->have_somt)
			return false;

		/* get length contained in this portion */
		msg->curchunk_len = recv_hdr.msg_len;
		msg->curchunk_hdrlen = hdrlen;

		/* we have already gotten an somt - don't bother parsing */
		if (recv_hdr.somt && msg->have_somt)
			return false;

		if (recv_hdr.somt) {
			memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
			msg->have_somt = true;
		}
		if (recv_hdr.eomt)
			msg->have_eomt = true;

		/* copy the bytes for the remainder of this header chunk */
		msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
		memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
	} else {
		memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
		msg->curchunk_idx += replybuflen;
	}

	if (msg->curchunk_idx >= msg->curchunk_len) {
		/* do CRC */
		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
		/* copy chunk into bigger msg */
		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
		msg->curlen += msg->curchunk_len - 1;
	}
	return true;
}

static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
					       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	int i;
	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
	idx += 16;
	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
		if (raw->msg[idx] & 0x80)
			repmsg->u.link_addr.ports[i].input_port = 1;

		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);

		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
		if (repmsg->u.link_addr.ports[i].input_port == 0)
			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		if (repmsg->u.link_addr.ports[i].input_port == 0) {
			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
			idx++;
			if (idx > raw->curlen)
				goto fail_len;
			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
			idx += 16;
			if (idx > raw->curlen)
				goto fail_len;
			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
			idx++;

		}
		if (idx > raw->curlen)
			goto fail_len;
	}

	return true;
fail_len:
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
						   struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote dpcd read reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
						      struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
	idx++;

	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
							  struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen) {
		DRM_DEBUG_KMS("power up/down phy parse length fail %d %d\n",
			      idx, raw->curlen);
		return false;
	}
	return true;
}

static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
					struct drm_dp_sideband_msg_reply_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
	msg->req_type = (raw->msg[0] & 0x7f);

	if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) {
		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
		msg->u.nak.reason = raw->msg[17];
		msg->u.nak.nak_data = raw->msg[18];
		return false;
	}

	switch (msg->req_type) {
	case DP_LINK_ADDRESS:
		return drm_dp_sideband_parse_link_address(raw, msg);
	case DP_QUERY_PAYLOAD:
		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
	case DP_REMOTE_DPCD_READ:
		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
	case DP_REMOTE_DPCD_WRITE:
		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
	case DP_REMOTE_I2C_READ:
		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
	case DP_ENUM_PATH_RESOURCES:
		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
	case DP_ALLOCATE_PAYLOAD:
		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
	default:
		DRM_ERROR("Got unknown reply 0x%02x (%s)\n", msg->req_type,
			  drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}

static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
							   struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
							 struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
				      struct drm_dp_sideband_msg_req_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->req_type = (raw->msg[0] & 0x7f);

	switch (msg->req_type) {
	case DP_CONNECTION_STATUS_NOTIFY:
		return drm_dp_sideband_parse_connection_status_notify(raw, msg);
	case DP_RESOURCE_STATUS_NOTIFY:
		return drm_dp_sideband_parse_resource_status_notify(raw, msg);
	default:
		DRM_ERROR("Got unknown request 0x%02x (%s)\n", msg->req_type,
			  drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}

static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_WRITE;
	req.u.dpcd_write.port_number = port_num;
	req.u.dpcd_write.dpcd_address = offset;
	req.u.dpcd_write.num_bytes = num_bytes;
	req.u.dpcd_write.bytes = bytes;
	drm_dp_encode_sideband_req(&req, msg);

	return 0;
}

static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_LINK_ADDRESS;
	drm_dp_encode_sideband_req(&req, msg);
	return 0;
}

static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_ENUM_PATH_RESOURCES;
	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
				  u8 vcpi, uint16_t pbn,
				  u8 number_sdp_streams,
				  u8 *sdp_stream_sink)
{
	struct drm_dp_sideband_msg_req_body req;
	memset(&req, 0, sizeof(req));
	req.req_type = DP_ALLOCATE_PAYLOAD;
	req.u.allocate_payload.port_number = port_num;
	req.u.allocate_payload.vcpi = vcpi;
	req.u.allocate_payload.pbn = pbn;
	req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
	memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
	       number_sdp_streams);
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static int build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
				  int port_num, bool power_up)
{
	struct drm_dp_sideband_msg_req_body req;

	if (power_up)
		req.req_type = DP_POWER_UP_PHY;
	else
		req.req_type = DP_POWER_DOWN_PHY;

	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_vcpi *vcpi)
{
	int ret, vcpi_ret;

	mutex_lock(&mgr->payload_lock);
	ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
	if (ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of payload ids %d\n", ret);
		goto out_unlock;
	}

	vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
	if (vcpi_ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);
		goto out_unlock;
	}

	set_bit(ret, &mgr->payload_mask);
	set_bit(vcpi_ret, &mgr->vcpi_mask);
	vcpi->vcpi = vcpi_ret + 1;
	mgr->proposed_vcpis[ret - 1] = vcpi;
out_unlock:
	mutex_unlock(&mgr->payload_lock);
	return ret;
}

static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
				      int vcpi)
{
	int i;
	if (vcpi == 0)
		return;

	mutex_lock(&mgr->payload_lock);
	DRM_DEBUG_KMS("putting payload %d\n", vcpi);
	clear_bit(vcpi - 1, &mgr->vcpi_mask);

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->proposed_vcpis[i])
			if (mgr->proposed_vcpis[i]->vcpi == vcpi) {
				mgr->proposed_vcpis[i] = NULL;
				clear_bit(i + 1, &mgr->payload_mask);
			}
	}
	mutex_unlock(&mgr->payload_lock);
}
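
/*
 * Worked example of the ID bookkeeping above (illustrative only, assuming a
 * freshly initialised topology manager, where bit 0 of payload_mask is
 * reserved for payload 0): the first allocation finds bit 1 free in
 * payload_mask and bit 0 free in vcpi_mask, so:
 *
 *	struct drm_dp_vcpi vcpi = {};
 *	int id = drm_dp_mst_assign_payload_id(mgr, &vcpi);
 *
 *	// id == 1, vcpi.vcpi == 0 + 1 == 1,
 *	// and mgr->proposed_vcpis[id - 1] == &vcpi
 *	drm_dp_mst_put_payload_id(mgr, vcpi.vcpi);	// releases both bits
 */
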

static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_sideband_msg_tx *txmsg)
{
	unsigned int state;

	/*
	 * All updates to txmsg->state are protected by mgr->qlock, and the
	 * two cases we check here are terminal states. For those the barriers
	 * provided by the wake_up/wait_event pair are enough.
	 */
	state = READ_ONCE(txmsg->state);
	return (state == DRM_DP_SIDEBAND_TX_RX ||
		state == DRM_DP_SIDEBAND_TX_TIMEOUT);
}

static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
				    struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
	int ret;

	ret = wait_event_timeout(mgr->tx_waitq,
				 check_txmsg_state(mgr, txmsg),
				 (4 * HZ));
	mutex_lock(&mstb->mgr->qlock);
	if (ret > 0) {
		if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
			ret = -EIO;
			goto out;
		}
	} else {
		DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);

		/* dump some state */
		ret = -EIO;

		/* remove from q */
		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
			list_del(&txmsg->next);
		}

		if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
			mstb->tx_slots[txmsg->seqno] = NULL;
		}
	}
out:
	mutex_unlock(&mgr->qlock);

	return ret;
}

static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;

	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
	if (!mstb)
		return NULL;

	mstb->lct = lct;
	if (lct > 1)
		memcpy(mstb->rad, rad, lct / 2);
	INIT_LIST_HEAD(&mstb->ports);
	kref_init(&mstb->topology_kref);
	kref_init(&mstb->malloc_kref);
	return mstb;
}

static void drm_dp_free_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb =
		container_of(kref, struct drm_dp_mst_branch, malloc_kref);

	if (mstb->port_parent)
		drm_dp_mst_put_port_malloc(mstb->port_parent);

	kfree(mstb);
}

/**
 * DOC: Branch device and port refcounting
 *
 * Topology references are not the same thing as malloc references. A
 * topology reference (&drm_dp_mst_branch.topology_kref and
 * &drm_dp_mst_port.topology_kref) keeps a branch device or port present in
 * the in-memory MST topology; once it drops to 0 the element is removed from
 * the topology and most of its state is torn down. A malloc reference
 * (&drm_dp_mst_branch.malloc_kref and &drm_dp_mst_port.malloc_kref) only
 * keeps the underlying memory allocation, and the minimal state needed to
 * dereference it safely, alive. This lets users such as drivers hold on to a
 * &struct drm_dp_mst_port across hotplugs without risking a use-after-free.
 *
 * Consequently, topology references must be acquired with
 * drm_dp_mst_topology_try_get_mstb() / drm_dp_mst_topology_try_get_port(),
 * which can fail if the element has already been removed from the topology,
 * while malloc references can always be taken with
 * drm_dp_mst_get_mstb_malloc() / drm_dp_mst_get_port_malloc() as long as at
 * least one reference of either kind is already held. Every topology
 * reference implicitly pins a malloc reference for its holder for as long as
 * the topology reference is held.
 */

/**
 * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of
 *
 * Increments &drm_dp_mst_branch.malloc_kref. When
 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
 * will be released and @mstb may no longer be used.
 *
 * See also: drm_dp_mst_put_mstb_malloc()
 */
static void
drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	kref_get(&mstb->malloc_kref);
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
}

/**
 * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of
 *
 * Decrements &drm_dp_mst_branch.malloc_kref. When
 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
 * will be released and @mstb may no longer be used.
 *
 * See also: drm_dp_mst_get_mstb_malloc()
 */
static void
drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
	kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device);
}

static void drm_dp_free_mst_port(struct kref *kref)
{
	struct drm_dp_mst_port *port =
		container_of(kref, struct drm_dp_mst_port, malloc_kref);

	drm_dp_mst_put_mstb_malloc(port->parent);
	kfree(port);
}

/**
 * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
 * @port: The &struct drm_dp_mst_port to increment the malloc refcount of
 *
 * Increments &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
 * reaches 0, the memory allocation for @port will be released and @port may
 * no longer be used.
 *
 * Because @port could potentially be freed at any time by the DP MST helpers
 * if &drm_dp_mst_port.malloc_kref reaches 0, including during a call to this
 * function, drivers that wish to make use of &struct drm_dp_mst_port should
 * ensure that they grab at least one main malloc reference to their MST ports
 * in &drm_dp_mst_topology_cbs.add_connector. This callback is called before
 * there is any chance for &drm_dp_mst_port.malloc_kref to reach 0.
 *
 * See also: drm_dp_mst_put_port_malloc()
 */
void
drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port)
{
	kref_get(&port->malloc_kref);
	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref));
}
EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);

/**
 * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
 * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of
 *
 * Decrements &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
 * reaches 0, the memory allocation for @port will be released and @port may
 * no longer be used.
 *
 * See also: drm_dp_mst_get_port_malloc()
 */
void
drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
{
	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1);
	kref_put(&port->malloc_kref, drm_dp_free_mst_port);
}
EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);

static void drm_dp_destroy_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb =
		container_of(kref, struct drm_dp_mst_branch, topology_kref);
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
	struct drm_dp_mst_port *port, *tmp;
	bool wake_tx = false;

	mutex_lock(&mgr->lock);
	list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
		list_del(&port->next);
		drm_dp_mst_topology_put_port(port);
	}
	mutex_unlock(&mgr->lock);

	/* drop any tx slot msg */
	mutex_lock(&mstb->mgr->qlock);
	if (mstb->tx_slots[0]) {
		mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		mstb->tx_slots[0] = NULL;
		wake_tx = true;
	}
	if (mstb->tx_slots[1]) {
		mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		mstb->tx_slots[1] = NULL;
		wake_tx = true;
	}
	mutex_unlock(&mstb->mgr->qlock);

	if (wake_tx)
		wake_up_all(&mstb->mgr->tx_waitq);

	drm_dp_mst_put_mstb_malloc(mstb);
}

/**
 * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
 * branch device unless it's zero
 * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
 *
 * Attempts to grab a topology reference to @mstb, if it hasn't yet been
 * removed from the topology (e.g. &drm_dp_mst_branch.topology_kref has
 * reached 0). Holding a topology reference implies that a malloc reference
 * will be held to @mstb as long as the user holds the topology reference.
 *
 * Care should be taken to ensure that the user has at least one malloc
 * reference to @mstb. If you already have a topology reference to @mstb, you
 * should use drm_dp_mst_topology_get_mstb() instead.
 *
 * See also:
 * drm_dp_mst_topology_get_mstb()
 * drm_dp_mst_topology_put_mstb()
 *
 * Returns:
 * * 1: A topology reference was grabbed successfully
 * * 0: @mstb is no longer in the topology, no reference was grabbed
 */
static int __must_check
drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
{
	int ret = kref_get_unless_zero(&mstb->topology_kref);

	if (ret)
		DRM_DEBUG("mstb %p (%d)\n", mstb,
			  kref_read(&mstb->topology_kref));

	return ret;
}

/**
 * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
 * branch device
 * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
 *
 * Increments &drm_dp_mst_branch.topology_kref without checking whether or not
 * it's already reached 0. This is only valid to use in scenarios where you
 * are already guaranteed to have at least one active topology reference to
 * @mstb. Otherwise, drm_dp_mst_topology_try_get_mstb() must be used.
 *
 * See also:
 * drm_dp_mst_topology_try_get_mstb()
 * drm_dp_mst_topology_put_mstb()
 */
static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
{
	WARN_ON(kref_read(&mstb->topology_kref) == 0);
	kref_get(&mstb->topology_kref);
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
}

/**
 * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to release the topology reference from
 *
 * Releases a topology reference from @mstb by decrementing
 * &drm_dp_mst_branch.topology_kref.
 *
 * See also:
 * drm_dp_mst_topology_try_get_mstb()
 * drm_dp_mst_topology_get_mstb()
 */
static void
drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
{
	DRM_DEBUG("mstb %p (%d)\n",
		  mstb, kref_read(&mstb->topology_kref) - 1);
	kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
}

static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
{
	struct drm_dp_mst_branch *mstb;

	switch (old_pdt) {
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		/* remove i2c over sideband */
		drm_dp_mst_unregister_i2c_bus(&port->aux);
		break;
	case DP_PEER_DEVICE_MST_BRANCHING:
		mstb = port->mstb;
		port->mstb = NULL;
		drm_dp_mst_topology_put_mstb(mstb);
		break;
	}
}

static void drm_dp_destroy_port(struct kref *kref)
{
	struct drm_dp_mst_port *port =
		container_of(kref, struct drm_dp_mst_port, topology_kref);
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;

	if (!port->input) {
		kfree(port->cached_edid);

		/*
		 * The only time we don't have a connector
		 * on an output port is if the connector init
		 * fails.
		 */
		if (port->connector) {
			/* we can't destroy the connector, just detach it
			 * and then destroy the connector via a worker
			 * thread
			 */
			mutex_lock(&mgr->destroy_connector_lock);
			list_add(&port->next, &mgr->destroy_connector_list);
			mutex_unlock(&mgr->destroy_connector_lock);
			schedule_work(&mgr->destroy_connector_work);
			return;
		}

		/* no need to clean up vcpi
		 * as if we have no connector we never setup a vcpi */
		drm_dp_port_teardown_pdt(port, port->pdt);
		port->pdt = DP_PEER_DEVICE_NONE;
	}
	drm_dp_mst_put_port_malloc(port);
}

/**
 * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
 * port unless it's zero
 * @port: &struct drm_dp_mst_port to increment the topology refcount of
 *
 * Attempts to grab a topology reference to @port, if it hasn't yet been
 * removed from the topology (e.g. &drm_dp_mst_port.topology_kref has reached
 * 0). Holding a topology reference implies that a malloc reference will be
 * held to @port as long as the user holds the topology reference.
 *
 * Care should be taken to ensure that the user has at least one malloc
 * reference to @port. If you already have a topology reference to @port, you
 * should use drm_dp_mst_topology_get_port() instead.
 *
 * See also:
 * drm_dp_mst_topology_get_port()
 * drm_dp_mst_topology_put_port()
 *
 * Returns:
 * * 1: A topology reference was grabbed successfully
 * * 0: @port is no longer in the topology, no reference was grabbed
 */
static int __must_check
drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
{
	int ret = kref_get_unless_zero(&port->topology_kref);

	if (ret)
		DRM_DEBUG("port %p (%d)\n", port,
			  kref_read(&port->topology_kref));

	return ret;
}

/**
 * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
 * @port: The &struct drm_dp_mst_port to increment the topology refcount of
 *
 * Increments &drm_dp_mst_port.topology_kref without checking whether or not
 * it's already reached 0. This is only valid to use in scenarios where you
 * are already guaranteed to have at least one active topology reference to
 * @port. Otherwise, drm_dp_mst_topology_try_get_port() must be used.
 *
 * See also:
 * drm_dp_mst_topology_try_get_port()
 * drm_dp_mst_topology_put_port()
 */
static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
{
	WARN_ON(kref_read(&port->topology_kref) == 0);
	kref_get(&port->topology_kref);
	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref));
}

/**
 * drm_dp_mst_topology_put_port() - release a topology reference to a port
 * @port: The &struct drm_dp_mst_port to release the topology reference from
 *
 * Releases a topology reference from @port by decrementing
 * &drm_dp_mst_port.topology_kref.
 *
 * See also:
 * drm_dp_mst_topology_try_get_port()
 * drm_dp_mst_topology_get_port()
 */
static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
{
	DRM_DEBUG("port %p (%d)\n",
		  port, kref_read(&port->topology_kref) - 1);
	kref_put(&port->topology_kref, drm_dp_destroy_port);
}

static struct drm_dp_mst_branch *
drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb,
					      struct drm_dp_mst_branch *to_find)
{
	struct drm_dp_mst_port *port;
	struct drm_dp_mst_branch *rmstb;

	if (to_find == mstb)
		return mstb;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->mstb) {
			rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
			    port->mstb, to_find);
			if (rmstb)
				return rmstb;
		}
	}
	return NULL;
}

static struct drm_dp_mst_branch *
drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_branch *rmstb = NULL;

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
		    mgr->mst_primary, mstb);

		if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb))
			rmstb = NULL;
	}
	mutex_unlock(&mgr->lock);
	return rmstb;
}

static struct drm_dp_mst_port *
drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb,
					      struct drm_dp_mst_port *to_find)
{
	struct drm_dp_mst_port *port, *mport;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port == to_find)
			return port;

		if (port->mstb) {
			mport = drm_dp_mst_topology_get_port_validated_locked(
			    port->mstb, to_find);
			if (mport)
				return mport;
		}
	}
	return NULL;
}

static struct drm_dp_mst_port *
drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_port *rport = NULL;

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		rport = drm_dp_mst_topology_get_port_validated_locked(
		    mgr->mst_primary, port);

		if (rport && !drm_dp_mst_topology_try_get_port(rport))
			rport = NULL;
	}
	mutex_unlock(&mgr->lock);
	return rport;
}

static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
{
	struct drm_dp_mst_port *port;
	int ret;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->port_num == port_num) {
			ret = drm_dp_mst_topology_try_get_port(port);
			return ret ? port : NULL;
		}
	}

	return NULL;
}

/*
 * calculate a new RAD for this MST branch device
 * if parent has an LCT of 2 then it has 1 nibble of RAD,
 * if parent has an LCT of 3 then it has 2 nibbles of RAD,
 */
static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
			       u8 *rad)
{
	int parent_lct = port->parent->lct;
	int shift = 4;
	int idx = (parent_lct - 1) / 2;
	if (parent_lct > 1) {
		memcpy(rad, port->parent->rad, idx + 1);
		shift = (parent_lct % 2) ? 4 : 0;
	} else
		rad[0] = 0;

	rad[idx] |= port->port_num << shift;
	return parent_lct + 1;
}
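
/*
 * Worked example (illustrative only): a port with port_num 8 whose parent
 * branch sits at lct = 2 with rad[0] = 0x10 (i.e. reached through port 1 of
 * the root branch) gets lct = 3 back from this helper; since the parent lct
 * is even, the new nibble lands in the low half of the same byte:
 *
 *	u8 rad[6] = {};
 *	u8 lct = drm_dp_calculate_rad(port, rad);	// the port above
 *
 *	// lct == 3, rad[0] == 0x18: upper nibble 1 (parent hop),
 *	// lower nibble 8 (this port)
 */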

/*
 * return true if a new link address request should be sent for a new mstb
 */
static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
{
	int ret;
	u8 rad[6], lct;
	bool send_link = false;
	switch (port->pdt) {
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		/* add i2c over sideband */
		ret = drm_dp_mst_register_i2c_bus(&port->aux);
		break;
	case DP_PEER_DEVICE_MST_BRANCHING:
		lct = drm_dp_calculate_rad(port, rad);

		port->mstb = drm_dp_add_mst_branch_device(lct, rad);
		if (port->mstb) {
			port->mstb->mgr = port->mgr;
			port->mstb->port_parent = port;
			/*
			 * Make sure this port's memory allocation stays
			 * around until its child MSTB releases it
			 */
			drm_dp_mst_get_port_malloc(port);

			send_link = true;
		}
		break;
	}
	return send_link;
}

/**
 * drm_dp_mst_dpcd_read() - read a series of bytes from the DPCD via sideband
 * @aux: Fake sideband AUX CH
 * @offset: address of the (first) register to read
 * @buffer: buffer to store the register values
 * @size: number of bytes in @buffer
 *
 * Performs the same functionality for remote devices via
 * sideband messaging as drm_dp_dpcd_read() does for local
 * devices via actual AUX CH.
 *
 * Return: Number of bytes read, or negative error code on failure.
 */
ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
			     unsigned int offset, void *buffer, size_t size)
{
	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
						    aux);

	return drm_dp_send_dpcd_read(port->mgr, port,
				     offset, size, buffer);
}

/**
 * drm_dp_mst_dpcd_write() - write a series of bytes to the DPCD via sideband
 * @aux: Fake sideband AUX CH
 * @offset: address of the (first) register to write
 * @buffer: buffer containing the values to write
 * @size: number of bytes in @buffer
 *
 * Performs the same functionality for remote devices via
 * sideband messaging as drm_dp_dpcd_write() does for local
 * devices via actual AUX CH.
 *
 * Return: Number of bytes written, or negative error code on failure.
 */
ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
			      unsigned int offset, void *buffer, size_t size)
{
	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
						    aux);

	return drm_dp_send_dpcd_write(port->mgr, port,
				      offset, size, buffer);
}

static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
{
	int ret;

	memcpy(mstb->guid, guid, 16);

	if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
		if (mstb->port_parent) {
			ret = drm_dp_send_dpcd_write(
					mstb->mgr,
					mstb->port_parent,
					DP_GUID,
					16,
					mstb->guid);
		} else {

			ret = drm_dp_dpcd_write(
					mstb->mgr->aux,
					DP_GUID,
					mstb->guid,
					16);
		}
	}
}

static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
				int pnum,
				char *proppath,
				size_t proppath_size)
{
	int i;
	char temp[8];
	snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
	for (i = 0; i < (mstb->lct - 1); i++) {
		int shift = (i % 2) ? 0 : 4;
		int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
		snprintf(temp, sizeof(temp), "-%d", port_num);
		strlcat(proppath, temp, proppath_size);
	}
	snprintf(temp, sizeof(temp), "-%d", pnum);
	strlcat(proppath, temp, proppath_size);
}
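
/*
 * Illustrative example: with conn_base_id 2, a port exposed as port 8 on a
 * branch device that is itself reached through port 1 of the root (lct = 2,
 * rad[0] = 0x10) ends up with the property path "mst:2-1-8" - the base id
 * first, then one hop per nibble of the RAD, then the port number itself.
 */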

/**
 * drm_dp_mst_connector_late_register() - Late MST connector registration
 * @connector: The MST connector
 * @port: The MST port for this connector
 *
 * Helper to register the remote aux device for this MST port. Drivers should
 * call this from their mst connector's late_register hook to enable MST aux
 * devices.
 *
 * Return: 0 on success, negative error code on failure.
 */
int drm_dp_mst_connector_late_register(struct drm_connector *connector,
				       struct drm_dp_mst_port *port)
{
	DRM_DEBUG_KMS("registering %s remote bus for %s\n",
		      port->aux.name, connector->kdev->kobj.name);

	port->aux.dev = connector->kdev;
	return drm_dp_aux_register_devnode(&port->aux);
}
EXPORT_SYMBOL(drm_dp_mst_connector_late_register);

/**
 * drm_dp_mst_connector_early_unregister() - Early MST connector unregistration
 * @connector: The MST connector
 * @port: The MST port for this connector
 *
 * Helper to unregister the remote aux device for this MST port, registered by
 * drm_dp_mst_connector_late_register(). Drivers should call this from their
 * mst connector's early_unregister hook.
 */
void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
					   struct drm_dp_mst_port *port)
{
	DRM_DEBUG_KMS("unregistering %s remote bus for %s\n",
		      port->aux.name, connector->kdev->kobj.name);
	drm_dp_aux_unregister_devnode(&port->aux);
}
EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister);

static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
			    struct drm_device *dev,
			    struct drm_dp_link_addr_reply_port *port_msg)
{
	struct drm_dp_mst_port *port;
	bool ret;
	bool created = false;
	int old_pdt = 0;
	int old_ddps = 0;

	port = drm_dp_get_port(mstb, port_msg->port_number);
	if (!port) {
		port = kzalloc(sizeof(*port), GFP_KERNEL);
		if (!port)
			return;
		kref_init(&port->topology_kref);
		kref_init(&port->malloc_kref);
		port->parent = mstb;
		port->port_num = port_msg->port_number;
		port->mgr = mstb->mgr;
		port->aux.name = "DPMST";
		port->aux.dev = dev->dev;
		port->aux.is_remote = true;

		/*
		 * Make sure the memory allocation for our parent branch stays
		 * around until our own memory allocation is released
		 */
		drm_dp_mst_get_mstb_malloc(mstb);

		created = true;
	} else {
		old_pdt = port->pdt;
		old_ddps = port->ddps;
	}

	port->pdt = port_msg->peer_device_type;
	port->input = port_msg->input_port;
	port->mcs = port_msg->mcs;
	port->ddps = port_msg->ddps;
	port->ldps = port_msg->legacy_device_plug_status;
	port->dpcd_rev = port_msg->dpcd_revision;
	port->num_sdp_streams = port_msg->num_sdp_streams;
	port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;

	/* manage mstb port lists with mgr lock - take a reference
	   for this list */
	if (created) {
		mutex_lock(&mstb->mgr->lock);
		drm_dp_mst_topology_get_port(port);
		list_add(&port->next, &mstb->ports);
		mutex_unlock(&mstb->mgr->lock);
	}

	if (old_ddps != port->ddps) {
		if (port->ddps) {
			if (!port->input) {
				drm_dp_send_enum_path_resources(mstb->mgr,
								mstb, port);
			}
		} else {
			port->available_pbn = 0;
		}
	}

	if (old_pdt != port->pdt && !port->input) {
		drm_dp_port_teardown_pdt(port, old_pdt);

		ret = drm_dp_port_setup_pdt(port);
		if (ret == true)
			drm_dp_send_link_address(mstb->mgr, port->mstb);
	}

	if (created && !port->input) {
		char proppath[255];

		build_mst_prop_path(mstb, port->port_num, proppath,
				    sizeof(proppath));
		port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr,
								   port,
								   proppath);
		if (!port->connector) {
			/* remove it from the port list */
			mutex_lock(&mstb->mgr->lock);
			list_del(&port->next);
			mutex_unlock(&mstb->mgr->lock);
			/* drop port list reference */
			drm_dp_mst_topology_put_port(port);
			goto out;
		}
		if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
		     port->pdt == DP_PEER_DEVICE_SST_SINK) &&
		    port->port_num >= DP_MST_LOGICAL_PORT_0) {
			port->cached_edid = drm_get_edid(port->connector,
							 &port->aux.ddc);
			drm_connector_set_tile_property(port->connector);
		}
		(*mstb->mgr->cbs->register_connector)(port->connector);
	}

out:
	/* put reference to this port */
	drm_dp_mst_topology_put_port(port);
}

static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
			       struct drm_dp_connection_status_notify *conn_stat)
{
	struct drm_dp_mst_port *port;
	int old_pdt;
	int old_ddps;
	bool dowork = false;
	port = drm_dp_get_port(mstb, conn_stat->port_number);
	if (!port)
		return;

	old_ddps = port->ddps;
	old_pdt = port->pdt;
	port->pdt = conn_stat->peer_device_type;
	port->mcs = conn_stat->message_capability_status;
	port->ldps = conn_stat->legacy_device_plug_status;
	port->ddps = conn_stat->displayport_device_plug_status;

	if (old_ddps != port->ddps) {
		if (port->ddps) {
			dowork = true;
		} else {
			port->available_pbn = 0;
		}
	}
	if (old_pdt != port->pdt && !port->input) {
		drm_dp_port_teardown_pdt(port, old_pdt);

		if (drm_dp_port_setup_pdt(port))
			dowork = true;
	}

	drm_dp_mst_topology_put_port(port);
	if (dowork)
		queue_work(system_long_wq, &mstb->mgr->work);

}

static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
							      u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_mst_port *port;
	int i, ret;

	/* find the port by iterating down */
	mutex_lock(&mgr->lock);
	mstb = mgr->mst_primary;

	if (!mstb)
		goto out;

	for (i = 0; i < lct - 1; i++) {
		int shift = (i % 2) ? 0 : 4;
		int port_num = (rad[i / 2] >> shift) & 0xf;

		list_for_each_entry(port, &mstb->ports, next) {
			if (port->port_num == port_num) {
				mstb = port->mstb;
				if (!mstb) {
					DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
					goto out;
				}

				break;
			}
		}
	}
	ret = drm_dp_mst_topology_try_get_mstb(mstb);
	if (!ret)
		mstb = NULL;
out:
	mutex_unlock(&mgr->lock);
	return mstb;
}

static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
	struct drm_dp_mst_branch *mstb,
	uint8_t *guid)
{
	struct drm_dp_mst_branch *found_mstb;
	struct drm_dp_mst_port *port;

	if (memcmp(mstb->guid, guid, 16) == 0)
		return mstb;

	list_for_each_entry(port, &mstb->ports, next) {
		if (!port->mstb)
			continue;

		found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);

		if (found_mstb)
			return found_mstb;
	}

	return NULL;
}

static struct drm_dp_mst_branch *
drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
				     uint8_t *guid)
{
	struct drm_dp_mst_branch *mstb;
	int ret;

	/* find the branch device by recursing down from the primary */
	mutex_lock(&mgr->lock);

	mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
	if (mstb) {
		ret = drm_dp_mst_topology_try_get_mstb(mstb);
		if (!ret)
			mstb = NULL;
	}

	mutex_unlock(&mgr->lock);
	return mstb;
}

static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
					       struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_port *port;
	struct drm_dp_mst_branch *mstb_child;
	if (!mstb->link_address_sent)
		drm_dp_send_link_address(mgr, mstb);

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->input)
			continue;

		if (!port->ddps)
			continue;

		if (!port->available_pbn)
			drm_dp_send_enum_path_resources(mgr, mstb, port);

		if (port->mstb) {
			mstb_child = drm_dp_mst_topology_get_mstb_validated(
			    mgr, port->mstb);
			if (mstb_child) {
				drm_dp_check_and_send_link_address(mgr, mstb_child);
				drm_dp_mst_topology_put_mstb(mstb_child);
			}
		}
	}
}

static void drm_dp_mst_link_probe_work(struct work_struct *work)
{
	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
	struct drm_dp_mst_branch *mstb;
	int ret;

	mutex_lock(&mgr->lock);
	mstb = mgr->mst_primary;
	if (mstb) {
		ret = drm_dp_mst_topology_try_get_mstb(mstb);
		if (!ret)
			mstb = NULL;
	}
	mutex_unlock(&mgr->lock);
	if (mstb) {
		drm_dp_check_and_send_link_address(mgr, mstb);
		drm_dp_mst_topology_put_mstb(mstb);
	}
}

static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid)
{
	u64 salt;

	if (memchr_inv(guid, 0, 16))
		return true;

	salt = get_jiffies_64();

	memcpy(&guid[0], &salt, sizeof(u64));
	memcpy(&guid[8], &salt, sizeof(u64));

	return false;
}

static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_READ;
	req.u.dpcd_read.port_number = port_num;
	req.u.dpcd_read.dpcd_address = offset;
	req.u.dpcd_read.num_bytes = num_bytes;
	drm_dp_encode_sideband_req(&req, msg);

	return 0;
}

static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
				    bool up, u8 *msg, int len)
{
	int ret;
	int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
	int tosend, total, offset;
	int retries = 0;

retry:
	total = len;
	offset = 0;
	do {
		tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);

		ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
					&msg[offset],
					tosend);
		if (ret != tosend) {
			if (ret == -EIO && retries < 5) {
				retries++;
				goto retry;
			}
			DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);

			return -EIO;
		}
		offset += tosend;
		total -= tosend;
	} while (total > 0);
	return 0;
}

static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
				  struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_branch *mstb = txmsg->dst;
	u8 req_type;

	/* both msg slots are full */
	if (txmsg->seqno == -1) {
		if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
			DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
			return -EAGAIN;
		}
		if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
			txmsg->seqno = mstb->last_seqno;
			mstb->last_seqno ^= 1;
		} else if (mstb->tx_slots[0] == NULL)
			txmsg->seqno = 0;
		else
			txmsg->seqno = 1;
		mstb->tx_slots[txmsg->seqno] = txmsg;
	}

	req_type = txmsg->msg[0] & 0x7f;
	if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
	    req_type == DP_RESOURCE_STATUS_NOTIFY)
		hdr->broadcast = 1;
	else
		hdr->broadcast = 0;
	hdr->path_msg = txmsg->path_msg;
	hdr->lct = mstb->lct;
	hdr->lcr = mstb->lct - 1;
	if (mstb->lct > 1)
		memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
	hdr->seqno = txmsg->seqno;
	return 0;
}

/*
 * process a single block of the next message in the sideband queue
 */
static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_sideband_msg_tx *txmsg,
				   bool up)
{
	u8 chunk[48];
	struct drm_dp_sideband_msg_hdr hdr;
	int len, space, idx, tosend;
	int ret;

	memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));

	if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
		txmsg->seqno = -1;
		txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
	}

	/* make hdr from dst mst - for replies use seqno
	   otherwise assign one */
	ret = set_hdr_from_dst_qlock(&hdr, txmsg);
	if (ret < 0)
		return ret;

	/* amount left to send in this message */
	len = txmsg->cur_len - txmsg->cur_offset;

	/* 48 - sideband msg size - 1 byte for data CRC, x header bytes */
	space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);

	tosend = min(len, space);
	if (len == txmsg->cur_len)
		hdr.somt = 1;
	if (space >= len)
		hdr.eomt = 1;


	hdr.msg_len = tosend + 1;
	drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
	memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
	/* add crc at end */
	drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
	idx += tosend + 1;

	ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
	if (ret) {
		DRM_DEBUG_KMS("sideband msg failed to send\n");
		return ret;
	}

	txmsg->cur_offset += tosend;
	if (txmsg->cur_offset == txmsg->cur_len) {
		txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
		return 1;
	}
	return 0;
}
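
/*
 * Sizing sketch for the chunking above (illustrative only): a sideband chunk
 * is at most 48 bytes on the wire, and each chunk loses one byte to the data
 * CRC plus the header. With lct = 1 the header is 3 bytes, so:
 *
 *	space = 48 - 1 - 3 = 44 bytes of message payload per chunk
 *
 * A 60-byte message to such a branch device therefore goes out as one
 * 44-byte chunk with somt set, followed by a 16-byte chunk with eomt set.
 */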

static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	WARN_ON(!mutex_is_locked(&mgr->qlock));

	/* construct a chunk from the first msg in the tx_msg queue */
	if (list_empty(&mgr->tx_msg_downq))
		return;

	txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
	ret = process_single_tx_qlock(mgr, txmsg, false);
	if (ret == 1) {
		/* txmsg is sent it should be in the slots now */
		list_del(&txmsg->next);
	} else if (ret) {
		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
		list_del(&txmsg->next);
		if (txmsg->seqno != -1)
			txmsg->dst->tx_slots[txmsg->seqno] = NULL;
		txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		wake_up_all(&mgr->tx_waitq);
	}
}

/* called holding qlock */
static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_sideband_msg_tx *txmsg)
{
	int ret;

	/* construct a chunk from the first msg in the tx_msg queue */
	ret = process_single_tx_qlock(mgr, txmsg, true);

	if (ret != 1)
		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);

	if (txmsg->seqno != -1) {
		WARN_ON((unsigned int)txmsg->seqno >
			ARRAY_SIZE(txmsg->dst->tx_slots));
		txmsg->dst->tx_slots[txmsg->seqno] = NULL;
	}
}

static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_sideband_msg_tx *txmsg)
{
	mutex_lock(&mgr->qlock);
	list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
	if (list_is_singular(&mgr->tx_msg_downq))
		process_single_down_tx_qlock(mgr);
	mutex_unlock(&mgr->qlock);
}
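
/*
 * Typical down-request lifecycle (a condensed sketch of how the helpers in
 * this file use the queue; error handling omitted):
 *
 *	struct drm_dp_sideband_msg_tx *txmsg;
 *
 *	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
 *	txmsg->dst = mstb;			// target branch device
 *	build_link_address(txmsg);		// or any other build_*() helper
 *	drm_dp_queue_down_tx(mgr, txmsg);	// kicks the queue if it was idle
 *	drm_dp_mst_wait_tx_reply(mstb, txmsg);	// reply lands in txmsg->reply
 *	kfree(txmsg);
 */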

static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				     struct drm_dp_mst_branch *mstb)
{
	int len;
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return;

	txmsg->dst = mstb;
	len = build_link_address(txmsg);

	mstb->link_address_sent = true;
	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		int i;

		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
			DRM_DEBUG_KMS("link address nak received\n");
		} else {
			DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports);
			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
				DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i,
					      txmsg->reply.u.link_addr.ports[i].input_port,
					      txmsg->reply.u.link_addr.ports[i].peer_device_type,
					      txmsg->reply.u.link_addr.ports[i].port_number,
					      txmsg->reply.u.link_addr.ports[i].dpcd_revision,
					      txmsg->reply.u.link_addr.ports[i].mcs,
					      txmsg->reply.u.link_addr.ports[i].ddps,
					      txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status,
					      txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
					      txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
			}

			drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);

			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
				drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
			}
			drm_kms_helper_hotplug_event(mgr->dev);
		}
	} else {
		mstb->link_address_sent = false;
		DRM_DEBUG_KMS("link address failed %d\n", ret);
	}

	kfree(txmsg);
}

static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port)
{
	int len;
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return -ENOMEM;

	txmsg->dst = mstb;
	len = build_enum_path_resources(txmsg, port->port_num);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
			DRM_DEBUG_KMS("enum path resources nak received\n");
		} else {
			if (port->port_num != txmsg->reply.u.path_resources.port_number)
				DRM_ERROR("got incorrect port in response\n");
			DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number,
				      txmsg->reply.u.path_resources.avail_payload_bw_number);
			port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number;
		}
	}

	kfree(txmsg);
	return 0;
}

static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
{
	if (!mstb->port_parent)
		return NULL;

	if (mstb->port_parent->mstb != mstb)
		return mstb->port_parent;

	return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
}

/*
 * Searches upwards in the topology starting from mstb to try to find the
 * closest available parent of mstb that's still connected to the rest of the
 * topology. This can be used in order to perform operations like releasing
 * payloads, where the branch device which owned the payload may no longer be
 * around and thus would require that the payload on the last living relative
 * be freed instead.
 */
static struct drm_dp_mst_branch *
drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_mst_branch *mstb,
					int *port_num)
{
	struct drm_dp_mst_branch *rmstb = NULL;
	struct drm_dp_mst_port *found_port;

	mutex_lock(&mgr->lock);
	if (!mgr->mst_primary)
		goto out;

	do {
		found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
		if (!found_port)
			break;

		if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) {
			rmstb = found_port->parent;
			*port_num = found_port->port_num;
		} else {
			/* Search again, starting from this parent */
			mstb = found_port->parent;
		}
	} while (!rmstb);
out:
	mutex_unlock(&mgr->lock);
	return rmstb;
}

static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_mst_port *port,
				   int id,
				   int pbn)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	struct drm_dp_mst_branch *mstb;
	int len, ret, port_num;
	u8 sinks[DRM_DP_MAX_SDP_STREAMS];
	int i;

	port_num = port->port_num;
	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
	if (!mstb) {
		mstb = drm_dp_get_last_connected_port_and_mstb(mgr,
							       port->parent,
							       &port_num);

		if (!mstb)
			return -EINVAL;
	}

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto fail_put;
	}

	for (i = 0; i < port->num_sdp_streams; i++)
		sinks[i] = i;

	txmsg->dst = mstb;
	len = build_allocate_payload(txmsg, port_num,
				     id,
				     pbn, port->num_sdp_streams, sinks);

	drm_dp_queue_down_tx(mgr, txmsg);

	/*
	 * FIXME: there is a small chance that between getting the last
	 * connected mstb and sending the payload message, the last connected
	 * mstb could also be removed from the topology. In the future, this
	 * needs to be fixed by restarting the
	 * drm_dp_get_last_connected_port_and_mstb() search in the event of a
	 * timeout if the topology is still connected to the tip.
	 */
	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
			ret = -EINVAL;
		else
			ret = 0;
	}
	kfree(txmsg);
fail_put:
	drm_dp_mst_topology_put_mstb(mstb);
	return ret;
}

int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port, bool power_up)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	int len, ret;

	port = drm_dp_mst_topology_get_port_validated(mgr, port);
	if (!port)
		return -EINVAL;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		drm_dp_mst_topology_put_port(port);
		return -ENOMEM;
	}

	txmsg->dst = port->parent;
	len = build_power_updown_phy(txmsg, port->port_num, power_up);
	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
			ret = -EINVAL;
		else
			ret = 0;
	}
	kfree(txmsg);
	drm_dp_mst_topology_put_port(port);

	return ret;
}
EXPORT_SYMBOL(drm_dp_send_power_updown_phy);
2334
2335static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
2336 int id,
2337 struct drm_dp_payload *payload)
2338{
2339 int ret;
2340
2341 ret = drm_dp_dpcd_write_payload(mgr, id, payload);
2342 if (ret < 0) {
2343 payload->payload_state = 0;
2344 return ret;
2345 }
2346 payload->payload_state = DP_PAYLOAD_LOCAL;
2347 return 0;
2348}
2349
static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_port *port,
				       int id,
				       struct drm_dp_payload *payload)
{
	int ret;

	ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
	if (ret < 0)
		return ret;
	payload->payload_state = DP_PAYLOAD_REMOTE;
	return ret;
}

static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_mst_port *port,
					int id,
					struct drm_dp_payload *payload)
{
	DRM_DEBUG_KMS("\n");
	/* it's okay for these to fail */
	if (port)
		drm_dp_payload_send_msg(mgr, port, id, 0);

	drm_dp_dpcd_write_payload(mgr, id, payload);
	payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
	return 0;
}

static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
					int id,
					struct drm_dp_payload *payload)
{
	payload->payload_state = 0;
	return 0;
}
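
/**
 * drm_dp_update_payload_part1() - Execute payload update part 1
 * @mgr: manager to use.
 *
 * This iterates over all proposed virtual channels, and tries to
 * allocate space in the link for them. For 0->slots transitions,
 * this step just writes the VCPI to the MST device. For slots->0
 * transitions, this writes the updated VCPIs and removes the
 * remote VC payloads.
 *
 * After calling this the driver should generate ACT and payload
 * packets.
 */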
int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_dp_payload req_payload;
	struct drm_dp_mst_port *port;
	int i, j;
	int cur_slots = 1;

	mutex_lock(&mgr->payload_lock);
	for (i = 0; i < mgr->max_payloads; i++) {
		struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
		struct drm_dp_payload *payload = &mgr->payloads[i];
		bool put_port = false;

		/* solve the current payloads - compare to the hw ones
		 * - update the hw view
		 */
		req_payload.start_slot = cur_slots;
		if (vcpi) {
			port = container_of(vcpi, struct drm_dp_mst_port,
					    vcpi);

			/* Validated ports don't matter if we're releasing
			 * VCPI
			 */
			if (vcpi->num_slots) {
				port = drm_dp_mst_topology_get_port_validated(
				    mgr, port);
				if (!port) {
					mutex_unlock(&mgr->payload_lock);
					return -EINVAL;
				}
				put_port = true;
			}

			req_payload.num_slots = vcpi->num_slots;
			req_payload.vcpi = vcpi->vcpi;
		} else {
			port = NULL;
			req_payload.num_slots = 0;
		}

		payload->start_slot = req_payload.start_slot;
		/* work out what is required to happen with this payload */
		if (payload->num_slots != req_payload.num_slots) {

			/* need to push an update for this payload */
			if (req_payload.num_slots) {
				drm_dp_create_payload_step1(mgr, vcpi->vcpi,
							    &req_payload);
				payload->num_slots = req_payload.num_slots;
				payload->vcpi = req_payload.vcpi;

			} else if (payload->num_slots) {
				payload->num_slots = 0;
				drm_dp_destroy_payload_step1(mgr, port,
							     payload->vcpi,
							     payload);
				req_payload.payload_state =
					payload->payload_state;
				payload->start_slot = 0;
			}
			payload->payload_state = req_payload.payload_state;
		}
		cur_slots += req_payload.num_slots;

		if (put_port)
			drm_dp_mst_topology_put_port(port);
	}

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL)
			continue;

		DRM_DEBUG_KMS("removing payload %d\n", i);
		for (j = i; j < mgr->max_payloads - 1; j++) {
			mgr->payloads[j] = mgr->payloads[j + 1];
			mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];

			if (mgr->proposed_vcpis[j] &&
			    mgr->proposed_vcpis[j]->num_slots) {
				set_bit(j + 1, &mgr->payload_mask);
			} else {
				clear_bit(j + 1, &mgr->payload_mask);
			}
		}

		memset(&mgr->payloads[mgr->max_payloads - 1], 0,
		       sizeof(struct drm_dp_payload));
		mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
		clear_bit(mgr->max_payloads, &mgr->payload_mask);
	}
	mutex_unlock(&mgr->payload_lock);

	return 0;
}
EXPORT_SYMBOL(drm_dp_update_payload_part1);
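
/**
 * drm_dp_update_payload_part2() - Execute payload update part 2
 * @mgr: manager to use.
 *
 * This iterates over all proposed virtual channels, and tries to
 * allocate space in the link for them. For 0->slots transitions,
 * this step writes the remote VC payload commands. For slots->0
 * this just resets some internal state.
 */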
int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_dp_mst_port *port;
	int i;
	int ret = 0;

	mutex_lock(&mgr->payload_lock);
	for (i = 0; i < mgr->max_payloads; i++) {

		if (!mgr->proposed_vcpis[i])
			continue;

		port = container_of(mgr->proposed_vcpis[i],
				    struct drm_dp_mst_port, vcpi);

		DRM_DEBUG_KMS("payload %d %d\n", i,
			      mgr->payloads[i].payload_state);
		if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
			ret = drm_dp_create_payload_step2(mgr, port,
							  mgr->proposed_vcpis[i]->vcpi,
							  &mgr->payloads[i]);
		} else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
			ret = drm_dp_destroy_payload_step2(mgr,
							   mgr->proposed_vcpis[i]->vcpi,
							   &mgr->payloads[i]);
		}
		if (ret) {
			mutex_unlock(&mgr->payload_lock);
			return ret;
		}
	}
	mutex_unlock(&mgr->payload_lock);
	return 0;
}
EXPORT_SYMBOL(drm_dp_update_payload_part2);

static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port,
				 int offset, int size, u8 *bytes)
{
	int len;
	int ret = 0;
	struct drm_dp_sideband_msg_tx *txmsg;
	struct drm_dp_mst_branch *mstb;

	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
	if (!mstb)
		return -EINVAL;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto fail_put;
	}

	len = build_dpcd_read(txmsg, port->port_num, offset, size);
	txmsg->dst = port->parent;

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret < 0)
		goto fail_free;

	/* DPCD read should never be NACKed */
	if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
		DRM_ERROR("mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n",
			  mstb, port->port_num, offset, size);
		ret = -EIO;
		goto fail_free;
	}

	if (txmsg->reply.u.remote_dpcd_read_ack.num_bytes != size) {
		ret = -EPROTO;
		goto fail_free;
	}

	ret = min_t(size_t, txmsg->reply.u.remote_dpcd_read_ack.num_bytes,
		    size);
	memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, ret);

fail_free:
	kfree(txmsg);
fail_put:
	drm_dp_mst_topology_put_mstb(mstb);

	return ret;
}

static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes)
{
	int len;
	int ret;
	struct drm_dp_sideband_msg_tx *txmsg;
	struct drm_dp_mst_branch *mstb;

	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
	if (!mstb)
		return -EINVAL;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto fail_put;
	}

	len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
	txmsg->dst = mstb;

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
			ret = -EIO;
		else
			ret = 0;
	}
	kfree(txmsg);
fail_put:
	drm_dp_mst_topology_put_mstb(mstb);
	return ret;
}

static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg,
				      u8 req_type)
{
	struct drm_dp_sideband_msg_reply_body reply;

	reply.reply_type = DP_SIDEBAND_REPLY_ACK;
	reply.req_type = req_type;
	drm_dp_encode_sideband_reply(&reply, msg);
	return 0;
}

static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
				    struct drm_dp_mst_branch *mstb,
				    int req_type, int seqno, bool broadcast)
{
	struct drm_dp_sideband_msg_tx *txmsg;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return -ENOMEM;

	txmsg->dst = mstb;
	txmsg->seqno = seqno;
	drm_dp_encode_up_ack_reply(txmsg, req_type);

	mutex_lock(&mgr->qlock);

	process_single_up_tx_qlock(mgr, txmsg);

	mutex_unlock(&mgr->qlock);

	kfree(txmsg);
	return 0;
}
2657
2658static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
2659 int dp_link_count,
2660 int *out)
2661{
2662 switch (dp_link_bw) {
2663 default:
2664 DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
2665 dp_link_bw, dp_link_count);
2666 return false;
2667
2668 case DP_LINK_BW_1_62:
2669 *out = 3 * dp_link_count;
2670 break;
2671 case DP_LINK_BW_2_7:
2672 *out = 5 * dp_link_count;
2673 break;
2674 case DP_LINK_BW_5_4:
2675 *out = 10 * dp_link_count;
2676 break;
2677 case DP_LINK_BW_8_1:
2678 *out = 15 * dp_link_count;
2679 break;
2680 }
2681 return true;
2682}
2683
2684
2685
2686
2687
2688
2689
2690
2691
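
/**
 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
 * @mgr: manager to set state for
 * @mst_state: true to enable MST on this connector - false to disable.
 *
 * This is called by the driver when it detects an MST capable device plugged
 * into a DP MST capable port, or when a DP MST capable device is unplugged.
 */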
int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
{
	int ret = 0;
	struct drm_dp_mst_branch *mstb = NULL;

	mutex_lock(&mgr->lock);
	if (mst_state == mgr->mst_state)
		goto out_unlock;

	mgr->mst_state = mst_state;
	/* set the device into MST mode */
	if (mst_state) {
		WARN_ON(mgr->mst_primary);

		/* get dpcd info */
		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd,
				       DP_RECEIVER_CAP_SIZE);
		if (ret != DP_RECEIVER_CAP_SIZE) {
			DRM_DEBUG_KMS("failed to read DPCD\n");
			goto out_unlock;
		}

		if (!drm_dp_get_vc_payload_bw(mgr->dpcd[1],
					      mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK,
					      &mgr->pbn_div)) {
			ret = -EINVAL;
			goto out_unlock;
		}

		/* add initial branch device at LCT 1 */
		mstb = drm_dp_add_mst_branch_device(1, NULL);
		if (mstb == NULL) {
			ret = -ENOMEM;
			goto out_unlock;
		}
		mstb->mgr = mgr;

		/* give this the main reference */
		mgr->mst_primary = mstb;
		drm_dp_mst_topology_get_mstb(mgr->mst_primary);

		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
					 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
		if (ret < 0)
			goto out_unlock;

		{
			struct drm_dp_payload reset_pay;

			reset_pay.start_slot = 0;
			reset_pay.num_slots = 0x3f;
			drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
		}

		queue_work(system_long_wq, &mgr->work);

		ret = 0;
	} else {
		/* disable MST on the device */
		mstb = mgr->mst_primary;
		mgr->mst_primary = NULL;
		/* this can fail if the device is gone */
		drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
		ret = 0;
		memset(mgr->payloads, 0,
		       mgr->max_payloads * sizeof(struct drm_dp_payload));
		mgr->payload_mask = 0;
		set_bit(0, &mgr->payload_mask);
		mgr->vcpi_mask = 0;
	}

out_unlock:
	mutex_unlock(&mgr->lock);
	if (mstb)
		drm_dp_mst_topology_put_mstb(mstb);
	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
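
/**
 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
 * @mgr: manager to suspend
 *
 * This function tells the MST device that we can't handle UP messages
 * anymore. This should stop it from sending any since we are suspended.
 */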
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
{
	mutex_lock(&mgr->lock);
	drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
			   DP_MST_EN | DP_UPSTREAM_IS_SRC);
	mutex_unlock(&mgr->lock);
	flush_work(&mgr->work);
	flush_work(&mgr->destroy_connector_work);
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
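
/**
 * drm_dp_mst_topology_mgr_resume() - resume the MST manager
 * @mgr: manager to resume
 *
 * This will fetch DPCD and see if the device is still there,
 * if it is, it will rewrite the MSTM control bits, and return.
 *
 * If the device fails this returns -1, and the driver should do
 * a full MST reprobe, in case we were undocked.
 */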
int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
{
	int ret = 0;

	mutex_lock(&mgr->lock);

	if (mgr->mst_primary) {
		int sret;
		u8 guid[16];

		sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd,
					DP_RECEIVER_CAP_SIZE);
		if (sret != DP_RECEIVER_CAP_SIZE) {
			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
			ret = -1;
			goto out_unlock;
		}

		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
					 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
		if (ret < 0) {
			DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
			ret = -1;
			goto out_unlock;
		}

		/* Some hubs forget their guids after they resume */
		sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
		if (sret != 16) {
			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
			ret = -1;
			goto out_unlock;
		}
		drm_dp_check_mstb_guid(mgr->mst_primary, guid);

		ret = 0;
	} else {
		ret = -1;
	}

out_unlock:
	mutex_unlock(&mgr->lock);
	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);

static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
{
	int len;
	u8 replyblock[32];
	int replylen, origlen, curreply;
	int ret;
	struct drm_dp_sideband_msg_rx *msg;
	int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE :
			   DP_SIDEBAND_MSG_DOWN_REP_BASE;

	msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;

	len = min(mgr->max_dpcd_transaction_bytes, 16);
	ret = drm_dp_dpcd_read(mgr->aux, basereg,
			       replyblock, len);
	if (ret != len) {
		DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
		return false;
	}
	ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
	if (!ret) {
		DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
		return false;
	}
	replylen = msg->curchunk_len + msg->curchunk_hdrlen;

	origlen = replylen;
	replylen -= len;
	curreply = len;
	while (replylen > 0) {
		len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
		ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
				       replyblock, len);
		if (ret != len) {
			DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
				      len, ret);
			return false;
		}

		ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
		if (!ret) {
			DRM_DEBUG_KMS("failed to build sideband msg\n");
			return false;
		}

		curreply += len;
		replylen -= len;
	}
	return true;
}

static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
{
	int ret = 0;

	if (!drm_dp_get_one_sb_msg(mgr, false)) {
		memset(&mgr->down_rep_recv, 0,
		       sizeof(struct drm_dp_sideband_msg_rx));
		return 0;
	}

	if (mgr->down_rep_recv.have_eomt) {
		struct drm_dp_sideband_msg_tx *txmsg;
		struct drm_dp_mst_branch *mstb;
		int slot = -1;

		mstb = drm_dp_get_mst_branch_device(mgr,
						    mgr->down_rep_recv.initial_hdr.lct,
						    mgr->down_rep_recv.initial_hdr.rad);

		if (!mstb) {
			DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
				      mgr->down_rep_recv.initial_hdr.lct);
			memset(&mgr->down_rep_recv, 0,
			       sizeof(struct drm_dp_sideband_msg_rx));
			return 0;
		}

		/* find the message */
		slot = mgr->down_rep_recv.initial_hdr.seqno;
		mutex_lock(&mgr->qlock);
		txmsg = mstb->tx_slots[slot];
		/* remove from slots */
		mutex_unlock(&mgr->qlock);

		if (!txmsg) {
			DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
				      mstb,
				      mgr->down_rep_recv.initial_hdr.seqno,
				      mgr->down_rep_recv.initial_hdr.lct,
				      mgr->down_rep_recv.initial_hdr.rad[0],
				      mgr->down_rep_recv.msg[0]);
			drm_dp_mst_topology_put_mstb(mstb);
			memset(&mgr->down_rep_recv, 0,
			       sizeof(struct drm_dp_sideband_msg_rx));
			return 0;
		}

		drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);

		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
			DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
				      txmsg->reply.req_type,
				      drm_dp_mst_req_type_str(txmsg->reply.req_type),
				      txmsg->reply.u.nak.reason,
				      drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
				      txmsg->reply.u.nak.nak_data);

		memset(&mgr->down_rep_recv, 0,
		       sizeof(struct drm_dp_sideband_msg_rx));

		mutex_lock(&mgr->qlock);
		txmsg->state = DRM_DP_SIDEBAND_TX_RX;
		mstb->tx_slots[slot] = NULL;
		mutex_unlock(&mgr->qlock);

		/*
		 * Only drop our topology reference once we are done touching
		 * mstb->tx_slots, so the branch device cannot be freed from
		 * under us.
		 */
		drm_dp_mst_topology_put_mstb(mstb);

		wake_up_all(&mgr->tx_waitq);
	}
	return ret;
}

static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
{
	int ret = 0;

	if (!drm_dp_get_one_sb_msg(mgr, true)) {
		memset(&mgr->up_req_recv, 0,
		       sizeof(struct drm_dp_sideband_msg_rx));
		return 0;
	}

	if (mgr->up_req_recv.have_eomt) {
		struct drm_dp_sideband_msg_req_body msg;
		struct drm_dp_mst_branch *mstb = NULL;
		bool seqno;

		if (!mgr->up_req_recv.initial_hdr.broadcast) {
			mstb = drm_dp_get_mst_branch_device(mgr,
							    mgr->up_req_recv.initial_hdr.lct,
							    mgr->up_req_recv.initial_hdr.rad);
			if (!mstb) {
				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
					      mgr->up_req_recv.initial_hdr.lct);
				memset(&mgr->up_req_recv, 0,
				       sizeof(struct drm_dp_sideband_msg_rx));
				return 0;
			}
		}

		seqno = mgr->up_req_recv.initial_hdr.seqno;
		drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);

		if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
			drm_dp_send_up_ack_reply(mgr, mgr->mst_primary,
						 msg.req_type, seqno, false);

			if (!mstb)
				mstb = drm_dp_get_mst_branch_device_by_guid(mgr,
									    msg.u.conn_stat.guid);

			if (!mstb) {
				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
					      mgr->up_req_recv.initial_hdr.lct);
				memset(&mgr->up_req_recv, 0,
				       sizeof(struct drm_dp_sideband_msg_rx));
				return 0;
			}

			drm_dp_update_port(mstb, &msg.u.conn_stat);

			DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
				      msg.u.conn_stat.port_number,
				      msg.u.conn_stat.legacy_device_plug_status,
				      msg.u.conn_stat.displayport_device_plug_status,
				      msg.u.conn_stat.message_capability_status,
				      msg.u.conn_stat.input_port,
				      msg.u.conn_stat.peer_device_type);
			drm_kms_helper_hotplug_event(mgr->dev);

		} else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
			drm_dp_send_up_ack_reply(mgr, mgr->mst_primary,
						 msg.req_type, seqno, false);
			if (!mstb)
				mstb = drm_dp_get_mst_branch_device_by_guid(mgr,
									    msg.u.resource_stat.guid);

			if (!mstb) {
				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
					      mgr->up_req_recv.initial_hdr.lct);
				memset(&mgr->up_req_recv, 0,
				       sizeof(struct drm_dp_sideband_msg_rx));
				return 0;
			}

			DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n",
				      msg.u.resource_stat.port_number,
				      msg.u.resource_stat.available_pbn);
		}

		if (mstb)
			drm_dp_mst_topology_put_mstb(mstb);

		memset(&mgr->up_req_recv, 0,
		       sizeof(struct drm_dp_sideband_msg_rx));
	}
	return ret;
}
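
/**
 * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
 * @mgr: manager to notify irq for.
 * @esi: 4 bytes from SINK_COUNT_ESI
 * @handled: whether the hpd interrupt was consumed or not
 *
 * This should be called from the driver when it detects a short IRQ,
 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
 * topology manager will process the sideband messages received as a result
 * of this.
 */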
int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
{
	int ret = 0;
	int sc;

	*handled = false;
	sc = esi[0] & 0x3f;

	if (sc != mgr->sink_count) {
		mgr->sink_count = sc;
		*handled = true;
	}

	if (esi[1] & DP_DOWN_REP_MSG_RDY) {
		ret = drm_dp_mst_handle_down_rep(mgr);
		*handled = true;
	}

	if (esi[1] & DP_UP_REQ_MSG_RDY) {
		ret |= drm_dp_mst_handle_up_req(mgr);
		*handled = true;
	}

	drm_dp_mst_kick_tx(mgr);
	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
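
/**
 * drm_dp_mst_detect_port() - get connection status for an MST port
 * @connector: DRM connector for this port
 * @mgr: manager for this port
 * @port: unverified pointer to a port
 *
 * This returns the current connection state for a port. It validates the
 * port pointer still exists so the caller doesn't require a reference.
 */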
enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector,
						 struct drm_dp_mst_topology_mgr *mgr,
						 struct drm_dp_mst_port *port)
{
	enum drm_connector_status status = connector_status_disconnected;

	/* we need to search for the port in the mgr in case it's gone */
	port = drm_dp_mst_topology_get_port_validated(mgr, port);
	if (!port)
		return connector_status_disconnected;

	if (!port->ddps)
		goto out;

	switch (port->pdt) {
	case DP_PEER_DEVICE_NONE:
	case DP_PEER_DEVICE_MST_BRANCHING:
		break;

	case DP_PEER_DEVICE_SST_SINK:
		status = connector_status_connected;
		/* for logical ports - cache the EDID */
		if (port->port_num >= DP_MST_LOGICAL_PORT_0 &&
		    !port->cached_edid)
			port->cached_edid = drm_get_edid(connector,
							 &port->aux.ddc);
		break;
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
		if (port->ldps)
			status = connector_status_connected;
		break;
	}
out:
	drm_dp_mst_topology_put_port(port);
	return status;
}
EXPORT_SYMBOL(drm_dp_mst_detect_port);
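
/**
 * drm_dp_mst_port_has_audio() - Check whether port has audio capability or not
 * @mgr: manager for this port
 * @port: unverified pointer to a port.
 *
 * This returns whether the port supports audio or not.
 */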
bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
			       struct drm_dp_mst_port *port)
{
	bool ret = false;

	port = drm_dp_mst_topology_get_port_validated(mgr, port);
	if (!port)
		return ret;
	ret = port->has_audio;
	drm_dp_mst_topology_put_port(port);
	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_port_has_audio);
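
/**
 * drm_dp_mst_get_edid() - get EDID for an MST port
 * @connector: toplevel connector to get EDID for
 * @mgr: manager for this port
 * @port: unverified pointer to a port.
 *
 * This returns an EDID for the port connected to a connector,
 * It validates the pointer still exists so the caller doesn't require a
 * reference.
 */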
struct edid *drm_dp_mst_get_edid(struct drm_connector *connector,
				 struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port)
{
	struct edid *edid = NULL;

	/* we need to search for the port in the mgr in case it's gone */
	port = drm_dp_mst_topology_get_port_validated(mgr, port);
	if (!port)
		return NULL;

	if (port->cached_edid)
		edid = drm_edid_duplicate(port->cached_edid);
	else
		edid = drm_get_edid(connector, &port->aux.ddc);

	port->has_audio = drm_detect_monitor_audio(edid);
	drm_dp_mst_topology_put_port(port);
	return edid;
}
EXPORT_SYMBOL(drm_dp_mst_get_edid);
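
/**
 * drm_dp_find_vcpi_slots() - Find VCPI slots for this PBN value
 * @mgr: manager to use
 * @pbn: payload bandwidth to convert into slots.
 *
 * Calculate the number of VCPI slots that will be required for the given PBN
 * value.
 *
 * RETURNS:
 * The total slots required for this port, or error.
 */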
int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
			   int pbn)
{
	int num_slots;

	num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);

	/* max. time slots - one slot for MTP header */
	if (num_slots > 63)
		return -ENOSPC;
	return num_slots;
}
EXPORT_SYMBOL(drm_dp_find_vcpi_slots);

static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
			    struct drm_dp_vcpi *vcpi, int pbn, int slots)
{
	int ret;

	/* max. time slots - one slot for MTP header */
	if (slots > 63)
		return -ENOSPC;

	vcpi->pbn = pbn;
	vcpi->aligned_pbn = slots * mgr->pbn_div;
	vcpi->num_slots = slots;

	ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
	if (ret < 0)
		return ret;
	return 0;
}
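
/**
 * drm_dp_atomic_find_vcpi_slots() - Find and add VCPI slots to the state
 * @state: global atomic state
 * @mgr: MST topology manager for the port
 * @port: port to find vcpi slots for
 * @pbn: bandwidth required for the mode in PBN
 *
 * Allocates VCPI slots to @port, replacing any previous VCPI allocations it
 * may have had. Any atomic drivers which support MST must call this function
 * in their &drm_encoder_helper_funcs.atomic_check() callback to change the
 * current VCPI allocation for the new state.
 *
 * Allocations set by this function are not checked against the bandwidth
 * restraints of @mgr until the driver calls drm_dp_mst_atomic_check().
 *
 * Additionally, it is OK to call this function multiple times on the same
 * @port as needed. It is not OK however, to call this function and
 * drm_dp_atomic_release_vcpi_slots() on the same @port in a single atomic
 * check phase.
 *
 * See also:
 * drm_dp_atomic_release_vcpi_slots()
 * drm_dp_mst_atomic_check()
 *
 * Returns:
 * Total slots in the atomic state assigned for this port, or a negative error
 * code if the port no longer exists.
 */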
int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
				  struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port, int pbn)
{
	struct drm_dp_mst_topology_state *topology_state;
	struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
	int prev_slots, req_slots;

	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
	if (IS_ERR(topology_state))
		return PTR_ERR(topology_state);

	/* Find the current allocation for this port, if any */
	list_for_each_entry(pos, &topology_state->vcpis, next) {
		if (pos->port == port) {
			vcpi = pos;
			prev_slots = vcpi->vcpi;

			/*
			 * This should never happen, unless the driver tries
			 * releasing and allocating the same VCPI allocation,
			 * which is an error
			 */
			if (WARN_ON(!prev_slots)) {
				DRM_ERROR("cannot allocate and release VCPI on [MST PORT:%p] in the same state\n",
					  port);
				return -EINVAL;
			}

			break;
		}
	}
	if (!vcpi)
		prev_slots = 0;

	req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);

	DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n",
			 port->connector->base.id, port->connector->name,
			 port, prev_slots, req_slots);

	/* Add the new allocation to the state */
	if (!vcpi) {
		vcpi = kzalloc(sizeof(*vcpi), GFP_KERNEL);
		if (!vcpi)
			return -ENOMEM;

		drm_dp_mst_get_port_malloc(port);
		vcpi->port = port;
		list_add(&vcpi->next, &topology_state->vcpis);
	}
	vcpi->vcpi = req_slots;

	return req_slots;
}
EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
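
/**
 * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots
 * @state: global atomic state
 * @mgr: MST topology manager for the port
 * @port: The port to release the VCPI slots from
 *
 * Releases any VCPI slots that have been allocated to a port in the atomic
 * state. Any atomic drivers which support MST must call this function in
 * their &drm_connector_helper_funcs.atomic_check() callback when the
 * connector will no longer have VCPI allocated (e.g. because its CRTC was
 * removed) when it had VCPI allocated in the previous atomic state.
 *
 * It is OK to call this even if @port has been removed from the system.
 * Additionally, it is OK to call this function multiple times on the same
 * @port as needed. It is not OK however, to call this function and
 * drm_dp_atomic_find_vcpi_slots() on the same @port in a single atomic check
 * phase.
 *
 * See also:
 * drm_dp_atomic_find_vcpi_slots()
 * drm_dp_mst_atomic_check()
 *
 * Returns:
 * 0 on success, or a negative error code if the port was not found in the
 * topology state.
 */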
int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
				     struct drm_dp_mst_topology_mgr *mgr,
				     struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_topology_state *topology_state;
	struct drm_dp_vcpi_allocation *pos;
	bool found = false;

	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
	if (IS_ERR(topology_state))
		return PTR_ERR(topology_state);

	list_for_each_entry(pos, &topology_state->vcpis, next) {
		if (pos->port == port) {
			found = true;
			break;
		}
	}
	if (WARN_ON(!found)) {
		DRM_ERROR("no VCPI for [MST PORT:%p] found in mst state %p\n",
			  port, &topology_state->base);
		return -EINVAL;
	}

	DRM_DEBUG_ATOMIC("[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi);
	if (pos->vcpi) {
		drm_dp_mst_put_port_malloc(port);
		pos->vcpi = 0;
	}

	return 0;
}
EXPORT_SYMBOL(drm_dp_atomic_release_vcpi_slots);
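
/**
 * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
 * @mgr: manager for this port
 * @port: port to allocate a virtual channel for.
 * @pbn: payload bandwidth number to request
 * @slots: returned number of slots for this PBN.
 */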
bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_mst_port *port, int pbn, int slots)
{
	int ret;

	if (slots < 0)
		return false;

	port = drm_dp_mst_topology_get_port_validated(mgr, port);
	if (!port)
		return false;

	if (port->vcpi.vcpi > 0) {
		DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n",
			      port->vcpi.vcpi, port->vcpi.pbn, pbn);
		if (pbn == port->vcpi.pbn) {
			drm_dp_mst_topology_put_port(port);
			return true;
		}
	}

	ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
	if (ret) {
		DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n",
			      DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
		goto out;
	}
	DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
		      pbn, port->vcpi.num_slots);

	/* Keep port allocated until its payload has been removed */
	drm_dp_mst_get_port_malloc(port);
	drm_dp_mst_topology_put_port(port);
	return true;
out:
	/* drop the topology reference taken above before bailing out */
	drm_dp_mst_topology_put_port(port);
	return false;
}
EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);

int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_mst_port *port)
{
	int slots = 0;

	port = drm_dp_mst_topology_get_port_validated(mgr, port);
	if (!port)
		return slots;

	slots = port->vcpi.num_slots;
	drm_dp_mst_topology_put_port(port);
	return slots;
}
EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
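
/**
 * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
 * @mgr: manager for this port
 * @port: unverified pointer to a port.
 *
 * This just resets the number of slots for the ports VCPI for later programming.
 */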
void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	/*
	 * A port with VCPI will remain allocated until its VCPI is
	 * released, no verified ref needed
	 */

	port->vcpi.num_slots = 0;
}
EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
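
/**
 * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
 * @mgr: manager for this port
 * @port: port to deallocate vcpi for
 *
 * This can be called unconditionally, regardless of whether
 * drm_dp_mst_allocate_vcpi() succeeded or not.
 */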
void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
				struct drm_dp_mst_port *port)
{
	if (!port->vcpi.vcpi)
		return;

	drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
	port->vcpi.num_slots = 0;
	port->vcpi.pbn = 0;
	port->vcpi.aligned_pbn = 0;
	port->vcpi.vcpi = 0;
	drm_dp_mst_put_port_malloc(port);
}
EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id, struct drm_dp_payload *payload)
{
	u8 payload_alloc[3], status;
	int ret;
	int retries = 0;

	drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
			   DP_PAYLOAD_TABLE_UPDATED);

	payload_alloc[0] = id;
	payload_alloc[1] = payload->start_slot;
	payload_alloc[2] = payload->num_slots;

	ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET,
				payload_alloc, 3);
	if (ret != 3) {
		DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
		goto fail;
	}

retry:
	ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
				&status);
	if (ret < 0) {
		DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
		goto fail;
	}

	if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
		retries++;
		if (retries < 20) {
			usleep_range(10000, 20000);
			goto retry;
		}
		DRM_DEBUG_KMS("status not set after read payload table status %d\n",
			      status);
		ret = -EINVAL;
		goto fail;
	}
	ret = 0;
fail:
	return ret;
}
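
/**
 * drm_dp_check_act_status() - Check ACT handled status.
 * @mgr: manager to use
 *
 * Check the payload status bits in the DPCD for ACT handled completion.
 */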
int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
{
	u8 status;
	int ret;
	int count = 0;

	do {
		ret = drm_dp_dpcd_readb(mgr->aux,
					DP_PAYLOAD_TABLE_UPDATE_STATUS,
					&status);
		if (ret < 0) {
			DRM_DEBUG_KMS("failed to read payload table status %d\n",
				      ret);
			goto fail;
		}

		if (status & DP_PAYLOAD_ACT_HANDLED)
			break;
		count++;
		udelay(100);

	} while (count < 30);

	if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
		DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n",
			      status, count);
		ret = -EINVAL;
		goto fail;
	}
	return 0;
fail:
	return ret;
}
EXPORT_SYMBOL(drm_dp_check_act_status);
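
/**
 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
 * @clock: dot clock for the mode
 * @bpp: bpp for the mode.
 *
 * This uses the formula in the spec to calculate the PBN value for a mode.
 */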
int drm_dp_calc_pbn_mode(int clock, int bpp)
{
	u64 kbps;
	s64 peak_kbps;
	u32 numerator;
	u32 denominator;

	kbps = clock * bpp;
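
	/*
	 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
	 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
	 * common multiplier to render an integer PBN for all link rate/lane
	 * counts combinations
	 * calculate
	 * peak_kbps *= (1006/1000)
	 * peak_kbps *= (64/54)
	 * peak_kbps *= 8    convert to bytes
	 */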
	numerator = 64 * 1006;
	denominator = 54 * 8 * 1000 * 1000;

	kbps *= numerator;
	peak_kbps = drm_fixp_from_fraction(kbps, denominator);

	return drm_fixp2int_ceil(peak_kbps);
}
EXPORT_SYMBOL(drm_dp_calc_pbn_mode);

static int test_calc_pbn_mode(void)
{
	int ret;

	ret = drm_dp_calc_pbn_mode(154000, 30);
	if (ret != 689) {
		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
			  154000, 30, 689, ret);
		return -EINVAL;
	}
	ret = drm_dp_calc_pbn_mode(234000, 30);
	if (ret != 1047) {
		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
			  234000, 30, 1047, ret);
		return -EINVAL;
	}
	ret = drm_dp_calc_pbn_mode(297000, 24);
	if (ret != 1063) {
		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
			  297000, 24, 1063, ret);
		return -EINVAL;
	}
	return 0;
}

/* we want to kick the TX after we've ack the up/down IRQs. */
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
{
	queue_work(system_long_wq, &mgr->tx_work);
}

static void drm_dp_mst_dump_mstb(struct seq_file *m,
				 struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_port *port;
	int tabs = mstb->lct;
	char prefix[10];
	int i;

	for (i = 0; i < tabs; i++)
		prefix[i] = '\t';
	prefix[i] = '\0';

	seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
	list_for_each_entry(port, &mstb->ports, next) {
		seq_printf(m, "%sport: %d: input: %d: pdt: %d, ddps: %d ldps: %d, sdp: %d/%d, %p, conn: %p\n",
			   prefix, port->port_num, port->input, port->pdt,
			   port->ddps, port->ldps, port->num_sdp_streams,
			   port->num_sdp_stream_sinks, port, port->connector);
		if (port->mstb)
			drm_dp_mst_dump_mstb(m, port->mstb);
	}
}

#define DP_PAYLOAD_TABLE_SIZE		64

static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf)
{
	int i;

	for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
		if (drm_dp_dpcd_read(mgr->aux,
				     DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
				     &buf[i], 16) != 16)
			return false;
	}
	return true;
}

static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
			       struct drm_dp_mst_port *port, char *name,
			       int namelen)
{
	struct edid *mst_edid;

	mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
	drm_edid_get_monitor_name(mst_edid, name, namelen);
	/* drm_dp_mst_get_edid() returns a duplicated EDID; free it */
	kfree(mst_edid);
}
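
/**
 * drm_dp_mst_dump_topology(): dump topology to seq file.
 * @m: seq_file to dump output to
 * @mgr: manager to dump current topology for.
 *
 * helper to dump MST topology to a seq file for debugfs.
 */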
void drm_dp_mst_dump_topology(struct seq_file *m,
			      struct drm_dp_mst_topology_mgr *mgr)
{
	int i;
	struct drm_dp_mst_port *port;

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary)
		drm_dp_mst_dump_mstb(m, mgr->mst_primary);

	mutex_unlock(&mgr->lock);

	/* dump VCPIs */
	mutex_lock(&mgr->payload_lock);
	seq_printf(m, "vcpi: %lx %lx %d\n", mgr->payload_mask, mgr->vcpi_mask,
		   mgr->max_payloads);

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->proposed_vcpis[i]) {
			char name[14];

			port = container_of(mgr->proposed_vcpis[i],
					    struct drm_dp_mst_port, vcpi);
			fetch_monitor_name(mgr, port, name, sizeof(name));
			seq_printf(m, "vcpi %d: %d %d %d sink name: %s\n", i,
				   port->port_num, port->vcpi.vcpi,
				   port->vcpi.num_slots,
				   (*name != 0) ? name : "Unknown");
		} else {
			seq_printf(m, "vcpi %d:unused\n", i);
		}
	}
	for (i = 0; i < mgr->max_payloads; i++) {
		seq_printf(m, "payload %d: %d, %d, %d\n",
			   i,
			   mgr->payloads[i].payload_state,
			   mgr->payloads[i].start_slot,
			   mgr->payloads[i].num_slots);
	}
	mutex_unlock(&mgr->payload_lock);

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		u8 buf[DP_PAYLOAD_TABLE_SIZE];
		int ret;

		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf,
				       DP_RECEIVER_CAP_SIZE);
		seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
		ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
		seq_printf(m, "faux/mst: %*ph\n", 2, buf);
		ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
		seq_printf(m, "mst ctrl: %*ph\n", 1, buf);

		/* dump the standard OUI branch header */
		ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf,
				       DP_BRANCH_OUI_HEADER_SIZE);
		seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
		for (i = 0x3; i < 0x8 && buf[i]; i++)
			seq_printf(m, "%c", buf[i]);
		seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
			   buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
		if (dump_dp_payload_table(mgr, buf))
			seq_printf(m, "payload table: %*ph\n",
				   DP_PAYLOAD_TABLE_SIZE, buf);
	}

	mutex_unlock(&mgr->lock);
}
EXPORT_SYMBOL(drm_dp_mst_dump_topology);

static void drm_dp_tx_work(struct work_struct *work)
{
	struct drm_dp_mst_topology_mgr *mgr =
		container_of(work, struct drm_dp_mst_topology_mgr, tx_work);

	mutex_lock(&mgr->qlock);
	if (!list_empty(&mgr->tx_msg_downq))
		process_single_down_tx_qlock(mgr);
	mutex_unlock(&mgr->qlock);
}

static void drm_dp_destroy_connector_work(struct work_struct *work)
{
	struct drm_dp_mst_topology_mgr *mgr =
		container_of(work, struct drm_dp_mst_topology_mgr,
			     destroy_connector_work);
	struct drm_dp_mst_port *port;
	bool send_hotplug = false;

	/*
	 * Not a regular list traverse as we have to drop the destroy
	 * connector lock before destroying the connector, to avoid AB->BA
	 * ordering between this lock and the config mutex.
	 */
	for (;;) {
		mutex_lock(&mgr->destroy_connector_lock);
		port = list_first_entry_or_null(&mgr->destroy_connector_list,
						struct drm_dp_mst_port, next);
		if (!port) {
			mutex_unlock(&mgr->destroy_connector_lock);
			break;
		}
		list_del(&port->next);
		mutex_unlock(&mgr->destroy_connector_lock);

		INIT_LIST_HEAD(&port->next);

		mgr->cbs->destroy_connector(mgr, port->connector);

		drm_dp_port_teardown_pdt(port, port->pdt);
		port->pdt = DP_PEER_DEVICE_NONE;

		drm_dp_mst_put_port_malloc(port);
		send_hotplug = true;
	}
	if (send_hotplug)
		drm_kms_helper_hotplug_event(mgr->dev);
}

static struct drm_private_state *
drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
{
	struct drm_dp_mst_topology_state *state, *old_state =
		to_dp_mst_topology_state(obj->state);
	struct drm_dp_vcpi_allocation *pos, *vcpi;

	state = kmemdup(old_state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	INIT_LIST_HEAD(&state->vcpis);

	list_for_each_entry(pos, &old_state->vcpis, next) {
		/* Prune leftover freed VCPI allocations */
		if (!pos->vcpi)
			continue;

		vcpi = kmemdup(pos, sizeof(*vcpi), GFP_KERNEL);
		if (!vcpi)
			goto fail;

		drm_dp_mst_get_port_malloc(vcpi->port);
		list_add(&vcpi->next, &state->vcpis);
	}

	return &state->base;

fail:
	list_for_each_entry_safe(pos, vcpi, &state->vcpis, next) {
		drm_dp_mst_put_port_malloc(pos->port);
		kfree(pos);
	}
	kfree(state);

	return NULL;
}

static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
				     struct drm_private_state *state)
{
	struct drm_dp_mst_topology_state *mst_state =
		to_dp_mst_topology_state(state);
	struct drm_dp_vcpi_allocation *pos, *tmp;

	list_for_each_entry_safe(pos, tmp, &mst_state->vcpis, next) {
		/* We only keep references to ports with non-zero VCPIs */
		if (pos->vcpi)
			drm_dp_mst_put_port_malloc(pos->port);
		kfree(pos);
	}

	kfree(mst_state);
}

static inline int
drm_dp_mst_atomic_check_topology_state(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_topology_state *mst_state)
{
	struct drm_dp_vcpi_allocation *vcpi;
	int avail_slots = 63, payload_count = 0;

	list_for_each_entry(vcpi, &mst_state->vcpis, next) {
		/* Releasing VCPI is always OK-even if the port is gone */
		if (!vcpi->vcpi) {
			DRM_DEBUG_ATOMIC("[MST PORT:%p] releases all VCPI slots\n",
					 vcpi->port);
			continue;
		}

		DRM_DEBUG_ATOMIC("[MST PORT:%p] requires %d vcpi slots\n",
				 vcpi->port, vcpi->vcpi);

		avail_slots -= vcpi->vcpi;
		if (avail_slots < 0) {
			DRM_DEBUG_ATOMIC("[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)\n",
					 vcpi->port, mst_state,
					 avail_slots + vcpi->vcpi);
			return -ENOSPC;
		}

		if (++payload_count > mgr->max_payloads) {
			DRM_DEBUG_ATOMIC("[MST MGR:%p] state %p has too many payloads (max=%d)\n",
					 mgr, mst_state, mgr->max_payloads);
			return -EINVAL;
		}
	}
	DRM_DEBUG_ATOMIC("[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n",
			 mgr, mst_state, avail_slots,
			 63 - avail_slots);

	return 0;
}
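
/**
 * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
 * atomic update is valid
 * @state: Pointer to the new &struct drm_dp_mst_topology_state
 *
 * Checks the given topology state for an atomic update to ensure that it's
 * valid. This includes checking whether there's enough bandwidth to support
 * the new VCPI allocations in the atomic update.
 *
 * Any atomic drivers supporting DP MST must make sure to call this after
 * checking the rest of their state in their
 * &drm_mode_config_funcs.atomic_check() callback.
 *
 * See also:
 * drm_dp_atomic_find_vcpi_slots()
 * drm_dp_atomic_release_vcpi_slots()
 *
 * Returns:
 * 0 if the new state is valid, negative error code otherwise.
 */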
int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
{
	struct drm_dp_mst_topology_mgr *mgr;
	struct drm_dp_mst_topology_state *mst_state;
	int i, ret = 0;

	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
		ret = drm_dp_mst_atomic_check_topology_state(mgr, mst_state);
		if (ret)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_atomic_check);

const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = {
	.atomic_duplicate_state = drm_dp_mst_duplicate_state,
	.atomic_destroy_state = drm_dp_mst_destroy_state,
};
EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
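
/**
 * drm_atomic_get_mst_topology_state: get MST topology state
 *
 * @state: global atomic state
 * @mgr: MST topology manager, also the private object in this case
 *
 * This function wraps drm_atomic_get_private_obj_state() passing in the MST
 * atomic state vtable so that the private object state returned is that of a
 * MST topology object. Also, drm_atomic_get_private_obj_state() expects the
 * caller to take care of the locking, so warn if we don't hold the
 * connection_mutex.
 *
 * RETURNS:
 * The MST topology state or error pointer.
 */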
struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
								    struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_device *dev = mgr->dev;

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
	return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
}
EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
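
/**
 * drm_dp_mst_topology_mgr_init - initialise a topology manager
 * @mgr: manager struct to initialise
 * @dev: device providing this structure - for i2c addition.
 * @aux: DP helper aux channel to talk to this device
 * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
 * @max_payloads: maximum number of payloads this GPU can source
 * @conn_base_id: the connector object ID the MST device is connected to.
 *
 * Return 0 for success, or negative error code on failure
 */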
int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_device *dev, struct drm_dp_aux *aux,
				 int max_dpcd_transaction_bytes,
				 int max_payloads, int conn_base_id)
{
	struct drm_dp_mst_topology_state *mst_state;

	mutex_init(&mgr->lock);
	mutex_init(&mgr->qlock);
	mutex_init(&mgr->payload_lock);
	mutex_init(&mgr->destroy_connector_lock);
	INIT_LIST_HEAD(&mgr->tx_msg_downq);
	INIT_LIST_HEAD(&mgr->destroy_connector_list);
	INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
	INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
	INIT_WORK(&mgr->destroy_connector_work, drm_dp_destroy_connector_work);
	init_waitqueue_head(&mgr->tx_waitq);
	mgr->dev = dev;
	mgr->aux = aux;
	mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
	mgr->max_payloads = max_payloads;
	mgr->conn_base_id = conn_base_id;
	if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
	    max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
		return -EINVAL;
	mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload),
				GFP_KERNEL);
	if (!mgr->payloads)
		return -ENOMEM;
	mgr->proposed_vcpis = kcalloc(max_payloads,
				      sizeof(struct drm_dp_vcpi *),
				      GFP_KERNEL);
	if (!mgr->proposed_vcpis)
		return -ENOMEM;
	set_bit(0, &mgr->payload_mask);
	if (test_calc_pbn_mode() < 0)
		DRM_ERROR("MST PBN self-test failed\n");

	mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
	if (mst_state == NULL)
		return -ENOMEM;

	mst_state->mgr = mgr;
	INIT_LIST_HEAD(&mst_state->vcpis);

	drm_atomic_private_obj_init(dev, &mgr->base,
				    &mst_state->base,
				    &drm_dp_mst_topology_state_funcs);

	return 0;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
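
/**
 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
 * @mgr: manager to destroy
 */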
void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
{
	drm_dp_mst_topology_mgr_set_mst(mgr, false);
	flush_work(&mgr->work);
	flush_work(&mgr->destroy_connector_work);
	mutex_lock(&mgr->payload_lock);
	kfree(mgr->payloads);
	mgr->payloads = NULL;
	kfree(mgr->proposed_vcpis);
	mgr->proposed_vcpis = NULL;
	mutex_unlock(&mgr->payload_lock);
	mgr->dev = NULL;
	mgr->aux = NULL;
	drm_atomic_private_obj_fini(&mgr->base);
	mgr->funcs = NULL;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);

static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num)
{
	int i;

	if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)
		return false;

	for (i = 0; i < num - 1; i++) {
		if (msgs[i].flags & I2C_M_RD ||
		    msgs[i].len > 0xff)
			return false;
	}

	return msgs[num - 1].flags & I2C_M_RD &&
		msgs[num - 1].len <= 0xff;
}

/* I2C device */
static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter,
			       struct i2c_msg *msgs, int num)
{
	struct drm_dp_aux *aux = adapter->algo_data;
	struct drm_dp_mst_port *port =
		container_of(aux, struct drm_dp_mst_port, aux);
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
	unsigned int i;
	struct drm_dp_sideband_msg_req_body msg;
	struct drm_dp_sideband_msg_tx *txmsg = NULL;
	int ret;

	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
	if (!mstb)
		return -EREMOTEIO;

	if (!remote_i2c_read_ok(msgs, num)) {
		DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
		ret = -EIO;
		goto out;
	}

	memset(&msg, 0, sizeof(msg));
	msg.req_type = DP_REMOTE_I2C_READ;
	msg.u.i2c_read.num_transactions = num - 1;
	msg.u.i2c_read.port_number = port->port_num;
	for (i = 0; i < num - 1; i++) {
		msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
		msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
		msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
		msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP);
	}
	msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
	msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto out;
	}

	txmsg->dst = mstb;
	drm_dp_encode_sideband_req(&msg, txmsg);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {

		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
			ret = -EREMOTEIO;
			goto out;
		}
		if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
			ret = -EIO;
			goto out;
		}
		memcpy(msgs[num - 1].buf,
		       txmsg->reply.u.remote_i2c_read_ack.bytes,
		       msgs[num - 1].len);
		ret = num;
	}
out:
	kfree(txmsg);
	drm_dp_mst_topology_put_mstb(mstb);
	return ret;
}

static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
	       I2C_FUNC_SMBUS_READ_BLOCK_DATA |
	       I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
	       I2C_FUNC_10BIT_ADDR;
}

static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
	.functionality = drm_dp_mst_i2c_functionality,
	.master_xfer = drm_dp_mst_i2c_xfer,
};
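
/**
 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
 * @aux: DisplayPort AUX channel
 *
 * Returns 0 on success or a negative error code on failure.
 */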
static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
{
	aux->ddc.algo = &drm_dp_mst_i2c_algo;
	aux->ddc.algo_data = aux;
	aux->ddc.retries = 3;

	aux->ddc.class = I2C_CLASS_DDC;
	aux->ddc.owner = THIS_MODULE;
	aux->ddc.dev.parent = aux->dev;
	aux->ddc.dev.of_node = aux->dev->of_node;

	strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
		sizeof(aux->ddc.name));

	return i2c_add_adapter(&aux->ddc);
}
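
/**
 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
 * @aux: DisplayPort AUX channel
 */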
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
{
	i2c_del_adapter(&aux->ddc);
}