#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/iopoll.h>

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
#include <linux/stacktrace.h>
#include <linux/sort.h>
#include <linux/timekeeping.h>
#include <linux/math64.h>
#endif

#include <drm/dp/drm_dp_mst_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

#include "drm_dp_helper_internal.h"
#include "drm_dp_mst_topology_internal.h"

/**
 * DOC: dp mst helper
 *
 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
 * protocol. The helpers contain a topology manager and bandwidth manager.
 * The helpers encapsulate the sending and receiving of sideband messages.
 */
struct drm_dp_pending_up_req {
	struct drm_dp_sideband_msg_hdr hdr;
	struct drm_dp_sideband_msg_req_body msg;
	struct list_head next;
};

static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf);

static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id,
				     struct drm_dp_payload *payload);

static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port,
				 int offset, int size, u8 *bytes);
static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes);

static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				    struct drm_dp_mst_branch *mstb);

static void
drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_mst_branch *mstb);

static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid);

static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);

static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
						 struct drm_dp_mst_branch *branch);

#define DBG_PREFIX "[dp_mst]"

#define DP_STR(x) [DP_ ## x] = #x

static const char *drm_dp_mst_req_type_str(u8 req_type)
{
	static const char * const req_type_str[] = {
		DP_STR(GET_MSG_TRANSACTION_VERSION),
		DP_STR(LINK_ADDRESS),
		DP_STR(CONNECTION_STATUS_NOTIFY),
		DP_STR(ENUM_PATH_RESOURCES),
		DP_STR(ALLOCATE_PAYLOAD),
		DP_STR(QUERY_PAYLOAD),
		DP_STR(RESOURCE_STATUS_NOTIFY),
		DP_STR(CLEAR_PAYLOAD_ID_TABLE),
		DP_STR(REMOTE_DPCD_READ),
		DP_STR(REMOTE_DPCD_WRITE),
		DP_STR(REMOTE_I2C_READ),
		DP_STR(REMOTE_I2C_WRITE),
		DP_STR(POWER_UP_PHY),
		DP_STR(POWER_DOWN_PHY),
		DP_STR(SINK_EVENT_NOTIFY),
		DP_STR(QUERY_STREAM_ENC_STATUS),
	};

	if (req_type >= ARRAY_SIZE(req_type_str) ||
	    !req_type_str[req_type])
		return "unknown";

	return req_type_str[req_type];
}

#undef DP_STR
#define DP_STR(x) [DP_NAK_ ## x] = #x

static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)
{
	static const char * const nak_reason_str[] = {
		DP_STR(WRITE_FAILURE),
		DP_STR(INVALID_READ),
		DP_STR(CRC_FAILURE),
		DP_STR(BAD_PARAM),
		DP_STR(DEFER),
		DP_STR(LINK_FAILURE),
		DP_STR(NO_RESOURCES),
		DP_STR(DPCD_FAIL),
		DP_STR(I2C_NAK),
		DP_STR(ALLOCATE_FAIL),
	};

	if (nak_reason >= ARRAY_SIZE(nak_reason_str) ||
	    !nak_reason_str[nak_reason])
		return "unknown";

	return nak_reason_str[nak_reason];
}

#undef DP_STR
#define DP_STR(x) [DRM_DP_SIDEBAND_TX_ ## x] = #x

static const char *drm_dp_mst_sideband_tx_state_str(int state)
{
	static const char * const sideband_reason_str[] = {
		DP_STR(QUEUED),
		DP_STR(START_SEND),
		DP_STR(SENT),
		DP_STR(RX),
		DP_STR(TIMEOUT),
	};

	if (state >= ARRAY_SIZE(sideband_reason_str) ||
	    !sideband_reason_str[state])
		return "unknown";

	return sideband_reason_str[state];
}

static int
drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len)
{
	int i;
	u8 unpacked_rad[16];

	for (i = 0; i < lct; i++) {
		if (i % 2)
			unpacked_rad[i] = rad[i / 2] >> 4;
		else
			unpacked_rad[i] = rad[i / 2] & 0xf; /* low nibble */
	}

	/* TODO: Eventually add something to printk so we can format the rad
	 * like this: 1.2.3
	 */
	return snprintf(out, len, "%*phC", lct, unpacked_rad);
}
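/*
 * Worked example for drm_dp_mst_rad_to_str() (illustrative only, not part of
 * the driver): a branch device reached through two downstream hops packs
 * those hops two-per-byte in rad[]. With lct = 3 and rad[0] = 0x12, the loop
 * unpacks one port-number nibble per hop into unpacked_rad[], and "%*phC"
 * then hex-dumps the first lct bytes of that array into @out.
 */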
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = num_nibbles * 4;
	u8 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x10) == 0x10)
			remainder ^= 0x13;
	}

	number_of_bits = 4;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x10) != 0)
			remainder ^= 0x13;
	}

	return remainder;
}
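/*
 * Illustrative sketch (not compiled): verifying a received sideband header
 * with drm_dp_msg_header_crc4(). The CRC covers every header nibble except
 * the final CRC nibble itself, hence the "(len * 2) - 1" nibble count used
 * by the encode/decode paths below:
 *
 *	u8 crc = drm_dp_msg_header_crc4(buf, (hdr_len * 2) - 1);
 *	bool ok = (crc & 0xf) == (buf[hdr_len - 1] & 0xf);
 *
 * (hdr_len is a hypothetical local here.) The polynomial is x^4 + x + 1,
 * i.e. 0x13 with the implicit high bit included.
 */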

static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = number_of_bytes * 8;
	u16 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x100) == 0x100)
			remainder ^= 0xd5;
	}

	number_of_bits = 8;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x100) != 0)
			remainder ^= 0xd5;
	}

	return remainder & 0xff;
}

static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
{
	u8 size = 3;

	size += (hdr->lct / 2);
	return size;
}

static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int *len)
{
	int idx = 0;
	int i;
	u8 crc4;

	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
	for (i = 0; i < (hdr->lct / 2); i++)
		buf[idx++] = hdr->rad[i];
	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
		(hdr->msg_len & 0x3f);
	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);

	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
	buf[idx - 1] |= (crc4 & 0xf);

	*len = idx;
}

static bool drm_dp_decode_sideband_msg_hdr(const struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int buflen, u8 *hdrlen)
{
	u8 crc4;
	u8 len;
	int i;
	u8 idx;

	if (buf[0] == 0)
		return false;
	len = 3;
	len += ((buf[0] & 0xf0) >> 4) / 2;
	if (len > buflen)
		return false;
	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);

	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
		drm_dbg_kms(mgr->dev, "crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
		return false;
	}

	hdr->lct = (buf[0] & 0xf0) >> 4;
	hdr->lcr = (buf[0] & 0xf);
	idx = 1;
	for (i = 0; i < (hdr->lct / 2); i++)
		hdr->rad[i] = buf[idx++];
	hdr->broadcast = (buf[idx] >> 7) & 0x1;
	hdr->path_msg = (buf[idx] >> 6) & 0x1;
	hdr->msg_len = buf[idx] & 0x3f;
	idx++;
	hdr->somt = (buf[idx] >> 7) & 0x1;
	hdr->eomt = (buf[idx] >> 6) & 0x1;
	hdr->seqno = (buf[idx] >> 4) & 0x1;
	idx++;
	*hdrlen = idx;
	return true;
}
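/*
 * Example header layout handled by the encode/decode helpers above
 * (illustrative): for lct = 1, lcr = 0, msg_len = 6, somt = eomt = 1 and
 * seqno = 0, the 3-byte header is
 *
 *	buf[0] = 0x10          (lct in the high nibble, lcr in the low)
 *	buf[1] = 0x06          (broadcast = 0, path_msg = 0, msg_len = 6)
 *	buf[2] = 0xc0 | crc4   (somt/eomt set, seqno 0, CRC-4 low nibble)
 *
 * No RAD bytes are present since lct / 2 == 0.
 */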

void
drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
			   struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	int i;
	u8 *buf = raw->msg;

	buf[idx++] = req->req_type & 0x7f;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
		idx++;
		break;
	case DP_ALLOCATE_PAYLOAD:
		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
			(req->u.allocate_payload.number_sdp_streams & 0xf);
		idx++;
		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn >> 8);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
		idx++;
		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
				(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
			idx++;
		}
		if (req->u.allocate_payload.number_sdp_streams & 1) {
			i = req->u.allocate_payload.number_sdp_streams - 1;
			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
			idx++;
		}
		break;
	case DP_QUERY_PAYLOAD:
		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
		idx++;
		break;
	case DP_REMOTE_DPCD_READ:
		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_read.num_bytes);
		idx++;
		break;

	case DP_REMOTE_DPCD_WRITE:
		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
		idx += req->u.dpcd_write.num_bytes;
		break;
	case DP_REMOTE_I2C_READ:
		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
		idx++;
		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
			idx++;
			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
			idx++;
			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
			idx += req->u.i2c_read.transactions[i].num_bytes;

			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4;
			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
			idx++;
		}
		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_read.num_bytes_read);
		idx++;
		break;

	case DP_REMOTE_I2C_WRITE:
		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
		idx += req->u.i2c_write.num_bytes;
		break;
	case DP_QUERY_STREAM_ENC_STATUS: {
		const struct drm_dp_query_stream_enc_status *msg;

		msg = &req->u.enc_status;
		buf[idx] = msg->stream_id;
		idx++;
		memcpy(&buf[idx], msg->client_id, sizeof(msg->client_id));
		idx += sizeof(msg->client_id);
		buf[idx] = 0;
		buf[idx] |= FIELD_PREP(GENMASK(1, 0), msg->stream_event);
		buf[idx] |= msg->valid_stream_event ? BIT(2) : 0;
		buf[idx] |= FIELD_PREP(GENMASK(4, 3), msg->stream_behavior);
		buf[idx] |= msg->valid_stream_behavior ? BIT(5) : 0;
		idx++;
		}
		break;
	}
	raw->cur_len = idx;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_encode_sideband_req);
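/*
 * Illustrative example of the encoding above (not compiled): a
 * DP_REMOTE_DPCD_READ of one byte at DPCD address 0x00500 on port 2 becomes
 *
 *	raw->msg[0] = 0x20  (req_type, DP_REMOTE_DPCD_READ)
 *	raw->msg[1] = 0x20  (port 2 in the high nibble, address bits 19:16)
 *	raw->msg[2] = 0x05  (address bits 15:8)
 *	raw->msg[3] = 0x00  (address bits 7:0)
 *	raw->msg[4] = 0x01  (number of bytes to read)
 *
 * with raw->cur_len = 5. The sideband header and CRC are added later in the
 * tx path, not here.
 */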

/* Decode a sideband request we've encoded, mainly for debugging */
int
drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,
			   struct drm_dp_sideband_msg_req_body *req)
{
	const u8 *buf = raw->msg;
	int i, idx = 0;

	req->req_type = buf[idx++] & 0x7f;
	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		req->u.port_num.port_number = (buf[idx] >> 4) & 0xf;
		break;
	case DP_ALLOCATE_PAYLOAD:
		{
			struct drm_dp_allocate_payload *a =
				&req->u.allocate_payload;

			a->number_sdp_streams = buf[idx] & 0xf;
			a->port_number = (buf[idx] >> 4) & 0xf;

			WARN_ON(buf[++idx] & 0x80);
			a->vcpi = buf[idx] & 0x7f;

			a->pbn = buf[++idx] << 8;
			a->pbn |= buf[++idx];

			idx++;
			for (i = 0; i < a->number_sdp_streams; i++) {
				a->sdp_stream_sink[i] =
					(buf[idx + (i / 2)] >> ((i % 2) ? 0 : 4)) & 0xf;
			}
		}
		break;
	case DP_QUERY_PAYLOAD:
		req->u.query_payload.port_number = (buf[idx] >> 4) & 0xf;
		WARN_ON(buf[++idx] & 0x80);
		req->u.query_payload.vcpi = buf[idx] & 0x7f;
		break;
	case DP_REMOTE_DPCD_READ:
		{
			struct drm_dp_remote_dpcd_read *r = &req->u.dpcd_read;

			r->port_number = (buf[idx] >> 4) & 0xf;

			r->dpcd_address = (buf[idx] << 16) & 0xf0000;
			r->dpcd_address |= (buf[++idx] << 8) & 0xff00;
			r->dpcd_address |= buf[++idx] & 0xff;

			r->num_bytes = buf[++idx];
		}
		break;
	case DP_REMOTE_DPCD_WRITE:
		{
			struct drm_dp_remote_dpcd_write *w =
				&req->u.dpcd_write;

			w->port_number = (buf[idx] >> 4) & 0xf;

			w->dpcd_address = (buf[idx] << 16) & 0xf0000;
			w->dpcd_address |= (buf[++idx] << 8) & 0xff00;
			w->dpcd_address |= buf[++idx] & 0xff;

			w->num_bytes = buf[++idx];

			w->bytes = kmemdup(&buf[++idx], w->num_bytes,
					   GFP_KERNEL);
			if (!w->bytes)
				return -ENOMEM;
		}
		break;
	case DP_REMOTE_I2C_READ:
		{
			struct drm_dp_remote_i2c_read *r = &req->u.i2c_read;
			struct drm_dp_remote_i2c_read_tx *tx;
			bool failed = false;

			r->num_transactions = buf[idx] & 0x3;
			r->port_number = (buf[idx] >> 4) & 0xf;
			for (i = 0; i < r->num_transactions; i++) {
				tx = &r->transactions[i];

				tx->i2c_dev_id = buf[++idx] & 0x7f;
				tx->num_bytes = buf[++idx];
				tx->bytes = kmemdup(&buf[++idx],
						    tx->num_bytes,
						    GFP_KERNEL);
				if (!tx->bytes) {
					failed = true;
					break;
				}
				idx += tx->num_bytes;
				/* The encoder packs no_stop_bit at bit 4 */
				tx->no_stop_bit = (buf[idx] >> 4) & 0x1;
				tx->i2c_transaction_delay = buf[idx] & 0xf;
			}

			if (failed) {
				for (i = 0; i < r->num_transactions; i++) {
					tx = &r->transactions[i];
					kfree(tx->bytes);
				}
				return -ENOMEM;
			}

			r->read_i2c_device_id = buf[++idx] & 0x7f;
			r->num_bytes_read = buf[++idx];
		}
		break;
	case DP_REMOTE_I2C_WRITE:
		{
			struct drm_dp_remote_i2c_write *w = &req->u.i2c_write;

			w->port_number = (buf[idx] >> 4) & 0xf;
			w->write_i2c_device_id = buf[++idx] & 0x7f;
			w->num_bytes = buf[++idx];
			w->bytes = kmemdup(&buf[++idx], w->num_bytes,
					   GFP_KERNEL);
			if (!w->bytes)
				return -ENOMEM;
		}
		break;
	case DP_QUERY_STREAM_ENC_STATUS:
		req->u.enc_status.stream_id = buf[idx++];
		for (i = 0; i < sizeof(req->u.enc_status.client_id); i++)
			req->u.enc_status.client_id[i] = buf[idx++];

		req->u.enc_status.stream_event = FIELD_GET(GENMASK(1, 0),
							   buf[idx]);
		req->u.enc_status.valid_stream_event = FIELD_GET(BIT(2),
								 buf[idx]);
		req->u.enc_status.stream_behavior = FIELD_GET(GENMASK(4, 3),
							      buf[idx]);
		req->u.enc_status.valid_stream_behavior = FIELD_GET(BIT(5),
								    buf[idx]);
		break;
	}

	return 0;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_decode_sideband_req);

void
drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req,
				  int indent, struct drm_printer *printer)
{
	int i;

#define P(f, ...) drm_printf_indent(printer, indent, f, ##__VA_ARGS__)
	if (req->req_type == DP_LINK_ADDRESS) {
		/* No contents to print */
		P("type=%s\n", drm_dp_mst_req_type_str(req->req_type));
		return;
	}

	P("type=%s contents:\n", drm_dp_mst_req_type_str(req->req_type));
	indent++;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		P("port=%d\n", req->u.port_num.port_number);
		break;
	case DP_ALLOCATE_PAYLOAD:
		P("port=%d vcpi=%d pbn=%d sdp_streams=%d %*ph\n",
		  req->u.allocate_payload.port_number,
		  req->u.allocate_payload.vcpi, req->u.allocate_payload.pbn,
		  req->u.allocate_payload.number_sdp_streams,
		  req->u.allocate_payload.number_sdp_streams,
		  req->u.allocate_payload.sdp_stream_sink);
		break;
	case DP_QUERY_PAYLOAD:
		P("port=%d vcpi=%d\n",
		  req->u.query_payload.port_number,
		  req->u.query_payload.vcpi);
		break;
	case DP_REMOTE_DPCD_READ:
		P("port=%d dpcd_addr=%05x len=%d\n",
		  req->u.dpcd_read.port_number, req->u.dpcd_read.dpcd_address,
		  req->u.dpcd_read.num_bytes);
		break;
	case DP_REMOTE_DPCD_WRITE:
		P("port=%d addr=%05x len=%d: %*ph\n",
		  req->u.dpcd_write.port_number,
		  req->u.dpcd_write.dpcd_address,
		  req->u.dpcd_write.num_bytes, req->u.dpcd_write.num_bytes,
		  req->u.dpcd_write.bytes);
		break;
	case DP_REMOTE_I2C_READ:
		P("port=%d num_tx=%d id=%d size=%d:\n",
		  req->u.i2c_read.port_number,
		  req->u.i2c_read.num_transactions,
		  req->u.i2c_read.read_i2c_device_id,
		  req->u.i2c_read.num_bytes_read);

		indent++;
		for (i = 0; i < req->u.i2c_read.num_transactions; i++) {
			const struct drm_dp_remote_i2c_read_tx *rtx =
				&req->u.i2c_read.transactions[i];

			P("%d: id=%03d size=%03d no_stop_bit=%d tx_delay=%03d: %*ph\n",
			  i, rtx->i2c_dev_id, rtx->num_bytes,
			  rtx->no_stop_bit, rtx->i2c_transaction_delay,
			  rtx->num_bytes, rtx->bytes);
		}
		break;
	case DP_REMOTE_I2C_WRITE:
		P("port=%d id=%d size=%d: %*ph\n",
		  req->u.i2c_write.port_number,
		  req->u.i2c_write.write_i2c_device_id,
		  req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes,
		  req->u.i2c_write.bytes);
		break;
	case DP_QUERY_STREAM_ENC_STATUS:
		P("stream_id=%u client_id=%*ph stream_event=%x "
		  "valid_event=%d stream_behavior=%x valid_behavior=%d",
		  req->u.enc_status.stream_id,
		  (int)ARRAY_SIZE(req->u.enc_status.client_id),
		  req->u.enc_status.client_id, req->u.enc_status.stream_event,
		  req->u.enc_status.valid_stream_event,
		  req->u.enc_status.stream_behavior,
		  req->u.enc_status.valid_stream_behavior);
		break;
	default:
		P("???\n");
		break;
	}
#undef P
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_dump_sideband_msg_req_body);

static inline void
drm_dp_mst_dump_sideband_msg_tx(struct drm_printer *p,
				const struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_sideband_msg_req_body req;
	char buf[64];
	int ret;
	int i;

	drm_dp_mst_rad_to_str(txmsg->dst->rad, txmsg->dst->lct, buf,
			      sizeof(buf));
	drm_printf(p, "txmsg cur_offset=%x cur_len=%x seqno=%x state=%s path_msg=%d dst=%s\n",
		   txmsg->cur_offset, txmsg->cur_len, txmsg->seqno,
		   drm_dp_mst_sideband_tx_state_str(txmsg->state),
		   txmsg->path_msg, buf);

	ret = drm_dp_decode_sideband_req(txmsg, &req);
	if (ret) {
		drm_printf(p, "<failed to decode sideband req: %d>\n", ret);
		return;
	}
	drm_dp_dump_sideband_msg_req_body(&req, 1, p);

	switch (req.req_type) {
	case DP_REMOTE_DPCD_WRITE:
		kfree(req.u.dpcd_write.bytes);
		break;
	case DP_REMOTE_I2C_READ:
		for (i = 0; i < req.u.i2c_read.num_transactions; i++)
			kfree(req.u.i2c_read.transactions[i].bytes);
		break;
	case DP_REMOTE_I2C_WRITE:
		kfree(req.u.i2c_write.bytes);
		break;
	}
}

static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
{
	u8 crc4;

	crc4 = drm_dp_msg_data_crc4(msg, len);
	msg[len] = crc4;
}
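/*
 * Usage note (illustrative): drm_dp_crc_sideband_chunk_req() appends the
 * data CRC in place at msg[len], so callers must size the chunk buffer for
 * len + 1 bytes:
 *
 *	drm_dp_crc_sideband_chunk_req(chunk, chunk_len);
 *	(chunk now holds chunk_len payload bytes followed by one CRC byte)
 */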

static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
					 struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	u8 *buf = raw->msg;

	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);

	raw->cur_len = idx;
}

static bool drm_dp_sideband_msg_set_header(struct drm_dp_sideband_msg_rx *msg,
					   struct drm_dp_sideband_msg_hdr *hdr,
					   u8 hdrlen)
{
	/*
	 * ignore out-of-order messages or messages that are part of a
	 * failed transaction
	 */
	if (!hdr->somt && !msg->have_somt)
		return false;

	/* get length contained in this portion */
	msg->curchunk_idx = 0;
	msg->curchunk_len = hdr->msg_len;
	msg->curchunk_hdrlen = hdrlen;

	/* we have already gotten an somt - don't bother parsing */
	if (hdr->somt && msg->have_somt)
		return false;

	if (hdr->somt) {
		memcpy(&msg->initial_hdr, hdr,
		       sizeof(struct drm_dp_sideband_msg_hdr));
		msg->have_somt = true;
	}
	if (hdr->eomt)
		msg->have_eomt = true;

	return true;
}

/* this adds a chunk of msg to the builder to get the final msg */
static bool drm_dp_sideband_append_payload(struct drm_dp_sideband_msg_rx *msg,
					   u8 *replybuf, u8 replybuflen)
{
	u8 crc4;

	memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
	msg->curchunk_idx += replybuflen;

	if (msg->curchunk_idx >= msg->curchunk_len) {
		/* do CRC */
		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
		if (crc4 != msg->chunk[msg->curchunk_len - 1])
			print_hex_dump(KERN_DEBUG, "wrong crc",
				       DUMP_PREFIX_NONE, 16, 1,
				       msg->chunk, msg->curchunk_len, false);
		/* copy chunk into bigger msg */
		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
		msg->curlen += msg->curchunk_len - 1;
	}
	return true;
}

static bool drm_dp_sideband_parse_link_address(const struct drm_dp_mst_topology_mgr *mgr,
					       struct drm_dp_sideband_msg_rx *raw,
					       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	int i;

	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
	idx += 16;
	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
		if (raw->msg[idx] & 0x80)
			repmsg->u.link_addr.ports[i].input_port = 1;

		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);

		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
		if (repmsg->u.link_addr.ports[i].input_port == 0)
			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		if (repmsg->u.link_addr.ports[i].input_port == 0) {
			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
			idx++;
			if (idx > raw->curlen)
				goto fail_len;
			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
			idx += 16;
			if (idx > raw->curlen)
				goto fail_len;
			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
			idx++;
		}
		if (idx > raw->curlen)
			goto fail_len;
	}

	return true;
fail_len:
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
						   struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote dpcd read reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
						      struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
	idx++;
	/* TODO check */
	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
							  struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
	repmsg->u.path_resources.fec_capable = raw->msg[idx] & 0x1;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen) {
		DRM_DEBUG_KMS("power up/down phy parse length fail %d %d\n",
			      idx, raw->curlen);
		return false;
	}
	return true;
}

static bool
drm_dp_sideband_parse_query_stream_enc_status(
	struct drm_dp_sideband_msg_rx *raw,
	struct drm_dp_sideband_msg_reply_body *repmsg)
{
	struct drm_dp_query_stream_enc_status_ack_reply *reply;

	reply = &repmsg->u.enc_status;

	reply->stream_id = raw->msg[3];

	reply->reply_signed = raw->msg[2] & BIT(0);

	/*
	 * Byte 2 of the reply carries the per-stream device-presence flags,
	 * byte 1 the stream's authentication/encryption state.
	 */
	reply->hdcp_1x_device_present = raw->msg[2] & BIT(4);
	reply->hdcp_2x_device_present = raw->msg[2] & BIT(3);

	reply->query_capable_device_present = raw->msg[2] & BIT(5);
	reply->legacy_device_present = raw->msg[2] & BIT(6);
	reply->unauthorizable_device_present = raw->msg[2] & BIT(7);

	reply->auth_completed = !!(raw->msg[1] & BIT(3));
	reply->encryption_enabled = !!(raw->msg[1] & BIT(4));
	reply->repeater_present = !!(raw->msg[1] & BIT(5));
	reply->state = (raw->msg[1] & GENMASK(7, 6)) >> 6;

	return true;
}

static bool drm_dp_sideband_parse_reply(const struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_sideband_msg_rx *raw,
					struct drm_dp_sideband_msg_reply_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
	msg->req_type = (raw->msg[0] & 0x7f);

	if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) {
		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
		msg->u.nak.reason = raw->msg[17];
		msg->u.nak.nak_data = raw->msg[18];
		return false;
	}

	switch (msg->req_type) {
	case DP_LINK_ADDRESS:
		return drm_dp_sideband_parse_link_address(mgr, raw, msg);
	case DP_QUERY_PAYLOAD:
		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
	case DP_REMOTE_DPCD_READ:
		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
	case DP_REMOTE_DPCD_WRITE:
		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
	case DP_REMOTE_I2C_READ:
		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
	case DP_REMOTE_I2C_WRITE:
		return true; /* since there's nothing to parse */
	case DP_ENUM_PATH_RESOURCES:
		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
	case DP_ALLOCATE_PAYLOAD:
		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
	case DP_CLEAR_PAYLOAD_ID_TABLE:
		return true; /* since there's nothing to parse */
	case DP_QUERY_STREAM_ENC_STATUS:
		return drm_dp_sideband_parse_query_stream_enc_status(raw, msg);
	default:
		drm_err(mgr->dev, "Got unknown reply 0x%02x (%s)\n",
			msg->req_type, drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}

static bool
drm_dp_sideband_parse_connection_status_notify(const struct drm_dp_mst_topology_mgr *mgr,
					       struct drm_dp_sideband_msg_rx *raw,
					       struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
	idx++;
	return true;
fail_len:
	drm_dbg_kms(mgr->dev, "connection status reply parse length fail %d %d\n",
		    idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_resource_status_notify(const struct drm_dp_mst_topology_mgr *mgr,
							  struct drm_dp_sideband_msg_rx *raw,
							  struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx++;
	return true;
fail_len:
	drm_dbg_kms(mgr->dev, "resource status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_req(const struct drm_dp_mst_topology_mgr *mgr,
				      struct drm_dp_sideband_msg_rx *raw,
				      struct drm_dp_sideband_msg_req_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->req_type = (raw->msg[0] & 0x7f);

	switch (msg->req_type) {
	case DP_CONNECTION_STATUS_NOTIFY:
		return drm_dp_sideband_parse_connection_status_notify(mgr, raw, msg);
	case DP_RESOURCE_STATUS_NOTIFY:
		return drm_dp_sideband_parse_resource_status_notify(mgr, raw, msg);
	default:
		drm_err(mgr->dev, "Got unknown request 0x%02x (%s)\n",
			msg->req_type, drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}

static void build_dpcd_write(struct drm_dp_sideband_msg_tx *msg,
			     u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_WRITE;
	req.u.dpcd_write.port_number = port_num;
	req.u.dpcd_write.dpcd_address = offset;
	req.u.dpcd_write.num_bytes = num_bytes;
	req.u.dpcd_write.bytes = bytes;
	drm_dp_encode_sideband_req(&req, msg);
}

static void build_link_address(struct drm_dp_sideband_msg_tx *msg)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_LINK_ADDRESS;
	drm_dp_encode_sideband_req(&req, msg);
}

static void build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
}

static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg,
				     int port_num)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_ENUM_PATH_RESOURCES;
	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static void build_allocate_payload(struct drm_dp_sideband_msg_tx *msg,
				   int port_num,
				   u8 vcpi, uint16_t pbn,
				   u8 number_sdp_streams,
				   u8 *sdp_stream_sink)
{
	struct drm_dp_sideband_msg_req_body req;

	memset(&req, 0, sizeof(req));
	req.req_type = DP_ALLOCATE_PAYLOAD;
	req.u.allocate_payload.port_number = port_num;
	req.u.allocate_payload.vcpi = vcpi;
	req.u.allocate_payload.pbn = pbn;
	req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
	memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
	       number_sdp_streams);
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
}

static void build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
				   int port_num, bool power_up)
{
	struct drm_dp_sideband_msg_req_body req;

	if (power_up)
		req.req_type = DP_POWER_UP_PHY;
	else
		req.req_type = DP_POWER_DOWN_PHY;

	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
}

static int
build_query_stream_enc_status(struct drm_dp_sideband_msg_tx *msg, u8 stream_id,
			      u8 *q_id)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_QUERY_STREAM_ENC_STATUS;
	req.u.enc_status.stream_id = stream_id;
	memcpy(req.u.enc_status.client_id, q_id,
	       sizeof(req.u.enc_status.client_id));
	req.u.enc_status.stream_event = 0;
	req.u.enc_status.valid_stream_event = false;
	req.u.enc_status.stream_behavior = 0;
	req.u.enc_status.valid_stream_behavior = false;

	drm_dp_encode_sideband_req(&req, msg);
	return 0;
}

static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_vcpi *vcpi)
{
	int ret, vcpi_ret;

	mutex_lock(&mgr->payload_lock);
	ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
	if (ret > mgr->max_payloads) {
		ret = -EINVAL;
		drm_dbg_kms(mgr->dev, "out of payload ids %d\n", ret);
		goto out_unlock;
	}

	vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
	if (vcpi_ret > mgr->max_payloads) {
		ret = -EINVAL;
		drm_dbg_kms(mgr->dev, "out of vcpi ids %d\n", ret);
		goto out_unlock;
	}

	set_bit(ret, &mgr->payload_mask);
	set_bit(vcpi_ret, &mgr->vcpi_mask);
	vcpi->vcpi = vcpi_ret + 1;
	mgr->proposed_vcpis[ret - 1] = vcpi;
out_unlock:
	mutex_unlock(&mgr->payload_lock);
	return ret;
}
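/*
 * Illustrative behavior of drm_dp_mst_assign_payload_id() (this sketch
 * assumes bit 0 of both masks is reserved at manager init, which happens
 * outside this section): with payload_mask = 0b0001 and vcpi_mask = 0b0001,
 * the first allocation finds bit 1 in both masks, sets vcpi->vcpi = 2,
 * stores the proposed VCPI at proposed_vcpis[0], and returns payload id 1.
 */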

static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
				      int vcpi)
{
	int i;

	if (vcpi == 0)
		return;

	mutex_lock(&mgr->payload_lock);
	drm_dbg_kms(mgr->dev, "putting payload %d\n", vcpi);
	clear_bit(vcpi - 1, &mgr->vcpi_mask);

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->proposed_vcpis[i] &&
		    mgr->proposed_vcpis[i]->vcpi == vcpi) {
			mgr->proposed_vcpis[i] = NULL;
			clear_bit(i + 1, &mgr->payload_mask);
		}
	}
	mutex_unlock(&mgr->payload_lock);
}

static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_sideband_msg_tx *txmsg)
{
	unsigned int state;

	/*
	 * All updates to txmsg->state are protected by mgr->qlock, and the
	 * two cases we check here are terminal states. For those the barriers
	 * provided by the wake_up/wait_event pair are enough.
	 */
	state = READ_ONCE(txmsg->state);
	return (state == DRM_DP_SIDEBAND_TX_RX ||
		state == DRM_DP_SIDEBAND_TX_TIMEOUT);
}

static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
				    struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
	unsigned long wait_timeout = msecs_to_jiffies(4000);
	unsigned long wait_expires = jiffies + wait_timeout;
	int ret;

	for (;;) {
		/*
		 * If the driver provides a poll_hpd_irq() callback, wait in
		 * 50 msec intervals and poll for the reply in between, so a
		 * reply is still noticed even if the sink's HPD pulse got
		 * lost on the way. Otherwise, wait for the full timeout in
		 * one go.
		 */
		ret = wait_event_timeout(mgr->tx_waitq,
					 check_txmsg_state(mgr, txmsg),
					 mgr->cbs->poll_hpd_irq ?
						msecs_to_jiffies(50) :
						wait_timeout);

		if (ret || !mgr->cbs->poll_hpd_irq ||
		    time_after(jiffies, wait_expires))
			break;

		mgr->cbs->poll_hpd_irq(mgr);
	}

	mutex_lock(&mgr->qlock);
	if (ret > 0) {
		if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
			ret = -EIO;
			goto out;
		}
	} else {
		drm_dbg_kms(mgr->dev, "timedout msg send %p %d %d\n",
			    txmsg, txmsg->state, txmsg->seqno);

		/* dump some state */
		ret = -EIO;

		/* remove from q */
		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
			list_del(&txmsg->next);
	}
out:
	if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
		struct drm_printer p = drm_debug_printer(DBG_PREFIX);

		drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
	}
	mutex_unlock(&mgr->qlock);

	drm_dp_mst_kick_tx(mgr);
	return ret;
}
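/*
 * Typical usage sketch for the tx path (illustrative; drm_dp_queue_down_tx()
 * lives later in this file and is assumed by this sketch):
 *
 *	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
 *	if (!txmsg)
 *		return -ENOMEM;
 *	txmsg->dst = mstb;
 *	build_link_address(txmsg);
 *	drm_dp_queue_down_tx(mgr, txmsg);
 *	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
 *
 * A positive return value means a reply arrived and was left in
 * txmsg->reply for the caller to parse; -EIO signals a timeout or NAK-like
 * failure.
 */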

static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;

	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
	if (!mstb)
		return NULL;

	mstb->lct = lct;
	if (lct > 1)
		memcpy(mstb->rad, rad, lct / 2);
	INIT_LIST_HEAD(&mstb->ports);
	kref_init(&mstb->topology_kref);
	kref_init(&mstb->malloc_kref);
	return mstb;
}

static void drm_dp_free_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb =
		container_of(kref, struct drm_dp_mst_branch, malloc_kref);

	if (mstb->port_parent)
		drm_dp_mst_put_port_malloc(mstb->port_parent);

	kfree(mstb);
}

/**
 * DOC: Branch device and port refcounting
 *
 * Topology refcount overview
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The refcounting scheme for &struct drm_dp_mst_branch and &struct
 * drm_dp_mst_port is somewhat unusual: each structure carries two separate
 * refcounts, a topology refcount and a malloc refcount.
 *
 * Topology refcounts (&drm_dp_mst_branch.topology_kref,
 * &drm_dp_mst_port.topology_kref) track how many users currently rely on a
 * branch device or port remaining part of the in-memory topology. Once a
 * topology refcount drops to zero the device has been removed from the
 * topology and its topology-related state (connectors, child devices, etc.)
 * is torn down, although the structure itself may live on. Topology
 * references are acquired with drm_dp_mst_topology_try_get_mstb() /
 * drm_dp_mst_topology_try_get_port() (or the plain _get_ variants when a
 * topology reference is already held) and released with
 * drm_dp_mst_topology_put_mstb() / drm_dp_mst_topology_put_port().
 *
 * Malloc refcounts (&drm_dp_mst_branch.malloc_kref,
 * &drm_dp_mst_port.malloc_kref) only keep the memory allocation itself
 * alive, so users such as drivers can safely hold pointers to ports and
 * branch devices that may no longer be part of the topology. Holding a
 * topology reference implies holding a malloc reference for the same
 * duration. Malloc references are managed with drm_dp_mst_get_mstb_malloc()
 * / drm_dp_mst_put_mstb_malloc() for branch devices and
 * drm_dp_mst_get_port_malloc() / drm_dp_mst_put_port_malloc() for ports.
 */

/**
 * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of
 *
 * Increments &drm_dp_mst_branch.malloc_kref. When the refcount reaches 0,
 * the memory allocation for @mstb will be released and @mstb may no longer
 * be used.
 *
 * Any malloc refs acquired with this function must be released with
 * drm_dp_mst_put_mstb_malloc().
 */
static void
drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	kref_get(&mstb->malloc_kref);
	drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
}

/**
 * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of
 *
 * Decrements &drm_dp_mst_branch.malloc_kref. When the refcount reaches 0,
 * the memory allocation for @mstb will be released and @mstb may no longer
 * be used.
 *
 * See also: drm_dp_mst_get_mstb_malloc()
 */
static void
drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
	kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device);
}

static void drm_dp_free_mst_port(struct kref *kref)
{
	struct drm_dp_mst_port *port =
		container_of(kref, struct drm_dp_mst_port, malloc_kref);

	drm_dp_mst_put_mstb_malloc(port->parent);
	kfree(port);
}

/**
 * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
 * @port: The &struct drm_dp_mst_port to increment the malloc refcount of
 *
 * Increments &drm_dp_mst_port.malloc_kref. When the refcount reaches 0, the
 * memory allocation for @port will be released and @port may no longer be
 * used.
 *
 * Because @port could potentially be freed at any time by the DP MST helpers
 * if &drm_dp_mst_port.malloc_kref reaches 0, including during a call to this
 * function, drivers that wish to make use of &struct drm_dp_mst_port should
 * ensure that they grab at least one main malloc reference to their MST
 * ports in &drm_dp_mst_topology_cbs.add_connector, which is called before
 * there is any chance for &drm_dp_mst_port.malloc_kref to reach 0.
 *
 * See also: drm_dp_mst_put_port_malloc()
 */
void
drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port)
{
	kref_get(&port->malloc_kref);
	drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->malloc_kref));
}
EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);

/**
 * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
 * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of
 *
 * Decrements &drm_dp_mst_port.malloc_kref. When the refcount reaches 0, the
 * memory allocation for @port will be released.
 *
 * See also: drm_dp_mst_get_port_malloc()
 */
void
drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
{
	drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1);
	kref_put(&port->malloc_kref, drm_dp_free_mst_port);
}
EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);
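/*
 * Illustrative pairing for the malloc references above: a driver caching a
 * port pointer in its own connector state would do
 *
 *	drm_dp_mst_get_port_malloc(port);	(in its add_connector hook)
 *	...
 *	drm_dp_mst_put_port_malloc(port);	(in its connector destroy hook)
 *
 * which keeps the struct drm_dp_mst_port allocation valid even after the
 * port has been removed from the topology.
 */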

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)

#define STACK_DEPTH 8

static noinline void
__topology_ref_save(struct drm_dp_mst_topology_mgr *mgr,
		    struct drm_dp_mst_topology_ref_history *history,
		    enum drm_dp_mst_topology_ref_type type)
{
	struct drm_dp_mst_topology_ref_entry *entry = NULL;
	depot_stack_handle_t backtrace;
	ulong stack_entries[STACK_DEPTH];
	uint n;
	int i;

	n = stack_trace_save(stack_entries, ARRAY_SIZE(stack_entries), 1);
	backtrace = stack_depot_save(stack_entries, n, GFP_KERNEL);
	if (!backtrace)
		return;

	/* Try to find an existing entry for this backtrace */
	for (i = 0; i < history->len; i++) {
		if (history->entries[i].backtrace == backtrace) {
			entry = &history->entries[i];
			break;
		}
	}

	/* Otherwise add one */
	if (!entry) {
		struct drm_dp_mst_topology_ref_entry *new;
		int new_len = history->len + 1;

		new = krealloc(history->entries, sizeof(*new) * new_len,
			       GFP_KERNEL);
		if (!new)
			return;

		entry = &new[history->len];
		history->len = new_len;
		history->entries = new;

		entry->backtrace = backtrace;
		entry->type = type;
		entry->count = 0;
	}
	entry->count++;
	entry->ts_nsec = ktime_get_ns();
}

static int
topology_ref_history_cmp(const void *a, const void *b)
{
	const struct drm_dp_mst_topology_ref_entry *entry_a = a, *entry_b = b;

	if (entry_a->ts_nsec > entry_b->ts_nsec)
		return 1;
	else if (entry_a->ts_nsec < entry_b->ts_nsec)
		return -1;
	else
		return 0;
}

static inline const char *
topology_ref_type_to_str(enum drm_dp_mst_topology_ref_type type)
{
	if (type == DRM_DP_MST_TOPOLOGY_REF_GET)
		return "get";
	else
		return "put";
}

static void
__dump_topology_ref_history(struct drm_dp_mst_topology_ref_history *history,
			    void *ptr, const char *type_str)
{
	struct drm_printer p = drm_debug_printer(DBG_PREFIX);
	char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	int i;

	if (!buf)
		return;

	if (!history->len)
		goto out;

	/*
	 * First, sort the list so that it goes from oldest to newest
	 * reference entry
	 */
	sort(history->entries, history->len, sizeof(*history->entries),
	     topology_ref_history_cmp, NULL);

	drm_printf(&p, "%s (%p) topology count reached 0, dumping history:\n",
		   type_str, ptr);

	for (i = 0; i < history->len; i++) {
		const struct drm_dp_mst_topology_ref_entry *entry =
			&history->entries[i];
		u64 ts_nsec = entry->ts_nsec;
		u32 rem_nsec = do_div(ts_nsec, 1000000000);

		stack_depot_snprint(entry->backtrace, buf, PAGE_SIZE, 4);

		drm_printf(&p, "  %d %ss (last at %5llu.%06u):\n%s",
			   entry->count,
			   topology_ref_type_to_str(entry->type),
			   ts_nsec, rem_nsec / 1000, buf);
	}

	/* The history is no longer needed, free it */
	kfree(history->entries);
out:
	kfree(buf);
}

static __always_inline void
drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb)
{
	__dump_topology_ref_history(&mstb->topology_ref_history, mstb,
				    "MSTB");
}

static __always_inline void
drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port)
{
	__dump_topology_ref_history(&port->topology_ref_history, port,
				    "Port");
}

static __always_inline void
save_mstb_topology_ref(struct drm_dp_mst_branch *mstb,
		       enum drm_dp_mst_topology_ref_type type)
{
	__topology_ref_save(mstb->mgr, &mstb->topology_ref_history, type);
}

static __always_inline void
save_port_topology_ref(struct drm_dp_mst_port *port,
		       enum drm_dp_mst_topology_ref_type type)
{
	__topology_ref_save(port->mgr, &port->topology_ref_history, type);
}

static inline void
topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr)
{
	mutex_lock(&mgr->topology_ref_history_lock);
}

static inline void
topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr)
{
	mutex_unlock(&mgr->topology_ref_history_lock);
}
#else
static inline void
topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) {}
static inline void
topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) {}
static inline void
drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) {}
static inline void
drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) {}
#define save_mstb_topology_ref(mstb, type)
#define save_port_topology_ref(port, type)
#endif

static void drm_dp_destroy_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb =
		container_of(kref, struct drm_dp_mst_branch, topology_kref);
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;

	drm_dp_mst_dump_mstb_topology_history(mstb);

	INIT_LIST_HEAD(&mstb->destroy_next);

	/*
	 * This can get called under mgr->mutex, so we need to perform the
	 * actual destruction of the mstb in another worker
	 */
	mutex_lock(&mgr->delayed_destroy_lock);
	list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list);
	mutex_unlock(&mgr->delayed_destroy_lock);
	queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work);
}

/**
 * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
 * branch device unless it's zero
 * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
 *
 * Attempts to grab a topology reference to @mstb, if it hasn't yet been
 * removed from the topology (i.e. &drm_dp_mst_branch.topology_kref has not
 * reached 0). Holding a topology reference implies that a malloc reference
 * will be held to @mstb as long as the user holds the topology reference.
 *
 * Care should be taken to ensure that the user has at least one malloc
 * reference to @mstb. If you already have a topology reference to @mstb, you
 * should use drm_dp_mst_topology_get_mstb() instead.
 *
 * See also:
 * drm_dp_mst_topology_get_mstb()
 * drm_dp_mst_topology_put_mstb()
 *
 * Returns:
 * * 1: A topology reference was grabbed successfully
 * * 0: @mstb is no longer in the topology, no reference was grabbed
 */
static int __must_check
drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
{
	int ret;

	topology_ref_history_lock(mstb->mgr);
	ret = kref_get_unless_zero(&mstb->topology_kref);
	if (ret) {
		drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
		save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
	}

	topology_ref_history_unlock(mstb->mgr);

	return ret;
}

/**
 * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
 * branch device
 * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
 *
 * Increments &drm_dp_mst_branch.topology_kref without checking whether or
 * not it's already reached 0. This is only valid to use in scenarios where
 * you are already guaranteed to have at least one active topology reference
 * to @mstb. Otherwise, drm_dp_mst_topology_try_get_mstb() must be used.
 *
 * See also:
 * drm_dp_mst_topology_try_get_mstb()
 * drm_dp_mst_topology_put_mstb()
 */
static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
{
	topology_ref_history_lock(mstb->mgr);

	save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
	WARN_ON(kref_read(&mstb->topology_kref) == 0);
	kref_get(&mstb->topology_kref);
	drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));

	topology_ref_history_unlock(mstb->mgr);
}

/**
 * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to release the topology reference from
 *
 * Releases a topology reference from @mstb by decrementing
 * &drm_dp_mst_branch.topology_kref.
 *
 * See also:
 * drm_dp_mst_topology_try_get_mstb()
 * drm_dp_mst_topology_get_mstb()
 */
static void
drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
{
	topology_ref_history_lock(mstb->mgr);

	drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref) - 1);
	save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_PUT);

	topology_ref_history_unlock(mstb->mgr);
	kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
}

static void drm_dp_destroy_port(struct kref *kref)
{
	struct drm_dp_mst_port *port =
		container_of(kref, struct drm_dp_mst_port, topology_kref);
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;

	drm_dp_mst_dump_port_topology_history(port);

	/* There's nothing that needs locking to destroy an input port yet */
	if (port->input) {
		drm_dp_mst_put_port_malloc(port);
		return;
	}

	kfree(port->cached_edid);

	/*
	 * we can't destroy the connector here, as we might be holding the
	 * mode_config.mutex from an EDID retrieval
	 */
	mutex_lock(&mgr->delayed_destroy_lock);
	list_add(&port->next, &mgr->destroy_port_list);
	mutex_unlock(&mgr->delayed_destroy_lock);
	queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work);
}

/**
 * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
 * port unless it's zero
 * @port: &struct drm_dp_mst_port to increment the topology refcount of
 *
 * Attempts to grab a topology reference to @port, if it hasn't yet been
 * removed from the topology (i.e. &drm_dp_mst_port.topology_kref has not
 * reached 0). Holding a topology reference implies that a malloc reference
 * will be held to @port as long as the user holds the topology reference.
 *
 * Care should be taken to ensure that the user has at least one malloc
 * reference to @port. If you already have a topology reference to @port, you
 * should use drm_dp_mst_topology_get_port() instead.
 *
 * See also:
 * drm_dp_mst_topology_get_port()
 * drm_dp_mst_topology_put_port()
 *
 * Returns:
 * * 1: A topology reference was grabbed successfully
 * * 0: @port is no longer in the topology, no reference was grabbed
 */
static int __must_check
drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
{
	int ret;

	topology_ref_history_lock(port->mgr);
	ret = kref_get_unless_zero(&port->topology_kref);
	if (ret) {
		drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref));
		save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
	}

	topology_ref_history_unlock(port->mgr);
	return ret;
}

/**
 * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
 * @port: The &struct drm_dp_mst_port to increment the topology refcount of
 *
 * Increments &drm_dp_mst_port.topology_kref without checking whether or not
 * it's already reached 0. This is only valid to use in scenarios where you
 * are already guaranteed to have at least one active topology reference to
 * @port. Otherwise, drm_dp_mst_topology_try_get_port() must be used.
 *
 * See also:
 * drm_dp_mst_topology_try_get_port()
 * drm_dp_mst_topology_put_port()
 */
static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
{
	topology_ref_history_lock(port->mgr);

	WARN_ON(kref_read(&port->topology_kref) == 0);
	kref_get(&port->topology_kref);
	drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref));
	save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);

	topology_ref_history_unlock(port->mgr);
}

/**
 * drm_dp_mst_topology_put_port() - release a topology reference to a port
 * @port: The &struct drm_dp_mst_port to release the topology reference from
 *
 * Releases a topology reference from @port by decrementing
 * &drm_dp_mst_port.topology_kref.
 *
 * See also:
 * drm_dp_mst_topology_try_get_port()
 * drm_dp_mst_topology_get_port()
 */
static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
{
	topology_ref_history_lock(port->mgr);

	drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref) - 1);
	save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_PUT);

	topology_ref_history_unlock(port->mgr);
	kref_put(&port->topology_kref, drm_dp_destroy_port);
}

static struct drm_dp_mst_branch *
drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb,
					      struct drm_dp_mst_branch *to_find)
{
	struct drm_dp_mst_port *port;
	struct drm_dp_mst_branch *rmstb;

	if (to_find == mstb)
		return mstb;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->mstb) {
			rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
			    port->mstb, to_find);
			if (rmstb)
				return rmstb;
		}
	}
	return NULL;
}

static struct drm_dp_mst_branch *
drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_branch *rmstb = NULL;

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
		    mgr->mst_primary, mstb);

		if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb))
			rmstb = NULL;
	}
	mutex_unlock(&mgr->lock);
	return rmstb;
}

static struct drm_dp_mst_port *
drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb,
					      struct drm_dp_mst_port *to_find)
{
	struct drm_dp_mst_port *port, *mport;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port == to_find)
			return port;

		if (port->mstb) {
			mport = drm_dp_mst_topology_get_port_validated_locked(
			    port->mstb, to_find);
			if (mport)
				return mport;
		}
	}
	return NULL;
}

static struct drm_dp_mst_port *
drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_port *rport = NULL;

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		rport = drm_dp_mst_topology_get_port_validated_locked(
		    mgr->mst_primary, port);

		if (rport && !drm_dp_mst_topology_try_get_port(rport))
			rport = NULL;
	}
	mutex_unlock(&mgr->lock);
	return rport;
}

static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
{
	struct drm_dp_mst_port *port;
	int ret;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->port_num == port_num) {
			ret = drm_dp_mst_topology_try_get_port(port);
			return ret ? port : NULL;
		}
	}

	return NULL;
}

/*
 * Calculate the RAD (relative address) for a device hanging off @port: start
 * from the parent branch device's RAD and append @port's port number as the
 * next nibble. Returns the new link count total (LCT).
 */
static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
			       u8 *rad)
{
	int parent_lct = port->parent->lct;
	int shift = 4;
	int idx = (parent_lct - 1) / 2;

	if (parent_lct > 1) {
		memcpy(rad, port->parent->rad, idx + 1);
		shift = (parent_lct % 2) ? 4 : 0;
	} else
		rad[0] = 0;

	rad[idx] |= port->port_num << shift;
	return parent_lct + 1;
}
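/*
 * Worked example for drm_dp_calculate_rad() (illustrative): a parent branch
 * with lct = 2 and rad[0] = 0x10 (first hop through port 1) gaining a child
 * on port 3 gives idx = 0 and shift = 0, so rad[0] becomes 0x13 and the
 * function returns the new LCT of 3.
 */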

static bool drm_dp_mst_is_end_device(u8 pdt, bool mcs)
{
	switch (pdt) {
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		return true;
	case DP_PEER_DEVICE_MST_BRANCHING:
		/* For sst branch device */
		if (!mcs)
			return true;

		return false;
	}
	return true;
}
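/*
 * Summary of drm_dp_mst_is_end_device() (illustrative): legacy converters
 * and SST sinks are always end devices; a DP_PEER_DEVICE_MST_BRANCHING peer
 * only counts as an end device when it lacks message capability (an SST
 * branch, mcs == false). Unknown PDTs default to end device.
 */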

static int
drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
		    bool new_mcs)
{
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
	struct drm_dp_mst_branch *mstb;
	u8 rad[8], lct;
	int ret = 0;

	if (port->pdt == new_pdt && port->mcs == new_mcs)
		return 0;

	/* Teardown the old pdt, if there is one */
	if (port->pdt != DP_PEER_DEVICE_NONE) {
		if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
			/*
			 * If the new PDT would also have an i2c bus,
			 * don't bother with reregistering it
			 */
			if (new_pdt != DP_PEER_DEVICE_NONE &&
			    drm_dp_mst_is_end_device(new_pdt, new_mcs)) {
				port->pdt = new_pdt;
				port->mcs = new_mcs;
				return 0;
			}

			/* remove i2c over sideband */
			drm_dp_mst_unregister_i2c_bus(port);
		} else {
			mutex_lock(&mgr->lock);
			drm_dp_mst_topology_put_mstb(port->mstb);
			port->mstb = NULL;
			mutex_unlock(&mgr->lock);
		}
	}

	port->pdt = new_pdt;
	port->mcs = new_mcs;

	if (port->pdt != DP_PEER_DEVICE_NONE) {
		if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
			/* add i2c over sideband */
			ret = drm_dp_mst_register_i2c_bus(port);
		} else {
			lct = drm_dp_calculate_rad(port, rad);
			mstb = drm_dp_add_mst_branch_device(lct, rad);
			if (!mstb) {
				ret = -ENOMEM;
				drm_err(mgr->dev, "Failed to create MSTB for port %p", port);
				goto out;
			}

			mutex_lock(&mgr->lock);
			port->mstb = mstb;
			mstb->mgr = port->mgr;
			mstb->port_parent = port;

			/*
			 * Make sure this port's memory allocation stays
			 * around until its child MSTB releases it
			 */
			drm_dp_mst_get_port_malloc(port);
			mutex_unlock(&mgr->lock);

			/* And make sure we send a link address for this */
			ret = 1;
		}
	}

out:
	if (ret < 0)
		port->pdt = DP_PEER_DEVICE_NONE;
	return ret;
}

/**
 * drm_dp_mst_dpcd_read() - read a series of bytes from the DPCD via sideband
 * @aux: Fake sideband AUX CH
 * @offset: address of the (first) register to read
 * @buffer: buffer to store the register values
 * @size: number of bytes in @buffer
 *
 * Performs the same functionality for remote devices via sideband messaging
 * as drm_dp_dpcd_read() does for local devices via actual AUX CH.
 *
 * Return: Number of bytes read, or negative error code on failure.
 */
ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
			     unsigned int offset, void *buffer, size_t size)
{
	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
						    aux);

	return drm_dp_send_dpcd_read(port->mgr, port,
				     offset, size, buffer);
}

/**
 * drm_dp_mst_dpcd_write() - write a series of bytes to the DPCD via sideband
 * @aux: Fake sideband AUX CH
 * @offset: address of the (first) register to write
 * @buffer: buffer containing the values to write
 * @size: number of bytes in @buffer
 *
 * Performs the same functionality for remote devices via sideband messaging
 * as drm_dp_dpcd_write() does for local devices via actual AUX CH.
 *
 * Return: number of bytes written on success, or negative error code on
 * failure.
 */
ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
			      unsigned int offset, void *buffer, size_t size)
{
	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
						    aux);

	return drm_dp_send_dpcd_write(port->mgr, port,
				      offset, size, buffer);
}

static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
{
	int ret = 0;

	memcpy(mstb->guid, guid, 16);

	if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
		if (mstb->port_parent) {
			ret = drm_dp_send_dpcd_write(mstb->mgr,
						     mstb->port_parent,
						     DP_GUID, 16, mstb->guid);
		} else {
			ret = drm_dp_dpcd_write(mstb->mgr->aux,
						DP_GUID, mstb->guid, 16);
		}
	}

	if (ret < 16 && ret > 0)
		return -EPROTO;

	return ret == 16 ? 0 : ret;
}

static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
				int pnum,
				char *proppath,
				size_t proppath_size)
{
	int i;
	char temp[8];

	snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
	for (i = 0; i < (mstb->lct - 1); i++) {
		int shift = (i % 2) ? 0 : 4;
		int port_num = (mstb->rad[i / 2] >> shift) & 0xf;

		snprintf(temp, sizeof(temp), "-%d", port_num);
		strlcat(proppath, temp, proppath_size);
	}
	snprintf(temp, sizeof(temp), "-%d", pnum);
	strlcat(proppath, temp, proppath_size);
}
2253
/**
 * drm_dp_mst_connector_late_register() - Late MST connector registration
 * @connector: The MST connector
 * @port: The MST port for this connector
 *
 * Helper to register the remote aux device for this MST port. Drivers should
 * call this from their mst connector's late_register hook to enable MST aux
 * devices.
 *
 * Return: 0 on success, negative error code on failure.
 */
2265int drm_dp_mst_connector_late_register(struct drm_connector *connector,
2266 struct drm_dp_mst_port *port)
2267{
2268 drm_dbg_kms(port->mgr->dev, "registering %s remote bus for %s\n",
2269 port->aux.name, connector->kdev->kobj.name);
2270
2271 port->aux.dev = connector->kdev;
2272 return drm_dp_aux_register_devnode(&port->aux);
2273}
2274EXPORT_SYMBOL(drm_dp_mst_connector_late_register);
2275
/**
 * drm_dp_mst_connector_early_unregister() - Early MST connector unregistration
 * @connector: The MST connector
 * @port: The MST port for this connector
 *
 * Helper to unregister the remote aux device for this MST port, registered by
 * drm_dp_mst_connector_late_register(). Drivers should call this from their
 * mst connector's early_unregister hook.
 */
2285void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
2286 struct drm_dp_mst_port *port)
2287{
2288 drm_dbg_kms(port->mgr->dev, "unregistering %s remote bus for %s\n",
2289 port->aux.name, connector->kdev->kobj.name);
2290 drm_dp_aux_unregister_devnode(&port->aux);
2291}
2292EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister);
2293
2294static void
2295drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
2296 struct drm_dp_mst_port *port)
2297{
2298 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
2299 char proppath[255];
2300 int ret;
2301
2302 build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
2303 port->connector = mgr->cbs->add_connector(mgr, port, proppath);
2304 if (!port->connector) {
2305 ret = -ENOMEM;
2306 goto error;
2307 }
2308
2309 if (port->pdt != DP_PEER_DEVICE_NONE &&
2310 drm_dp_mst_is_end_device(port->pdt, port->mcs) &&
2311 port->port_num >= DP_MST_LOGICAL_PORT_0)
2312 port->cached_edid = drm_get_edid(port->connector,
2313 &port->aux.ddc);
2314
2315 drm_connector_register(port->connector);
2316 return;
2317
2318error:
2319 drm_err(mgr->dev, "Failed to create connector for port %p: %d\n", port, ret);
2320}
2321
/*
 * Drop a topology reference, and unlink the port from the in-memory topology
 * layout
 */
2326static void
2327drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr,
2328 struct drm_dp_mst_port *port)
2329{
2330 mutex_lock(&mgr->lock);
2331 port->parent->num_ports--;
2332 list_del(&port->next);
2333 mutex_unlock(&mgr->lock);
2334 drm_dp_mst_topology_put_port(port);
2335}
2336
2337static struct drm_dp_mst_port *
2338drm_dp_mst_add_port(struct drm_device *dev,
2339 struct drm_dp_mst_topology_mgr *mgr,
2340 struct drm_dp_mst_branch *mstb, u8 port_number)
2341{
2342 struct drm_dp_mst_port *port = kzalloc(sizeof(*port), GFP_KERNEL);
2343
2344 if (!port)
2345 return NULL;
2346
2347 kref_init(&port->topology_kref);
2348 kref_init(&port->malloc_kref);
2349 port->parent = mstb;
2350 port->port_num = port_number;
2351 port->mgr = mgr;
2352 port->aux.name = "DPMST";
2353 port->aux.dev = dev->dev;
2354 port->aux.is_remote = true;
2355
2356
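 /* initialize the MST downstream port's AUX channel */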
2357 port->aux.drm_dev = dev;
2358 drm_dp_remote_aux_init(&port->aux);
2359
 /*
  * Make sure the memory allocation for our parent branch stays
  * around until our own memory allocation is released
  */
2364 drm_dp_mst_get_mstb_malloc(mstb);
2365
2366 return port;
2367}
2368
2369static int
2370drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
2371 struct drm_device *dev,
2372 struct drm_dp_link_addr_reply_port *port_msg)
2373{
2374 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
2375 struct drm_dp_mst_port *port;
2376 int old_ddps = 0, ret;
2377 u8 new_pdt = DP_PEER_DEVICE_NONE;
 bool new_mcs = false;
2379 bool created = false, send_link_addr = false, changed = false;
2380
2381 port = drm_dp_get_port(mstb, port_msg->port_number);
2382 if (!port) {
2383 port = drm_dp_mst_add_port(dev, mgr, mstb,
2384 port_msg->port_number);
2385 if (!port)
2386 return -ENOMEM;
2387 created = true;
2388 changed = true;
2389 } else if (!port->input && port_msg->input_port && port->connector) {
 /*
  * Since port->connector can't be changed here, we create a
  * new port if input_port changes from 0 to 1
  */
2393 drm_dp_mst_topology_unlink_port(mgr, port);
2394 drm_dp_mst_topology_put_port(port);
2395 port = drm_dp_mst_add_port(dev, mgr, mstb,
2396 port_msg->port_number);
2397 if (!port)
2398 return -ENOMEM;
2399 changed = true;
2400 created = true;
2401 } else if (port->input && !port_msg->input_port) {
2402 changed = true;
2403 } else if (port->connector) {
 /*
  * The port is already exposed to userspace, so changing its
  * state requires holding the topology manager's modeset lock
  */
2407 drm_modeset_lock(&mgr->base.lock, NULL);
2408
2409 old_ddps = port->ddps;
2410 changed = port->ddps != port_msg->ddps ||
2411 (port->ddps &&
2412 (port->ldps != port_msg->legacy_device_plug_status ||
2413 port->dpcd_rev != port_msg->dpcd_revision ||
2414 port->mcs != port_msg->mcs ||
2415 port->pdt != port_msg->peer_device_type ||
2416 port->num_sdp_stream_sinks !=
2417 port_msg->num_sdp_stream_sinks));
2418 }
2419
2420 port->input = port_msg->input_port;
2421 if (!port->input)
2422 new_pdt = port_msg->peer_device_type;
2423 new_mcs = port_msg->mcs;
2424 port->ddps = port_msg->ddps;
2425 port->ldps = port_msg->legacy_device_plug_status;
2426 port->dpcd_rev = port_msg->dpcd_revision;
2427 port->num_sdp_streams = port_msg->num_sdp_streams;
2428 port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
2429
2430
2431
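 /* manage mstb port lists with mgr lock - take a reference for this list */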
2432 if (created) {
2433 mutex_lock(&mgr->lock);
2434 drm_dp_mst_topology_get_port(port);
2435 list_add(&port->next, &mstb->ports);
2436 mstb->num_ports++;
2437 mutex_unlock(&mgr->lock);
2438 }
2439
 /*
  * Reprobe PBN caps on both hotplug, and when re-probing the link
  * for our parent mstb
  */
2444 if (old_ddps != port->ddps || !created) {
2445 if (port->ddps && !port->input) {
2446 ret = drm_dp_send_enum_path_resources(mgr, mstb,
2447 port);
2448 if (ret == 1)
2449 changed = true;
2450 } else {
2451 port->full_pbn = 0;
2452 }
2453 }
2454
2455 ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
2456 if (ret == 1) {
2457 send_link_addr = true;
2458 } else if (ret < 0) {
2459 drm_err(dev, "Failed to change PDT on port %p: %d\n", port, ret);
2460 goto fail;
2461 }
2462
 /*
  * If this port wasn't just created, then we're reprobing because
  * we're coming out of suspend. In this case, always resend the link
  * address if there's an MSTB on this port
  */
2468 if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
2469 port->mcs)
2470 send_link_addr = true;
2471
2472 if (port->connector)
2473 drm_modeset_unlock(&mgr->base.lock);
2474 else if (!port->input)
2475 drm_dp_mst_port_add_connector(mstb, port);
2476
2477 if (send_link_addr && port->mstb) {
2478 ret = drm_dp_send_link_address(mgr, port->mstb);
2479 if (ret == 1)
2480 changed = true;
2481 else if (ret < 0)
2482 goto fail_put;
2483 }
2484
2485
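 /* put reference to this port */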
2486 drm_dp_mst_topology_put_port(port);
2487 return changed;
2488
2489fail:
2490 drm_dp_mst_topology_unlink_port(mgr, port);
2491 if (port->connector)
2492 drm_modeset_unlock(&mgr->base.lock);
2493fail_put:
2494 drm_dp_mst_topology_put_port(port);
2495 return ret;
2496}
2497
2498static void
2499drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
2500 struct drm_dp_connection_status_notify *conn_stat)
2501{
2502 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
2503 struct drm_dp_mst_port *port;
2504 int old_ddps, ret;
2505 u8 new_pdt;
2506 bool new_mcs;
2507 bool dowork = false, create_connector = false;
2508
2509 port = drm_dp_get_port(mstb, conn_stat->port_number);
2510 if (!port)
2511 return;
2512
2513 if (port->connector) {
2514 if (!port->input && conn_stat->input_port) {
 /*
  * We can't remove a connector from an already exposed
  * port, so just throw the port out and make sure we
  * reprobe the link address of its parent MSTB
  */
2520 drm_dp_mst_topology_unlink_port(mgr, port);
2521 mstb->link_address_sent = false;
2522 dowork = true;
2523 goto out;
2524 }
2525
2526
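 /* Locking is only needed if the port's exposed to userspace */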
2527 drm_modeset_lock(&mgr->base.lock, NULL);
2528 } else if (port->input && !conn_stat->input_port) {
2529 create_connector = true;
2530
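 /* Reprobe link address so we get num_sdp_streams and such */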
2531 mstb->link_address_sent = false;
2532 dowork = true;
2533 }
2534
2535 old_ddps = port->ddps;
2536 port->input = conn_stat->input_port;
2537 port->ldps = conn_stat->legacy_device_plug_status;
2538 port->ddps = conn_stat->displayport_device_plug_status;
2539
2540 if (old_ddps != port->ddps) {
2541 if (port->ddps && !port->input)
2542 drm_dp_send_enum_path_resources(mgr, mstb, port);
2543 else
2544 port->full_pbn = 0;
2545 }
2546
2547 new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
2548 new_mcs = conn_stat->message_capability_status;
2549 ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
2550 if (ret == 1) {
2551 dowork = true;
2552 } else if (ret < 0) {
2553 drm_err(mgr->dev, "Failed to change PDT for port %p: %d\n", port, ret);
2554 dowork = false;
2555 }
2556
2557 if (port->connector)
2558 drm_modeset_unlock(&mgr->base.lock);
2559 else if (create_connector)
2560 drm_dp_mst_port_add_connector(mstb, port);
2561
2562out:
2563 drm_dp_mst_topology_put_port(port);
2564 if (dowork)
2565 queue_work(system_long_wq, &mstb->mgr->work);
2566}
2567
2568static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
2569 u8 lct, u8 *rad)
2570{
2571 struct drm_dp_mst_branch *mstb;
2572 struct drm_dp_mst_port *port;
2573 int i, ret;
2574
2575
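 /* find the port by iterating down */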
2576 mutex_lock(&mgr->lock);
2577 mstb = mgr->mst_primary;
2578
2579 if (!mstb)
2580 goto out;
2581
2582 for (i = 0; i < lct - 1; i++) {
2583 int shift = (i % 2) ? 0 : 4;
2584 int port_num = (rad[i / 2] >> shift) & 0xf;
2585
2586 list_for_each_entry(port, &mstb->ports, next) {
2587 if (port->port_num == port_num) {
2588 mstb = port->mstb;
2589 if (!mstb) {
2590 drm_err(mgr->dev,
2591 "failed to lookup MSTB with lct %d, rad %02x\n",
2592 lct, rad[0]);
2593 goto out;
2594 }
2595
2596 break;
2597 }
2598 }
2599 }
2600 ret = drm_dp_mst_topology_try_get_mstb(mstb);
2601 if (!ret)
2602 mstb = NULL;
2603out:
2604 mutex_unlock(&mgr->lock);
2605 return mstb;
2606}
2607
2608static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
2609 struct drm_dp_mst_branch *mstb,
2610 const uint8_t *guid)
2611{
2612 struct drm_dp_mst_branch *found_mstb;
2613 struct drm_dp_mst_port *port;
2614
2615 if (memcmp(mstb->guid, guid, 16) == 0)
2616 return mstb;
2617
2618
2619 list_for_each_entry(port, &mstb->ports, next) {
2620 if (!port->mstb)
2621 continue;
2622
2623 found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
2624
2625 if (found_mstb)
2626 return found_mstb;
2627 }
2628
2629 return NULL;
2630}
2631
2632static struct drm_dp_mst_branch *
2633drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
2634 const uint8_t *guid)
2635{
2636 struct drm_dp_mst_branch *mstb;
2637 int ret;
2638
2639
2640 mutex_lock(&mgr->lock);
2641
2642 mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
2643 if (mstb) {
2644 ret = drm_dp_mst_topology_try_get_mstb(mstb);
2645 if (!ret)
2646 mstb = NULL;
2647 }
2648
2649 mutex_unlock(&mgr->lock);
2650 return mstb;
2651}
2652
2653static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
2654 struct drm_dp_mst_branch *mstb)
2655{
2656 struct drm_dp_mst_port *port;
2657 int ret;
2658 bool changed = false;
2659
2660 if (!mstb->link_address_sent) {
2661 ret = drm_dp_send_link_address(mgr, mstb);
2662 if (ret == 1)
2663 changed = true;
2664 else if (ret < 0)
2665 return ret;
2666 }
2667
2668 list_for_each_entry(port, &mstb->ports, next) {
2669 struct drm_dp_mst_branch *mstb_child = NULL;
2670
2671 if (port->input || !port->ddps)
2672 continue;
2673
2674 if (port->mstb)
2675 mstb_child = drm_dp_mst_topology_get_mstb_validated(
2676 mgr, port->mstb);
2677
2678 if (mstb_child) {
2679 ret = drm_dp_check_and_send_link_address(mgr,
2680 mstb_child);
2681 drm_dp_mst_topology_put_mstb(mstb_child);
2682 if (ret == 1)
2683 changed = true;
2684 else if (ret < 0)
2685 return ret;
2686 }
2687 }
2688
2689 return changed;
2690}
2691
2692static void drm_dp_mst_link_probe_work(struct work_struct *work)
2693{
2694 struct drm_dp_mst_topology_mgr *mgr =
2695 container_of(work, struct drm_dp_mst_topology_mgr, work);
2696 struct drm_device *dev = mgr->dev;
2697 struct drm_dp_mst_branch *mstb;
2698 int ret;
2699 bool clear_payload_id_table;
2700
2701 mutex_lock(&mgr->probe_lock);
2702
2703 mutex_lock(&mgr->lock);
2704 clear_payload_id_table = !mgr->payload_id_table_cleared;
2705 mgr->payload_id_table_cleared = true;
2706
2707 mstb = mgr->mst_primary;
2708 if (mstb) {
2709 ret = drm_dp_mst_topology_try_get_mstb(mstb);
2710 if (!ret)
2711 mstb = NULL;
2712 }
2713 mutex_unlock(&mgr->lock);
2714 if (!mstb) {
2715 mutex_unlock(&mgr->probe_lock);
2716 return;
2717 }
2718
 /*
  * Certain branch devices seem to incorrectly report an available_pbn
  * of 0 on downstream sinks, even after clearing the
  * DP_PAYLOAD_ALLOCATE_* registers in
  * drm_dp_mst_topology_mgr_set_mst(). Namely, the CableMatters USB-C
  * 2x DP hub. Sending a CLEAR_PAYLOAD_ID_TABLE message seems to make
  * things work again.
  */
2727 if (clear_payload_id_table) {
2728 drm_dbg_kms(dev, "Clearing payload ID table\n");
2729 drm_dp_send_clear_payload_id_table(mgr, mstb);
2730 }
2731
2732 ret = drm_dp_check_and_send_link_address(mgr, mstb);
2733 drm_dp_mst_topology_put_mstb(mstb);
2734
2735 mutex_unlock(&mgr->probe_lock);
2736 if (ret > 0)
2737 drm_kms_helper_hotplug_event(dev);
2738}
2739
2740static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
2741 u8 *guid)
2742{
2743 u64 salt;
2744
2745 if (memchr_inv(guid, 0, 16))
2746 return true;
2747
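 /* GUID was all zeroes - fabricate one from the current jiffies counter */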
2748 salt = get_jiffies_64();
2749
2750 memcpy(&guid[0], &salt, sizeof(u64));
2751 memcpy(&guid[8], &salt, sizeof(u64));
2752
2753 return false;
2754}
2755
2756static void build_dpcd_read(struct drm_dp_sideband_msg_tx *msg,
2757 u8 port_num, u32 offset, u8 num_bytes)
2758{
2759 struct drm_dp_sideband_msg_req_body req;
2760
2761 req.req_type = DP_REMOTE_DPCD_READ;
2762 req.u.dpcd_read.port_number = port_num;
2763 req.u.dpcd_read.dpcd_address = offset;
2764 req.u.dpcd_read.num_bytes = num_bytes;
2765 drm_dp_encode_sideband_req(&req, msg);
2766}
2767
2768static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
2769 bool up, u8 *msg, int len)
2770{
2771 int ret;
2772 int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
2773 int tosend, total, offset;
2774 int retries = 0;
2775
2776retry:
2777 total = len;
2778 offset = 0;
2779 do {
2780 tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
2781
2782 ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
2783 &msg[offset],
2784 tosend);
2785 if (ret != tosend) {
2786 if (ret == -EIO && retries < 5) {
2787 retries++;
2788 goto retry;
2789 }
2790 drm_dbg_kms(mgr->dev, "failed to dpcd write %d %d\n", tosend, ret);
2791
2792 return -EIO;
2793 }
2794 offset += tosend;
2795 total -= tosend;
2796 } while (total > 0);
2797 return 0;
2798}
2799
2800static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
2801 struct drm_dp_sideband_msg_tx *txmsg)
2802{
2803 struct drm_dp_mst_branch *mstb = txmsg->dst;
2804 u8 req_type;
2805
2806 req_type = txmsg->msg[0] & 0x7f;
2807 if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
2808 req_type == DP_RESOURCE_STATUS_NOTIFY ||
2809 req_type == DP_CLEAR_PAYLOAD_ID_TABLE)
2810 hdr->broadcast = 1;
2811 else
2812 hdr->broadcast = 0;
2813 hdr->path_msg = txmsg->path_msg;
2814 if (hdr->broadcast) {
2815 hdr->lct = 1;
2816 hdr->lcr = 6;
2817 } else {
2818 hdr->lct = mstb->lct;
2819 hdr->lcr = mstb->lct - 1;
2820 }
2821
2822 memcpy(hdr->rad, mstb->rad, hdr->lct / 2);
2823
2824 return 0;
2825}
2826
/*
 * process a single block of the next message in the sideband queue
 */
2829static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
2830 struct drm_dp_sideband_msg_tx *txmsg,
2831 bool up)
2832{
2833 u8 chunk[48];
2834 struct drm_dp_sideband_msg_hdr hdr;
2835 int len, space, idx, tosend;
2836 int ret;
2837
2838 if (txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
2839 return 0;
2840
2841 memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
2842
2843 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED)
2844 txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
2845
2846
2847 ret = set_hdr_from_dst_qlock(&hdr, txmsg);
2848 if (ret < 0)
2849 return ret;
2850
2851
2852 len = txmsg->cur_len - txmsg->cur_offset;
2853
2854
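 /* 48-byte chunk, minus 1 byte for the data CRC and minus the header size */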
2855 space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);
2856
2857 tosend = min(len, space);
2858 if (len == txmsg->cur_len)
2859 hdr.somt = 1;
2860 if (space >= len)
2861 hdr.eomt = 1;
2862
2863
2864 hdr.msg_len = tosend + 1;
2865 drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
2866 memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
2867
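 /* add the data CRC at the end of this chunk */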
2868 drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
2869 idx += tosend + 1;
2870
2871 ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
2872 if (ret) {
2873 if (drm_debug_enabled(DRM_UT_DP)) {
2874 struct drm_printer p = drm_debug_printer(DBG_PREFIX);
2875
2876 drm_printf(&p, "sideband msg failed to send\n");
2877 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
2878 }
2879 return ret;
2880 }
2881
2882 txmsg->cur_offset += tosend;
2883 if (txmsg->cur_offset == txmsg->cur_len) {
2884 txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
2885 return 1;
2886 }
2887 return 0;
2888}
2889
2890static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
2891{
2892 struct drm_dp_sideband_msg_tx *txmsg;
2893 int ret;
2894
2895 WARN_ON(!mutex_is_locked(&mgr->qlock));
2896
2897
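 /* construct a chunk from the first msg in the tx_msg queue */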
2898 if (list_empty(&mgr->tx_msg_downq))
2899 return;
2900
2901 txmsg = list_first_entry(&mgr->tx_msg_downq,
2902 struct drm_dp_sideband_msg_tx, next);
2903 ret = process_single_tx_qlock(mgr, txmsg, false);
2904 if (ret < 0) {
2905 drm_dbg_kms(mgr->dev, "failed to send msg in q %d\n", ret);
2906 list_del(&txmsg->next);
2907 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
2908 wake_up_all(&mgr->tx_waitq);
2909 }
2910}
2911
2912static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
2913 struct drm_dp_sideband_msg_tx *txmsg)
2914{
2915 mutex_lock(&mgr->qlock);
2916 list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
2917
2918 if (drm_debug_enabled(DRM_UT_DP)) {
2919 struct drm_printer p = drm_debug_printer(DBG_PREFIX);
2920
2921 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
2922 }
2923
2924 if (list_is_singular(&mgr->tx_msg_downq))
2925 process_single_down_tx_qlock(mgr);
2926 mutex_unlock(&mgr->qlock);
2927}
2928
2929static void
2930drm_dp_dump_link_address(const struct drm_dp_mst_topology_mgr *mgr,
2931 struct drm_dp_link_address_ack_reply *reply)
2932{
2933 struct drm_dp_link_addr_reply_port *port_reply;
2934 int i;
2935
2936 for (i = 0; i < reply->nports; i++) {
2937 port_reply = &reply->ports[i];
2938 drm_dbg_kms(mgr->dev,
2939 "port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n",
2940 i,
2941 port_reply->input_port,
2942 port_reply->peer_device_type,
2943 port_reply->port_number,
2944 port_reply->dpcd_revision,
2945 port_reply->mcs,
2946 port_reply->ddps,
2947 port_reply->legacy_device_plug_status,
2948 port_reply->num_sdp_streams,
2949 port_reply->num_sdp_stream_sinks);
2950 }
2951}
2952
2953static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
2954 struct drm_dp_mst_branch *mstb)
2955{
2956 struct drm_dp_sideband_msg_tx *txmsg;
2957 struct drm_dp_link_address_ack_reply *reply;
2958 struct drm_dp_mst_port *port, *tmp;
2959 int i, ret, port_mask = 0;
2960 bool changed = false;
2961
2962 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2963 if (!txmsg)
2964 return -ENOMEM;
2965
2966 txmsg->dst = mstb;
2967 build_link_address(txmsg);
2968
2969 mstb->link_address_sent = true;
2970 drm_dp_queue_down_tx(mgr, txmsg);
2971
2972
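 /* FIXME: Actually do some real error handling here */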
2973 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2974 if (ret <= 0) {
2975 drm_err(mgr->dev, "Sending link address failed with %d\n", ret);
2976 goto out;
2977 }
2978 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
2979 drm_err(mgr->dev, "link address NAK received\n");
2980 ret = -EIO;
2981 goto out;
2982 }
2983
2984 reply = &txmsg->reply.u.link_addr;
2985 drm_dbg_kms(mgr->dev, "link address reply: %d\n", reply->nports);
2986 drm_dp_dump_link_address(mgr, reply);
2987
2988 ret = drm_dp_check_mstb_guid(mstb, reply->guid);
2989 if (ret) {
2990 char buf[64];
2991
2992 drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, buf, sizeof(buf));
2993 drm_err(mgr->dev, "GUID check on %s failed: %d\n", buf, ret);
2994 goto out;
2995 }
2996
2997 for (i = 0; i < reply->nports; i++) {
2998 port_mask |= BIT(reply->ports[i].port_number);
2999 ret = drm_dp_mst_handle_link_address_port(mstb, mgr->dev,
3000 &reply->ports[i]);
3001 if (ret == 1)
3002 changed = true;
3003 else if (ret < 0)
3004 goto out;
3005 }
3006
 /*
  * Prune any ports that are currently a part of mstb in our in-memory
  * topology, but were not seen in this link address. Usually this
  * means that they were removed while the topology was out of sync,
  * e.g. during suspend/resume
  */
3012 mutex_lock(&mgr->lock);
3013 list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
3014 if (port_mask & BIT(port->port_num))
3015 continue;
3016
3017 drm_dbg_kms(mgr->dev, "port %d was not in link address, removing\n",
3018 port->port_num);
3019 list_del(&port->next);
3020 drm_dp_mst_topology_put_port(port);
3021 changed = true;
3022 }
3023 mutex_unlock(&mgr->lock);
3024
3025out:
3026 if (ret <= 0)
3027 mstb->link_address_sent = false;
3028 kfree(txmsg);
3029 return ret < 0 ? ret : changed;
3030}
3031
3032static void
3033drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
3034 struct drm_dp_mst_branch *mstb)
3035{
3036 struct drm_dp_sideband_msg_tx *txmsg;
3037 int ret;
3038
3039 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3040 if (!txmsg)
3041 return;
3042
3043 txmsg->dst = mstb;
3044 build_clear_payload_id_table(txmsg);
3045
3046 drm_dp_queue_down_tx(mgr, txmsg);
3047
3048 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3049 if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3050 drm_dbg_kms(mgr->dev, "clear payload table id nak received\n");
3051
3052 kfree(txmsg);
3053}
3054
3055static int
3056drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
3057 struct drm_dp_mst_branch *mstb,
3058 struct drm_dp_mst_port *port)
3059{
3060 struct drm_dp_enum_path_resources_ack_reply *path_res;
3061 struct drm_dp_sideband_msg_tx *txmsg;
3062 int ret;
3063
3064 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3065 if (!txmsg)
3066 return -ENOMEM;
3067
3068 txmsg->dst = mstb;
3069 build_enum_path_resources(txmsg, port->port_num);
3070
3071 drm_dp_queue_down_tx(mgr, txmsg);
3072
3073 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3074 if (ret > 0) {
3075 ret = 0;
3076 path_res = &txmsg->reply.u.path_resources;
3077
3078 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
3079 drm_dbg_kms(mgr->dev, "enum path resources nak received\n");
3080 } else {
 if (port->port_num != path_res->port_number)
 drm_err(mgr->dev, "got incorrect port in response\n");
3083
3084 drm_dbg_kms(mgr->dev, "enum path resources %d: %d %d\n",
3085 path_res->port_number,
3086 path_res->full_payload_bw_number,
3087 path_res->avail_payload_bw_number);
3088
3089
3090
3091
3092
3093 if (port->full_pbn != path_res->full_payload_bw_number ||
3094 port->fec_capable != path_res->fec_capable)
3095 ret = 1;
3096
3097 port->full_pbn = path_res->full_payload_bw_number;
3098 port->fec_capable = path_res->fec_capable;
3099 }
3100 }
3101
3102 kfree(txmsg);
3103 return ret;
3104}
3105
3106static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
3107{
3108 if (!mstb->port_parent)
3109 return NULL;
3110
3111 if (mstb->port_parent->mstb != mstb)
3112 return mstb->port_parent;
3113
3114 return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
3115}
3116
/*
 * Searches upwards in the topology starting from mstb to try to find the
 * closest available parent of mstb that's still connected to the rest of the
 * topology. This can be used in order to perform operations like releasing
 * payloads, where the branch device which owned the payload may no longer be
 * around and thus would require that the payload on the last living relative
 * be freed instead.
 */
3125static struct drm_dp_mst_branch *
3126drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
3127 struct drm_dp_mst_branch *mstb,
3128 int *port_num)
3129{
3130 struct drm_dp_mst_branch *rmstb = NULL;
3131 struct drm_dp_mst_port *found_port;
3132
3133 mutex_lock(&mgr->lock);
3134 if (!mgr->mst_primary)
3135 goto out;
3136
3137 do {
3138 found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
3139 if (!found_port)
3140 break;
3141
3142 if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) {
3143 rmstb = found_port->parent;
3144 *port_num = found_port->port_num;
3145 } else {
3146
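 /* Search again, starting from this parent */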
3147 mstb = found_port->parent;
3148 }
3149 } while (!rmstb);
3150out:
3151 mutex_unlock(&mgr->lock);
3152 return rmstb;
3153}
3154
3155static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
3156 struct drm_dp_mst_port *port,
3157 int id,
3158 int pbn)
3159{
3160 struct drm_dp_sideband_msg_tx *txmsg;
3161 struct drm_dp_mst_branch *mstb;
3162 int ret, port_num;
3163 u8 sinks[DRM_DP_MAX_SDP_STREAMS];
3164 int i;
3165
3166 port_num = port->port_num;
3167 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3168 if (!mstb) {
3169 mstb = drm_dp_get_last_connected_port_and_mstb(mgr,
3170 port->parent,
3171 &port_num);
3172
3173 if (!mstb)
3174 return -EINVAL;
3175 }
3176
3177 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3178 if (!txmsg) {
3179 ret = -ENOMEM;
3180 goto fail_put;
3181 }
3182
3183 for (i = 0; i < port->num_sdp_streams; i++)
3184 sinks[i] = i;
3185
3186 txmsg->dst = mstb;
3187 build_allocate_payload(txmsg, port_num,
3188 id,
3189 pbn, port->num_sdp_streams, sinks);
3190
3191 drm_dp_queue_down_tx(mgr, txmsg);
3192
 /*
  * FIXME: there is a small chance that between getting the last
  * connected mstb and sending the payload message, the last connected
  * mstb could also be removed from the topology. In the future, this
  * needs to be fixed by restarting the
  * drm_dp_get_last_connected_port_and_mstb() search in the event of a
  * timeout if the topology is still connected to the system.
  */
3201 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3202 if (ret > 0) {
3203 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3204 ret = -EINVAL;
3205 else
3206 ret = 0;
3207 }
3208 kfree(txmsg);
3209fail_put:
3210 drm_dp_mst_topology_put_mstb(mstb);
3211 return ret;
3212}
3213
3214int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
3215 struct drm_dp_mst_port *port, bool power_up)
3216{
3217 struct drm_dp_sideband_msg_tx *txmsg;
3218 int ret;
3219
3220 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3221 if (!port)
3222 return -EINVAL;
3223
3224 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3225 if (!txmsg) {
3226 drm_dp_mst_topology_put_port(port);
3227 return -ENOMEM;
3228 }
3229
3230 txmsg->dst = port->parent;
3231 build_power_updown_phy(txmsg, port->port_num, power_up);
3232 drm_dp_queue_down_tx(mgr, txmsg);
3233
3234 ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
3235 if (ret > 0) {
3236 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3237 ret = -EINVAL;
3238 else
3239 ret = 0;
3240 }
3241 kfree(txmsg);
3242 drm_dp_mst_topology_put_port(port);
3243
3244 return ret;
3245}
3246EXPORT_SYMBOL(drm_dp_send_power_updown_phy);
3247
3248int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
3249 struct drm_dp_mst_port *port,
3250 struct drm_dp_query_stream_enc_status_ack_reply *status)
3251{
3252 struct drm_dp_sideband_msg_tx *txmsg;
3253 u8 nonce[7];
3254 int ret;
3255
3256 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3257 if (!txmsg)
3258 return -ENOMEM;
3259
3260 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3261 if (!port) {
3262 ret = -EINVAL;
3263 goto out_get_port;
3264 }
3265
3266 get_random_bytes(nonce, sizeof(nonce));
3267
 /*
  * "Source device targets the QUERY_STREAM_ENCRYPTION_STATUS message
  *  transaction at the MST Branch device directly connected to the
  *  Source"
  */
3273 txmsg->dst = mgr->mst_primary;
3274
3275 build_query_stream_enc_status(txmsg, port->vcpi.vcpi, nonce);
3276
3277 drm_dp_queue_down_tx(mgr, txmsg);
3278
3279 ret = drm_dp_mst_wait_tx_reply(mgr->mst_primary, txmsg);
3280 if (ret < 0) {
3281 goto out;
3282 } else if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
3283 drm_dbg_kms(mgr->dev, "query encryption status nak received\n");
3284 ret = -ENXIO;
3285 goto out;
3286 }
3287
3288 ret = 0;
3289 memcpy(status, &txmsg->reply.u.enc_status, sizeof(*status));
3290
3291out:
3292 drm_dp_mst_topology_put_port(port);
3293out_get_port:
3294 kfree(txmsg);
3295 return ret;
3296}
3297EXPORT_SYMBOL(drm_dp_send_query_stream_enc_status);
3298
3299static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
3300 int id,
3301 struct drm_dp_payload *payload)
3302{
3303 int ret;
3304
3305 ret = drm_dp_dpcd_write_payload(mgr, id, payload);
3306 if (ret < 0) {
3307 payload->payload_state = 0;
3308 return ret;
3309 }
3310 payload->payload_state = DP_PAYLOAD_LOCAL;
3311 return 0;
3312}
3313
3314static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
3315 struct drm_dp_mst_port *port,
3316 int id,
3317 struct drm_dp_payload *payload)
3318{
3319 int ret;
3320
3321 ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
3322 if (ret < 0)
3323 return ret;
3324 payload->payload_state = DP_PAYLOAD_REMOTE;
3325 return ret;
3326}
3327
3328static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
3329 struct drm_dp_mst_port *port,
3330 int id,
3331 struct drm_dp_payload *payload)
3332{
3333 drm_dbg_kms(mgr->dev, "\n");
3334
 if (port)
 drm_dp_payload_send_msg(mgr, port, id, 0);
3338
3339 drm_dp_dpcd_write_payload(mgr, id, payload);
3340 payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
3341 return 0;
3342}
3343
3344static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
3345 int id,
3346 struct drm_dp_payload *payload)
3347{
3348 payload->payload_state = 0;
3349 return 0;
3350}
3351
/**
 * drm_dp_update_payload_part1() - Execute payload update part 1
 * @mgr: manager to use.
 * @start_slot: this is the cur slot
 *
 * NOTE: start_slot is a temporary workaround for non-atomic drivers,
 * sometimes we set it to 0, and sometimes we set it to 1
 *
 * This iterates over all proposed virtual channels, and tries to
 * allocate space in the link for them. For 0->slots transitions,
 * this step just writes the VCPI to the MST device. For slots->0
 * transitions, this writes the updated VCPIs and removes the
 * remote VC payloads.
 *
 * after calling this the driver should generate ACT and payload
 * packets.
 */
3369int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr, int start_slot)
3370{
3371 struct drm_dp_payload req_payload;
3372 struct drm_dp_mst_port *port;
3373 int i, j;
3374 int cur_slots = start_slot;
3375 bool skip;
3376
3377 mutex_lock(&mgr->payload_lock);
3378 for (i = 0; i < mgr->max_payloads; i++) {
3379 struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
3380 struct drm_dp_payload *payload = &mgr->payloads[i];
3381 bool put_port = false;
3382
3383
3384
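 /* solve the current payloads - compare to the hw ones - update the hw view */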
3385 req_payload.start_slot = cur_slots;
3386 if (vcpi) {
3387 port = container_of(vcpi, struct drm_dp_mst_port,
3388 vcpi);
3389
3390 mutex_lock(&mgr->lock);
3391 skip = !drm_dp_mst_port_downstream_of_branch(port, mgr->mst_primary);
3392 mutex_unlock(&mgr->lock);
3393
3394 if (skip) {
3395 drm_dbg_kms(mgr->dev,
3396 "Virtual channel %d is not in current topology\n",
3397 i);
3398 continue;
3399 }
3400
3401
3402
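 /* Validated ports don't matter if we're releasing VCPI */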
3403 if (vcpi->num_slots) {
3404 port = drm_dp_mst_topology_get_port_validated(
3405 mgr, port);
3406 if (!port) {
3407 if (vcpi->num_slots == payload->num_slots) {
3408 cur_slots += vcpi->num_slots;
3409 payload->start_slot = req_payload.start_slot;
3410 continue;
3411 } else {
3412 drm_dbg_kms(mgr->dev,
3413 "Fail:set payload to invalid sink");
3414 mutex_unlock(&mgr->payload_lock);
3415 return -EINVAL;
3416 }
3417 }
3418 put_port = true;
3419 }
3420
3421 req_payload.num_slots = vcpi->num_slots;
3422 req_payload.vcpi = vcpi->vcpi;
3423 } else {
3424 port = NULL;
3425 req_payload.num_slots = 0;
3426 }
3427
3428 payload->start_slot = req_payload.start_slot;
3429
3430 if (payload->num_slots != req_payload.num_slots) {
3431
3432
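 /* need to push an update for this payload */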
3433 if (req_payload.num_slots) {
3434 drm_dp_create_payload_step1(mgr, vcpi->vcpi,
3435 &req_payload);
3436 payload->num_slots = req_payload.num_slots;
3437 payload->vcpi = req_payload.vcpi;
3438
3439 } else if (payload->num_slots) {
3440 payload->num_slots = 0;
3441 drm_dp_destroy_payload_step1(mgr, port,
3442 payload->vcpi,
3443 payload);
3444 req_payload.payload_state =
3445 payload->payload_state;
3446 payload->start_slot = 0;
3447 }
3448 payload->payload_state = req_payload.payload_state;
3449 }
3450 cur_slots += req_payload.num_slots;
3451
3452 if (put_port)
3453 drm_dp_mst_topology_put_port(port);
3454 }
3455
3456 for (i = 0; i < mgr->max_payloads; ) {
3457 if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) {
3458 i++;
3459 continue;
3460 }
3461
3462 drm_dbg_kms(mgr->dev, "removing payload %d\n", i);
3463 for (j = i; j < mgr->max_payloads - 1; j++) {
3464 mgr->payloads[j] = mgr->payloads[j + 1];
3465 mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
3466
3467 if (mgr->proposed_vcpis[j] &&
3468 mgr->proposed_vcpis[j]->num_slots) {
3469 set_bit(j + 1, &mgr->payload_mask);
3470 } else {
3471 clear_bit(j + 1, &mgr->payload_mask);
3472 }
3473 }
3474
3475 memset(&mgr->payloads[mgr->max_payloads - 1], 0,
3476 sizeof(struct drm_dp_payload));
3477 mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
3478 clear_bit(mgr->max_payloads, &mgr->payload_mask);
3479 }
3480 mutex_unlock(&mgr->payload_lock);
3481
3482 return 0;
3483}
3484EXPORT_SYMBOL(drm_dp_update_payload_part1);
3485
/**
 * drm_dp_update_payload_part2() - Execute payload update part 2
 * @mgr: manager to use.
 *
 * This iterates over all proposed virtual channels, and tries to
 * allocate space in the link for them. For 0->slots transitions,
 * this step writes the remote VC payload commands. For slots->0
 * this just resets some internal state.
 */
3495int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
3496{
3497 struct drm_dp_mst_port *port;
3498 int i;
3499 int ret = 0;
3500 bool skip;
3501
3502 mutex_lock(&mgr->payload_lock);
3503 for (i = 0; i < mgr->max_payloads; i++) {
3504
3505 if (!mgr->proposed_vcpis[i])
3506 continue;
3507
3508 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
3509
3510 mutex_lock(&mgr->lock);
3511 skip = !drm_dp_mst_port_downstream_of_branch(port, mgr->mst_primary);
3512 mutex_unlock(&mgr->lock);
3513
3514 if (skip)
3515 continue;
3516
3517 drm_dbg_kms(mgr->dev, "payload %d %d\n", i, mgr->payloads[i].payload_state);
3518 if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
3519 ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
3520 } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
3521 ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
3522 }
3523 if (ret) {
3524 mutex_unlock(&mgr->payload_lock);
3525 return ret;
3526 }
3527 }
3528 mutex_unlock(&mgr->payload_lock);
3529 return 0;
3530}
3531EXPORT_SYMBOL(drm_dp_update_payload_part2);
3532
3533static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
3534 struct drm_dp_mst_port *port,
3535 int offset, int size, u8 *bytes)
3536{
3537 int ret = 0;
3538 struct drm_dp_sideband_msg_tx *txmsg;
3539 struct drm_dp_mst_branch *mstb;
3540
3541 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3542 if (!mstb)
3543 return -EINVAL;
3544
3545 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3546 if (!txmsg) {
3547 ret = -ENOMEM;
3548 goto fail_put;
3549 }
3550
3551 build_dpcd_read(txmsg, port->port_num, offset, size);
3552 txmsg->dst = port->parent;
3553
3554 drm_dp_queue_down_tx(mgr, txmsg);
3555
3556 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3557 if (ret < 0)
3558 goto fail_free;
3559
3560
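 /* DPCD read should never be NAKed */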
 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
3562 drm_err(mgr->dev, "mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n",
3563 mstb, port->port_num, offset, size);
3564 ret = -EIO;
3565 goto fail_free;
3566 }
3567
3568 if (txmsg->reply.u.remote_dpcd_read_ack.num_bytes != size) {
3569 ret = -EPROTO;
3570 goto fail_free;
3571 }
3572
3573 ret = min_t(size_t, txmsg->reply.u.remote_dpcd_read_ack.num_bytes,
3574 size);
3575 memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, ret);
3576
3577fail_free:
3578 kfree(txmsg);
3579fail_put:
3580 drm_dp_mst_topology_put_mstb(mstb);
3581
3582 return ret;
3583}
3584
3585static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
3586 struct drm_dp_mst_port *port,
3587 int offset, int size, u8 *bytes)
3588{
3589 int ret;
3590 struct drm_dp_sideband_msg_tx *txmsg;
3591 struct drm_dp_mst_branch *mstb;
3592
3593 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3594 if (!mstb)
3595 return -EINVAL;
3596
3597 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3598 if (!txmsg) {
3599 ret = -ENOMEM;
3600 goto fail_put;
3601 }
3602
3603 build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
3604 txmsg->dst = mstb;
3605
3606 drm_dp_queue_down_tx(mgr, txmsg);
3607
3608 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3609 if (ret > 0) {
3610 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3611 ret = -EIO;
3612 else
3613 ret = size;
3614 }
3615
3616 kfree(txmsg);
3617fail_put:
3618 drm_dp_mst_topology_put_mstb(mstb);
3619 return ret;
3620}
3621
3622static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
3623{
3624 struct drm_dp_sideband_msg_reply_body reply;
3625
3626 reply.reply_type = DP_SIDEBAND_REPLY_ACK;
3627 reply.req_type = req_type;
3628 drm_dp_encode_sideband_reply(&reply, msg);
3629 return 0;
3630}
3631
3632static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
3633 struct drm_dp_mst_branch *mstb,
3634 int req_type, bool broadcast)
3635{
3636 struct drm_dp_sideband_msg_tx *txmsg;
3637
3638 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3639 if (!txmsg)
3640 return -ENOMEM;
3641
3642 txmsg->dst = mstb;
3643 drm_dp_encode_up_ack_reply(txmsg, req_type);
3644
3645 mutex_lock(&mgr->qlock);
3646
3647 process_single_tx_qlock(mgr, txmsg, true);
3648 mutex_unlock(&mgr->qlock);
3649
3650 kfree(txmsg);
3651 return 0;
3652}
3653
/**
 * drm_dp_get_vc_payload_bw - get the VC payload BW for an MST link
 * @mgr: MST topology manager for the port
 * @link_rate: link rate in 10kbits/s units
 * @link_lane_count: lane count
 *
 * Calculate the total bandwidth of a MultiStream Transport link. The returned
 * value is in units of PBNs/(timeslots/1 MTP). This value can be used to
 * calculate the required payload size for certain video modes.
 */
3665int drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
3666 int link_rate, int link_lane_count)
3667{
3668 if (link_rate == 0 || link_lane_count == 0)
3669 drm_dbg_kms(mgr->dev, "invalid link rate/lane count: (%d / %d)\n",
3670 link_rate, link_lane_count);
3671
3672
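 /* See DP v2.0 2.6.4.2, VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */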
3673 return link_rate * link_lane_count / 54000;
3674}
3675EXPORT_SYMBOL(drm_dp_get_vc_payload_bw);
3676
/**
 * drm_dp_read_mst_cap() - check whether or not a sink supports MST
 * @aux: The DP AUX channel to use
 * @dpcd: A cached copy of the DPCD capabilities for this sink
 *
 * Returns: %true if the sink supports MST, %false otherwise
 */
3684bool drm_dp_read_mst_cap(struct drm_dp_aux *aux,
3685 const u8 dpcd[DP_RECEIVER_CAP_SIZE])
3686{
3687 u8 mstm_cap;
3688
3689 if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_12)
3690 return false;
3691
3692 if (drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &mstm_cap) != 1)
3693 return false;
3694
3695 return mstm_cap & DP_MST_CAP;
3696}
3697EXPORT_SYMBOL(drm_dp_read_mst_cap);
3698
/**
 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
 * @mgr: manager to set state for
 * @mst_state: true to enable MST on this connector - false to disable.
 *
 * This is called by the driver when it detects an MST capable device plugged
 * into a DP MST capable port, or when a DP MST capable device is unplugged.
 */
3707int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
3708{
3709 int ret = 0;
3710 struct drm_dp_mst_branch *mstb = NULL;
3711
3712 mutex_lock(&mgr->payload_lock);
3713 mutex_lock(&mgr->lock);
3714 if (mst_state == mgr->mst_state)
3715 goto out_unlock;
3716
3717 mgr->mst_state = mst_state;
3718
3719 if (mst_state) {
3720 struct drm_dp_payload reset_pay;
3721 int lane_count;
3722 int link_rate;
3723
3724 WARN_ON(mgr->mst_primary);
3725
3726
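 /* get dpcd info */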
3727 ret = drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd);
3728 if (ret < 0) {
3729 drm_dbg_kms(mgr->dev, "%s: failed to read DPCD, ret %d\n",
3730 mgr->aux->name, ret);
3731 goto out_unlock;
3732 }
3733
3734 lane_count = min_t(int, mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK, mgr->max_lane_count);
3735 link_rate = min_t(int, drm_dp_bw_code_to_link_rate(mgr->dpcd[1]), mgr->max_link_rate);
3736 mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr,
3737 link_rate,
3738 lane_count);
3739 if (mgr->pbn_div == 0) {
3740 ret = -EINVAL;
3741 goto out_unlock;
3742 }
3743
3744
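 /* add initial branch device at LCT 1 */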
3745 mstb = drm_dp_add_mst_branch_device(1, NULL);
3746 if (mstb == NULL) {
3747 ret = -ENOMEM;
3748 goto out_unlock;
3749 }
3750 mstb->mgr = mgr;
3751
3752
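 /* give this the main reference */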
3753 mgr->mst_primary = mstb;
3754 drm_dp_mst_topology_get_mstb(mgr->mst_primary);
3755
3756 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3757 DP_MST_EN |
3758 DP_UP_REQ_EN |
3759 DP_UPSTREAM_IS_SRC);
3760 if (ret < 0)
3761 goto out_unlock;
3762
3763 reset_pay.start_slot = 0;
3764 reset_pay.num_slots = 0x3f;
3765 drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
3766
3767 queue_work(system_long_wq, &mgr->work);
3768
3769 ret = 0;
3770 } else {
3771
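 /* disable MST on the device */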
3772 mstb = mgr->mst_primary;
3773 mgr->mst_primary = NULL;
3774
3775 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
3776 ret = 0;
3777 memset(mgr->payloads, 0,
3778 mgr->max_payloads * sizeof(mgr->payloads[0]));
3779 memset(mgr->proposed_vcpis, 0,
3780 mgr->max_payloads * sizeof(mgr->proposed_vcpis[0]));
3781 mgr->payload_mask = 0;
3782 set_bit(0, &mgr->payload_mask);
3783 mgr->vcpi_mask = 0;
3784 mgr->payload_id_table_cleared = false;
3785 }
3786
3787out_unlock:
3788 mutex_unlock(&mgr->lock);
3789 mutex_unlock(&mgr->payload_lock);
3790 if (mstb)
3791 drm_dp_mst_topology_put_mstb(mstb);
3792 return ret;
3793
3794}
3795EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
3796
3797static void
3798drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb)
3799{
3800 struct drm_dp_mst_port *port;
3801
3802
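 /* The link address will need to be re-sent on resume */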
3803 mstb->link_address_sent = false;
3804
3805 list_for_each_entry(port, &mstb->ports, next)
3806 if (port->mstb)
3807 drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb);
3808}
3809
/**
 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
 * @mgr: manager to suspend
 *
 * This function tells the MST device that we can't handle UP messages
 * anymore. This should stop it from sending any since we are suspended.
 */
3817void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
3818{
3819 mutex_lock(&mgr->lock);
3820 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3821 DP_MST_EN | DP_UPSTREAM_IS_SRC);
3822 mutex_unlock(&mgr->lock);
3823 flush_work(&mgr->up_req_work);
3824 flush_work(&mgr->work);
3825 flush_work(&mgr->delayed_destroy_work);
3826
3827 mutex_lock(&mgr->lock);
3828 if (mgr->mst_state && mgr->mst_primary)
3829 drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary);
3830 mutex_unlock(&mgr->lock);
3831}
3832EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
3833
/**
 * drm_dp_mst_topology_mgr_resume() - resume the MST manager
 * @mgr: manager to resume
 * @sync: whether or not to perform topology reprobing synchronously
 *
 * This will fetch DPCD and see if the device is still there,
 * if it is, it will rewrite the MSTM control bits, and return.
 *
 * If the device fails this returns -1, and the driver should do
 * a full MST reprobe, in case we were undocked.
 *
 * During system resume (where it is assumed that the driver will be calling
 * drm_atomic_helper_resume()) this function should be called beforehand with
 * @sync set to false. In order to ensure that any outstanding topology probe
 * work has finished before the driver modesets, @sync should be set to true,
 * at the cost of this call potentially taking noticeably longer.
 *
 * Returns: -1 if the MST topology was removed while we were suspended, 0
 * otherwise.
 */
3854int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
3855 bool sync)
3856{
3857 int ret;
3858 u8 guid[16];
3859
3860 mutex_lock(&mgr->lock);
3861 if (!mgr->mst_primary)
3862 goto out_fail;
3863
3864 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd,
3865 DP_RECEIVER_CAP_SIZE);
3866 if (ret != DP_RECEIVER_CAP_SIZE) {
3867 drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
3868 goto out_fail;
3869 }
3870
3871 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3872 DP_MST_EN |
3873 DP_UP_REQ_EN |
3874 DP_UPSTREAM_IS_SRC);
3875 if (ret < 0) {
3876 drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n");
3877 goto out_fail;
3878 }
3879
3880
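 /* Some hubs forget their guids after they resume */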
3881 ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
3882 if (ret != 16) {
3883 drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
3884 goto out_fail;
3885 }
3886
3887 ret = drm_dp_check_mstb_guid(mgr->mst_primary, guid);
3888 if (ret) {
3889 drm_dbg_kms(mgr->dev, "check mstb failed - undocked during suspend?\n");
3890 goto out_fail;
3891 }
3892
 /*
  * For the final step of resuming the topology, we need to bring the
  * state of our in-memory topology back into sync with reality. So,
  * restart the probing process as if we're probing a new hub
  */
3898 queue_work(system_long_wq, &mgr->work);
3899 mutex_unlock(&mgr->lock);
3900
3901 if (sync) {
3902 drm_dbg_kms(mgr->dev,
3903 "Waiting for link probe work to finish re-syncing topology...\n");
3904 flush_work(&mgr->work);
3905 }
3906
3907 return 0;
3908
3909out_fail:
3910 mutex_unlock(&mgr->lock);
3911 return -1;
3912}
3913EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
3914
3915static bool
3916drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
3917 struct drm_dp_mst_branch **mstb)
3918{
3919 int len;
3920 u8 replyblock[32];
3921 int replylen, curreply;
3922 int ret;
3923 u8 hdrlen;
3924 struct drm_dp_sideband_msg_hdr hdr;
3925 struct drm_dp_sideband_msg_rx *msg =
3926 up ? &mgr->up_req_recv : &mgr->down_rep_recv;
3927 int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE :
3928 DP_SIDEBAND_MSG_DOWN_REP_BASE;
3929
3930 if (!up)
3931 *mstb = NULL;
3932
3933 len = min(mgr->max_dpcd_transaction_bytes, 16);
3934 ret = drm_dp_dpcd_read(mgr->aux, basereg, replyblock, len);
3935 if (ret != len) {
3936 drm_dbg_kms(mgr->dev, "failed to read DPCD down rep %d %d\n", len, ret);
3937 return false;
3938 }
3939
3940 ret = drm_dp_decode_sideband_msg_hdr(mgr, &hdr, replyblock, len, &hdrlen);
 if (!ret) {
3942 print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16,
3943 1, replyblock, len, false);
3944 drm_dbg_kms(mgr->dev, "ERROR: failed header\n");
3945 return false;
3946 }
3947
3948 if (!up) {
3949
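 /* Caller is responsible for giving back this reference */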
3950 *mstb = drm_dp_get_mst_branch_device(mgr, hdr.lct, hdr.rad);
3951 if (!*mstb) {
3952 drm_dbg_kms(mgr->dev, "Got MST reply from unknown device %d\n", hdr.lct);
3953 return false;
3954 }
3955 }
3956
3957 if (!drm_dp_sideband_msg_set_header(msg, &hdr, hdrlen)) {
3958 drm_dbg_kms(mgr->dev, "sideband msg set header failed %d\n", replyblock[0]);
3959 return false;
3960 }
3961
3962 replylen = min(msg->curchunk_len, (u8)(len - hdrlen));
3963 ret = drm_dp_sideband_append_payload(msg, replyblock + hdrlen, replylen);
3964 if (!ret) {
3965 drm_dbg_kms(mgr->dev, "sideband msg build failed %d\n", replyblock[0]);
3966 return false;
3967 }
3968
3969 replylen = msg->curchunk_len + msg->curchunk_hdrlen - len;
3970 curreply = len;
3971 while (replylen > 0) {
3972 len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
3973 ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
3974 replyblock, len);
3975 if (ret != len) {
3976 drm_dbg_kms(mgr->dev, "failed to read a chunk (len %d, ret %d)\n",
3977 len, ret);
3978 return false;
3979 }
3980
3981 ret = drm_dp_sideband_append_payload(msg, replyblock, len);
3982 if (!ret) {
3983 drm_dbg_kms(mgr->dev, "failed to build sideband msg\n");
3984 return false;
3985 }
3986
3987 curreply += len;
3988 replylen -= len;
3989 }
3990 return true;
3991}
3992
3993static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
3994{
3995 struct drm_dp_sideband_msg_tx *txmsg;
3996 struct drm_dp_mst_branch *mstb = NULL;
3997 struct drm_dp_sideband_msg_rx *msg = &mgr->down_rep_recv;
3998
3999 if (!drm_dp_get_one_sb_msg(mgr, false, &mstb))
4000 goto out;
4001
4002
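 /* Multi-packet message transmission, don't clean the updated hdr */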
4003 if (!msg->have_eomt)
4004 goto out;
4005
4006
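 /* find the message */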
4007 mutex_lock(&mgr->qlock);
4008 txmsg = list_first_entry_or_null(&mgr->tx_msg_downq,
4009 struct drm_dp_sideband_msg_tx, next);
4010 mutex_unlock(&mgr->qlock);
4011
4012
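 /* Were we actually expecting a response, and from this mstb? */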
4013 if (!txmsg || txmsg->dst != mstb) {
4014 struct drm_dp_sideband_msg_hdr *hdr;
4015
4016 hdr = &msg->initial_hdr;
4017 drm_dbg_kms(mgr->dev, "Got MST reply with no msg %p %d %d %02x %02x\n",
4018 mstb, hdr->seqno, hdr->lct, hdr->rad[0], msg->msg[0]);
4019 goto out_clear_reply;
4020 }
4021
4022 drm_dp_sideband_parse_reply(mgr, msg, &txmsg->reply);
4023
4024 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
4025 drm_dbg_kms(mgr->dev,
4026 "Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
4027 txmsg->reply.req_type,
4028 drm_dp_mst_req_type_str(txmsg->reply.req_type),
4029 txmsg->reply.u.nak.reason,
4030 drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
4031 txmsg->reply.u.nak.nak_data);
4032 }
4033
4034 memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
4035 drm_dp_mst_topology_put_mstb(mstb);
4036
4037 mutex_lock(&mgr->qlock);
4038 txmsg->state = DRM_DP_SIDEBAND_TX_RX;
4039 list_del(&txmsg->next);
4040 mutex_unlock(&mgr->qlock);
4041
4042 wake_up_all(&mgr->tx_waitq);
4043
4044 return 0;
4045
4046out_clear_reply:
4047 memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
4048out:
4049 if (mstb)
4050 drm_dp_mst_topology_put_mstb(mstb);
4051
4052 return 0;
4053}
4054
4055static inline bool
4056drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
4057 struct drm_dp_pending_up_req *up_req)
4058{
4059 struct drm_dp_mst_branch *mstb = NULL;
4060 struct drm_dp_sideband_msg_req_body *msg = &up_req->msg;
4061 struct drm_dp_sideband_msg_hdr *hdr = &up_req->hdr;
4062 bool hotplug = false;
4063
4064 if (hdr->broadcast) {
4065 const u8 *guid = NULL;
4066
4067 if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY)
4068 guid = msg->u.conn_stat.guid;
4069 else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY)
4070 guid = msg->u.resource_stat.guid;
4071
4072 if (guid)
4073 mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
4074 } else {
4075 mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
4076 }
4077
4078 if (!mstb) {
4079 drm_dbg_kms(mgr->dev, "Got MST reply from unknown device %d\n", hdr->lct);
4080 return false;
4081 }
4082
4083
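 /* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */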
4084 if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) {
4085 drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
4086 hotplug = true;
4087 }
4088
4089 drm_dp_mst_topology_put_mstb(mstb);
4090 return hotplug;
4091}
4092
4093static void drm_dp_mst_up_req_work(struct work_struct *work)
4094{
4095 struct drm_dp_mst_topology_mgr *mgr =
4096 container_of(work, struct drm_dp_mst_topology_mgr,
4097 up_req_work);
4098 struct drm_dp_pending_up_req *up_req;
4099 bool send_hotplug = false;
4100
4101 mutex_lock(&mgr->probe_lock);
4102 while (true) {
4103 mutex_lock(&mgr->up_req_lock);
4104 up_req = list_first_entry_or_null(&mgr->up_req_list,
4105 struct drm_dp_pending_up_req,
4106 next);
4107 if (up_req)
4108 list_del(&up_req->next);
4109 mutex_unlock(&mgr->up_req_lock);
4110
4111 if (!up_req)
4112 break;
4113
4114 send_hotplug |= drm_dp_mst_process_up_req(mgr, up_req);
4115 kfree(up_req);
4116 }
4117 mutex_unlock(&mgr->probe_lock);
4118
4119 if (send_hotplug)
4120 drm_kms_helper_hotplug_event(mgr->dev);
4121}
4122
4123static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
4124{
4125 struct drm_dp_pending_up_req *up_req;
4126
4127 if (!drm_dp_get_one_sb_msg(mgr, true, NULL))
4128 goto out;
4129
4130 if (!mgr->up_req_recv.have_eomt)
4131 return 0;
4132
4133 up_req = kzalloc(sizeof(*up_req), GFP_KERNEL);
4134 if (!up_req)
4135 return -ENOMEM;
4136
4137 INIT_LIST_HEAD(&up_req->next);
4138
4139 drm_dp_sideband_parse_req(mgr, &mgr->up_req_recv, &up_req->msg);
4140
4141 if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY &&
4142 up_req->msg.req_type != DP_RESOURCE_STATUS_NOTIFY) {
4143 drm_dbg_kms(mgr->dev, "Received unknown up req type, ignoring: %x\n",
4144 up_req->msg.req_type);
4145 kfree(up_req);
4146 goto out;
4147 }
4148
4149 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type,
4150 false);
4151
4152 if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
4153 const struct drm_dp_connection_status_notify *conn_stat =
4154 &up_req->msg.u.conn_stat;
4155
4156 drm_dbg_kms(mgr->dev, "Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
4157 conn_stat->port_number,
4158 conn_stat->legacy_device_plug_status,
4159 conn_stat->displayport_device_plug_status,
4160 conn_stat->message_capability_status,
4161 conn_stat->input_port,
4162 conn_stat->peer_device_type);
4163 } else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
4164 const struct drm_dp_resource_status_notify *res_stat =
4165 &up_req->msg.u.resource_stat;
4166
4167 drm_dbg_kms(mgr->dev, "Got RSN: pn: %d avail_pbn %d\n",
4168 res_stat->port_number,
4169 res_stat->available_pbn);
4170 }
4171
4172 up_req->hdr = mgr->up_req_recv.initial_hdr;
4173 mutex_lock(&mgr->up_req_lock);
4174 list_add_tail(&up_req->next, &mgr->up_req_list);
4175 mutex_unlock(&mgr->up_req_lock);
4176 queue_work(system_long_wq, &mgr->up_req_work);
4177
4178out:
4179 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
4180 return 0;
4181}
4182
/**
 * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
 * @mgr: manager to notify irq for.
 * @esi: 4 bytes from SINK_COUNT_ESI
 * @handled: whether the hpd interrupt was consumed or not
 *
 * This should be called from the driver when it detects a short IRQ,
 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The topology
 * manager will process the sideband messages received as a result of this.
 */
4194int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
4195{
4196 int ret = 0;
4197 int sc;
4198 *handled = false;
4199 sc = DP_GET_SINK_COUNT(esi[0]);
4200
4201 if (sc != mgr->sink_count) {
4202 mgr->sink_count = sc;
4203 *handled = true;
4204 }
4205
4206 if (esi[1] & DP_DOWN_REP_MSG_RDY) {
4207 ret = drm_dp_mst_handle_down_rep(mgr);
4208 *handled = true;
4209 }
4210
4211 if (esi[1] & DP_UP_REQ_MSG_RDY) {
4212 ret |= drm_dp_mst_handle_up_req(mgr);
4213 *handled = true;
4214 }
4215
4216 drm_dp_mst_kick_tx(mgr);
4217 return ret;
4218}
4219EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
4220
/**
 * drm_dp_mst_detect_port() - get connection status for an MST port
 * @connector: DRM connector for this port
 * @ctx: The acquisition context to use for grabbing locks
 * @mgr: manager for this port
 * @port: pending port check
 *
 * This returns the current connection state for a port.
 */
4230int
4231drm_dp_mst_detect_port(struct drm_connector *connector,
4232 struct drm_modeset_acquire_ctx *ctx,
4233 struct drm_dp_mst_topology_mgr *mgr,
4234 struct drm_dp_mst_port *port)
4235{
4236 int ret;
4237
4238
4239 port = drm_dp_mst_topology_get_port_validated(mgr, port);
4240 if (!port)
4241 return connector_status_disconnected;
4242
4243 ret = drm_modeset_lock(&mgr->base.lock, ctx);
4244 if (ret)
4245 goto out;
4246
4247 ret = connector_status_disconnected;
4248
4249 if (!port->ddps)
4250 goto out;
4251
4252 switch (port->pdt) {
4253 case DP_PEER_DEVICE_NONE:
4254 break;
4255 case DP_PEER_DEVICE_MST_BRANCHING:
4256 if (!port->mcs)
4257 ret = connector_status_connected;
4258 break;
4259
4260 case DP_PEER_DEVICE_SST_SINK:
4261 ret = connector_status_connected;
4262
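 /* for logical ports - cache the EDID */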
4263 if (port->port_num >= DP_MST_LOGICAL_PORT_0 && !port->cached_edid)
4264 port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
4265 break;
4266 case DP_PEER_DEVICE_DP_LEGACY_CONV:
4267 if (port->ldps)
4268 ret = connector_status_connected;
4269 break;
4270 }
4271out:
4272 drm_dp_mst_topology_put_port(port);
4273 return ret;
4274}
4275EXPORT_SYMBOL(drm_dp_mst_detect_port);
4276
/**
 * drm_dp_mst_get_edid() - get EDID for an MST port
 * @connector: toplevel connector to get EDID for
 * @mgr: manager for this port
 * @port: unverified pointer to a port
 *
 * This returns an EDID for the port connected to a connector,
 * It validates the pointer still exists so the caller doesn't require a
 * reference.
 */
4287struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
4288{
4289 struct edid *edid = NULL;
4290
4291
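 /* we need to search for the port in the mgr in case it's gone */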
4292 port = drm_dp_mst_topology_get_port_validated(mgr, port);
4293 if (!port)
4294 return NULL;
4295
 if (port->cached_edid)
 edid = drm_edid_duplicate(port->cached_edid);
 else
 edid = drm_get_edid(connector, &port->aux.ddc);
4301 port->has_audio = drm_detect_monitor_audio(edid);
4302 drm_dp_mst_topology_put_port(port);
4303 return edid;
4304}
4305EXPORT_SYMBOL(drm_dp_mst_get_edid);
4306
/**
 * drm_dp_find_vcpi_slots() - Find VCPI slots for this PBN value
 * @mgr: manager to use
 * @pbn: payload bandwidth to convert into slots.
 *
 * Calculate the number of VCPI slots that will be required for the given PBN
 * value. This function is deprecated, and should not be used in atomic
 * drivers.
 *
 * RETURNS:
 * The total slots required for this port, or error.
 */
4319int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
4320 int pbn)
4321{
4322 int num_slots;
4323
4324 num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
4325
4326
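 /* max. time slots - one slot for MTP header */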
4327 if (num_slots > 63)
4328 return -ENOSPC;
4329 return num_slots;
4330}
4331EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
4332
4333static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
4334 struct drm_dp_vcpi *vcpi, int pbn, int slots)
4335{
4336 int ret;
4337
4338 vcpi->pbn = pbn;
4339 vcpi->aligned_pbn = slots * mgr->pbn_div;
4340 vcpi->num_slots = slots;
4341
4342 ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
4343 if (ret < 0)
4344 return ret;
4345 return 0;
4346}
4347
/**
 * drm_dp_atomic_find_vcpi_slots() - Find and add VCPI slots to the state
 * @state: global atomic state
 * @mgr: MST topology manager for the port
 * @port: port to find vcpi slots for
 * @pbn: bandwidth required for the mode in PBN
 * @pbn_div: divider to calculate slot count from PBN; if zero or negative,
 * the default &drm_dp_mst_topology_mgr.pbn_div is used
 *
 * Allocates VCPI slots to @port, replacing any previous VCPI allocations it
 * may have had. Any atomic drivers which support MST must call this function
 * in their &drm_connector_helper_funcs.atomic_check() callback to change the
 * current VCPI allocation for the new state, but only when
 * &drm_crtc_state.mode_changed or &drm_crtc_state.connectors_changed is set
 * to ensure compatibility with userspace applications that still use the
 * legacy modesetting UAPI.
 *
 * Allocations set by this function are not checked against the bandwidth
 * restraints of @mgr until the driver calls drm_dp_mst_atomic_check().
 *
 * Additionally, it is OK to call this function multiple times on the same
 * @port as needed. It is not OK however, to call this function and
 * drm_dp_atomic_release_vcpi_slots() in the same atomic check phase.
 *
 * See also:
 * drm_dp_atomic_release_vcpi_slots()
 * drm_dp_mst_atomic_check()
 *
 * Returns:
 * Total slots in the atomic state assigned for this port, or a negative error
 * code if the port no longer exists
 */
4379int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
4380 struct drm_dp_mst_topology_mgr *mgr,
4381 struct drm_dp_mst_port *port, int pbn,
4382 int pbn_div)
4383{
4384 struct drm_dp_mst_topology_state *topology_state;
4385 struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
4386 int prev_slots, prev_bw, req_slots;
4387
4388 topology_state = drm_atomic_get_mst_topology_state(state, mgr);
4389 if (IS_ERR(topology_state))
4390 return PTR_ERR(topology_state);
4391
4392
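 /* Find the current allocation for this port, if any */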
4393 list_for_each_entry(pos, &topology_state->vcpis, next) {
4394 if (pos->port == port) {
4395 vcpi = pos;
4396 prev_slots = vcpi->vcpi;
4397 prev_bw = vcpi->pbn;
4398
 /*
  * This should never happen, unless the driver tries
  * releasing and allocating the same VCPI allocation,
  * which is an error
  */
4404 if (WARN_ON(!prev_slots)) {
4405 drm_err(mgr->dev,
4406 "cannot allocate and release VCPI on [MST PORT:%p] in the same state\n",
4407 port);
4408 return -EINVAL;
4409 }
4410
4411 break;
4412 }
4413 }
4414 if (!vcpi) {
4415 prev_slots = 0;
4416 prev_bw = 0;
4417 }
4418
4419 if (pbn_div <= 0)
4420 pbn_div = mgr->pbn_div;
4421
4422 req_slots = DIV_ROUND_UP(pbn, pbn_div);
4423
4424 drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n",
4425 port->connector->base.id, port->connector->name,
4426 port, prev_slots, req_slots);
4427 drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] PBN %d -> %d\n",
4428 port->connector->base.id, port->connector->name,
4429 port, prev_bw, pbn);
4430
4431
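 /* Add the new allocation to the state */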
4432 if (!vcpi) {
4433 vcpi = kzalloc(sizeof(*vcpi), GFP_KERNEL);
4434 if (!vcpi)
4435 return -ENOMEM;
4436
4437 drm_dp_mst_get_port_malloc(port);
4438 vcpi->port = port;
4439 list_add(&vcpi->next, &topology_state->vcpis);
4440 }
4441 vcpi->vcpi = req_slots;
4442 vcpi->pbn = pbn;
4443
4444 return req_slots;
4445}
4446EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
4447
/**
 * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots
 * @state: global atomic state
 * @mgr: MST topology manager for the port
 * @port: The port to release the VCPI slots from
 *
 * Releases any VCPI slots that have been allocated to a port in the atomic
 * state. Any atomic drivers which support MST must call this function in
 * their &drm_connector_helper_funcs.atomic_check() callback when the
 * connector will no longer have VCPI allocated (e.g. because its CRTC was
 * removed) when it had VCPI allocated in the previous atomic state.
 *
 * It is OK to call this even if @port has been removed from the system.
 * Additionally, it is OK to call this function multiple times on the same
 * @port as needed. It is not OK however, to call this function and
 * drm_dp_atomic_find_vcpi_slots() on the same @port in a single atomic check
 * phase.
 *
 * See also:
 * drm_dp_atomic_find_vcpi_slots()
 * drm_dp_mst_atomic_check()
 *
 * Returns:
 * 0 if all slots for this port were added back to
 * &drm_dp_mst_topology_state.avail_slots or negative error code
 */
4474int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
4475 struct drm_dp_mst_topology_mgr *mgr,
4476 struct drm_dp_mst_port *port)
4477{
4478 struct drm_dp_mst_topology_state *topology_state;
4479 struct drm_dp_vcpi_allocation *pos;
4480 bool found = false;
4481
4482 topology_state = drm_atomic_get_mst_topology_state(state, mgr);
4483 if (IS_ERR(topology_state))
4484 return PTR_ERR(topology_state);
4485
4486 list_for_each_entry(pos, &topology_state->vcpis, next) {
4487 if (pos->port == port) {
4488 found = true;
4489 break;
4490 }
4491 }
4492 if (WARN_ON(!found)) {
4493 drm_err(mgr->dev, "no VCPI for [MST PORT:%p] found in mst state %p\n",
4494 port, &topology_state->base);
4495 return -EINVAL;
4496 }
4497
4498 drm_dbg_atomic(mgr->dev, "[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi);
4499 if (pos->vcpi) {
4500 drm_dp_mst_put_port_malloc(port);
4501 pos->vcpi = 0;
4502 pos->pbn = 0;
4503 }
4504
4505 return 0;
4506}
4507EXPORT_SYMBOL(drm_dp_atomic_release_vcpi_slots);
4508
/**
 * drm_dp_mst_update_slots() - update the slot info depending on the DP encoding format
 * @mst_state: mst_state to update
 * @link_encoding_cap: the encoding format on the link
 */
4514void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap)
4515{
4516 if (link_encoding_cap == DP_CAP_ANSI_128B132B) {
4517 mst_state->total_avail_slots = 64;
4518 mst_state->start_slot = 0;
4519 } else {
4520 mst_state->total_avail_slots = 63;
4521 mst_state->start_slot = 1;
4522 }
4523
4524 DRM_DEBUG_KMS("%s encoding format on mst_state 0x%p\n",
4525 (link_encoding_cap == DP_CAP_ANSI_128B132B) ? "128b/132b":"8b/10b",
4526 mst_state);
4527}
4528EXPORT_SYMBOL(drm_dp_mst_update_slots);
4529
/**
 * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
 * @mgr: manager for this port
 * @port: port to allocate a virtual channel for.
 * @pbn: payload bandwidth number to request
 * @slots: returned number of slots for this PBN.
 */
4537bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
4538 struct drm_dp_mst_port *port, int pbn, int slots)
4539{
4540 int ret;
4541
4542 if (slots < 0)
4543 return false;
4544
4545 port = drm_dp_mst_topology_get_port_validated(mgr, port);
4546 if (!port)
4547 return false;
4548
4549 if (port->vcpi.vcpi > 0) {
4550 drm_dbg_kms(mgr->dev,
4551 "payload: vcpi %d already allocated for pbn %d - requested pbn %d\n",
4552 port->vcpi.vcpi, port->vcpi.pbn, pbn);
4553 if (pbn == port->vcpi.pbn) {
4554 drm_dp_mst_topology_put_port(port);
4555 return true;
4556 }
4557 }
4558
4559 ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
4560 if (ret) {
4561 drm_dbg_kms(mgr->dev, "failed to init vcpi slots=%d ret=%d\n",
4562 DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
4563 drm_dp_mst_topology_put_port(port);
4564 goto out;
4565 }
4566 drm_dbg_kms(mgr->dev, "initing vcpi for pbn=%d slots=%d\n", pbn, port->vcpi.num_slots);

	/* Keep port allocated until its payload has been removed */
4569 drm_dp_mst_get_port_malloc(port);
4570 drm_dp_mst_topology_put_port(port);
4571 return true;
4572out:
4573 return false;
4574}
4575EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
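
/*
 * Usage sketch (an assumption, not from this file): at commit time the
 * legacy payload helpers are driven in roughly this order:
 *
 *	drm_dp_mst_allocate_vcpi(mgr, port, pbn, slots);
 *	drm_dp_update_payload_part1(mgr, mst_state->start_slot);
 *	(enable the stream on the source hardware)
 *	drm_dp_check_act_status(mgr);
 *	drm_dp_update_payload_part2(mgr);
 */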
4576
4577int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
4578{
4579 int slots = 0;
4580
4581 port = drm_dp_mst_topology_get_port_validated(mgr, port);
4582 if (!port)
4583 return slots;
4584
4585 slots = port->vcpi.num_slots;
4586 drm_dp_mst_topology_put_port(port);
4587 return slots;
4588}
4589EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);

/**
 * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
 * @mgr: manager for this port
 * @port: unverified pointer to a port.
 *
 * This just resets the number of slots for the port's VCPI for later
 * programming.
 */
4598void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
4599{
	/*
	 * A port with VCPI will remain allocated until its VCPI is
	 * released, no verified ref needed
	 */

4605 port->vcpi.num_slots = 0;
4606}
4607EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
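
/*
 * Usage sketch (an assumption, not from this file): on stream disable the
 * slot count is typically zeroed and the payload table pushed before the
 * VCPI itself is released:
 *
 *	drm_dp_mst_reset_vcpi_slots(mgr, port);
 *	drm_dp_update_payload_part1(mgr, mst_state->start_slot);
 *	(disable the stream on the source hardware)
 *	drm_dp_mst_deallocate_vcpi(mgr, port);
 */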

/**
 * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
 * @mgr: manager for this port
 * @port: port to deallocate vcpi for
 *
 * This can be called unconditionally, regardless of whether
 * drm_dp_mst_allocate_vcpi() succeeded or not.
 */
4617void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
4618 struct drm_dp_mst_port *port)
4619{
4620 bool skip;
4621
4622 if (!port->vcpi.vcpi)
4623 return;
4624
4625 mutex_lock(&mgr->lock);
4626 skip = !drm_dp_mst_port_downstream_of_branch(port, mgr->mst_primary);
4627 mutex_unlock(&mgr->lock);
4628
4629 if (skip)
4630 return;
4631
4632 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
4633 port->vcpi.num_slots = 0;
4634 port->vcpi.pbn = 0;
4635 port->vcpi.aligned_pbn = 0;
4636 port->vcpi.vcpi = 0;
4637 drm_dp_mst_put_port_malloc(port);
4638}
4639EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
4640
4641static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
4642 int id, struct drm_dp_payload *payload)
4643{
4644 u8 payload_alloc[3], status;
4645 int ret;
4646 int retries = 0;
4647
4648 drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
4649 DP_PAYLOAD_TABLE_UPDATED);
4650
4651 payload_alloc[0] = id;
4652 payload_alloc[1] = payload->start_slot;
4653 payload_alloc[2] = payload->num_slots;
4654
4655 ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
4656 if (ret != 3) {
4657 drm_dbg_kms(mgr->dev, "failed to write payload allocation %d\n", ret);
4658 goto fail;
4659 }
4660
4661retry:
4662 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
4663 if (ret < 0) {
4664 drm_dbg_kms(mgr->dev, "failed to read payload table status %d\n", ret);
4665 goto fail;
4666 }
4667
4668 if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
4669 retries++;
4670 if (retries < 20) {
4671 usleep_range(10000, 20000);
4672 goto retry;
4673 }
4674 drm_dbg_kms(mgr->dev, "status not set after read payload table status %d\n",
4675 status);
4676 ret = -EINVAL;
4677 goto fail;
4678 }
4679 ret = 0;
4680fail:
4681 return ret;
4682}
4683
4684static int do_get_act_status(struct drm_dp_aux *aux)
4685{
4686 int ret;
4687 u8 status;
4688
4689 ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
4690 if (ret < 0)
4691 return ret;
4692
4693 return status;
4694}
4695

/**
 * drm_dp_check_act_status() - Polls for ACT handled status.
 * @mgr: manager to use
 *
 * Tries waiting for the MST hub to finish updating its payload table by
 * polling for the ACT handled bit for up to 3 seconds (yes, some hubs really
 * take that long).
 *
 * Returns:
 * 0 if the ACT was handled in time, negative error code on failure.
 */
4707int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
4708{
	/*
	 * There doesn't seem to be any recommended retry count or timeout in
	 * the MST specification. Since some hubs have been observed to take
	 * over 1 second to update their payload allocations under certain
	 * conditions, we use a rather large timeout value.
	 */
4715 const int timeout_ms = 3000;
4716 int ret, status;
4717
4718 ret = readx_poll_timeout(do_get_act_status, mgr->aux, status,
4719 status & DP_PAYLOAD_ACT_HANDLED || status < 0,
4720 200, timeout_ms * USEC_PER_MSEC);
4721 if (ret < 0 && status >= 0) {
4722 drm_err(mgr->dev, "Failed to get ACT after %dms, last status: %02x\n",
4723 timeout_ms, status);
4724 return -EINVAL;
4725 } else if (status < 0) {
		/*
		 * Failure here isn't unexpected - the hub may have
		 * just been unplugged
		 */
4730 drm_dbg_kms(mgr->dev, "Failed to read payload table status: %d\n", status);
4731 return status;
4732 }
4733
4734 return 0;
4735}
4736EXPORT_SYMBOL(drm_dp_check_act_status);
4737
/**
 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
 * @clock: dot clock (in kHz) for the mode
 * @bpp: bpp for the mode.
 * @dsc: DSC mode. If true, bpp has units of 1/16 of a bit per pixel
 *
 * This uses the formula in the spec to calculate the PBN value for a mode.
 */
4746int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc)
4747{
	/*
	 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
	 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
	 * common multiplier to render an integer PBN for all link rate/lane
	 * counts combinations
	 * calculate
	 * peak_kbps *= (1006/1000)
	 * peak_kbps *= (64/54)
	 * peak_kbps *= 8    convert to bytes
	 *
	 * If the bpp is in units of 1/16, further divide by 16. Put this
	 * factor in the numerator rather than the denominator to avoid
	 * integer overflow
	 */

4763 if (dsc)
4764 return DIV_ROUND_UP_ULL(mul_u32_u32(clock * (bpp / 16), 64 * 1006),
4765 8 * 54 * 1000 * 1000);
4766
4767 return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006),
4768 8 * 54 * 1000 * 1000);
4769}
4770EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
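
/*
 * Worked example (illustrative): a 297 MHz dot clock at 24 bpp without DSC
 * gives
 *
 *	PBN = DIV_ROUND_UP(297000 * 24 * 64 * 1006, 8 * 54 * 1000 * 1000)
 *	    = DIV_ROUND_UP(458929152000, 432000000) = 1063
 *
 * which matches the expected value in the DRM MST helper selftests.
 */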
4771
/* Kick the sideband TX worker to flush any queued down-requests */
4773static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
4774{
4775 queue_work(system_long_wq, &mgr->tx_work);
4776}
4777
/*
 * Helper function for parsing DP device types into convenient strings
 * for use with dp_mst_topology
 */
4782static const char *pdt_to_string(u8 pdt)
4783{
4784 switch (pdt) {
4785 case DP_PEER_DEVICE_NONE:
4786 return "NONE";
4787 case DP_PEER_DEVICE_SOURCE_OR_SST:
4788 return "SOURCE OR SST";
4789 case DP_PEER_DEVICE_MST_BRANCHING:
4790 return "MST BRANCHING";
4791 case DP_PEER_DEVICE_SST_SINK:
4792 return "SST SINK";
4793 case DP_PEER_DEVICE_DP_LEGACY_CONV:
4794 return "DP LEGACY CONV";
4795 default:
4796 return "ERR";
4797 }
4798}
4799
4800static void drm_dp_mst_dump_mstb(struct seq_file *m,
4801 struct drm_dp_mst_branch *mstb)
4802{
4803 struct drm_dp_mst_port *port;
4804 int tabs = mstb->lct;
	char prefix[16]; /* lct is at most 15, plus the terminating NUL */
4806 int i;
4807
4808 for (i = 0; i < tabs; i++)
4809 prefix[i] = '\t';
4810 prefix[i] = '\0';
4811
4812 seq_printf(m, "%smstb - [%p]: num_ports: %d\n", prefix, mstb, mstb->num_ports);
4813 list_for_each_entry(port, &mstb->ports, next) {
4814 seq_printf(m, "%sport %d - [%p] (%s - %s): ddps: %d, ldps: %d, sdp: %d/%d, fec: %s, conn: %p\n",
4815 prefix,
4816 port->port_num,
4817 port,
4818 port->input ? "input" : "output",
4819 pdt_to_string(port->pdt),
4820 port->ddps,
4821 port->ldps,
4822 port->num_sdp_streams,
4823 port->num_sdp_stream_sinks,
4824 port->fec_capable ? "true" : "false",
4825 port->connector);
4826 if (port->mstb)
4827 drm_dp_mst_dump_mstb(m, port->mstb);
4828 }
4829}
4830
4831#define DP_PAYLOAD_TABLE_SIZE 64
4832
4833static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
4834 char *buf)
4835{
4836 int i;
4837
4838 for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
4839 if (drm_dp_dpcd_read(mgr->aux,
4840 DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
4841 &buf[i], 16) != 16)
4842 return false;
4843 }
4844 return true;
4845}
4846
4847static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
4848 struct drm_dp_mst_port *port, char *name,
4849 int namelen)
4850{
4851 struct edid *mst_edid;
4852
4853 mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
4854 drm_edid_get_monitor_name(mst_edid, name, namelen);
4855 kfree(mst_edid);
4856}
4857
/**
 * drm_dp_mst_dump_topology(): dump topology to seq file.
 * @m: seq_file to dump output to
 * @mgr: manager to dump current topology for.
 *
 * helper to dump MST topology to a seq file for debugfs.
 */
4865void drm_dp_mst_dump_topology(struct seq_file *m,
4866 struct drm_dp_mst_topology_mgr *mgr)
4867{
4868 int i;
4869 struct drm_dp_mst_port *port;
4870
4871 mutex_lock(&mgr->lock);
4872 if (mgr->mst_primary)
4873 drm_dp_mst_dump_mstb(m, mgr->mst_primary);

	/* dump VCPIs */
4876 mutex_unlock(&mgr->lock);
4877
4878 mutex_lock(&mgr->payload_lock);
4879 seq_printf(m, "\n*** VCPI Info ***\n");
4880 seq_printf(m, "payload_mask: %lx, vcpi_mask: %lx, max_payloads: %d\n", mgr->payload_mask, mgr->vcpi_mask, mgr->max_payloads);
4881
4882 seq_printf(m, "\n| idx | port # | vcp_id | # slots | sink name |\n");
4883 for (i = 0; i < mgr->max_payloads; i++) {
4884 if (mgr->proposed_vcpis[i]) {
4885 char name[14];
4886
4887 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
4888 fetch_monitor_name(mgr, port, name, sizeof(name));
4889 seq_printf(m, "%10d%10d%10d%10d%20s\n",
4890 i,
4891 port->port_num,
4892 port->vcpi.vcpi,
4893 port->vcpi.num_slots,
4894 (*name != 0) ? name : "Unknown");
4895 } else
4896 seq_printf(m, "%6d - Unused\n", i);
4897 }
4898 seq_printf(m, "\n*** Payload Info ***\n");
4899 seq_printf(m, "| idx | state | start slot | # slots |\n");
4900 for (i = 0; i < mgr->max_payloads; i++) {
4901 seq_printf(m, "%10d%10d%15d%10d\n",
4902 i,
4903 mgr->payloads[i].payload_state,
4904 mgr->payloads[i].start_slot,
4905 mgr->payloads[i].num_slots);
4906 }
4907 mutex_unlock(&mgr->payload_lock);
4908
4909 seq_printf(m, "\n*** DPCD Info ***\n");
4910 mutex_lock(&mgr->lock);
4911 if (mgr->mst_primary) {
4912 u8 buf[DP_PAYLOAD_TABLE_SIZE];
4913 int ret;
4914
4915 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
		if (ret != DP_RECEIVER_CAP_SIZE) {
4917 seq_printf(m, "dpcd read failed\n");
4918 goto out;
4919 }
4920 seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
4921
4922 ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
		if (ret != 2) {
4924 seq_printf(m, "faux/mst read failed\n");
4925 goto out;
4926 }
4927 seq_printf(m, "faux/mst: %*ph\n", 2, buf);
4928
4929 ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
		if (ret != 1) {
4931 seq_printf(m, "mst ctrl read failed\n");
4932 goto out;
4933 }
4934 seq_printf(m, "mst ctrl: %*ph\n", 1, buf);

		/* dump the standard OUI branch header */
		ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
		if (ret != DP_BRANCH_OUI_HEADER_SIZE) {
4939 seq_printf(m, "branch oui read failed\n");
4940 goto out;
4941 }
4942 seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
4943
4944 for (i = 0x3; i < 0x8 && buf[i]; i++)
4945 seq_printf(m, "%c", buf[i]);
4946 seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
4947 buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
4948 if (dump_dp_payload_table(mgr, buf))
4949 seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf);
4950 }
4951
4952out:
4953 mutex_unlock(&mgr->lock);
4955}
4956EXPORT_SYMBOL(drm_dp_mst_dump_topology);
4957
4958static void drm_dp_tx_work(struct work_struct *work)
4959{
4960 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
4961
4962 mutex_lock(&mgr->qlock);
4963 if (!list_empty(&mgr->tx_msg_downq))
4964 process_single_down_tx_qlock(mgr);
4965 mutex_unlock(&mgr->qlock);
4966}
4967
4968static inline void
4969drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
4970{
4971 drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs);
4972
4973 if (port->connector) {
4974 drm_connector_unregister(port->connector);
4975 drm_connector_put(port->connector);
4976 }
4977
4978 drm_dp_mst_put_port_malloc(port);
4979}
4980
4981static inline void
4982drm_dp_delayed_destroy_mstb(struct drm_dp_mst_branch *mstb)
4983{
4984 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
4985 struct drm_dp_mst_port *port, *port_tmp;
4986 struct drm_dp_sideband_msg_tx *txmsg, *txmsg_tmp;
4987 bool wake_tx = false;
4988
4989 mutex_lock(&mgr->lock);
4990 list_for_each_entry_safe(port, port_tmp, &mstb->ports, next) {
4991 list_del(&port->next);
4992 drm_dp_mst_topology_put_port(port);
4993 }
4994 mutex_unlock(&mgr->lock);
4995
	/* drop any tx slot msg */
4997 mutex_lock(&mstb->mgr->qlock);
4998 list_for_each_entry_safe(txmsg, txmsg_tmp, &mgr->tx_msg_downq, next) {
4999 if (txmsg->dst != mstb)
5000 continue;
5001
5002 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
5003 list_del(&txmsg->next);
5004 wake_tx = true;
5005 }
5006 mutex_unlock(&mstb->mgr->qlock);
5007
5008 if (wake_tx)
5009 wake_up_all(&mstb->mgr->tx_waitq);
5010
5011 drm_dp_mst_put_mstb_malloc(mstb);
5012}
5013
5014static void drm_dp_delayed_destroy_work(struct work_struct *work)
5015{
5016 struct drm_dp_mst_topology_mgr *mgr =
5017 container_of(work, struct drm_dp_mst_topology_mgr,
5018 delayed_destroy_work);
5019 bool send_hotplug = false, go_again;
5020
	/*
	 * Not a regular list traverse as we have to drop the destroy
	 * connector lock before destroying the mstb/port, to avoid AB->BA
	 * ordering between this lock and the config mutex.
	 */
5026 do {
5027 go_again = false;
5028
5029 for (;;) {
5030 struct drm_dp_mst_branch *mstb;
5031
5032 mutex_lock(&mgr->delayed_destroy_lock);
5033 mstb = list_first_entry_or_null(&mgr->destroy_branch_device_list,
5034 struct drm_dp_mst_branch,
5035 destroy_next);
5036 if (mstb)
5037 list_del(&mstb->destroy_next);
5038 mutex_unlock(&mgr->delayed_destroy_lock);
5039
5040 if (!mstb)
5041 break;
5042
5043 drm_dp_delayed_destroy_mstb(mstb);
5044 go_again = true;
5045 }
5046
5047 for (;;) {
5048 struct drm_dp_mst_port *port;
5049
5050 mutex_lock(&mgr->delayed_destroy_lock);
5051 port = list_first_entry_or_null(&mgr->destroy_port_list,
5052 struct drm_dp_mst_port,
5053 next);
5054 if (port)
5055 list_del(&port->next);
5056 mutex_unlock(&mgr->delayed_destroy_lock);
5057
5058 if (!port)
5059 break;
5060
5061 drm_dp_delayed_destroy_port(port);
5062 send_hotplug = true;
5063 go_again = true;
5064 }
5065 } while (go_again);
5066
5067 if (send_hotplug)
5068 drm_kms_helper_hotplug_event(mgr->dev);
5069}
5070
5071static struct drm_private_state *
5072drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
5073{
5074 struct drm_dp_mst_topology_state *state, *old_state =
5075 to_dp_mst_topology_state(obj->state);
5076 struct drm_dp_vcpi_allocation *pos, *vcpi;
5077
5078 state = kmemdup(old_state, sizeof(*state), GFP_KERNEL);
5079 if (!state)
5080 return NULL;
5081
5082 __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
5083
5084 INIT_LIST_HEAD(&state->vcpis);
5085
5086 list_for_each_entry(pos, &old_state->vcpis, next) {
		/* Prune leftover freed VCPI allocations */
5088 if (!pos->vcpi)
5089 continue;
5090
5091 vcpi = kmemdup(pos, sizeof(*vcpi), GFP_KERNEL);
5092 if (!vcpi)
5093 goto fail;
5094
5095 drm_dp_mst_get_port_malloc(vcpi->port);
5096 list_add(&vcpi->next, &state->vcpis);
5097 }
5098
5099 return &state->base;
5100
5101fail:
5102 list_for_each_entry_safe(pos, vcpi, &state->vcpis, next) {
5103 drm_dp_mst_put_port_malloc(pos->port);
5104 kfree(pos);
5105 }
5106 kfree(state);
5107
5108 return NULL;
5109}
5110
5111static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
5112 struct drm_private_state *state)
5113{
5114 struct drm_dp_mst_topology_state *mst_state =
5115 to_dp_mst_topology_state(state);
5116 struct drm_dp_vcpi_allocation *pos, *tmp;
5117
5118 list_for_each_entry_safe(pos, tmp, &mst_state->vcpis, next) {
		/* We only keep references to ports with non-zero VCPIs */
5120 if (pos->vcpi)
5121 drm_dp_mst_put_port_malloc(pos->port);
5122 kfree(pos);
5123 }
5124
5125 kfree(mst_state);
5126}
5127
5128static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
5129 struct drm_dp_mst_branch *branch)
5130{
5131 while (port->parent) {
5132 if (port->parent == branch)
5133 return true;
5134
5135 if (port->parent->port_parent)
5136 port = port->parent->port_parent;
5137 else
5138 break;
5139 }
5140 return false;
5141}
5142
5143static int
5144drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
5145 struct drm_dp_mst_topology_state *state);
5146
5147static int
5148drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb,
5149 struct drm_dp_mst_topology_state *state)
5150{
5151 struct drm_dp_vcpi_allocation *vcpi;
5152 struct drm_dp_mst_port *port;
5153 int pbn_used = 0, ret;
5154 bool found = false;
5155
	/* Check that we have at least one port in our state that's downstream
	 * of this branch, otherwise we can skip this branch
	 */
5159 list_for_each_entry(vcpi, &state->vcpis, next) {
5160 if (!vcpi->pbn ||
5161 !drm_dp_mst_port_downstream_of_branch(vcpi->port, mstb))
5162 continue;
5163
5164 found = true;
5165 break;
5166 }
5167 if (!found)
5168 return 0;
5169
5170 if (mstb->port_parent)
5171 drm_dbg_atomic(mstb->mgr->dev,
5172 "[MSTB:%p] [MST PORT:%p] Checking bandwidth limits on [MSTB:%p]\n",
5173 mstb->port_parent->parent, mstb->port_parent, mstb);
5174 else
5175 drm_dbg_atomic(mstb->mgr->dev, "[MSTB:%p] Checking bandwidth limits\n", mstb);
5176
5177 list_for_each_entry(port, &mstb->ports, next) {
5178 ret = drm_dp_mst_atomic_check_port_bw_limit(port, state);
5179 if (ret < 0)
5180 return ret;
5181
5182 pbn_used += ret;
5183 }
5184
5185 return pbn_used;
5186}
5187
5188static int
5189drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
5190 struct drm_dp_mst_topology_state *state)
5191{
5192 struct drm_dp_vcpi_allocation *vcpi;
5193 int pbn_used = 0;
5194
5195 if (port->pdt == DP_PEER_DEVICE_NONE)
5196 return 0;
5197
5198 if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
5199 bool found = false;
5200
5201 list_for_each_entry(vcpi, &state->vcpis, next) {
5202 if (vcpi->port != port)
5203 continue;
5204 if (!vcpi->pbn)
5205 return 0;
5206
5207 found = true;
5208 break;
5209 }
5210 if (!found)
5211 return 0;
5212
		/*
		 * This could happen if the sink deasserted its HPD line, but
		 * the branch device still reports it as attached (PDT != NONE).
		 */
5217 if (!port->full_pbn) {
5218 drm_dbg_atomic(port->mgr->dev,
5219 "[MSTB:%p] [MST PORT:%p] no BW available for the port\n",
5220 port->parent, port);
5221 return -EINVAL;
5222 }
5223
5224 pbn_used = vcpi->pbn;
5225 } else {
5226 pbn_used = drm_dp_mst_atomic_check_mstb_bw_limit(port->mstb,
5227 state);
5228 if (pbn_used <= 0)
5229 return pbn_used;
5230 }
5231
5232 if (pbn_used > port->full_pbn) {
5233 drm_dbg_atomic(port->mgr->dev,
5234 "[MSTB:%p] [MST PORT:%p] required PBN of %d exceeds port limit of %d\n",
5235 port->parent, port, pbn_used, port->full_pbn);
5236 return -ENOSPC;
5237 }
5238
5239 drm_dbg_atomic(port->mgr->dev, "[MSTB:%p] [MST PORT:%p] uses %d out of %d PBN\n",
5240 port->parent, port, pbn_used, port->full_pbn);
5241
5242 return pbn_used;
5243}
5244
5245static inline int
5246drm_dp_mst_atomic_check_vcpi_alloc_limit(struct drm_dp_mst_topology_mgr *mgr,
5247 struct drm_dp_mst_topology_state *mst_state)
5248{
5249 struct drm_dp_vcpi_allocation *vcpi;
5250 int avail_slots = mst_state->total_avail_slots, payload_count = 0;
5251
5252 list_for_each_entry(vcpi, &mst_state->vcpis, next) {
		/* Releasing VCPI is always OK-even if the port is gone */
5254 if (!vcpi->vcpi) {
5255 drm_dbg_atomic(mgr->dev, "[MST PORT:%p] releases all VCPI slots\n",
5256 vcpi->port);
5257 continue;
5258 }
5259
5260 drm_dbg_atomic(mgr->dev, "[MST PORT:%p] requires %d vcpi slots\n",
5261 vcpi->port, vcpi->vcpi);
5262
5263 avail_slots -= vcpi->vcpi;
5264 if (avail_slots < 0) {
5265 drm_dbg_atomic(mgr->dev,
5266 "[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)\n",
5267 vcpi->port, mst_state, avail_slots + vcpi->vcpi);
5268 return -ENOSPC;
5269 }
5270
5271 if (++payload_count > mgr->max_payloads) {
5272 drm_dbg_atomic(mgr->dev,
5273 "[MST MGR:%p] state %p has too many payloads (max=%d)\n",
5274 mgr, mst_state, mgr->max_payloads);
5275 return -EINVAL;
5276 }
5277 }
5278 drm_dbg_atomic(mgr->dev, "[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n",
5279 mgr, mst_state, avail_slots, mst_state->total_avail_slots - avail_slots);
5280
5281 return 0;
5282}
5283

/**
 * drm_dp_mst_add_affected_dsc_crtcs
 * @state: atomic state
 * @mgr: MST topology manager for the port
 *
 * Whenever there is a change in MST topology the DSC configuration may
 * change, so every CRTC driving a DSC-capable MST port has to be marked
 * for a full modeset.
 *
 * Returns:
 * 0 if all affected CRTCs were added, negative error code on failure
 */
5297int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr)
5298{
5299 struct drm_dp_mst_topology_state *mst_state;
5300 struct drm_dp_vcpi_allocation *pos;
5301 struct drm_connector *connector;
5302 struct drm_connector_state *conn_state;
5303 struct drm_crtc *crtc;
5304 struct drm_crtc_state *crtc_state;
5305
5306 mst_state = drm_atomic_get_mst_topology_state(state, mgr);
5307
5308 if (IS_ERR(mst_state))
5309 return -EINVAL;
5310
5311 list_for_each_entry(pos, &mst_state->vcpis, next) {
5312
5313 connector = pos->port->connector;
5314
5315 if (!connector)
5316 return -EINVAL;
5317
5318 conn_state = drm_atomic_get_connector_state(state, connector);
5319
5320 if (IS_ERR(conn_state))
5321 return PTR_ERR(conn_state);
5322
5323 crtc = conn_state->crtc;
5324
5325 if (!crtc)
5326 continue;
5327
5328 if (!drm_dp_mst_dsc_aux_for_port(pos->port))
5329 continue;
5330
5331 crtc_state = drm_atomic_get_crtc_state(mst_state->base.state, crtc);
5332
5333 if (IS_ERR(crtc_state))
5334 return PTR_ERR(crtc_state);
5335
5336 drm_dbg_atomic(mgr->dev, "[MST MGR:%p] Setting mode_changed flag on CRTC %p\n",
5337 mgr, crtc);
5338
5339 crtc_state->mode_changed = true;
5340 }
5341 return 0;
5342}
5343EXPORT_SYMBOL(drm_dp_mst_add_affected_dsc_crtcs);
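
/*
 * Usage sketch (an assumption, not from this file): called once per MST
 * manager early in a driver's &drm_mode_config_funcs.atomic_check hook,
 * before bandwidth is validated:
 *
 *	ret = drm_dp_mst_add_affected_dsc_crtcs(state, mgr);
 *	if (ret)
 *		return ret;
 */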

/**
 * drm_dp_mst_atomic_enable_dsc - Set DSC Enable Flag to On/Off
 * @state: Pointer to the new drm_atomic_state
 * @port: Pointer to the affected MST Port
 * @pbn: Newly recalculated bw required for link with DSC enabled
 * @pbn_div: Divider to calculate correct number of pbn per slot
 * @enable: Boolean flag to enable or disable DSC on the port
 *
 * This function enables DSC on the given Port
 * by recalculating its vcpi from the pbn provided,
 * and sets the dsc_enable flag to keep track of which
 * ports have DSC enabled
 *
 * Returns:
 * The number of VCPI slots now allocated on the port, or a negative error
 * code on failure.
 */
5359int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
5360 struct drm_dp_mst_port *port,
5361 int pbn, int pbn_div,
5362 bool enable)
5363{
5364 struct drm_dp_mst_topology_state *mst_state;
5365 struct drm_dp_vcpi_allocation *pos;
5366 bool found = false;
5367 int vcpi = 0;
5368
5369 mst_state = drm_atomic_get_mst_topology_state(state, port->mgr);
5370
5371 if (IS_ERR(mst_state))
5372 return PTR_ERR(mst_state);
5373
5374 list_for_each_entry(pos, &mst_state->vcpis, next) {
5375 if (pos->port == port) {
5376 found = true;
5377 break;
5378 }
5379 }
5380
5381 if (!found) {
5382 drm_dbg_atomic(state->dev,
5383 "[MST PORT:%p] Couldn't find VCPI allocation in mst state %p\n",
5384 port, mst_state);
5385 return -EINVAL;
5386 }
5387
5388 if (pos->dsc_enabled == enable) {
5389 drm_dbg_atomic(state->dev,
5390 "[MST PORT:%p] DSC flag is already set to %d, returning %d VCPI slots\n",
5391 port, enable, pos->vcpi);
5392 vcpi = pos->vcpi;
5393 }
5394
5395 if (enable) {
5396 vcpi = drm_dp_atomic_find_vcpi_slots(state, port->mgr, port, pbn, pbn_div);
5397 drm_dbg_atomic(state->dev,
5398 "[MST PORT:%p] Enabling DSC flag, reallocating %d VCPI slots on the port\n",
5399 port, vcpi);
5400 if (vcpi < 0)
5401 return -EINVAL;
5402 }
5403
5404 pos->dsc_enabled = enable;
5405
5406 return vcpi;
5407}
5408EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
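
/*
 * Usage sketch (an assumption, not from this file): when enabling DSC the
 * PBN is first recomputed with the compressed bpp (in 1/16 bpp units; the
 * dsc_bpp_x16 name below is hypothetical), then the allocation is refreshed:
 *
 *	pbn = drm_dp_calc_pbn_mode(mode->clock, dsc_bpp_x16, true);
 *	slots = drm_dp_mst_atomic_enable_dsc(state, port, pbn, 0, true);
 *	if (slots < 0)
 *		return slots;
 */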
5409
/**
 * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
 * atomic update is valid
 * @state: Pointer to the new &struct drm_dp_mst_topology_state
 *
 * Checks the given topology state for an atomic update to ensure that it's
 * valid. This includes checking whether there's enough bandwidth to support
 * the new VCPI allocations in the given topology.
 *
 * Any atomic drivers supporting DP MST must make sure to call this after
 * checking the rest of their state in their
 * &drm_mode_config_funcs.atomic_check() callback.
 *
 * See also:
 * drm_dp_atomic_find_vcpi_slots()
 * drm_dp_atomic_release_vcpi_slots()
 *
 * Returns:
 * 0 if the new state is valid, negative error code otherwise.
 */
5430int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
5431{
5432 struct drm_dp_mst_topology_mgr *mgr;
5433 struct drm_dp_mst_topology_state *mst_state;
5434 int i, ret = 0;
5435
5436 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
5437 if (!mgr->mst_state)
5438 continue;
5439
5440 ret = drm_dp_mst_atomic_check_vcpi_alloc_limit(mgr, mst_state);
5441 if (ret)
5442 break;
5443
5444 mutex_lock(&mgr->lock);
5445 ret = drm_dp_mst_atomic_check_mstb_bw_limit(mgr->mst_primary,
5446 mst_state);
5447 mutex_unlock(&mgr->lock);
5448 if (ret < 0)
5449 break;
5450 else
5451 ret = 0;
5452 }
5453
5454 return ret;
5455}
5456EXPORT_SYMBOL(drm_dp_mst_atomic_check);
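
/*
 * Usage sketch (an assumption, not from this file): drivers normally chain
 * this after the core helpers in &drm_mode_config_funcs.atomic_check:
 *
 *	static int example_atomic_check(struct drm_device *dev,
 *					struct drm_atomic_state *state)
 *	{
 *		int ret;
 *
 *		ret = drm_atomic_helper_check(dev, state);
 *		if (ret)
 *			return ret;
 *
 *		return drm_dp_mst_atomic_check(state);
 *	}
 */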
5457
5458const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = {
5459 .atomic_duplicate_state = drm_dp_mst_duplicate_state,
5460 .atomic_destroy_state = drm_dp_mst_destroy_state,
5461};
5462EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
5463

/**
 * drm_atomic_get_mst_topology_state: get MST topology state
 * @state: global atomic state
 * @mgr: MST topology manager, also the private object in this case
 *
 * This function wraps drm_atomic_get_private_obj_state() passing in the MST
 * atomic state vtable so that the private object state returned is that of a
 * MST topology object. Also, drm_atomic_get_private_obj_state() expects the
 * caller to take care of the locking, so the caller must hold the
 * connection_mutex.
 *
 * RETURNS:
 * The MST topology state or error pointer.
 */
5479struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
5480 struct drm_dp_mst_topology_mgr *mgr)
5481{
5482 return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
5483}
5484EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
5485
/**
 * drm_dp_mst_topology_mgr_init - initialise a topology manager
 * @mgr: manager struct to initialise
 * @dev: device providing this structure - for i2c addition.
 * @aux: DP helper aux channel to talk to this device
 * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
 * @max_payloads: maximum number of payloads this GPU can source
 * @max_lane_count: maximum number of lanes this GPU supports
 * @max_link_rate: maximum link rate per lane this GPU supports in kHz
 * @conn_base_id: the connector object ID the MST device is connected to.
 *
 * Return 0 for success, or negative error code on failure
 */
5499int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
5500 struct drm_device *dev, struct drm_dp_aux *aux,
5501 int max_dpcd_transaction_bytes, int max_payloads,
5502 int max_lane_count, int max_link_rate,
5503 int conn_base_id)
5504{
5505 struct drm_dp_mst_topology_state *mst_state;
5506
5507 mutex_init(&mgr->lock);
5508 mutex_init(&mgr->qlock);
5509 mutex_init(&mgr->payload_lock);
5510 mutex_init(&mgr->delayed_destroy_lock);
5511 mutex_init(&mgr->up_req_lock);
5512 mutex_init(&mgr->probe_lock);
5513#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
5514 mutex_init(&mgr->topology_ref_history_lock);
5515 stack_depot_init();
5516#endif
5517 INIT_LIST_HEAD(&mgr->tx_msg_downq);
5518 INIT_LIST_HEAD(&mgr->destroy_port_list);
5519 INIT_LIST_HEAD(&mgr->destroy_branch_device_list);
5520 INIT_LIST_HEAD(&mgr->up_req_list);
5521
	/*
	 * delayed_destroy_work will be queued on a dedicated WQ, so that any
	 * requeuing will be also flushed when deiniting the topology manager.
	 */
5526 mgr->delayed_destroy_wq = alloc_ordered_workqueue("drm_dp_mst_wq", 0);
5527 if (mgr->delayed_destroy_wq == NULL)
5528 return -ENOMEM;
5529
5530 INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
5531 INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
5532 INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work);
5533 INIT_WORK(&mgr->up_req_work, drm_dp_mst_up_req_work);
5534 init_waitqueue_head(&mgr->tx_waitq);
5535 mgr->dev = dev;
5536 mgr->aux = aux;
5537 mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
5538 mgr->max_payloads = max_payloads;
5539 mgr->max_lane_count = max_lane_count;
5540 mgr->max_link_rate = max_link_rate;
5541 mgr->conn_base_id = conn_base_id;
5542 if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
5543 max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
5544 return -EINVAL;
5545 mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
5546 if (!mgr->payloads)
5547 return -ENOMEM;
5548 mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
5549 if (!mgr->proposed_vcpis)
5550 return -ENOMEM;
5551 set_bit(0, &mgr->payload_mask);
5552
5553 mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
5554 if (mst_state == NULL)
5555 return -ENOMEM;
5556
5557 mst_state->total_avail_slots = 63;
5558 mst_state->start_slot = 1;
5559
5560 mst_state->mgr = mgr;
5561 INIT_LIST_HEAD(&mst_state->vcpis);
5562
5563 drm_atomic_private_obj_init(dev, &mgr->base,
5564 &mst_state->base,
5565 &drm_dp_mst_topology_state_funcs);
5566
5567 return 0;
5568}
5569EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
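
/*
 * Usage sketch (an assumption; the values are examples only): set up once
 * when the connector is created, e.g. for a sink with a 16-byte DPCD
 * transaction limit and up to 6 payloads:
 *
 *	ret = drm_dp_mst_topology_mgr_init(&example_mgr, dev, &example_aux,
 *					   16, 6, max_lanes, max_rate_khz,
 *					   connector->base.id);
 */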
5570
/**
 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
 * @mgr: manager to destroy
 */
5575void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
5576{
5577 drm_dp_mst_topology_mgr_set_mst(mgr, false);
5578 flush_work(&mgr->work);
5579
5580 if (mgr->delayed_destroy_wq) {
5581 destroy_workqueue(mgr->delayed_destroy_wq);
5582 mgr->delayed_destroy_wq = NULL;
5583 }
5584 mutex_lock(&mgr->payload_lock);
5585 kfree(mgr->payloads);
5586 mgr->payloads = NULL;
5587 kfree(mgr->proposed_vcpis);
5588 mgr->proposed_vcpis = NULL;
5589 mutex_unlock(&mgr->payload_lock);
5590 mgr->dev = NULL;
5591 mgr->aux = NULL;
5592 drm_atomic_private_obj_fini(&mgr->base);
5593 mgr->funcs = NULL;
5594
5595 mutex_destroy(&mgr->delayed_destroy_lock);
5596 mutex_destroy(&mgr->payload_lock);
5597 mutex_destroy(&mgr->qlock);
5598 mutex_destroy(&mgr->lock);
5599 mutex_destroy(&mgr->up_req_lock);
5600 mutex_destroy(&mgr->probe_lock);
5601#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
5602 mutex_destroy(&mgr->topology_ref_history_lock);
5603#endif
5604}
5605EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
5606
5607static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num)
5608{
5609 int i;
5610
5611 if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)
5612 return false;
5613
5614 for (i = 0; i < num - 1; i++) {
5615 if (msgs[i].flags & I2C_M_RD ||
5616 msgs[i].len > 0xff)
5617 return false;
5618 }
5619
5620 return msgs[num - 1].flags & I2C_M_RD &&
5621 msgs[num - 1].len <= 0xff;
5622}
5623
5624static bool remote_i2c_write_ok(const struct i2c_msg msgs[], int num)
5625{
5626 int i;
5627
5628 for (i = 0; i < num - 1; i++) {
5629 if (msgs[i].flags & I2C_M_RD || !(msgs[i].flags & I2C_M_STOP) ||
5630 msgs[i].len > 0xff)
5631 return false;
5632 }
5633
5634 return !(msgs[num - 1].flags & I2C_M_RD) && msgs[num - 1].len <= 0xff;
5635}
5636
5637static int drm_dp_mst_i2c_read(struct drm_dp_mst_branch *mstb,
5638 struct drm_dp_mst_port *port,
5639 struct i2c_msg *msgs, int num)
5640{
5641 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
5642 unsigned int i;
5643 struct drm_dp_sideband_msg_req_body msg;
5644 struct drm_dp_sideband_msg_tx *txmsg = NULL;
5645 int ret;
5646
5647 memset(&msg, 0, sizeof(msg));
5648 msg.req_type = DP_REMOTE_I2C_READ;
5649 msg.u.i2c_read.num_transactions = num - 1;
5650 msg.u.i2c_read.port_number = port->port_num;
5651 for (i = 0; i < num - 1; i++) {
5652 msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
5653 msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
5654 msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
5655 msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP);
5656 }
5657 msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
5658 msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
5659
5660 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
5661 if (!txmsg) {
5662 ret = -ENOMEM;
5663 goto out;
5664 }
5665
5666 txmsg->dst = mstb;
5667 drm_dp_encode_sideband_req(&msg, txmsg);
5668
5669 drm_dp_queue_down_tx(mgr, txmsg);
5670
5671 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
5672 if (ret > 0) {
5673
5674 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
5675 ret = -EREMOTEIO;
5676 goto out;
5677 }
5678 if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
5679 ret = -EIO;
5680 goto out;
5681 }
5682 memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
5683 ret = num;
5684 }
5685out:
5686 kfree(txmsg);
5687 return ret;
5688}
5689
5690static int drm_dp_mst_i2c_write(struct drm_dp_mst_branch *mstb,
5691 struct drm_dp_mst_port *port,
5692 struct i2c_msg *msgs, int num)
5693{
5694 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
5695 unsigned int i;
5696 struct drm_dp_sideband_msg_req_body msg;
5697 struct drm_dp_sideband_msg_tx *txmsg = NULL;
5698 int ret;
5699
5700 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
5701 if (!txmsg) {
5702 ret = -ENOMEM;
5703 goto out;
5704 }
5705 for (i = 0; i < num; i++) {
5706 memset(&msg, 0, sizeof(msg));
5707 msg.req_type = DP_REMOTE_I2C_WRITE;
5708 msg.u.i2c_write.port_number = port->port_num;
5709 msg.u.i2c_write.write_i2c_device_id = msgs[i].addr;
5710 msg.u.i2c_write.num_bytes = msgs[i].len;
5711 msg.u.i2c_write.bytes = msgs[i].buf;
5712
5713 memset(txmsg, 0, sizeof(*txmsg));
5714 txmsg->dst = mstb;
5715
5716 drm_dp_encode_sideband_req(&msg, txmsg);
5717 drm_dp_queue_down_tx(mgr, txmsg);
5718
5719 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
5720 if (ret > 0) {
5721 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
5722 ret = -EREMOTEIO;
5723 goto out;
5724 }
5725 } else {
5726 goto out;
5727 }
5728 }
5729 ret = num;
5730out:
5731 kfree(txmsg);
5732 return ret;
5733}
5734
/* I2C device */
5736static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter,
5737 struct i2c_msg *msgs, int num)
5738{
5739 struct drm_dp_aux *aux = adapter->algo_data;
5740 struct drm_dp_mst_port *port =
5741 container_of(aux, struct drm_dp_mst_port, aux);
5742 struct drm_dp_mst_branch *mstb;
5743 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
5744 int ret;
5745
5746 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
5747 if (!mstb)
5748 return -EREMOTEIO;
5749
5750 if (remote_i2c_read_ok(msgs, num)) {
5751 ret = drm_dp_mst_i2c_read(mstb, port, msgs, num);
5752 } else if (remote_i2c_write_ok(msgs, num)) {
5753 ret = drm_dp_mst_i2c_write(mstb, port, msgs, num);
5754 } else {
5755 drm_dbg_kms(mgr->dev, "Unsupported I2C transaction for MST device\n");
5756 ret = -EIO;
5757 }
5758
5759 drm_dp_mst_topology_put_mstb(mstb);
5760 return ret;
5761}
5762
5763static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
5764{
5765 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
5766 I2C_FUNC_SMBUS_READ_BLOCK_DATA |
5767 I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
5768 I2C_FUNC_10BIT_ADDR;
5769}
5770
5771static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
5772 .functionality = drm_dp_mst_i2c_functionality,
5773 .master_xfer = drm_dp_mst_i2c_xfer,
5774};
5775
/**
 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
 * @port: The port to add the I2C bus on
 *
 * Returns 0 on success or a negative error code on failure.
 */
5782static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port)
5783{
5784 struct drm_dp_aux *aux = &port->aux;
5785 struct device *parent_dev = port->mgr->dev->dev;
5786
5787 aux->ddc.algo = &drm_dp_mst_i2c_algo;
5788 aux->ddc.algo_data = aux;
5789 aux->ddc.retries = 3;
5790
5791 aux->ddc.class = I2C_CLASS_DDC;
5792 aux->ddc.owner = THIS_MODULE;
5793
5794 aux->ddc.dev.parent = parent_dev;
5795 aux->ddc.dev.of_node = parent_dev->of_node;
5796
5797 strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(parent_dev),
5798 sizeof(aux->ddc.name));
5799
5800 return i2c_add_adapter(&aux->ddc);
5801}
5802
/**
 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
 * @port: The port to remove the I2C bus from
 */
5807static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port)
5808{
5809 i2c_del_adapter(&port->aux.ddc);
5810}
5811
/**
 * drm_dp_mst_is_virtual_dpcd() - Is the given port a virtual DP Peer Device
 * @port: The port to check
 *
 * A single physical MST hub object can be represented in the topology
 * by multiple branches, with virtual ports between those branches.
 *
 * As of DP1.4, an MST hub with internal (virtual) ports must expose
 * certain DPCD registers over those ports. See sections 2.6.1.1.1
 * and 2.6.1.1.2 of the DP 1.4 specification.
 *
 * May acquire mgr->lock
 *
 * Returns:
 * true if the port is a virtual DP peer device, false otherwise
 */
5828static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port)
5829{
5830 struct drm_dp_mst_port *downstream_port;
5831
5832 if (!port || port->dpcd_rev < DP_DPCD_REV_14)
5833 return false;

	/* Virtual DP Sink (Internal Display Panel) */
5836 if (port->port_num >= 8)
5837 return true;

	/* DP-to-HDMI Protocol Converter */
5840 if (port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV &&
5841 !port->mcs &&
5842 port->ldps)
5843 return true;

	/* DP-to-DP */
5846 mutex_lock(&port->mgr->lock);
5847 if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
5848 port->mstb &&
5849 port->mstb->num_ports == 2) {
5850 list_for_each_entry(downstream_port, &port->mstb->ports, next) {
5851 if (downstream_port->pdt == DP_PEER_DEVICE_SST_SINK &&
5852 !downstream_port->input) {
5853 mutex_unlock(&port->mgr->lock);
5854 return true;
5855 }
5856 }
5857 }
5858 mutex_unlock(&port->mgr->lock);
5859
5860 return false;
5861}
5862
/**
 * drm_dp_mst_dsc_aux_for_port() - Find the correct aux for DSC
 * @port: The port to check. A leaf of the MST tree with an attached display.
 *
 * Depending on the situation, DSC may be enabled via the endpoint aux,
 * the immediately upstream aux, or the connector's physical aux.
 *
 * This is both the correct aux to read DSC capabilities from, and the
 * correct aux to enable DSC on.
 *
 * This operation can be expensive (up to four aux reads), so
 * the caller should cache the return.
 *
 * Returns:
 * NULL if DSC cannot be enabled on this port, otherwise the aux device
 */
5879struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
5880{
5881 struct drm_dp_mst_port *immediate_upstream_port;
5882 struct drm_dp_mst_port *fec_port;
5883 struct drm_dp_desc desc = {};
5884 u8 endpoint_fec;
5885 u8 endpoint_dsc;
5886
5887 if (!port)
5888 return NULL;
5889
5890 if (port->parent->port_parent)
5891 immediate_upstream_port = port->parent->port_parent;
5892 else
5893 immediate_upstream_port = NULL;
5894
5895 fec_port = immediate_upstream_port;
5896 while (fec_port) {
		/*
		 * Each physical link (i.e. not a virtual port) between the
		 * output port and the primary device must support FEC
		 */
5901 if (!drm_dp_mst_is_virtual_dpcd(fec_port) &&
5902 !fec_port->fec_capable)
5903 return NULL;
5904
5905 fec_port = fec_port->parent->port_parent;
5906 }

	/* DP-to-DP peer device */
5909 if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
5910 u8 upstream_dsc;
5911
5912 if (drm_dp_dpcd_read(&port->aux,
5913 DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
5914 return NULL;
5915 if (drm_dp_dpcd_read(&port->aux,
5916 DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
5917 return NULL;
5918 if (drm_dp_dpcd_read(&immediate_upstream_port->aux,
5919 DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
5920 return NULL;

		/* Endpoint decompression with DP-to-DP peer device */
		if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
		    (endpoint_fec & DP_FEC_CAPABLE) &&
		    (upstream_dsc & DP_DSC_PASSTHROUGH_IS_SUPPORTED))
5926 return &port->aux;

		/* Virtual DPCD decompression with DP-to-DP peer device */
5929 return &immediate_upstream_port->aux;
5930 }

	/* Virtual DPCD decompression with GPU */
5933 if (drm_dp_mst_is_virtual_dpcd(port))
5934 return &port->aux;
5935
	/*
	 * Synaptics quirk
	 * Applies to ports for which:
	 * - Physical aux has Synaptics OUI
	 * - DPv1.4 or higher
	 * - Port is on primary branch device
	 * - Not a VGA adapter (DP_DWN_STRM_PORT_TYPE_ANALOG)
	 */
5944 if (drm_dp_read_desc(port->mgr->aux, &desc, true))
5945 return NULL;
5946
5947 if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
5948 port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
5949 port->parent == port->mgr->mst_primary) {
5950 u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];
5951
5952 if (drm_dp_read_dpcd_caps(port->mgr->aux, dpcd_ext) < 0)
5953 return NULL;
5954
5955 if ((dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT) &&
5956 ((dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK)
5957 != DP_DWN_STRM_PORT_TYPE_ANALOG))
5958 return port->mgr->aux;
5959 }
5960
	/*
	 * The check below verifies if the MST sink
	 * connected to the GPU is capable of DSC -
	 * therefore the GPU will have to be the DSC decompressor.
	 */
5967 if (drm_dp_dpcd_read(&port->aux,
5968 DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
5969 return NULL;
5970 if (drm_dp_dpcd_read(&port->aux,
5971 DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
5972 return NULL;
5973 if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
5974 (endpoint_fec & DP_FEC_CAPABLE))
5975 return &port->aux;
5976
5977 return NULL;
5978}
5979EXPORT_SYMBOL(drm_dp_mst_dsc_aux_for_port);
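
/*
 * Usage sketch (an assumption, not from this file): the result is typically
 * cached on the connector and then used both to probe and to enable DSC:
 *
 *	u8 dsc_caps[DP_DSC_RECEIVER_CAP_SIZE];
 *	struct drm_dp_aux *dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
 *
 *	if (dsc_aux &&
 *	    drm_dp_dpcd_read(dsc_aux, DP_DSC_SUPPORT, dsc_caps,
 *			     DP_DSC_RECEIVER_CAP_SIZE) >= 0)
 *		(parse caps and program decompression through dsc_aux)
 */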
5980